[Libreoffice-commits] online.git: common/Authorization.cpp common/Util.cpp common/Util.hpp test/WhiteBoxTests.cpp

Jan Holesovsky (via logerrit) logerrit at kemper.freedesktop.org
Fri Jun 19 09:49:03 UTC 2020


 common/Authorization.cpp |    5 ++-
 common/Util.cpp          |   45 +++++++++++++++++++++++++++++++++
 common/Util.hpp          |   27 +++-----------------
 test/WhiteBoxTests.cpp   |   63 +++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 116 insertions(+), 24 deletions(-)

New commits:
commit 2c246eed85e065f07b756ec065d12b47cdac5f17
Author:     Jan Holesovsky <kendy at collabora.com>
AuthorDate: Thu Jun 18 11:07:54 2020 +0200
Commit:     Andras Timar <andras.timar at collabora.com>
CommitDate: Fri Jun 19 11:48:44 2020 +0200

    Sanitize the access_header.
    
    The access_header can contain a lot of nonsense, like surrounding
    whitespace or additional \n's or \r's.  We used to sanitize that, but
    regressed in e95413d151c3f0d9476063c8520dd477342ed235, where "tokenize
    by any of \n and \r" was mistakenly replaced with "tokenize by the
    string '\n\r'".
    
    Unfortunately the unit tests didn't uncover the regression, and later
    refactorings of the related code obscured it even further.
    
    Change-Id: Ie2bf950d0426292770b599e40ee2401101162ff2
    Reviewed-on: https://gerrit.libreoffice.org/c/online/+/96638
    Tested-by: Jenkins
    Tested-by: Jenkins CollaboraOffice <jenkinscollaboraoffice at gmail.com>
    Reviewed-by: Andras Timar <andras.timar at collabora.com>
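
The core of the regression is the difference between searching for the
literal two-character string "\n\r" and searching for any one of the
characters '\n' or '\r'.  A minimal standalone sketch (the header blob below
is made up for illustration, not taken from the patch):

    #include <iostream>
    #include <string>

    int main()
    {
        // Hypothetical value similar to what Authorization::_data may hold.
        const std::string data = "Authorization: Basic abc\nX-Something-Custom: Huh";

        // The regressed code searched for the two-byte sequence "\n\r", which
        // this input does not contain, so the whole blob stayed a single token.
        std::cout << (data.find("\n\r") == std::string::npos) << '\n'; // prints 1

        // Searching for any of '\n' or '\r' finds the lone '\n' at index 24,
        // which is the behaviour tokenizeAnyOf() restores.
        std::cout << data.find_first_of("\n\r") << '\n';               // prints 24
    }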

diff --git a/common/Authorization.cpp b/common/Authorization.cpp
index abaddae26..ad4381ef5 100644
--- a/common/Authorization.cpp
+++ b/common/Authorization.cpp
@@ -52,8 +52,9 @@ void Authorization::authorizeRequest(Poco::Net::HTTPRequest& request) const
             // there might be more headers in here; like
             //   Authorization: Basic ....
             //   X-Something-Custom: Huh
-            // Regular expression evaluates and finds "\n\r" and tokenizes accordingly
-            StringVector tokens(Util::tokenize(_data, "\n\r"));
+            // Split based on \n's or \r's and trim, to avoid nonsense in the
+            // headers
+            StringVector tokens(Util::tokenizeAnyOf(_data, "\n\r"));
             for (auto it = tokens.begin(); it != tokens.end(); ++it)
             {
                 std::string token = tokens.getParam(*it);
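
With the fix, the loop above sees one header per token, already stripped of
surrounding whitespace.  A rough sketch of the effect on a hypothetical _data
value (the value is illustrative, not from the patch):

    // _data == " Authorization: Basic abc \r\n X-Something-Custom: Huh \n"
    StringVector tokens(Util::tokenizeAnyOf(_data, "\n\r"));
    for (auto it = tokens.begin(); it != tokens.end(); ++it)
    {
        std::string token = tokens.getParam(*it);
        // 1st iteration: token == "Authorization: Basic abc"
        // 2nd iteration: token == "X-Something-Custom: Huh"
    }
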
diff --git a/common/Util.cpp b/common/Util.cpp
index 347fc4562..ee1aa19b1 100644
--- a/common/Util.cpp
+++ b/common/Util.cpp
@@ -987,6 +987,51 @@ namespace Util
         }
     #endif
 
+    StringVector tokenizeAnyOf(const std::string& s, const char* delimiters)
+    {
+        // trim from the end so that we do not have to check this exact case
+        // later
+        std::size_t length = s.length();
+        while (length > 0 && s[length - 1] == ' ')
+            --length;
+
+        if (length == 0)
+            return StringVector();
+
+        std::size_t delimitersLength = std::strlen(delimiters);
+        std::size_t start = 0;
+
+        std::vector<StringToken> tokens;
+        tokens.reserve(16);
+
+        while (start < length)
+        {
+            // ignore the leading whitespace
+            while (start < length && s[start] == ' ')
+                ++start;
+
+            // anything left?
+            if (start == length)
+                break;
+
+            std::size_t end = s.find_first_of(delimiters, start, delimitersLength);
+            if (end == std::string::npos)
+                end = length;
+
+            // trim the trailing whitespace
+            std::size_t trimEnd = end;
+            while (start < trimEnd && s[trimEnd - 1] == ' ')
+                --trimEnd;
+
+            // add only non-empty tokens
+            if (start < trimEnd)
+                tokens.emplace_back(start, trimEnd - start);
+
+            start = end + 1;
+        }
+
+        return StringVector(s, std::move(tokens));
+    }
 }
 
 /* vim:set shiftwidth=4 softtabstop=4 expandtab: */
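
The implementation above does not create per-token std::strings: it records
(start, length) StringToken entries referring into the original string and
hands them to StringVector, which materializes strings on demand (e.g. via
getParam()).  For readers without the StringVector type at hand, here is a
self-contained sketch of the same "split on any of the delimiters, trim
spaces, drop empty tokens" behaviour using plain std::string (an illustration
only, not the patch's code):

    #include <string>
    #include <vector>

    std::vector<std::string> tokenizeAnyOfPlain(const std::string& s, const char* delims)
    {
        std::vector<std::string> out;
        std::size_t start = 0;
        while (start < s.size())
        {
            // next occurrence of any delimiter character (or end of string)
            std::size_t end = s.find_first_of(delims, start);
            if (end == std::string::npos)
                end = s.size();

            // trim spaces on both sides of the segment
            std::size_t b = start, e = end;
            while (b < e && s[b] == ' ') ++b;
            while (b < e && s[e - 1] == ' ') --e;

            // keep only non-empty segments
            if (b < e)
                out.push_back(s.substr(b, e - b));

            start = end + 1;
        }
        return out;
    }
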
diff --git a/common/Util.hpp b/common/Util.hpp
index fed1525e6..9c82b3068 100644
--- a/common/Util.hpp
+++ b/common/Util.hpp
@@ -433,29 +433,12 @@ namespace Util
         return StringVector(s, std::move(tokens));
     }
 
-    inline StringVector tokenize(const std::string& s, const char* delimiter)
-    {
-        if (s.empty())
-            return StringVector();
-
-        std::size_t start = 0;
-        std::size_t end = s.find(delimiter, start);
-
-        std::vector<StringToken> tokens;
-        tokens.reserve(16);
+    /** Tokenize based on any of the characters in 'delimiters'.
 
-        tokens.emplace_back(start, end - start);
-        start = end + std::strlen(delimiter);
-
-        while (end != std::string::npos)
-        {
-            end = s.find(delimiter, start);
-            tokens.emplace_back(start, end - start);
-            start = end + std::strlen(delimiter);
-        }
-
-        return StringVector(s, std::move(tokens));
-    }
+        I.e. when the delimiters are '\n\r', either of the characters acts as a delimiter.
+        In addition, trim the values so there are no leading or trailing spaces.
+    */
+    StringVector tokenizeAnyOf(const std::string& s, const char* delimiters);
 
 #ifdef IOS
 
diff --git a/test/WhiteBoxTests.cpp b/test/WhiteBoxTests.cpp
index bb2f57e4c..a2e676198 100644
--- a/test/WhiteBoxTests.cpp
+++ b/test/WhiteBoxTests.cpp
@@ -33,6 +33,7 @@ class WhiteBoxTests : public CPPUNIT_NS::TestFixture
     CPPUNIT_TEST(testSplitting);
     CPPUNIT_TEST(testMessageAbbreviation);
     CPPUNIT_TEST(testTokenizer);
+    CPPUNIT_TEST(testTokenizerTokenizeAnyOf);
     CPPUNIT_TEST(testReplace);
     CPPUNIT_TEST(testRegexListMatcher);
     CPPUNIT_TEST(testRegexListMatcher_Init);
@@ -54,6 +55,7 @@ class WhiteBoxTests : public CPPUNIT_NS::TestFixture
     void testSplitting();
     void testMessageAbbreviation();
     void testTokenizer();
+    void testTokenizerTokenizeAnyOf();
     void testReplace();
     void testRegexListMatcher();
     void testRegexListMatcher_Init();
@@ -426,6 +428,67 @@ void WhiteBoxTests::testTokenizer()
     LOK_ASSERT_EQUAL(static_cast<size_t>(0), ints.size());
 }
 
+void WhiteBoxTests::testTokenizerTokenizeAnyOf()
+{
+    StringVector tokens;
+    const char delimiters[] = "\n\r"; // any of these delimits, and whitespace gets trimmed
+
+    tokens = Util::tokenizeAnyOf("", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(0), tokens.size());
+
+    tokens = Util::tokenizeAnyOf("  ", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(0), tokens.size());
+
+    tokens = Util::tokenizeAnyOf("A", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(1), tokens.size());
+    LOK_ASSERT_EQUAL(std::string("A"), tokens[0]);
+
+    tokens = Util::tokenizeAnyOf("  A", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(1), tokens.size());
+    LOK_ASSERT_EQUAL(std::string("A"), tokens[0]);
+
+    tokens = Util::tokenizeAnyOf("A  ", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(1), tokens.size());
+    LOK_ASSERT_EQUAL(std::string("A"), tokens[0]);
+
+    tokens = Util::tokenizeAnyOf(" A ", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(1), tokens.size());
+    LOK_ASSERT_EQUAL(std::string("A"), tokens[0]);
+
+    tokens = Util::tokenizeAnyOf(" A  Z ", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(1), tokens.size());
+    LOK_ASSERT_EQUAL(std::string("A  Z"), tokens[0]);
+
+    tokens = Util::tokenizeAnyOf("\n", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(0), tokens.size());
+
+    tokens = Util::tokenizeAnyOf("\n\r\r\n", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(0), tokens.size());
+
+    tokens = Util::tokenizeAnyOf(" A  \nZ ", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(2), tokens.size());
+    LOK_ASSERT_EQUAL(std::string("A"), tokens[0]);
+    LOK_ASSERT_EQUAL(std::string("Z"), tokens[1]);
+
+    tokens = Util::tokenizeAnyOf(" A  Z\n ", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(1), tokens.size());
+    LOK_ASSERT_EQUAL(std::string("A  Z"), tokens[0]);
+
+    tokens = Util::tokenizeAnyOf(" A  Z  \n\r\r\n ", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(1), tokens.size());
+    LOK_ASSERT_EQUAL(std::string("A  Z"), tokens[0]);
+
+    tokens = Util::tokenizeAnyOf(" A  \n\r\r\n  \r  \n  Z  \n ", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(2), tokens.size());
+    LOK_ASSERT_EQUAL(std::string("A"), tokens[0]);
+    LOK_ASSERT_EQUAL(std::string("Z"), tokens[1]);
+
+    tokens = Util::tokenizeAnyOf("  \r A  \n  \r  \n  Z  \n ", delimiters);
+    LOK_ASSERT_EQUAL(static_cast<size_t>(2), tokens.size());
+    LOK_ASSERT_EQUAL(std::string("A"), tokens[0]);
+    LOK_ASSERT_EQUAL(std::string("Z"), tokens[1]);
+}
+
 void WhiteBoxTests::testReplace()
 {
     LOK_ASSERT_EQUAL(std::string("zesz one zwo flee"), Util::replace("test one two flee", "t", "z"));

