🐛 fix lexer to properly cope with repeated comments #2330

parent 1da93173
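The lexer skipped at most one comment between tokens: it checked `if (ignore_comments && current == '/')`, so input containing two or more consecutive comments before the next token failed to parse. The change below turns that `if` into a `while`, so every run of consecutive comments is consumed.

For reference, a minimal standalone sketch of the reproduction covered by the new regression test (the `#include`s and `main()` scaffolding are added here for illustration; the input string and `parse` call mirror the test added in this commit):

```cpp
#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

int main()
{
    // Two consecutive line comments in front of the actual JSON value
    // (the reproduction from issue #2330).
    std::string ss = "//\n//\n{\n}\n";

    // parse(input, callback, allow_exceptions, ignore_comments):
    // with ignore_comments == true the lexer must skip *all* leading
    // comments, not just the first one.
    json j = json::parse(ss, nullptr, true, true);

    std::cout << j.dump() << '\n';  // prints {}
}
```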
@@ -1511,7 +1511,7 @@ scan_number_done:
     skip_whitespace();
     // ignore comments
-    if (ignore_comments && current == '/')
+    while (ignore_comments && current == '/')
     {
         if (!scan_comment())
         {
...
@@ -7390,7 +7390,7 @@ scan_number_done:
     skip_whitespace();
     // ignore comments
-    if (ignore_comments && current == '/')
+    while (ignore_comments && current == '/')
     {
         if (!scan_comment())
         {
...
@@ -241,5 +241,8 @@ TEST_CASE("lexer class")
         CHECK((scan_string("/* true */", true) == json::lexer::token_type::end_of_input));
         CHECK((scan_string("/*/**/", true) == json::lexer::token_type::end_of_input));
         CHECK((scan_string("/*/* */", true) == json::lexer::token_type::end_of_input));
+        CHECK((scan_string("//\n//\n", true) == json::lexer::token_type::end_of_input));
+        CHECK((scan_string("/**//**//**/", true) == json::lexer::token_type::end_of_input));
     }
 }
@@ -478,4 +478,11 @@ TEST_CASE("regression tests 2")
         CHECK(jsonObj["aaaa"] == 11);
         CHECK(jsonObj["bbb"] == 222);
     }
+
+    SECTION("issue #2330 - ignore_comment=true fails on multiple consecutive lines starting with comments")
+    {
+        std::string ss = "//\n//\n{\n}\n";
+        json j = json::parse(ss, nullptr, true, true);
+        CHECK(j.dump() == "{}");
+    }
 }