Lexer.cpp

/*
 * Copyright (c) 2023, Dan Klishch <danilklishch@gmail.com>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/NonnullOwnPtr.h>
#include <LibXML/Parser/Parser.h>

#include "Parser/Lexer.h"
#include "Parser/SpecParser.h"
#include "Parser/XMLUtils.h"

namespace JSSpecCompiler {

namespace {
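
// Consumes an optionally negative decimal number, with an optional fractional part, and wraps it
// in a Number token. Returns an empty Optional and restores the lexer position if the input does
// not start with a number.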
Optional<Token> consume_number(XML::LineTrackingLexer& lexer, XML::Node const* node, Location& location)
{
    u64 start = lexer.tell();

    if (lexer.next_is('-'))
        lexer.consume(1);

    if (!lexer.next_is(is_ascii_digit)) {
        lexer.retreat(lexer.tell() - start);
        return {};
    }

    lexer.consume_while(is_ascii_digit);

    if (lexer.next_is('.')) {
        lexer.consume(1);
        if (lexer.consume_while(is_ascii_digit).length() == 0)
            lexer.retreat(1);
    }

    auto length = lexer.tell() - start;
    lexer.retreat(length);
    return { Token { TokenType::Number, lexer.consume(length), node, move(location) } };
}
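
// Word tokens run until whitespace, a period, or a comma.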
bool can_end_word_token(char c)
{
    return is_ascii_space(c) || ".,"sv.contains(c);
}
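
// Splits a raw text run from the specification into tokens: numbers, the fixed punctuation and
// operator spellings listed in the choices table below, and bare words.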
void tokenize_string(SpecificationParsingContext& ctx, XML::Node const* node, StringView view, Vector<Token>& tokens)
{
    static constexpr struct {
        StringView text_to_match;
        TokenType token_type;
    } choices[] = {
        { "-"sv, TokenType::AmbiguousMinus },
        { "}"sv, TokenType::BraceClose },
        { "{"sv, TokenType::BraceOpen },
        { ":"sv, TokenType::Colon },
        { ","sv, TokenType::Comma },
        { "/"sv, TokenType::Division },
        { ". "sv, TokenType::Dot },
        { ".\n"sv, TokenType::Dot },
        { "="sv, TokenType::Equals },
        { "is equal to"sv, TokenType::Equals },
        { "!"sv, TokenType::ExclamationMark },
        { ">"sv, TokenType::Greater },
        { "is"sv, TokenType::Is },
        { "<"sv, TokenType::Less },
        { "."sv, TokenType::MemberAccess },
        { "×"sv, TokenType::Multiplication },
        { "is not equal to"sv, TokenType::NotEquals },
        { "≠"sv, TokenType::NotEquals },
        { ")"sv, TokenType::ParenClose },
        { "("sv, TokenType::ParenOpen },
        { "+"sv, TokenType::Plus },
    };

    XML::LineTrackingLexer lexer(view, node->offset);

    while (!lexer.is_eof()) {
        lexer.ignore_while(is_ascii_space);

        // FIXME: This is incorrect since we count text offset after XML reference resolution. To do
        //        this properly, we need support from XML::Parser.
        Location token_location = ctx.location_from_xml_offset(lexer.offset_for(lexer.tell()));

        if (auto result = consume_number(lexer, node, token_location); result.has_value()) {
            tokens.append(result.release_value());
            continue;
        }

        bool matched = false;
        for (auto const& [text_to_match, token_type] : choices) {
            if (lexer.consume_specific(text_to_match)) {
                tokens.append({ token_type, ""sv, node, move(token_location) });
                matched = true;
                break;
            }
        }
        if (matched)
            continue;

        StringView word = lexer.consume_until(can_end_word_token);
        if (word.length())
            tokens.append({ TokenType::Word, word, node, move(token_location) });
    }
}
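
// tokenize_tree below is shared between algorithm steps and clause headers; TreeType selects
// which child elements are accepted. TokenizerState accumulates the resulting tokens, a pointer
// to a trailing substeps list (if any), and whether any error has been reported.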
enum class TreeType {
    AlgorithmStep,
    Header,
};

struct TokenizerState {
    Vector<Token> tokens;
    XML::Node const* substeps = nullptr;
    bool has_errors = false;
};
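
// Walks the children of an XML node and turns them into a flat token stream. Text runs go
// through tokenize_string; known tags (<var>, <emu-val>, <emu-xref>, the section-number <span>
// in headers, and a trailing <ol> of substeps) map to dedicated tokens. Errors are reported
// through the parsing context and recorded in state.has_errors.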
void tokenize_tree(SpecificationParsingContext& ctx, TokenizerState& state, XML::Node const* node, TreeType tree_type)
{
    // FIXME: Use structured binding once macOS Lagom CI updates to Clang >= 16.
    auto& tokens = state.tokens;
    auto& substeps = state.substeps;
    auto& has_errors = state.has_errors;

    for (auto const& child : node->as_element().children) {
        if (has_errors)
            break;

        child->content.visit(
            [&](XML::Node::Element const& element) -> void {
                Location child_location = ctx.location_from_xml_offset(child->offset);

                auto report_error = [&]<typename... Parameters>(AK::CheckedFormatString<Parameters...>&& fmt, Parameters const&... parameters) {
                    ctx.diag().error(child_location, move(fmt), parameters...);
                    has_errors = true;
                };

                if (substeps) {
                    report_error("substeps list must be the last child of algorithm step");
                    return;
                }

                if (element.name == tag_var) {
                    auto variable_name = get_text_contents(child);
                    if (!variable_name.has_value())
                        report_error("malformed <var> subtree, expected single text child node");

                    tokens.append({ TokenType::Identifier, variable_name.value_or(""sv), child, move(child_location) });
                    return;
                }

                if (element.name == tag_emu_val) {
                    auto maybe_contents = get_text_contents(child);
                    if (!maybe_contents.has_value())
                        report_error("malformed <emu-val> subtree, expected single text child node");

                    auto contents = maybe_contents.value_or(""sv);

                    if (contents.length() >= 2 && contents.starts_with('"') && contents.ends_with('"'))
                        tokens.append({ TokenType::String, contents.substring_view(1, contents.length() - 2), child, move(child_location) });
                    else if (contents == "undefined")
                        tokens.append({ TokenType::Undefined, contents, child, move(child_location) });
                    else
                        tokens.append({ TokenType::Identifier, contents, child, move(child_location) });
                    return;
                }

                if (element.name == tag_emu_xref) {
                    auto identifier = get_single_child_with_tag(child, "a"sv).map([](XML::Node const* node) {
                        return get_text_contents(node).value_or(""sv);
                    });
                    if (!identifier.has_value() || identifier.value().is_empty())
                        report_error("malformed <emu-xref> subtree, expected <a> with nested single text node");

                    tokens.append({ TokenType::Identifier, identifier.value_or(""sv), child, move(child_location) });
                    return;
                }

                if (tree_type == TreeType::Header && element.name == tag_span) {
                    auto element_class = get_attribute_by_name(child, attribute_class);
                    if (element_class != class_secnum)
                        report_error("expected <span> to have class='secnum' attribute");

                    auto section_number = get_text_contents(child);
                    if (!section_number.has_value())
                        report_error("malformed section number span subtree, expected single text child node");

                    tokens.append({ TokenType::SectionNumber, section_number.value_or(""sv), child, move(child_location) });
                    return;
                }

                if (tree_type == TreeType::AlgorithmStep && element.name == tag_ol) {
                    substeps = child;
                    return;
                }

                report_error("<{}> should not be a child of algorithm step", element.name);
            },
            [&](XML::Node::Text const& text) {
                auto view = text.builder.string_view();
                if (substeps != nullptr && !contains_empty_text(child)) {
                    ctx.diag().error(ctx.location_from_xml_offset(child->offset),
                        "substeps list must be the last child of algorithm step");
                } else {
                    tokenize_string(ctx, child, view, tokens);
                }
            },
            [&](auto const&) {});
    }

    if (tokens.size() && tokens.last().type == TokenType::MemberAccess)
        tokens.last().type = TokenType::Dot;
}

}
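
// Tokenizes a single algorithm step node. Returns the collected tokens (or an empty Optional if
// an error was reported) together with the trailing <ol> substeps node, if one was present.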
StepTokenizationResult tokenize_step(SpecificationParsingContext& ctx, XML::Node const* node)
{
    TokenizerState state;
    tokenize_tree(ctx, state, node, TreeType::AlgorithmStep);
    return {
        .tokens = state.has_errors ? OptionalNone {} : Optional<Vector<Token>> { move(state.tokens) },
        .substeps = state.substeps,
    };
}
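
// Tokenizes a clause header node. Returns an empty Optional if an error was reported.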
Optional<Vector<Token>> tokenize_header(SpecificationParsingContext& ctx, XML::Node const* node)
{
    TokenizerState state;
    tokenize_tree(ctx, state, node, TreeType::Header);
    return state.has_errors ? OptionalNone {} : Optional<Vector<Token>> { state.tokens };
}

}