Tokenizer.cpp

/*
 * Copyright (c) 2020-2022, the SerenityOS developers.
 * Copyright (c) 2021, Sam Atkins <atkinssj@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/CharacterTypes.h>
#include <AK/Debug.h>
#include <AK/SourceLocation.h>
#include <AK/Vector.h>
#include <LibTextCodec/Decoder.h>
#include <LibWeb/CSS/Parser/Tokenizer.h>
#include <math.h>

// U+FFFD REPLACEMENT CHARACTER (�)
#define REPLACEMENT_CHARACTER 0xFFFD
static constexpr u32 TOKENIZER_EOF = 0xFFFFFFFF;

static inline void log_parse_error(const SourceLocation& location = SourceLocation::current())
{
    dbgln_if(CSS_TOKENIZER_DEBUG, "Parse error (css tokenization) {} ", location);
}

static inline bool is_eof(u32 code_point)
{
    return code_point == TOKENIZER_EOF;
}

static inline bool is_quotation_mark(u32 code_point)
{
    return code_point == 0x22;
}

static inline bool is_greater_than_maximum_allowed_code_point(u32 code_point)
{
    return code_point > 0x10FFFF;
}

static inline bool is_low_line(u32 code_point)
{
    return code_point == 0x5F;
}

static inline bool is_name_start_code_point(u32 code_point)
{
    // FIXME: We use !is_ascii() for "non-ASCII code point" in the spec, but it's not quite right -
    //        it treats EOF as valid! The spec also lacks a definition of "code point". For now,
    //        the !is_eof() check is a hack, but it should work.
    return !is_eof(code_point) && (is_ascii_alpha(code_point) || !is_ascii(code_point) || is_low_line(code_point));
}

static inline bool is_hyphen_minus(u32 code_point)
{
    return code_point == 0x2D;
}

static inline bool is_name_code_point(u32 code_point)
{
    return is_name_start_code_point(code_point) || is_ascii_digit(code_point) || is_hyphen_minus(code_point);
}

static inline bool is_non_printable(u32 code_point)
{
    return code_point <= 0x8 || code_point == 0xB || (code_point >= 0xE && code_point <= 0x1F) || code_point == 0x7F;
}

static inline bool is_number_sign(u32 code_point)
{
    return code_point == 0x23;
}

static inline bool is_reverse_solidus(u32 code_point)
{
    return code_point == 0x5C;
}

static inline bool is_apostrophe(u32 code_point)
{
    return code_point == 0x27;
}

static inline bool is_left_paren(u32 code_point)
{
    return code_point == 0x28;
}

static inline bool is_right_paren(u32 code_point)
{
    return code_point == 0x29;
}

static inline bool is_plus_sign(u32 code_point)
{
    return code_point == 0x2B;
}

static inline bool is_comma(u32 code_point)
{
    return code_point == 0x2C;
}

static inline bool is_full_stop(u32 code_point)
{
    return code_point == 0x2E;
}

static inline bool is_newline(u32 code_point)
{
    return code_point == 0xA;
}

static inline bool is_asterisk(u32 code_point)
{
    return code_point == 0x2A;
}

static inline bool is_solidus(u32 code_point)
{
    return code_point == 0x2F;
}

static inline bool is_colon(u32 code_point)
{
    return code_point == 0x3A;
}

static inline bool is_semicolon(u32 code_point)
{
    return code_point == 0x3B;
}

static inline bool is_less_than_sign(u32 code_point)
{
    return code_point == 0x3C;
}

static inline bool is_greater_than_sign(u32 code_point)
{
    return code_point == 0x3E;
}

static inline bool is_at(u32 code_point)
{
    return code_point == 0x40;
}

static inline bool is_open_square_bracket(u32 code_point)
{
    return code_point == 0x5B;
}

static inline bool is_closed_square_bracket(u32 code_point)
{
    return code_point == 0x5D;
}

static inline bool is_open_curly_bracket(u32 code_point)
{
    return code_point == 0x7B;
}

static inline bool is_closed_curly_bracket(u32 code_point)
{
    return code_point == 0x7D;
}

static inline bool is_whitespace(u32 code_point)
{
    return code_point == 0x9 || code_point == 0xA || code_point == 0x20;
}

static inline bool is_percent(u32 code_point)
{
    return code_point == 0x25;
}

static inline bool is_exclamation_mark(u32 code_point)
{
    return code_point == 0x21;
}

static inline bool is_e(u32 code_point)
{
    return code_point == 0x65;
}

static inline bool is_E(u32 code_point)
{
    return code_point == 0x45;
}

namespace Web::CSS {

Tokenizer::Tokenizer(StringView input, const String& encoding)
{
    auto* decoder = TextCodec::decoder_for(encoding);
    VERIFY(decoder);

    StringBuilder builder(input.length());

    // Preprocess the stream, by doing the following:
    // - Replace \r, \f and \r\n with \n
    // - Replace \0 and anything between U+D800 and U+DFFF with the replacement
    //   character.
    // https://www.w3.org/TR/css-syntax-3/#input-preprocessing
    bool last_was_carriage_return = false;
    decoder->process(input, [&builder, &last_was_carriage_return](u32 code_point) {
        if (code_point == '\r') {
            if (last_was_carriage_return) {
                builder.append('\n');
            } else {
                last_was_carriage_return = true;
            }
        } else {
            if (last_was_carriage_return) {
                builder.append('\n');
            }

            if (code_point == '\n') {
                if (!last_was_carriage_return) {
                    builder.append('\n');
                }
            } else if (code_point == '\f') {
                builder.append('\n');
            } else if (code_point >= 0xD800 && code_point <= 0xDFFF) {
                builder.append_code_point(REPLACEMENT_CHARACTER);
            } else {
                builder.append_code_point(code_point);
            }

            last_was_carriage_return = false;
        }
    });

    m_decoded_input = builder.to_string();
    m_utf8_view = Utf8View(m_decoded_input);
    m_utf8_iterator = m_utf8_view.begin();
}
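
// Illustration (not from the spec): after the preprocessing above, the input
// "a\r\nb\fc" is decoded as "a\nb\nc", and a lone surrogate code point
// (U+D800..U+DFFF) is replaced with U+FFFD before tokenization begins.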

Vector<Token> Tokenizer::parse()
{
    Vector<Token> tokens;
    for (;;) {
        auto token_start = m_position;
        auto token = consume_a_token();
        token.m_start_position = token_start;
        token.m_end_position = m_position;
        tokens.append(token);

        if (token.is(Token::Type::EndOfFile)) {
            return tokens;
        }
    }
}
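
// A minimal usage sketch (illustrative; assumes the usual LibWeb/AK build environment
// and the Token helpers declared in Token.h): construct a Tokenizer over a UTF-8
// stylesheet and call parse(). The returned list always ends with an <EOF-token>.
//
//     Tokenizer tokenizer("p { color: red; }"sv, "utf-8");
//     auto tokens = tokenizer.parse();
//     VERIFY(tokens.last().is(Token::Type::EndOfFile));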

u32 Tokenizer::next_code_point()
{
    if (m_utf8_iterator == m_utf8_view.end())
        return TOKENIZER_EOF;
    m_prev_utf8_iterator = m_utf8_iterator;
    ++m_utf8_iterator;
    auto code_point = *m_prev_utf8_iterator;

    m_prev_position = m_position;
    if (is_newline(code_point)) {
        m_position.line++;
        m_position.column = 0;
    } else {
        m_position.column++;
    }

    dbgln_if(CSS_TOKENIZER_DEBUG, "(Tokenizer) Next code_point: {:d}", code_point);
    return code_point;
}

u32 Tokenizer::peek_code_point(size_t offset) const
{
    auto it = m_utf8_iterator;
    for (size_t i = 0; i < offset && it != m_utf8_view.end(); ++i)
        ++it;
    if (it == m_utf8_view.end())
        return TOKENIZER_EOF;
    dbgln_if(CSS_TOKENIZER_DEBUG, "(Tokenizer) Peek code_point: {:d}", *it);
    return *it;
}

U32Twin Tokenizer::peek_twin() const
{
    U32Twin values { TOKENIZER_EOF, TOKENIZER_EOF };
    auto it = m_utf8_iterator;
    for (size_t i = 0; i < 2 && it != m_utf8_view.end(); ++i) {
        values.set(i, *it);
        ++it;
    }
    dbgln_if(CSS_TOKENIZER_DEBUG, "(Tokenizer) Peek twin: {:d},{:d}", values.first, values.second);
    return values;
}

U32Triplet Tokenizer::peek_triplet() const
{
    U32Triplet values { TOKENIZER_EOF, TOKENIZER_EOF, TOKENIZER_EOF };
    auto it = m_utf8_iterator;
    for (size_t i = 0; i < 3 && it != m_utf8_view.end(); ++i) {
        values.set(i, *it);
        ++it;
    }
    dbgln_if(CSS_TOKENIZER_DEBUG, "(Tokenizer) Peek triplet: {:d},{:d},{:d}", values.first, values.second, values.third);
    return values;
}

U32Twin Tokenizer::start_of_input_stream_twin()
{
    U32Twin twin;
    // FIXME: Reconsuming just to read the current code point again is weird.
    reconsume_current_input_code_point();
    twin.first = next_code_point();
    twin.second = peek_code_point();
    return twin;
}

U32Triplet Tokenizer::start_of_input_stream_triplet()
{
    U32Triplet triplet;
    // FIXME: Reconsuming just to read the current code point again is weird.
    reconsume_current_input_code_point();
    triplet.first = next_code_point();
    auto next_two = peek_twin();
    triplet.second = next_two.first;
    triplet.third = next_two.second;
    return triplet;
}

Token Tokenizer::create_new_token(Token::Type type)
{
    Token token = {};
    token.m_type = type;
    return token;
}

Token Tokenizer::create_eof_token()
{
    return create_new_token(Token::Type::EndOfFile);
}

Token Tokenizer::create_value_token(Token::Type type, String value)
{
    Token token;
    token.m_type = type;
    token.m_value = move(value);
    return token;
}

Token Tokenizer::create_value_token(Token::Type type, u32 value)
{
    Token token = {};
    token.m_type = type;
    // FIXME: Avoid temporary StringBuilder here
    StringBuilder builder;
    builder.append_code_point(value);
    token.m_value = builder.to_string();
    return token;
}

// https://www.w3.org/TR/css-syntax-3/#consume-escaped-code-point
u32 Tokenizer::consume_escaped_code_point()
{
    // This section describes how to consume an escaped code point.
    // It assumes that the U+005C REVERSE SOLIDUS (\) has already been consumed and that the next
    // input code point has already been verified to be part of a valid escape.
    // It will return a code point.

    // Consume the next input code point.
    auto input = next_code_point();

    // hex digit
    if (is_ascii_hex_digit(input)) {
        // Consume as many hex digits as possible, but no more than 5.
        // Note that this means 1-6 hex digits have been consumed in total.
        StringBuilder builder;
        builder.append_code_point(input);

        size_t counter = 0;
        while (is_ascii_hex_digit(peek_code_point()) && counter++ < 5) {
            builder.append_code_point(next_code_point());
        }

        // If the next input code point is whitespace, consume it as well.
        if (is_whitespace(peek_code_point())) {
            (void)next_code_point();
        }

        // Interpret the hex digits as a hexadecimal number.
        auto unhexed = strtoul(builder.to_string().characters(), nullptr, 16);

        // If this number is zero, or is for a surrogate, or is greater than the maximum allowed
        // code point, return U+FFFD REPLACEMENT CHARACTER (�).
        if (unhexed == 0 || is_unicode_surrogate(unhexed) || is_greater_than_maximum_allowed_code_point(unhexed)) {
            return REPLACEMENT_CHARACTER;
        }

        // Otherwise, return the code point with that value.
        return unhexed;
    }

    // EOF
    if (is_eof(input)) {
        // This is a parse error. Return U+FFFD REPLACEMENT CHARACTER (�).
        log_parse_error();
        return REPLACEMENT_CHARACTER;
    }

    // anything else
    // Return the current input code point.
    return input;
}
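
// Illustration (not from the spec text above): for the input "\41 x", the caller has already
// consumed the reverse solidus; this function then consumes "41" and the single trailing
// space and returns U+0041 ('A'), leaving 'x' in the stream. "\0" and "\110000" both come
// back as U+FFFD, since zero, surrogates, and values above 0x10FFFF are rejected.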

// https://www.w3.org/TR/css-syntax-3/#consume-ident-like-token
Token Tokenizer::consume_an_ident_like_token()
{
    // This section describes how to consume an ident-like token from a stream of code points.
    // It returns an <ident-token>, <function-token>, <url-token>, or <bad-url-token>.

    // Consume a name, and let string be the result.
    auto string = consume_a_name();

    // If string’s value is an ASCII case-insensitive match for "url", and the next input code
    // point is U+0028 LEFT PARENTHESIS ((), consume it.
    if (string.equals_ignoring_case("url") && is_left_paren(peek_code_point())) {
        (void)next_code_point();

        // While the next two input code points are whitespace, consume the next input code point.
        for (;;) {
            auto maybe_whitespace = peek_twin();
            if (!(is_whitespace(maybe_whitespace.first) && is_whitespace(maybe_whitespace.second))) {
                break;
            }

            (void)next_code_point();
        }

        // If the next one or two input code points are U+0022 QUOTATION MARK ("), U+0027 APOSTROPHE ('),
        // or whitespace followed by U+0022 QUOTATION MARK (") or U+0027 APOSTROPHE ('), then create a
        // <function-token> with its value set to string and return it.
        auto next_two = peek_twin();
        if (is_quotation_mark(next_two.first) || is_apostrophe(next_two.first) || (is_whitespace(next_two.first) && (is_quotation_mark(next_two.second) || is_apostrophe(next_two.second)))) {
            return create_value_token(Token::Type::Function, string);
        }

        // Otherwise, consume a url token, and return it.
        return consume_a_url_token();
    }

    // Otherwise, if the next input code point is U+0028 LEFT PARENTHESIS ((), consume it.
    if (is_left_paren(peek_code_point())) {
        (void)next_code_point();

        // Create a <function-token> with its value set to string and return it.
        return create_value_token(Token::Type::Function, string);
    }

    // Otherwise, create an <ident-token> with its value set to string and return it.
    return create_value_token(Token::Type::Ident, string);
}

// https://www.w3.org/TR/css-syntax-3/#consume-number
CSSNumber Tokenizer::consume_a_number()
{
    // This section describes how to consume a number from a stream of code points.
    // It returns a numeric value, and a type which is either "integer" or "number".
    //
    // Note: This algorithm does not do the verification of the first few code points
    // that are necessary to ensure a number can be obtained from the stream. Ensure
    // that the stream starts with a number before calling this algorithm.

    // Execute the following steps in order:

    // 1. Initially set type to "integer". Let repr be the empty string.
    StringBuilder repr;
    Token::NumberType type = Token::NumberType::Integer;

    // 2. If the next input code point is U+002B PLUS SIGN (+) or U+002D HYPHEN-MINUS (-),
    //    consume it and append it to repr.
    auto next_input = peek_code_point();
    if (is_plus_sign(next_input) || is_hyphen_minus(next_input)) {
        repr.append_code_point(next_code_point());
    }

    // 3. While the next input code point is a digit, consume it and append it to repr.
    for (;;) {
        auto digits = peek_code_point();
        if (!is_ascii_digit(digits))
            break;

        repr.append_code_point(next_code_point());
    }

    // 4. If the next 2 input code points are U+002E FULL STOP (.) followed by a digit, then:
    auto maybe_number = peek_twin();
    if (is_full_stop(maybe_number.first) && is_ascii_digit(maybe_number.second)) {
        // 1. Consume them.
        // 2. Append them to repr.
        repr.append_code_point(next_code_point());
        repr.append_code_point(next_code_point());

        // 3. Set type to "number".
        type = Token::NumberType::Number;

        // 4. While the next input code point is a digit, consume it and append it to repr.
        for (;;) {
            auto digit = peek_code_point();
            if (!is_ascii_digit(digit))
                break;

            repr.append_code_point(next_code_point());
        }
    }

    // 5. If the next 2 or 3 input code points are U+0045 LATIN CAPITAL LETTER E (E) or
    //    U+0065 LATIN SMALL LETTER E (e), optionally followed by U+002D HYPHEN-MINUS (-)
    //    or U+002B PLUS SIGN (+), followed by a digit, then:
    auto maybe_exp = peek_triplet();
    if (is_E(maybe_exp.first) || is_e(maybe_exp.first)) {
        // 1. Consume them.
        // 2. Append them to repr.
        // FIXME: These conditions should be part of step 5 above.
        if (is_plus_sign(maybe_exp.second) || is_hyphen_minus(maybe_exp.second)) {
            if (is_ascii_digit(maybe_exp.third)) {
                repr.append_code_point(next_code_point());
                repr.append_code_point(next_code_point());
                repr.append_code_point(next_code_point());
            }
        } else if (is_ascii_digit(maybe_exp.second)) {
            repr.append_code_point(next_code_point());
            repr.append_code_point(next_code_point());
        }

        // 3. Set type to "number".
        type = Token::NumberType::Number;

        // 4. While the next input code point is a digit, consume it and append it to repr.
        for (;;) {
            auto digits = peek_code_point();
            if (!is_ascii_digit(digits))
                break;

            repr.append_code_point(next_code_point());
        }
    }

    // 6. Convert repr to a number, and set the value to the returned value.
    auto value = convert_a_string_to_a_number(repr.string_view());

    // 7. Return value and type.
    return { repr.to_string(), value, type };
}
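
// Illustration (not from the spec): consuming from "+12.5e-2px" builds repr == "+12.5e-2",
// sets the type flag to "number", and leaves "px" in the stream for the caller
// (consume_a_numeric_token) to pick up as a dimension unit; the converted value is 0.125.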

// https://www.w3.org/TR/css-syntax-3/#convert-string-to-number
double Tokenizer::convert_a_string_to_a_number(StringView string)
{
    auto code_point_at = [&](size_t index) -> u32 {
        if (index < string.length())
            return string[index];
        return TOKENIZER_EOF;
    };

    // This algorithm does not do any verification to ensure that the string contains only a number.
    // Ensure that the string contains only a valid CSS number before calling this algorithm.

    // Divide the string into seven components, in order from left to right:
    size_t position = 0;

    // 1. A sign: a single U+002B PLUS SIGN (+) or U+002D HYPHEN-MINUS (-), or the empty string.
    //    Let s [sign] be the number -1 if the sign is U+002D HYPHEN-MINUS (-); otherwise, let s be the number 1.
    int sign = 1;
    if (is_plus_sign(code_point_at(position)) || is_hyphen_minus(code_point_at(position))) {
        sign = is_hyphen_minus(code_point_at(position)) ? -1 : 1;
        position++;
    }

    // 2. An integer part: zero or more digits.
    //    If there is at least one digit, let i [integer_part] be the number formed by interpreting the digits
    //    as a base-10 integer; otherwise, let i be the number 0.
    double integer_part = 0;
    while (is_ascii_digit(code_point_at(position))) {
        integer_part = (integer_part * 10) + (code_point_at(position) - '0');
        position++;
    }

    // 3. A decimal point: a single U+002E FULL STOP (.), or the empty string.
    if (is_full_stop(code_point_at(position)))
        position++;

    // 4. A fractional part: zero or more digits.
    //    If there is at least one digit, let f [fractional_part] be the number formed by interpreting the digits
    //    as a base-10 integer and d [fractional_digits] be the number of digits; otherwise, let f and d be the number 0.
    double fractional_part = 0;
    int fractional_digits = 0;
    while (is_ascii_digit(code_point_at(position))) {
        fractional_part = (fractional_part * 10) + (code_point_at(position) - '0');
        position++;
        fractional_digits++;
    }

    // 5. An exponent indicator: a single U+0045 LATIN CAPITAL LETTER E (E) or U+0065 LATIN SMALL LETTER E (e),
    //    or the empty string.
    if (is_e(code_point_at(position)) || is_E(code_point_at(position)))
        position++;

    // 6. An exponent sign: a single U+002B PLUS SIGN (+) or U+002D HYPHEN-MINUS (-), or the empty string.
    //    Let t [exponent_sign] be the number -1 if the sign is U+002D HYPHEN-MINUS (-); otherwise, let t be the number 1.
    int exponent_sign = 1;
    if (is_plus_sign(code_point_at(position)) || is_hyphen_minus(code_point_at(position))) {
        exponent_sign = is_hyphen_minus(code_point_at(position)) ? -1 : 1;
        position++;
    }

    // 7. An exponent: zero or more digits.
    //    If there is at least one digit, let e [exponent] be the number formed by interpreting the digits as a
    //    base-10 integer; otherwise, let e be the number 0.
    double exponent = 0;
    while (is_ascii_digit(code_point_at(position))) {
        exponent = (exponent * 10) + (code_point_at(position) - '0');
        position++;
    }

    // NOTE: We checked before calling this function that the string is a valid number,
    //       so if there is anything at the end, something has gone wrong!
    VERIFY(position == string.length());

    // Return the number s·(i + f·10^-d)·10^(t·e).
    return sign * (integer_part + fractional_part * pow(10, -fractional_digits)) * pow(10, exponent_sign * exponent);
}
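
// Worked example (illustrative): for "-3.5e2" the components are s = -1, i = 3, f = 5,
// d = 1, t = 1, e = 2, so the result is -1 · (3 + 5·10^-1) · 10^(1·2) = -350.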

// https://www.w3.org/TR/css-syntax-3/#consume-name
String Tokenizer::consume_a_name()
{
    // This section describes how to consume a name from a stream of code points.
    // It returns a string containing the largest name that can be formed from adjacent
    // code points in the stream, starting from the first.
    //
    // Note: This algorithm does not do the verification of the first few code points that
    // are necessary to ensure the returned code points would constitute an <ident-token>.
    // If that is the intended use, ensure that the stream starts with an identifier before
    // calling this algorithm.

    // Let result initially be an empty string.
    StringBuilder result;

    // Repeatedly consume the next input code point from the stream:
    for (;;) {
        auto input = next_code_point();

        if (is_eof(input))
            break;

        // name code point
        if (is_name_code_point(input)) {
            // Append the code point to result.
            result.append_code_point(input);
            continue;
        }

        // the stream starts with a valid escape
        if (is_valid_escape_sequence(start_of_input_stream_twin())) {
            // Consume an escaped code point. Append the returned code point to result.
            result.append_code_point(consume_escaped_code_point());
            continue;
        }

        // anything else
        // Reconsume the current input code point. Return result.
        reconsume_current_input_code_point();
        break;
    }

    return result.to_string();
}

// https://www.w3.org/TR/css-syntax-3/#consume-url-token
Token Tokenizer::consume_a_url_token()
{
    // This section describes how to consume a url token from a stream of code points.
    // It returns either a <url-token> or a <bad-url-token>.
    //
    // Note: This algorithm assumes that the initial "url(" has already been consumed.
    // This algorithm also assumes that it’s being called to consume an "unquoted" value,
    // like url(foo). A quoted value, like url("foo"), is parsed as a <function-token>.
    // Consume an ident-like token automatically handles this distinction; this algorithm
    // shouldn’t be called directly otherwise.

    // 1. Initially create a <url-token> with its value set to the empty string.
    auto token = create_new_token(Token::Type::Url);
    StringBuilder builder;

    // 2. Consume as much whitespace as possible.
    consume_as_much_whitespace_as_possible();

    auto make_token = [&]() {
        token.m_value = builder.to_string();
        return token;
    };

    // 3. Repeatedly consume the next input code point from the stream:
    for (;;) {
        auto input = next_code_point();

        // U+0029 RIGHT PARENTHESIS ())
        if (is_right_paren(input)) {
            // Return the <url-token>.
            return make_token();
        }

        // EOF
        if (is_eof(input)) {
            // This is a parse error. Return the <url-token>.
            log_parse_error();
            return make_token();
        }

        // whitespace
        if (is_whitespace(input)) {
            // Consume as much whitespace as possible.
            consume_as_much_whitespace_as_possible();

            // If the next input code point is U+0029 RIGHT PARENTHESIS ()) or EOF, consume it
            // and return the <url-token> (if EOF was encountered, this is a parse error);
            input = peek_code_point();

            if (is_right_paren(input)) {
                (void)next_code_point();
                return make_token();
            }

            if (is_eof(input)) {
                (void)next_code_point();
                log_parse_error();
                return make_token();
            }

            // otherwise, consume the remnants of a bad url, create a <bad-url-token>, and return it.
            consume_the_remnants_of_a_bad_url();
            return create_new_token(Token::Type::BadUrl);
        }

        // U+0022 QUOTATION MARK (")
        // U+0027 APOSTROPHE (')
        // U+0028 LEFT PARENTHESIS (()
        // non-printable code point
        if (is_quotation_mark(input) || is_apostrophe(input) || is_left_paren(input) || is_non_printable(input)) {
            // This is a parse error. Consume the remnants of a bad url, create a <bad-url-token>, and return it.
            log_parse_error();
            consume_the_remnants_of_a_bad_url();
            return create_new_token(Token::Type::BadUrl);
        }

        // U+005C REVERSE SOLIDUS (\)
        if (is_reverse_solidus(input)) {
            // If the stream starts with a valid escape,
            if (is_valid_escape_sequence(start_of_input_stream_twin())) {
                // consume an escaped code point and append the returned code point to the <url-token>’s value.
                builder.append_code_point(consume_escaped_code_point());
                continue;
            } else {
                // Otherwise, this is a parse error.
                log_parse_error();
                // Consume the remnants of a bad url, create a <bad-url-token>, and return it.
                consume_the_remnants_of_a_bad_url();
                return create_new_token(Token::Type::BadUrl);
            }
        }

        // anything else
        // Append the current input code point to the <url-token>’s value.
        builder.append_code_point(input);
    }
}
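
// Illustration (not from the spec): given "url(  image.png  )", consume_an_ident_like_token()
// has already eaten "url("; this function skips the leading whitespace and returns a
// <url-token> whose value is "image.png". "url(foo bar)" instead yields a <bad-url-token>,
// because the space inside the value is followed by something other than ')' or EOF.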

// https://www.w3.org/TR/css-syntax-3/#consume-remnants-of-bad-url
void Tokenizer::consume_the_remnants_of_a_bad_url()
{
    // This section describes how to consume the remnants of a bad url from a stream of code points,
    // "cleaning up" after the tokenizer realizes that it’s in the middle of a <bad-url-token> rather
    // than a <url-token>. It returns nothing; its sole use is to consume enough of the input stream
    // to reach a recovery point where normal tokenizing can resume.

    // Repeatedly consume the next input code point from the stream:
    for (;;) {
        auto input = next_code_point();

        // U+0029 RIGHT PARENTHESIS ())
        // EOF
        if (is_eof(input) || is_right_paren(input)) {
            // Return.
            return;
        }

        // the input stream starts with a valid escape
        if (is_valid_escape_sequence(start_of_input_stream_twin())) {
            // Consume an escaped code point.
            // This allows an escaped right parenthesis ("\)") to be encountered without ending
            // the <bad-url-token>. This is otherwise identical to the "anything else" clause.
            (void)consume_escaped_code_point();
        }

        // anything else
        // Do nothing.
    }
}

void Tokenizer::consume_as_much_whitespace_as_possible()
{
    while (is_whitespace(peek_code_point())) {
        (void)next_code_point();
    }
}

void Tokenizer::reconsume_current_input_code_point()
{
    m_utf8_iterator = m_prev_utf8_iterator;
    m_position = m_prev_position;
}

// https://www.w3.org/TR/css-syntax-3/#consume-numeric-token
Token Tokenizer::consume_a_numeric_token()
{
    // This section describes how to consume a numeric token from a stream of code points.
    // It returns either a <number-token>, <percentage-token>, or <dimension-token>.

    // Consume a number and let number be the result.
    auto number = consume_a_number();

    // If the next 3 input code points would start an identifier, then:
    if (would_start_an_identifier(peek_triplet())) {
        // 1. Create a <dimension-token> with the same value and type flag as number,
        //    and a unit set initially to the empty string.
        auto token = create_new_token(Token::Type::Dimension);
        token.m_value = move(number.string);
        token.m_number_type = number.type;
        token.m_number_value = number.value;

        // 2. Consume a name. Set the <dimension-token>’s unit to the returned value.
        auto unit = consume_a_name();
        VERIFY(!unit.is_empty());
        token.m_unit = move(unit);

        // 3. Return the <dimension-token>.
        return token;
    }

    // Otherwise, if the next input code point is U+0025 PERCENTAGE SIGN (%), consume it.
    if (is_percent(peek_code_point())) {
        (void)next_code_point();

        // Create a <percentage-token> with the same value as number, and return it.
        auto token = create_new_token(Token::Type::Percentage);
        token.m_value = move(number.string);
        token.m_number_type = number.type;
        token.m_number_value = number.value;
        return token;
    }

    // Otherwise, create a <number-token> with the same value and type flag as number, and return it.
    auto token = create_new_token(Token::Type::Number);
    token.m_value = move(number.string);
    token.m_number_type = number.type;
    token.m_number_value = number.value;
    return token;
}

// https://www.w3.org/TR/css-syntax-3/#starts-with-a-number
bool Tokenizer::would_start_a_number(U32Triplet values)
{
    // This section describes how to check if three code points would start a number.
    // The algorithm described here can be called explicitly with three code points,
    // or can be called with the input stream itself. In the latter case, the three
    // code points in question are the current input code point and the next two input
    // code points, in that order.
    //
    // Note: This algorithm will not consume any additional code points.

    // Look at the first code point:

    // U+002B PLUS SIGN (+)
    // U+002D HYPHEN-MINUS (-)
    if (is_plus_sign(values.first) || is_hyphen_minus(values.first)) {
        // If the second code point is a digit, return true.
        if (is_ascii_digit(values.second))
            return true;

        // Otherwise, if the second code point is a U+002E FULL STOP (.) and the third
        // code point is a digit, return true.
        if (is_full_stop(values.second) && is_ascii_digit(values.third))
            return true;

        // Otherwise, return false.
        return false;
    }

    // U+002E FULL STOP (.)
    if (is_full_stop(values.first))
        // If the second code point is a digit, return true. Otherwise, return false.
        return is_ascii_digit(values.second);

    // digit
    if (is_ascii_digit(values.first))
        // Return true.
        return true;

    // anything else
    // Return false.
    return false;
}
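
// Illustration (not from the spec): "+12", "-.5", ".5", and "42" would all start a number,
// while "+a", "-.x", and a lone "." would not.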

// https://www.w3.org/TR/css-syntax-3/#starts-with-a-valid-escape
bool Tokenizer::is_valid_escape_sequence(U32Twin values)
{
    // This section describes how to check if two code points are a valid escape.
    // The algorithm described here can be called explicitly with two code points,
    // or can be called with the input stream itself. In the latter case, the two
    // code points in question are the current input code point and the next input
    // code point, in that order.
    //
    // Note: This algorithm will not consume any additional code point.

    // If the first code point is not U+005C REVERSE SOLIDUS (\), return false.
    if (!is_reverse_solidus(values.first))
        return false;

    // Otherwise, if the second code point is a newline, return false.
    if (is_newline(values.second))
        return false;

    // Otherwise, return true.
    return true;
}

// https://www.w3.org/TR/css-syntax-3/#would-start-an-identifier
bool Tokenizer::would_start_an_identifier(U32Triplet values)
{
    // This section describes how to check if three code points would start an identifier.
    // The algorithm described here can be called explicitly with three code points, or
    // can be called with the input stream itself. In the latter case, the three code
    // points in question are the current input code point and the next two input code
    // points, in that order.
    //
    // Note: This algorithm will not consume any additional code points.

    // Look at the first code point:

    // U+002D HYPHEN-MINUS
    if (is_hyphen_minus(values.first)) {
        // If the second code point is a name-start code point or a U+002D HYPHEN-MINUS,
        // or the second and third code points are a valid escape, return true.
        if (is_name_start_code_point(values.second) || is_hyphen_minus(values.second) || is_valid_escape_sequence(values.to_twin_23()))
            return true;

        // Otherwise, return false.
        return false;
    }

    // name-start code point
    if (is_name_start_code_point(values.first)) {
        // Return true.
        return true;
    }

    // U+005C REVERSE SOLIDUS (\)
    if (is_reverse_solidus(values.first)) {
        // If the first and second code points are a valid escape, return true.
        if (is_valid_escape_sequence(values.to_twin_12()))
            return true;

        // Otherwise, return false.
        return false;
    }

    // anything else
    // Return false.
    return false;
}
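
// Illustration (not from the spec): "--x" and "-foo" would start an identifier, whereas
// "-1" would not (it starts a number instead), and a reverse solidus followed by a newline
// is not a valid escape, so it would not start an identifier either.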

// https://www.w3.org/TR/css-syntax-3/#consume-string-token
Token Tokenizer::consume_string_token(u32 ending_code_point)
{
    // This section describes how to consume a string token from a stream of code points.
    // It returns either a <string-token> or <bad-string-token>.
    //
    // This algorithm may be called with an ending code point, which denotes the code point
    // that ends the string. If an ending code point is not specified, the current input
    // code point is used.

    // Initially create a <string-token> with its value set to the empty string.
    auto token = create_new_token(Token::Type::String);
    StringBuilder builder;

    auto make_token = [&]() {
        token.m_value = builder.to_string();
        return token;
    };

    // Repeatedly consume the next input code point from the stream:
    for (;;) {
        auto input = next_code_point();

        // ending code point
        if (input == ending_code_point)
            return make_token();

        // EOF
        if (is_eof(input)) {
            // This is a parse error. Return the <string-token>.
            log_parse_error();
            return make_token();
        }

        // newline
        if (is_newline(input)) {
            // This is a parse error. Reconsume the current input code point, create a
            // <bad-string-token>, and return it.
            reconsume_current_input_code_point();
            return create_new_token(Token::Type::BadString);
        }

        // U+005C REVERSE SOLIDUS (\)
        if (is_reverse_solidus(input)) {
            // If the next input code point is EOF, do nothing.
            auto next_input = peek_code_point();
            if (is_eof(next_input))
                continue;

            // Otherwise, if the next input code point is a newline, consume it.
            if (is_newline(next_input)) {
                (void)next_code_point();
                continue;
            }

            // Otherwise, (the stream starts with a valid escape) consume an escaped code
            // point and append the returned code point to the <string-token>’s value.
            auto escaped = consume_escaped_code_point();
            builder.append_code_point(escaped);
            continue;
        }

        // anything else
        // Append the current input code point to the <string-token>’s value.
        builder.append_code_point(input);
    }
}
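
// Illustration (not from the spec): with '"' as the ending code point, the remaining input
// hello" yields a <string-token> whose value is "hello"; a backslash immediately followed
// by a newline acts as a line continuation and adds nothing to the value, while an
// unescaped newline turns the result into a <bad-string-token>.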

// https://www.w3.org/TR/css-syntax-3/#consume-comment
void Tokenizer::consume_comments()
{
    // This section describes how to consume comments from a stream of code points.
    // It returns nothing.

start:
    // If the next two input code points are U+002F SOLIDUS (/) followed by a U+002A ASTERISK (*),
    // consume them and all following code points up to and including the first U+002A ASTERISK (*)
    // followed by a U+002F SOLIDUS (/), or up to an EOF code point. Return to the start of this step.
    //
    // If the preceding paragraph ended by consuming an EOF code point, this is a parse error.
    //
    // Return nothing.
    auto twin = peek_twin();
    if (!(is_solidus(twin.first) && is_asterisk(twin.second)))
        return;

    (void)next_code_point();
    (void)next_code_point();

    for (;;) {
        auto twin_inner = peek_twin();
        if (is_eof(twin_inner.first) || is_eof(twin_inner.second)) {
            log_parse_error();
            return;
        }

        if (is_asterisk(twin_inner.first) && is_solidus(twin_inner.second)) {
            (void)next_code_point();
            (void)next_code_point();
            goto start;
        }

        (void)next_code_point();
    }
}
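
// Illustration (not from the spec): CSS comments do not nest, so for the input
// "/* a /* b */ c */" this function stops after the first "*/" and leaves " c */"
// in the stream to be tokenized normally.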

// https://www.w3.org/TR/css-syntax-3/#consume-token
Token Tokenizer::consume_a_token()
{
    // This section describes how to consume a token from a stream of code points.
    // It will return a single token of any type.

    // Consume comments.
    consume_comments();

    // Consume the next input code point.
    auto input = next_code_point();

    // whitespace
    if (is_whitespace(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is whitespace");

        // Consume as much whitespace as possible. Return a <whitespace-token>.
        consume_as_much_whitespace_as_possible();
        return create_new_token(Token::Type::Whitespace);
    }

    // U+0022 QUOTATION MARK (")
    if (is_quotation_mark(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is quotation mark");

        // Consume a string token and return it.
        return consume_string_token(input);
    }

    // U+0023 NUMBER SIGN (#)
    if (is_number_sign(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is number sign");

        // If the next input code point is a name code point or the next two input code points
        // are a valid escape, then:
        auto next_input = peek_code_point();
        auto maybe_escape = peek_twin();

        if (is_name_code_point(next_input) || is_valid_escape_sequence(maybe_escape)) {
            // 1. Create a <hash-token>.
            auto token = create_new_token(Token::Type::Hash);

            // 2. If the next 3 input code points would start an identifier, set the <hash-token>’s
            //    type flag to "id".
            if (would_start_an_identifier(peek_triplet()))
                token.m_hash_type = Token::HashType::Id;

            // 3. Consume a name, and set the <hash-token>’s value to the returned string.
            auto name = consume_a_name();
            token.m_value = move(name);

            // 4. Return the <hash-token>.
            return token;
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input);
    }

    // U+0027 APOSTROPHE (')
    if (is_apostrophe(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is apostrophe");

        // Consume a string token and return it.
        return consume_string_token(input);
    }

    // U+0028 LEFT PARENTHESIS (()
    if (is_left_paren(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is left paren");

        // Return a <(-token>.
        return create_new_token(Token::Type::OpenParen);
    }

    // U+0029 RIGHT PARENTHESIS ())
    if (is_right_paren(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is right paren");

        // Return a <)-token>.
        return create_new_token(Token::Type::CloseParen);
    }

    // U+002B PLUS SIGN (+)
    if (is_plus_sign(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is plus sign");

        // If the input stream starts with a number, reconsume the current input code point,
        // consume a numeric token and return it.
        if (would_start_a_number(start_of_input_stream_triplet())) {
            reconsume_current_input_code_point();
            return consume_a_numeric_token();
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input);
    }

    // U+002C COMMA (,)
    if (is_comma(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is comma");

        // Return a <comma-token>.
        return create_new_token(Token::Type::Comma);
    }

    // U+002D HYPHEN-MINUS (-)
    if (is_hyphen_minus(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is hyphen minus");

        // If the input stream starts with a number, reconsume the current input code point,
        // consume a numeric token, and return it.
        if (would_start_a_number(start_of_input_stream_triplet())) {
            reconsume_current_input_code_point();
            return consume_a_numeric_token();
        }

        // Otherwise, if the next 2 input code points are U+002D HYPHEN-MINUS U+003E
        // GREATER-THAN SIGN (->), consume them and return a <CDC-token>.
        auto next_twin = peek_twin();
        if (is_hyphen_minus(next_twin.first) && is_greater_than_sign(next_twin.second)) {
            (void)next_code_point();
            (void)next_code_point();

            return create_new_token(Token::Type::CDC);
        }

        // Otherwise, if the input stream starts with an identifier, reconsume the current
        // input code point, consume an ident-like token, and return it.
        if (would_start_an_identifier(start_of_input_stream_triplet())) {
            reconsume_current_input_code_point();
            return consume_an_ident_like_token();
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input);
    }

    // U+002E FULL STOP (.)
    if (is_full_stop(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is full stop");

        // If the input stream starts with a number, reconsume the current input code point,
        // consume a numeric token, and return it.
        if (would_start_a_number(start_of_input_stream_triplet())) {
            reconsume_current_input_code_point();
            return consume_a_numeric_token();
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input);
    }

    // U+003A COLON (:)
    if (is_colon(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is colon");

        // Return a <colon-token>.
        return create_new_token(Token::Type::Colon);
    }

    // U+003B SEMICOLON (;)
    if (is_semicolon(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is semicolon");

        // Return a <semicolon-token>.
        return create_new_token(Token::Type::Semicolon);
    }

    // U+003C LESS-THAN SIGN (<)
    if (is_less_than_sign(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is less than");

        // If the next 3 input code points are U+0021 EXCLAMATION MARK U+002D HYPHEN-MINUS
        // U+002D HYPHEN-MINUS (!--), consume them and return a <CDO-token>.
        auto maybe_cdo = peek_triplet();
        if (is_exclamation_mark(maybe_cdo.first) && is_hyphen_minus(maybe_cdo.second) && is_hyphen_minus(maybe_cdo.third)) {
            (void)next_code_point();
            (void)next_code_point();
            (void)next_code_point();

            return create_new_token(Token::Type::CDO);
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input);
    }

    // U+0040 COMMERCIAL AT (@)
    if (is_at(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is at");

        // If the next 3 input code points would start an identifier, consume a name, create
        // an <at-keyword-token> with its value set to the returned value, and return it.
        if (would_start_an_identifier(peek_triplet())) {
            auto name = consume_a_name();
            return create_value_token(Token::Type::AtKeyword, name);
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input);
    }

    // U+005B LEFT SQUARE BRACKET ([)
    if (is_open_square_bracket(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is open square");

        // Return a <[-token>.
        return create_new_token(Token::Type::OpenSquare);
    }

    // U+005C REVERSE SOLIDUS (\)
    if (is_reverse_solidus(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is reverse solidus");

        // If the input stream starts with a valid escape, reconsume the current input code point,
        // consume an ident-like token, and return it.
        if (is_valid_escape_sequence(start_of_input_stream_twin())) {
            reconsume_current_input_code_point();
            return consume_an_ident_like_token();
        }

        // Otherwise, this is a parse error. Return a <delim-token> with its value set to the
        // current input code point.
        log_parse_error();
        return create_value_token(Token::Type::Delim, input);
    }

    // U+005D RIGHT SQUARE BRACKET (])
    if (is_closed_square_bracket(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is closed square");

        // Return a <]-token>.
        return create_new_token(Token::Type::CloseSquare);
    }

    // U+007B LEFT CURLY BRACKET ({)
    if (is_open_curly_bracket(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is open curly");

        // Return a <{-token>.
        return create_new_token(Token::Type::OpenCurly);
    }

    // U+007D RIGHT CURLY BRACKET (})
    if (is_closed_curly_bracket(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is closed curly");

        // Return a <}-token>.
        return create_new_token(Token::Type::CloseCurly);
    }

    // digit
    if (is_ascii_digit(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is digit");

        // Reconsume the current input code point, consume a numeric token, and return it.
        reconsume_current_input_code_point();
        return consume_a_numeric_token();
    }

    // name-start code point
    if (is_name_start_code_point(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is name start");

        // Reconsume the current input code point, consume an ident-like token, and return it.
        reconsume_current_input_code_point();
        return consume_an_ident_like_token();
    }

    // EOF
    if (is_eof(input)) {
        // Return an <EOF-token>.
        return create_new_token(Token::Type::EndOfFile);
    }

    // anything else
    dbgln_if(CSS_TOKENIZER_DEBUG, "is delimiter");

    // Return a <delim-token> with its value set to the current input code point.
    return create_value_token(Token::Type::Delim, input);
}

}