Tokenizer.cpp

/*
 * Copyright (c) 2020-2022, the SerenityOS developers.
 * Copyright (c) 2021-2023, Sam Atkins <atkinssj@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/CharacterTypes.h>
#include <AK/Debug.h>
#include <AK/FloatingPointStringConversions.h>
#include <AK/SourceLocation.h>
#include <AK/Vector.h>
#include <LibTextCodec/Decoder.h>
#include <LibWeb/CSS/Parser/Tokenizer.h>
#include <LibWeb/Infra/Strings.h>

namespace Web::CSS::Parser {

// U+FFFD REPLACEMENT CHARACTER (�)
#define REPLACEMENT_CHARACTER 0xFFFD
static constexpr u32 TOKENIZER_EOF = 0xFFFFFFFF;

static inline void log_parse_error(SourceLocation const& location = SourceLocation::current())
{
    dbgln_if(CSS_TOKENIZER_DEBUG, "Parse error (css tokenization) {} ", location);
}

static inline bool is_eof(u32 code_point)
{
    return code_point == TOKENIZER_EOF;
}

static inline bool is_quotation_mark(u32 code_point)
{
    return code_point == 0x22;
}

static inline bool is_greater_than_maximum_allowed_code_point(u32 code_point)
{
    return code_point > 0x10FFFF;
}

static inline bool is_low_line(u32 code_point)
{
    return code_point == 0x5F;
}

// https://www.w3.org/TR/css-syntax-3/#ident-start-code-point
static inline bool is_ident_start_code_point(u32 code_point)
{
    // FIXME: We use !is_ascii() for "non-ASCII code point" in the spec, but it's not quite right -
    //        it treats EOF as valid! The spec also lacks a definition of "code point". For now, the
    //        !is_eof() check is a hack, but it should work.
    return !is_eof(code_point) && (is_ascii_alpha(code_point) || !is_ascii(code_point) || is_low_line(code_point));
}

static inline bool is_hyphen_minus(u32 code_point)
{
    return code_point == 0x2D;
}

// https://www.w3.org/TR/css-syntax-3/#ident-code-point
static inline bool is_ident_code_point(u32 code_point)
{
    return is_ident_start_code_point(code_point) || is_ascii_digit(code_point) || is_hyphen_minus(code_point);
}

static inline bool is_non_printable(u32 code_point)
{
    return code_point <= 0x8 || code_point == 0xB || (code_point >= 0xE && code_point <= 0x1F) || code_point == 0x7F;
}

static inline bool is_number_sign(u32 code_point)
{
    return code_point == 0x23;
}

static inline bool is_reverse_solidus(u32 code_point)
{
    return code_point == 0x5C;
}

static inline bool is_apostrophe(u32 code_point)
{
    return code_point == 0x27;
}

static inline bool is_left_paren(u32 code_point)
{
    return code_point == 0x28;
}

static inline bool is_right_paren(u32 code_point)
{
    return code_point == 0x29;
}

static inline bool is_plus_sign(u32 code_point)
{
    return code_point == 0x2B;
}

static inline bool is_comma(u32 code_point)
{
    return code_point == 0x2C;
}

static inline bool is_full_stop(u32 code_point)
{
    return code_point == 0x2E;
}

static inline bool is_newline(u32 code_point)
{
    return code_point == 0xA;
}

static inline bool is_asterisk(u32 code_point)
{
    return code_point == 0x2A;
}

static inline bool is_solidus(u32 code_point)
{
    return code_point == 0x2F;
}

static inline bool is_colon(u32 code_point)
{
    return code_point == 0x3A;
}

static inline bool is_semicolon(u32 code_point)
{
    return code_point == 0x3B;
}

static inline bool is_less_than_sign(u32 code_point)
{
    return code_point == 0x3C;
}

static inline bool is_greater_than_sign(u32 code_point)
{
    return code_point == 0x3E;
}

static inline bool is_at(u32 code_point)
{
    return code_point == 0x40;
}

static inline bool is_open_square_bracket(u32 code_point)
{
    return code_point == 0x5B;
}

static inline bool is_closed_square_bracket(u32 code_point)
{
    return code_point == 0x5D;
}

static inline bool is_open_curly_bracket(u32 code_point)
{
    return code_point == 0x7B;
}

static inline bool is_closed_curly_bracket(u32 code_point)
{
    return code_point == 0x7D;
}

static inline bool is_whitespace(u32 code_point)
{
    return code_point == 0x9 || code_point == 0xA || code_point == 0x20;
}

static inline bool is_percent(u32 code_point)
{
    return code_point == 0x25;
}

static inline bool is_exclamation_mark(u32 code_point)
{
    return code_point == 0x21;
}

static inline bool is_e(u32 code_point)
{
    return code_point == 0x65;
}

static inline bool is_E(u32 code_point)
{
    return code_point == 0x45;
}

ErrorOr<Vector<Token>> Tokenizer::tokenize(StringView input, StringView encoding)
{
    // https://www.w3.org/TR/css-syntax-3/#css-filter-code-points
    auto filter_code_points = [](StringView input, auto encoding) -> ErrorOr<String> {
        auto decoder = TextCodec::decoder_for(encoding);
        VERIFY(decoder.has_value());

        StringBuilder builder { input.length() };
        bool last_was_carriage_return = false;

        // To filter code points from a stream of (unfiltered) code points input:
        TRY(decoder->process(input, [&builder, &last_was_carriage_return](u32 code_point) -> ErrorOr<void> {
            // Replace any U+000D CARRIAGE RETURN (CR) code points,
            // U+000C FORM FEED (FF) code points,
            // or pairs of U+000D CARRIAGE RETURN (CR) followed by U+000A LINE FEED (LF)
            // in input by a single U+000A LINE FEED (LF) code point.
            if (code_point == '\r') {
                if (last_was_carriage_return) {
                    TRY(builder.try_append('\n'));
                } else {
                    last_was_carriage_return = true;
                }
            } else {
                if (last_was_carriage_return)
                    TRY(builder.try_append('\n'));

                if (code_point == '\n') {
                    if (!last_was_carriage_return)
                        TRY(builder.try_append('\n'));
                } else if (code_point == '\f') {
                    TRY(builder.try_append('\n'));
                    // Replace any U+0000 NULL or surrogate code points in input with U+FFFD REPLACEMENT CHARACTER (�).
                } else if (code_point == 0x00 || (code_point >= 0xD800 && code_point <= 0xDFFF)) {
                    TRY(builder.try_append_code_point(REPLACEMENT_CHARACTER));
                } else {
                    TRY(builder.try_append_code_point(code_point));
                }

                last_was_carriage_return = false;
            }
            return {};
        }));
        return builder.to_string();
    };

    Tokenizer tokenizer { TRY(filter_code_points(input, encoding)) };
    return tokenizer.tokenize();
}
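
// Illustrative usage of the static entry point above (a sketch, assuming a UTF-8 stylesheet and
// an ErrorOr-returning caller; not part of the tokenizer itself):
//
//     auto tokens = TRY(Tokenizer::tokenize("p { color: red }"sv, "UTF-8"sv));
//
// The returned vector always ends with an EndOfFile token, and every token carries its start/end
// positions plus the exact slice of input it was produced from (its "representation").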

Tokenizer::Tokenizer(String decoded_input)
    : m_decoded_input(move(decoded_input))
    , m_utf8_view(m_decoded_input)
    , m_utf8_iterator(m_utf8_view.begin())
{
}

ErrorOr<Vector<Token>> Tokenizer::tokenize()
{
    Vector<Token> tokens;
    for (;;) {
        auto token_start = m_position;
        auto token = TRY(consume_a_token());
        token.m_start_position = token_start;
        token.m_end_position = m_position;
        TRY(tokens.try_append(token));

        if (token.is(Token::Type::EndOfFile)) {
            return tokens;
        }
    }
}

u32 Tokenizer::next_code_point()
{
    if (m_utf8_iterator == m_utf8_view.end())
        return TOKENIZER_EOF;
    m_prev_utf8_iterator = m_utf8_iterator;
    ++m_utf8_iterator;
    auto code_point = *m_prev_utf8_iterator;

    m_prev_position = m_position;
    if (is_newline(code_point)) {
        m_position.line++;
        m_position.column = 0;
    } else {
        m_position.column++;
    }

    dbgln_if(CSS_TOKENIZER_DEBUG, "(Tokenizer) Next code_point: {:d}", code_point);
    return code_point;
}

u32 Tokenizer::peek_code_point(size_t offset) const
{
    auto it = m_utf8_iterator;
    for (size_t i = 0; i < offset && it != m_utf8_view.end(); ++i)
        ++it;
    if (it == m_utf8_view.end())
        return TOKENIZER_EOF;
    dbgln_if(CSS_TOKENIZER_DEBUG, "(Tokenizer) Peek code_point: {:d}", *it);
    return *it;
}

U32Twin Tokenizer::peek_twin() const
{
    U32Twin values { TOKENIZER_EOF, TOKENIZER_EOF };
    auto it = m_utf8_iterator;
    for (size_t i = 0; i < 2 && it != m_utf8_view.end(); ++i) {
        values.set(i, *it);
        ++it;
    }
    dbgln_if(CSS_TOKENIZER_DEBUG, "(Tokenizer) Peek twin: {:d},{:d}", values.first, values.second);
    return values;
}

U32Triplet Tokenizer::peek_triplet() const
{
    U32Triplet values { TOKENIZER_EOF, TOKENIZER_EOF, TOKENIZER_EOF };
    auto it = m_utf8_iterator;
    for (size_t i = 0; i < 3 && it != m_utf8_view.end(); ++i) {
        values.set(i, *it);
        ++it;
    }
    dbgln_if(CSS_TOKENIZER_DEBUG, "(Tokenizer) Peek triplet: {:d},{:d},{:d}", values.first, values.second, values.third);
    return values;
}

U32Twin Tokenizer::start_of_input_stream_twin()
{
    U32Twin twin;
    // FIXME: Reconsuming just to read the current code point again is weird.
    reconsume_current_input_code_point();
    twin.first = next_code_point();
    twin.second = peek_code_point();
    return twin;
}

U32Triplet Tokenizer::start_of_input_stream_triplet()
{
    U32Triplet triplet;
    // FIXME: Reconsuming just to read the current code point again is weird.
    reconsume_current_input_code_point();
    triplet.first = next_code_point();
    auto next_two = peek_twin();
    triplet.second = next_two.first;
    triplet.third = next_two.second;
    return triplet;
}

Token Tokenizer::create_new_token(Token::Type type)
{
    Token token = {};
    token.m_type = type;
    return token;
}

Token Tokenizer::create_eof_token()
{
    return create_new_token(Token::Type::EndOfFile);
}

Token Tokenizer::create_value_token(Token::Type type, FlyString&& value, String&& representation)
{
    auto token = create_new_token(type);
    token.m_value = move(value);
    token.m_representation = move(representation);
    return token;
}

Token Tokenizer::create_value_token(Token::Type type, u32 value, String&& representation)
{
    auto token = create_new_token(type);
    token.m_value = String::from_code_point(value);
    token.m_representation = move(representation);
    return token;
}

// https://www.w3.org/TR/css-syntax-3/#consume-escaped-code-point
u32 Tokenizer::consume_escaped_code_point()
{
    // This section describes how to consume an escaped code point.
    // It assumes that the U+005C REVERSE SOLIDUS (\) has already been consumed and that the next
    // input code point has already been verified to be part of a valid escape.
    // It will return a code point.

    // Consume the next input code point.
    auto input = next_code_point();

    // hex digit
    if (is_ascii_hex_digit(input)) {
        // Consume as many hex digits as possible, but no more than 5.
        // Note that this means 1-6 hex digits have been consumed in total.
        StringBuilder builder;
        builder.append_code_point(input);

        size_t counter = 0;
        while (is_ascii_hex_digit(peek_code_point()) && counter++ < 5) {
            builder.append_code_point(next_code_point());
        }

        // If the next input code point is whitespace, consume it as well.
        if (is_whitespace(peek_code_point())) {
            (void)next_code_point();
        }

        // Interpret the hex digits as a hexadecimal number.
        auto unhexed = AK::StringUtils::convert_to_uint_from_hex<u32>(builder.string_view()).value_or(0);

        // If this number is zero, or is for a surrogate, or is greater than the maximum allowed
        // code point, return U+FFFD REPLACEMENT CHARACTER (�).
        if (unhexed == 0 || is_unicode_surrogate(unhexed) || is_greater_than_maximum_allowed_code_point(unhexed)) {
            return REPLACEMENT_CHARACTER;
        }

        // Otherwise, return the code point with that value.
        return unhexed;
    }

    // EOF
    if (is_eof(input)) {
        // This is a parse error. Return U+FFFD REPLACEMENT CHARACTER (�).
        log_parse_error();
        return REPLACEMENT_CHARACTER;
    }

    // anything else
    // Return the current input code point.
    return input;
}
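
// Illustrative examples of the escape algorithm above (derived from the spec steps it quotes):
//   - For "\41 b" the "\" has already been consumed; this function reads "41", eats the single
//     following space, and returns U+0041 ('A'), so an ident built from it becomes "Ab".
//   - "\0" and "\110000" (zero, or above U+10FFFF) both yield U+FFFD REPLACEMENT CHARACTER.
//   - A lone "\" at EOF is a parse error and also yields U+FFFD.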

// https://www.w3.org/TR/css-syntax-3/#consume-ident-like-token
ErrorOr<Token> Tokenizer::consume_an_ident_like_token()
{
    // This section describes how to consume an ident-like token from a stream of code points.
    // It returns an <ident-token>, <function-token>, <url-token>, or <bad-url-token>.

    // Consume an ident sequence, and let string be the result.
    auto start_byte_offset = current_byte_offset();
    auto string = TRY(consume_an_ident_sequence());

    // If string’s value is an ASCII case-insensitive match for "url", and the next input code
    // point is U+0028 LEFT PARENTHESIS ((), consume it.
    if (Infra::is_ascii_case_insensitive_match(string, "url"sv) && is_left_paren(peek_code_point())) {
        (void)next_code_point();

        // While the next two input code points are whitespace, consume the next input code point.
        for (;;) {
            auto maybe_whitespace = peek_twin();
            if (!(is_whitespace(maybe_whitespace.first) && is_whitespace(maybe_whitespace.second))) {
                break;
            }
            (void)next_code_point();
        }

        // If the next one or two input code points are U+0022 QUOTATION MARK ("), U+0027 APOSTROPHE ('),
        // or whitespace followed by U+0022 QUOTATION MARK (") or U+0027 APOSTROPHE ('), then create a
        // <function-token> with its value set to string and return it.
        auto next_two = peek_twin();
        if (is_quotation_mark(next_two.first) || is_apostrophe(next_two.first) || (is_whitespace(next_two.first) && (is_quotation_mark(next_two.second) || is_apostrophe(next_two.second)))) {
            return create_value_token(Token::Type::Function, move(string), TRY(input_since(start_byte_offset)));
        }

        // Otherwise, consume a url token, and return it.
        return consume_a_url_token();
    }

    // Otherwise, if the next input code point is U+0028 LEFT PARENTHESIS ((), consume it.
    if (is_left_paren(peek_code_point())) {
        (void)next_code_point();

        // Create a <function-token> with its value set to string and return it.
        return create_value_token(Token::Type::Function, move(string), TRY(input_since(start_byte_offset)));
    }

    // Otherwise, create an <ident-token> with its value set to string and return it.
    return create_value_token(Token::Type::Ident, move(string), TRY(input_since(start_byte_offset)));
}

// https://www.w3.org/TR/css-syntax-3/#consume-number
Number Tokenizer::consume_a_number()
{
    // This section describes how to consume a number from a stream of code points.
    // It returns a numeric value, and a type which is either "integer" or "number".
    //
    // Note: This algorithm does not do the verification of the first few code points
    // that are necessary to ensure a number can be obtained from the stream. Ensure
    // that the stream starts with a number before calling this algorithm.

    // Execute the following steps in order:

    // 1. Initially set type to "integer". Let repr be the empty string.
    StringBuilder repr;
    Number::Type type = Number::Type::Integer;

    // 2. If the next input code point is U+002B PLUS SIGN (+) or U+002D HYPHEN-MINUS (-),
    //    consume it and append it to repr.
    bool has_explicit_sign = false;
    auto next_input = peek_code_point();
    if (is_plus_sign(next_input) || is_hyphen_minus(next_input)) {
        has_explicit_sign = true;
        repr.append_code_point(next_code_point());
    }

    // 3. While the next input code point is a digit, consume it and append it to repr.
    for (;;) {
        auto digits = peek_code_point();
        if (!is_ascii_digit(digits))
            break;

        repr.append_code_point(next_code_point());
    }

    // 4. If the next 2 input code points are U+002E FULL STOP (.) followed by a digit, then:
    auto maybe_number = peek_twin();
    if (is_full_stop(maybe_number.first) && is_ascii_digit(maybe_number.second)) {
        // 1. Consume them.
        // 2. Append them to repr.
        repr.append_code_point(next_code_point());
        repr.append_code_point(next_code_point());

        // 3. Set type to "number".
        type = Number::Type::Number;

        // 4. While the next input code point is a digit, consume it and append it to repr.
        for (;;) {
            auto digit = peek_code_point();
            if (!is_ascii_digit(digit))
                break;

            repr.append_code_point(next_code_point());
        }
    }

    // 5. If the next 2 or 3 input code points are U+0045 LATIN CAPITAL LETTER E (E) or
    //    U+0065 LATIN SMALL LETTER E (e), optionally followed by U+002D HYPHEN-MINUS (-)
    //    or U+002B PLUS SIGN (+), followed by a digit, then:
    auto maybe_exp = peek_triplet();
    if ((is_E(maybe_exp.first) || is_e(maybe_exp.first))
        && (((is_plus_sign(maybe_exp.second) || is_hyphen_minus(maybe_exp.second)) && is_ascii_digit(maybe_exp.third))
            || (is_ascii_digit(maybe_exp.second)))) {
        // 1. Consume them.
        // 2. Append them to repr.
        if (is_plus_sign(maybe_exp.second) || is_hyphen_minus(maybe_exp.second)) {
            if (is_ascii_digit(maybe_exp.third)) {
                repr.append_code_point(next_code_point());
                repr.append_code_point(next_code_point());
                repr.append_code_point(next_code_point());
            }
        } else if (is_ascii_digit(maybe_exp.second)) {
            repr.append_code_point(next_code_point());
            repr.append_code_point(next_code_point());
        }

        // 3. Set type to "number".
        type = Number::Type::Number;

        // 4. While the next input code point is a digit, consume it and append it to repr.
        for (;;) {
            auto digits = peek_code_point();
            if (!is_ascii_digit(digits))
                break;

            repr.append_code_point(next_code_point());
        }
    }

    // 6. Convert repr to a number, and set the value to the returned value.
    auto value = convert_a_string_to_a_number(repr.string_view());

    // 7. Return value and type.
    if (type == Number::Type::Integer && has_explicit_sign)
        return Number { Number::Type::IntegerWithExplicitSign, value };
    return Number { type, value };
}
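
// Worked examples for the number algorithm above:
//   - "42px"    -> repr "42", type integer; the trailing "px" is left for the caller to handle.
//   - "-12.5e3" -> repr "-12.5e3", type number, value -12500.
//   - "+2"      -> repr "+2"; because an explicit sign was seen, the returned type is
//                  IntegerWithExplicitSign, so callers can tell "+2" apart from "2".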

// https://www.w3.org/TR/css-syntax-3/#convert-string-to-number
float Tokenizer::convert_a_string_to_a_number(StringView string)
{
    // FIXME: We already found the whole part, fraction part and exponent during
    //        validation, we could probably skip parsing the string again here.
    return string.to_float(AK::TrimWhitespace::No).release_value();
}

// https://www.w3.org/TR/css-syntax-3/#consume-name
ErrorOr<FlyString> Tokenizer::consume_an_ident_sequence()
{
    // This section describes how to consume an ident sequence from a stream of code points.
    // It returns a string containing the largest name that can be formed from adjacent
    // code points in the stream, starting from the first.
    //
    // Note: This algorithm does not do the verification of the first few code points that
    // are necessary to ensure the returned code points would constitute an <ident-token>.
    // If that is the intended use, ensure that the stream starts with an ident sequence before
    // calling this algorithm.

    // Let result initially be an empty string.
    StringBuilder result;

    // Repeatedly consume the next input code point from the stream:
    for (;;) {
        auto input = next_code_point();
        if (is_eof(input))
            break;

        // name code point
        if (is_ident_code_point(input)) {
            // Append the code point to result.
            TRY(result.try_append_code_point(input));
            continue;
        }

        // the stream starts with a valid escape
        if (is_valid_escape_sequence(start_of_input_stream_twin())) {
            // Consume an escaped code point. Append the returned code point to result.
            TRY(result.try_append_code_point(consume_escaped_code_point()));
            continue;
        }

        // anything else
        // Reconsume the current input code point. Return result.
        reconsume_current_input_code_point();
        break;
    }

    return result.to_fly_string();
}
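
// Illustrative examples for consume_an_ident_sequence():
//   - Given "background-color: red", it consumes "background-color" and stops at the ':',
//     which is reconsumed so the main loop can emit a <colon-token> next.
//   - Given "\26 B" (an escaped '&' followed by 'B'), it returns "&B".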

// https://www.w3.org/TR/css-syntax-3/#consume-url-token
ErrorOr<Token> Tokenizer::consume_a_url_token()
{
    // This section describes how to consume a url token from a stream of code points.
    // It returns either a <url-token> or a <bad-url-token>.
    //
    // Note: This algorithm assumes that the initial "url(" has already been consumed.
    // This algorithm also assumes that it’s being called to consume an "unquoted" value,
    // like url(foo). A quoted value, like url("foo"), is parsed as a <function-token>.
    // Consume an ident-like token automatically handles this distinction; this algorithm
    // shouldn’t be called directly otherwise.

    // 1. Initially create a <url-token> with its value set to the empty string.
    auto start_byte_offset = current_byte_offset();
    auto token = create_new_token(Token::Type::Url);
    StringBuilder builder;

    // 2. Consume as much whitespace as possible.
    consume_as_much_whitespace_as_possible();

    auto make_token = [&]() -> ErrorOr<Token> {
        token.m_value = TRY(FlyString::from_utf8(builder.string_view()));
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    };

    // 3. Repeatedly consume the next input code point from the stream:
    for (;;) {
        auto input = next_code_point();

        // U+0029 RIGHT PARENTHESIS ())
        if (is_right_paren(input)) {
            // Return the <url-token>.
            return make_token();
        }

        // EOF
        if (is_eof(input)) {
            // This is a parse error. Return the <url-token>.
            log_parse_error();
            return make_token();
        }

        // whitespace
        if (is_whitespace(input)) {
            // Consume as much whitespace as possible.
            consume_as_much_whitespace_as_possible();

            // If the next input code point is U+0029 RIGHT PARENTHESIS ()) or EOF, consume it
            // and return the <url-token> (if EOF was encountered, this is a parse error);
            input = peek_code_point();

            if (is_right_paren(input)) {
                (void)next_code_point();
                return make_token();
            }

            if (is_eof(input)) {
                (void)next_code_point();
                log_parse_error();
                return make_token();
            }

            // otherwise, consume the remnants of a bad url, create a <bad-url-token>, and return it.
            consume_the_remnants_of_a_bad_url();
            auto bad_url_token = create_new_token(Token::Type::BadUrl);
            bad_url_token.m_representation = TRY(input_since(start_byte_offset));
            return bad_url_token;
        }

        // U+0022 QUOTATION MARK (")
        // U+0027 APOSTROPHE (')
        // U+0028 LEFT PARENTHESIS (()
        // non-printable code point
        if (is_quotation_mark(input) || is_apostrophe(input) || is_left_paren(input) || is_non_printable(input)) {
            // This is a parse error. Consume the remnants of a bad url, create a <bad-url-token>, and return it.
            log_parse_error();
            consume_the_remnants_of_a_bad_url();
            auto bad_url_token = create_new_token(Token::Type::BadUrl);
            bad_url_token.m_representation = TRY(input_since(start_byte_offset));
            return bad_url_token;
        }

        // U+005C REVERSE SOLIDUS (\)
        if (is_reverse_solidus(input)) {
            // If the stream starts with a valid escape,
            if (is_valid_escape_sequence(start_of_input_stream_twin())) {
                // consume an escaped code point and append the returned code point to the <url-token>’s value.
                builder.append_code_point(consume_escaped_code_point());
                continue;
            } else {
                // Otherwise, this is a parse error.
                log_parse_error();
                // Consume the remnants of a bad url, create a <bad-url-token>, and return it.
                consume_the_remnants_of_a_bad_url();
                auto bad_url_token = create_new_token(Token::Type::BadUrl);
                bad_url_token.m_representation = TRY(input_since(start_byte_offset));
                return bad_url_token;
            }
        }

        // anything else
        // Append the current input code point to the <url-token>’s value.
        builder.append_code_point(input);
    }
}
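
// Illustrative examples for consume_a_url_token() (reached only for unquoted url() values):
//   - url( foo.png )  -> <url-token> with value "foo.png"; leading and trailing whitespace is dropped.
//   - url(foo bar)    -> whitespace followed by something other than ')' or EOF, so the remnants
//                        are consumed and a <bad-url-token> is returned.
//   - url(fo"o)       -> a quotation mark inside an unquoted url is a parse error -> <bad-url-token>.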

// https://www.w3.org/TR/css-syntax-3/#consume-remnants-of-bad-url
void Tokenizer::consume_the_remnants_of_a_bad_url()
{
    // This section describes how to consume the remnants of a bad url from a stream of code points,
    // "cleaning up" after the tokenizer realizes that it’s in the middle of a <bad-url-token> rather
    // than a <url-token>. It returns nothing; its sole use is to consume enough of the input stream
    // to reach a recovery point where normal tokenizing can resume.

    // Repeatedly consume the next input code point from the stream:
    for (;;) {
        auto input = next_code_point();

        // U+0029 RIGHT PARENTHESIS ())
        // EOF
        if (is_eof(input) || is_right_paren(input)) {
            // Return.
            return;
        }

        // the input stream starts with a valid escape
        if (is_valid_escape_sequence(start_of_input_stream_twin())) {
            // Consume an escaped code point.
            // This allows an escaped right parenthesis ("\)") to be encountered without ending
            // the <bad-url-token>. This is otherwise identical to the "anything else" clause.
            (void)consume_escaped_code_point();
        }

        // anything else
        // Do nothing.
    }
}

void Tokenizer::consume_as_much_whitespace_as_possible()
{
    while (is_whitespace(peek_code_point())) {
        (void)next_code_point();
    }
}

void Tokenizer::reconsume_current_input_code_point()
{
    m_utf8_iterator = m_prev_utf8_iterator;
    m_position = m_prev_position;
}

// https://www.w3.org/TR/css-syntax-3/#consume-numeric-token
ErrorOr<Token> Tokenizer::consume_a_numeric_token()
{
    // This section describes how to consume a numeric token from a stream of code points.
    // It returns either a <number-token>, <percentage-token>, or <dimension-token>.

    auto start_byte_offset = current_byte_offset();

    // Consume a number and let number be the result.
    auto number = consume_a_number();

    // If the next 3 input code points would start an ident sequence, then:
    if (would_start_an_ident_sequence(peek_triplet())) {
        // 1. Create a <dimension-token> with the same value and type flag as number,
        //    and a unit set initially to the empty string.
        auto token = create_new_token(Token::Type::Dimension);
        token.m_number_value = number;

        // 2. Consume an ident sequence. Set the <dimension-token>’s unit to the returned value.
        auto unit = TRY(consume_an_ident_sequence());
        VERIFY(!unit.is_empty());
        // NOTE: We intentionally store this in the `value`, to save space.
        token.m_value = move(unit);

        // 3. Return the <dimension-token>.
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // Otherwise, if the next input code point is U+0025 PERCENTAGE SIGN (%), consume it.
    if (is_percent(peek_code_point())) {
        (void)next_code_point();

        // Create a <percentage-token> with the same value as number, and return it.
        auto token = create_new_token(Token::Type::Percentage);
        token.m_number_value = number;
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // Otherwise, create a <number-token> with the same value and type flag as number, and return it.
    auto token = create_new_token(Token::Type::Number);
    token.m_number_value = number;
    token.m_representation = TRY(input_since(start_byte_offset));
    return token;
}

// https://www.w3.org/TR/css-syntax-3/#starts-with-a-number
bool Tokenizer::would_start_a_number(U32Triplet values)
{
    // This section describes how to check if three code points would start a number.
    // The algorithm described here can be called explicitly with three code points,
    // or can be called with the input stream itself. In the latter case, the three
    // code points in question are the current input code point and the next two input
    // code points, in that order.
    //
    // Note: This algorithm will not consume any additional code points.

    // Look at the first code point:

    // U+002B PLUS SIGN (+)
    // U+002D HYPHEN-MINUS (-)
    if (is_plus_sign(values.first) || is_hyphen_minus(values.first)) {
        // If the second code point is a digit, return true.
        if (is_ascii_digit(values.second))
            return true;

        // Otherwise, if the second code point is a U+002E FULL STOP (.) and the third
        // code point is a digit, return true.
        if (is_full_stop(values.second) && is_ascii_digit(values.third))
            return true;

        // Otherwise, return false.
        return false;
    }

    // U+002E FULL STOP (.)
    if (is_full_stop(values.first))
        // If the second code point is a digit, return true. Otherwise, return false.
        return is_ascii_digit(values.second);

    // digit
    if (is_ascii_digit(values.first))
        // Return true.
        return true;

    // anything else
    // Return false.
    return false;
}
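
// Illustrative results for would_start_a_number():
//   "+12" -> true,  "+.5" -> true,  "+a" -> false,
//   ".5"  -> true,  "."   -> false,
//   "7em" -> true (it starts with a digit; the unit is dealt with by consume_a_numeric_token).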

// https://www.w3.org/TR/css-syntax-3/#starts-with-a-valid-escape
bool Tokenizer::is_valid_escape_sequence(U32Twin values)
{
    // This section describes how to check if two code points are a valid escape.
    // The algorithm described here can be called explicitly with two code points,
    // or can be called with the input stream itself. In the latter case, the two
    // code points in question are the current input code point and the next input
    // code point, in that order.
    //
    // Note: This algorithm will not consume any additional code point.

    // If the first code point is not U+005C REVERSE SOLIDUS (\), return false.
    if (!is_reverse_solidus(values.first))
        return false;

    // Otherwise, if the second code point is a newline, return false.
    if (is_newline(values.second))
        return false;

    // Otherwise, return true.
    return true;
}
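
// Illustrative results for is_valid_escape_sequence():
//   backslash + 'n'     -> true  (it escapes the letter 'n', not a newline),
//   backslash + newline -> false (a backslash-newline pair is not an escape),
//   anything that does not start with a backslash -> false.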

// https://www.w3.org/TR/css-syntax-3/#would-start-an-identifier
bool Tokenizer::would_start_an_ident_sequence(U32Triplet values)
{
    // This section describes how to check if three code points would start an ident sequence.
    // The algorithm described here can be called explicitly with three code points, or
    // can be called with the input stream itself. In the latter case, the three code
    // points in question are the current input code point and the next two input code
    // points, in that order.
    //
    // Note: This algorithm will not consume any additional code points.

    // Look at the first code point:

    // U+002D HYPHEN-MINUS
    if (is_hyphen_minus(values.first)) {
        // If the second code point is a name-start code point or a U+002D HYPHEN-MINUS,
        // or the second and third code points are a valid escape, return true.
        if (is_ident_start_code_point(values.second) || is_hyphen_minus(values.second) || is_valid_escape_sequence(values.to_twin_23()))
            return true;

        // Otherwise, return false.
        return false;
    }

    // name-start code point
    if (is_ident_start_code_point(values.first)) {
        // Return true.
        return true;
    }

    // U+005C REVERSE SOLIDUS (\)
    if (is_reverse_solidus(values.first)) {
        // If the first and second code points are a valid escape, return true.
        if (is_valid_escape_sequence(values.to_twin_12()))
            return true;

        // Otherwise, return false.
        return false;
    }

    // anything else
    // Return false.
    return false;
}
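
// Illustrative results for would_start_an_ident_sequence():
//   "--x"    -> true (custom-property-style names),
//   "-1px"   -> false (a hyphen followed by a digit starts a number instead),
//   "foo"    -> true,
//   "\31 23" -> true (starts with a valid escape),
//   "1px"    -> false.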

// https://www.w3.org/TR/css-syntax-3/#consume-string-token
ErrorOr<Token> Tokenizer::consume_string_token(u32 ending_code_point)
{
    // This section describes how to consume a string token from a stream of code points.
    // It returns either a <string-token> or <bad-string-token>.
    //
    // This algorithm may be called with an ending code point, which denotes the code point
    // that ends the string. If an ending code point is not specified, the current input
    // code point is used.

    // Initially create a <string-token> with its value set to the empty string.
    auto start_byte_offset = current_byte_offset();
    auto token = create_new_token(Token::Type::String);
    StringBuilder builder;

    auto make_token = [&]() -> ErrorOr<Token> {
        token.m_value = TRY(FlyString::from_utf8(builder.string_view()));
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    };

    // Repeatedly consume the next input code point from the stream:
    for (;;) {
        auto input = next_code_point();

        // ending code point
        if (input == ending_code_point)
            return make_token();

        // EOF
        if (is_eof(input)) {
            // This is a parse error. Return the <string-token>.
            log_parse_error();
            return make_token();
        }

        // newline
        if (is_newline(input)) {
            // This is a parse error. Reconsume the current input code point, create a
            // <bad-string-token>, and return it.
            reconsume_current_input_code_point();
            auto bad_string_token = create_new_token(Token::Type::BadString);
            bad_string_token.m_representation = TRY(input_since(start_byte_offset));
            return bad_string_token;
        }

        // U+005C REVERSE SOLIDUS (\)
        if (is_reverse_solidus(input)) {
            // If the next input code point is EOF, do nothing.
            auto next_input = peek_code_point();
            if (is_eof(next_input))
                continue;

            // Otherwise, if the next input code point is a newline, consume it.
            if (is_newline(next_input)) {
                (void)next_code_point();
                continue;
            }

            // Otherwise, (the stream starts with a valid escape) consume an escaped code
            // point and append the returned code point to the <string-token>’s value.
            auto escaped = consume_escaped_code_point();
            builder.append_code_point(escaped);
            continue;
        }

        // anything else
        // Append the current input code point to the <string-token>’s value.
        builder.append_code_point(input);
    }
}
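
// Illustrative examples for consume_string_token() (the opening quote has already been consumed):
//   - 'it\'s'  -> <string-token> with value "it's"; the escaped apostrophe does not end the string.
//   - A backslash immediately followed by a newline is dropped, so a string may be split across lines.
//   - A raw, unescaped newline before the closing quote is a parse error and yields a
//     <bad-string-token>; the newline is reconsumed so it becomes whitespace for the next token.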

// https://www.w3.org/TR/css-syntax-3/#consume-comment
void Tokenizer::consume_comments()
{
    // This section describes how to consume comments from a stream of code points.
    // It returns nothing.

start:
    // If the next two input code points are U+002F SOLIDUS (/) followed by a U+002A ASTERISK (*),
    // consume them and all following code points up to and including the first U+002A ASTERISK (*)
    // followed by a U+002F SOLIDUS (/), or up to an EOF code point. Return to the start of this step.
    //
    // If the preceding paragraph ended by consuming an EOF code point, this is a parse error.
    //
    // Return nothing.
    auto twin = peek_twin();
    if (!(is_solidus(twin.first) && is_asterisk(twin.second)))
        return;

    (void)next_code_point();
    (void)next_code_point();

    for (;;) {
        auto twin_inner = peek_twin();
        if (is_eof(twin_inner.first) || is_eof(twin_inner.second)) {
            log_parse_error();
            return;
        }

        if (is_asterisk(twin_inner.first) && is_solidus(twin_inner.second)) {
            (void)next_code_point();
            (void)next_code_point();
            goto start;
        }

        (void)next_code_point();
    }
}
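
// Illustrative behaviour of consume_comments():
//   - "/* a *//* b */p" -> both comments are skipped in one call (the goto loops back after the
//     first one), leaving the stream positioned at 'p'.
//   - "/* unterminated" -> the comment is never closed, so the loop runs until it reaches EOF
//     and logs a parse error.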

// https://www.w3.org/TR/css-syntax-3/#consume-token
ErrorOr<Token> Tokenizer::consume_a_token()
{
    // This section describes how to consume a token from a stream of code points.
    // It will return a single token of any type.

    // Consume comments.
    consume_comments();

    // Consume the next input code point.
    auto start_byte_offset = current_byte_offset();
    auto input = next_code_point();

    // whitespace
    if (is_whitespace(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is whitespace");

        // Consume as much whitespace as possible. Return a <whitespace-token>.
        consume_as_much_whitespace_as_possible();
        auto token = create_new_token(Token::Type::Whitespace);
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // U+0022 QUOTATION MARK (")
    if (is_quotation_mark(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is quotation mark");

        // Consume a string token and return it.
        return consume_string_token(input);
    }

    // U+0023 NUMBER SIGN (#)
    if (is_number_sign(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is number sign");

        // If the next input code point is an ident code point or the next two input code points
        // are a valid escape, then:
        auto next_input = peek_code_point();
        auto maybe_escape = peek_twin();

        if (is_ident_code_point(next_input) || is_valid_escape_sequence(maybe_escape)) {
            // 1. Create a <hash-token>.
            auto token = create_new_token(Token::Type::Hash);

            // 2. If the next 3 input code points would start an ident sequence, set the <hash-token>’s
            //    type flag to "id".
            if (would_start_an_ident_sequence(peek_triplet()))
                token.m_hash_type = Token::HashType::Id;

            // 3. Consume an ident sequence, and set the <hash-token>’s value to the returned string.
            auto name = TRY(consume_an_ident_sequence());
            token.m_value = move(name);

            // 4. Return the <hash-token>.
            token.m_representation = TRY(input_since(start_byte_offset));
            return token;
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input, TRY(input_since(start_byte_offset)));
    }

    // U+0027 APOSTROPHE (')
    if (is_apostrophe(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is apostrophe");

        // Consume a string token and return it.
        return consume_string_token(input);
    }

    // U+0028 LEFT PARENTHESIS (()
    if (is_left_paren(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is left paren");

        // Return a <(-token>.
        Token token = create_new_token(Token::Type::OpenParen);
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // U+0029 RIGHT PARENTHESIS ())
    if (is_right_paren(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is right paren");

        // Return a <)-token>.
        Token token = create_new_token(Token::Type::CloseParen);
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // U+002B PLUS SIGN (+)
    if (is_plus_sign(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is plus sign");

        // If the input stream starts with a number, reconsume the current input code point,
        // consume a numeric token and return it.
        if (would_start_a_number(start_of_input_stream_triplet())) {
            reconsume_current_input_code_point();
            return consume_a_numeric_token();
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input, TRY(input_since(start_byte_offset)));
    }

    // U+002C COMMA (,)
    if (is_comma(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is comma");

        // Return a <comma-token>.
        Token token = create_new_token(Token::Type::Comma);
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // U+002D HYPHEN-MINUS (-)
    if (is_hyphen_minus(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is hyphen minus");

        // If the input stream starts with a number, reconsume the current input code point,
        // consume a numeric token, and return it.
        if (would_start_a_number(start_of_input_stream_triplet())) {
            reconsume_current_input_code_point();
            return consume_a_numeric_token();
        }

        // Otherwise, if the next 2 input code points are U+002D HYPHEN-MINUS U+003E
        // GREATER-THAN SIGN (->), consume them and return a <CDC-token>.
        auto next_twin = peek_twin();
        if (is_hyphen_minus(next_twin.first) && is_greater_than_sign(next_twin.second)) {
            (void)next_code_point();
            (void)next_code_point();

            Token token = create_new_token(Token::Type::CDC);
            token.m_representation = TRY(input_since(start_byte_offset));
            return token;
        }

        // Otherwise, if the input stream starts with an identifier, reconsume the current
        // input code point, consume an ident-like token, and return it.
        if (would_start_an_ident_sequence(start_of_input_stream_triplet())) {
            reconsume_current_input_code_point();
            return consume_an_ident_like_token();
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input, TRY(input_since(start_byte_offset)));
    }

    // U+002E FULL STOP (.)
    if (is_full_stop(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is full stop");

        // If the input stream starts with a number, reconsume the current input code point,
        // consume a numeric token, and return it.
        if (would_start_a_number(start_of_input_stream_triplet())) {
            reconsume_current_input_code_point();
            return consume_a_numeric_token();
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input, TRY(input_since(start_byte_offset)));
    }

    // U+003A COLON (:)
    if (is_colon(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is colon");

        // Return a <colon-token>.
        Token token = create_new_token(Token::Type::Colon);
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // U+003B SEMICOLON (;)
    if (is_semicolon(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is semicolon");

        // Return a <semicolon-token>.
        Token token = create_new_token(Token::Type::Semicolon);
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // U+003C LESS-THAN SIGN (<)
    if (is_less_than_sign(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is less than");

        // If the next 3 input code points are U+0021 EXCLAMATION MARK U+002D HYPHEN-MINUS
        // U+002D HYPHEN-MINUS (!--), consume them and return a <CDO-token>.
        auto maybe_cdo = peek_triplet();
        if (is_exclamation_mark(maybe_cdo.first) && is_hyphen_minus(maybe_cdo.second) && is_hyphen_minus(maybe_cdo.third)) {
            (void)next_code_point();
            (void)next_code_point();
            (void)next_code_point();

            Token token = create_new_token(Token::Type::CDO);
            token.m_representation = TRY(input_since(start_byte_offset));
            return token;
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input, TRY(input_since(start_byte_offset)));
    }

    // U+0040 COMMERCIAL AT (@)
    if (is_at(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is at");

        // If the next 3 input code points would start an ident sequence, consume an ident sequence, create
        // an <at-keyword-token> with its value set to the returned value, and return it.
        if (would_start_an_ident_sequence(peek_triplet())) {
            auto name = TRY(consume_an_ident_sequence());
            return create_value_token(Token::Type::AtKeyword, move(name), TRY(input_since(start_byte_offset)));
        }

        // Otherwise, return a <delim-token> with its value set to the current input code point.
        return create_value_token(Token::Type::Delim, input, TRY(input_since(start_byte_offset)));
    }

    // U+005B LEFT SQUARE BRACKET ([)
    if (is_open_square_bracket(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is open square");

        // Return a <[-token>.
        Token token = create_new_token(Token::Type::OpenSquare);
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // U+005C REVERSE SOLIDUS (\)
    if (is_reverse_solidus(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is reverse solidus");

        // If the input stream starts with a valid escape, reconsume the current input code point,
        // consume an ident-like token, and return it.
        if (is_valid_escape_sequence(start_of_input_stream_twin())) {
            reconsume_current_input_code_point();
            return consume_an_ident_like_token();
        }

        // Otherwise, this is a parse error. Return a <delim-token> with its value set to the
        // current input code point.
        log_parse_error();
        return create_value_token(Token::Type::Delim, input, TRY(input_since(start_byte_offset)));
    }

    // U+005D RIGHT SQUARE BRACKET (])
    if (is_closed_square_bracket(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is closed square");

        // Return a <]-token>.
        Token token = create_new_token(Token::Type::CloseSquare);
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // U+007B LEFT CURLY BRACKET ({)
    if (is_open_curly_bracket(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is open curly");

        // Return a <{-token>.
        Token token = create_new_token(Token::Type::OpenCurly);
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // U+007D RIGHT CURLY BRACKET (})
    if (is_closed_curly_bracket(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is closed curly");

        // Return a <}-token>.
        Token token = create_new_token(Token::Type::CloseCurly);
        token.m_representation = TRY(input_since(start_byte_offset));
        return token;
    }

    // digit
    if (is_ascii_digit(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is digit");

        // Reconsume the current input code point, consume a numeric token, and return it.
        reconsume_current_input_code_point();
        return consume_a_numeric_token();
    }

    // name-start code point
    if (is_ident_start_code_point(input)) {
        dbgln_if(CSS_TOKENIZER_DEBUG, "is name start");

        // Reconsume the current input code point, consume an ident-like token, and return it.
        reconsume_current_input_code_point();
        return consume_an_ident_like_token();
    }

    // EOF
    if (is_eof(input)) {
        // Return an <EOF-token>.
        return create_eof_token();
    }

    // anything else
    dbgln_if(CSS_TOKENIZER_DEBUG, "is delimiter");

    // Return a <delim-token> with its value set to the current input code point.
    return create_value_token(Token::Type::Delim, input, TRY(input_since(start_byte_offset)));
}
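
// End-to-end example of the dispatcher above: tokenizing "#main { width: 50%; }" produces
// Hash("main", type "id"), Whitespace, OpenCurly, Whitespace, Ident("width"), Colon, Whitespace,
// Percentage(50), Semicolon, Whitespace, CloseCurly, and finally EndOfFile.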

size_t Tokenizer::current_byte_offset() const
{
    return m_utf8_iterator.ptr() - m_utf8_view.bytes();
}

ErrorOr<String> Tokenizer::input_since(size_t offset) const
{
    return m_decoded_input.substring_from_byte_offset_with_shared_superstring(offset, current_byte_offset() - offset);
}

}