/*
 * Copyright (c) 2020, Stephan Unverwerth <s.unverwerth@serenityos.org>
 * Copyright (c) 2020-2021, Linus Groh <linusg@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include "Token.h"
#include <AK/Assertions.h>
#include <AK/CharacterTypes.h>
#include <AK/GenericLexer.h>
#include <AK/StringBuilder.h>
#include <AK/Utf16View.h>

namespace JS {
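
// Token::name() and Token::category() below are generated with the __ENUMERATE_JS_TOKEN
// X-macro: ENUMERATE_JS_TOKENS (in Token.h) invokes the macro once per (type, category)
// pair, so each switch picks up one case per token type. Illustrative expansion for a
// single entry, e.g. a hypothetical __ENUMERATE_JS_TOKEN(Ampersand, Operator):
//     case TokenType::Ampersand:
//         return "Ampersand";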
const char* Token::name(TokenType type)
{
    switch (type) {
#define __ENUMERATE_JS_TOKEN(type, category) \
    case TokenType::type:                    \
        return #type;
        ENUMERATE_JS_TOKENS
#undef __ENUMERATE_JS_TOKEN
    default:
        VERIFY_NOT_REACHED();
        return "<Unknown>";
    }
}

const char* Token::name() const
{
    return name(m_type);
}
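
// Each TokenType carries a TokenCategory so that the syntax highlighters (js's
// Line::Editor stylization, JS::MarkupGenerator, GUI::JSSyntaxHighlighter) can style
// tokens by category instead of each maintaining its own switch over every TokenType.
// Illustrative caller (hypothetical, not part of this file):
//     switch (token.category()) {
//     case TokenCategory::Keyword:
//         // apply keyword style
//         break;
//     ...
//     }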
TokenCategory Token::category(TokenType type)
{
    switch (type) {
#define __ENUMERATE_JS_TOKEN(type, category) \
    case TokenType::type:                    \
        return TokenCategory::category;
        ENUMERATE_JS_TOKENS
#undef __ENUMERATE_JS_TOKEN
    default:
        VERIFY_NOT_REACHED();
    }
}

TokenCategory Token::category() const
{
    return category(m_type);
}
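
// Converts the raw NumericLiteral text to a double. Illustrative inputs and results:
//     "1_000"  -> 1000.0   (numeric separators are stripped below)
//     "0xFF"   -> 255.0
//     "0o17"   -> 15.0
//     "0b101"  -> 5.0
//     "017"    -> 15.0     (legacy octal; a syntax error in strict mode)
//     "1.5e3"  -> 1500.0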
double Token::double_value() const
{
    VERIFY(type() == TokenType::NumericLiteral);

    StringBuilder builder;
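
    // Drop numeric separators ('_') before handing the text to strtoul()/strtod(),
    // which do not accept them.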
    for (auto ch : m_value) {
        if (ch == '_')
            continue;
        builder.append(ch);
    }

    String value_string = builder.to_string();
    if (value_string[0] == '0' && value_string.length() >= 2) {
        if (value_string[1] == 'x' || value_string[1] == 'X') {
            // hexadecimal
            return static_cast<double>(strtoul(value_string.characters() + 2, nullptr, 16));
        } else if (value_string[1] == 'o' || value_string[1] == 'O') {
            // octal
            return static_cast<double>(strtoul(value_string.characters() + 2, nullptr, 8));
        } else if (value_string[1] == 'b' || value_string[1] == 'B') {
            // binary
            return static_cast<double>(strtoul(value_string.characters() + 2, nullptr, 2));
        } else if (is_ascii_digit(value_string[1])) {
            // also octal, but syntax error in strict mode
            if (!m_value.contains('8') && !m_value.contains('9'))
                return static_cast<double>(strtoul(value_string.characters() + 1, nullptr, 8));
        }
    }
    return strtod(value_string.characters(), nullptr);
}
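
// Maps a single ASCII hex digit to its numeric value, e.g. hex2int('7') == 7, hex2int('b') == 11.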
static u32 hex2int(char x)
{
    VERIFY(is_ascii_hex_digit(x));
    if (x >= '0' && x <= '9')
        return x - '0';
    return 10u + (to_ascii_lowercase(x) - 'a');
}
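
// Decodes the escape sequences in a StringLiteral or TemplateLiteralString token.
// A StringLiteral still carries its surrounding quotes, so those are sliced off below;
// a TemplateLiteralString does not. Illustrative inputs (status is only written on
// failure or for legacy octal escapes):
//     "\x41"          -> "A"
//     "\u0041"        -> "A"
//     "\u{1F600}"     -> U+1F600
//     "\uD83D\uDE00"  -> U+1F600 (surrogate pair)
//     "\101"          -> "A", with status = StringValueStatus::LegacyOctalEscapeSequence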
String Token::string_value(StringValueStatus& status) const
{
    VERIFY(type() == TokenType::StringLiteral || type() == TokenType::TemplateLiteralString);

    auto is_template = type() == TokenType::TemplateLiteralString;
    GenericLexer lexer(is_template ? m_value : m_value.substring_view(1, m_value.length() - 2));

    auto encoding_failure = [&status](StringValueStatus parse_status) -> String {
        status = parse_status;
        return {};
    };

    auto decode_surrogate = [&lexer]() -> Optional<u16> {
        u16 surrogate = 0;
        for (int j = 0; j < 4; ++j) {
            if (!lexer.next_is(is_ascii_hex_digit))
                return {};
            surrogate = (surrogate << 4u) | hex2int(lexer.consume());
        }
        return surrogate;
    };

    StringBuilder builder;
    while (!lexer.is_eof()) {
        // No escape, consume one char and continue
        if (!lexer.next_is('\\')) {
            builder.append(lexer.consume());
            continue;
        }

        lexer.ignore();
        VERIFY(!lexer.is_eof());

        // Line continuation
        if (lexer.next_is('\n') || lexer.next_is('\r')) {
            lexer.ignore();
            continue;
        }
        // Line continuation
        if (lexer.next_is(LINE_SEPARATOR) || lexer.next_is(PARAGRAPH_SEPARATOR)) {
            lexer.ignore(3);
            continue;
        }
        // Null-byte escape
        if (lexer.next_is('0') && !is_ascii_digit(lexer.peek(1))) {
            lexer.ignore();
            builder.append('\0');
            continue;
        }
        // Hex escape
        if (lexer.next_is('x')) {
            lexer.ignore();
            if (!is_ascii_hex_digit(lexer.peek()) || !is_ascii_hex_digit(lexer.peek(1)))
                return encoding_failure(StringValueStatus::MalformedHexEscape);
            auto code_point = hex2int(lexer.consume()) * 16 + hex2int(lexer.consume());
            VERIFY(code_point <= 255);
            builder.append_code_point(code_point);
            continue;
        }
        // Unicode escape
        if (lexer.next_is('u')) {
            lexer.ignore();
            u32 code_point = 0;
            if (lexer.next_is('{')) {
                lexer.ignore();
                while (true) {
                    if (!lexer.next_is(is_ascii_hex_digit))
                        return encoding_failure(StringValueStatus::MalformedUnicodeEscape);
                    auto new_code_point = (code_point << 4u) | hex2int(lexer.consume());
                    if (new_code_point < code_point)
                        return encoding_failure(StringValueStatus::UnicodeEscapeOverflow);
                    code_point = new_code_point;
                    if (lexer.next_is('}'))
                        break;
                }
                lexer.ignore();
            } else {
                auto high_surrogate = decode_surrogate();
                if (!high_surrogate.has_value())
                    return encoding_failure(StringValueStatus::MalformedUnicodeEscape);

                if (Utf16View::is_high_surrogate(*high_surrogate) && lexer.consume_specific("\\u"sv)) {
                    auto low_surrogate = decode_surrogate();
                    if (!low_surrogate.has_value())
                        return encoding_failure(StringValueStatus::MalformedUnicodeEscape);

                    if (Utf16View::is_low_surrogate(*low_surrogate)) {
                        code_point = Utf16View::decode_surrogate_pair(*high_surrogate, *low_surrogate);
                    } else {
                        builder.append_code_point(*high_surrogate);
                        code_point = *low_surrogate;
                    }

                } else {
                    code_point = *high_surrogate;
                }
            }
            builder.append_code_point(code_point);
            continue;
        }

        // In non-strict mode LegacyOctalEscapeSequence is allowed in strings:
        // https://tc39.es/ecma262/#sec-additional-syntax-string-literals
        String octal_str;

        auto is_octal_digit = [](char ch) { return ch >= '0' && ch <= '7'; };
        auto is_zero_to_three = [](char ch) { return ch >= '0' && ch <= '3'; };
        auto is_four_to_seven = [](char ch) { return ch >= '4' && ch <= '7'; };

        // OctalDigit [lookahead ∉ OctalDigit]
        if (is_octal_digit(lexer.peek()) && !is_octal_digit(lexer.peek(1)))
            octal_str = lexer.consume(1);
        // ZeroToThree OctalDigit [lookahead ∉ OctalDigit]
        else if (is_zero_to_three(lexer.peek()) && is_octal_digit(lexer.peek(1)) && !is_octal_digit(lexer.peek(2)))
            octal_str = lexer.consume(2);
        // FourToSeven OctalDigit
        else if (is_four_to_seven(lexer.peek()) && is_octal_digit(lexer.peek(1)))
            octal_str = lexer.consume(2);
        // ZeroToThree OctalDigit OctalDigit
        else if (is_zero_to_three(lexer.peek()) && is_octal_digit(lexer.peek(1)) && is_octal_digit(lexer.peek(2)))
            octal_str = lexer.consume(3);

        if (!octal_str.is_null()) {
            status = StringValueStatus::LegacyOctalEscapeSequence;
            auto code_point = strtoul(octal_str.characters(), nullptr, 8);
            VERIFY(code_point <= 255);
            builder.append_code_point(code_point);
            continue;
        }

        lexer.retreat();
        builder.append(lexer.consume_escaped_character('\\', "b\bf\fn\nr\rt\tv\v"));
    }
    return builder.to_string();
}

bool Token::bool_value() const
{
    VERIFY(type() == TokenType::BoolLiteral);
    return m_value == "true";
}
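
// Used where the grammar accepts any IdentifierName rather than just an Identifier,
// e.g. property names: `foo.class` and `({ if: 1 })` are valid because reserved words
// are still IdentifierNames.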
bool Token::is_identifier_name() const
{
    // IdentifierNames are Identifiers + ReservedWords
    // The standard defines this reversed: Identifiers are IdentifierNames except reserved words
    // https://tc39.es/ecma262/#prod-Identifier
    return m_type == TokenType::Identifier
        || m_type == TokenType::Await
        || m_type == TokenType::BoolLiteral
        || m_type == TokenType::Break
        || m_type == TokenType::Case
        || m_type == TokenType::Catch
        || m_type == TokenType::Class
        || m_type == TokenType::Const
        || m_type == TokenType::Continue
        || m_type == TokenType::Debugger
        || m_type == TokenType::Default
        || m_type == TokenType::Delete
        || m_type == TokenType::Do
        || m_type == TokenType::Else
        || m_type == TokenType::Enum
        || m_type == TokenType::Export
        || m_type == TokenType::Extends
        || m_type == TokenType::Finally
        || m_type == TokenType::For
        || m_type == TokenType::Function
        || m_type == TokenType::If
        || m_type == TokenType::Import
        || m_type == TokenType::In
        || m_type == TokenType::Instanceof
        || m_type == TokenType::Let
        || m_type == TokenType::New
        || m_type == TokenType::NullLiteral
        || m_type == TokenType::Return
        || m_type == TokenType::Super
        || m_type == TokenType::Switch
        || m_type == TokenType::This
        || m_type == TokenType::Throw
        || m_type == TokenType::Try
        || m_type == TokenType::Typeof
        || m_type == TokenType::Var
        || m_type == TokenType::Void
        || m_type == TokenType::While
        || m_type == TokenType::With
        || m_type == TokenType::Yield;
}
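
// The trivia is the whitespace and comments consumed before this token; whether it
// contains a line terminator matters e.g. for automatic semicolon insertion and
// restricted productions such as `return <LineTerminator> expr`.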
bool Token::trivia_contains_line_terminator() const
{
    return m_trivia.contains('\n') || m_trivia.contains('\r') || m_trivia.contains(LINE_SEPARATOR) || m_trivia.contains(PARAGRAPH_SEPARATOR);
}

}