2020-03-11 18:27:43 +00:00
|
|
|
/*
|
|
|
|
* Copyright (c) 2020, Stephan Unverwerth <s.unverwerth@gmx.de>
|
|
|
|
* All rights reserved.
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions are met:
|
|
|
|
*
|
|
|
|
* 1. Redistributions of source code must retain the above copyright notice, this
|
|
|
|
* list of conditions and the following disclaimer.
|
|
|
|
*
|
|
|
|
* 2. Redistributions in binary form must reproduce the above copyright notice,
|
|
|
|
* this list of conditions and the following disclaimer in the documentation
|
|
|
|
* and/or other materials provided with the distribution.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
|
|
|
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
|
|
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
|
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
|
|
|
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
|
|
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
|
|
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include "Token.h"
|
2020-03-12 22:02:41 +00:00
|
|
|
#include <AK/Assertions.h>
|
2020-03-14 12:39:05 +00:00
|
|
|
#include <AK/StringBuilder.h>
|
2020-05-17 06:27:25 +00:00
|
|
|
#include <AK/Utf32View.h>
|
2020-04-05 12:20:58 +00:00
|
|
|
#include <ctype.h>
|
2020-03-11 18:27:43 +00:00
|
|
|
|
|
|
|
namespace JS {
|
|
|
|
|
|
|
|
// Returns the human-readable name of the given token type, e.g.
// TokenType::NumericLiteral -> "NumericLiteral".
// The case labels are generated from the ENUMERATE_JS_TOKENS X-macro
// (declared in Token.h), so every token type listed there is covered
// automatically and stringified via the preprocessor (#type).
const char* Token::name(TokenType type)
{
    switch (type) {
        // Expands to one `case TokenType::X: return "X";` per token type.
#define __ENUMERATE_JS_TOKEN(type, category) \
    case TokenType::type:                    \
        return #type;
        ENUMERATE_JS_TOKENS
#undef __ENUMERATE_JS_TOKEN
    default:
        // Every enumerator is handled above; reaching this is a bug.
        ASSERT_NOT_REACHED();
        // Unreachable fallback to keep the compiler satisfied on all paths.
        return "<Unknown>";
    }
}
|
|
|
|
|
|
|
|
const char* Token::name() const
|
|
|
|
{
|
|
|
|
return name(m_type);
|
|
|
|
}
|
|
|
|
|
LibJS: Unify syntax highlighting
So far we have three different syntax highlighters for LibJS:
- js's Line::Editor stylization
- JS::MarkupGenerator
- GUI::JSSyntaxHighlighter
This not only caused repetition of most token types in each highlighter
but also a lot of inconsistency regarding the styling of certain tokens:
- JSSyntaxHighlighter was considering TokenType::Period to be an
operator whereas MarkupGenerator categorized it as punctuation.
- MarkupGenerator was considering TokenType::{Break,Case,Continue,
Default,Switch,With} control keywords whereas JSSyntaxHighlighter just
disregarded them
- MarkupGenerator considered some future reserved keywords invalid and
others not. JSSyntaxHighlighter and js disregarded most
Adding a new token type meant adding it to ENUMERATE_JS_TOKENS as well
as each individual highlighter's switch/case construct.
I added a TokenCategory enum, and each TokenType is now associated to a
certain category, which the syntax highlighters then can use for styling
rather than operating on the token type directly. This also makes
changing a token's category everywhere easier, should we need to do that
(e.g. I decided to make TokenType::{Period,QuestionMarkPeriod}
TokenCategory::Operator for now, but we might want to change them to
Punctuation.
2020-10-04 21:28:59 +00:00
|
|
|
// Maps a token type to its TokenCategory (keyword, operator, punctuation, ...),
// which syntax highlighters use for styling instead of switching over every
// individual token type. The type-to-category association lives in the
// ENUMERATE_JS_TOKENS X-macro in Token.h.
TokenCategory Token::category(TokenType type)
{
    switch (type) {
        // Expands to one `case TokenType::X: return TokenCategory::Y;` pair
        // per entry in the token list.
#define __ENUMERATE_JS_TOKEN(type, category) \
    case TokenType::type:                    \
        return TokenCategory::category;
        ENUMERATE_JS_TOKENS
#undef __ENUMERATE_JS_TOKEN
    default:
        // Every enumerator is handled above; ASSERT_NOT_REACHED() aborts,
        // so no fallback return value is provided on this path.
        ASSERT_NOT_REACHED();
    }
}
|
|
|
|
|
|
|
|
// Convenience overload: the category of this token's own type.
// Forwards to the static per-type lookup.
TokenCategory Token::category() const
{
    auto const own_type = m_type;
    return Token::category(own_type);
}
|
|
|
|
|
2020-03-11 18:27:43 +00:00
|
|
|
double Token::double_value() const
|
|
|
|
{
|
2020-03-14 12:39:05 +00:00
|
|
|
ASSERT(type() == TokenType::NumericLiteral);
|
2020-04-05 12:20:58 +00:00
|
|
|
String value_string(m_value);
|
|
|
|
if (value_string[0] == '0' && value_string.length() >= 2) {
|
|
|
|
if (value_string[1] == 'x' || value_string[1] == 'X') {
|
|
|
|
// hexadecimal
|
|
|
|
return static_cast<double>(strtoul(value_string.characters() + 2, nullptr, 16));
|
|
|
|
} else if (value_string[1] == 'o' || value_string[1] == 'O') {
|
|
|
|
// octal
|
|
|
|
return static_cast<double>(strtoul(value_string.characters() + 2, nullptr, 8));
|
|
|
|
} else if (value_string[1] == 'b' || value_string[1] == 'B') {
|
|
|
|
// binary
|
|
|
|
return static_cast<double>(strtoul(value_string.characters() + 2, nullptr, 2));
|
|
|
|
} else if (isdigit(value_string[1])) {
|
|
|
|
// also octal, but syntax error in strict mode
|
|
|
|
return static_cast<double>(strtoul(value_string.characters() + 1, nullptr, 8));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return strtod(value_string.characters(), nullptr);
|
2020-03-11 18:27:43 +00:00
|
|
|
}
|
|
|
|
|
2020-05-17 06:27:25 +00:00
|
|
|
// Converts a single hexadecimal digit character to its numeric value
// (0-15). The caller must pass a valid hex digit; this is asserted.
static u32 hex2int(char x)
{
    ASSERT(isxdigit(x));
    if (x >= 'a' && x <= 'f')
        return 10u + (x - 'a');
    if (x >= 'A' && x <= 'F')
        return 10u + (x - 'A');
    return x - '0';
}
|
|
|
|
|
|
|
|
// Decodes the source text of a string or template-literal-string token into
// its runtime string value, processing backslash escape sequences
// (\b \f \n \r \t \v \0 \' \" \\, \xHH hex escapes, and \uHHHH / \u{...}
// Unicode escapes). On a malformed escape, `status` is set to the
// corresponding error value and an empty (null) String is returned;
// otherwise `status` is left untouched by this function.
String Token::string_value(StringValueStatus& status) const
{
    ASSERT(type() == TokenType::StringLiteral || type() == TokenType::TemplateLiteralString);
    auto is_template = type() == TokenType::TemplateLiteralString;

    // Regular string literals are surrounded by quote characters that must be
    // stripped (offset 1); template literal strings arrive without delimiters.
    auto offset = type() == TokenType::TemplateLiteralString ? 0 : 1;

    // Helper: record the failure reason in `status` and bail out with a
    // null String.
    auto encoding_failure = [&status](StringValueStatus parse_status) -> String {
        status = parse_status;
        return {};
    };

    StringBuilder builder;
    // Walk the token text between the delimiters. Note that `i` is also
    // advanced inside the loop body while consuming escape sequences.
    for (size_t i = offset; i < m_value.length() - offset; ++i) {
        if (m_value[i] == '\\' && i + 1 < m_value.length() - offset) {
            // Consume the backslash and dispatch on the escape character.
            i++;
            switch (m_value[i]) {
            case 'b':
                builder.append('\b');
                break;
            case 'f':
                builder.append('\f');
                break;
            case 'n':
                builder.append('\n');
                break;
            case 'r':
                builder.append('\r');
                break;
            case 't':
                builder.append('\t');
                break;
            case 'v':
                builder.append('\v');
                break;
            case '0':
                // NUL escape; appended as an embedded zero byte.
                builder.append((char)0);
                break;
            case '\'':
                builder.append('\'');
                break;
            case '"':
                builder.append('"');
                break;
            case '\\':
                builder.append('\\');
                break;
            case 'x': {
                // \xHH: exactly two hex digits must follow.
                if (i + 2 >= m_value.length() - offset)
                    return encoding_failure(StringValueStatus::MalformedHexEscape);

                auto digit1 = m_value[++i];
                auto digit2 = m_value[++i];
                if (!isxdigit(digit1) || !isxdigit(digit2))
                    return encoding_failure(StringValueStatus::MalformedHexEscape);
                // The byte value is appended as a code point (UTF-8 encoded).
                builder.append_code_point(hex2int(digit1) * 16 + hex2int(digit2));
                break;
            }
            case 'u': {
                // \uHHHH or \u{H...H}: at least one more character must follow.
                if (i + 1 >= m_value.length() - offset)
                    return encoding_failure(StringValueStatus::MalformedUnicodeEscape);
                u32 code_point = m_value[++i];

                if (code_point == '{') {
                    // Braced form: accumulate hex digits until the closing '}'.
                    code_point = 0;
                    while (true) {
                        if (i + 1 >= m_value.length() - offset)
                            return encoding_failure(StringValueStatus::MalformedUnicodeEscape);

                        auto ch = m_value[++i];
                        if (ch == '}')
                            break;
                        if (!isxdigit(ch))
                            return encoding_failure(StringValueStatus::MalformedUnicodeEscape);

                        // Shift in the next nibble; if the value wrapped
                        // around, the escape overflowed u32.
                        auto new_code_point = (code_point << 4u) | hex2int(ch);
                        if (new_code_point < code_point)
                            return encoding_failure(StringValueStatus::UnicodeEscapeOverflow);
                        code_point = new_code_point;
                    }
                } else {
                    // Fixed form: the character already read plus exactly
                    // three more hex digits.
                    if (i + 3 >= m_value.length() - offset || !isxdigit(code_point))
                        return encoding_failure(StringValueStatus::MalformedUnicodeEscape);

                    code_point = hex2int(code_point);
                    for (int j = 0; j < 3; ++j) {
                        auto ch = m_value[++i];
                        if (!isxdigit(ch))
                            return encoding_failure(StringValueStatus::MalformedUnicodeEscape);
                        code_point = (code_point << 4u) | hex2int(ch);
                    }
                }

                builder.append_code_point(code_point);
                break;
            }
            default:
                // In template literals, \$ and \` escape the characters that
                // would otherwise start an expression or end the template.
                if (is_template && (m_value[i] == '$' || m_value[i] == '`')) {
                    builder.append(m_value[i]);
                    break;
                }

                // Unrecognized escape: keep the character as-is.
                // FIXME: Also parse octal. Should anything else generate a syntax error?
                builder.append(m_value[i]);
            }
        } else {
            // Ordinary character (or a trailing backslash right before the
            // closing delimiter): copy through verbatim.
            builder.append(m_value[i]);
        }
    }
    return builder.to_string();
}
|
|
|
|
|
|
|
|
bool Token::bool_value() const
|
|
|
|
{
|
2020-03-14 12:39:05 +00:00
|
|
|
ASSERT(type() == TokenType::BoolLiteral);
|
2020-03-11 18:27:43 +00:00
|
|
|
return m_value == "true";
|
|
|
|
}
|
|
|
|
|
2020-04-18 18:31:27 +00:00
|
|
|
bool Token::is_identifier_name() const
|
|
|
|
{
|
|
|
|
// IdentifierNames are Identifiers + ReservedWords
|
|
|
|
// The standard defines this reversed: Identifiers are IdentifierNames except reserved words
|
|
|
|
// https://www.ecma-international.org/ecma-262/5.1/#sec-7.6
|
|
|
|
return m_type == TokenType::Identifier
|
|
|
|
|| m_type == TokenType::Await
|
|
|
|
|| m_type == TokenType::BoolLiteral
|
|
|
|
|| m_type == TokenType::Break
|
|
|
|
|| m_type == TokenType::Case
|
|
|
|
|| m_type == TokenType::Catch
|
|
|
|
|| m_type == TokenType::Class
|
|
|
|
|| m_type == TokenType::Const
|
|
|
|
|| m_type == TokenType::Continue
|
|
|
|
|| m_type == TokenType::Default
|
|
|
|
|| m_type == TokenType::Delete
|
|
|
|
|| m_type == TokenType::Do
|
|
|
|
|| m_type == TokenType::Else
|
2020-08-14 08:34:49 +00:00
|
|
|
|| m_type == TokenType::Enum
|
|
|
|
|| m_type == TokenType::Export
|
|
|
|
|| m_type == TokenType::Extends
|
2020-04-18 18:31:27 +00:00
|
|
|
|| m_type == TokenType::Finally
|
|
|
|
|| m_type == TokenType::For
|
|
|
|
|| m_type == TokenType::Function
|
|
|
|
|| m_type == TokenType::If
|
2020-08-14 08:34:49 +00:00
|
|
|
|| m_type == TokenType::Import
|
2020-04-18 18:31:27 +00:00
|
|
|
|| m_type == TokenType::In
|
|
|
|
|| m_type == TokenType::Instanceof
|
|
|
|
|| m_type == TokenType::Interface
|
|
|
|
|| m_type == TokenType::Let
|
|
|
|
|| m_type == TokenType::New
|
|
|
|
|| m_type == TokenType::NullLiteral
|
|
|
|
|| m_type == TokenType::Return
|
2020-08-14 08:34:49 +00:00
|
|
|
|| m_type == TokenType::Super
|
2020-04-18 18:31:27 +00:00
|
|
|
|| m_type == TokenType::Switch
|
|
|
|
|| m_type == TokenType::This
|
|
|
|
|| m_type == TokenType::Throw
|
|
|
|
|| m_type == TokenType::Try
|
|
|
|
|| m_type == TokenType::Typeof
|
|
|
|
|| m_type == TokenType::Var
|
|
|
|
|| m_type == TokenType::Void
|
|
|
|
|| m_type == TokenType::While
|
|
|
|
|| m_type == TokenType::Yield;
|
|
|
|
}
|
|
|
|
|
2020-10-21 21:16:45 +00:00
|
|
|
bool Token::trivia_contains_line_terminator() const
|
|
|
|
{
|
|
|
|
return m_trivia.contains('\n') || m_trivia.contains('\r') || m_trivia.contains(LINE_SEPARATOR) || m_trivia.contains(PARAGRAPH_SEPARATOR);
|
|
|
|
}
|
|
|
|
|
2020-03-11 18:27:43 +00:00
|
|
|
}
|