Mirror of https://github.com/LadybirdBrowser/ladybird.git, synced 2024-12-03 21:10:30 +00:00
Everywhere: Fix a bunch of typos
commit 2b0c361d04 (parent cebd3f740b)
Notes: sideshowbarker, 2024-07-18 19:29:14 +09:00
30 changed files with 42 additions and 42 deletions
.github/workflows/discord.yml (vendored): 2 changed lines
@@ -22,7 +22,7 @@ jobs:
 - name: Discord action notification
 env:
 DISCORD_WEBHOOK: ${{ secrets.DISCORD_WEBHOOK }}
-CUSTOM_GITHUB_EVENT_NAME: ${{ github.event_name == 'pull_request_target' && 'pull_request' || github.event_name }} # fake the event type as discord doesnt know how to parse the special pull_request_target context
+CUSTOM_GITHUB_EVENT_NAME: ${{ github.event_name == 'pull_request_target' && 'pull_request' || github.event_name }} # fake the event type as discord doesn't know how to parse the special pull_request_target context
 uses: IdanHo/action-discord@754598254f288e6d8e9fca637832e3c163515ba8
 if: ${{ (github.event['pull_request'] && github.event['action'] == 'opened' && !(github.event['pull_request'] == 'draft')) || github.event['commits'] }}
@@ -233,7 +233,7 @@ protected:
 m_minimum = successor(node);

 // removal assumes the node has 0 or 1 child, so if we have 2, relink with the successor first (by definition the successor has no left child)
-// FIXME: since we dont know how a value is represented in the node, we cant simply swap the values and keys, and instead we relink the nodes
+// FIXME: since we dont know how a value is represented in the node, we can't simply swap the values and keys, and instead we relink the nodes
 // in place, this is quite a bit more expensive, as well as much less readable, is there a better way?
 if (node->left_child && node->right_child) {
 auto* successor_node = successor(node); // this is always non-null as all nodes besides the maximum node have a successor, and the maximum node has no right child

@@ -352,7 +352,7 @@ protected:
 }
 parent = node->parent;
 }
-node->color = Color::Black; // by this point node can't be null
+node->color = Color::Black; // by this point node can't be null
 }

 static Node* successor(Node* node)
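For context on the relinking comment in the hunk above: in a textbook binary search tree where the node's key is directly accessible, the two-child case is usually handled by copying the successor's key into the doomed node and then deleting the successor (which by definition has no left child, so removal falls back to the easy case). A minimal sketch of that simpler approach is shown below; the names and the int key are hypothetical, and the RedBlackTree in the diff cannot use this trick precisely because it does not know how a value is represented inside the node, hence the in-place relinking.

```cpp
// Hypothetical illustration only: assumes the node exposes its key directly,
// which the tree in the diff above deliberately does not.
struct Node {
    int key = 0;
    Node* left_child = nullptr;
    Node* right_child = nullptr;
};

// Textbook two-child deletion: copy the successor's key into the node being
// removed, then splice out the successor instead (it has at most one child).
// Assumes the successor node was heap-allocated with `new`.
static void remove_two_child_node(Node* node)
{
    Node* parent_of_successor = node;
    Node* successor = node->right_child; // leftmost node of the right subtree
    while (successor->left_child) {
        parent_of_successor = successor;
        successor = successor->left_child;
    }
    node->key = successor->key;
    // The successor has no left child, so hook up its right child in its place.
    if (parent_of_successor->left_child == successor)
        parent_of_successor->left_child = successor->right_child;
    else
        parent_of_successor->right_child = successor->right_child;
    delete successor;
}
```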
@@ -80,7 +80,7 @@ Removes the prefix _prefix_ (if present) from the given _string_.
 Concatenates all the given expressions as lists, and evaluates to a list.

 - ${regex\_replace _pattern_ _replacement-template_ _string_}
-Replaces all occurences of the regular expression _pattern_ in the given _string_, using the given _replacement-template_.
+Replaces all occurrences of the regular expression _pattern_ in the given _string_, using the given _replacement-template_.
 Capture groups in _pattern_ can be referred to as `\<group_number>` in the _replacement template_, for example, to reference capture group 1, use `\1`.

 ##### Evaluate expression

@@ -7,7 +7,7 @@ tests run on the Serenity machine, either emulated or bare metal.
 ### Running Host Tests

 There are two ways to build host tests: from a full build, or from a Lagom-only build. The only difference is the CMake
-command used to initailize the build directory.
+command used to initialize the build directory.

 For a full build, pass `-DBUILD_LAGOM=ON` to the CMake command.

@@ -73,7 +73,7 @@ BootModes=self-test
 ```

 `/dev/ttyS0` is used as stdio because that serial port is connected when qemu is run with `-display none` and
-`-nographic`, and output to it will show up in the stdout of the qemu window. Seperately, the CI run script redirects
+`-nographic`, and output to it will show up in the stdout of the qemu window. Separately, the CI run script redirects
 the serial debug output to `./debug.log` so that both stdout of the tests and the dbgln from the kernel/tests can be
 captured.
@@ -498,8 +498,8 @@ void unregister_generic_interrupt_handler(u8 interrupt_number, GenericInterruptH
 UNMAP_AFTER_INIT void register_interrupt_handler(u8 index, void (*handler)())
 {
 // FIXME: Why is that with selector 8?
-// FIXME: Is the Gate Type really required to be an Interupt
-// FIXME: Whats up with that storage segment 0?
+// FIXME: Is the Gate Type really required to be an Interrupt
+// FIXME: What's up with that storage segment 0?
 s_idt[index] = IDTEntry((FlatPtr)handler, 8, IDTEntryType::InterruptGate32, 0, 0);
 }

@@ -507,7 +507,7 @@ UNMAP_AFTER_INIT void register_user_callable_interrupt_handler(u8 index, void (*
 {
 // FIXME: Why is that with selector 8?
 // FIXME: Is the Gate Type really required to be a Trap
-// FIXME: Whats up with that storage segment 0?
+// FIXME: What's up with that storage segment 0?
 s_idt[index] = IDTEntry((FlatPtr)handler, 8, IDTEntryType::TrapGate32, 0, 3);
 }

@@ -222,7 +222,7 @@ start:
 movl %cr0, %eax
 orl $0x80000000, %eax
 movl %eax, %cr0
-/* Now we are in 32-bit compatablity mode, We still need to load a 64-bit GDT */
+/* Now we are in 32-bit compatibility mode, We still need to load a 64-bit GDT */

 /* set up stack */
 mov $stack_top, %esp

@@ -516,7 +516,7 @@ bool AHCIPort::spin_until_ready() const
 spin++;
 }
 if (spin == 100) {
-dbgln_if(AHCI_DEBUG, "AHCI Port {}: SPIN exceeded 100 miliseconds threshold", representative_port_index());
+dbgln_if(AHCI_DEBUG, "AHCI Port {}: SPIN exceeded 100 milliseconds threshold", representative_port_index());
 return false;
 }
 return true;
@@ -122,7 +122,7 @@ struct [[gnu::packed]] ATAIdentifyBlock {

 u16 maximum_logical_sectors_per_drq;
 u16 trusted_computing_features;
-u16 capabilites[2];
+u16 capabilities[2];
 u16 obsolete5[2];
 u16 validity_flags;
 u16 obsolete6[5];

@@ -357,7 +357,7 @@ UNMAP_AFTER_INIT void IDEChannel::detect_disks()

 volatile ATAIdentifyBlock& identify_block = (volatile ATAIdentifyBlock&)(*wbuf.data());

-u16 capabilities = identify_block.capabilites[0];
+u16 capabilities = identify_block.capabilities[0];

 // If the drive is so old that it doesn't support LBA, ignore it.
 if (!(capabilities & ATA_CAP_LBA))
@@ -506,7 +506,7 @@ KResultOr<int> Process::sys$munmap(Userspace<void*> addr, size_t size)
 // slow: without caching
 const auto& regions = space().find_regions_intersecting(range_to_unmap);

-// check if any of the regions is not mmaped, to not accientally
+// check if any of the regions is not mmapped, to not accientally
 // error-out with just half a region map left
 for (auto* region : regions) {
 if (!region->is_mmap())
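The comment in that hunk describes a validate-before-mutate pattern: every region intersecting the range is checked up front so the syscall can fail atomically instead of leaving half of the range unmapped. A minimal, hypothetical sketch of the same idea (standard containers, not the kernel's actual Region/Space API) might look like this:

```cpp
#include <vector>

struct Region {
    bool is_mmap = false;
    // ... other bookkeeping ...
};

// Hypothetical helper: refuse to touch anything unless *all* regions qualify,
// so a failure cannot leave the address space partially modified.
static bool unmap_all_or_nothing(std::vector<Region*> const& regions)
{
    // Pass 1: validation only, no side effects.
    for (auto* region : regions) {
        if (!region->is_mmap)
            return false; // analogous to sys$munmap erroring out up front
    }
    // Pass 2: only now is it safe to mutate every region.
    for (auto* region : regions) {
        // ... actually unmap the region here ...
    }
    return true;
}
```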
@@ -132,7 +132,7 @@ void __ubsan_handle_shift_out_of_bounds(const ShiftOutOfBoundsData& data, ValueH
 void __ubsan_handle_divrem_overflow(const OverflowData&, ValueHandle lhs, ValueHandle rhs);
 void __ubsan_handle_divrem_overflow(const OverflowData& data, ValueHandle, ValueHandle)
 {
-dbgln("KUBSAN: divrem overlow, {} ({}-bit)", data.type.name(), data.type.bit_width());
+dbgln("KUBSAN: divrem overflow, {} ({}-bit)", data.type.name(), data.type.bit_width());
 print_location(data.location);
 }

@@ -189,7 +189,7 @@ void __ubsan_handle_alignment_assumption(const AlignmentAssumptionData& data, Va
 "of type {} failed",
 alignment, pointer, data.type.name());
 }
-// dbgln("KUBSAN: Assumption of pointer allignment failed");
+// dbgln("KUBSAN: Assumption of pointer alignment failed");
 print_location(data.location);
 }
@@ -345,7 +345,7 @@ void VirtIODevice::finish_init()
 {
 VERIFY(m_did_accept_features); // ensure features were negotiated
 VERIFY(m_did_setup_queues); // ensure queues were set-up
-VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK)); // ensure we didnt already finish the initialization
+VERIFY(!(m_status & DEVICE_STATUS_DRIVER_OK)); // ensure we didn't already finish the initialization

 set_status_bit(DEVICE_STATUS_DRIVER_OK);
 dbgln_if(VIRTIO_DEBUG, "{}: Finished initialization", m_class_name);

@@ -82,7 +82,7 @@ bool VirtIOQueue::supply_buffer(Badge<VirtIODevice>, const ScatterGatherList& sc
 last_index = descriptor_index;
 descriptor_index = m_descriptors[descriptor_index].next; // ensure we place the buffer in chain order
 });
-m_descriptors[last_index].flags &= ~(VIRTQ_DESC_F_NEXT); // last descriptor in chain doesnt have a next descriptor
+m_descriptors[last_index].flags &= ~(VIRTQ_DESC_F_NEXT); // last descriptor in chain doesn't have a next descriptor

 m_driver->rings[m_driver_index_shadow % m_queue_size] = m_free_head; // m_driver_index_shadow is used to prevent accesses to index before the rings are updated
 m_tokens[m_free_head] = token;

@@ -139,7 +139,7 @@ void VirtIOQueue::pop_buffer(u16 descriptor_index)
 m_free_buffers++;
 i = m_descriptors[i].next;
 }
-m_free_buffers++; // the last descriptor in the chain doesnt have the NEXT flag
+m_free_buffers++; // the last descriptor in the chain doesn't have the NEXT flag

 m_descriptors[i].next = m_free_head; // empend the popped descriptors to the free chain
 m_free_head = descriptor_index;
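Both virtqueue hunks above hinge on the same convention from the VirtIO specification: descriptors form a singly linked chain through their next field, and the chain is terminated by clearing the VIRTQ_DESC_F_NEXT flag on the last descriptor rather than by a sentinel index. A small self-contained sketch of walking such a chain, using an illustrative descriptor struct rather than the VirtIOQueue class itself:

```cpp
#include <cstdint>
#include <cstdio>

// Split-ring descriptor layout per the VirtIO spec; illustrative only.
static constexpr uint16_t VIRTQ_DESC_F_NEXT = 1;

struct VirtQDescriptor {
    uint64_t address = 0;
    uint32_t length = 0;
    uint16_t flags = 0;
    uint16_t next = 0; // only meaningful while VIRTQ_DESC_F_NEXT is set
};

// Count the descriptors in a chain starting at `head`. The loop keys off the
// NEXT flag, which mirrors why pop_buffer() bumps m_free_buffers once more
// after its loop: the last descriptor carries no NEXT flag but still belongs
// to the chain.
static size_t chain_length(VirtQDescriptor const* table, uint16_t head)
{
    size_t count = 1;
    uint16_t index = head;
    while (table[index].flags & VIRTQ_DESC_F_NEXT) {
        index = table[index].next;
        ++count;
    }
    return count;
}

int main()
{
    VirtQDescriptor table[3];
    table[0] = { 0x1000, 512, VIRTQ_DESC_F_NEXT, 1 };
    table[1] = { 0x2000, 512, VIRTQ_DESC_F_NEXT, 2 };
    table[2] = { 0x3000, 512, 0, 0 }; // NEXT cleared: end of the chain
    std::printf("chain length: %zu\n", chain_length(table, 0)); // prints 3
}
```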
@@ -333,7 +333,7 @@ int main(int argc, char* argv[])
 }
 } else {
 GUI::MessageBox::show(window,
-String::formatted("Successfuly deleted \"{}\".", selected_node_path),
+String::formatted("Successfully deleted \"{}\".", selected_node_path),
 "Deletion completed",
 GUI::MessageBox::Type::Information,
 GUI::MessageBox::InputType::OK);

@@ -233,7 +233,7 @@ void NewProjectDialog::do_create_project()

 auto creation_result = project_template->create_project(maybe_project_name.value(), maybe_project_full_path.value());
 if (!creation_result.is_error()) {
-// Succesfully created, attempt to open the new project
+// Successfully created, attempt to open the new project
 m_created_project_path = maybe_project_full_path.value();
 done(ExecResult::ExecOK);
 } else {

@@ -42,7 +42,7 @@ void AutoCompleteEngine::set_declarations_of_document(const String& filename, Ve
 {
 VERIFY(set_declarations_of_document_callback);

-// Optimization - Only notify callback if declerations have changed
+// Optimization - Only notify callback if declarations have changed
 if (auto previous_declarations = m_all_declarations.get(filename); previous_declarations.has_value()) {
 if (previous_declarations.value() == declarations)
 return;

@@ -78,7 +78,7 @@ void GMLAutocompleteProvider::provide_completions(Function<void(Vector<Entry>)>
 break;
 case InClassName:
 if (token.m_type != GUI::GMLToken::Type::LeftCurly) {
-// Close empty class and imediately handle our parent's next child
+// Close empty class and immediately handle our parent's next child
 class_names.take_last();
 state = previous_states.take_last();
@@ -952,12 +952,12 @@ u32 Emulator::virt$unveil(u32)
 u32 Emulator::virt$mprotect(FlatPtr base, size_t size, int prot)
 {
 round_to_page_size(base, size);
-bool has_non_mmaped_region = false;
+bool has_non_mmapped_region = false;

 mmu().for_regions_in({ 0x23, base }, size, [&](Region* region) {
 if (region) {
 if (!is<MmapRegion>(*region)) {
-has_non_mmaped_region = true;
+has_non_mmapped_region = true;
 return IterationDecision::Break;
 }
 auto& mmap_region = *(MmapRegion*)region;

@@ -965,7 +965,7 @@ u32 Emulator::virt$mprotect(FlatPtr base, size_t size, int prot)
 }
 return IterationDecision::Continue;
 });
-if (has_non_mmaped_region)
+if (has_non_mmapped_region)
 return -EINVAL;

 return 0;
@@ -85,8 +85,8 @@ public:
 void set_timestamp(time_t timestamp) { VERIFY(String::formatted("{:o}", timestamp).copy_characters_to_buffer(m_timestamp, sizeof(m_timestamp))); }
 void set_type_flag(TarFileType type) { m_type_flag = static_cast<char>(type); }
 void set_link_name(const String& link_name) { VERIFY(link_name.copy_characters_to_buffer(m_link_name, sizeof(m_link_name))); }
-void set_magic(const char* magic) { memcpy(m_magic, magic, sizeof(m_magic)); } // magic doesnt necessarily include a null byte
-void set_version(const char* version) { memcpy(m_version, version, sizeof(m_version)); } // version doesnt necessarily include a null byte
+void set_magic(const char* magic) { memcpy(m_magic, magic, sizeof(m_magic)); } // magic doesn't necessarily include a null byte
+void set_version(const char* version) { memcpy(m_version, version, sizeof(m_version)); } // version doesn't necessarily include a null byte
 void set_owner_name(const String& owner_name) { VERIFY(owner_name.copy_characters_to_buffer(m_owner_name, sizeof(m_owner_name))); }
 void set_group_name(const String& group_name) { VERIFY(group_name.copy_characters_to_buffer(m_group_name, sizeof(m_group_name))); }
 void set_major(int major) { VERIFY(String::formatted("{:o}", major).copy_characters_to_buffer(m_major, sizeof(m_major))); }

@@ -131,7 +131,7 @@ bool TarInputStream::valid() const
 || (header_magic == posix1_tar_magic && header_version == posix1_tar_version)))
 return false;

-// POSIX.1-1988 tar does not have magic numbers, so we also neet to verify the header checksum.
+// POSIX.1-1988 tar does not have magic numbers, so we also need to verify the header checksum.
 return header().checksum() == header().expected_checksum();
 }
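For reference, the header checksum that valid() falls back to is the standard ustar/POSIX one: the unsigned sum of all 512 header bytes, with the 8-byte chksum field itself treated as if it were filled with ASCII spaces (since those bytes were blank when the archiver originally computed the value). A minimal, self-contained sketch of that calculation, not LibArchive's actual TarFileHeader API:

```cpp
#include <cstddef>
#include <cstdint>

// Per the ustar header layout, the chksum field occupies bytes 148..155.
static constexpr size_t tar_header_size = 512;
static constexpr size_t checksum_offset = 148;
static constexpr size_t checksum_length = 8;

// Sum every byte of the raw 512-byte header, substituting a space (0x20) for
// each byte of the checksum field itself.
static unsigned expected_tar_checksum(uint8_t const (&header)[tar_header_size])
{
    unsigned sum = 0;
    for (size_t i = 0; i < tar_header_size; ++i) {
        bool in_checksum_field = i >= checksum_offset && i < checksum_offset + checksum_length;
        sum += in_checksum_field ? 0x20 : header[i];
    }
    return sum; // compare against the octal number stored in the chksum field
}
```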
@@ -130,7 +130,7 @@ struct [[gnu::packed]] PtraceRegisters {
 };
 };

-// These may not be used, unless we go back into compatability mode
+// These may not be used, unless we go back into compatibility mode
 u32 cs;
 u32 ss;
 u32 ds;
@@ -574,7 +574,7 @@ size_t DeflateCompressor::compare_match_candidate(size_t start, size_t candidate
 {
 VERIFY(previous_match_length < maximum_match_length);

-// We firstly check that the match is at least (prev_match_length + 1) long, we check backwards as theres a higher chance the end mismatches
+// We firstly check that the match is at least (prev_match_length + 1) long, we check backwards as there's a higher chance the end mismatches
 for (ssize_t i = previous_match_length; i >= 0; i--) {
 if (m_rolling_window[start + i] != m_rolling_window[candidate + i])
 return 0;

@@ -597,7 +597,7 @@ size_t DeflateCompressor::find_back_match(size_t start, u16 hash, size_t previou
 if (previous_match_length == 0)
 previous_match_length = min_match_length - 1; // we only care about matches that are at least min_match_length long
 if (previous_match_length >= maximum_match_length)
-return 0; // we cant improve a maximum length match
+return 0; // we can't improve a maximum length match
 if (previous_match_length >= m_compression_constants.max_lazy_length)
 return 0; // the previous match is already pretty, we shouldn't waste another full search
 if (previous_match_length >= m_compression_constants.good_match_length)

@@ -627,7 +627,7 @@ size_t DeflateCompressor::find_back_match(size_t start, u16 hash, size_t previou
 candidate = m_hash_prev[candidate % window_size];
 }
 if (!match_found)
-return 0; // we didnt find any matches
+return 0; // we didn't find any matches
 return previous_match_length; // we found matches, but they were at most previous_match_length long
 }

@@ -1040,7 +1040,7 @@ void DeflateCompressor::flush()
 auto fixed_huffman_size = fixed_block_length();
 auto dynamic_huffman_size = dynamic_block_length(dynamic_literal_bit_lengths, dynamic_distance_bit_lengths, code_lengths_bit_lengths, code_lengths_frequencies, code_lengths_count);

-// If the compression somehow didnt reduce the size enough, just write out the block uncompressed as it allows for much faster decompression
+// If the compression somehow didn't reduce the size enough, just write out the block uncompressed as it allows for much faster decompression
 if (uncompressed_size <= min(fixed_huffman_size, dynamic_huffman_size)) {
 write_uncompressed();
 } else if (fixed_huffman_size <= dynamic_huffman_size) { // If the fixed and dynamic huffman codes come out the same size, prefer the fixed version, as it takes less time to decode
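The flush() hunk above encodes DEFLATE's standard three-way choice between a stored (uncompressed) block, a fixed-Huffman block, and a dynamic-Huffman block: pick whichever encodes the current data in the fewest bits, breaking ties toward the option that is cheaper to decode. A small stand-alone sketch of that selection logic, with hypothetical names rather than DeflateCompressor's real interface:

```cpp
#include <cstddef>

enum class BlockType {
    Stored,         // BTYPE 00: raw bytes, fastest to decode
    FixedHuffman,   // BTYPE 01: predefined code, no code-length tables to transmit
    DynamicHuffman, // BTYPE 10: per-block codes, smallest output for skewed data
};

// Pick the cheapest representation; on ties prefer the simpler block type,
// mirroring the reasoning in the flush() comments above.
static BlockType choose_block_type(size_t stored_bits, size_t fixed_bits, size_t dynamic_bits)
{
    if (stored_bits <= fixed_bits && stored_bits <= dynamic_bits)
        return BlockType::Stored;
    if (fixed_bits <= dynamic_bits)
        return BlockType::FixedHuffman;
    return BlockType::DynamicHuffman;
}
```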
@@ -55,7 +55,7 @@ Optional<Zlib> Zlib::try_create(ReadonlyBytes data)
 return {}; // we dont support pre-defined dictionaries

 if ((compression_info * 256 + flags) % 31 != 0)
-return {}; // error correction code doesnt match
+return {}; // error correction code doesn't match

 zlib.m_data_bytes = data.slice(2, data.size() - 2 - 4);
 return zlib;
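The % 31 test in that hunk is the zlib header check from RFC 1950: the first two bytes of the stream (CMF and FLG), read as a big-endian 16-bit number, must be a multiple of 31, because the compressor chooses the FCHECK bits of FLG to make that true. A worked example with the very common header 0x78 0x9C, assuming compression_info in the hunk corresponds to the CMF byte:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    uint8_t cmf = 0x78; // deflate with a 32 KiB window: the usual first byte of a zlib stream
    uint8_t flg = 0x9C; // default-compression FLG whose FCHECK bits make the pair valid

    // 0x78 * 256 + 0x9C == 30876, and 30876 == 31 * 996, so the header passes.
    unsigned check = cmf * 256u + flg;
    std::printf("%u %% 31 = %u (%s)\n", check, check % 31,
        check % 31 == 0 ? "valid zlib header" : "corrupt header");
}
```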
@@ -63,7 +63,7 @@ ByteBuffer Reader::decompress_coredump(const ReadonlyBytes& raw_coredump)
 return ByteBuffer::copy(raw_coredump); // handle old format core dumps (uncompressed)
 auto decompressed_coredump = Compress::GzipDecompressor::decompress_all(raw_coredump);
 if (!decompressed_coredump.has_value())
-return ByteBuffer::copy(raw_coredump); // if we didnt manage to decompress it, try and parse it as decompressed core dump
+return ByteBuffer::copy(raw_coredump); // if we didn't manage to decompress it, try and parse it as decompressed core dump
 return decompressed_coredump.value();
 }
@@ -577,7 +577,7 @@ void TextEditor::paint_event(PaintEvent& event)
 if (span.range.end().line() > line_index || span.range.end().column() >= start_of_visual_line + visual_line_text.length()) {
 if (visual_line_text.length() == 0) {
 // subtracting 1 would wrap around
-// scince there is nothing to draw here just move on
+// since there is nothing to draw here just move on
 break;
 }
 span_end = visual_line_text.length() - 1;
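The "subtracting 1 would wrap around" guard exists because visual_line_text.length() is an unsigned quantity, so length() - 1 on an empty line wraps to a huge value instead of -1. A tiny illustration of the hazard:

```cpp
#include <cstddef>
#include <cstdio>

int main()
{
    size_t length = 0;            // an empty visual line
    size_t span_end = length - 1; // wraps: size_t has no negative values
    std::printf("%zu\n", span_end); // prints 18446744073709551615 on 64-bit targets
    // Hence the early `break` in the hunk above when length() == 0.
}
```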
@@ -897,7 +897,7 @@ public:
 if (imported_address.has_value())
 rules.append(CSS::CSSImportRule::create(m_context.complete_url(imported_address.value())));

-// FIXME: We ignore possilbe media query list
+// FIXME: We ignore possible media query list
 while (peek() && peek() != ';')
 consume_one();

@@ -43,7 +43,7 @@ afterInitialPageLoad(() => {

 expect(head.compareDocumentPosition(head)).toBe(0);

-// FIXME: Can be uncommented once the IDL parser correctly implements nullable paramaters.
+// FIXME: Can be uncommented once the IDL parser correctly implements nullable parameters.
 // expect(head.compareDocumentPosition(null) & Node.DOCUMENT_POSITION_DISCONNECTED | Node.DOCUMENT_POSITION_IMPLEMENTATION_SPECIFIC).
 // toBe(Node.DOCUMENT_POSITION_DISCONNECTED | Node.DOCUMENT_POSITION_IMPLEMENTATION_SPECIFIC);

@@ -65,7 +65,7 @@ int main()
 }
 }
 {
-printf("Testing partial unmaping\n");
+printf("Testing partial unmapping\n");
 auto* map1 = mmap(nullptr, 2 * PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0);
 if (map1 == MAP_FAILED) {
 perror("mmap 1");

@@ -45,7 +45,7 @@ const char* usage = "usage:\n"
 "\tbs=<size>\tblocks size (default: 512)\n"
 "\tcount=<size>\t<size> blocks to copy (default: 0 (until end-of-file))\n"
 "\tseek=<size>\tskip <size> blocks at start of output (default: 0)\n"
-"\tskip=<size>\tskip <size> blocks at start of intput (default: 0)\n"
+"\tskip=<size>\tskip <size> blocks at start of input (default: 0)\n"
 "\tstatus=<level>\tlevel of output (default: default)\n"
 "\t\t\tdefault - error messages + final statistics\n"
 "\t\t\tnone - just error messages\n"

@@ -99,7 +99,7 @@ int main(int argc, char** argv)
 const char* path = "/res/fortunes.json";

 Core::ArgsParser args_parser;
-args_parser.set_general_help("Open a fortune cookie, recieve a free quote for the day!");
+args_parser.set_general_help("Open a fortune cookie, receive a free quote for the day!");
 args_parser.add_positional_argument(path, "Path to JSON file with quotes (/res/fortunes.json by default)", "path", Core::ArgsParser::Required::No);
 args_parser.parse(argc, argv);