Kernel: Propagate overflow errors from Memory::page_round_up
Fixes #11402.
This commit is contained in:
parent 11599a3342
commit 33b78915d3

Notes:
sideshowbarker 2024-07-17 22:09:47 +09:00
Author: https://github.com/eggpi
Commit: https://github.com/SerenityOS/serenity/commit/33b78915d37
Pull-request: https://github.com/SerenityOS/serenity/pull/11433
Issue: https://github.com/SerenityOS/serenity/issues/11402
Reviewed-by: https://github.com/awesomekling
31 changed files with 112 additions and 100 deletions
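The shape of the change, as a minimal sketch distilled from the hunks below (all three call shapes appear verbatim in the patch):

    // Before: page_round_up() asserted via page_round_up_would_wrap() and returned FlatPtr.
    // After: it returns ErrorOr<FlatPtr>, so an overflowing size surfaces as EINVAL.
    ErrorOr<FlatPtr> page_round_up(FlatPtr x);

    // Fallible callers propagate the error:
    auto rounded_size = TRY(Memory::page_round_up(size));

    // Callers that cannot fail yet keep the old crash-on-overflow behavior,
    // now with an explicit marker for future cleanup:
    auto size = Memory::page_round_up(pitch * height).release_value_but_fixme_should_propagate_errors();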
@@ -59,7 +59,17 @@ UNMAP_AFTER_INIT bool Access::search_pci_domains_from_acpi_mcfg_table(PhysicalAd
     dbgln("PCI: MCFG, length: {}, revision: {}", length, revision);
 
-    auto mcfg_region_or_error = MM.allocate_kernel_region(mcfg_table.page_base(), Memory::page_round_up(length) + PAGE_SIZE, "PCI Parsing MCFG", Memory::Region::Access::ReadWrite);
+    if (Checked<size_t>::addition_would_overflow(length, PAGE_SIZE)) {
+        dbgln("Overflow when adding extra page to allocation of length {}", length);
+        return false;
+    }
+    length += PAGE_SIZE;
+    auto region_size_or_error = Memory::page_round_up(length);
+    if (region_size_or_error.is_error()) {
+        dbgln("Failed to round up length of {} to pages", length);
+        return false;
+    }
+    auto mcfg_region_or_error = MM.allocate_kernel_region(mcfg_table.page_base(), region_size_or_error.value(), "PCI Parsing MCFG", Memory::Region::Access::ReadWrite);
     if (mcfg_region_or_error.is_error())
         return false;
     auto& mcfg = *(ACPI::Structures::MCFG*)mcfg_region_or_error.value()->vaddr().offset(mcfg_table.offset_in_page()).as_ptr();
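A note on the guard added above: AK's Checked<size_t>::addition_would_overflow is what makes the subsequent length += PAGE_SIZE safe. A standalone sketch of the same unsigned-overflow test (illustrative only, not the AK implementation):

    #include <cstddef>
    #include <limits>

    // Unsigned addition a + b wraps exactly when a > MAX - b.
    static bool addition_would_overflow(size_t a, size_t b)
    {
        return a > std::numeric_limits<size_t>::max() - b;
    }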
@@ -131,7 +131,12 @@ UNMAP_AFTER_INIT void Device::initialize()
         auto& mapping = m_mmio[cfg.bar];
         mapping.size = PCI::get_BAR_space_size(pci_address(), cfg.bar);
         if (!mapping.base && mapping.size) {
-            auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), cfg.bar))), Memory::page_round_up(mapping.size), "VirtIO MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
+            auto region_size_or_error = Memory::page_round_up(mapping.size);
+            if (region_size_or_error.is_error()) {
+                dbgln_if(VIRTIO_DEBUG, "{}: Failed to round up size={} to pages", m_class_name, mapping.size);
+                continue;
+            }
+            auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR(pci_address(), cfg.bar))), region_size_or_error.value(), "VirtIO MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
             if (region_or_error.is_error()) {
                 dbgln_if(VIRTIO_DEBUG, "{}: Failed to map bar {} - (size={}) {}", m_class_name, cfg.bar, mapping.size, region_or_error.error());
             } else {
@@ -17,7 +17,7 @@ Queue::Queue(u16 queue_size, u16 notify_offset)
     size_t size_of_descriptors = sizeof(QueueDescriptor) * queue_size;
     size_t size_of_driver = sizeof(QueueDriver) + queue_size * sizeof(u16);
     size_t size_of_device = sizeof(QueueDevice) + queue_size * sizeof(QueueDeviceItem);
-    auto queue_region_size = Memory::page_round_up(size_of_descriptors + size_of_driver + size_of_device);
+    auto queue_region_size = Memory::page_round_up(size_of_descriptors + size_of_driver + size_of_device).release_value_but_fixme_should_propagate_errors();
     if (queue_region_size <= PAGE_SIZE)
         m_queue_region = MM.allocate_kernel_region(queue_region_size, "VirtIO Queue", Memory::Region::Access::ReadWrite).release_value();
     else
@@ -22,7 +22,8 @@ static constexpr u16 pcm_sample_rate_maximum = 48000;
 
 static ErrorOr<OwnPtr<Memory::Region>> allocate_physical_buffer(size_t size, StringView name)
 {
-    auto vmobject = TRY(Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(Memory::page_round_up(size)));
+    auto rounded_size = TRY(Memory::page_round_up(size));
+    auto vmobject = TRY(Memory::AnonymousVMObject::try_create_physically_contiguous_with_size(rounded_size));
     return TRY(MM.allocate_kernel_region_with_vmobject(move(vmobject), vmobject->size(), name, Memory::Region::Access::Write));
 }
 
@@ -20,7 +20,7 @@ ErrorOr<void> KCOVInstance::buffer_allocate(size_t buffer_size_in_entries)
 
     // first entry contains index of last PC
     m_buffer_size_in_entries = buffer_size_in_entries - 1;
-    m_buffer_size_in_bytes = Memory::page_round_up(buffer_size_in_entries * KCOV_ENTRY_SIZE);
+    m_buffer_size_in_bytes = TRY(Memory::page_round_up(buffer_size_in_entries * KCOV_ENTRY_SIZE));
 
     // one single vmobject is representing the buffer
     // - we allocate one kernel region using that vmobject
@@ -156,7 +156,7 @@ Memory::MappedROM map_bios()
     Memory::MappedROM mapping;
     mapping.size = 128 * KiB;
     mapping.paddr = PhysicalAddress(0xe0000);
-    mapping.region = MM.allocate_kernel_region(mapping.paddr, Memory::page_round_up(mapping.size), {}, Memory::Region::Access::Read).release_value();
+    mapping.region = MM.allocate_kernel_region(mapping.paddr, Memory::page_round_up(mapping.size).release_value_but_fixme_should_propagate_errors(), {}, Memory::Region::Access::Read).release_value();
     return mapping;
 }
 
@@ -170,7 +170,7 @@ Memory::MappedROM map_ebda()
     size_t ebda_size = (*ebda_length_ptr_b1 << 8) | *ebda_length_ptr_b0;
 
     Memory::MappedROM mapping;
-    mapping.region = MM.allocate_kernel_region(ebda_paddr.page_base(), Memory::page_round_up(ebda_size), {}, Memory::Region::Access::Read).release_value();
+    mapping.region = MM.allocate_kernel_region(ebda_paddr.page_base(), Memory::page_round_up(ebda_size).release_value_but_fixme_should_propagate_errors(), {}, Memory::Region::Access::Read).release_value();
     mapping.offset = ebda_paddr.offset_in_page();
     mapping.size = ebda_size;
     mapping.paddr = ebda_paddr;
@@ -27,8 +27,9 @@ void ContiguousFramebufferConsole::set_resolution(size_t width, size_t height, s
     m_height = height;
     m_pitch = pitch;
 
-    dbgln("Framebuffer Console: taking {} bytes", Memory::page_round_up(pitch * height));
-    auto region_or_error = MM.allocate_kernel_region(m_framebuffer_address, Memory::page_round_up(pitch * height), "Framebuffer Console", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::Yes);
+    size_t size = Memory::page_round_up(pitch * height).release_value_but_fixme_should_propagate_errors();
+    dbgln("Framebuffer Console: taking {} bytes", size);
+    auto region_or_error = MM.allocate_kernel_region(m_framebuffer_address, size, "Framebuffer Console", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::Yes);
     VERIFY(!region_or_error.is_error());
     m_framebuffer_region = region_or_error.release_value();
 
@@ -11,7 +11,7 @@ namespace Kernel::Graphics {
 
 UNMAP_AFTER_INIT VGAConsole::VGAConsole(const VGACompatibleAdapter& adapter, Mode mode, size_t width, size_t height)
     : Console(width, height)
-    , m_vga_region(MM.allocate_kernel_region(PhysicalAddress(0xa0000), Memory::page_round_up(0xc0000 - 0xa0000), "VGA Display", Memory::Region::Access::ReadWrite).release_value())
+    , m_vga_region(MM.allocate_kernel_region(PhysicalAddress(0xa0000), Memory::page_round_up(0xc0000 - 0xa0000).release_value_but_fixme_should_propagate_errors(), "VGA Display", Memory::Region::Access::ReadWrite).release_value())
     , m_adapter(adapter)
     , m_mode(mode)
 {
@@ -36,14 +36,15 @@ ErrorOr<Memory::Region*> FramebufferDevice::mmap(Process& process, OpenFileDescr
     if (offset != 0)
         return ENXIO;
     auto framebuffer_length = TRY(buffer_length(0));
-    if (range.size() != Memory::page_round_up(framebuffer_length))
+    framebuffer_length = TRY(Memory::page_round_up(framebuffer_length));
+    if (range.size() != framebuffer_length)
         return EOVERFLOW;
 
-    m_userspace_real_framebuffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_length)));
-    m_real_framebuffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_length)));
-    m_swapped_framebuffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_length), AllocationStrategy::AllocateNow));
-    m_real_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_length), "Framebuffer", Memory::Region::Access::ReadWrite));
-    m_swapped_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, Memory::page_round_up(framebuffer_length), "Framebuffer Swap (Blank)", Memory::Region::Access::ReadWrite));
+    m_userspace_real_framebuffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, framebuffer_length));
+    m_real_framebuffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, framebuffer_length));
+    m_swapped_framebuffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(framebuffer_length, AllocationStrategy::AllocateNow));
+    m_real_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, framebuffer_length, "Framebuffer", Memory::Region::Access::ReadWrite));
+    m_swapped_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, framebuffer_length, "Framebuffer Swap (Blank)", Memory::Region::Access::ReadWrite));
 
     RefPtr<Memory::VMObject> chosen_vmobject;
     if (m_graphical_writes_enabled) {
@@ -68,7 +69,8 @@ void FramebufferDevice::deactivate_writes()
         return;
     auto framebuffer_length_or_error = buffer_length(0);
     VERIFY(!framebuffer_length_or_error.is_error());
-    memcpy(m_swapped_framebuffer_region->vaddr().as_ptr(), m_real_framebuffer_region->vaddr().as_ptr(), Memory::page_round_up(framebuffer_length_or_error.release_value()));
+    size_t rounded_framebuffer_length = Memory::page_round_up(framebuffer_length_or_error.release_value()).release_value_but_fixme_should_propagate_errors();
+    memcpy(m_swapped_framebuffer_region->vaddr().as_ptr(), m_real_framebuffer_region->vaddr().as_ptr(), rounded_framebuffer_length);
     auto vmobject = m_swapped_framebuffer_vmobject;
     m_userspace_framebuffer_region->set_vmobject(vmobject.release_nonnull());
     m_userspace_framebuffer_region->remap();
@@ -85,7 +87,8 @@ void FramebufferDevice::activate_writes()
     auto framebuffer_length_or_error = buffer_length(0);
     VERIFY(!framebuffer_length_or_error.is_error());
 
-    memcpy(m_real_framebuffer_region->vaddr().as_ptr(), m_swapped_framebuffer_region->vaddr().as_ptr(), Memory::page_round_up(framebuffer_length_or_error.release_value()));
+    size_t rounded_framebuffer_length = Memory::page_round_up(framebuffer_length_or_error.release_value()).release_value_but_fixme_should_propagate_errors();
+    memcpy(m_real_framebuffer_region->vaddr().as_ptr(), m_swapped_framebuffer_region->vaddr().as_ptr(), rounded_framebuffer_length);
     auto vmobject = m_userspace_real_framebuffer_vmobject;
     m_userspace_framebuffer_region->set_vmobject(vmobject.release_nonnull());
     m_userspace_framebuffer_region->remap();
@@ -97,10 +100,11 @@ UNMAP_AFTER_INIT ErrorOr<void> FramebufferDevice::try_to_initialize()
     // FIXME: Would be nice to be able to unify this with mmap above, but this
     // function is UNMAP_AFTER_INIT for the time being.
     auto framebuffer_length = TRY(buffer_length(0));
-    m_real_framebuffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, Memory::page_round_up(framebuffer_length)));
-    m_swapped_framebuffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(framebuffer_length), AllocationStrategy::AllocateNow));
-    m_real_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, Memory::page_round_up(framebuffer_length), "Framebuffer", Memory::Region::Access::ReadWrite));
-    m_swapped_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, Memory::page_round_up(framebuffer_length), "Framebuffer Swap (Blank)", Memory::Region::Access::ReadWrite));
+    framebuffer_length = TRY(Memory::page_round_up(framebuffer_length));
+    m_real_framebuffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_for_physical_range(m_framebuffer_address, framebuffer_length));
+    m_swapped_framebuffer_vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(framebuffer_length, AllocationStrategy::AllocateNow));
+    m_real_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_real_framebuffer_vmobject, framebuffer_length, "Framebuffer", Memory::Region::Access::ReadWrite));
+    m_swapped_framebuffer_region = TRY(MM.allocate_kernel_region_with_vmobject(*m_swapped_framebuffer_vmobject, framebuffer_length, "Framebuffer Swap (Blank)", Memory::Region::Access::ReadWrite));
     return {};
 }
 
@@ -40,7 +40,7 @@ public:
     static size_t calculate_framebuffer_size(size_t width, size_t height)
    {
         // VirtIO resources can only map on page boundaries!
-        return Memory::page_round_up(sizeof(u32) * width * height);
+        return Memory::page_round_up(sizeof(u32) * width * height).value();
     }
 
     u8* framebuffer_data();
@@ -226,7 +226,11 @@ struct KmallocGlobalData {
         if (padded_allocation_request.has_overflow()) {
             PANIC("Integer overflow during kmalloc heap expansion");
         }
-        size_t new_subheap_size = max(minimum_subheap_size, Memory::page_round_up(padded_allocation_request.value()));
+        auto rounded_allocation_request = Memory::page_round_up(padded_allocation_request.value());
+        if (rounded_allocation_request.is_error()) {
+            PANIC("Integer overflow computing pages for kmalloc heap expansion");
+        }
+        size_t new_subheap_size = max(minimum_subheap_size, rounded_allocation_request.value());
 
         dbgln("Unable to allocate {}, expanding kmalloc heap", allocation_request);
 
@@ -334,7 +334,7 @@ UNMAP_AFTER_INIT void APIC::setup_ap_boot_environment()
     // * aps_to_enable u32 values for ap_cpu_init_processor_info_array
     constexpr u64 apic_startup_region_base = 0x8000;
     VERIFY(apic_startup_region_base + apic_ap_start_size < USER_RANGE_BASE);
-    auto apic_startup_region = create_identity_mapped_region(PhysicalAddress(apic_startup_region_base), Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))));
+    auto apic_startup_region = create_identity_mapped_region(PhysicalAddress(apic_startup_region_base), Memory::page_round_up(apic_ap_start_size + (2 * aps_to_enable * sizeof(u32))).release_value_but_fixme_should_propagate_errors());
     memcpy(apic_startup_region->vaddr().as_ptr(), reinterpret_cast<const void*>(apic_ap_start), apic_ap_start_size);
 
     // Allocate enough stacks for all APs
@@ -26,7 +26,8 @@ class [[nodiscard]] KBuffer {
 public:
     static ErrorOr<NonnullOwnPtr<KBuffer>> try_create_with_size(size_t size, Memory::Region::Access access = Memory::Region::Access::ReadWrite, StringView name = "KBuffer", AllocationStrategy strategy = AllocationStrategy::Reserve)
     {
-        auto region = TRY(MM.allocate_kernel_region(Memory::page_round_up(size), name, access, strategy));
+        auto rounded_size = TRY(Memory::page_round_up(size));
+        auto region = TRY(MM.allocate_kernel_region(rounded_size, name, access, strategy));
         return TRY(adopt_nonnull_own_or_enomem(new (nothrow) KBuffer { size, move(region) }));
     }
 
@@ -20,8 +20,11 @@ inline bool KBufferBuilder::check_expand(size_t size)
     size_t new_buffer_size = m_size + size;
     if (Checked<size_t>::addition_would_overflow(new_buffer_size, 1 * MiB))
         return false;
-    new_buffer_size = Memory::page_round_up(new_buffer_size + 1 * MiB);
-    auto new_buffer_or_error = KBuffer::try_create_with_size(new_buffer_size);
+    auto rounded_new_buffer_size_or_error = Memory::page_round_up(new_buffer_size + 1 * MiB);
+    if (rounded_new_buffer_size_or_error.is_error()) {
+        return false;
+    }
+    auto new_buffer_or_error = KBuffer::try_create_with_size(rounded_new_buffer_size_or_error.value());
     if (new_buffer_or_error.is_error())
         return false;
     auto new_buffer = new_buffer_or_error.release_value();
@@ -131,7 +131,7 @@ ErrorOr<void> AddressSpace::unmap_mmap_range(VirtualAddress addr, size_t size)
 ErrorOr<VirtualRange> AddressSpace::try_allocate_range(VirtualAddress vaddr, size_t size, size_t alignment)
 {
     vaddr.mask(PAGE_MASK);
-    size = page_round_up(size);
+    size = TRY(page_round_up(size));
     if (vaddr.is_null())
         return page_directory().range_allocator().try_allocate_anywhere(size, alignment);
     return page_directory().range_allocator().try_allocate_specific(vaddr, size);
@@ -222,8 +222,8 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
     if (!found_region)
         return nullptr;
     auto& region = *found_region;
-    size_t size = page_round_up(range.size());
-    if (region->size() != size)
+    auto rounded_range_size = page_round_up(range.size());
+    if (rounded_range_size.is_error() || region->size() != rounded_range_size.value())
         return nullptr;
     m_region_lookup_cache.range = range;
     m_region_lookup_cache.region = *region;
@@ -45,6 +45,14 @@ __attribute__((section(".super_pages"))) static u8 super_pages[4 * MiB];
 
 namespace Kernel::Memory {
 
+ErrorOr<FlatPtr> page_round_up(FlatPtr x)
+{
+    if (x > (explode_byte(0xFF) & ~0xFFF)) {
+        return Error::from_errno(EINVAL);
+    }
+    return (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
+}
+
 // NOTE: We can NOT use Singleton for this class, because
 // MemoryManager::initialize is called *before* global constructors are
 // run. If we do, then Singleton would get re-initialized, causing
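A short worked note on the guard in the new page_round_up above: explode_byte(0xFF) yields a FlatPtr with every byte set (the all-ones value), and masking with ~0xFFF gives the largest page-aligned FlatPtr; anything above that would wrap around zero when PAGE_SIZE - 1 is added. A standalone mirror of the check, assuming a 64-bit FlatPtr and a 4 KiB page (which the hard-coded 0xFFF mask implies):

    #include <cstdint>
    #include <optional>

    constexpr uint64_t page_size = 0x1000;

    constexpr std::optional<uint64_t> checked_page_round_up(uint64_t x)
    {
        // Largest page-aligned value on 64 bits: 0xFFFF'FFFF'FFFF'F000.
        if (x > (UINT64_MAX & ~(page_size - 1)))
            return std::nullopt; // rounding up would wrap past 2^64
        return (x + page_size - 1) & ~(page_size - 1);
    }

    static_assert(*checked_page_round_up(1) == 0x1000);
    static_assert(*checked_page_round_up(0xFFFF'FFFF'FFFF'F000) == 0xFFFF'FFFF'FFFF'F000);
    static_assert(!checked_page_round_up(0xFFFF'FFFF'FFFF'F001).has_value());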
@@ -137,7 +145,7 @@ void MemoryManager::unmap_text_after_init()
     SpinlockLocker mm_lock(s_mm_lock);
 
     auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
-    auto end = page_round_up((FlatPtr)&end_of_unmap_after_init);
+    auto end = page_round_up((FlatPtr)&end_of_unmap_after_init).release_value_but_fixme_should_propagate_errors();
 
     // Unmap the entire .unmap_after_init section
     for (auto i = start; i < end; i += PAGE_SIZE) {
@@ -155,7 +163,7 @@ UNMAP_AFTER_INIT void MemoryManager::protect_ksyms_after_init()
     SpinlockLocker page_lock(kernel_page_directory().get_lock());
 
     auto start = page_round_down((FlatPtr)start_of_kernel_ksyms);
-    auto end = page_round_up((FlatPtr)end_of_kernel_ksyms);
+    auto end = page_round_up((FlatPtr)end_of_kernel_ksyms).release_value_but_fixme_should_propagate_errors();
 
     for (auto i = start; i < end; i += PAGE_SIZE) {
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -213,7 +221,7 @@ UNMAP_AFTER_INIT void MemoryManager::parse_memory_map()
     // Register used memory regions that we know of.
     m_used_memory_ranges.ensure_capacity(4);
     m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::LowMemory, PhysicalAddress(0x00000000), PhysicalAddress(1 * MiB) });
-    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image))) });
+    m_used_memory_ranges.append(UsedMemoryRange { UsedMemoryRangeType::Kernel, PhysicalAddress(virtual_to_low_physical((FlatPtr)start_of_kernel_image)), PhysicalAddress(page_round_up(virtual_to_low_physical((FlatPtr)end_of_kernel_image)).release_value_but_fixme_should_propagate_errors()) });
 
     if (multiboot_flags & 0x4) {
         auto* bootmods_start = multiboot_copy_boot_modules_array;
@@ -380,7 +388,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
 
     // Calculate how many bytes the array will consume
     auto physical_page_array_size = m_physical_page_entries_count * sizeof(PhysicalPageEntry);
-    auto physical_page_array_pages = page_round_up(physical_page_array_size) / PAGE_SIZE;
+    auto physical_page_array_pages = page_round_up(physical_page_array_size).release_value_but_fixme_should_propagate_errors() / PAGE_SIZE;
     VERIFY(physical_page_array_pages * PAGE_SIZE >= physical_page_array_size);
 
     // Calculate how many page tables we will need to be able to map them all
@@ -26,16 +26,7 @@ struct KmallocGlobalData;
 
 namespace Kernel::Memory {
 
-constexpr bool page_round_up_would_wrap(FlatPtr x)
-{
-    return x > (explode_byte(0xFF) & ~0xFFF);
-}
-
-constexpr FlatPtr page_round_up(FlatPtr x)
-{
-    VERIFY(!page_round_up_would_wrap(x));
-    return (((FlatPtr)(x)) + PAGE_SIZE - 1) & (~(PAGE_SIZE - 1));
-}
+ErrorOr<FlatPtr> page_round_up(FlatPtr x);
 
 constexpr FlatPtr page_round_down(FlatPtr x)
 {
@@ -340,17 +331,11 @@ inline bool PhysicalPage::is_lazy_committed_page() const
 
 inline ErrorOr<Memory::VirtualRange> expand_range_to_page_boundaries(FlatPtr address, size_t size)
 {
-    if (Memory::page_round_up_would_wrap(size))
-        return EINVAL;
-
     if ((address + size) < address)
         return EINVAL;
 
-    if (Memory::page_round_up_would_wrap(address + size))
-        return EINVAL;
-
     auto base = VirtualAddress { address }.page_base();
-    auto end = Memory::page_round_up(address + size);
+    auto end = TRY(Memory::page_round_up(address + size));
 
     return Memory::VirtualRange { base, end - base.get() };
 }
 
@@ -11,7 +11,7 @@
 namespace Kernel::Memory {
 
 RingBuffer::RingBuffer(String region_name, size_t capacity)
-    : m_region(MM.allocate_contiguous_kernel_region(page_round_up(capacity), move(region_name), Region::Access::Read | Region::Access::Write).release_value())
+    : m_region(MM.allocate_contiguous_kernel_region(page_round_up(capacity).release_value_but_fixme_should_propagate_errors(), move(region_name), Region::Access::Read | Region::Access::Write).release_value())
     , m_capacity_in_bytes(capacity)
 {
 }
@@ -21,7 +21,7 @@ RefPtr<ScatterGatherList> ScatterGatherList::try_create(AsyncBlockDeviceRequest&
 ScatterGatherList::ScatterGatherList(NonnullRefPtr<AnonymousVMObject> vm_object, AsyncBlockDeviceRequest& request, size_t device_block_size)
     : m_vm_object(move(vm_object))
 {
-    auto region_or_error = MM.allocate_kernel_region_with_vmobject(m_vm_object, page_round_up((request.block_count() * device_block_size)), "AHCI Scattered DMA", Region::Access::Read | Region::Access::Write, Region::Cacheable::Yes);
+    auto region_or_error = MM.allocate_kernel_region_with_vmobject(m_vm_object, page_round_up((request.block_count() * device_block_size)).release_value_but_fixme_should_propagate_errors(), "AHCI Scattered DMA", Region::Access::Read | Region::Access::Write, Region::Cacheable::Yes);
     if (region_or_error.is_error())
         TODO();
     m_dma_region = region_or_error.release_value();
@@ -27,7 +27,7 @@ template<typename T>
 static TypedMapping<T> map_typed(PhysicalAddress paddr, size_t length, Region::Access access = Region::Access::Read)
 {
     TypedMapping<T> table;
-    size_t mapping_length = page_round_up(paddr.offset_in_page() + length);
+    size_t mapping_length = page_round_up(paddr.offset_in_page() + length).release_value_but_fixme_should_propagate_errors();
     auto region_or_error = MM.allocate_kernel_region(paddr.page_base(), mapping_length, {}, access);
     if (region_or_error.is_error())
         TODO();
@@ -38,18 +38,11 @@ VirtualRange VirtualRange::intersect(VirtualRange const& other) const
 
 ErrorOr<VirtualRange> VirtualRange::expand_to_page_boundaries(FlatPtr address, size_t size)
 {
-    if (page_round_up_would_wrap(size))
-        return EINVAL;
-
     if ((address + size) < address)
         return EINVAL;
 
-    if (page_round_up_would_wrap(address + size))
-        return EINVAL;
-
     auto base = VirtualAddress { address }.page_base();
-    auto end = page_round_up(address + size);
-
+    auto end = TRY(page_round_up(address + size));
     return VirtualRange { base, end - base.get() };
 }
 
@@ -209,7 +209,7 @@ UNMAP_AFTER_INIT bool E1000ENetworkAdapter::initialize()
     enable_bus_mastering(pci_address());
 
     size_t mmio_base_size = PCI::get_BAR_space_size(pci_address(), 0);
-    auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), Memory::page_round_up(mmio_base_size), "E1000e MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
+    auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), Memory::page_round_up(mmio_base_size).release_value_but_fixme_should_propagate_errors(), "E1000e MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
     if (region_or_error.is_error())
         return false;
     m_mmio_region = region_or_error.release_value();
@@ -200,7 +200,7 @@ UNMAP_AFTER_INIT bool E1000NetworkAdapter::initialize()
     m_io_base = IOAddress(PCI::get_BAR1(pci_address()) & ~1);
 
     size_t mmio_base_size = PCI::get_BAR_space_size(pci_address(), 0);
-    auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), Memory::page_round_up(mmio_base_size), "E1000 MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
+    auto region_or_error = MM.allocate_kernel_region(PhysicalAddress(page_base_of(PCI::get_BAR0(pci_address()))), Memory::page_round_up(mmio_base_size).release_value_but_fixme_should_propagate_errors(), "E1000 MMIO", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No);
     if (region_or_error.is_error())
         return false;
     m_mmio_region = region_or_error.release_value();
@@ -228,8 +228,8 @@ UNMAP_AFTER_INIT E1000NetworkAdapter::E1000NetworkAdapter(PCI::Address address,
     : NetworkAdapter(move(interface_name))
     , PCI::Device(address)
     , IRQHandler(irq)
-    , m_rx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(e1000_rx_desc) * number_of_rx_descriptors + 16), "E1000 RX Descriptors", Memory::Region::Access::ReadWrite).release_value())
-    , m_tx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(e1000_tx_desc) * number_of_tx_descriptors + 16), "E1000 TX Descriptors", Memory::Region::Access::ReadWrite).release_value())
+    , m_rx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(e1000_rx_desc) * number_of_rx_descriptors + 16).release_value_but_fixme_should_propagate_errors(), "E1000 RX Descriptors", Memory::Region::Access::ReadWrite).release_value())
+    , m_tx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(e1000_tx_desc) * number_of_tx_descriptors + 16).release_value_but_fixme_should_propagate_errors(), "E1000 TX Descriptors", Memory::Region::Access::ReadWrite).release_value())
 {
 }
 
@@ -131,8 +131,8 @@ UNMAP_AFTER_INIT RTL8139NetworkAdapter::RTL8139NetworkAdapter(PCI::Address addre
     , PCI::Device(address)
     , IRQHandler(irq)
     , m_io_base(PCI::get_BAR0(pci_address()) & ~1)
-    , m_rx_buffer(MM.allocate_contiguous_kernel_region(Memory::page_round_up(RX_BUFFER_SIZE + PACKET_SIZE_MAX), "RTL8139 RX", Memory::Region::Access::ReadWrite).release_value())
-    , m_packet_buffer(MM.allocate_contiguous_kernel_region(Memory::page_round_up(PACKET_SIZE_MAX), "RTL8139 Packet buffer", Memory::Region::Access::ReadWrite).release_value())
+    , m_rx_buffer(MM.allocate_contiguous_kernel_region(Memory::page_round_up(RX_BUFFER_SIZE + PACKET_SIZE_MAX).release_value_but_fixme_should_propagate_errors(), "RTL8139 RX", Memory::Region::Access::ReadWrite).release_value())
+    , m_packet_buffer(MM.allocate_contiguous_kernel_region(Memory::page_round_up(PACKET_SIZE_MAX).release_value_but_fixme_should_propagate_errors(), "RTL8139 Packet buffer", Memory::Region::Access::ReadWrite).release_value())
 {
     m_tx_buffers.ensure_capacity(RTL8139_TX_BUFFER_COUNT);
 
@@ -149,7 +149,7 @@ UNMAP_AFTER_INIT RTL8139NetworkAdapter::RTL8139NetworkAdapter(PCI::Address addre
     dbgln("RTL8139: RX buffer: {}", m_rx_buffer->physical_page(0)->paddr());
 
     for (int i = 0; i < RTL8139_TX_BUFFER_COUNT; i++) {
-        m_tx_buffers.append(MM.allocate_contiguous_kernel_region(Memory::page_round_up(TX_BUFFER_SIZE), "RTL8139 TX", Memory::Region::Access::Write | Memory::Region::Access::Read).release_value());
+        m_tx_buffers.append(MM.allocate_contiguous_kernel_region(Memory::page_round_up(TX_BUFFER_SIZE).release_value_but_fixme_should_propagate_errors(), "RTL8139 TX", Memory::Region::Access::Write | Memory::Region::Access::Read).release_value());
         dbgln("RTL8139: TX buffer {}: {}", i, m_tx_buffers[i]->physical_page(0)->paddr());
     }
 
@@ -246,8 +246,8 @@ UNMAP_AFTER_INIT RTL8168NetworkAdapter::RTL8168NetworkAdapter(PCI::Address addre
     , PCI::Device(address)
     , IRQHandler(irq)
     , m_io_base(PCI::get_BAR0(pci_address()) & ~1)
-    , m_rx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(TXDescriptor) * (number_of_rx_descriptors + 1)), "RTL8168 RX", Memory::Region::Access::ReadWrite).release_value())
-    , m_tx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(RXDescriptor) * (number_of_tx_descriptors + 1)), "RTL8168 TX", Memory::Region::Access::ReadWrite).release_value())
+    , m_rx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(TXDescriptor) * (number_of_rx_descriptors + 1)).release_value_but_fixme_should_propagate_errors(), "RTL8168 RX", Memory::Region::Access::ReadWrite).release_value())
+    , m_tx_descriptors_region(MM.allocate_contiguous_kernel_region(Memory::page_round_up(sizeof(RXDescriptor) * (number_of_tx_descriptors + 1)).release_value_but_fixme_should_propagate_errors(), "RTL8168 TX", Memory::Region::Access::ReadWrite).release_value())
 {
     dmesgln("RTL8168: Found @ {}", pci_address());
     dmesgln("RTL8168: I/O port base: {}", m_io_base);
@@ -1095,7 +1095,7 @@ UNMAP_AFTER_INIT void RTL8168NetworkAdapter::initialize_rx_descriptors()
     auto* rx_descriptors = (RXDescriptor*)m_rx_descriptors_region->vaddr().as_ptr();
     for (size_t i = 0; i < number_of_rx_descriptors; ++i) {
         auto& descriptor = rx_descriptors[i];
-        auto region = MM.allocate_contiguous_kernel_region(Memory::page_round_up(RX_BUFFER_SIZE), "RTL8168 RX buffer", Memory::Region::Access::ReadWrite).release_value();
+        auto region = MM.allocate_contiguous_kernel_region(Memory::page_round_up(RX_BUFFER_SIZE).release_value_but_fixme_should_propagate_errors(), "RTL8168 RX buffer", Memory::Region::Access::ReadWrite).release_value();
         memset(region->vaddr().as_ptr(), 0, region->size()); // MM already zeros out newly allocated pages, but we do it again in case that ever changes
         m_rx_buffers_regions.append(move(region));
 
@@ -1113,7 +1113,7 @@ UNMAP_AFTER_INIT void RTL8168NetworkAdapter::initialize_tx_descriptors()
     auto* tx_descriptors = (TXDescriptor*)m_tx_descriptors_region->vaddr().as_ptr();
     for (size_t i = 0; i < number_of_tx_descriptors; ++i) {
         auto& descriptor = tx_descriptors[i];
-        auto region = MM.allocate_contiguous_kernel_region(Memory::page_round_up(TX_BUFFER_SIZE), "RTL8168 TX buffer", Memory::Region::Access::ReadWrite).release_value();
+        auto region = MM.allocate_contiguous_kernel_region(Memory::page_round_up(TX_BUFFER_SIZE).release_value_but_fixme_should_propagate_errors(), "RTL8168 TX buffer", Memory::Region::Access::ReadWrite).release_value();
         memset(region->vaddr().as_ptr(), 0, region->size()); // MM already zeros out newly allocated pages, but we do it again in case that ever changes
         m_tx_buffers_regions.append(move(region));
 
@@ -132,7 +132,7 @@ AHCI::HBADefinedCapabilities AHCIController::capabilities() const
 
 NonnullOwnPtr<Memory::Region> AHCIController::default_hba_region() const
 {
-    return MM.allocate_kernel_region(PhysicalAddress(PCI::get_BAR5(pci_address())).page_base(), Memory::page_round_up(sizeof(AHCI::HBA)), "AHCI HBA", Memory::Region::Access::ReadWrite).release_value();
+    return MM.allocate_kernel_region(PhysicalAddress(PCI::get_BAR5(pci_address())).page_base(), Memory::page_round_up(sizeof(AHCI::HBA)).release_value_but_fixme_should_propagate_errors(), "AHCI HBA", Memory::Region::Access::ReadWrite).release_value();
 }
 
 AHCIController::~AHCIController()
@@ -184,7 +184,7 @@ void AHCIPort::eject()
     // handshake error bit in PxSERR register if CFL is incorrect.
     command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P | AHCI::CommandHeaderAttributes::C | AHCI::CommandHeaderAttributes::A;
 
-    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)), "AHCI Command Table", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
+    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
     auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
     memset(const_cast<u8*>(command_table.command_fis), 0, 64);
     auto& fis = *(volatile FIS::HostToDevice::Register*)command_table.command_fis;
@@ -444,7 +444,7 @@ void AHCIPort::set_sleep_state() const
 size_t AHCIPort::calculate_descriptors_count(size_t block_count) const
 {
     VERIFY(m_connected_device);
-    size_t needed_dma_regions_count = Memory::page_round_up((block_count * m_connected_device->block_size())) / PAGE_SIZE;
+    size_t needed_dma_regions_count = Memory::page_round_up((block_count * m_connected_device->block_size())).value() / PAGE_SIZE;
     VERIFY(needed_dma_regions_count <= m_dma_buffers.size());
     return needed_dma_regions_count;
 }
@@ -548,7 +548,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
 
     dbgln_if(AHCI_DEBUG, "AHCI Port {}: CLE: ctba={:#08x}, ctbau={:#08x}, prdbc={:#08x}, prdtl={:#04x}, attributes={:#04x}", representative_port_index(), (u32)command_list_entries[unused_command_header.value()].ctba, (u32)command_list_entries[unused_command_header.value()].ctbau, (u32)command_list_entries[unused_command_header.value()].prdbc, (u16)command_list_entries[unused_command_header.value()].prdtl, (u16)command_list_entries[unused_command_header.value()].attributes);
 
-    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)), "AHCI Command Table", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
+    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table", Memory::Region::Access::ReadWrite, Memory::Region::Cacheable::No).release_value();
     auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
 
     dbgln_if(AHCI_DEBUG, "AHCI Port {}: Allocated command table at {}", representative_port_index(), command_table_region->vaddr());
@@ -632,7 +632,7 @@ bool AHCIPort::identify_device()
     // QEMU doesn't care if we don't set the correct CFL field in this register, real hardware will set an handshake error bit in PxSERR register.
     command_list_entries[unused_command_header.value()].attributes = (size_t)FIS::DwordCount::RegisterHostToDevice | AHCI::CommandHeaderAttributes::P;
 
-    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)), "AHCI Command Table", Memory::Region::Access::ReadWrite).release_value();
+    auto command_table_region = MM.allocate_kernel_region(m_command_table_pages[unused_command_header.value()].paddr().page_base(), Memory::page_round_up(sizeof(AHCI::CommandTable)).value(), "AHCI Command Table", Memory::Region::Access::ReadWrite).release_value();
     auto& command_table = *(volatile AHCI::CommandTable*)command_table_region->vaddr().as_ptr();
     memset(const_cast<u8*>(command_table.command_fis), 0, 64);
     command_table.descriptors[0].base_high = 0;
@@ -43,7 +43,7 @@ RamdiskController::RamdiskController()
     size_t count = 0;
     for (auto& used_memory_range : MM.used_memory_ranges()) {
         if (used_memory_range.type == Memory::UsedMemoryRangeType::BootModule) {
-            size_t length = Memory::page_round_up(used_memory_range.end.get()) - used_memory_range.start.get();
+            size_t length = Memory::page_round_up(used_memory_range.end.get()).release_value_but_fixme_should_propagate_errors() - used_memory_range.start.get();
             auto region_or_error = MM.allocate_kernel_region(used_memory_range.start, length, "Ramdisk", Memory::Region::Access::ReadWrite);
             if (region_or_error.is_error()) {
                 dmesgln("RamdiskController: Failed to allocate kernel region of size {}", length);
@@ -166,8 +166,8 @@ static ErrorOr<RequiredLoadRange> get_required_load_range(OpenFileDescription& p
     auto vmobject = TRY(Memory::SharedInodeVMObject::try_create_with_inode(inode));
 
     size_t executable_size = inode.size();
-
-    auto region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, Memory::page_round_up(executable_size), "ELF memory range calculation", Memory::Region::Access::Read));
+    size_t rounded_executable_size = TRY(Memory::page_round_up(executable_size));
+    auto region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, rounded_executable_size, "ELF memory range calculation", Memory::Region::Access::Read));
     auto elf_image = ELF::Image(region->vaddr().as_ptr(), executable_size);
     if (!elf_image.is_valid()) {
         return EINVAL;
@@ -261,8 +261,9 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
     }
 
     size_t executable_size = inode.size();
+    size_t rounded_executable_size = TRY(Memory::page_round_up(executable_size));
 
-    auto executable_region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, Memory::page_round_up(executable_size), "ELF loading", Memory::Region::Access::Read));
+    auto executable_region = TRY(MM.allocate_kernel_region_with_vmobject(*vmobject, rounded_executable_size, "ELF loading", Memory::Region::Access::Read));
     auto elf_image = ELF::Image(executable_region->vaddr().as_ptr(), executable_size);
 
     if (!elf_image.is_valid())
@@ -313,7 +314,8 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
             auto region_name = String::formatted("{} (data-{}{})", elf_name, program_header.is_readable() ? "r" : "", program_header.is_writable() ? "w" : "");
 
             auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
-            auto range_end = VirtualAddress { Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()) };
+            size_t rounded_range_end = TRY(Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()));
+            auto range_end = VirtualAddress { rounded_range_end };
 
             auto range = TRY(new_space->try_allocate_range(range_base, range_end.get() - range_base.get()));
             auto region = TRY(new_space->allocate_region(range, region_name, prot, AllocationStrategy::Reserve));
@@ -349,7 +351,8 @@ static ErrorOr<LoadResult> load_elf_object(NonnullOwnPtr<Memory::AddressSpace> n
             prot |= PROT_EXEC;
 
         auto range_base = VirtualAddress { Memory::page_round_down(program_header.vaddr().offset(load_offset).get()) };
-        auto range_end = VirtualAddress { Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()) };
+        size_t rounded_range_end = TRY(Memory::page_round_up(program_header.vaddr().offset(load_offset).offset(program_header.size_in_memory()).get()));
+        auto range_end = VirtualAddress { rounded_range_end };
         auto range = TRY(new_space->try_allocate_range(range_base, range_end.get() - range_base.get()));
         auto region = TRY(new_space->allocate_region_with_vmobject(range, *vmobject, program_header.offset(), elf_name->view(), prot, true));
 
@@ -142,10 +142,8 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> use
     if (alignment & ~PAGE_MASK)
         return EINVAL;
 
-    if (Memory::page_round_up_would_wrap(size))
-        return EINVAL;
-
-    if (!Memory::is_user_range(VirtualAddress(addr), Memory::page_round_up(size)))
+    size_t rounded_size = TRY(Memory::page_round_up(size));
+    if (!Memory::is_user_range(VirtualAddress(addr), rounded_size))
         return EFAULT;
 
     OwnPtr<KString> name;
@@ -188,7 +186,7 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> use
 
     auto range = TRY([&]() -> ErrorOr<Memory::VirtualRange> {
         if (map_randomized)
-            return address_space().page_directory().range_allocator().try_allocate_randomized(Memory::page_round_up(size), alignment);
+            return address_space().page_directory().range_allocator().try_allocate_randomized(rounded_size, alignment);
 
         // If MAP_FIXED is specified, existing mappings that intersect the requested range are removed.
         if (map_fixed)
@@ -208,9 +206,9 @@ ErrorOr<FlatPtr> Process::sys$mmap(Userspace<const Syscall::SC_mmap_params*> use
         auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve;
         RefPtr<Memory::AnonymousVMObject> vmobject;
         if (flags & MAP_PURGEABLE) {
-            vmobject = TRY(Memory::AnonymousVMObject::try_create_purgeable_with_size(Memory::page_round_up(size), strategy));
+            vmobject = TRY(Memory::AnonymousVMObject::try_create_purgeable_with_size(rounded_size, strategy));
         } else {
-            vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(Memory::page_round_up(size), strategy));
+            vmobject = TRY(Memory::AnonymousVMObject::try_create_with_size(rounded_size, strategy));
         }
 
         region = TRY(address_space().allocate_region_with_vmobject(range, vmobject.release_nonnull(), 0, {}, prot, map_shared));
@@ -587,15 +585,11 @@ ErrorOr<FlatPtr> Process::sys$msync(Userspace<void*> address, size_t size, int f
     if (address.ptr() % PAGE_SIZE != 0)
         return EINVAL;
 
-    if (Memory::page_round_up_would_wrap(size)) {
-        return EINVAL;
-    }
-
     // Note: This is not specified
-    size = Memory::page_round_up(size);
+    auto rounded_size = TRY(Memory::page_round_up(size));
 
     // FIXME: We probably want to sync all mappings in the address+size range.
-    auto* region = address_space().find_region_containing(Memory::VirtualRange { address.vaddr(), size });
+    auto* region = address_space().find_region_containing(Memory::VirtualRange { address.vaddr(), rounded_size });
     // All regions from address upto address+size shall be mapped
     if (!region)
         return ENOMEM;
@@ -131,7 +131,7 @@ UNMAP_AFTER_INIT void VirtualConsole::initialize()
 
     // Allocate twice of the max row * max column * sizeof(Cell) to ensure we can have some sort of history mechanism...
     auto size = GraphicsManagement::the().console()->max_column() * GraphicsManagement::the().console()->max_row() * sizeof(Cell) * 2;
-    m_cells = MM.allocate_kernel_region(Memory::page_round_up(size), "Virtual Console Cells", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();
+    m_cells = MM.allocate_kernel_region(Memory::page_round_up(size).release_value_but_fixme_should_propagate_errors(), "Virtual Console Cells", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();
 
     // Add the lines, so we also ensure they will be flushed now
     for (size_t row = 0; row < rows(); row++) {
@@ -150,7 +150,7 @@ void VirtualConsole::refresh_after_resolution_change()
     // Note: From now on, columns() and rows() are updated with the new settings.
 
     auto size = GraphicsManagement::the().console()->max_column() * GraphicsManagement::the().console()->max_row() * sizeof(Cell) * 2;
-    auto new_cells = MM.allocate_kernel_region(Memory::page_round_up(size), "Virtual Console Cells", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();
+    auto new_cells = MM.allocate_kernel_region(Memory::page_round_up(size).release_value_but_fixme_should_propagate_errors(), "Virtual Console Cells", Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow).release_value();
 
     if (rows() < old_rows_count) {
         m_lines.shrink(rows());