Kernel: Assert that copy_to/from_user() are called with user addresses

This will panic the kernel immediately if these functions are misused,
so we can catch and fix the misuse.

This patch fixes three such misuses:

    - create_signal_trampolines() writes to a user-accessible page
      above the 3GB address mark. We should really get rid of this
      page but that's a whole other thing.

    - CoW faults need to use copy_from_user rather than copy_to_user
      since it's the *source* pointer that points to user memory.

    - Inode faults need to use memcpy rather than copy_to_user since
      we're copying a kernel stack buffer into a quickmapped page.

This should make the copy_to/from_user() functions slightly less useful
for exploitation. Before this, they were essentially just glorified
memcpy() with SMAP disabled. :^)
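
For flavor, here is a minimal userspace sketch of the pattern this commit
introduces (the 0xc0000000 user/kernel split and the range check mirror the
diff below; assert() stands in for the kernel's panicking ASSERT, and SMAP
handling is left out since it can't be modeled outside the kernel):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Simplified model of the i386 3GB user/kernel split from the diff.
    static constexpr uintptr_t kernel_base = 0xc0000000;

    static bool is_user_address(uintptr_t vaddr) { return vaddr < kernel_base; }

    static bool is_user_range(uintptr_t vaddr, size_t size)
    {
        if (vaddr + size < vaddr) // a range that wraps around is never valid
            return false;
        return is_user_address(vaddr) && is_user_address(vaddr + size);
    }

    void copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
    {
        assert(is_user_range((uintptr_t)dest_ptr, n)); // die loudly on misuse
        memcpy(dest_ptr, src_ptr, n);
    }

    void copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
    {
        assert(is_user_range((uintptr_t)src_ptr, n)); // the *source* is user memory
        memcpy(dest_ptr, src_ptr, n);
    }
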
Andreas Kling 2020-01-19 09:14:14 +01:00
parent 2cd212e5df
commit f7b394e9a1
6 changed files with 33 additions and 15 deletions

@@ -1380,8 +1380,11 @@ void create_signal_trampolines()
     u8* trampoline_end = (u8*)asm_signal_trampoline_end;
     size_t trampoline_size = trampoline_end - trampoline;
 
-    u8* code_ptr = (u8*)trampoline_region->vaddr().as_ptr();
-    copy_to_user(code_ptr, trampoline, trampoline_size);
+    {
+        SmapDisabler disabler;
+        u8* code_ptr = (u8*)trampoline_region->vaddr().as_ptr();
+        memcpy(code_ptr, trampoline, trampoline_size);
+    }
 
     trampoline_region->set_writable(false);
     trampoline_region->remap();
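
Note that the new braces here aren't cosmetic: assuming SmapDisabler is the
kernel's usual RAII guard (it re-arms SMAP in its destructor), this scope
re-enables SMAP before the region is made read-only and remapped, so
supervisor access to user pages stays open only for the single memcpy().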

@@ -30,6 +30,7 @@
 #include <Kernel/Arch/i386/CPU.h>
 #include <Kernel/Heap/kmalloc.h>
 #include <Kernel/StdLib.h>
+#include <Kernel/VM/MemoryManager.h>
 
 String copy_string_from_user(const char* user_str, size_t user_str_size)
 {
@@ -40,16 +41,18 @@ String copy_string_from_user(const char* user_str, size_t user_str_size)
 
 extern "C" {
 
-void* copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
+void copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
 {
+    ASSERT(is_user_range(VirtualAddress(dest_ptr), n));
     SmapDisabler disabler;
-    auto* ptr = memcpy(dest_ptr, src_ptr, n);
-    return ptr;
+    memcpy(dest_ptr, src_ptr, n);
 }
 
-void* copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
+void copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
 {
-    return copy_to_user(dest_ptr, src_ptr, n);
+    ASSERT(is_user_range(VirtualAddress(src_ptr), n));
+    SmapDisabler disabler;
+    memcpy(dest_ptr, src_ptr, n);
 }
 
 void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
@@ -105,11 +108,11 @@ char* strncpy(char* dest, const char* src, size_t n)
     return dest;
 }
 
-void* memset_user(void* dest_ptr, int c, size_t n)
+void memset_user(void* dest_ptr, int c, size_t n)
 {
+    ASSERT(is_user_range(VirtualAddress(dest_ptr), n));
     SmapDisabler disabler;
-    auto* ptr = memset(dest_ptr, c, n);
-    return ptr;
+    memset(dest_ptr, c, n);
 }
 
 void* memset(void* dest_ptr, int c, size_t n)

@@ -42,9 +42,9 @@ extern "C" {
 static_assert(sizeof(size_t) == 4);
 
-void* copy_to_user(void*, const void*, size_t);
-void* copy_from_user(void*, const void*, size_t);
-void* memset_user(void*, int, size_t);
+void copy_to_user(void*, const void*, size_t);
+void copy_from_user(void*, const void*, size_t);
+void memset_user(void*, int, size_t);
 void* memcpy(void*, const void*, size_t);
 char* strcpy(char*, const char*);

@@ -214,3 +214,10 @@ inline bool is_user_address(VirtualAddress vaddr)
 {
     return vaddr.get() < 0xc0000000;
 }
+
+inline bool is_user_range(VirtualAddress vaddr, size_t size)
+{
+    if (vaddr.offset(size) < vaddr)
+        return false;
+    return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
+}

@@ -424,7 +424,7 @@ PageFaultResponse Region::handle_cow_fault(size_t page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
     dbgprintf(" >> COW P%p <- P%p\n", physical_page->paddr().get(), physical_page_to_copy->paddr().get());
 #endif
-    copy_to_user(dest_ptr, src_ptr, PAGE_SIZE);
+    copy_from_user(dest_ptr, src_ptr, PAGE_SIZE);
     vmobject_physical_page_entry = move(physical_page);
     MM.unquickmap_page();
     set_should_cow(page_index_in_region, false);
@@ -481,7 +481,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     }
 
     u8* dest_ptr = MM.quickmap_page(*vmobject_physical_page_entry);
-    copy_to_user(dest_ptr, page_buffer, PAGE_SIZE);
+    memcpy(dest_ptr, page_buffer, PAGE_SIZE);
     MM.unquickmap_page();
     remap_page(page_index_in_region);
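
A quick way to read the two fixes above, given that quickmapped pages live on
the kernel side of the split (a hypothetical summary, not code from the patch):

    copy_to_user(dest, src, n)   // asserts on dest: writing *to* user memory
    copy_from_user(dest, src, n) // asserts on src: reading *from* user memory
    memcpy(dest, src, n)         // kernel-to-kernel, no user pointer at all

In handle_cow_fault() the quickmapped page is the destination and the faulting
process's page is the source, so copy_from_user() is the right call; in
handle_inode_fault() both the stack buffer and the quickmapped page are
kernel-side, so plain memcpy() is correct.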

@@ -37,6 +37,11 @@ public:
     {
     }
 
+    explicit VirtualAddress(const void* address)
+        : m_address((u32)address)
+    {
+    }
+
     bool is_null() const { return m_address == 0; }
     bool is_page_aligned() const { return (m_address & 0xfff) == 0; }
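
Presumably this constructor was added so the new ASSERTs can wrap raw pointers
without a cast at every call site:

    // without it: ASSERT(is_user_range(VirtualAddress((u32)dest_ptr), n));
    // with it:    ASSERT(is_user_range(VirtualAddress(dest_ptr), n));

Keeping it explicit prevents pointers from silently converting to addresses
in other contexts.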