Give each task its own page directory.
This isn't finished but I'll commit as I go. We need to get to where context switching only needs to change CR3 and everything's ready to go. My basic idea is:

- The first 4 kB is off-limits. This catches null dereferences.
- Up to the 4 MB mark is identity-mapped and kernel-only.
- The rest is available to everyone!

While the first 4 MB is only available to the kernel, it's still mapped in every process, for convenience when entering the kernel.
This commit is contained in:
parent cddd2f37e9
commit 1da0a7c949

Notes: sideshowbarker 2024-07-19 18:35:19 +09:00
Author: https://github.com/awesomekling
Commit: https://github.com/SerenityOS/serenity/commit/1da0a7c949d
7 changed files with 47 additions and 15 deletions
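A minimal sketch of the layout the commit message describes (illustrative only — the typedef, constant and helper below are not part of this patch; it assumes the 4 kB page / 4 MB page-directory-entry granularity used throughout the diff):

    // Sketch of the intended per-task address space (illustrative, not from the patch).
    typedef unsigned int dword;
    static const dword MB = 1024 * 1024;

    // [0, 4 kB)     mapped but marked not-present, so null dereferences fault immediately.
    // [4 kB, 4 MB)  identity-mapped, supervisor-only, shared by every task.
    // [4 MB, ...)   available for per-task mappings via the task's own page directory.
    static bool isAvailableToUser(dword linearAddress)
    {
        return linearAddress >= 4 * MB;
    }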
@@ -26,6 +26,14 @@ MemoryManager::~MemoryManager()
 {
 }
 
+void MemoryManager::populatePageDirectory(Task& task)
+{
+    memset(task.m_pageDirectory, 0, 4096);
+
+    task.m_pageDirectory[0] = m_pageDirectory[0];
+    task.m_pageDirectory[1] = m_pageDirectory[1];
+}
+
 void MemoryManager::initializePaging()
 {
     static_assert(sizeof(MemoryManager::PageDirectoryEntry) == 4);
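For context (not part of the patch): with 4 kB pages, one page-directory entry spans 1024 page-table entries × 4096 bytes = 4 MB, so copying entries [0] and [1] above gives every new task the same low 8 MB of kernel mappings that initializePaging sets up below:

    2 PDEs × 1024 PTEs × 4096 bytes = 8 MB shared with the kernel's own page directory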
@@ -41,6 +49,7 @@ void MemoryManager::initializePaging()
     // Make null dereferences crash.
     protectMap(LinearAddress(0), 4 * KB);
 
+    // The bottom 4 MB are identity mapped & supervisor only. Every process shares this mapping.
     identityMap(LinearAddress(4096), 4 * MB);
 
     for (size_t i = (4 * MB) + PAGE_SIZE; i < (8 * MB); i += PAGE_SIZE) {
@@ -63,13 +72,13 @@ void* MemoryManager::allocatePageTable()
     return (void*)address;
 }
 
-auto MemoryManager::ensurePTE(LinearAddress linearAddress) -> PageTableEntry
+auto MemoryManager::ensurePTE(dword* pageDirectory, LinearAddress linearAddress) -> PageTableEntry
 {
     ASSERT_INTERRUPTS_DISABLED();
     dword pageDirectoryIndex = (linearAddress.get() >> 22) & 0x3ff;
     dword pageTableIndex = (linearAddress.get() >> 12) & 0x3ff;
 
-    PageDirectoryEntry pde = PageDirectoryEntry(&m_pageDirectory[pageDirectoryIndex]);
+    PageDirectoryEntry pde = PageDirectoryEntry(&pageDirectory[pageDirectoryIndex]);
     if (!pde.isPresent()) {
 #ifdef MM_DEBUG
         kprintf("MM: PDE %u not present, allocating\n", pageDirectoryIndex);
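For illustration (the address is arbitrary, not from the patch), ensurePTE() splits a 32-bit linear address into a 10-bit directory index, a 10-bit table index and a 12-bit page offset:

    // linearAddress      = 0x00403000 (4 MB + 12 kB)
    // pageDirectoryIndex = (0x00403000 >> 22) & 0x3ff = 1  -> second 4 MB slot in the directory
    // pageTableIndex     = (0x00403000 >> 12) & 0x3ff = 3  -> fourth 4 kB page in that table
    // offset within page =  0x00403000 & 0xfff        = 0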
@@ -103,7 +112,7 @@ void MemoryManager::protectMap(LinearAddress linearAddress, size_t length)
     // FIXME: ASSERT(linearAddress is 4KB aligned);
     for (dword offset = 0; offset < length; offset += 4096) {
         auto pteAddress = linearAddress.offset(offset);
-        auto pte = ensurePTE(pteAddress);
+        auto pte = ensurePTE(m_pageDirectory, pteAddress);
         pte.setPhysicalPageBase(pteAddress.get());
         pte.setUserAllowed(false);
         pte.setPresent(false);
@@ -118,7 +127,7 @@ void MemoryManager::identityMap(LinearAddress linearAddress, size_t length)
     // FIXME: ASSERT(linearAddress is 4KB aligned);
     for (dword offset = 0; offset < length; offset += 4096) {
         auto pteAddress = linearAddress.offset(offset);
-        auto pte = ensurePTE(pteAddress);
+        auto pte = ensurePTE(m_pageDirectory, pteAddress);
         pte.setPhysicalPageBase(pteAddress.get());
         pte.setUserAllowed(true);
         pte.setPresent(true);
@@ -195,7 +204,7 @@ Vector<PhysicalAddress> MemoryManager::allocatePhysicalPages(size_t count)
 byte* MemoryManager::quickMapOnePage(PhysicalAddress physicalAddress)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto pte = ensurePTE(LinearAddress(4 * MB));
+    auto pte = ensurePTE(m_pageDirectory, LinearAddress(4 * MB));
     kprintf("MM: quickmap %x @ %x {pte @ %p}\n", physicalAddress.get(), 4*MB, pte.ptr());
     pte.setPhysicalPageBase(physicalAddress.pageBase());
     pte.setPresent(true);
@@ -223,7 +232,7 @@ bool MemoryManager::unmapRegion(Task& task, Task::Region& region)
     auto& zone = *region.zone;
     for (size_t i = 0; i < zone.m_pages.size(); ++i) {
         auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory, laddr);
         pte.setPhysicalPageBase(0);
         pte.setPresent(false);
         pte.setWritable(false);
@@ -238,12 +247,11 @@ bool MemoryManager::unmapSubregion(Task& task, Task::Subregion& subregion)
 {
     InterruptDisabler disabler;
     auto& region = *subregion.region;
-    auto& zone = *region.zone;
     size_t numPages = subregion.size / 4096;
     ASSERT(numPages);
     for (size_t i = 0; i < numPages; ++i) {
         auto laddr = subregion.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory, laddr);
         pte.setPhysicalPageBase(0);
         pte.setPresent(false);
         pte.setWritable(false);
@@ -278,7 +286,7 @@ bool MemoryManager::mapSubregion(Task& task, Task::Subregion& subregion)
     ASSERT(numPages);
     for (size_t i = 0; i < numPages; ++i) {
         auto laddr = subregion.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory, laddr);
         pte.setPhysicalPageBase(zone.m_pages[firstPage + i].get());
         pte.setPresent(true);
         pte.setWritable(true);
@@ -295,11 +303,11 @@ bool MemoryManager::mapRegion(Task& task, Task::Region& region)
     auto& zone = *region.zone;
     for (size_t i = 0; i < zone.m_pages.size(); ++i) {
         auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensurePTE(laddr);
+        auto pte = ensurePTE(task.m_pageDirectory,laddr);
         pte.setPhysicalPageBase(zone.m_pages[i].get());
         pte.setPresent(true);
         pte.setWritable(true);
-        pte.setUserAllowed(!task.isRing0());
+        pte.setUserAllowed(!task.isRing0()); // FIXME: This doesn't make sense. Allow USER if the TASK is RING0? Wh...what?
         flushTLB(laddr);
         //kprintf("MM: >> Mapped L%x => P%x <<\n", laddr, zone.m_pages[i].get());
     }
@@ -65,6 +65,8 @@ public:
     void registerZone(Zone&);
     void unregisterZone(Zone&);
 
+    void populatePageDirectory(Task&);
+
 private:
     MemoryManager();
     ~MemoryManager();
@@ -158,7 +160,7 @@ private:
         dword* m_pte;
     };
 
-    PageTableEntry ensurePTE(LinearAddress);
+    PageTableEntry ensurePTE(dword* pageDirectory, LinearAddress);
 
     dword* m_pageDirectory;
     dword* m_pageTableZero;
@@ -172,9 +172,9 @@ ByteBuffer procfs$mounts()
 ByteBuffer procfs$kmalloc()
 {
     InterruptDisabler disabler;
-    auto buffer = ByteBuffer::createUninitialized(128);
+    auto buffer = ByteBuffer::createUninitialized(256);
     char* ptr = (char*)buffer.pointer();
-    ptr += ksprintf(ptr, "eternal: %u\nallocated: %u\nfree: %u\n", kmalloc_sum_eternal, sum_alloc, sum_free);
+    ptr += ksprintf(ptr, "eternal: %u\npage-aligned: %u\nallocated: %u\nfree: %u\n", kmalloc_sum_eternal, kmalloc_sum_page_aligned, sum_alloc, sum_free);
     buffer.trim(ptr - (char*)buffer.pointer());
     return buffer;
 }
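With the new format string, reading the corresponding proc entry (presumably /proc/kmalloc) yields four lines instead of three; something like the following, with made-up numbers:

    eternal: 122880
    page-aligned: 8192
    allocated: 53248
    free: 995328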
@@ -408,6 +408,9 @@ Task::Task(String&& name, uid_t uid, gid_t gid, pid_t parentPID, RingLevel ring,
     , m_tty(tty)
     , m_parentPID(parentPID)
 {
+    m_pageDirectory = (dword*)kmalloc_page_aligned(4096);
+    MM.populatePageDirectory(*this);
+
     if (tty) {
         m_fileHandles.append(tty->open(O_RDONLY)); // stdin
         m_fileHandles.append(tty->open(O_WRONLY)); // stdout
@@ -449,7 +452,7 @@ Task::Task(String&& name, uid_t uid, gid_t gid, pid_t parentPID, RingLevel ring,
     m_tss.ss = ss;
     m_tss.cs = cs;
 
-    m_tss.cr3 = MM.pageDirectoryBase().get();
+    m_tss.cr3 = (dword)m_pageDirectory;
 
     if (isRing0()) {
         // FIXME: This memory is leaked.
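Note that kmalloc_page_aligned() (added further down in this commit) hands out memory from the identity-mapped low region, so the pointer stored in m_pageDirectory doubles as the physical address CR3 expects. A sketch of the CR3 load the commit message is working towards (the helper is illustrative, not part of the patch):

    // Illustrative only: switching address spaces becomes a single CR3 load.
    static inline void loadTaskPageDirectory(unsigned int pageDirectoryPhysical)
    {
        asm volatile("movl %0, %%cr3" : : "r"(pageDirectoryPhysical) : "memory");
    }
    // e.g. loadTaskPageDirectory(task.m_tss.cr3); just before resuming the task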
@@ -152,6 +152,8 @@ private:
 
     void allocateLDT();
 
+    dword* m_pageDirectory { nullptr };
+
     Task* m_prev { nullptr };
     Task* m_next { nullptr };
 
@@ -22,6 +22,7 @@ typedef struct
 #define CHUNK_SIZE 128
 #define POOL_SIZE (1024 * 1024)
 
+#define PAGE_ALIGNED_BASE_PHYSICAL 0x380000
 #define ETERNAL_BASE_PHYSICAL 0x300000
 #define BASE_PHYS 0x200000
 
@@ -30,8 +31,10 @@ PRIVATE BYTE alloc_map[POOL_SIZE / CHUNK_SIZE / 8];
 volatile DWORD sum_alloc = 0;
 volatile DWORD sum_free = POOL_SIZE;
 volatile size_t kmalloc_sum_eternal = 0;
+volatile size_t kmalloc_sum_page_aligned = 0;
 
 static byte* s_next_eternal_ptr;
+static byte* s_next_page_aligned_ptr;
 
 bool is_kmalloc_address(void* ptr)
 {
@@ -47,10 +50,12 @@ kmalloc_init()
     memset( (void *)BASE_PHYS, 0, POOL_SIZE );
 
     kmalloc_sum_eternal = 0;
+    kmalloc_sum_page_aligned = 0;
     sum_alloc = 0;
     sum_free = POOL_SIZE;
 
     s_next_eternal_ptr = (byte*)ETERNAL_BASE_PHYSICAL;
+    s_next_page_aligned_ptr = (byte*)PAGE_ALIGNED_BASE_PHYSICAL;
 }
 
 void* kmalloc_eternal(size_t size)
@@ -61,6 +66,16 @@ void* kmalloc_eternal(size_t size)
     return ptr;
 }
 
+void* kmalloc_page_aligned(size_t size)
+{
+    ASSERT((size % 4096) == 0);
+    void* ptr = s_next_page_aligned_ptr;
+    s_next_page_aligned_ptr += size;
+    kmalloc_sum_page_aligned += size;
+    return ptr;
+}
+
+
 PUBLIC void *
 kmalloc( DWORD size )
 {
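kmalloc_page_aligned() above is a simple bump allocator with no matching free, modeled on kmalloc_eternal(): it carves 4 kB-multiple chunks out of a region starting at PAGE_ALIGNED_BASE_PHYSICAL and only ever moves forward. A minimal usage sketch (the ASSERT is illustrative, not from the patch):

    dword* pageDirectory = (dword*)kmalloc_page_aligned(4096);
    ASSERT(((dword)pageDirectory & 0xfff) == 0); // stays 4 kB aligned, so it can back CR3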
@@ -3,6 +3,7 @@
 void kmalloc_init();
 void *kmalloc(DWORD size) __attribute__ ((malloc));
 void* kmalloc_eternal(size_t) __attribute__ ((malloc));
+void* kmalloc_page_aligned(size_t) __attribute__ ((malloc));
 void kfree(void*);
 
 bool is_kmalloc_address(void*);
@@ -10,6 +11,7 @@ bool is_kmalloc_address(void*);
 extern volatile DWORD sum_alloc;
 extern volatile DWORD sum_free;
 extern volatile dword kmalloc_sum_eternal;
+extern volatile dword kmalloc_sum_page_aligned;
 
 inline void* operator new(size_t, void* p) { return p; }
 inline void* operator new[](size_t, void* p) { return p; }