Selaa lähdekoodia

Kernel: Make SlabAllocator fall back to kmalloc() when slabs run out

This is obviously not ideal, and it would be better to teach it how to
allocate more pages, etc. But since the physical page allocator itself
currently uses SlabAllocator, it's a little bit tricky :^)
Andreas Kling 5 vuotta sitten
vanhempi
commit
a6e4c504e2
1 muutettua tiedostoa jossa 11 lisäystä ja 2 poistoa
  1. +11 −2
      Kernel/Heap/SlabAllocator.cpp

+ 11 - 2
Kernel/Heap/SlabAllocator.cpp

@@ -10,8 +10,9 @@ public:
 
     void init(size_t size)
     {
-        void* base = kmalloc_eternal(size);
-        FreeSlab* slabs = (FreeSlab*)base;
+        m_base = kmalloc_eternal(size);
+        m_end = (u8*)m_base + size;
+        FreeSlab* slabs = (FreeSlab*)m_base;
         size_t slab_count = size / templated_slab_size;
         for (size_t i = 1; i < slab_count; ++i) {
             slabs[i].next = &slabs[i - 1];
@@ -27,6 +28,8 @@ public:
     void* alloc()
     {
         InterruptDisabler disabler;
+        if (!m_freelist)
+            return kmalloc(slab_size());
         ASSERT(m_freelist);
         void* ptr = m_freelist;
         m_freelist = m_freelist->next;
@@ -39,6 +42,10 @@ public:
     {
         InterruptDisabler disabler;
         ASSERT(ptr);
+        if (ptr < m_base || ptr >= m_end) {
+            kfree(ptr);
+            return;
+        }
         ((FreeSlab*)ptr)->next = m_freelist;
         m_freelist = (FreeSlab*)ptr;
         ++m_num_allocated;
@@ -57,6 +64,8 @@ private:
     FreeSlab* m_freelist { nullptr };
     size_t m_num_allocated { 0 };
     size_t m_num_free { 0 };
+    void* m_base { nullptr };
+    void* m_end { nullptr };
 
     static_assert(sizeof(FreeSlab) == templated_slab_size);
 };