瀏覽代碼

Kernel: Implement kmalloc_good_size for the new kmalloc

This lets kmalloc-aware data structures like Vector and HashTable use
up the extra wasted space we allocate in the slab heaps & heap chunks.
Idan Horowitz 3 年之前
父節點
當前提交
29eee390ec
共有 2 個文件被更改,包括 10 次插入、1 次刪除
  1. 2 0
      Kernel/Heap/Heap.h
  2. 8 1
      Kernel/Heap/kmalloc.cpp

+ 2 - 0
Kernel/Heap/Heap.h

@@ -44,6 +44,8 @@ class Heap {
     }
 
 public:
+    static constexpr size_t AllocationHeaderSize = sizeof(AllocationHeader);
+
     Heap(u8* memory, size_t memory_size)
         : m_total_chunks(calculate_chunks(memory_size))
         , m_chunks(memory)

+ 8 - 1
Kernel/Heap/kmalloc.cpp

@@ -22,6 +22,7 @@ static constexpr size_t CHUNK_SIZE = 32;
 #else
 static constexpr size_t CHUNK_SIZE = 64;
 #endif
+static_assert(is_power_of_two(CHUNK_SIZE));
 
 static constexpr size_t INITIAL_KMALLOC_MEMORY_SIZE = 2 * MiB;
 
 
@@ -435,7 +436,13 @@ void kfree_sized(void* ptr, size_t size)
 
 size_t kmalloc_good_size(size_t size)
 {
-    return size;
+    VERIFY(size > 0);
+    // NOTE: There's no need to take the kmalloc lock, as the kmalloc slab-heaps (and their sizes) are constant
+    for (auto const& slabheap : g_kmalloc_global->slabheaps) {
+        if (size <= slabheap.slab_size())
+            return slabheap.slab_size();
+    }
+    return round_up_to_power_of_two(size + Heap<CHUNK_SIZE>::AllocationHeaderSize, CHUNK_SIZE) - Heap<CHUNK_SIZE>::AllocationHeaderSize;
 }
 
 void* kmalloc_aligned(size_t size, size_t alignment)