Kernel: Add an LRU block cache.

This papers over some of the incredibly inefficient access patterns in the
Ext2FS implementation for a while longer. :^)
Andreas Kling 2019-02-10 20:07:14 +01:00
parent 052a101cc5
commit 7014daa235
3 changed files with 122 additions and 2 deletions

AK/InlineLRUCache.h (new file)

@@ -0,0 +1,70 @@
#pragma once

#include <AK/HashMap.h>
#include <AK/InlineLinkedList.h>

namespace AK {

template<typename K, typename V>
class InlineLRUCache {
public:
    ~InlineLRUCache()
    {
        while (size())
            remove_last();
    }

    size_t size() const { return m_map.size(); }
    size_t capacity() const { return m_capacity; }

    void set_capacity(size_t capacity)
    {
        if (capacity == m_capacity)
            return;
        m_capacity = capacity;
        while (size() >= capacity)
            remove_last();
    }

    V* get(const K& key)
    {
        auto it = m_map.find(key);
        if (it == m_map.end())
            return nullptr;
        V* entry = (*it).value;
        // A hit moves the entry to the head of the list, so the tail is
        // always the least-recently-used entry.
        m_entries.remove(entry);
        m_entries.prepend(entry);
        return entry;
    }

    void put(K&& key, V&& value)
    {
        auto it = m_map.find(key);
        if (it != m_map.end())
            return;
        V* new_entry = new V(move(value));
        m_entries.prepend(new_entry);
        m_map.set(key, new_entry);
        while (size() >= capacity())
            remove_last();
    }

private:
    void remove_last()
    {
        // Evict from the tail (least recently used).
        V* entry = m_entries.tail();
        ASSERT(entry);
        m_entries.remove(entry);
        m_map.remove(entry->m_key);
        delete entry;
    }

    InlineLinkedList<V> m_entries;
    HashMap<K, V*> m_map;
    size_t m_capacity { 16 };
};

}

using AK::InlineLRUCache;
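
A minimal usage sketch, assuming an invented value type (Thing and its members are illustrative, not part of this commit): the value type must be an InlineLinkedListNode exposing public m_next/m_prev pointers for the intrusive list, plus an m_key member that remove_last() uses to erase the map entry. Note that put() evicts while size() >= capacity(), so a cache with capacity N settles at N - 1 entries once eviction kicks in.

#include <AK/InlineLRUCache.h>

// "Thing" is a hypothetical cacheable type for illustration only.
class Thing : public InlineLinkedListNode<Thing> {
public:
    Thing(unsigned key, unsigned payload)
        : m_key(key)
        , m_payload(payload)
    {
    }

    unsigned m_key { 0 };      // read by remove_last() during eviction
    Thing* m_next { nullptr }; // required by InlineLinkedList
    Thing* m_prev { nullptr };
    unsigned m_payload { 0 };
};

void example()
{
    InlineLRUCache<unsigned, Thing> cache;
    cache.set_capacity(4);
    cache.put(1, Thing(1, 100));     // cache takes ownership via move
    if (auto* thing = cache.get(1)) {
        // Hit: the entry is now at the head of the LRU list.
        (void)thing->m_payload;
    }
}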

DiskBackedFileSystem.cpp

@@ -1,8 +1,49 @@
#include "DiskBackedFileSystem.h"
#include "i386.h"
#include <AK/InlineLRUCache.h>
//#define DBFS_DEBUG
struct BlockIdentifier {
unsigned fsid { 0 };
unsigned index { 0 };
bool operator==(const BlockIdentifier& other) const { return fsid == other.fsid && index == other.index; }
};
namespace AK {
template<>
struct Traits<BlockIdentifier> {
static unsigned hash(const BlockIdentifier& block_id) { return pair_int_hash(block_id.fsid, block_id.index); }
static void dump(const BlockIdentifier& block_id) { kprintf("[block %02u:%08u]", block_id.fsid, block_id.index); }
};
}
class CachedBlock : public InlineLinkedListNode<CachedBlock> {
public:
CachedBlock(const BlockIdentifier& block_id, const ByteBuffer& buffer)
: m_key(block_id)
, m_buffer(buffer)
{
}
BlockIdentifier m_key;
CachedBlock* m_next { nullptr };
CachedBlock* m_prev { nullptr };
ByteBuffer m_buffer;
};
Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>& block_cache()
{
static Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>* s_cache;
if (!s_cache)
s_cache = new Lockable<InlineLRUCache<BlockIdentifier, CachedBlock>>;
return *s_cache;
}
DiskBackedFS::DiskBackedFS(RetainPtr<DiskDevice>&& device)
: m_device(move(device))
{
@@ -37,6 +78,12 @@ ByteBuffer DiskBackedFS::read_block(unsigned index) const
#ifdef DBFS_DEBUG
    kprintf("DiskBackedFileSystem::read_block %u\n", index);
#endif
    {
        // Fast path: serve the block straight from the cache if present.
        LOCKER(block_cache().lock());
        if (auto* cached_block = block_cache().resource().get({ fsid(), index }))
            return cached_block->m_buffer;
    }
    auto buffer = ByteBuffer::create_uninitialized(block_size());
    //kprintf("created block buffer with size %u\n", block_size());
    DiskOffset base_offset = static_cast<DiskOffset>(index) * static_cast<DiskOffset>(block_size());
@@ -44,6 +91,10 @@ ByteBuffer DiskBackedFS::read_block(unsigned index) const
    bool success = device().read(base_offset, block_size(), buffer_pointer);
    ASSERT(success);
    ASSERT(buffer.size() == block_size());
    {
        // Cache the freshly read block for subsequent reads.
        LOCKER(block_cache().lock());
        block_cache().resource().put({ fsid(), index }, CachedBlock({ fsid(), index }, buffer));
    }
    return buffer;
}
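
read_block() now follows a classic read-through pattern: check the cache under its lock, fall back to the device on a miss, then populate the cache before returning. For readers outside the kernel tree, here is a self-contained sketch of the same pattern using only the C++ standard library; BlockCache, get_or_load, and the types involved are illustrative stand-ins, not part of this commit (locking elided, capacity assumed >= 1):

#include <cstdint>
#include <list>
#include <unordered_map>
#include <utility>
#include <vector>

using BlockId = uint64_t;
using Block = std::vector<uint8_t>;

class BlockCache {
public:
    explicit BlockCache(size_t capacity)
        : m_capacity(capacity)
    {
    }

    template<typename LoadFn>
    const Block& get_or_load(BlockId id, LoadFn load)
    {
        auto it = m_map.find(id);
        if (it != m_map.end()) {
            // Hit: move the entry to the front (most recently used).
            m_lru.splice(m_lru.begin(), m_lru, it->second);
            return it->second->second;
        }
        // Miss: read from the backing store and insert at the front.
        m_lru.emplace_front(id, load(id));
        m_map[id] = m_lru.begin();
        if (m_map.size() > m_capacity) {
            // Evict the least recently used block from the tail.
            m_map.erase(m_lru.back().first);
            m_lru.pop_back();
        }
        return m_lru.front().second;
    }

private:
    size_t m_capacity { 0 };
    std::list<std::pair<BlockId, Block>> m_lru;
    std::unordered_map<BlockId, std::list<std::pair<BlockId, Block>>::iterator> m_map;
};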


@@ -147,8 +147,7 @@ namespace AK {
 template<>
 struct Traits<InodeIdentifier> {
-    // FIXME: This is a shitty hash.
-    static unsigned hash(const InodeIdentifier& inode) { return Traits<unsigned>::hash(inode.fsid()) + Traits<unsigned>::hash(inode.index()); }
+    static unsigned hash(const InodeIdentifier& inode) { return pair_int_hash(inode.fsid(), inode.index()); }
     static void dump(const InodeIdentifier& inode) { kprintf("%02u:%08u", inode.fsid(), inode.index()); }
 };
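
The replaced hash summed the two per-word hashes, and a sum is symmetric: identifiers that swap fsid and index (or whose word hashes merely add to the same total) always collide. A pairing hash mixes the words in an order-dependent way. A quick illustration, where pair_hash_demo is an invented stand-in and NOT AK's actual pair_int_hash:

#include <cstdio>

// Old scheme, simplified: summing per-word hashes is symmetric in its
// arguments, so hash(a, b) == hash(b, a) for every a and b.
static unsigned additive_hash(unsigned a, unsigned b) { return a + b; }

// Invented stand-in for a pairing hash, for illustration only.
static unsigned pair_hash_demo(unsigned a, unsigned b)
{
    unsigned h = a * 0x9e3779b9u;               // multiplicative mix of the first word
    h ^= b + 0x85ebca6bu + (h << 6) + (h >> 2); // fold in the second, order-dependently
    return h;
}

int main()
{
    std::printf("additive: %u == %u\n", additive_hash(1, 2), additive_hash(2, 1)); // always equal
    std::printf("pairing:  %u != %u\n", pair_hash_demo(1, 2), pair_hash_demo(2, 1)); // almost surely distinct
    return 0;
}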