Kernel: Make BlockBasedFS::BlockIndex a distinct integer type

Andreas Kling 2021-02-12 11:59:27 +01:00
parent 900865975a
commit 489317e573
4 changed files with 72 additions and 70 deletions
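
The mechanism here is AK's distinct-numeric machinery: `TYPEDEF_DISTINCT_ORDERED_ID(unsigned, BlockIndex)` declares a wrapper type that can be constructed from and compared like the underlying `unsigned`, but no longer converts back implicitly, so every escape to the raw number becomes a visible `.value()` call. A minimal sketch of the idea (illustrative only, not the actual macro expansion):

```cpp
// Minimal sketch of a distinct ordered ID type; illustrative only, not the
// real AK implementation. Construction from the raw integer is allowed, but
// there is no implicit conversion back and no accidental arithmetic.
class BlockIndex {
public:
    BlockIndex() = default;
    BlockIndex(unsigned value)
        : m_value(value)
    {
    }

    unsigned value() const { return m_value; }

    bool operator==(BlockIndex other) const { return m_value == other.m_value; }
    bool operator!=(BlockIndex other) const { return m_value != other.m_value; }
    bool operator<(BlockIndex other) const { return m_value < other.m_value; }
    bool operator>(BlockIndex other) const { return m_value > other.m_value; }

private:
    unsigned m_value { 0 };
};
```

This is why the diff below is almost entirely mechanical: signatures change from `unsigned` to `BlockIndex`, and raw arithmetic picks up an explicit `.value()`.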

Kernel/FileSystem/BlockBasedFileSystem.cpp

@@ -33,7 +33,7 @@ namespace Kernel {
 struct CacheEntry {
     IntrusiveListNode list_node;
-    u32 block_index { 0 };
+    BlockBasedFS::BlockIndex block_index { 0 };
     u8* data { nullptr };
     bool has_data { false };
 };
@@ -74,7 +74,7 @@ public:
         m_clean_list.prepend(entry);
     }

-    CacheEntry& get(u32 block_index) const
+    CacheEntry& get(BlockBasedFS::BlockIndex block_index) const
     {
         if (auto it = m_hash.find(block_index); it != m_hash.end()) {
             auto& entry = const_cast<CacheEntry&>(*it->value);
@@ -106,13 +106,6 @@ public:
     const CacheEntry* entries() const { return (const CacheEntry*)m_entries.data(); }
     CacheEntry* entries() { return (CacheEntry*)m_entries.data(); }

-    template<typename Callback>
-    void for_each_clean_entry(Callback callback)
-    {
-        for (auto& entry : m_clean_list)
-            callback(entry);
-    }
-
     template<typename Callback>
     void for_each_dirty_entry(Callback callback)
     {
@@ -123,7 +116,7 @@ public:
 private:
     BlockBasedFS& m_fs;
     size_t m_entry_count { 10000 };
-    mutable HashMap<u32, CacheEntry*> m_hash;
+    mutable HashMap<BlockBasedFS::BlockIndex, CacheEntry*> m_hash;
     mutable IntrusiveList<CacheEntry, &CacheEntry::list_node> m_clean_list;
     mutable IntrusiveList<CacheEntry, &CacheEntry::list_node> m_dirty_list;
     KBuffer m_cached_block_data;
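
Keying `m_hash` on the distinct type assumes `HashMap` knows how to hash a `BlockIndex`. AK provides hash traits for its distinct numeric types by hashing the underlying value; roughly the following shape (a sketch, not AK's exact code):

```cpp
// Sketch of the hash-traits shape such a key needs; illustrative only, the
// real specialization ships with AK's DistinctNumeric machinery.
namespace AK {

template<>
struct Traits<Kernel::BlockBasedFS::BlockIndex> : public GenericTraits<Kernel::BlockBasedFS::BlockIndex> {
    static unsigned hash(Kernel::BlockBasedFS::BlockIndex index) { return int_hash(index.value()); }
};

}
```
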
@@ -141,7 +134,7 @@ BlockBasedFS::~BlockBasedFS()
 {
 }

-KResult BlockBasedFS::write_block(unsigned index, const UserOrKernelBuffer& data, size_t count, size_t offset, bool allow_cache)
+KResult BlockBasedFS::write_block(BlockIndex index, const UserOrKernelBuffer& data, size_t count, size_t offset, bool allow_cache)
 {
     ASSERT(m_logical_block_size);
     ASSERT(offset + count <= block_size());
@@ -151,7 +144,7 @@ KResult BlockBasedFS::write_block(unsigned index, const UserOrKernelBuffer& data, size_t count, size_t offset, bool allow_cache)
     if (!allow_cache) {
         flush_specific_block_if_needed(index);
-        u32 base_offset = static_cast<u32>(index) * static_cast<u32>(block_size()) + offset;
+        u32 base_offset = index.value() * block_size() + offset;
         file_description().seek(base_offset, SEEK_SET);
         auto nwritten = file_description().write(data, count);
         if (nwritten.is_error())
@@ -175,18 +168,18 @@ KResult BlockBasedFS::write_block(unsigned index, const UserOrKernelBuffer& data, size_t count, size_t offset, bool allow_cache)
     return KSuccess;
 }

-bool BlockBasedFS::raw_read(unsigned index, UserOrKernelBuffer& buffer)
+bool BlockBasedFS::raw_read(BlockIndex index, UserOrKernelBuffer& buffer)
 {
-    u32 base_offset = static_cast<u32>(index) * static_cast<u32>(m_logical_block_size);
+    u32 base_offset = index.value() * m_logical_block_size;
     file_description().seek(base_offset, SEEK_SET);
     auto nread = file_description().read(buffer, m_logical_block_size);
     ASSERT(!nread.is_error());
     ASSERT(nread.value() == m_logical_block_size);
     return true;
 }

-bool BlockBasedFS::raw_write(unsigned index, const UserOrKernelBuffer& buffer)
+bool BlockBasedFS::raw_write(BlockIndex index, const UserOrKernelBuffer& buffer)
 {
-    u32 base_offset = static_cast<u32>(index) * static_cast<u32>(m_logical_block_size);
+    size_t base_offset = index.value() * m_logical_block_size;
     file_description().seek(base_offset, SEEK_SET);
     auto nwritten = file_description().write(buffer, m_logical_block_size);
     ASSERT(!nwritten.is_error());
@@ -194,20 +187,20 @@ bool BlockBasedFS::raw_write(unsigned index, const UserOrKernelBuffer& buffer)
     return true;
 }

-bool BlockBasedFS::raw_read_blocks(unsigned index, size_t count, UserOrKernelBuffer& buffer)
+bool BlockBasedFS::raw_read_blocks(BlockIndex index, size_t count, UserOrKernelBuffer& buffer)
 {
     auto current = buffer;
-    for (unsigned block = index; block < (index + count); block++) {
-        if (!raw_read(block, current))
+    for (unsigned block = index.value(); block < (index.value() + count); block++) {
+        if (!raw_read(BlockIndex { block }, current))
             return false;
         current = current.offset(logical_block_size());
     }
     return true;
 }

-bool BlockBasedFS::raw_write_blocks(unsigned index, size_t count, const UserOrKernelBuffer& buffer)
+bool BlockBasedFS::raw_write_blocks(BlockIndex index, size_t count, const UserOrKernelBuffer& buffer)
 {
     auto current = buffer;
-    for (unsigned block = index; block < (index + count); block++) {
+    for (unsigned block = index.value(); block < (index.value() + count); block++) {
         if (!raw_write(block, current))
             return false;
         current = current.offset(logical_block_size());
@@ -215,21 +208,21 @@ bool BlockBasedFS::raw_write_blocks(unsigned index, size_t count, const UserOrKernelBuffer& buffer)
     return true;
 }

-KResult BlockBasedFS::write_blocks(unsigned index, unsigned count, const UserOrKernelBuffer& data, bool allow_cache)
+KResult BlockBasedFS::write_blocks(BlockIndex index, unsigned count, const UserOrKernelBuffer& data, bool allow_cache)
 {
     ASSERT(m_logical_block_size);
 #if BBFS_DEBUG
     klog() << "BlockBasedFileSystem::write_blocks " << index << " x" << count;
 #endif
     for (unsigned i = 0; i < count; ++i) {
-        auto result = write_block(index + i, data.offset(i * block_size()), block_size(), 0, allow_cache);
+        auto result = write_block(BlockIndex { index.value() + i }, data.offset(i * block_size()), block_size(), 0, allow_cache);
         if (result.is_error())
             return result;
     }
     return KSuccess;
 }

-KResult BlockBasedFS::read_block(unsigned index, UserOrKernelBuffer* buffer, size_t count, size_t offset, bool allow_cache) const
+KResult BlockBasedFS::read_block(BlockIndex index, UserOrKernelBuffer* buffer, size_t count, size_t offset, bool allow_cache) const
 {
     ASSERT(m_logical_block_size);
     ASSERT(offset + count <= block_size());
@@ -239,7 +232,7 @@ KResult BlockBasedFS::read_block(unsigned index, UserOrKernelBuffer* buffer, size_t count, size_t offset, bool allow_cache) const
     if (!allow_cache) {
         const_cast<BlockBasedFS*>(this)->flush_specific_block_if_needed(index);
-        u32 base_offset = static_cast<u32>(index) * static_cast<u32>(block_size()) + static_cast<u32>(offset);
+        size_t base_offset = index.value() * block_size() + offset;
         file_description().seek(base_offset, SEEK_SET);
         auto nread = file_description().read(*buffer, count);
         if (nread.is_error())
@@ -250,7 +243,7 @@ KResult BlockBasedFS::read_block(unsigned index, UserOrKernelBuffer* buffer, size_t count, size_t offset, bool allow_cache) const
     auto& entry = cache().get(index);
     if (!entry.has_data) {
-        u32 base_offset = static_cast<u32>(index) * static_cast<u32>(block_size());
+        size_t base_offset = index.value() * block_size();
         file_description().seek(base_offset, SEEK_SET);
         auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry.data);
         auto nread = file_description().read(entry_data_buffer, block_size());
@@ -264,7 +257,7 @@ KResult BlockBasedFS::read_block(unsigned index, UserOrKernelBuffer* buffer, size_t count, size_t offset, bool allow_cache) const
     return KSuccess;
 }

-KResult BlockBasedFS::read_blocks(unsigned index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache) const
+KResult BlockBasedFS::read_blocks(BlockIndex index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache) const
 {
     ASSERT(m_logical_block_size);
     if (!count)
@@ -273,7 +266,7 @@ KResult BlockBasedFS::read_blocks(unsigned index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache) const
         return read_block(index, &buffer, block_size(), 0, allow_cache);
     auto out = buffer;
     for (unsigned i = 0; i < count; ++i) {
-        auto result = read_block(index + i, &out, block_size(), 0, allow_cache);
+        auto result = read_block(BlockIndex { index.value() + i }, &out, block_size(), 0, allow_cache);
         if (result.is_error())
             return result;
         out = out.offset(block_size());
@@ -282,7 +275,7 @@ KResult BlockBasedFS::read_blocks(unsigned index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache) const
     return KSuccess;
 }

-void BlockBasedFS::flush_specific_block_if_needed(unsigned index)
+void BlockBasedFS::flush_specific_block_if_needed(BlockIndex index)
 {
     LOCKER(m_lock);
     if (!cache().is_dirty())
@@ -290,7 +283,7 @@ void BlockBasedFS::flush_specific_block_if_needed(unsigned index)
     Vector<CacheEntry*, 32> cleaned_entries;
     cache().for_each_dirty_entry([&](CacheEntry& entry) {
         if (entry.block_index != index) {
-            u32 base_offset = static_cast<u32>(entry.block_index) * static_cast<u32>(block_size());
+            size_t base_offset = entry.block_index.value() * block_size();
             file_description().seek(base_offset, SEEK_SET);
             // FIXME: Should this error path be surfaced somehow?
             auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry.data);
@@ -311,7 +304,7 @@ void BlockBasedFS::flush_writes_impl()
         return;
     u32 count = 0;
     cache().for_each_dirty_entry([&](CacheEntry& entry) {
-        u32 base_offset = static_cast<u32>(entry.block_index) * static_cast<u32>(block_size());
+        u32 base_offset = entry.block_index.value() * block_size();
         file_description().seek(base_offset, SEEK_SET);
         // FIXME: Should this error path be surfaced somehow?
         auto entry_data_buffer = UserOrKernelBuffer::for_kernel_buffer(entry.data);
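
All of the offset math above now spells out the unwrap, as in `index.value() * block_size() + offset`. The payoff is that the old failure mode, quietly passing a byte offset or a loop counter where a block index belongs, no longer compiles. A hypothetical illustration (assumes the sketch type above; `example` is not kernel code):

```cpp
// Hypothetical illustration; `example` is not part of the kernel.
void example(BlockIndex index, size_t block_size)
{
    size_t base_offset = index.value() * block_size; // deliberate, visible unwrap
    // size_t oops = index * block_size;             // would not compile: no operator*
    (void)base_offset;
}
```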

Kernel/FileSystem/BlockBasedFileSystem.h

@@ -32,6 +32,8 @@ namespace Kernel {
 class BlockBasedFS : public FileBackedFS {
 public:
+    TYPEDEF_DISTINCT_ORDERED_ID(unsigned, BlockIndex);
+
     virtual ~BlockBasedFS() override;

     size_t logical_block_size() const { return m_logical_block_size; };
@@ -42,25 +44,33 @@ public:
 protected:
     explicit BlockBasedFS(FileDescription&);

-    KResult read_block(unsigned index, UserOrKernelBuffer* buffer, size_t count, size_t offset = 0, bool allow_cache = true) const;
-    KResult read_blocks(unsigned index, unsigned count, UserOrKernelBuffer& buffer, bool allow_cache = true) const;
+    KResult read_block(BlockIndex, UserOrKernelBuffer*, size_t count, size_t offset = 0, bool allow_cache = true) const;
+    KResult read_blocks(BlockIndex, unsigned count, UserOrKernelBuffer&, bool allow_cache = true) const;

-    bool raw_read(unsigned index, UserOrKernelBuffer& buffer);
-    bool raw_write(unsigned index, const UserOrKernelBuffer& buffer);
+    bool raw_read(BlockIndex, UserOrKernelBuffer&);
+    bool raw_write(BlockIndex, const UserOrKernelBuffer&);

-    bool raw_read_blocks(unsigned index, size_t count, UserOrKernelBuffer& buffer);
-    bool raw_write_blocks(unsigned index, size_t count, const UserOrKernelBuffer& buffer);
+    bool raw_read_blocks(BlockIndex index, size_t count, UserOrKernelBuffer&);
+    bool raw_write_blocks(BlockIndex index, size_t count, const UserOrKernelBuffer&);

-    KResult write_block(unsigned index, const UserOrKernelBuffer& buffer, size_t count, size_t offset = 0, bool allow_cache = true);
-    KResult write_blocks(unsigned index, unsigned count, const UserOrKernelBuffer&, bool allow_cache = true);
+    KResult write_block(BlockIndex, const UserOrKernelBuffer&, size_t count, size_t offset = 0, bool allow_cache = true);
+    KResult write_blocks(BlockIndex, unsigned count, const UserOrKernelBuffer&, bool allow_cache = true);

     size_t m_logical_block_size { 512 };

 private:
     DiskCache& cache() const;
-    void flush_specific_block_if_needed(unsigned index);
+    void flush_specific_block_if_needed(BlockIndex index);
     mutable OwnPtr<DiskCache> m_cache;
 };

 }

+template<>
+struct AK::Formatter<Kernel::BlockBasedFS::BlockIndex> : AK::Formatter<FormatString> {
+    void format(FormatBuilder& builder, Kernel::BlockBasedFS::BlockIndex value)
+    {
+        return AK::Formatter<FormatString>::format(builder, "{}", value.value());
+    }
+};
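
The `Formatter` specialization added at the bottom is what keeps format-string logging working now that `BlockIndex` is not an integer: it simply forwards to the wrapped value. A hypothetical call site (assumes AK's `dbgln`):

```cpp
// Hypothetical call site; assumes AK's dbgln and the specialization above.
void log_flush(Kernel::BlockBasedFS::BlockIndex index)
{
    dbgln("flushing block {}", index); // formatted through index.value()
}
```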

Kernel/FileSystem/Ext2FileSystem.cpp

@@ -172,7 +172,7 @@ NonnullRefPtr<Inode> Ext2FS::root_inode() const
     return *get_inode({ fsid(), EXT2_ROOT_INO });
 }

-bool Ext2FS::find_block_containing_inode(InodeIndex inode, unsigned& block_index, unsigned& offset) const
+bool Ext2FS::find_block_containing_inode(InodeIndex inode, BlockIndex& block_index, unsigned& offset) const
 {
     LOCKER(m_lock);
     auto& super_block = this->super_block();
@@ -262,7 +262,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e2inode)
     for (unsigned i = 0; i < new_shape.direct_blocks; ++i) {
         if (e2inode.i_block[i] != blocks[output_block_index])
             inode_dirty = true;
-        e2inode.i_block[i] = blocks[output_block_index];
+        e2inode.i_block[i] = blocks[output_block_index].value();
         ++output_block_index;
         --remaining_blocks;
     }
@@ -286,7 +286,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e2inode)
         BlockIndex new_indirect_block = new_meta_blocks.take_last();
         if (e2inode.i_block[EXT2_IND_BLOCK] != new_indirect_block)
             inode_dirty = true;
-        e2inode.i_block[EXT2_IND_BLOCK] = new_indirect_block;
+        e2inode.i_block[EXT2_IND_BLOCK] = new_indirect_block.value();
         if (inode_dirty) {
             dbgln_if(EXT2_DEBUG, "Ext2FS: Adding the indirect block to i_block array of inode {}", inode_index);
             write_ext2_inode(inode_index, e2inode);
@@ -304,7 +304,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e2inode)
         ASSERT(new_shape.indirect_blocks <= entries_per_block);
         for (unsigned i = 0; i < new_shape.indirect_blocks; ++i) {
-            stream << blocks[output_block_index++];
+            stream << blocks[output_block_index++].value();
             --remaining_blocks;
         }
@@ -326,7 +326,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e2inode)
         BlockIndex new_dindirect_block = new_meta_blocks.take_last();
         if (e2inode.i_block[EXT2_DIND_BLOCK] != new_dindirect_block)
             inode_dirty = true;
-        e2inode.i_block[EXT2_DIND_BLOCK] = new_dindirect_block;
+        e2inode.i_block[EXT2_DIND_BLOCK] = new_dindirect_block.value();
         if (inode_dirty) {
             dbgln_if(EXT2_DEBUG, "Ext2FS: Adding the doubly-indirect block to i_block array of inode {}", inode_index);
             write_ext2_inode(inode_index, e2inode);
@@ -366,7 +366,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e2inode)
             bool ind_block_new = !indirect_block_index;
             if (ind_block_new) {
                 indirect_block_index = new_meta_blocks.take_last();
-                dind_block_as_pointers[i] = indirect_block_index;
+                dind_block_as_pointers[i] = indirect_block_index.value();
                 dind_block_dirty = true;
             }
@@ -392,7 +392,7 @@ KResult Ext2FS::write_block_list_for_inode(InodeIndex inode_index, ext2_inode& e2inode)
             for (unsigned j = 0; j < entries_to_write; ++j) {
                 BlockIndex output_block = blocks[output_block_index++];
                 if (ind_block_as_pointers[j] != output_block) {
-                    ind_block_as_pointers[j] = output_block;
+                    ind_block_as_pointers[j] = output_block.value();
                     ind_block_dirty = true;
                 }
                 --remaining_blocks;
@@ -491,7 +491,7 @@ Vector<Ext2FS::BlockIndex> Ext2FS::block_list_for_inode_impl(const ext2_inode& e2inode, bool include_block_list_blocks) const
     // Don't need to make copy of add_block, since this capture will only
     // be called before block_list_for_inode_impl finishes.
-    auto process_block_array = [&](unsigned array_block_index, auto&& callback) {
+    auto process_block_array = [&](BlockIndex array_block_index, auto&& callback) {
         if (include_block_list_blocks)
             add_block(array_block_index);
         auto count = min(blocks_remaining, entries_per_block);
@@ -504,19 +504,19 @@ Vector<Ext2FS::BlockIndex> Ext2FS::block_list_for_inode_impl(const ext2_inode& e2inode, bool include_block_list_blocks) const
             // FIXME: Stop here and propagate this error.
             dbgln("Ext2FS: block_list_for_inode_impl had error: {}", result.error());
         }
-        for (BlockIndex i = 0; i < count; ++i)
-            callback(array[i]);
+        for (unsigned i = 0; i < count; ++i)
+            callback(BlockIndex(array[i]));
     };

-    process_block_array(e2inode.i_block[EXT2_IND_BLOCK], [&](unsigned block_index) {
+    process_block_array(e2inode.i_block[EXT2_IND_BLOCK], [&](BlockIndex block_index) {
         add_block(block_index);
     });

     if (!blocks_remaining)
         return list;

-    process_block_array(e2inode.i_block[EXT2_DIND_BLOCK], [&](unsigned block_index) {
-        process_block_array(block_index, [&](unsigned block_index2) {
+    process_block_array(e2inode.i_block[EXT2_DIND_BLOCK], [&](BlockIndex block_index) {
+        process_block_array(block_index, [&](BlockIndex block_index2) {
             add_block(block_index2);
         });
     });
@@ -524,9 +524,9 @@ Vector<Ext2FS::BlockIndex> Ext2FS::block_list_for_inode_impl(const ext2_inode& e2inode, bool include_block_list_blocks) const
     if (!blocks_remaining)
         return list;

-    process_block_array(e2inode.i_block[EXT2_TIND_BLOCK], [&](unsigned block_index) {
-        process_block_array(block_index, [&](unsigned block_index2) {
-            process_block_array(block_index2, [&](unsigned block_index3) {
+    process_block_array(e2inode.i_block[EXT2_TIND_BLOCK], [&](BlockIndex block_index) {
+        process_block_array(block_index, [&](BlockIndex block_index2) {
+            process_block_array(block_index2, [&](BlockIndex block_index3) {
                 add_block(block_index3);
             });
         });
@@ -545,7 +545,7 @@ void Ext2FS::free_inode(Ext2FSInode& inode)
     auto block_list = block_list_for_inode(inode.m_raw_inode, true);
     for (auto block_index : block_list) {
         ASSERT(block_index <= super_block().s_blocks_count);
-        if (block_index)
+        if (block_index.value())
             set_block_allocation_state(block_index, false);
     }
@@ -689,7 +689,7 @@ RefPtr<Inode> Ext2FS::get_inode(InodeIdentifier inode) const
         return nullptr;
     }

-    unsigned block_index;
+    BlockIndex block_index;
     unsigned offset;
     if (!find_block_containing_inode(inode.index(), block_index, offset))
         return {};
@@ -750,13 +750,13 @@ ssize_t Ext2FSInode::read_bytes(off_t offset, ssize_t count, UserOrKernelBuffer&
     for (size_t bi = first_block_logical_index; remaining_count && bi <= last_block_logical_index; ++bi) {
         auto block_index = m_block_list[bi];
-        ASSERT(block_index);
+        ASSERT(block_index.value());
         size_t offset_into_block = (bi == first_block_logical_index) ? offset_into_first_block : 0;
         size_t num_bytes_to_copy = min(block_size - offset_into_block, remaining_count);
         auto buffer_offset = buffer.offset(nread);
         int err = fs().read_block(block_index, &buffer_offset, num_bytes_to_copy, offset_into_block, allow_cache);
         if (err < 0) {
-            klog() << "ext2fs: read_bytes: read_block(" << block_index << ") failed (lbi: " << bi << ")";
+            dmesgln("Ext2FS: read_bytes: read_block({}) failed (bi: {})", block_index.value(), bi);
             return err;
         }
         remaining_count -= num_bytes_to_copy;
@@ -805,7 +805,7 @@ KResult Ext2FSInode::resize(u64 new_size)
     }

     while (block_list.size() != blocks_needed_after) {
         auto block_index = block_list.take_last();
-        if (block_index)
+        if (block_index.value())
             fs().set_block_allocation_state(block_index, false);
     }
@@ -1124,7 +1124,7 @@ unsigned Ext2FS::blocks_per_group() const
 bool Ext2FS::write_ext2_inode(InodeIndex inode, const ext2_inode& e2inode)
 {
     LOCKER(m_lock);
-    unsigned block_index;
+    BlockIndex block_index;
     unsigned offset;
     if (!find_block_containing_inode(inode, block_index, offset))
         return false;
@@ -1132,7 +1132,7 @@ bool Ext2FS::write_ext2_inode(InodeIndex inode, const ext2_inode& e2inode)
     return write_block(block_index, buffer, inode_size(), offset) >= 0;
 }

-Vector<Ext2FS::BlockIndex> Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count)
+auto Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count) -> Vector<BlockIndex>
 {
     LOCKER(m_lock);
     dbgln_if(EXT2_DEBUG, "Ext2FS: allocate_blocks(preferred group: {}, count {})", preferred_group_index, count);
@@ -1172,13 +1172,13 @@ Vector<Ext2FS::BlockIndex> Ext2FS::allocate_blocks(GroupIndex preferred_group_index, size_t count)
         int blocks_in_group = min(blocks_per_group(), super_block().s_blocks_count);
         auto block_bitmap = Bitmap::wrap(cached_bitmap.buffer.data(), blocks_in_group);

-        BlockIndex first_block_in_group = (group_index - 1) * blocks_per_group() + first_block_index();
+        BlockIndex first_block_in_group = (group_index - 1) * blocks_per_group() + first_block_index().value();
         size_t free_region_size = 0;
         auto first_unset_bit_index = block_bitmap.find_longest_range_of_unset_bits(count - blocks.size(), free_region_size);
         ASSERT(first_unset_bit_index.has_value());
         dbgln_if(EXT2_DEBUG, "Ext2FS: allocating free region of size: {} [{}]", free_region_size, group_index);
         for (size_t i = 0; i < free_region_size; ++i) {
-            BlockIndex block_index = (first_unset_bit_index.value() + i) + first_block_in_group;
+            BlockIndex block_index = (first_unset_bit_index.value() + i) + first_block_in_group.value();
             set_block_allocation_state(block_index, true);
             blocks.unchecked_append(block_index);
             dbgln_if(EXT2_DEBUG, " allocated > {}", block_index);
@@ -1250,7 +1250,7 @@ Ext2FS::GroupIndex Ext2FS::group_index_from_block_index(BlockIndex block_index)
 {
     if (!block_index)
         return 0;
-    return (block_index - 1) / blocks_per_group() + 1;
+    return (block_index.value() - 1) / blocks_per_group() + 1;
 }

 unsigned Ext2FS::group_index_from_inode(InodeIndex inode) const
@@ -1340,7 +1340,7 @@ bool Ext2FS::set_block_allocation_state(BlockIndex block_index, bool new_state)
     GroupIndex group_index = group_index_from_block_index(block_index);
     auto& bgd = group_descriptor(group_index);

-    BlockIndex index_in_group = (block_index - first_block_index()) - ((group_index - 1) * blocks_per_group());
+    unsigned index_in_group = (block_index.value() - first_block_index().value()) - ((group_index - 1) * blocks_per_group());
     unsigned bit_index = index_in_group % blocks_per_group();

     auto& cached_bitmap = get_bitmap_block(bgd.bg_block_bitmap);
@@ -1618,7 +1618,7 @@ KResultOr<int> Ext2FSInode::get_block_address(int index)
     if (index < 0 || (size_t)index >= m_block_list.size())
         return 0;
-    return m_block_list[index];
+    return m_block_list[index].value();
 }

 unsigned Ext2FS::total_block_count() const
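
One pattern worth noticing in the Ext2 changes: the distinct type is unwrapped exactly at the on-disk boundary (`e2inode.i_block[i] = blocks[...].value()`, `stream << ....value()`) and re-wrapped immediately after deserializing (`callback(BlockIndex(array[i]))`), because ext2's on-disk block pointers are plain 32-bit integers. Schematically (illustrative helpers, assuming `u32` on-disk pointers; not kernel code):

```cpp
// Illustrative helpers; not kernel code. ext2 stores block pointers as
// 32-bit integers on disk, so the wrapper type stops at that boundary.
u32 block_index_to_disk(Kernel::BlockBasedFS::BlockIndex index)
{
    return index.value(); // explicit unwrap when filling on-disk structures
}

Kernel::BlockBasedFS::BlockIndex block_index_from_disk(u32 raw)
{
    return Kernel::BlockBasedFS::BlockIndex { raw }; // re-wrap after reading
}
```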

Kernel/FileSystem/Ext2FileSystem.h

@@ -87,7 +87,7 @@ private:
     const Ext2FS& fs() const;
     Ext2FSInode(Ext2FS&, InodeIndex);

-    mutable Vector<unsigned> m_block_list;
+    mutable Vector<BlockBasedFS::BlockIndex> m_block_list;
     mutable HashMap<String, InodeIndex> m_lookup_cache;
     ext2_inode m_raw_inode;
 };
@@ -113,7 +113,6 @@
     virtual u8 internal_file_type_to_directory_entry_type(const DirectoryEntryView& entry) const override;

 private:
-    typedef unsigned BlockIndex;
     typedef unsigned GroupIndex;

     explicit Ext2FS(FileDescription&);
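
With the file-system-local `typedef unsigned BlockIndex` gone, `Ext2FS::BlockIndex` now resolves to the distinct type inherited from `BlockBasedFS`, so the Ext2 code and the block cache share one index type. `GroupIndex` remains a plain `typedef unsigned`, which is why group arithmetic like `(group_index - 1) * blocks_per_group()` compiles unchanged. A sketch of the inherited-alias consequence (assumes `<type_traits>`, which kernel code itself would not use):

```cpp
#include <type_traits>

// Sketch; not kernel code. Verifies that the alias now refers to the
// inherited distinct type rather than a local plain integer.
static_assert(std::is_same_v<Kernel::Ext2FS::BlockIndex, Kernel::BlockBasedFS::BlockIndex>,
    "Ext2FS reuses the distinct BlockIndex of its base class");
```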