AK: Make HashTable and HashMap use size_t for size and capacity
This commit is contained in:
parent
3252a6925e
commit
b813b2f871
Notes:
sideshowbarker
2024-07-19 09:08:01 +09:00
Author: https://github.com/awesomekling Commit: https://github.com/SerenityOS/serenity/commit/b813b2f871
4 changed files with 28 additions and 28 deletions
|
@ -50,8 +50,8 @@ public:
|
|||
HashMap() {}
|
||||
|
||||
bool is_empty() const { return m_table.is_empty(); }
|
||||
int size() const { return m_table.size(); }
|
||||
int capacity() const { return m_table.capacity(); }
|
||||
size_t size() const { return m_table.size(); }
|
||||
size_t capacity() const { return m_table.capacity(); }
|
||||
void clear() { m_table.clear(); }
|
||||
|
||||
void set(const K& key, const V& value) { m_table.set({ key, value }); }
|
||||
|
@ -92,7 +92,7 @@ public:
|
|||
return m_table.find(hash, finder);
|
||||
}
|
||||
|
||||
void ensure_capacity(int capacity) { m_table.ensure_capacity(capacity); }
|
||||
void ensure_capacity(size_t capacity) { m_table.ensure_capacity(capacity); }
|
||||
|
||||
Optional<typename Traits<V>::PeekType> get(const K& key) const
|
||||
{
|
||||
|
|
|
@ -79,7 +79,7 @@ public:
|
|||
private:
|
||||
friend HashTableType;
|
||||
|
||||
explicit HashTableIterator(HashTableType& table, bool is_end, BucketIteratorType bucket_iterator = BucketIteratorType::universal_end(), int bucket_index = 0)
|
||||
explicit HashTableIterator(HashTableType& table, bool is_end, BucketIteratorType bucket_iterator = BucketIteratorType::universal_end(), size_t bucket_index = 0)
|
||||
: m_table(table)
|
||||
, m_bucket_index(bucket_index)
|
||||
, m_is_end(is_end)
|
||||
|
@ -95,7 +95,7 @@ private:
|
|||
}
|
||||
|
||||
HashTableType& m_table;
|
||||
int m_bucket_index { 0 };
|
||||
size_t m_bucket_index { 0 };
|
||||
bool m_is_end { false };
|
||||
BucketIteratorType m_bucket_iterator;
|
||||
};
|
||||
|
@ -148,10 +148,10 @@ public:
|
|||
|
||||
~HashTable() { clear(); }
|
||||
bool is_empty() const { return !m_size; }
|
||||
int size() const { return m_size; }
|
||||
int capacity() const { return m_capacity; }
|
||||
size_t size() const { return m_size; }
|
||||
size_t capacity() const { return m_capacity; }
|
||||
|
||||
void ensure_capacity(int capacity)
|
||||
void ensure_capacity(size_t capacity)
|
||||
{
|
||||
ASSERT(capacity >= size());
|
||||
rehash(capacity);
|
||||
|
@ -177,7 +177,7 @@ public:
|
|||
{
|
||||
if (is_empty())
|
||||
return end();
|
||||
int bucket_index;
|
||||
size_t bucket_index;
|
||||
auto& bucket = lookup_with_hash(hash, &bucket_index);
|
||||
auto bucket_iterator = bucket.find(finder);
|
||||
if (bucket_iterator != bucket.end())
|
||||
|
@ -190,7 +190,7 @@ public:
|
|||
{
|
||||
if (is_empty())
|
||||
return end();
|
||||
int bucket_index;
|
||||
size_t bucket_index;
|
||||
auto& bucket = lookup_with_hash(hash, &bucket_index);
|
||||
auto bucket_iterator = bucket.find(finder);
|
||||
if (bucket_iterator != bucket.end())
|
||||
|
@ -218,34 +218,34 @@ public:
|
|||
void remove(Iterator);
|
||||
|
||||
private:
|
||||
Bucket& lookup(const T&, int* bucket_index = nullptr);
|
||||
const Bucket& lookup(const T&, int* bucket_index = nullptr) const;
|
||||
Bucket& lookup(const T&, size_t* bucket_index = nullptr);
|
||||
const Bucket& lookup(const T&, size_t* bucket_index = nullptr) const;
|
||||
|
||||
Bucket& lookup_with_hash(unsigned hash, int* bucket_index)
|
||||
Bucket& lookup_with_hash(unsigned hash, size_t* bucket_index)
|
||||
{
|
||||
if (bucket_index)
|
||||
*bucket_index = hash % m_capacity;
|
||||
return m_buckets[hash % m_capacity];
|
||||
}
|
||||
|
||||
const Bucket& lookup_with_hash(unsigned hash, int* bucket_index) const
|
||||
const Bucket& lookup_with_hash(unsigned hash, size_t* bucket_index) const
|
||||
{
|
||||
if (bucket_index)
|
||||
*bucket_index = hash % m_capacity;
|
||||
return m_buckets[hash % m_capacity];
|
||||
}
|
||||
|
||||
void rehash(int capacity);
|
||||
void rehash(size_t capacity);
|
||||
void insert(const T&);
|
||||
void insert(T&&);
|
||||
|
||||
Bucket& bucket(int index) { return m_buckets[index]; }
|
||||
const Bucket& bucket(int index) const { return m_buckets[index]; }
|
||||
Bucket& bucket(size_t index) { return m_buckets[index]; }
|
||||
const Bucket& bucket(size_t index) const { return m_buckets[index]; }
|
||||
|
||||
Bucket* m_buckets { nullptr };
|
||||
|
||||
int m_size { 0 };
|
||||
int m_capacity { 0 };
|
||||
size_t m_size { 0 };
|
||||
size_t m_capacity { 0 };
|
||||
bool m_clearing { false };
|
||||
bool m_rehashing { false };
|
||||
};
|
||||
|
@ -293,17 +293,17 @@ void HashTable<T, TraitsForT>::set(const T& value)
|
|||
}
|
||||
|
||||
template<typename T, typename TraitsForT>
|
||||
void HashTable<T, TraitsForT>::rehash(int new_capacity)
|
||||
void HashTable<T, TraitsForT>::rehash(size_t new_capacity)
|
||||
{
|
||||
TemporaryChange<bool> change(m_rehashing, true);
|
||||
new_capacity *= 2;
|
||||
auto* new_buckets = new Bucket[new_capacity];
|
||||
auto* old_buckets = m_buckets;
|
||||
int old_capacity = m_capacity;
|
||||
size_t old_capacity = m_capacity;
|
||||
m_buckets = new_buckets;
|
||||
m_capacity = new_capacity;
|
||||
|
||||
for (int i = 0; i < old_capacity; ++i) {
|
||||
for (size_t i = 0; i < old_capacity; ++i) {
|
||||
for (auto& value : old_buckets[i]) {
|
||||
insert(move(value));
|
||||
}
|
||||
|
@ -360,7 +360,7 @@ void HashTable<T, TraitsForT>::remove(Iterator it)
|
|||
}
|
||||
|
||||
template<typename T, typename TraitsForT>
|
||||
auto HashTable<T, TraitsForT>::lookup(const T& value, int* bucket_index) -> Bucket&
|
||||
auto HashTable<T, TraitsForT>::lookup(const T& value, size_t* bucket_index) -> Bucket&
|
||||
{
|
||||
unsigned hash = TraitsForT::hash(value);
|
||||
if (bucket_index)
|
||||
|
@ -369,7 +369,7 @@ auto HashTable<T, TraitsForT>::lookup(const T& value, int* bucket_index) -> Buck
|
|||
}
|
||||
|
||||
template<typename T, typename TraitsForT>
|
||||
auto HashTable<T, TraitsForT>::lookup(const T& value, int* bucket_index) const -> const Bucket&
|
||||
auto HashTable<T, TraitsForT>::lookup(const T& value, size_t* bucket_index) const -> const Bucket&
|
||||
{
|
||||
unsigned hash = TraitsForT::hash(value);
|
||||
if (bucket_index)
|
||||
|
|
|
@ -490,7 +490,7 @@ void Field::set_chord_preview(Square& square, bool chord_preview)
|
|||
});
|
||||
}
|
||||
|
||||
void Field::set_field_size(int rows, int columns, int mine_count)
|
||||
void Field::set_field_size(int rows, int columns, size_t mine_count)
|
||||
{
|
||||
if (m_rows == rows && m_columns == columns && m_mine_count == mine_count)
|
||||
return;
|
||||
|
|
|
@ -65,11 +65,11 @@ public:
|
|||
|
||||
int rows() const { return m_rows; }
|
||||
int columns() const { return m_columns; }
|
||||
int mine_count() const { return m_mine_count; }
|
||||
size_t mine_count() const { return m_mine_count; }
|
||||
int square_size() const { return 15; }
|
||||
bool is_single_chording() const { return m_single_chording; }
|
||||
|
||||
void set_field_size(int rows, int columns, int mine_count);
|
||||
void set_field_size(int rows, int columns, size_t mine_count);
|
||||
void set_single_chording(bool new_val);
|
||||
|
||||
void reset();
|
||||
|
@ -105,7 +105,7 @@ private:
|
|||
|
||||
int m_rows { 0 };
|
||||
int m_columns { 0 };
|
||||
int m_mine_count { 0 };
|
||||
size_t m_mine_count { 0 };
|
||||
int m_unswept_empties { 0 };
|
||||
Vector<OwnPtr<Square>> m_squares;
|
||||
RefPtr<Gfx::Bitmap> m_mine_bitmap;
|
||||
|
|
Loading…
Add table
Reference in a new issue