/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 *    list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
2018-10-10 09:53:07 +00:00
|
|
|
#pragma once
|
|
|
|
|
2019-08-01 09:34:36 +00:00
|
|
|
#include <AK/Assertions.h>
|
|
|
|
#include <AK/Noncopyable.h>
|
2020-02-24 08:55:46 +00:00
|
|
|
#include <AK/Optional.h>
|
2020-03-08 14:53:38 +00:00
|
|
|
#include <AK/Platform.h>
|
2019-08-01 09:34:36 +00:00
|
|
|
#include <AK/StdLibExtras.h>
|
|
|
|
#include <AK/Types.h>
|
|
|
|
#include <AK/kmalloc.h>
|
2018-10-10 09:53:07 +00:00
|
|
|
|
|
|
|
namespace AK {
|
|
|
|
|
|
|
|
class Bitmap {
|
2019-08-01 09:34:36 +00:00
|
|
|
AK_MAKE_NONCOPYABLE(Bitmap)
|
2018-10-10 09:53:07 +00:00
|
|
|
public:
|
|
|
|
// NOTE: A wrapping Bitmap won't try to free the wrapped data.
|
2020-02-25 14:11:15 +00:00
|
|
|
static Bitmap wrap(u8* data, size_t size)
|
2018-10-10 09:53:07 +00:00
|
|
|
{
|
|
|
|
return Bitmap(data, size);
|
|
|
|
}
|
|
|
|
|
2020-02-25 14:11:15 +00:00
|
|
|
static Bitmap create(size_t size, bool default_value = 0)
|
2018-10-10 09:53:07 +00:00
|
|
|
{
|
2018-11-05 12:48:07 +00:00
|
|
|
return Bitmap(size, default_value);
|
2018-10-10 09:53:07 +00:00
|
|
|
}
|
|
|
|
|
2019-06-11 12:50:41 +00:00
|
|
|
static Bitmap create()
|
|
|
|
{
|
|
|
|
return Bitmap();
|
|
|
|
}
|
|
|
|
|
2019-08-01 09:34:36 +00:00
|
|
|
Bitmap(Bitmap&& other)
|
|
|
|
{
|
|
|
|
m_owned = exchange(other.m_owned, false);
|
|
|
|
m_data = exchange(other.m_data, nullptr);
|
|
|
|
m_size = exchange(other.m_size, 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
Bitmap& operator=(Bitmap&& other)
|
|
|
|
{
|
|
|
|
if (this != &other) {
|
|
|
|
if (m_owned)
|
|
|
|
kfree(m_data);
|
|
|
|
m_owned = exchange(other.m_owned, false);
|
|
|
|
m_data = exchange(other.m_data, nullptr);
|
|
|
|
m_size = exchange(other.m_size, 0);
|
|
|
|
}
|
|
|
|
return *this;
|
|
|
|
}
|
|
|
|
|
2018-10-10 09:53:07 +00:00
|
|
|
~Bitmap()
|
|
|
|
{
|
|
|
|
if (m_owned)
|
|
|
|
kfree(m_data);
|
|
|
|
m_data = nullptr;
|
|
|
|
}
|
|
|
|
|
2020-02-24 08:55:46 +00:00
|
|
|
size_t size() const { return m_size; }
|
|
|
|
bool get(size_t index) const
|
2018-10-10 09:53:07 +00:00
|
|
|
{
|
|
|
|
ASSERT(index < m_size);
|
|
|
|
return 0 != (m_data[index / 8] & (1u << (index % 8)));
|
|
|
|
}
|
2020-02-24 08:55:46 +00:00
|
|
|
void set(size_t index, bool value) const
|
2018-10-10 09:53:07 +00:00
|
|
|
{
|
|
|
|
ASSERT(index < m_size);
|
|
|
|
if (value)
|
2019-07-03 19:17:35 +00:00
|
|
|
m_data[index / 8] |= static_cast<u8>((1u << (index % 8)));
|
2018-10-10 09:53:07 +00:00
|
|
|
else
|
2019-07-03 19:17:35 +00:00
|
|
|
m_data[index / 8] &= static_cast<u8>(~(1u << (index % 8)));
|
2018-10-10 09:53:07 +00:00
|
|
|
}
|
2020-02-28 11:05:22 +00:00
|
|
|
void set_range(size_t start, size_t len, bool value)
|
|
|
|
{
|
|
|
|
for (size_t index = start; index < start + len; ++index) {
|
|
|
|
set(index, value);
|
|
|
|
}
|
|
|
|
}
|
2018-10-10 09:53:07 +00:00
|
|
|
|
2019-07-03 19:17:35 +00:00
|
|
|
u8* data() { return m_data; }
|
|
|
|
const u8* data() const { return m_data; }
|
2018-10-10 09:53:07 +00:00
|
|
|
|
2020-02-24 08:55:46 +00:00
|
|
|
void grow(size_t size, bool default_value)
|
2019-06-11 12:50:41 +00:00
|
|
|
{
|
|
|
|
ASSERT(size > m_size);
|
|
|
|
|
|
|
|
auto previous_size_bytes = size_in_bytes();
|
|
|
|
auto previous_size = m_size;
|
|
|
|
auto previous_data = m_data;
|
|
|
|
|
|
|
|
m_size = size;
|
2019-07-03 19:17:35 +00:00
|
|
|
m_data = reinterpret_cast<u8*>(kmalloc(size_in_bytes()));
|
2019-06-11 12:50:41 +00:00
|
|
|
|
|
|
|
fill(default_value);
|
|
|
|
|
|
|
|
if (previous_data != nullptr) {
|
2020-03-08 10:57:24 +00:00
|
|
|
__builtin_memcpy(m_data, previous_data, previous_size_bytes);
|
2020-03-08 14:53:38 +00:00
|
|
|
if (previous_size % 8)
|
|
|
|
set_range(previous_size, 8 - previous_size % 8, default_value);
|
2019-06-11 12:50:41 +00:00
|
|
|
kfree(previous_data);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-14 15:31:57 +00:00
|
|
|
void fill(bool value)
|
|
|
|
{
|
2020-03-08 10:57:24 +00:00
|
|
|
__builtin_memset(m_data, value ? 0xff : 0x00, size_in_bytes());
|
2019-05-14 15:31:57 +00:00
|
|
|
}
|
|
|
|
|
2020-02-24 08:55:46 +00:00
|
|
|
Optional<size_t> find_first_set() const
|
2019-06-11 12:50:41 +00:00
|
|
|
{
|
2020-02-24 08:55:46 +00:00
|
|
|
size_t i = 0;
|
2019-06-11 12:50:41 +00:00
|
|
|
while (i < m_size / 8 && m_data[i] == 0x00)
|
|
|
|
i++;
|
|
|
|
|
2020-02-28 11:05:22 +00:00
|
|
|
for (size_t j = i * 8; j < m_size; j++) {
|
2019-06-11 12:50:41 +00:00
|
|
|
if (get(j))
|
|
|
|
return j;
|
2020-02-24 08:55:46 +00:00
|
|
|
}
|
2019-06-11 12:50:41 +00:00
|
|
|
|
2020-02-24 08:55:46 +00:00
|
|
|
return {};
|
2019-06-11 12:50:41 +00:00
|
|
|
}
|
|
|
|
|
2020-02-24 08:55:46 +00:00
|
|
|
Optional<size_t> find_first_unset() const
|
2019-06-11 12:50:41 +00:00
|
|
|
{
|
2020-02-24 08:55:46 +00:00
|
|
|
size_t i = 0;
|
2019-06-11 12:50:41 +00:00
|
|
|
while (i < m_size / 8 && m_data[i] == 0xff)
|
|
|
|
i++;
|
|
|
|
|
2020-02-28 11:05:22 +00:00
|
|
|
for (size_t j = i * 8; j < m_size; j++)
|
2019-06-11 12:50:41 +00:00
|
|
|
if (!get(j))
|
|
|
|
return j;
|
|
|
|
|
2020-03-08 11:58:22 +00:00
|
|
|
return {};
|
2019-06-11 12:50:41 +00:00
|
|
|
}
|
|
|
|
|
2020-03-08 14:53:38 +00:00
|
|
|
// The function will return the next range of unset bits starting from the
|
|
|
|
// @from value.
|
|
|
|
// @from: the postition from which the search starts. The var will be
|
|
|
|
// changed and new value is the offset of the found block.
|
|
|
|
// @min_length: minimum size of the range which will be returned.
|
|
|
|
// @max_length: maximum size of the range which will be returned.
|
|
|
|
// This is used to increase performance, since the range of
|
|
|
|
// unset bits can be long, and we don't need the while range,
|
|
|
|
// so we can stop when we've reached @max_length.
|
|
|
|
inline Optional<size_t> find_next_range_of_unset_bits(size_t& from, size_t min_length = 1, size_t max_length = max_size) const
|
2020-01-26 08:48:24 +00:00
|
|
|
{
|
2020-03-08 14:53:38 +00:00
|
|
|
if (min_length > max_length) {
|
2020-02-24 08:55:46 +00:00
|
|
|
return {};
|
2020-03-08 14:53:38 +00:00
|
|
|
}
|
2020-01-26 08:48:24 +00:00
|
|
|
|
2020-03-08 14:53:38 +00:00
|
|
|
u32* bitmap32 = (u32*)m_data;
|
2020-01-26 08:48:24 +00:00
|
|
|
|
2020-03-08 14:53:38 +00:00
|
|
|
// Calculating the start offset.
|
|
|
|
size_t start_bucket_index = from / 32;
|
|
|
|
size_t start_bucket_bit = from % 32;
|
2020-01-26 08:48:24 +00:00
|
|
|
|
2020-03-08 14:53:38 +00:00
|
|
|
size_t* start_of_free_chunks = &from;
|
|
|
|
size_t free_chunks = 0;
|
|
|
|
|
|
|
|
for (size_t bucket_index = start_bucket_index; bucket_index < m_size / 32; ++bucket_index) {
|
|
|
|
if (bitmap32[bucket_index] == 0xffffffff) {
|
|
|
|
// Skip over completely full bucket of size 32.
|
|
|
|
if (free_chunks >= min_length) {
|
|
|
|
return min(free_chunks, max_length);
|
2020-01-26 08:48:24 +00:00
|
|
|
}
|
2020-03-08 14:53:38 +00:00
|
|
|
free_chunks = 0;
|
|
|
|
start_bucket_bit = 0;
|
|
|
|
continue;
|
2020-01-26 08:48:24 +00:00
|
|
|
}
|
2020-03-08 14:53:38 +00:00
|
|
|
if (bitmap32[bucket_index] == 0x0) {
|
|
|
|
// Skip over completely empty bucket of size 32.
|
|
|
|
if (free_chunks == 0) {
|
|
|
|
*start_of_free_chunks = bucket_index * 32;
|
|
|
|
}
|
|
|
|
free_chunks += 32;
|
|
|
|
if (free_chunks >= max_length) {
|
|
|
|
return max_length;
|
|
|
|
}
|
|
|
|
start_bucket_bit = 0;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
u32 bucket = bitmap32[bucket_index];
|
|
|
|
u8 viewed_bits = start_bucket_bit;
|
|
|
|
u32 trailing_zeroes = 0;
|
|
|
|
|
|
|
|
bucket >>= viewed_bits;
|
|
|
|
start_bucket_bit = 0;
|
|
|
|
|
|
|
|
while (viewed_bits < 32) {
|
|
|
|
if (bucket == 0) {
|
|
|
|
if (free_chunks == 0) {
|
|
|
|
*start_of_free_chunks = bucket_index * 32 + viewed_bits;
|
|
|
|
}
|
|
|
|
free_chunks += 32 - viewed_bits;
|
|
|
|
viewed_bits = 32;
|
|
|
|
} else {
|
|
|
|
trailing_zeroes = count_trailing_zeroes_32(bucket);
|
|
|
|
bucket >>= trailing_zeroes;
|
|
|
|
|
|
|
|
if (free_chunks == 0) {
|
|
|
|
*start_of_free_chunks = bucket_index * 32 + viewed_bits;
|
|
|
|
}
|
|
|
|
free_chunks += trailing_zeroes;
|
|
|
|
viewed_bits += trailing_zeroes;
|
|
|
|
|
|
|
|
if (free_chunks >= min_length) {
|
|
|
|
return min(free_chunks, max_length);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Deleting trailing ones.
|
|
|
|
u32 trailing_ones = count_trailing_zeroes_32(~bucket);
|
|
|
|
bucket >>= trailing_ones;
|
|
|
|
viewed_bits += trailing_ones;
|
|
|
|
free_chunks = 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (free_chunks < min_length) {
|
2020-05-06 18:14:59 +00:00
|
|
|
size_t first_trailing_bit = (m_size / 32) * 32;
|
|
|
|
size_t trailing_bits = size() % 32;
|
|
|
|
for (size_t i = 0; i < trailing_bits; ++i) {
|
|
|
|
if (!get(first_trailing_bit + i)) {
|
|
|
|
if (!free_chunks)
|
|
|
|
*start_of_free_chunks = first_trailing_bit + i;
|
|
|
|
if (++free_chunks >= min_length)
|
|
|
|
return min(free_chunks, max_length);
|
|
|
|
} else {
|
|
|
|
free_chunks = 0;
|
|
|
|
}
|
|
|
|
}
|
2020-03-08 14:53:38 +00:00
|
|
|
return {};
|
2020-01-26 08:48:24 +00:00
|
|
|
}
|
|
|
|
|
2020-03-08 14:53:38 +00:00
|
|
|
return min(free_chunks, max_length);
|
|
|
|
}
|
|
|
|
|
|
|
|
Optional<size_t> find_longest_range_of_unset_bits(size_t max_length, size_t& found_range_size) const
|
|
|
|
{
|
|
|
|
size_t start = 0;
|
|
|
|
size_t max_region_start = 0;
|
|
|
|
size_t max_region_size = 0;
|
|
|
|
|
|
|
|
while (true) {
|
|
|
|
// Look for the next block which is bigger than currunt.
|
|
|
|
auto length_of_found_range = find_next_range_of_unset_bits(start, max_region_size + 1, max_length);
|
|
|
|
if (length_of_found_range.has_value()) {
|
|
|
|
max_region_start = start;
|
|
|
|
max_region_size = length_of_found_range.value();
|
|
|
|
start += max_region_size;
|
|
|
|
} else {
|
|
|
|
// No ranges which are bigger than current were found.
|
|
|
|
break;
|
|
|
|
}
|
2020-01-26 08:48:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
found_range_size = max_region_size;
|
2020-03-08 14:53:38 +00:00
|
|
|
if (max_region_size) {
|
2020-01-26 08:48:24 +00:00
|
|
|
return max_region_start;
|
2020-03-08 14:53:38 +00:00
|
|
|
}
|
|
|
|
return {};
|
2020-01-26 08:48:24 +00:00
|
|
|
}
|
|
|
|
|
2020-02-29 16:11:54 +00:00
|
|
|
Optional<size_t> find_first_fit(size_t minimum_length) const
|
|
|
|
{
|
2020-03-08 14:53:38 +00:00
|
|
|
size_t start = 0;
|
|
|
|
auto length_of_found_range = find_next_range_of_unset_bits(start, minimum_length, minimum_length);
|
|
|
|
if (length_of_found_range.has_value()) {
|
|
|
|
return start;
|
2020-02-29 16:11:54 +00:00
|
|
|
}
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
2020-03-10 11:36:20 +00:00
|
|
|
Optional<size_t> find_best_fit(size_t minimum_length) const
|
|
|
|
{
|
|
|
|
size_t start = 0;
|
|
|
|
size_t best_region_start = 0;
|
|
|
|
size_t best_region_size = max_size;
|
|
|
|
bool found = false;
|
|
|
|
|
|
|
|
while (true) {
|
|
|
|
// Look for the next block which is bigger than requested length.
|
|
|
|
auto length_of_found_range = find_next_range_of_unset_bits(start, minimum_length, best_region_size);
|
|
|
|
if (length_of_found_range.has_value()) {
|
|
|
|
if (best_region_size > length_of_found_range.value() || !found) {
|
|
|
|
best_region_start = start;
|
|
|
|
best_region_size = length_of_found_range.value();
|
|
|
|
found = true;
|
|
|
|
}
|
|
|
|
start += length_of_found_range.value();
|
|
|
|
} else {
|
|
|
|
// There are no ranges which can fit requested length.
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (found) {
|
|
|
|
return best_region_start;
|
|
|
|
}
|
|
|
|
return {};
|
|
|
|
}
|
|
|
|
|
2020-03-02 08:50:22 +00:00
|
|
|
Bitmap()
|
2019-06-11 12:50:41 +00:00
|
|
|
: m_size(0)
|
|
|
|
, m_owned(true)
|
|
|
|
{
|
|
|
|
m_data = nullptr;
|
|
|
|
}
|
|
|
|
|
2020-03-02 08:50:22 +00:00
|
|
|
Bitmap(size_t size, bool default_value)
|
2018-10-10 09:53:07 +00:00
|
|
|
: m_size(size)
|
|
|
|
, m_owned(true)
|
|
|
|
{
|
|
|
|
ASSERT(m_size != 0);
|
2019-07-03 19:17:35 +00:00
|
|
|
m_data = reinterpret_cast<u8*>(kmalloc(size_in_bytes()));
|
2019-05-14 15:31:57 +00:00
|
|
|
fill(default_value);
|
2018-10-10 09:53:07 +00:00
|
|
|
}
|
|
|
|
|
2020-02-25 14:11:15 +00:00
|
|
|
Bitmap(u8* data, size_t size)
|
2018-10-10 09:53:07 +00:00
|
|
|
: m_data(data)
|
|
|
|
, m_size(size)
|
|
|
|
, m_owned(false)
|
|
|
|
{
|
|
|
|
}
|
|
|
|
|
2020-03-08 14:53:38 +00:00
|
|
|
static constexpr u32 max_size = 0xffffffff;
|
|
|
|
|
2019-10-01 17:58:07 +00:00
|
|
|
private:
|
2020-05-06 18:14:59 +00:00
|
|
|
size_t size_in_bytes() const { return ceil_div(m_size, static_cast<size_t>(8)); }
|
2019-05-14 15:31:57 +00:00
|
|
|
|
2019-07-03 19:17:35 +00:00
|
|
|
u8* m_data { nullptr };
|
2020-02-24 08:55:46 +00:00
|
|
|
size_t m_size { 0 };
|
2018-10-10 09:53:07 +00:00
|
|
|
bool m_owned { false };
|
|
|
|
};
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
using AK::Bitmap;
|