Make /proc/PID/vm more readable.

And move the unreadable detail-heavy mess to /proc/PID/vmo.
This commit is contained in:
Andreas Kling 2018-11-19 01:49:19 +01:00
parent 526dcafd57
commit e88f306d07
Notes: sideshowbarker 2024-07-19 16:09:41 +09:00
2 changed files with 49 additions and 32 deletions

View file

@ -46,6 +46,25 @@ ByteBuffer procfs$pid_fds(Process& process)
}
ByteBuffer procfs$pid_vm(Process& process)
{
    // Generates the textual contents of /proc/PID/vm: one header line
    // followed by one line per memory region (begin, end, size, name).
    // The handle keeps the target process inspectable for the duration.
    ProcessInspectionHandle handle(process);
    char* buf;
    // Generous upper bound: fixed header + ~160 bytes per region + slack.
    auto backing = StringImpl::createUninitialized(80 + process.regionCount() * 160 + 4096, buf);
    memset(buf, 0, backing->length());
    char* out = buf;
    out += ksprintf(out, "BEGIN END SIZE NAME\n");
    for (auto& region : process.regions()) {
        auto& base = region->linearAddress;
        out += ksprintf(out, "%x -- %x %x %s\n",
            base.get(),
            base.offset(region->size - 1).get(),
            region->size,
            region->name.characters());
    }
    *out = '\0';
    // Copy out exactly the bytes written; 'backing' owns the scratch storage.
    return ByteBuffer::copy((byte*)buf, out - buf);
}
ByteBuffer procfs$pid_vmo(Process& process)
{
ProcessInspectionHandle handle(process);
char* buffer;
@ -155,6 +174,7 @@ void ProcFS::addProcess(Process& process)
auto dir = addFile(create_directory(buf));
m_pid2inode.set(process.pid(), dir.index());
addFile(create_generated_file("vm", [&process] { return procfs$pid_vm(process); }), dir.index());
addFile(create_generated_file("vmo", [&process] { return procfs$pid_vmo(process); }), dir.index());
addFile(create_generated_file("stack", [&process] { return procfs$pid_stack(process); }), dir.index());
addFile(create_generated_file("regs", [&process] { return procfs$pid_regs(process); }), dir.index());
addFile(create_generated_file("fds", [&process] { return procfs$pid_fds(process); }), dir.index());

View file

@ -64,39 +64,36 @@ void* malloc(size_t size)
// FIXME: This scan can be optimized further with TZCNT.
for (unsigned j = 0; j < 8; ++j) {
// FIXME: Invert loop.
if (!(s_malloc_map[i] & (1<<j))) {
if (chunks_here == 0) {
// Mark where potential allocation starts.
first_chunk = i * 8 + j;
}
chunks_here++;
if (chunks_here == chunks_needed) {
auto* header = (MallocHeader*)(s_malloc_pool + (first_chunk * CHUNK_SIZE));
byte* ptr = ((byte*)header) + sizeof(MallocHeader);
header->chunk_count = chunks_needed;
header->first_chunk_index = first_chunk;
header->size = size;
auto* footer = (MallocFooter*)((byte*)header + (header->chunk_count * CHUNK_SIZE) - sizeof(MallocFooter));
footer->xorcheck = header->compute_xorcheck();
for (size_t k = first_chunk; k < (first_chunk + chunks_needed); ++k)
s_malloc_map[k / 8] |= 1 << (k % 8);
s_malloc_sum_alloc += header->chunk_count * CHUNK_SIZE;
s_malloc_sum_free -= header->chunk_count * CHUNK_SIZE;
memset(ptr, MALLOC_SCRUB_BYTE, (header->chunk_count * CHUNK_SIZE) - (sizeof(MallocHeader) + sizeof(MallocFooter)));
return ptr;
}
}
else
{
/* This is in use, so restart chunks_here counter. */
if ((s_malloc_map[i] & (1<<j))) {
// This is in use, so restart chunks_here counter.
chunks_here = 0;
continue;
}
if (chunks_here == 0) {
// Mark where potential allocation starts.
first_chunk = i * 8 + j;
}
++chunks_here;
if (chunks_here == chunks_needed) {
auto* header = (MallocHeader*)(s_malloc_pool + (first_chunk * CHUNK_SIZE));
byte* ptr = ((byte*)header) + sizeof(MallocHeader);
header->chunk_count = chunks_needed;
header->first_chunk_index = first_chunk;
header->size = size;
auto* footer = (MallocFooter*)((byte*)header + (header->chunk_count * CHUNK_SIZE) - sizeof(MallocFooter));
footer->xorcheck = header->compute_xorcheck();
for (size_t k = first_chunk; k < (first_chunk + chunks_needed); ++k)
s_malloc_map[k / 8] |= 1 << (k % 8);
s_malloc_sum_alloc += header->chunk_count * CHUNK_SIZE;
s_malloc_sum_free -= header->chunk_count * CHUNK_SIZE;
memset(ptr, MALLOC_SCRUB_BYTE, (header->chunk_count * CHUNK_SIZE) - (sizeof(MallocHeader) + sizeof(MallocFooter)));
return ptr;
}
}
}