Browse files

Kernel: Rename SpinLock => Spinlock

Andreas Kling 4 years ago
parent
commit
55adace359
100 changed files with 373 additions and 373 deletions
  1. Kernel/Arch/x86/common/Processor.cpp (+1 -1)
  2. Kernel/Bus/PCI/MMIOAccess.cpp (+6 -6)
  3. Kernel/Bus/PCI/MMIOAccess.h (+2 -2)
  4. Kernel/Bus/USB/SysFSUSB.cpp (+4 -4)
  5. Kernel/Bus/USB/SysFSUSB.h (+1 -1)
  6. Kernel/Bus/VirtIO/VirtIOConsole.cpp (+7 -7)
  7. Kernel/Bus/VirtIO/VirtIOConsolePort.cpp (+9 -9)
  8. Kernel/Bus/VirtIO/VirtIOQueue.cpp (+2 -2)
  9. Kernel/Bus/VirtIO/VirtIOQueue.h (+3 -3)
  10. Kernel/Bus/VirtIO/VirtIORNG.cpp (+2 -2)
  11. Kernel/ConsoleDevice.cpp (+3 -3)
  12. Kernel/CoreDump.cpp (+2 -2)
  13. Kernel/Devices/AsyncDeviceRequest.cpp (+5 -5)
  14. Kernel/Devices/AsyncDeviceRequest.h (+2 -2)
  15. Kernel/Devices/Device.cpp (+1 -1)
  16. Kernel/Devices/Device.h (+2 -2)
  17. Kernel/Devices/HID/HIDManagement.h (+1 -1)
  18. Kernel/Devices/HID/I8042Controller.cpp (+3 -3)
  19. Kernel/Devices/HID/I8042Controller.h (+8 -8)
  20. Kernel/Devices/HID/KeyboardDevice.cpp (+2 -2)
  21. Kernel/Devices/HID/KeyboardDevice.h (+1 -1)
  22. Kernel/Devices/HID/MouseDevice.cpp (+2 -2)
  23. Kernel/Devices/HID/MouseDevice.h (+1 -1)
  24. Kernel/Devices/HID/PS2MouseDevice.cpp (+1 -1)
  25. Kernel/Devices/HID/VMWareMouseDevice.cpp (+1 -1)
  26. Kernel/Devices/KCOVDevice.cpp (+1 -1)
  27. Kernel/Devices/KCOVInstance.h (+2 -2)
  28. Kernel/Devices/SerialDevice.cpp (+2 -2)
  29. Kernel/Devices/SerialDevice.h (+1 -1)
  30. Kernel/FileSystem/File.h (+1 -1)
  31. Kernel/FileSystem/Inode.cpp (+2 -2)
  32. Kernel/FileSystem/Inode.h (+1 -1)
  33. Kernel/FileSystem/Plan9FileSystem.cpp (+6 -6)
  34. Kernel/FileSystem/Plan9FileSystem.h (+3 -3)
  35. Kernel/FileSystem/SysFSComponent.cpp (+2 -2)
  36. Kernel/Forward.h (+3 -3)
  37. Kernel/FutexQueue.cpp (+7 -7)
  38. Kernel/FutexQueue.h (+2 -2)
  39. Kernel/GlobalProcessExposed.cpp (+2 -2)
  40. Kernel/Graphics/Bochs/GraphicsAdapter.cpp (+2 -2)
  41. Kernel/Graphics/Bochs/GraphicsAdapter.h (+1 -1)
  42. Kernel/Graphics/Console/GenericFramebufferConsole.cpp (+4 -4)
  43. Kernel/Graphics/Console/GenericFramebufferConsole.h (+1 -1)
  44. Kernel/Graphics/Console/TextModeConsole.cpp (+10 -10)
  45. Kernel/Graphics/Console/TextModeConsole.h (+2 -2)
  46. Kernel/Graphics/FramebufferDevice.cpp (+3 -3)
  47. Kernel/Graphics/FramebufferDevice.h (+2 -2)
  48. Kernel/Graphics/GraphicsManagement.h (+2 -2)
  49. Kernel/Graphics/Intel/NativeGraphicsAdapter.cpp (+6 -6)
  50. Kernel/Graphics/Intel/NativeGraphicsAdapter.h (+3 -3)
  51. Kernel/Graphics/VirtIOGPU/GPU.cpp (+2 -2)
  52. Kernel/Heap/kmalloc.cpp (+7 -7)
  53. Kernel/Library/ListedRefCounted.h (+1 -1)
  54. Kernel/Locking/Mutex.cpp (+6 -6)
  55. Kernel/Locking/Mutex.h (+4 -4)
  56. Kernel/Locking/Spinlock.h (+15 -15)
  57. Kernel/Locking/SpinlockProtected.h (+8 -8)
  58. Kernel/Memory/AddressSpace.cpp (+15 -15)
  59. Kernel/Memory/AddressSpace.h (+2 -2)
  60. Kernel/Memory/AnonymousVMObject.cpp (+6 -6)
  61. Kernel/Memory/AnonymousVMObject.h (+1 -1)
  62. Kernel/Memory/InodeVMObject.cpp (+1 -1)
  63. Kernel/Memory/MemoryManager.cpp (+33 -33)
  64. Kernel/Memory/MemoryManager.h (+4 -4)
  65. Kernel/Memory/PageDirectory.cpp (+3 -3)
  66. Kernel/Memory/PageDirectory.h (+2 -2)
  67. Kernel/Memory/Region.cpp (+12 -12)
  68. Kernel/Memory/RingBuffer.h (+2 -2)
  69. Kernel/Memory/VMObject.cpp (+2 -2)
  70. Kernel/Memory/VMObject.h (+5 -5)
  71. Kernel/Memory/VirtualRangeAllocator.cpp (+4 -4)
  72. Kernel/Memory/VirtualRangeAllocator.h (+2 -2)
  73. Kernel/Net/Routing.cpp (+2 -2)
  74. Kernel/PerformanceEventBuffer.cpp (+1 -1)
  75. Kernel/Process.cpp (+11 -11)
  76. Kernel/Process.h (+8 -8)
  77. Kernel/ProcessExposed.cpp (+2 -2)
  78. Kernel/ProcessGroup.cpp (+2 -2)
  79. Kernel/ProcessGroup.h (+2 -2)
  80. Kernel/ProcessSpecificExposed.cpp (+1 -1)
  81. Kernel/Random.cpp (+1 -1)
  82. Kernel/Random.h (+5 -5)
  83. Kernel/Scheduler.cpp (+7 -7)
  84. Kernel/Scheduler.h (+2 -2)
  85. Kernel/Storage/AHCIPort.cpp (+9 -9)
  86. Kernel/Storage/AHCIPort.h (+5 -5)
  87. Kernel/Storage/BMIDEChannel.cpp (+4 -4)
  88. Kernel/Storage/IDEChannel.cpp (+4 -4)
  89. Kernel/Storage/IDEChannel.h (+1 -1)
  90. Kernel/Syscalls/execve.cpp (+1 -1)
  91. Kernel/Syscalls/fork.cpp (+2 -2)
  92. Kernel/Syscalls/futex.cpp (+5 -5)
  93. Kernel/Syscalls/profiling.cpp (+4 -4)
  94. Kernel/Syscalls/ptrace.cpp (+2 -2)
  95. Kernel/Syscalls/sched.cpp (+2 -2)
  96. Kernel/Syscalls/thread.cpp (+2 -2)
  97. Kernel/TTY/ConsoleManagement.cpp (+2 -2)
  98. Kernel/TTY/ConsoleManagement.h (+3 -3)
  99. Kernel/TTY/SlavePTY.cpp (+2 -2)
  100. Kernel/TTY/SlavePTY.h (+1 -1)
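
The rename is purely mechanical: SpinLock becomes Spinlock, ScopedSpinLock becomes ScopedSpinlock, and likewise RecursiveSpinLock and SpinLockProtected; no behavior changes (373 lines added, 373 removed). A minimal sketch of the post-rename call-site pattern that recurs throughout the hunks below (do_guarded_work and s_data_lock are hypothetical names, not part of the commit):

#include <Kernel/Locking/Spinlock.h>

namespace Kernel {

static Spinlock<u8> s_data_lock; // was: SpinLock<u8>

void do_guarded_work()
{
    // was: ScopedSpinLock lock(s_data_lock);
    ScopedSpinlock lock(s_data_lock); // RAII guard: acquires here, releases at scope exit
    // ... critical section ...
}

}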

+ 1 - 1
Kernel/Arch/x86/common/Processor.cpp

@@ -501,7 +501,7 @@ Vector<FlatPtr> Processor::capture_stack_trace(Thread& thread, size_t max_frames
     // is a chance a context switch may happen while we're trying
     // to get it. It also won't be entirely accurate and merely
     // reflect the status at the last context switch.
-    ScopedSpinLock lock(g_scheduler_lock);
+    ScopedSpinlock lock(g_scheduler_lock);
     if (&thread == Processor::current_thread()) {
         VERIFY(thread.state() == Thread::Running);
         // Leave the scheduler lock. If we trigger page faults we may

+ 6 - 6
Kernel/Bus/PCI/MMIOAccess.cpp

@@ -117,7 +117,7 @@ VirtualAddress MMIOAccess::get_device_configuration_space(Address address)
 
 u8 MMIOAccess::read8_field(Address address, u32 field)
 {
-    ScopedSpinLock lock(m_access_lock);
+    ScopedSpinlock lock(m_access_lock);
     VERIFY(field <= 0xfff);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 8-bit field {:#08x} for {}", field, address);
     return *((volatile u8*)(get_device_configuration_space(address).get() + (field & 0xfff)));
@@ -125,7 +125,7 @@ u8 MMIOAccess::read8_field(Address address, u32 field)
 
 u16 MMIOAccess::read16_field(Address address, u32 field)
 {
-    ScopedSpinLock lock(m_access_lock);
+    ScopedSpinlock lock(m_access_lock);
     VERIFY(field < 0xfff);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 16-bit field {:#08x} for {}", field, address);
     u16 data = 0;
@@ -135,7 +135,7 @@ u16 MMIOAccess::read16_field(Address address, u32 field)
 
 u32 MMIOAccess::read32_field(Address address, u32 field)
 {
-    ScopedSpinLock lock(m_access_lock);
+    ScopedSpinlock lock(m_access_lock);
     VERIFY(field <= 0xffc);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Reading 32-bit field {:#08x} for {}", field, address);
     u32 data = 0;
@@ -145,21 +145,21 @@ u32 MMIOAccess::read32_field(Address address, u32 field)
 
 void MMIOAccess::write8_field(Address address, u32 field, u8 value)
 {
-    ScopedSpinLock lock(m_access_lock);
+    ScopedSpinlock lock(m_access_lock);
     VERIFY(field <= 0xfff);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 8-bit field {:#08x}, value={:#02x} for {}", field, value, address);
     *((volatile u8*)(get_device_configuration_space(address).get() + (field & 0xfff))) = value;
 }
 void MMIOAccess::write16_field(Address address, u32 field, u16 value)
 {
-    ScopedSpinLock lock(m_access_lock);
+    ScopedSpinlock lock(m_access_lock);
     VERIFY(field < 0xfff);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 16-bit field {:#08x}, value={:#02x} for {}", field, value, address);
     ByteReader::store<u16>(get_device_configuration_space(address).offset(field & 0xfff).as_ptr(), value);
 }
 void MMIOAccess::write32_field(Address address, u32 field, u32 value)
 {
-    ScopedSpinLock lock(m_access_lock);
+    ScopedSpinlock lock(m_access_lock);
     VERIFY(field <= 0xffc);
     dbgln_if(PCI_DEBUG, "PCI: MMIO Writing 32-bit field {:#08x}, value={:#02x} for {}", field, value, address);
     ByteReader::store<u32>(get_device_configuration_space(address).offset(field & 0xfff).as_ptr(), value);

+ 2 - 2
Kernel/Bus/PCI/MMIOAccess.h

@@ -12,7 +12,7 @@
 #include <AK/Types.h>
 #include <Kernel/ACPI/Definitions.h>
 #include <Kernel/Bus/PCI/Access.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/AnonymousVMObject.h>
 #include <Kernel/Memory/PhysicalRegion.h>
 #include <Kernel/Memory/Region.h>
@@ -44,7 +44,7 @@ private:
     PhysicalAddress determine_memory_mapped_bus_region(u32 segment, u8 bus) const;
     void map_bus_region(u32, u8);
     VirtualAddress get_device_configuration_space(Address address);
-    SpinLock<u8> m_access_lock;
+    Spinlock<u8> m_access_lock;
     u8 m_mapped_bus { 0 };
     OwnPtr<Memory::Region> m_mapped_region;
 

+ 4 - 4
Kernel/Bus/USB/SysFSUSB.cpp

@@ -57,7 +57,7 @@ KResultOr<size_t> SysFSUSBDeviceInformation::read_bytes(off_t offset, size_t cou
 
 KResult SysFSUSBBusDirectory::traverse_as_directory(unsigned fsid, Function<bool(FileSystem::DirectoryEntryView const&)> callback) const
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     // Note: if the parent directory is null, it means something bad happened as this should not happen for the USB directory.
     VERIFY(m_parent_directory);
     callback({ ".", { fsid, component_index() }, 0 });
@@ -72,7 +72,7 @@ KResult SysFSUSBBusDirectory::traverse_as_directory(unsigned fsid, Function<bool
 
 RefPtr<SysFSComponent> SysFSUSBBusDirectory::lookup(StringView name)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     for (auto& device_node : m_device_nodes) {
         if (device_node.name() == name) {
             return device_node;
@@ -93,7 +93,7 @@ RefPtr<SysFSUSBDeviceInformation> SysFSUSBBusDirectory::device_node_for(USB::Dev
 
 void SysFSUSBBusDirectory::plug(USB::Device& new_device)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     auto device_node = device_node_for(new_device);
     VERIFY(!device_node);
     m_device_nodes.append(SysFSUSBDeviceInformation::create(new_device));
@@ -101,7 +101,7 @@ void SysFSUSBBusDirectory::plug(USB::Device& new_device)
 
 void SysFSUSBBusDirectory::unplug(USB::Device& deleted_device)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     auto device_node = device_node_for(deleted_device);
     VERIFY(device_node);
     device_node->m_list_node.remove();

+ 1 - 1
Kernel/Bus/USB/SysFSUSB.h

@@ -48,7 +48,7 @@ private:
     RefPtr<SysFSUSBDeviceInformation> device_node_for(USB::Device& device);
 
     IntrusiveList<SysFSUSBDeviceInformation, RefPtr<SysFSUSBDeviceInformation>, &SysFSUSBDeviceInformation::m_list_node> m_device_nodes;
-    mutable SpinLock<u8> m_lock;
+    mutable Spinlock<u8> m_lock;
 };
 
 }

+ 7 - 7
Kernel/Bus/VirtIO/VirtIOConsole.cpp

@@ -64,9 +64,9 @@ void VirtIOConsole::handle_queue_update(u16 queue_index)
     dbgln_if(VIRTIO_DEBUG, "VirtIOConsole: Handle queue update {}", queue_index);
 
     if (queue_index == CONTROL_RECEIVEQ) {
-        ScopedSpinLock ringbuffer_lock(m_control_receive_buffer->lock());
+        ScopedSpinlock ringbuffer_lock(m_control_receive_buffer->lock());
         auto& queue = get_queue(CONTROL_RECEIVEQ);
-        ScopedSpinLock queue_lock(queue.lock());
+        ScopedSpinlock queue_lock(queue.lock());
         size_t used;
         VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
 
@@ -81,9 +81,9 @@ void VirtIOConsole::handle_queue_update(u16 queue_index)
             popped_chain = queue.pop_used_buffer_chain(used);
         }
     } else if (queue_index == CONTROL_TRANSMITQ) {
-        ScopedSpinLock ringbuffer_lock(m_control_transmit_buffer->lock());
+        ScopedSpinlock ringbuffer_lock(m_control_transmit_buffer->lock());
         auto& queue = get_queue(CONTROL_TRANSMITQ);
-        ScopedSpinLock queue_lock(queue.lock());
+        ScopedSpinlock queue_lock(queue.lock());
         size_t used;
         VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
         auto number_of_messages = 0;
@@ -112,7 +112,7 @@ void VirtIOConsole::setup_multiport()
     m_control_transmit_buffer = make<Memory::RingBuffer>("VirtIOConsole control transmit queue", CONTROL_BUFFER_SIZE);
 
     auto& queue = get_queue(CONTROL_RECEIVEQ);
-    ScopedSpinLock queue_lock(queue.lock());
+    ScopedSpinlock queue_lock(queue.lock());
     VirtIOQueueChain chain(queue);
     auto offset = 0ul;
 
@@ -184,7 +184,7 @@ void VirtIOConsole::process_control_message(ControlMessage message)
 }
 void VirtIOConsole::write_control_message(ControlMessage message)
 {
-    ScopedSpinLock ringbuffer_lock(m_control_transmit_buffer->lock());
+    ScopedSpinlock ringbuffer_lock(m_control_transmit_buffer->lock());
 
     PhysicalAddress start_of_chunk;
     size_t length_of_chunk;
@@ -197,7 +197,7 @@ void VirtIOConsole::write_control_message(ControlMessage message)
     }
 
     auto& queue = get_queue(CONTROL_TRANSMITQ);
-    ScopedSpinLock queue_lock(queue.lock());
+    ScopedSpinlock queue_lock(queue.lock());
     VirtIOQueueChain chain(queue);
 
     bool did_add_buffer = chain.add_buffer_to_chain(start_of_chunk, length_of_chunk, BufferType::DeviceReadable);

+ 9 - 9
Kernel/Bus/VirtIO/VirtIOConsolePort.cpp

@@ -27,7 +27,7 @@ VirtIOConsolePort::VirtIOConsolePort(unsigned port, VirtIOConsole& console)
 void VirtIOConsolePort::init_receive_buffer()
 {
     auto& queue = m_console.get_queue(m_receive_queue);
-    ScopedSpinLock queue_lock(queue.lock());
+    ScopedSpinlock queue_lock(queue.lock());
     VirtIOQueueChain chain(queue);
 
     auto buffer_start = m_receive_buffer->start_of_region();
@@ -42,11 +42,11 @@ void VirtIOConsolePort::handle_queue_update(Badge<VirtIOConsole>, u16 queue_inde
     VERIFY(queue_index == m_transmit_queue || queue_index == m_receive_queue);
     if (queue_index == m_receive_queue) {
         auto& queue = m_console.get_queue(m_receive_queue);
-        ScopedSpinLock queue_lock(queue.lock());
+        ScopedSpinlock queue_lock(queue.lock());
         size_t used;
         VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
 
-        ScopedSpinLock ringbuffer_lock(m_receive_buffer->lock());
+        ScopedSpinlock ringbuffer_lock(m_receive_buffer->lock());
         auto used_space = m_receive_buffer->reserve_space(used).value();
         auto remaining_space = m_receive_buffer->bytes_till_end();
 
@@ -65,9 +65,9 @@ void VirtIOConsolePort::handle_queue_update(Badge<VirtIOConsole>, u16 queue_inde
 
         evaluate_block_conditions();
     } else {
-        ScopedSpinLock ringbuffer_lock(m_transmit_buffer->lock());
+        ScopedSpinlock ringbuffer_lock(m_transmit_buffer->lock());
         auto& queue = m_console.get_queue(m_transmit_queue);
-        ScopedSpinLock queue_lock(queue.lock());
+        ScopedSpinlock queue_lock(queue.lock());
         size_t used;
         VirtIOQueueChain popped_chain = queue.pop_used_buffer_chain(used);
         do {
@@ -92,7 +92,7 @@ KResultOr<size_t> VirtIOConsolePort::read(FileDescription& desc, u64, UserOrKern
     if (!size)
         return 0;
 
-    ScopedSpinLock ringbuffer_lock(m_receive_buffer->lock());
+    ScopedSpinlock ringbuffer_lock(m_receive_buffer->lock());
 
     if (!can_read(desc, size))
         return EAGAIN;
@@ -102,7 +102,7 @@ KResultOr<size_t> VirtIOConsolePort::read(FileDescription& desc, u64, UserOrKern
 
     if (m_receive_buffer_exhausted && m_receive_buffer->used_bytes() == 0) {
         auto& queue = m_console.get_queue(m_receive_queue);
-        ScopedSpinLock queue_lock(queue.lock());
+        ScopedSpinlock queue_lock(queue.lock());
         VirtIOQueueChain new_chain(queue);
         new_chain.add_buffer_to_chain(m_receive_buffer->start_of_region(), RINGBUFFER_SIZE, BufferType::DeviceWritable);
         m_console.supply_chain_and_notify(m_receive_queue, new_chain);
@@ -122,9 +122,9 @@ KResultOr<size_t> VirtIOConsolePort::write(FileDescription& desc, u64, const Use
     if (!size)
         return 0;
 
-    ScopedSpinLock ringbuffer_lock(m_transmit_buffer->lock());
+    ScopedSpinlock ringbuffer_lock(m_transmit_buffer->lock());
     auto& queue = m_console.get_queue(m_transmit_queue);
-    ScopedSpinLock queue_lock(queue.lock());
+    ScopedSpinlock queue_lock(queue.lock());
 
     if (!can_write(desc, size))
         return EAGAIN;

+ 2 - 2
Kernel/Bus/VirtIO/VirtIOQueue.cpp

@@ -43,13 +43,13 @@ VirtIOQueue::~VirtIOQueue()
 
 void VirtIOQueue::enable_interrupts()
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     m_driver->flags = 0;
 }
 
 void VirtIOQueue::disable_interrupts()
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     m_driver->flags = 1;
 }
 

+ 3 - 3
Kernel/Bus/VirtIO/VirtIOQueue.h

@@ -6,7 +6,7 @@
 
 #pragma once
 
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/ScatterGatherList.h>
 
@@ -47,7 +47,7 @@ public:
     VirtIOQueueChain pop_used_buffer_chain(size_t& used);
     void discard_used_buffers();
 
-    SpinLock<u8>& lock() { return m_lock; }
+    Spinlock<u8>& lock() { return m_lock; }
 
     bool should_notify() const;
 
@@ -94,7 +94,7 @@ private:
     OwnPtr<VirtIOQueueDriver> m_driver { nullptr };
     OwnPtr<VirtIOQueueDevice> m_device { nullptr };
     OwnPtr<Memory::Region> m_queue_region;
-    SpinLock<u8> m_lock;
+    Spinlock<u8> m_lock;
 
     friend class VirtIOQueueChain;
 };

+ 2 - 2
Kernel/Bus/VirtIO/VirtIORNG.cpp

@@ -44,7 +44,7 @@ void VirtIORNG::handle_queue_update(u16 queue_index)
     size_t available_entropy = 0, used;
     auto& queue = get_queue(REQUESTQ);
     {
-        ScopedSpinLock lock(queue.lock());
+        ScopedSpinlock lock(queue.lock());
         auto chain = queue.pop_used_buffer_chain(used);
         if (chain.is_empty())
             return;
@@ -64,7 +64,7 @@ void VirtIORNG::handle_queue_update(u16 queue_index)
 void VirtIORNG::request_entropy_from_host()
 {
     auto& queue = get_queue(REQUESTQ);
-    ScopedSpinLock lock(queue.lock());
+    ScopedSpinlock lock(queue.lock());
     VirtIOQueueChain chain(queue);
     chain.add_buffer_to_chain(m_entropy_buffer->physical_page(0)->paddr(), PAGE_SIZE, BufferType::DeviceWritable);
     supply_chain_and_notify(REQUESTQ, chain);

+ 3 - 3
Kernel/ConsoleDevice.cpp

@@ -7,7 +7,7 @@
 #include <AK/Singleton.h>
 #include <Kernel/ConsoleDevice.h>
 #include <Kernel/IO.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Sections.h>
 #include <Kernel/kstdio.h>
 
@@ -15,7 +15,7 @@
 #define CONSOLE_OUT_TO_BOCHS_DEBUG_PORT
 
 static Singleton<ConsoleDevice> s_the;
-static Kernel::SpinLock g_console_lock;
+static Kernel::Spinlock g_console_lock;
 
 UNMAP_AFTER_INIT void ConsoleDevice::initialize()
 {
@@ -67,7 +67,7 @@ Kernel::KResultOr<size_t> ConsoleDevice::write(FileDescription&, u64, const Kern
 
 void ConsoleDevice::put_char(char ch)
 {
-    Kernel::ScopedSpinLock lock(g_console_lock);
+    Kernel::ScopedSpinlock lock(g_console_lock);
 #ifdef CONSOLE_OUT_TO_BOCHS_DEBUG_PORT
     IO::out8(IO::BOCHS_DEBUG_PORT, ch);
 #endif

+ 2 - 2
Kernel/CoreDump.cpp

@@ -14,7 +14,7 @@
 #include <Kernel/FileSystem/FileDescription.h>
 #include <Kernel/FileSystem/VirtualFileSystem.h>
 #include <Kernel/KLexicalPath.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/ProcessPagingScope.h>
 #include <Kernel/Process.h>
 #include <Kernel/RTC.h>
@@ -321,7 +321,7 @@ ByteBuffer CoreDump::create_notes_segment_data() const
 
 KResult CoreDump::write()
 {
-    ScopedSpinLock lock(m_process->address_space().get_lock());
+    ScopedSpinlock lock(m_process->address_space().get_lock());
     ProcessPagingScope scope(m_process);
 
     ByteBuffer notes_segment = create_notes_segment_data();

+ 5 - 5
Kernel/Devices/AsyncDeviceRequest.cpp

@@ -18,7 +18,7 @@ AsyncDeviceRequest::AsyncDeviceRequest(Device& device)
 AsyncDeviceRequest::~AsyncDeviceRequest()
 {
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         VERIFY(is_completed_result(m_result));
         VERIFY(m_sub_requests_pending.is_empty());
     }
@@ -63,7 +63,7 @@ auto AsyncDeviceRequest::wait(Time* timeout) -> RequestWaitResult
 
 auto AsyncDeviceRequest::get_request_result() const -> RequestResult
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     return m_result;
 }
 
@@ -74,7 +74,7 @@ void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_r
     VERIFY(sub_request->m_parent_request == nullptr);
     sub_request->m_parent_request = this;
 
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     VERIFY(!is_completed_result(m_result));
     m_sub_requests_pending.append(sub_request);
     if (m_result == Started)
@@ -85,7 +85,7 @@ void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
 {
     bool all_completed;
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         VERIFY(m_result == Started);
 
         if (m_sub_requests_pending.contains(sub_request)) {
@@ -131,7 +131,7 @@ void AsyncDeviceRequest::complete(RequestResult result)
     VERIFY(result == Success || result == Failure || result == MemoryFault);
     ScopedCritical critical;
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         VERIFY(m_result == Started);
         m_result = result;
     }

+ 2 - 2
Kernel/Devices/AsyncDeviceRequest.h

@@ -61,7 +61,7 @@ public:
 
     [[nodiscard]] RequestWaitResult wait(Time* = nullptr);
 
-    void do_start(ScopedSpinLock<SpinLock<u8>>&& requests_lock)
+    void do_start(ScopedSpinlock<Spinlock<u8>>&& requests_lock)
     {
         if (is_completed_result(m_result))
             return;
@@ -150,7 +150,7 @@ private:
     WaitQueue m_queue;
     NonnullRefPtr<Process> m_process;
     void* m_private { nullptr };
-    mutable SpinLock<u8> m_lock;
+    mutable Spinlock<u8> m_lock;
 };
 
 }

+ 1 - 1
Kernel/Devices/Device.cpp

@@ -62,7 +62,7 @@ String Device::absolute_path(const FileDescription&) const
 
 void Device::process_next_queued_request(Badge<AsyncDeviceRequest>, const AsyncDeviceRequest& completed_request)
 {
-    ScopedSpinLock lock(m_requests_lock);
+    ScopedSpinlock lock(m_requests_lock);
     VERIFY(!m_requests.is_empty());
     VERIFY(m_requests.first().ptr() == &completed_request);
     m_requests.remove(m_requests.begin());

+ 2 - 2
Kernel/Devices/Device.h

@@ -52,7 +52,7 @@ public:
     NonnullRefPtr<AsyncRequestType> make_request(Args&&... args)
     {
         auto request = adopt_ref(*new AsyncRequestType(*this, forward<Args>(args)...));
-        ScopedSpinLock lock(m_requests_lock);
+        ScopedSpinlock lock(m_requests_lock);
         bool was_empty = m_requests.is_empty();
         m_requests.append(request);
         if (was_empty)
@@ -73,7 +73,7 @@ private:
     uid_t m_uid { 0 };
     gid_t m_gid { 0 };
 
-    SpinLock<u8> m_requests_lock;
+    Spinlock<u8> m_requests_lock;
     DoublyLinkedList<RefPtr<AsyncDeviceRequest>> m_requests;
 };
 

+ 1 - 1
Kernel/Devices/HID/HIDManagement.h

@@ -15,7 +15,7 @@
 #include <Kernel/API/KeyCode.h>
 #include <Kernel/API/MousePacket.h>
 #include <Kernel/KResult.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/UnixTypes.h>
 #include <LibKeyboard/CharacterMap.h>
 

+ 3 - 3
Kernel/Devices/HID/I8042Controller.cpp

@@ -35,7 +35,7 @@ UNMAP_AFTER_INIT void I8042Controller::detect_devices()
 {
     u8 configuration;
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         // Disable devices
         do_wait_then_write(I8042_STATUS, 0xad);
         do_wait_then_write(I8042_STATUS, 0xa7); // ignored if it doesn't exist
@@ -103,7 +103,7 @@ UNMAP_AFTER_INIT void I8042Controller::detect_devices()
             m_first_port_available = false;
             configuration &= ~1;
             configuration |= 1 << 4;
-            ScopedSpinLock lock(m_lock);
+            ScopedSpinlock lock(m_lock);
             do_wait_then_write(I8042_STATUS, 0x60);
             do_wait_then_write(I8042_BUFFER, configuration);
         }
@@ -116,7 +116,7 @@ UNMAP_AFTER_INIT void I8042Controller::detect_devices()
                 dbgln("I8042: Mouse device failed to initialize, disable");
                 m_second_port_available = false;
                 configuration |= 1 << 5;
-                ScopedSpinLock lock(m_lock);
+                ScopedSpinlock lock(m_lock);
                 do_wait_then_write(I8042_STATUS, 0x60);
                 do_wait_then_write(I8042_BUFFER, configuration);
             }

+ 8 - 8
Kernel/Devices/HID/I8042Controller.h

@@ -9,7 +9,7 @@
 #include <AK/RefCounted.h>
 #include <Kernel/Devices/HID/KeyboardDevice.h>
 #include <Kernel/Devices/HID/MouseDevice.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 
 namespace Kernel {
 
@@ -53,36 +53,36 @@ public:
 
     bool reset_device(HIDDevice::Type device)
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         return do_reset_device(device);
     }
 
     u8 send_command(HIDDevice::Type device, u8 command)
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         return do_send_command(device, command);
     }
     u8 send_command(HIDDevice::Type device, u8 command, u8 data)
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         return do_send_command(device, command, data);
     }
 
     u8 read_from_device(HIDDevice::Type device)
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         return do_read_from_device(device);
     }
 
     void wait_then_write(u8 port, u8 data)
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         do_wait_then_write(port, data);
     }
 
     u8 wait_then_read(u8 port)
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         return do_wait_then_read(port);
     }
 
@@ -105,7 +105,7 @@ private:
     void do_wait_then_write(u8 port, u8 data);
     u8 do_wait_then_read(u8 port);
 
-    SpinLock<u8> m_lock;
+    Spinlock<u8> m_lock;
     bool m_first_port_available { false };
     bool m_second_port_available { false };
     bool m_is_dual_channel { false };

+ 2 - 2
Kernel/Devices/HID/KeyboardDevice.cpp

@@ -251,7 +251,7 @@ void KeyboardDevice::key_state_changed(u8 scan_code, bool pressed)
         HIDManagement::the().m_client->on_key_pressed(event);
 
     {
-        ScopedSpinLock lock(m_queue_lock);
+        ScopedSpinlock lock(m_queue_lock);
         m_queue.enqueue(event);
     }
 
@@ -281,7 +281,7 @@ bool KeyboardDevice::can_read(const FileDescription&, size_t) const
 KResultOr<size_t> KeyboardDevice::read(FileDescription&, u64, UserOrKernelBuffer& buffer, size_t size)
 {
     size_t nread = 0;
-    ScopedSpinLock lock(m_queue_lock);
+    ScopedSpinlock lock(m_queue_lock);
     while (nread < size) {
         if (m_queue.is_empty())
             break;

+ 1 - 1
Kernel/Devices/HID/KeyboardDevice.h

@@ -51,7 +51,7 @@ public:
 
 protected:
     KeyboardDevice();
-    mutable SpinLock<u8> m_queue_lock;
+    mutable Spinlock<u8> m_queue_lock;
     CircularQueue<Event, 16> m_queue;
     // ^CharacterDevice
     virtual StringView class_name() const override { return "KeyboardDevice"; }

+ 2 - 2
Kernel/Devices/HID/MouseDevice.cpp

@@ -20,7 +20,7 @@ MouseDevice::~MouseDevice()
 
 bool MouseDevice::can_read(const FileDescription&, size_t) const
 {
-    ScopedSpinLock lock(m_queue_lock);
+    ScopedSpinlock lock(m_queue_lock);
     return !m_queue.is_empty();
 }
 
@@ -29,7 +29,7 @@ KResultOr<size_t> MouseDevice::read(FileDescription&, u64, UserOrKernelBuffer& b
     VERIFY(size > 0);
     size_t nread = 0;
     size_t remaining_space_in_buffer = static_cast<size_t>(size) - nread;
-    ScopedSpinLock lock(m_queue_lock);
+    ScopedSpinlock lock(m_queue_lock);
     while (!m_queue.is_empty() && remaining_space_in_buffer) {
         auto packet = m_queue.dequeue();
         lock.unlock();

+ 1 - 1
Kernel/Devices/HID/MouseDevice.h

@@ -41,7 +41,7 @@ protected:
     // ^CharacterDevice
     virtual StringView class_name() const override { return "MouseDevice"; }
 
-    mutable SpinLock<u8> m_queue_lock;
+    mutable Spinlock<u8> m_queue_lock;
     CircularQueue<MousePacket, 100> m_queue;
 };
 

+ 1 - 1
Kernel/Devices/HID/PS2MouseDevice.cpp

@@ -60,7 +60,7 @@ void PS2MouseDevice::irq_handle_byte_read(u8 byte)
         m_entropy_source.add_random_event(m_data.dword);
 
         {
-            ScopedSpinLock lock(m_queue_lock);
+            ScopedSpinlock lock(m_queue_lock);
             m_queue.enqueue(parse_data_packet(m_data));
         }
         evaluate_block_conditions();

+ 1 - 1
Kernel/Devices/HID/VMWareMouseDevice.cpp

@@ -36,7 +36,7 @@ void VMWareMouseDevice::irq_handle_byte_read(u8)
     if (mouse_packet.has_value()) {
         m_entropy_source.add_random_event(mouse_packet.value());
         {
-            ScopedSpinLock lock(m_queue_lock);
+            ScopedSpinlock lock(m_queue_lock);
             m_queue.enqueue(mouse_packet.value());
         }
         evaluate_block_conditions();

+ 1 - 1
Kernel/Devices/KCOVDevice.cpp

@@ -84,7 +84,7 @@ KResult KCOVDevice::ioctl(FileDescription&, unsigned request, Userspace<void*> a
         return ENXIO; // This proc hasn't opened the kcov dev yet
     auto kcov_instance = maybe_kcov_instance.value();
 
-    ScopedSpinLock lock(kcov_instance->lock);
+    ScopedSpinlock lock(kcov_instance->lock);
     switch (request) {
     case KCOV_SETBUFSIZE: {
         if (kcov_instance->state >= KCOVInstance::TRACING) {

+ 2 - 2
Kernel/Devices/KCOVInstance.h

@@ -6,7 +6,7 @@
 
 #pragma once
 
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/AnonymousVMObject.h>
 
 namespace Kernel {
@@ -35,7 +35,7 @@ public:
     bool has_buffer() const { return m_buffer != nullptr; }
     void buffer_add_pc(u64 pc);
 
-    SpinLock<u8> lock;
+    Spinlock<u8> lock;
     enum {
         UNUSED = 0,
         OPENED = 1,

+ 2 - 2
Kernel/Devices/SerialDevice.cpp

@@ -59,7 +59,7 @@ KResultOr<size_t> SerialDevice::read(FileDescription&, u64, UserOrKernelBuffer&
     if (!size)
         return 0;
 
-    ScopedSpinLock lock(m_serial_lock);
+    ScopedSpinlock lock(m_serial_lock);
     if (!(get_line_status() & DataReady))
         return 0;
 
@@ -80,7 +80,7 @@ KResultOr<size_t> SerialDevice::write(FileDescription& description, u64, const U
     if (!size)
         return 0;
 
-    ScopedSpinLock lock(m_serial_lock);
+    ScopedSpinlock lock(m_serial_lock);
     if (!can_write(description, size))
         return EAGAIN;
 

+ 1 - 1
Kernel/Devices/SerialDevice.h

@@ -133,7 +133,7 @@ private:
     bool m_break_enable { false };
     u8 m_modem_control { 0 };
     bool m_last_put_char_was_carriage_return { false };
-    SpinLock<u8> m_serial_lock;
+    Spinlock<u8> m_serial_lock;
 };
 
 }

+ 1 - 1
Kernel/FileSystem/File.h

@@ -34,7 +34,7 @@ public:
 
     void unblock()
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         do_unblock([&](auto& b, void* data, bool&) {
             VERIFY(b.blocker_type() == Thread::Blocker::Type::File);
             auto& blocker = static_cast<Thread::FileBlocker&>(b);

+ 2 - 2
Kernel/FileSystem/Inode.cpp

@@ -22,9 +22,9 @@
 
 namespace Kernel {
 
-static Singleton<SpinLockProtected<Inode::AllInstancesList>> s_all_instances;
+static Singleton<SpinlockProtected<Inode::AllInstancesList>> s_all_instances;
 
-SpinLockProtected<Inode::AllInstancesList>& Inode::all_instances()
+SpinlockProtected<Inode::AllInstancesList>& Inode::all_instances()
 {
     return s_all_instances;
 }

+ 1 - 1
Kernel/FileSystem/Inode.h

@@ -135,7 +135,7 @@ private:
 
 public:
     using AllInstancesList = IntrusiveList<Inode, RawPtr<Inode>, &Inode::m_inode_list_node>;
-    static SpinLockProtected<Inode::AllInstancesList>& all_instances();
+    static SpinlockProtected<Inode::AllInstancesList>& all_instances();
 };
 
 }

+ 6 - 6
Kernel/FileSystem/Plan9FileSystem.cpp

@@ -412,7 +412,7 @@ Plan9FS::ReceiveCompletion::~ReceiveCompletion()
 bool Plan9FS::Blocker::unblock(u16 tag)
 {
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         if (m_did_unblock)
             return false;
         m_did_unblock = true;
@@ -428,7 +428,7 @@ bool Plan9FS::Blocker::unblock(u16 tag)
 void Plan9FS::Blocker::not_blocking(bool)
 {
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         if (m_did_unblock)
             return;
     }
@@ -438,7 +438,7 @@ void Plan9FS::Blocker::not_blocking(bool)
 
 bool Plan9FS::Blocker::is_completed() const
 {
-    ScopedSpinLock lock(m_completion->lock);
+    ScopedSpinlock lock(m_completion->lock);
     return m_completion->completed;
 }
 
@@ -470,7 +470,7 @@ void Plan9FS::Plan9FSBlockCondition::unblock_all()
 void Plan9FS::Plan9FSBlockCondition::try_unblock(Plan9FS::Blocker& blocker)
 {
     if (m_fs.is_complete(*blocker.completion())) {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         blocker.unblock(blocker.completion()->tag);
     }
 }
@@ -576,7 +576,7 @@ KResult Plan9FS::read_and_dispatch_one_message()
     auto optional_completion = m_completions.get(header.tag);
     if (optional_completion.has_value()) {
         auto completion = optional_completion.value();
-        ScopedSpinLock lock(completion->lock);
+        ScopedSpinlock lock(completion->lock);
         completion->result = KSuccess;
         completion->message = adopt_own_if_nonnull(new (nothrow) Message { buffer.release_nonnull() });
         completion->completed = true;
@@ -666,7 +666,7 @@ void Plan9FS::thread_main()
 
 void Plan9FS::ensure_thread()
 {
-    ScopedSpinLock lock(m_thread_lock);
+    ScopedSpinlock lock(m_thread_lock);
     if (!m_thread_running.exchange(true, AK::MemoryOrder::memory_order_acq_rel)) {
         Process::create_kernel_process(m_thread, "Plan9FS", [&]() {
             thread_main();

+ 3 - 3
Kernel/FileSystem/Plan9FileSystem.h

@@ -66,11 +66,11 @@ private:
 
     private:
         Plan9FS& m_fs;
-        mutable SpinLock<u8> m_lock;
+        mutable Spinlock<u8> m_lock;
     };
 
     struct ReceiveCompletion : public RefCounted<ReceiveCompletion> {
-        mutable SpinLock<u8> lock;
+        mutable Spinlock<u8> lock;
         bool completed { false };
         const u16 tag;
         OwnPtr<Message> message;
@@ -139,7 +139,7 @@ private:
     Plan9FSBlockCondition m_completion_blocker;
     HashMap<u16, NonnullRefPtr<ReceiveCompletion>> m_completions;
 
-    SpinLock<u8> m_thread_lock;
+    Spinlock<u8> m_thread_lock;
     RefPtr<Thread> m_thread;
     Atomic<bool> m_thread_running { false };
     Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_thread_shutdown { false };

+ 2 - 2
Kernel/FileSystem/SysFSComponent.cpp

@@ -9,12 +9,12 @@
 
 namespace Kernel {
 
-static SpinLock<u8> s_index_lock;
+static Spinlock<u8> s_index_lock;
 static InodeIndex s_next_inode_index { 0 };
 
 static size_t allocate_inode_index()
 {
-    ScopedSpinLock lock(s_index_lock);
+    ScopedSpinlock lock(s_index_lock);
     s_next_inode_index = s_next_inode_index.value() + 1;
     VERIFY(s_next_inode_index > 0);
     return s_next_inode_index.value();

+ 3 - 3
Kernel/Forward.h

@@ -48,7 +48,7 @@ class ProcFSSystemBoolean;
 class ProcFSSystemDirectory;
 class Process;
 class ProcessGroup;
-class RecursiveSpinLock;
+class RecursiveSpinlock;
 class Scheduler;
 class Socket;
 class SysFS;
@@ -84,9 +84,9 @@ class VirtualRangeAllocator;
 }
 
 template<typename BaseType>
-class SpinLock;
+class Spinlock;
 template<typename LockType>
-class ScopedSpinLock;
+class ScopedSpinlock;
 template<typename T>
 class KResultOr;
 

+ 7 - 7
Kernel/FutexQueue.cpp

@@ -39,7 +39,7 @@ bool FutexQueue::should_add_blocker(Thread::Blocker& b, void* data)
 u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function<FutexQueue*()>& get_target_queue, u32 requeue_count, bool& is_empty, bool& is_empty_target)
 {
     is_empty_target = false;
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
 
     dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n_requeue({}, {})", this, wake_count, requeue_count);
 
@@ -75,7 +75,7 @@ u32 FutexQueue::wake_n_requeue(u32 wake_count, const Function<FutexQueue*()>& ge
                 lock.unlock();
                 did_requeue = blockers_to_requeue.size();
 
-                ScopedSpinLock target_lock(target_futex_queue->m_lock);
+                ScopedSpinlock target_lock(target_futex_queue->m_lock);
                 // Now that we have the lock of the target, append the blockers
                 // and notify them that they completed the move
                 for (auto& info : blockers_to_requeue) {
@@ -100,7 +100,7 @@ u32 FutexQueue::wake_n(u32 wake_count, const Optional<u32>& bitset, bool& is_emp
         is_empty = false;
         return 0; // should we assert instead?
     }
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_n({})", this, wake_count);
     u32 did_wake = 0;
     do_unblock([&](Thread::Blocker& b, void* data, bool& stop_iterating) {
@@ -123,7 +123,7 @@ u32 FutexQueue::wake_n(u32 wake_count, const Optional<u32>& bitset, bool& is_emp
 
 u32 FutexQueue::wake_all(bool& is_empty)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     dbgln_if(FUTEXQUEUE_DEBUG, "FutexQueue @ {}: wake_all", this);
     u32 did_wake = 0;
     do_unblock([&](Thread::Blocker& b, void* data, bool&) {
@@ -148,7 +148,7 @@ bool FutexQueue::is_empty_and_no_imminent_waits_locked()
 
 bool FutexQueue::queue_imminent_wait()
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     if (m_was_removed)
         return false;
     m_imminent_waits++;
@@ -157,7 +157,7 @@ bool FutexQueue::queue_imminent_wait()
 
 bool FutexQueue::try_remove()
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     if (m_was_removed)
         return false;
     if (!is_empty_and_no_imminent_waits_locked())
@@ -168,7 +168,7 @@ bool FutexQueue::try_remove()
 
 void FutexQueue::did_remove()
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     VERIFY(m_was_removed);
     VERIFY(is_empty_and_no_imminent_waits_locked());
 }

+ 2 - 2
Kernel/FutexQueue.h

@@ -8,7 +8,7 @@
 
 #include <AK/Atomic.h>
 #include <AK/RefCounted.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/VMObject.h>
 #include <Kernel/Thread.h>
 
@@ -37,7 +37,7 @@ public:
 
     bool is_empty_and_no_imminent_waits()
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         return is_empty_and_no_imminent_waits_locked();
     }
     bool is_empty_and_no_imminent_waits_locked();

+ 2 - 2
Kernel/GlobalProcessExposed.cpp

@@ -474,7 +474,7 @@ private:
             process_object.add("kernel", process.is_kernel_process());
             auto thread_array = process_object.add_array("threads");
             process.for_each_thread([&](const Thread& thread) {
-                ScopedSpinLock locker(thread.get_lock());
+                ScopedSpinlock locker(thread.get_lock());
                 auto thread_object = thread_array.add_object();
 #if LOCK_DEBUG
                 thread_object.add("lock_count", thread.lock_count());
@@ -500,7 +500,7 @@ private:
             });
         };
 
-        ScopedSpinLock lock(g_scheduler_lock);
+        ScopedSpinlock lock(g_scheduler_lock);
         {
             {
                 auto array = json.add_array("processes");

+ 2 - 2
Kernel/Graphics/Bochs/GraphicsAdapter.cpp

@@ -208,7 +208,7 @@ bool BochsGraphicsAdapter::set_y_offset(size_t output_port_index, size_t y_offse
 
 void BochsGraphicsAdapter::enable_consoles()
 {
-    ScopedSpinLock lock(m_console_mode_switch_lock);
+    ScopedSpinlock lock(m_console_mode_switch_lock);
     VERIFY(m_framebuffer_console);
     m_console_enabled = true;
     m_registers->bochs_regs.y_offset = 0;
@@ -218,7 +218,7 @@ void BochsGraphicsAdapter::enable_consoles()
 }
 void BochsGraphicsAdapter::disable_consoles()
 {
-    ScopedSpinLock lock(m_console_mode_switch_lock);
+    ScopedSpinlock lock(m_console_mode_switch_lock);
     VERIFY(m_framebuffer_console);
     VERIFY(m_framebuffer_device);
     m_console_enabled = false;

+ 1 - 1
Kernel/Graphics/Bochs/GraphicsAdapter.h

@@ -60,7 +60,7 @@ private:
     Memory::TypedMapping<BochsDisplayMMIORegisters volatile> m_registers;
     RefPtr<FramebufferDevice> m_framebuffer_device;
     RefPtr<Graphics::GenericFramebufferConsole> m_framebuffer_console;
-    SpinLock<u8> m_console_mode_switch_lock;
+    Spinlock<u8> m_console_mode_switch_lock;
     bool m_console_enabled { false };
     bool m_io_required { false };
 };

+ 4 - 4
Kernel/Graphics/Console/GenericFramebufferConsole.cpp

@@ -224,7 +224,7 @@ void GenericFramebufferConsole::show_cursor()
 
 void GenericFramebufferConsole::clear(size_t x, size_t y, size_t length)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     if (x == 0 && length == max_column()) {
         // if we need to clear the entire row, just clean it with quick memset :)
         auto* offset_in_framebuffer = (u32*)&framebuffer_data()[x * sizeof(u32) * 8 + y * 8 * sizeof(u32) * width()];
@@ -264,19 +264,19 @@ void GenericFramebufferConsole::clear_glyph(size_t x, size_t y)
 
 void GenericFramebufferConsole::enable()
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     memset(framebuffer_data(), 0, height() * width() * sizeof(u32));
     m_enabled.store(true);
 }
 void GenericFramebufferConsole::disable()
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     m_enabled.store(false);
 }
 
 void GenericFramebufferConsole::write(size_t x, size_t y, char ch, Color background, Color foreground, bool critical)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     if (!m_enabled.load())
         return;
 

+ 1 - 1
Kernel/Graphics/Console/GenericFramebufferConsole.h

@@ -47,6 +47,6 @@ protected:
     virtual u8* framebuffer_data() = 0;
     void clear_glyph(size_t x, size_t y);
     size_t m_pitch;
-    mutable SpinLock<u8> m_lock;
+    mutable Spinlock<u8> m_lock;
 };
 }

+ 10 - 10
Kernel/Graphics/Console/TextModeConsole.cpp

@@ -87,8 +87,8 @@ enum VGAColor : u8 {
 
 void TextModeConsole::set_cursor(size_t x, size_t y)
 {
-    ScopedSpinLock main_lock(GraphicsManagement::the().main_vga_lock());
-    ScopedSpinLock lock(m_vga_lock);
+    ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock());
+    ScopedSpinlock lock(m_vga_lock);
     m_cursor_x = x;
     m_cursor_y = y;
     u16 value = m_current_vga_start_address + (y * width() + x);
@@ -99,22 +99,22 @@ void TextModeConsole::set_cursor(size_t x, size_t y)
 }
 void TextModeConsole::hide_cursor()
 {
-    ScopedSpinLock main_lock(GraphicsManagement::the().main_vga_lock());
-    ScopedSpinLock lock(m_vga_lock);
+    ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock());
+    ScopedSpinlock lock(m_vga_lock);
     IO::out8(0x3D4, 0xA);
     IO::out8(0x3D5, 0x20);
 }
 void TextModeConsole::show_cursor()
 {
-    ScopedSpinLock main_lock(GraphicsManagement::the().main_vga_lock());
-    ScopedSpinLock lock(m_vga_lock);
+    ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock());
+    ScopedSpinlock lock(m_vga_lock);
     IO::out8(0x3D4, 0xA);
     IO::out8(0x3D5, 0x20);
 }
 
 void TextModeConsole::clear(size_t x, size_t y, size_t length)
 {
-    ScopedSpinLock lock(m_vga_lock);
+    ScopedSpinlock lock(m_vga_lock);
     auto* buf = (u16*)(m_current_vga_window + (x * 2) + (y * width() * 2));
     for (size_t index = 0; index < length; index++) {
         buf[index] = 0x0720;
@@ -127,12 +127,12 @@ void TextModeConsole::write(size_t x, size_t y, char ch, bool critical)
 
 void TextModeConsole::write(size_t x, size_t y, char ch, Color background, Color foreground, bool critical)
 {
-    ScopedSpinLock lock(m_vga_lock);
+    ScopedSpinlock lock(m_vga_lock);
     // If we are in critical printing mode, we need to handle new lines here
     // because there's no other responsible object to do that in the print call path
     if (critical && (ch == '\r' || ch == '\n')) {
         // Disable hardware VGA cursor
-        ScopedSpinLock main_lock(GraphicsManagement::the().main_vga_lock());
+        ScopedSpinlock main_lock(GraphicsManagement::the().main_vga_lock());
         IO::out8(0x3D4, 0xA);
         IO::out8(0x3D5, 0x20);
 
@@ -162,7 +162,7 @@ void TextModeConsole::clear_vga_row(u16 row)
 
 void TextModeConsole::set_vga_start_row(u16 row)
 {
-    ScopedSpinLock lock(m_vga_lock);
+    ScopedSpinlock lock(m_vga_lock);
     m_vga_start_row = row;
     m_current_vga_start_address = row * width();
     m_current_vga_window = m_current_vga_window + row * width() * bytes_per_base_glyph();

+ 2 - 2
Kernel/Graphics/Console/TextModeConsole.h

@@ -9,7 +9,7 @@
 #include <AK/RefCounted.h>
 #include <AK/Types.h>
 #include <Kernel/Graphics/Console/VGAConsole.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 
 namespace Kernel::Graphics {
 class TextModeConsole final : public VGAConsole {
@@ -39,7 +39,7 @@ private:
 
     explicit TextModeConsole(const VGACompatibleAdapter&);
 
-    mutable SpinLock<u8> m_vga_lock;
+    mutable Spinlock<u8> m_vga_lock;
     u16 m_vga_start_row { 0 };
     u16 m_current_vga_start_address { 0 };
     u8* m_current_vga_window { nullptr };

+ 3 - 3
Kernel/Graphics/FramebufferDevice.cpp

@@ -27,7 +27,7 @@ NonnullRefPtr<FramebufferDevice> FramebufferDevice::create(const GraphicsDevice&
 
 KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescription&, Memory::VirtualRange const& range, u64 offset, int prot, bool shared)
 {
-    ScopedSpinLock lock(m_activation_lock);
+    ScopedSpinlock lock(m_activation_lock);
     REQUIRE_PROMISE(video);
     if (!shared)
         return ENODEV;
@@ -80,7 +80,7 @@ KResultOr<Memory::Region*> FramebufferDevice::mmap(Process& process, FileDescrip
 
 void FramebufferDevice::deactivate_writes()
 {
-    ScopedSpinLock lock(m_activation_lock);
+    ScopedSpinlock lock(m_activation_lock);
     if (!m_userspace_framebuffer_region)
         return;
     memcpy(m_swapped_framebuffer_region->vaddr().as_ptr(), m_real_framebuffer_region->vaddr().as_ptr(), Memory::page_round_up(framebuffer_size_in_bytes()));
@@ -91,7 +91,7 @@ void FramebufferDevice::deactivate_writes()
 }
 void FramebufferDevice::activate_writes()
 {
-    ScopedSpinLock lock(m_activation_lock);
+    ScopedSpinlock lock(m_activation_lock);
     if (!m_userspace_framebuffer_region || !m_real_framebuffer_vmobject)
         return;
     // restore the image we had in the void area

+ 2 - 2
Kernel/Graphics/FramebufferDevice.h

@@ -11,7 +11,7 @@
 #include <AK/Types.h>
 #include <Kernel/Devices/BlockDevice.h>
 #include <Kernel/Graphics/GraphicsDevice.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/AnonymousVMObject.h>
 #include <Kernel/PhysicalAddress.h>
 
@@ -53,7 +53,7 @@ private:
     size_t m_framebuffer_width { 0 };
     size_t m_framebuffer_height { 0 };
 
-    SpinLock<u8> m_activation_lock;
+    Spinlock<u8> m_activation_lock;
 
     RefPtr<Memory::AnonymousVMObject> m_real_framebuffer_vmobject;
     RefPtr<Memory::AnonymousVMObject> m_swapped_framebuffer_vmobject;

+ 2 - 2
Kernel/Graphics/GraphicsManagement.h

@@ -40,7 +40,7 @@ public:
     bool framebuffer_devices_allowed() const { return m_framebuffer_devices_allowed; }
     bool framebuffer_devices_exist() const;
 
-    SpinLock<u8>& main_vga_lock() { return m_main_vga_lock; }
+    Spinlock<u8>& main_vga_lock() { return m_main_vga_lock; }
     RefPtr<Graphics::Console> console() const { return m_console; }
 
     void deactivate_graphical_mode();
@@ -56,7 +56,7 @@ private:
     unsigned m_current_minor_number { 0 };
     const bool m_framebuffer_devices_allowed;
 
-    SpinLock<u8> m_main_vga_lock;
+    Spinlock<u8> m_main_vga_lock;
 };
 
 }

+ 6 - 6
Kernel/Graphics/Intel/NativeGraphicsAdapter.cpp

@@ -192,7 +192,7 @@ IntelNativeGraphicsAdapter::IntelNativeGraphicsAdapter(PCI::Address address)
     m_registers_region = MM.allocate_kernel_region(PhysicalAddress(PCI::get_BAR0(address)).page_base(), bar0_space_size, "Intel Native Graphics Registers", Memory::Region::Access::ReadWrite);
     PCI::enable_bus_mastering(address);
     {
-        ScopedSpinLock control_lock(m_control_lock);
+        ScopedSpinlock control_lock(m_control_lock);
         set_gmbus_default_rate();
         set_gmbus_pin_pair(GMBusPinPair::DedicatedAnalog);
     }
@@ -277,7 +277,7 @@ void IntelNativeGraphicsAdapter::write_to_register(IntelGraphics::RegisterIndex
 {
     VERIFY(m_control_lock.is_locked());
     VERIFY(m_registers_region);
-    ScopedSpinLock lock(m_registers_lock);
+    ScopedSpinlock lock(m_registers_lock);
     dbgln_if(INTEL_GRAPHICS_DEBUG, "Intel Graphics {}: Write to {} value of {:x}", pci_address(), convert_register_index_to_string(index), value);
     auto* reg = (volatile u32*)m_registers_region->vaddr().offset(index).as_ptr();
     *reg = value;
@@ -286,7 +286,7 @@ u32 IntelNativeGraphicsAdapter::read_from_register(IntelGraphics::RegisterIndex
 {
     VERIFY(m_control_lock.is_locked());
     VERIFY(m_registers_region);
-    ScopedSpinLock lock(m_registers_lock);
+    ScopedSpinlock lock(m_registers_lock);
     auto* reg = (volatile u32*)m_registers_region->vaddr().offset(index).as_ptr();
     u32 value = *reg;
     dbgln_if(INTEL_GRAPHICS_DEBUG, "Intel Graphics {}: Read from {} value of {:x}", pci_address(), convert_register_index_to_string(index), value);
@@ -373,7 +373,7 @@ void IntelNativeGraphicsAdapter::gmbus_read(unsigned address, u8* buf, size_t le
 
 void IntelNativeGraphicsAdapter::gmbus_read_edid()
 {
-    ScopedSpinLock control_lock(m_control_lock);
+    ScopedSpinlock control_lock(m_control_lock);
     gmbus_write(DDC2_I2C_ADDRESS, 0);
     gmbus_read(DDC2_I2C_ADDRESS, (u8*)&m_crt_edid, sizeof(Graphics::VideoInfoBlock));
 }
@@ -409,8 +409,8 @@ void IntelNativeGraphicsAdapter::enable_output(PhysicalAddress fb_address, size_
 
 bool IntelNativeGraphicsAdapter::set_crt_resolution(size_t width, size_t height)
 {
-    ScopedSpinLock control_lock(m_control_lock);
-    ScopedSpinLock modeset_lock(m_modeset_lock);
+    ScopedSpinlock control_lock(m_control_lock);
+    ScopedSpinlock modeset_lock(m_modeset_lock);
     if (!is_resolution_valid(width, height)) {
         return false;
     }

+ 3 - 3
Kernel/Graphics/Intel/NativeGraphicsAdapter.h

@@ -161,9 +161,9 @@ private:
 
     Optional<PLLSettings> create_pll_settings(u64 target_frequency, u64 reference_clock, const PLLMaxSettings&);
 
-    SpinLock<u8> m_control_lock;
-    SpinLock<u8> m_modeset_lock;
-    mutable SpinLock<u8> m_registers_lock;
+    Spinlock<u8> m_control_lock;
+    Spinlock<u8> m_modeset_lock;
+    mutable Spinlock<u8> m_registers_lock;
 
     Graphics::VideoInfoBlock m_crt_edid;
     const PhysicalAddress m_registers;

+ 2 - 2
Kernel/Graphics/VirtIOGPU/GPU.cpp

@@ -81,7 +81,7 @@ void GPU::handle_queue_update(u16 queue_index)
     VERIFY(queue_index == CONTROLQ);
 
     auto& queue = get_queue(CONTROLQ);
-    ScopedSpinLock queue_lock(queue.lock());
+    ScopedSpinlock queue_lock(queue.lock());
     queue.discard_used_buffers();
     m_outstanding_request.wake_all();
 }
@@ -242,7 +242,7 @@ void GPU::synchronous_virtio_gpu_command(PhysicalAddress buffer_start, size_t re
     VERIFY(m_outstanding_request.is_empty());
     auto& queue = get_queue(CONTROLQ);
     {
-        ScopedSpinLock lock(queue.lock());
+        ScopedSpinlock lock(queue.lock());
         VirtIOQueueChain chain { queue };
         chain.add_buffer_to_chain(buffer_start, request_size, BufferType::DeviceReadable);
         chain.add_buffer_to_chain(buffer_start.offset(request_size), response_size, BufferType::DeviceWritable);

+ 7 - 7
Kernel/Heap/kmalloc.cpp

@@ -16,7 +16,7 @@
 #include <Kernel/Heap/Heap.h>
 #include <Kernel/Heap/kmalloc.h>
 #include <Kernel/KSyms.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Panic.h>
 #include <Kernel/PerformanceManager.h>
@@ -31,7 +31,7 @@ namespace std {
 const nothrow_t nothrow;
 }
 
-static RecursiveSpinLock s_lock; // needs to be recursive because of dump_backtrace()
+static RecursiveSpinlock s_lock; // needs to be recursive because of dump_backtrace()
 
 static void kmalloc_allocate_backup_memory();
 
@@ -136,7 +136,7 @@ struct KmallocGlobalHeap {
                             // onto the region. Unless we already used the backup
                             // memory, in which case we want to use the region as the
                             // new backup.
-                            ScopedSpinLock lock(s_lock);
+                            ScopedSpinlock lock(s_lock);
                             if (!m_global_heap.m_backup_memory) {
                                 if constexpr (KMALLOC_DEBUG) {
                                     dmesgln("kmalloc: Queued memory region at {}, bytes: {} will be used as new backup", region->vaddr(), region->size());
@@ -235,7 +235,7 @@ void* kmalloc_eternal(size_t size)
 
     size = round_up_to_power_of_two(size, sizeof(void*));
 
-    ScopedSpinLock lock(s_lock);
+    ScopedSpinlock lock(s_lock);
     void* ptr = s_next_eternal_ptr;
     s_next_eternal_ptr += size;
     VERIFY(s_next_eternal_ptr < s_end_of_eternal_range);
@@ -246,7 +246,7 @@ void* kmalloc_eternal(size_t size)
 void* kmalloc(size_t size)
 {
     kmalloc_verify_nospinlock_held();
-    ScopedSpinLock lock(s_lock);
+    ScopedSpinlock lock(s_lock);
     ++g_kmalloc_call_count;
 
     if (g_dump_kmalloc_stacks && Kernel::g_kernel_symbols_available) {
@@ -277,7 +277,7 @@ void kfree(void* ptr)
         return;
 
     kmalloc_verify_nospinlock_held();
-    ScopedSpinLock lock(s_lock);
+    ScopedSpinlock lock(s_lock);
     ++g_kfree_call_count;
     ++g_nested_kfree_calls;
 
@@ -375,7 +375,7 @@ void operator delete[](void* ptr, size_t size) noexcept
 
 void get_kmalloc_stats(kmalloc_stats& stats)
 {
-    ScopedSpinLock lock(s_lock);
+    ScopedSpinlock lock(s_lock);
     stats.bytes_allocated = g_kmalloc_global->m_heap.allocated_bytes();
     stats.bytes_free = g_kmalloc_global->m_heap.free_bytes() + g_kmalloc_global->backup_memory_bytes();
     stats.bytes_eternal = g_kmalloc_bytes_eternal;

+ 1 - 1
Kernel/Library/ListedRefCounted.h

@@ -11,7 +11,7 @@
 namespace Kernel {
 
 // ListedRefCounted<T> is a slot-in replacement for RefCounted<T> to use in classes
-// that add themselves to a SpinLockProtected<IntrusiveList> when constructed.
+// that add themselves to a SpinlockProtected<IntrusiveList> when constructed.
 // The custom unref() implementation here ensures that the the list is locked during
 // unref(), and that the T is removed from the list before ~T() is invoked.
 

+ 6 - 6
Kernel/Locking/Mutex.cpp

@@ -8,7 +8,7 @@
 #include <Kernel/KSyms.h>
 #include <Kernel/Locking/LockLocation.h>
 #include <Kernel/Locking/Mutex.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Thread.h>
 
 namespace Kernel {
@@ -21,7 +21,7 @@ void Mutex::lock(Mode mode, [[maybe_unused]] LockLocation const& location)
     VERIFY(mode != Mode::Unlocked);
     auto current_thread = Thread::current();
 
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     bool did_block = false;
     Mode current_mode = m_mode;
     switch (current_mode) {
@@ -145,7 +145,7 @@ void Mutex::unlock()
     // and also from within critical sections!
     VERIFY(!Processor::current().in_irq());
     auto current_thread = Thread::current();
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     Mode current_mode = m_mode;
     if constexpr (LOCK_TRACE_DEBUG) {
         if (current_mode == Mode::Shared)
@@ -196,7 +196,7 @@ void Mutex::unlock()
     }
 }
 
-void Mutex::block(Thread& current_thread, Mode mode, ScopedSpinLock<SpinLock<u8>>& lock, u32 requested_locks)
+void Mutex::block(Thread& current_thread, Mode mode, ScopedSpinlock<Spinlock<u8>>& lock, u32 requested_locks)
 {
     auto& blocked_thread_list = thread_list_for_mode(mode);
     VERIFY(!blocked_thread_list.contains(current_thread));
@@ -255,7 +255,7 @@ auto Mutex::force_unlock_if_locked(u32& lock_count_to_restore) -> Mode
     // and also from within critical sections!
     VERIFY(!Processor::current().in_irq());
     auto current_thread = Thread::current();
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     auto current_mode = m_mode;
     switch (current_mode) {
     case Mode::Exclusive: {
@@ -319,7 +319,7 @@ void Mutex::restore_lock(Mode mode, u32 lock_count, [[maybe_unused]] LockLocatio
     VERIFY(!Processor::current().in_irq());
     auto current_thread = Thread::current();
     bool did_block = false;
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     switch (mode) {
     case Mode::Exclusive: {
         auto previous_mode = m_mode;

+ 4 - 4
Kernel/Locking/Mutex.h

@@ -39,12 +39,12 @@ public:
     [[nodiscard]] Mode force_unlock_if_locked(u32&);
     [[nodiscard]] bool is_locked() const
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         return m_mode != Mode::Unlocked;
     }
     [[nodiscard]] bool own_lock() const
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         if (m_mode == Mode::Exclusive)
             return m_holder == Thread::current();
         if (m_mode == Mode::Shared)
@@ -77,7 +77,7 @@ private:
         return mode == Mode::Exclusive ? m_blocked_threads_list_exclusive : m_blocked_threads_list_shared;
     }
 
-    void block(Thread&, Mode, ScopedSpinLock<SpinLock<u8>>&, u32);
+    void block(Thread&, Mode, ScopedSpinlock<Spinlock<u8>>&, u32);
     void unblock_waiters(Mode);
 
     const char* m_name { nullptr };
@@ -98,7 +98,7 @@ private:
     BlockedThreadList m_blocked_threads_list_exclusive;
     BlockedThreadList m_blocked_threads_list_shared;
 
-    mutable SpinLock<u8> m_lock;
+    mutable Spinlock<u8> m_lock;
 };
 
 class MutexLocker {

+ 15 - 15
Kernel/Locking/SpinLock.h → Kernel/Locking/Spinlock.h

@@ -14,12 +14,12 @@
 namespace Kernel {
 
 template<typename BaseType = u32>
-class SpinLock {
-    AK_MAKE_NONCOPYABLE(SpinLock);
-    AK_MAKE_NONMOVABLE(SpinLock);
+class Spinlock {
+    AK_MAKE_NONCOPYABLE(Spinlock);
+    AK_MAKE_NONMOVABLE(Spinlock);
 
 public:
-    SpinLock() = default;
+    Spinlock() = default;
 
     ALWAYS_INLINE u32 lock()
     {
@@ -57,12 +57,12 @@ private:
     Atomic<BaseType> m_lock { 0 };
 };
 
-class RecursiveSpinLock {
-    AK_MAKE_NONCOPYABLE(RecursiveSpinLock);
-    AK_MAKE_NONMOVABLE(RecursiveSpinLock);
+class RecursiveSpinlock {
+    AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
+    AK_MAKE_NONMOVABLE(RecursiveSpinlock);
 
 public:
-    RecursiveSpinLock() = default;
+    RecursiveSpinlock() = default;
 
     ALWAYS_INLINE u32 lock()
     {
@@ -116,15 +116,15 @@ private:
 };
 
 template<typename LockType>
-class [[nodiscard]] ScopedSpinLock {
+class [[nodiscard]] ScopedSpinlock {
 
-    AK_MAKE_NONCOPYABLE(ScopedSpinLock);
+    AK_MAKE_NONCOPYABLE(ScopedSpinlock);
 
 public:
-    ScopedSpinLock() = delete;
-    ScopedSpinLock& operator=(ScopedSpinLock&&) = delete;
+    ScopedSpinlock() = delete;
+    ScopedSpinlock& operator=(ScopedSpinlock&&) = delete;
 
-    ScopedSpinLock(LockType& lock)
+    ScopedSpinlock(LockType& lock)
         : m_lock(&lock)
     {
         VERIFY(m_lock);
@@ -132,7 +132,7 @@ public:
         m_have_lock = true;
     }
 
-    ScopedSpinLock(ScopedSpinLock&& from)
+    ScopedSpinlock(ScopedSpinlock&& from)
         : m_lock(from.m_lock)
         , m_prev_flags(from.m_prev_flags)
         , m_have_lock(from.m_have_lock)
@@ -142,7 +142,7 @@ public:
         from.m_have_lock = false;
     }
 
-    ~ScopedSpinLock()
+    ~ScopedSpinlock()
     {
         if (m_lock && m_have_lock) {
             m_lock->unlock(m_prev_flags);
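
After the rename, call sites are unchanged apart from the spelling; a minimal hedged sketch (the Counter class is hypothetical):

    // ScopedSpinlock saves the interrupt flags returned by lock() and restores
    // them in the destructor, so the guard nests safely in IRQ-sensitive code.
    class Counter {
    public:
        u32 increment()
        {
            ScopedSpinlock lock(m_lock); // deduced as ScopedSpinlock<Spinlock<u8>>
            return ++m_value;
        }

    private:
        Spinlock<u8> m_lock;
        u32 m_value { 0 };
    };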

+ 8 - 8
Kernel/Locking/SpinLockProtected.h → Kernel/Locking/SpinlockProtected.h

@@ -6,14 +6,14 @@
 
 #pragma once
 
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 
 namespace Kernel {
 
 template<typename T>
-class SpinLockProtected {
-    AK_MAKE_NONCOPYABLE(SpinLockProtected);
-    AK_MAKE_NONMOVABLE(SpinLockProtected);
+class SpinlockProtected {
+    AK_MAKE_NONCOPYABLE(SpinlockProtected);
+    AK_MAKE_NONMOVABLE(SpinlockProtected);
 
 private:
     template<typename U>
@@ -22,7 +22,7 @@ private:
         AK_MAKE_NONMOVABLE(Locked);
 
     public:
-        Locked(U& value, RecursiveSpinLock& spinlock)
+        Locked(U& value, RecursiveSpinlock& spinlock)
             : m_value(value)
             , m_locker(spinlock)
         {
@@ -39,14 +39,14 @@ private:
 
     private:
         U& m_value;
-        ScopedSpinLock<RecursiveSpinLock> m_locker;
+        ScopedSpinlock<RecursiveSpinlock> m_locker;
     };
 
     auto lock_const() const { return Locked<T const>(m_value, m_spinlock); }
     auto lock_mutable() { return Locked<T>(m_value, m_spinlock); }
 
 public:
-    SpinLockProtected() = default;
+    SpinlockProtected() = default;
 
     template<typename Callback>
     decltype(auto) with(Callback callback) const
@@ -82,7 +82,7 @@ public:
 
 private:
     T m_value;
-    RecursiveSpinLock mutable m_spinlock;
+    RecursiveSpinlock mutable m_spinlock;
 };
 
 }
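
Usage stays callback-shaped: the protected value is only reachable through with(), which holds the RecursiveSpinlock for the duration of the lambda. A small sketch under that assumption (names hypothetical):

    static SpinlockProtected<Vector<int>> s_recent_values;

    void record_value(int value)
    {
        s_recent_values.with([&](auto& values) { values.append(value); });
    }

    size_t recorded_count()
    {
        // The const overload forwards the lambda's result through decltype(auto).
        return s_recent_values.with([](auto const& values) { return values.size(); });
    }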

+ 15 - 15
Kernel/Memory/AddressSpace.cpp

@@ -5,7 +5,7 @@
  * SPDX-License-Identifier: BSD-2-Clause
  */
 
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/AddressSpace.h>
 #include <Kernel/Memory/AnonymousVMObject.h>
 #include <Kernel/Memory/InodeVMObject.h>
@@ -223,7 +223,7 @@ void AddressSpace::deallocate_region(Region& region)
 
 NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
 
     if (m_region_lookup_cache.region.unsafe_ptr() == &region)
         m_region_lookup_cache.region = nullptr;
@@ -235,7 +235,7 @@ NonnullOwnPtr<Region> AddressSpace::take_region(Region& region)
 
 Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     if (m_region_lookup_cache.range.has_value() && m_region_lookup_cache.range.value() == range && m_region_lookup_cache.region)
         return m_region_lookup_cache.region.unsafe_ptr();
 
@@ -253,7 +253,7 @@ Region* AddressSpace::find_region_from_range(VirtualRange const& range)
 
 Region* AddressSpace::find_region_containing(VirtualRange const& range)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     auto candidate = m_regions.find_largest_not_above(range.base().get());
     if (!candidate)
         return nullptr;
@@ -265,7 +265,7 @@ Vector<Region*> AddressSpace::find_regions_intersecting(VirtualRange const& rang
     Vector<Region*> regions = {};
     size_t total_size_collected = 0;
 
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
 
     auto found_region = m_regions.find_largest_not_above(range.base().get());
     if (!found_region)
@@ -286,7 +286,7 @@ Vector<Region*> AddressSpace::find_regions_intersecting(VirtualRange const& rang
 Region* AddressSpace::add_region(NonnullOwnPtr<Region> region)
 {
     auto* ptr = region.ptr();
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     auto success = m_regions.try_insert(region->vaddr().get(), move(region));
     return success ? ptr : nullptr;
 }
@@ -324,7 +324,7 @@ void AddressSpace::dump_regions()
     dbgln("BEGIN{}         END{}        SIZE{}       ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
 
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
 
     for (auto& sorted_region : m_regions) {
         auto& region = *sorted_region;
@@ -342,13 +342,13 @@ void AddressSpace::dump_regions()
 
 void AddressSpace::remove_all_regions(Badge<Process>)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     m_regions.clear();
 }
 
 size_t AddressSpace::amount_dirty_private() const
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     // FIXME: This gets a bit more complicated for Regions sharing the same underlying VMObject.
     //        The main issue I'm thinking of is when the VMObject has physical pages that none of the Regions are mapping.
     //        That's probably a situation that needs to be looked at in general.
@@ -362,7 +362,7 @@ size_t AddressSpace::amount_dirty_private() const
 
 size_t AddressSpace::amount_clean_inode() const
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     HashTable<const InodeVMObject*> vmobjects;
     for (auto& region : m_regions) {
         if (region->vmobject().is_inode())
@@ -376,7 +376,7 @@ size_t AddressSpace::amount_clean_inode() const
 
 size_t AddressSpace::amount_virtual() const
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     size_t amount = 0;
     for (auto& region : m_regions) {
         amount += region->size();
@@ -386,7 +386,7 @@ size_t AddressSpace::amount_virtual() const
 
 size_t AddressSpace::amount_resident() const
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     // FIXME: This will double count if multiple regions use the same physical page.
     size_t amount = 0;
     for (auto& region : m_regions) {
@@ -397,7 +397,7 @@ size_t AddressSpace::amount_resident() const
 
 size_t AddressSpace::amount_shared() const
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     // FIXME: This will double count if multiple regions use the same physical page.
     // FIXME: It doesn't work at the moment, since it relies on PhysicalPage ref counts,
     //        and each PhysicalPage is only reffed by its VMObject. This needs to be refactored
@@ -411,7 +411,7 @@ size_t AddressSpace::amount_shared() const
 
 size_t AddressSpace::amount_purgeable_volatile() const
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     size_t amount = 0;
     for (auto& region : m_regions) {
         if (!region->vmobject().is_anonymous())
@@ -425,7 +425,7 @@ size_t AddressSpace::amount_purgeable_volatile() const
 
 size_t AddressSpace::amount_purgeable_nonvolatile() const
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     size_t amount = 0;
     for (auto& region : m_regions) {
         if (!region->vmobject().is_anonymous())

+ 2 - 2
Kernel/Memory/AddressSpace.h

@@ -55,7 +55,7 @@ public:
 
     void remove_all_regions(Badge<Process>);
 
-    RecursiveSpinLock& get_lock() const { return m_lock; }
+    RecursiveSpinlock& get_lock() const { return m_lock; }
 
     size_t amount_clean_inode() const;
     size_t amount_dirty_private() const;
@@ -68,7 +68,7 @@ public:
 private:
     explicit AddressSpace(NonnullRefPtr<PageDirectory>);
 
-    mutable RecursiveSpinLock m_lock;
+    mutable RecursiveSpinlock m_lock;
 
     RefPtr<PageDirectory> m_page_directory;
 

+ 6 - 6
Kernel/Memory/AnonymousVMObject.cpp

@@ -16,7 +16,7 @@ namespace Kernel::Memory {
 KResultOr<NonnullRefPtr<VMObject>> AnonymousVMObject::try_clone()
 {
     // We need to acquire our lock so we copy a sane state
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
 
     if (is_purgeable() && is_volatile()) {
         // If this object is purgeable+volatile, create a new zero-filled purgeable+volatile
@@ -178,7 +178,7 @@ AnonymousVMObject::~AnonymousVMObject()
 
 size_t AnonymousVMObject::purge()
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
 
     if (!is_purgeable() || !is_volatile())
         return 0;
@@ -206,7 +206,7 @@ KResult AnonymousVMObject::set_volatile(bool is_volatile, bool& was_purged)
 {
     VERIFY(is_purgeable());
 
-    ScopedSpinLock locker(m_lock);
+    ScopedSpinlock locker(m_lock);
 
     was_purged = m_was_purged;
     if (m_volatile == is_volatile)
@@ -306,7 +306,7 @@ size_t AnonymousVMObject::cow_pages() const
 PageFaultResponse AnonymousVMObject::handle_cow_fault(size_t page_index, VirtualAddress vaddr)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
 
     if (is_volatile()) {
         // A COW fault in a volatile region? Userspace is writing to volatile memory, this is a bug. Crash.
@@ -379,13 +379,13 @@ AnonymousVMObject::SharedCommittedCowPages::~SharedCommittedCowPages()
 
 NonnullRefPtr<PhysicalPage> AnonymousVMObject::SharedCommittedCowPages::take_one()
 {
-    ScopedSpinLock locker(m_lock);
+    ScopedSpinlock locker(m_lock);
     return m_committed_pages.take_one();
 }
 
 void AnonymousVMObject::SharedCommittedCowPages::uncommit_one()
 {
-    ScopedSpinLock locker(m_lock);
+    ScopedSpinlock locker(m_lock);
     m_committed_pages.uncommit_one();
 }
 

+ 1 - 1
Kernel/Memory/AnonymousVMObject.h

@@ -76,7 +76,7 @@ private:
         void uncommit_one();
 
     public:
-        SpinLock<u8> m_lock;
+        Spinlock<u8> m_lock;
         CommittedPhysicalPageSet m_committed_pages;
     };
 

+ 1 - 1
Kernel/Memory/InodeVMObject.cpp

@@ -52,7 +52,7 @@ size_t InodeVMObject::amount_dirty() const
 
 int InodeVMObject::release_all_clean_pages()
 {
-    ScopedSpinLock locker(m_lock);
+    ScopedSpinlock locker(m_lock);
 
     int count = 0;
     for (size_t i = 0; i < page_count(); ++i) {

+ 33 - 33
Kernel/Memory/MemoryManager.cpp

@@ -47,7 +47,7 @@ namespace Kernel::Memory {
 // run. If we do, then Singleton would get re-initialized, causing
 // the memory manager to be initialized twice!
 static MemoryManager* s_the;
-RecursiveSpinLock s_mm_lock;
+RecursiveSpinlock s_mm_lock;
 
 MemoryManager& MemoryManager::the()
 {
@@ -63,7 +63,7 @@ UNMAP_AFTER_INIT MemoryManager::MemoryManager()
 {
     s_the = this;
 
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     parse_memory_map();
     write_cr3(kernel_page_directory().cr3());
     protect_kernel_image();
@@ -88,7 +88,7 @@ UNMAP_AFTER_INIT MemoryManager::~MemoryManager()
 
 UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
 {
-    ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+    ScopedSpinlock page_lock(kernel_page_directory().get_lock());
     // Disable writing to the kernel text and rodata segments.
     for (auto i = start_of_kernel_text; i < start_of_kernel_data; i += PAGE_SIZE) {
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -105,8 +105,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_kernel_image()
 
 UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
 {
-    ScopedSpinLock page_lock(kernel_page_directory().get_lock());
-    ScopedSpinLock mm_lock(s_mm_lock);
+    ScopedSpinlock page_lock(kernel_page_directory().get_lock());
+    ScopedSpinlock mm_lock(s_mm_lock);
     // Disable writing to the .ro_after_init section
     for (auto i = (FlatPtr)&start_of_ro_after_init; i < (FlatPtr)&end_of_ro_after_init; i += PAGE_SIZE) {
         auto& pte = *ensure_pte(kernel_page_directory(), VirtualAddress(i));
@@ -117,8 +117,8 @@ UNMAP_AFTER_INIT void MemoryManager::protect_readonly_after_init_memory()
 
 void MemoryManager::unmap_text_after_init()
 {
-    ScopedSpinLock page_lock(kernel_page_directory().get_lock());
-    ScopedSpinLock mm_lock(s_mm_lock);
+    ScopedSpinlock page_lock(kernel_page_directory().get_lock());
+    ScopedSpinlock mm_lock(s_mm_lock);
 
     auto start = page_round_down((FlatPtr)&start_of_unmap_after_init);
     auto end = page_round_up((FlatPtr)&end_of_unmap_after_init);
@@ -135,8 +135,8 @@ void MemoryManager::unmap_text_after_init()
 
 void MemoryManager::unmap_ksyms_after_init()
 {
-    ScopedSpinLock mm_lock(s_mm_lock);
-    ScopedSpinLock page_lock(kernel_page_directory().get_lock());
+    ScopedSpinlock mm_lock(s_mm_lock);
+    ScopedSpinlock page_lock(kernel_page_directory().get_lock());
 
     auto start = page_round_down((FlatPtr)start_of_kernel_ksyms);
     auto end = page_round_up((FlatPtr)end_of_kernel_ksyms);
@@ -413,7 +413,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize_physical_pages()
     // try to map the entire region into kernel space so we always have it
     // We can't use ensure_pte here because it would try to allocate a PhysicalPage and we don't have the array
 // mapped yet, so we can't create them
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
 
     // Create page tables at the beginning of m_physical_pages_region, followed by the PhysicalPageEntry array
     auto page_tables_base = m_physical_pages_region->lower();
@@ -612,7 +612,7 @@ UNMAP_AFTER_INIT void MemoryManager::initialize(u32 cpu)
 
 Region* MemoryManager::kernel_region_from_vaddr(VirtualAddress vaddr)
 {
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     for (auto& region : MM.m_kernel_regions) {
         if (region.contains(vaddr))
             return &region;
@@ -628,7 +628,7 @@ Region* MemoryManager::find_user_region_from_vaddr_no_lock(AddressSpace& space,
 
 Region* MemoryManager::find_user_region_from_vaddr(AddressSpace& space, VirtualAddress vaddr)
 {
-    ScopedSpinLock lock(space.get_lock());
+    ScopedSpinlock lock(space.get_lock());
     return find_user_region_from_vaddr_no_lock(space, vaddr);
 }
 
@@ -636,7 +636,7 @@ void MemoryManager::validate_syscall_preconditions(AddressSpace& space, Register
 {
     // We take the space lock once here and then use the no_lock variants
     // to avoid excessive spinlock recursion in this extremely common path.
-    ScopedSpinLock lock(space.get_lock());
+    ScopedSpinlock lock(space.get_lock());
 
     auto unlock_and_handle_crash = [&lock, &regs](const char* description, int signal) {
         lock.unlock();
@@ -702,7 +702,7 @@ PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
 OwnPtr<Region> MemoryManager::allocate_contiguous_kernel_region(size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(kernel_page_directory().get_lock());
+    ScopedSpinlock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -721,7 +721,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(size_t size, StringView nam
     auto maybe_vm_object = AnonymousVMObject::try_create_with_size(size, strategy);
     if (maybe_vm_object.is_error())
         return {};
-    ScopedSpinLock lock(kernel_page_directory().get_lock());
+    ScopedSpinlock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -734,7 +734,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size
     if (maybe_vm_object.is_error())
         return {};
     VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(kernel_page_directory().get_lock());
+    ScopedSpinlock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -755,7 +755,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VirtualRange
 OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmobject, size_t size, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(kernel_page_directory().get_lock());
+    ScopedSpinlock lock(kernel_page_directory().get_lock());
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
     if (!range.has_value())
         return {};
@@ -765,7 +765,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
 Optional<CommittedPhysicalPageSet> MemoryManager::commit_user_physical_pages(size_t page_count)
 {
     VERIFY(page_count > 0);
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     if (m_system_memory_info.user_physical_pages_uncommitted < page_count)
         return {};
 
@@ -778,7 +778,7 @@ void MemoryManager::uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>
 {
     VERIFY(page_count > 0);
 
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     VERIFY(m_system_memory_info.user_physical_pages_committed >= page_count);
 
     m_system_memory_info.user_physical_pages_uncommitted += page_count;
@@ -787,7 +787,7 @@ void MemoryManager::uncommit_user_physical_pages(Badge<CommittedPhysicalPageSet>
 
 void MemoryManager::deallocate_physical_page(PhysicalAddress paddr)
 {
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
 
     // Are we returning a user page?
     for (auto& region : m_user_physical_regions) {
@@ -839,7 +839,7 @@ RefPtr<PhysicalPage> MemoryManager::find_free_user_physical_page(bool committed)
 
 NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page(Badge<CommittedPhysicalPageSet>, ShouldZeroFill should_zero_fill)
 {
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     auto page = find_free_user_physical_page(true);
     if (should_zero_fill == ShouldZeroFill::Yes) {
         auto* ptr = quickmap_page(*page);
@@ -851,7 +851,7 @@ NonnullRefPtr<PhysicalPage> MemoryManager::allocate_committed_user_physical_page
 
 RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill should_zero_fill, bool* did_purge)
 {
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     auto page = find_free_user_physical_page(false);
     bool purged_pages = false;
 
@@ -893,7 +893,7 @@ RefPtr<PhysicalPage> MemoryManager::allocate_user_physical_page(ShouldZeroFill s
 NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_physical_pages(size_t size)
 {
     VERIFY(!(size % PAGE_SIZE));
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     size_t count = ceil_div(size, static_cast<size_t>(PAGE_SIZE));
     auto physical_pages = m_super_physical_region->take_contiguous_free_pages(count);
 
@@ -911,7 +911,7 @@ NonnullRefPtrVector<PhysicalPage> MemoryManager::allocate_contiguous_supervisor_
 
 RefPtr<PhysicalPage> MemoryManager::allocate_supervisor_physical_page()
 {
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     auto page = m_super_physical_region->take_free_page();
 
     if (!page) {
@@ -934,7 +934,7 @@ void MemoryManager::enter_space(AddressSpace& space)
 {
     auto current_thread = Thread::current();
     VERIFY(current_thread != nullptr);
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
 
     current_thread->regs().cr3 = space.page_directory().cr3();
     write_cr3(space.page_directory().cr3());
@@ -1006,7 +1006,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
     VERIFY_INTERRUPTS_DISABLED();
     auto& mm_data = get_data();
     mm_data.m_quickmap_prev_flags = mm_data.m_quickmap_in_use.lock();
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
 
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
     u32 pte_idx = (vaddr.get() - KERNEL_PT1024_BASE) / PAGE_SIZE;
@@ -1025,7 +1025,7 @@ u8* MemoryManager::quickmap_page(PhysicalAddress const& physical_address)
 void MemoryManager::unquickmap_page()
 {
     VERIFY_INTERRUPTS_DISABLED();
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     auto& mm_data = get_data();
     VERIFY(mm_data.m_quickmap_in_use.is_locked());
     VirtualAddress vaddr(KERNEL_QUICKMAP_PER_CPU_BASE + Processor::id() * PAGE_SIZE);
@@ -1049,20 +1049,20 @@ bool MemoryManager::validate_user_stack_no_lock(AddressSpace& space, VirtualAddr
 
 bool MemoryManager::validate_user_stack(AddressSpace& space, VirtualAddress vaddr) const
 {
-    ScopedSpinLock lock(space.get_lock());
+    ScopedSpinlock lock(space.get_lock());
     return validate_user_stack_no_lock(space, vaddr);
 }
 
 void MemoryManager::register_region(Region& region)
 {
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     if (region.is_kernel())
         m_kernel_regions.append(region);
 }
 
 void MemoryManager::unregister_region(Region& region)
 {
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     if (region.is_kernel())
         m_kernel_regions.remove(region);
 }
@@ -1077,7 +1077,7 @@ void MemoryManager::dump_kernel_regions()
 #endif
     dbgln("BEGIN{}         END{}        SIZE{}       ACCESS NAME",
         addr_padding, addr_padding, addr_padding);
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     for (auto& region : m_kernel_regions) {
         dbgln("{:p} -- {:p} {:p} {:c}{:c}{:c}{:c}{:c}{:c} {}",
             region.vaddr().get(),
@@ -1095,8 +1095,8 @@ void MemoryManager::dump_kernel_regions()
 
 void MemoryManager::set_page_writable_direct(VirtualAddress vaddr, bool writable)
 {
-    ScopedSpinLock page_lock(kernel_page_directory().get_lock());
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock page_lock(kernel_page_directory().get_lock());
+    ScopedSpinlock lock(s_mm_lock);
     auto* pte = ensure_pte(kernel_page_directory(), vaddr);
     VERIFY(pte);
     if (pte->is_writable() == writable)
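
Note that the quickmap hunks above use the raw Spinlock API rather than the RAII guard: lock() returns the previous interrupt flags, which the caller must hand back to unlock(). A sketch of that protocol, with hypothetical names:

    static Spinlock<u8> s_state_lock;

    void touch_protected_state()
    {
        u32 prev_flags = s_state_lock.lock(); // disables interrupts, spins until acquired
        // ... mutate the protected state ...
        s_state_lock.unlock(prev_flags); // restores the saved interrupt flags
    }

This mirrors how quickmap_page() stashes the flags in m_quickmap_prev_flags so they can be restored when the mapping is released.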

+ 4 - 4
Kernel/Memory/MemoryManager.h

@@ -14,7 +14,7 @@
 #include <Kernel/Arch/x86/PageFault.h>
 #include <Kernel/Arch/x86/TrapFrame.h>
 #include <Kernel/Forward.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/AllocationStrategy.h>
 #include <Kernel/Memory/PhysicalPage.h>
 #include <Kernel/Memory/PhysicalRegion.h>
@@ -93,14 +93,14 @@ struct PhysicalMemoryRange {
 struct MemoryManagerData {
     static ProcessorSpecificDataID processor_specific_data_id() { return ProcessorSpecificDataID::MemoryManager; }
 
-    SpinLock<u8> m_quickmap_in_use;
+    Spinlock<u8> m_quickmap_in_use;
     u32 m_quickmap_prev_flags;
 
     PhysicalAddress m_last_quickmap_pd;
     PhysicalAddress m_last_quickmap_pt;
 };
 
-extern RecursiveSpinLock s_mm_lock;
+extern RecursiveSpinlock s_mm_lock;
 
 // This class represents a set of committed physical pages.
 // When you ask MemoryManager to commit pages for you, you get one of these in return.
@@ -197,7 +197,7 @@ public:
 
     SystemMemoryInfo get_system_memory_info()
     {
-        ScopedSpinLock lock(s_mm_lock);
+        ScopedSpinlock lock(s_mm_lock);
         return m_system_memory_info;
     }
 

+ 3 - 3
Kernel/Memory/PageDirectory.cpp

@@ -27,7 +27,7 @@ static HashMap<FlatPtr, PageDirectory*>& cr3_map()
 
 RefPtr<PageDirectory> PageDirectory::find_by_cr3(FlatPtr cr3)
 {
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     return cr3_map().get(cr3).value_or({});
 }
 
@@ -60,7 +60,7 @@ RefPtr<PageDirectory> PageDirectory::try_create_for_userspace(VirtualRangeAlloca
     }
 
     // NOTE: Take the MM lock since we need it for quickmap.
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
 
 #if ARCH(X86_64)
     directory->m_pml4t = MM.allocate_user_physical_page();
@@ -159,7 +159,7 @@ UNMAP_AFTER_INIT void PageDirectory::allocate_kernel_directory()
 
 PageDirectory::~PageDirectory()
 {
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock lock(s_mm_lock);
     if (m_space)
         cr3_map().remove(cr3());
 }

+ 2 - 2
Kernel/Memory/PageDirectory.h

@@ -44,7 +44,7 @@ public:
 
     void set_space(Badge<AddressSpace>, AddressSpace& space) { m_space = &space; }
 
-    RecursiveSpinLock& get_lock() { return m_lock; }
+    RecursiveSpinlock& get_lock() { return m_lock; }
 
 private:
     PageDirectory();
@@ -61,7 +61,7 @@ private:
     RefPtr<PhysicalPage> m_directory_pages[4];
 #endif
     HashMap<FlatPtr, NonnullRefPtr<PhysicalPage>> m_page_tables;
-    RecursiveSpinLock m_lock;
+    RecursiveSpinlock m_lock;
 };
 
 }

+ 12 - 12
Kernel/Memory/Region.cpp

@@ -43,8 +43,8 @@ Region::~Region()
     MM.unregister_region(*this);
 
     if (m_page_directory) {
-        ScopedSpinLock page_lock(m_page_directory->get_lock());
-        ScopedSpinLock lock(s_mm_lock);
+        ScopedSpinlock page_lock(m_page_directory->get_lock());
+        ScopedSpinlock lock(s_mm_lock);
         unmap(ShouldDeallocateVirtualRange::Yes);
         VERIFY(!m_page_directory);
     }
@@ -183,7 +183,7 @@ bool Region::map_individual_page_impl(size_t page_index)
     }
 
     // NOTE: We have to take the MM lock for PTEs to stay valid while we use them.
-    ScopedSpinLock mm_locker(s_mm_lock);
+    ScopedSpinlock mm_locker(s_mm_lock);
 
     auto* pte = MM.ensure_pte(*m_page_directory, page_vaddr);
     if (!pte)
@@ -208,12 +208,12 @@ bool Region::map_individual_page_impl(size_t page_index)
 
 bool Region::do_remap_vmobject_page(size_t page_index, bool with_flush)
 {
-    ScopedSpinLock lock(vmobject().m_lock);
+    ScopedSpinlock lock(vmobject().m_lock);
     if (!m_page_directory)
         return true; // not an error, the region may not have mapped it yet
     if (!translate_vmobject_page(page_index))
         return true; // not an error, region doesn't map this page
-    ScopedSpinLock page_lock(m_page_directory->get_lock());
+    ScopedSpinlock page_lock(m_page_directory->get_lock());
     VERIFY(physical_page(page_index));
     bool success = map_individual_page_impl(page_index);
     if (with_flush)
@@ -236,8 +236,8 @@ void Region::unmap(ShouldDeallocateVirtualRange deallocate_range)
 {
     if (!m_page_directory)
         return;
-    ScopedSpinLock page_lock(m_page_directory->get_lock());
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock page_lock(m_page_directory->get_lock());
+    ScopedSpinlock lock(s_mm_lock);
     size_t count = page_count();
     for (size_t i = 0; i < count; ++i) {
         auto vaddr = vaddr_from_page_index(i);
@@ -259,8 +259,8 @@ void Region::set_page_directory(PageDirectory& page_directory)
 
 bool Region::map(PageDirectory& page_directory, ShouldFlushTLB should_flush_tlb)
 {
-    ScopedSpinLock page_lock(page_directory.get_lock());
-    ScopedSpinLock lock(s_mm_lock);
+    ScopedSpinlock page_lock(page_directory.get_lock());
+    ScopedSpinlock lock(s_mm_lock);
 
     // FIXME: Find a better place for this sanity check(?)
     if (is_user() && !is_shared()) {
@@ -338,7 +338,7 @@ PageFaultResponse Region::handle_zero_fault(size_t page_index_in_region)
     auto& page_slot = physical_page_slot(page_index_in_region);
     auto page_index_in_vmobject = translate_to_vmobject_page(page_index_in_region);
 
-    ScopedSpinLock locker(vmobject().m_lock);
+    ScopedSpinlock locker(vmobject().m_lock);
 
     if (!page_slot.is_null() && !page_slot->is_shared_zero_page() && !page_slot->is_lazy_committed_page()) {
         dbgln_if(PAGE_FAULT_DEBUG, "MM: zero_page() but page already present. Fine with me!");
@@ -401,7 +401,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
     auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[page_index_in_vmobject];
 
     {
-        ScopedSpinLock locker(inode_vmobject.m_lock);
+        ScopedSpinlock locker(inode_vmobject.m_lock);
         if (!vmobject_physical_page_entry.is_null()) {
             dbgln_if(PAGE_FAULT_DEBUG, "handle_inode_fault: Page faulted in by someone else before reading, remapping.");
             if (!remap_vmobject_page(page_index_in_vmobject))
@@ -433,7 +433,7 @@ PageFaultResponse Region::handle_inode_fault(size_t page_index_in_region)
         memset(page_buffer + nread, 0, PAGE_SIZE - nread);
     }
 
-    ScopedSpinLock locker(inode_vmobject.m_lock);
+    ScopedSpinlock locker(inode_vmobject.m_lock);
 
     if (!vmobject_physical_page_entry.is_null()) {
         // Someone else faulted in this page while we were reading from the inode.

+ 2 - 2
Kernel/Memory/RingBuffer.h

@@ -23,7 +23,7 @@ public:
     void reclaim_space(PhysicalAddress chunk_start, size_t chunk_size);
     PhysicalAddress start_of_used() const;
 
-    SpinLock<u8>& lock() { return m_lock; }
+    Spinlock<u8>& lock() { return m_lock; }
     size_t used_bytes() const { return m_num_used_bytes; }
     PhysicalAddress start_of_region() const { return m_region->physical_page(0)->paddr(); }
     VirtualAddress vaddr() const { return m_region->vaddr(); }
@@ -31,7 +31,7 @@ public:
 
 private:
     OwnPtr<Memory::Region> m_region;
-    SpinLock<u8> m_lock;
+    Spinlock<u8> m_lock;
     size_t m_start_of_used {};
     size_t m_num_used_bytes {};
     size_t m_capacity_in_bytes {};

+ 2 - 2
Kernel/Memory/VMObject.cpp

@@ -10,9 +10,9 @@
 
 namespace Kernel::Memory {
 
-static Singleton<SpinLockProtected<VMObject::AllInstancesList>> s_all_instances;
+static Singleton<SpinlockProtected<VMObject::AllInstancesList>> s_all_instances;
 
-SpinLockProtected<VMObject::AllInstancesList>& VMObject::all_instances()
+SpinlockProtected<VMObject::AllInstancesList>& VMObject::all_instances()
 {
     return s_all_instances;
 }
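
This Singleton-wrapped SpinlockProtected list is the registry shape that ListedRefCounted builds on, and the ProcessGroup hunk below repeats it. Completing the hypothetical Widget sketch from earlier in the same style:

    static Singleton<SpinlockProtected<Widget::AllInstancesList>> s_all_instances;

    SpinlockProtected<Widget::AllInstancesList>& Widget::all_instances()
    {
        return *s_all_instances;
    }

    // Iteration takes the registry's spinlock for the whole walk:
    void for_each_widget(Function<void(Widget&)> callback)
    {
        Widget::all_instances().with([&](auto& list) {
            for (auto& widget : list)
                callback(widget);
        });
    }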

+ 5 - 5
Kernel/Memory/VMObject.h

@@ -43,13 +43,13 @@ public:
 
     ALWAYS_INLINE void add_region(Region& region)
     {
-        ScopedSpinLock locker(m_lock);
+        ScopedSpinlock locker(m_lock);
         m_regions.append(region);
     }
 
     ALWAYS_INLINE void remove_region(Region& region)
     {
-        ScopedSpinLock locker(m_lock);
+        ScopedSpinlock locker(m_lock);
         m_regions.remove(region);
     }
 
@@ -63,7 +63,7 @@ protected:
     IntrusiveListNode<VMObject> m_list_node;
     FixedArray<RefPtr<PhysicalPage>> m_physical_pages;
 
-    mutable RecursiveSpinLock m_lock;
+    mutable RecursiveSpinlock m_lock;
 
 private:
     VMObject& operator=(VMObject const&) = delete;
@@ -74,13 +74,13 @@ private:
 
 public:
     using AllInstancesList = IntrusiveList<VMObject, RawPtr<VMObject>, &VMObject::m_list_node>;
-    static SpinLockProtected<VMObject::AllInstancesList>& all_instances();
+    static SpinlockProtected<VMObject::AllInstancesList>& all_instances();
 };
 
 template<typename Callback>
 inline void VMObject::for_each_region(Callback callback)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     for (auto& region : m_regions) {
         callback(region);
     }

+ 4 - 4
Kernel/Memory/VirtualRangeAllocator.cpp

@@ -25,7 +25,7 @@ void VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t si
 
 void VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator)
 {
-    ScopedSpinLock lock(parent_allocator.m_lock);
+    ScopedSpinlock lock(parent_allocator.m_lock);
     m_total_range = parent_allocator.m_total_range;
     m_available_ranges.clear();
     for (auto it = parent_allocator.m_available_ranges.begin(); !it.is_end(); ++it) {
@@ -103,7 +103,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_anywhere(size_t size, siz
     if (Checked<size_t>::addition_would_overflow(effective_size, alignment))
         return {};
 
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
 
     for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
         auto& available_range = *it;
@@ -142,7 +142,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress b
         return {};
     }
 
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     for (auto it = m_available_ranges.begin(); !it.is_end(); ++it) {
         auto& available_range = *it;
         if (!available_range.contains(base, size))
@@ -159,7 +159,7 @@ Optional<VirtualRange> VirtualRangeAllocator::allocate_specific(VirtualAddress b
 
 void VirtualRangeAllocator::deallocate(VirtualRange const& range)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     VERIFY(m_total_range.contains(range));
     VERIFY(range.size());
     VERIFY((range.size() % PAGE_SIZE) == 0);

+ 2 - 2
Kernel/Memory/VirtualRangeAllocator.h

@@ -8,7 +8,7 @@
 
 #include <AK/RedBlackTree.h>
 #include <AK/Traits.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/VirtualRange.h>
 
 namespace Kernel::Memory {
@@ -35,7 +35,7 @@ private:
 
     RedBlackTree<FlatPtr, VirtualRange> m_available_ranges;
     VirtualRange m_total_range;
-    mutable SpinLock<u8> m_lock;
+    mutable Spinlock<u8> m_lock;
 };
 
 }

+ 2 - 2
Kernel/Net/Routing.cpp

@@ -34,7 +34,7 @@ public:
             return false;
 
         {
-            ScopedSpinLock lock(m_lock);
+            ScopedSpinlock lock(m_lock);
             if (m_did_unblock)
                 return false;
             m_did_unblock = true;
@@ -97,7 +97,7 @@ void ARPTableBlocker::not_blocking(bool timeout_in_past)
         return table.get(ip_addr());
     });
 
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     if (!m_did_unblock) {
         m_did_unblock = true;
         m_addr = move(addr);

+ 1 - 1
Kernel/PerformanceEventBuffer.cpp

@@ -282,7 +282,7 @@ OwnPtr<PerformanceEventBuffer> PerformanceEventBuffer::try_create_with_size(size
 
 void PerformanceEventBuffer::add_process(const Process& process, ProcessEventType event_type)
 {
-    ScopedSpinLock locker(process.address_space().get_lock());
+    ScopedSpinlock locker(process.address_space().get_lock());
 
     String executable;
     if (process.executable())

+ 11 - 11
Kernel/Process.cpp

@@ -42,7 +42,7 @@ namespace Kernel {
 
 static void create_signal_trampoline();
 
-RecursiveSpinLock g_profiling_lock;
+RecursiveSpinlock g_profiling_lock;
 static Atomic<pid_t> next_pid;
 static Singleton<MutexProtected<Process::List>> s_processes;
 READONLY_AFTER_INIT HashMap<String, OwnPtr<Module>>* g_modules;
@@ -205,7 +205,7 @@ RefPtr<Process> Process::create_kernel_process(RefPtr<Thread>& first_thread, Str
     if (do_register == RegisterProcess::Yes)
         register_new(*process);
 
-    ScopedSpinLock lock(g_scheduler_lock);
+    ScopedSpinlock lock(g_scheduler_lock);
     first_thread->set_affinity(affinity);
     first_thread->set_state(Thread::State::Runnable);
     return process;
@@ -429,7 +429,7 @@ RefPtr<Process> Process::from_pid(ProcessID pid)
 
 const Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(size_t i) const
 {
-    ScopedSpinLock lock(m_fds_lock);
+    ScopedSpinlock lock(m_fds_lock);
     if (m_fds_metadatas.size() <= i)
         return nullptr;
 
@@ -440,7 +440,7 @@ const Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(
 }
 Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(size_t i)
 {
-    ScopedSpinLock lock(m_fds_lock);
+    ScopedSpinlock lock(m_fds_lock);
     if (m_fds_metadatas.size() <= i)
         return nullptr;
 
@@ -452,20 +452,20 @@ Process::FileDescriptionAndFlags* Process::FileDescriptions::get_if_valid(size_t
 
 const Process::FileDescriptionAndFlags& Process::FileDescriptions::at(size_t i) const
 {
-    ScopedSpinLock lock(m_fds_lock);
+    ScopedSpinlock lock(m_fds_lock);
     VERIFY(m_fds_metadatas[i].is_allocated());
     return m_fds_metadatas[i];
 }
 Process::FileDescriptionAndFlags& Process::FileDescriptions::at(size_t i)
 {
-    ScopedSpinLock lock(m_fds_lock);
+    ScopedSpinlock lock(m_fds_lock);
     VERIFY(m_fds_metadatas[i].is_allocated());
     return m_fds_metadatas[i];
 }
 
 RefPtr<FileDescription> Process::FileDescriptions::file_description(int fd) const
 {
-    ScopedSpinLock lock(m_fds_lock);
+    ScopedSpinlock lock(m_fds_lock);
     if (fd < 0)
         return nullptr;
     if (static_cast<size_t>(fd) < m_fds_metadatas.size())
@@ -475,7 +475,7 @@ RefPtr<FileDescription> Process::FileDescriptions::file_description(int fd) cons
 
 void Process::FileDescriptions::enumerate(Function<void(const FileDescriptionAndFlags&)> callback) const
 {
-    ScopedSpinLock lock(m_fds_lock);
+    ScopedSpinlock lock(m_fds_lock);
     for (auto& file_description_metadata : m_fds_metadatas) {
         callback(file_description_metadata);
     }
@@ -483,7 +483,7 @@ void Process::FileDescriptions::enumerate(Function<void(const FileDescriptionAnd
 
 void Process::FileDescriptions::change_each(Function<void(FileDescriptionAndFlags&)> callback)
 {
-    ScopedSpinLock lock(m_fds_lock);
+    ScopedSpinlock lock(m_fds_lock);
     for (auto& file_description_metadata : m_fds_metadatas) {
         callback(file_description_metadata);
     }
@@ -501,7 +501,7 @@ size_t Process::FileDescriptions::open_count() const
 
 KResultOr<Process::ScopedDescriptionAllocation> Process::FileDescriptions::allocate(int first_candidate_fd)
 {
-    ScopedSpinLock lock(m_fds_lock);
+    ScopedSpinlock lock(m_fds_lock);
     for (size_t i = first_candidate_fd; i < max_open(); ++i) {
         if (!m_fds_metadatas[i].is_allocated()) {
             m_fds_metadatas[i].allocate();
@@ -771,7 +771,7 @@ RefPtr<Thread> Process::create_kernel_thread(void (*entry)(void*), void* entry_d
     regs.set_ip((FlatPtr)entry);
     regs.set_sp((FlatPtr)entry_data); // entry function argument is expected to be in the SP register
 
-    ScopedSpinLock lock(g_scheduler_lock);
+    ScopedSpinlock lock(g_scheduler_lock);
     thread->set_state(Thread::State::Runnable);
     return thread;
 }

+ 8 - 8
Kernel/Process.h

@@ -636,7 +636,7 @@ public:
 
         KResult try_clone(const Kernel::Process::FileDescriptions& other)
         {
-            ScopedSpinLock lock_other(other.m_fds_lock);
+            ScopedSpinlock lock_other(other.m_fds_lock);
             if (!try_resize(other.m_fds_metadatas.size()))
                 return ENOMEM;
 
@@ -667,7 +667,7 @@ public:
 
         void clear()
         {
-            ScopedSpinLock lock(m_fds_lock);
+            ScopedSpinlock lock(m_fds_lock);
             m_fds_metadatas.clear();
         }
 
@@ -677,7 +677,7 @@ public:
     private:
         FileDescriptions() = default;
         static constexpr size_t m_max_open_file_descriptors { FD_SETSIZE };
-        mutable SpinLock<u8> m_fds_lock;
+        mutable Spinlock<u8> m_fds_lock;
         Vector<FileDescriptionAndFlags> m_fds_metadatas;
     };
 
@@ -743,10 +743,10 @@ public:
     const FileDescriptions& fds() const { return m_fds; }
 
 private:
-    SpinLockProtected<Thread::ListInProcess>& thread_list() { return m_thread_list; }
-    SpinLockProtected<Thread::ListInProcess> const& thread_list() const { return m_thread_list; }
+    SpinlockProtected<Thread::ListInProcess>& thread_list() { return m_thread_list; }
+    SpinlockProtected<Thread::ListInProcess> const& thread_list() const { return m_thread_list; }
 
-    SpinLockProtected<Thread::ListInProcess> m_thread_list;
+    SpinlockProtected<Thread::ListInProcess> m_thread_list;
 
     FileDescriptions m_fds;
 
@@ -779,7 +779,7 @@ private:
     OwnPtr<PerformanceEventBuffer> m_perf_event_buffer;
 
     FutexQueues m_futex_queues;
-    SpinLock<u8> m_futex_lock;
+    Spinlock<u8> m_futex_lock;
 
     // This member is used in the implementation of ptrace's PT_TRACEME flag.
     // If it is set to true, the process will stop at the next execve syscall
@@ -812,7 +812,7 @@ public:
 // The second page is being used exclusively for write-protected values.
 static_assert(sizeof(Process) == (PAGE_SIZE * 2));
 
-extern RecursiveSpinLock g_profiling_lock;
+extern RecursiveSpinlock g_profiling_lock;
 
 MutexProtected<Process::List>& processes();
 

+ 2 - 2
Kernel/ProcessExposed.cpp

@@ -15,7 +15,7 @@
 
 namespace Kernel {
 
-static SpinLock<u8> s_index_lock;
+static Spinlock<u8> s_index_lock;
 static InodeIndex s_next_inode_index = 0;
 
 namespace SegmentedProcFSIndex {
@@ -71,7 +71,7 @@ InodeIndex build_segmented_index_for_file_description(ProcessID pid, unsigned fd
 
 static size_t s_allocate_global_inode_index()
 {
-    ScopedSpinLock lock(s_index_lock);
+    ScopedSpinlock lock(s_index_lock);
     s_next_inode_index = s_next_inode_index.value() + 1;
     // Note: Global ProcFS indices must be above 0 and at most the maximum a 36-bit value (2^36 - 1) can represent.
     VERIFY(s_next_inode_index > 0);

+ 2 - 2
Kernel/ProcessGroup.cpp

@@ -10,9 +10,9 @@
 
 namespace Kernel {
 
-static Singleton<SpinLockProtected<ProcessGroup::List>> s_process_groups;
+static Singleton<SpinlockProtected<ProcessGroup::List>> s_process_groups;
 
-SpinLockProtected<ProcessGroup::List>& process_groups()
+SpinlockProtected<ProcessGroup::List>& process_groups()
 {
     return *s_process_groups;
 }

+ 2 - 2
Kernel/ProcessGroup.h

@@ -9,7 +9,7 @@
 #include <AK/IntrusiveList.h>
 #include <AK/RefCounted.h>
 #include <AK/Weakable.h>
-#include <Kernel/Locking/SpinLockProtected.h>
+#include <Kernel/Locking/SpinlockProtected.h>
 #include <Kernel/UnixTypes.h>
 
 namespace Kernel {
@@ -43,6 +43,6 @@ public:
     using List = IntrusiveList<ProcessGroup, RawPtr<ProcessGroup>, &ProcessGroup::m_list_node>;
 };
 
-SpinLockProtected<ProcessGroup::List>& process_groups();
+SpinlockProtected<ProcessGroup::List>& process_groups();
 
 }

+ 1 - 1
Kernel/ProcessSpecificExposed.cpp

@@ -211,7 +211,7 @@ KResult Process::procfs_get_virtual_memory_stats(KBufferBuilder& builder) const
 {
     JsonArraySerializer array { builder };
     {
-        ScopedSpinLock lock(address_space().get_lock());
+        ScopedSpinlock lock(address_space().get_lock());
         for (auto& region : address_space().regions()) {
             if (!region->is_user() && !Process::current().is_superuser())
                 continue;

+ 1 - 1
Kernel/Random.cpp

@@ -70,7 +70,7 @@ UNMAP_AFTER_INIT KernelRng::KernelRng()
 
 void KernelRng::wait_for_entropy()
 {
-    ScopedSpinLock lock(get_lock());
+    ScopedSpinlock lock(get_lock());
     if (!resource().is_ready()) {
         dbgln("Entropy starvation...");
         m_seed_queue.wait_forever("KernelRng");

+ 5 - 5
Kernel/Random.h

@@ -37,7 +37,7 @@ public:
 
     bool get_random_bytes(u8* buffer, size_t n)
     {
-        ScopedSpinLock lock(m_lock);
+        ScopedSpinlock lock(m_lock);
         if (!is_ready())
             return false;
         if (m_p0_len >= reseed_threshold) {
@@ -82,7 +82,7 @@ public:
         return is_seeded() || m_p0_len >= reseed_threshold;
     }
 
-    SpinLock<u8>& get_lock() { return m_lock; }
+    Spinlock<u8>& get_lock() { return m_lock; }
 
 private:
     void reseed()
@@ -108,7 +108,7 @@ private:
     size_t m_p0_len { 0 };
     ByteBuffer m_key;
     HashType m_pools[pool_count];
-    SpinLock<u8> m_lock;
+    Spinlock<u8> m_lock;
 };
 
 class KernelRng : public Lockable<FortunaPRNG<Crypto::Cipher::AESCipher, Crypto::Hash::SHA256, 256>> {
@@ -122,7 +122,7 @@ public:
 
     void wake_if_ready();
 
-    SpinLock<u8>& get_lock() { return resource().get_lock(); }
+    Spinlock<u8>& get_lock() { return resource().get_lock(); }
 
 private:
     WaitQueue m_seed_queue;
@@ -156,7 +156,7 @@ public:
     void add_random_event(const T& event_data)
     {
         auto& kernel_rng = KernelRng::the();
-        ScopedSpinLock lock(kernel_rng.get_lock());
+        ScopedSpinlock lock(kernel_rng.get_lock());
         // We don't lock this because on the off chance a pool is corrupted, entropy isn't lost.
         Event<T> event = { read_tsc(), m_source, event_data };
         kernel_rng.resource().add_random_event(event, m_pool);

+ 7 - 7
Kernel/Scheduler.cpp

@@ -28,7 +28,7 @@ struct SchedulerData {
     bool in_scheduler { true };
 };
 
-RecursiveSpinLock g_scheduler_lock;
+RecursiveSpinlock g_scheduler_lock;
 
 static u32 time_slice_for(const Thread& thread)
 {
@@ -53,9 +53,9 @@ struct ThreadReadyQueues {
     Array<ThreadReadyQueue, count> queues;
 };
 
-static Singleton<SpinLockProtected<ThreadReadyQueues>> g_ready_queues;
+static Singleton<SpinlockProtected<ThreadReadyQueues>> g_ready_queues;
 
-static SpinLockProtected<TotalTimeScheduled> g_total_time_scheduled;
+static SpinlockProtected<TotalTimeScheduled> g_total_time_scheduled;
 
 // The Scheduler::current_time function provides a current time for scheduling purposes,
 // which may not necessarily relate to wall time
@@ -227,7 +227,7 @@ bool Scheduler::pick_next()
             scheduler_data.in_scheduler = false;
         });
 
-    ScopedSpinLock lock(g_scheduler_lock);
+    ScopedSpinlock lock(g_scheduler_lock);
 
     if constexpr (SCHEDULER_RUNNABLE_DEBUG) {
         dump_thread_list();
@@ -347,7 +347,7 @@ void Scheduler::enter_current(Thread& prev_thread, bool is_first)
         // Check if we have any signals we should deliver (even if we don't
         // end up switching to another thread).
         if (!current_thread->is_in_block() && current_thread->previous_mode() != Thread::PreviousMode::KernelMode && current_thread->current_trap()) {
-            ScopedSpinLock lock(current_thread->get_lock());
+            ScopedSpinlock lock(current_thread->get_lock());
             if (current_thread->state() == Thread::Running && current_thread->pending_signals_for_state()) {
                 current_thread->dispatch_one_pending_signal();
             }
@@ -485,7 +485,7 @@ void Scheduler::timer_tick(const RegisterState& regs)
     }
 
     if (current_thread->previous_mode() == Thread::PreviousMode::UserMode && current_thread->should_die() && !current_thread->is_blocked()) {
-        ScopedSpinLock scheduler_lock(g_scheduler_lock);
+        ScopedSpinlock scheduler_lock(g_scheduler_lock);
         dbgln_if(SCHEDULER_DEBUG, "Scheduler[{}]: Terminating user mode thread {}", Processor::id(), *current_thread);
         current_thread->set_state(Thread::Dying);
         Processor::current().invoke_scheduler_async();
@@ -517,7 +517,7 @@ void Scheduler::invoke_async()
     VERIFY(!processor.in_irq());
 
     // Since this function is called when leaving critical sections (such
-    // as a SpinLock), we need to check if we're not already doing this
+    // as a Spinlock), we need to check if we're not already doing this
     // to prevent recursion
     if (!ProcessorSpecific<SchedulerData>::get().in_scheduler)
         pick_next();

+ 2 - 2
Kernel/Scheduler.h

@@ -11,7 +11,7 @@
 #include <AK/IntrusiveList.h>
 #include <AK/Types.h>
 #include <Kernel/Forward.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Time/TimeManagement.h>
 #include <Kernel/UnixTypes.h>
 
@@ -22,7 +22,7 @@ struct RegisterState;
 extern Thread* g_finalizer;
 extern WaitQueue* g_finalizer_wait_queue;
 extern Atomic<bool> g_finalizer_has_work;
-extern RecursiveSpinLock g_scheduler_lock;
+extern RecursiveSpinlock g_scheduler_lock;
 
 struct TotalTimeScheduled {
     u64 total { 0 };

+ 9 - 9
Kernel/Storage/AHCIPort.cpp

@@ -8,7 +8,7 @@
 // please look at Documentation/Kernel/AHCILocking.md
 
 #include <AK/Atomic.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/MemoryManager.h>
 #include <Kernel/Memory/ScatterGatherList.h>
 #include <Kernel/Memory/TypedMapping.h>
@@ -124,7 +124,7 @@ bool AHCIPort::is_interrupts_enabled() const
 void AHCIPort::recover_from_fatal_error()
 {
     MutexLocker locker(m_lock);
-    ScopedSpinLock lock(m_hard_lock);
+    ScopedSpinlock lock(m_hard_lock);
     dmesgln("{}: AHCI Port {} fatal error, shutting down!", m_parent_handler->hba_controller()->pci_address(), representative_port_index());
     dmesgln("{}: AHCI Port {} fatal error, SError {}", m_parent_handler->hba_controller()->pci_address(), representative_port_index(), (u32)m_port_registers.serr);
     stop_command_list_processing();
@@ -208,7 +208,7 @@ void AHCIPort::eject()
 bool AHCIPort::reset()
 {
     MutexLocker locker(m_lock);
-    ScopedSpinLock lock(m_hard_lock);
+    ScopedSpinlock lock(m_hard_lock);
 
     dbgln_if(AHCI_DEBUG, "AHCI Port {}: Resetting", representative_port_index());
 
@@ -233,12 +233,12 @@ bool AHCIPort::reset()
 bool AHCIPort::initialize_without_reset()
 {
     MutexLocker locker(m_lock);
-    ScopedSpinLock lock(m_hard_lock);
+    ScopedSpinlock lock(m_hard_lock);
     dmesgln("AHCI Port {}: {}", representative_port_index(), try_disambiguate_sata_status());
     return initialize(lock);
 }
 
-bool AHCIPort::initialize(ScopedSpinLock<SpinLock<u8>>& main_lock)
+bool AHCIPort::initialize(ScopedSpinlock<Spinlock<u8>>& main_lock)
 {
     VERIFY(m_lock.is_locked());
     dbgln_if(AHCI_DEBUG, "AHCI Port {}: Initialization. Signature = {:#08x}", representative_port_index(), static_cast<u32>(m_port_registers.sig));
@@ -504,7 +504,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
     VERIFY(is_operable());
     VERIFY(m_lock.is_locked());
     VERIFY(m_current_scatter_list);
-    ScopedSpinLock lock(m_hard_lock);
+    ScopedSpinlock lock(m_hard_lock);
 
     dbgln_if(AHCI_DEBUG, "AHCI Port {}: Do a {}, lba {}, block count {}", representative_port_index(), direction == AsyncBlockDeviceRequest::RequestType::Write ? "write" : "read", lba, block_count);
     if (!spin_until_ready())
@@ -591,7 +591,7 @@ bool AHCIPort::access_device(AsyncBlockDeviceRequest::RequestType direction, u64
     return true;
 }
 
-bool AHCIPort::identify_device(ScopedSpinLock<SpinLock<u8>>& main_lock)
+bool AHCIPort::identify_device(ScopedSpinlock<Spinlock<u8>>& main_lock)
 {
     VERIFY(m_lock.is_locked());
     VERIFY(is_operable());
@@ -654,7 +654,7 @@ bool AHCIPort::identify_device(ScopedSpinLock<SpinLock<u8>>& main_lock)
 bool AHCIPort::shutdown()
 {
     MutexLocker locker(m_lock);
-    ScopedSpinLock lock(m_hard_lock);
+    ScopedSpinlock lock(m_hard_lock);
     rebase();
     set_interface_state(AHCI::DeviceDetectionInitialization::DisableInterface);
     return true;
@@ -740,7 +740,7 @@ void AHCIPort::stop_fis_receiving() const
     m_port_registers.cmd = m_port_registers.cmd & 0xFFFFFFEF;
 }
 
-bool AHCIPort::initiate_sata_reset(ScopedSpinLock<SpinLock<u8>>& main_lock)
+bool AHCIPort::initiate_sata_reset(ScopedSpinlock<Spinlock<u8>>& main_lock)
 {
     VERIFY(m_lock.is_locked());
     VERIFY(m_hard_lock.is_locked());

+ 5 - 5
Kernel/Storage/AHCIPort.h

@@ -12,7 +12,7 @@
 #include <Kernel/IO.h>
 #include <Kernel/Interrupts/IRQHandler.h>
 #include <Kernel/Locking/Mutex.h>
-#include <Kernel/Locking/SpinLock.h>
+#include <Kernel/Locking/Spinlock.h>
 #include <Kernel/Memory/AnonymousVMObject.h>
 #include <Kernel/Memory/PhysicalPage.h>
 #include <Kernel/Memory/ScatterGatherList.h>
@@ -51,7 +51,7 @@ public:
 
 private:
     bool is_phy_enabled() const { return (m_port_registers.ssts & 0xf) == 3; }
-    bool initialize(ScopedSpinLock<SpinLock<u8>>&);
+    bool initialize(ScopedSpinlock<Spinlock<u8>>&);
 
     UNMAP_AFTER_INIT AHCIPort(const AHCIPortHandler&, volatile AHCI::PortRegisters&, u32 port_index);
 
@@ -62,7 +62,7 @@ private:
     const char* try_disambiguate_sata_status();
     void try_disambiguate_sata_error();
 
-    bool initiate_sata_reset(ScopedSpinLock<SpinLock<u8>>&);
+    bool initiate_sata_reset(ScopedSpinlock<Spinlock<u8>>&);
     void rebase();
     void recover_from_fatal_error();
     bool shutdown();
@@ -79,7 +79,7 @@ private:
 
     bool spin_until_ready() const;
 
-    bool identify_device(ScopedSpinLock<SpinLock<u8>>&);
+    bool identify_device(ScopedSpinlock<Spinlock<u8>>&);
 
     ALWAYS_INLINE void start_command_list_processing() const;
     ALWAYS_INLINE void mark_command_header_ready_to_process(u8 command_header_index) const;
@@ -101,7 +101,7 @@ private:
 
     EntropySource m_entropy_source;
     RefPtr<AsyncBlockDeviceRequest> m_current_request;
-    SpinLock<u8> m_hard_lock;
+    Spinlock<u8> m_hard_lock;
     Mutex m_lock { "AHCIPort" };
 
     mutable bool m_wait_for_completion { false };

+ 4 - 4
Kernel/Storage/BMIDEChannel.cpp

@@ -80,7 +80,7 @@ bool BMIDEChannel::handle_irq(const RegisterState&)
     // clear bus master interrupt status
     m_io_group.bus_master_base().value().offset(2).out<u8>(m_io_group.bus_master_base().value().offset(2).in<u8>() | 4);
 
-    ScopedSpinLock lock(m_request_lock);
+    ScopedSpinlock lock(m_request_lock);
     dbgln_if(PATA_DEBUG, "BMIDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
         (status & ATA_SR_DRQ) != 0,
         (status & ATA_SR_BSY) != 0,
@@ -116,7 +116,7 @@ void BMIDEChannel::complete_current_request(AsyncDeviceRequest::RequestResult re
     // before Processor::deferred_call_queue returns!
     g_io_work->queue([this, result]() {
         dbgln_if(PATA_DEBUG, "BMIDEChannel::complete_current_request result: {}", (int)result);
-        ScopedSpinLock lock(m_request_lock);
+        ScopedSpinlock lock(m_request_lock);
         VERIFY(m_current_request);
         auto current_request = m_current_request;
         m_current_request.clear();
@@ -146,7 +146,7 @@ void BMIDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
     VERIFY(!m_current_request.is_null());
     VERIFY(m_current_request->block_count() <= 256);
 
-    ScopedSpinLock m_lock(m_request_lock);
+    ScopedSpinlock m_lock(m_request_lock);
     dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_write_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count());
 
     prdt().offset = m_dma_buffer_page->paddr().get();
@@ -194,7 +194,7 @@ void BMIDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
     VERIFY(!m_current_request.is_null());
     VERIFY(m_current_request->block_count() <= 256);
 
-    ScopedSpinLock m_lock(m_request_lock);
+    ScopedSpinlock m_lock(m_request_lock);
     dbgln_if(PATA_DEBUG, "BMIDEChannel::ata_read_sectors ({} x {})", m_current_request->block_index(), m_current_request->block_count());
 
     // Note: This is a fix for a quirk for an IDE controller on ICH7 machine.

+ 4 - 4
Kernel/Storage/IDEChannel.cpp

@@ -197,7 +197,7 @@ bool IDEChannel::handle_irq(const RegisterState&)
 
     m_entropy_source.add_random_event(status);
 
-    ScopedSpinLock lock(m_request_lock);
+    ScopedSpinlock lock(m_request_lock);
     dbgln_if(PATA_DEBUG, "IDEChannel: interrupt: DRQ={}, BSY={}, DRDY={}",
         (status & ATA_SR_DRQ) != 0,
         (status & ATA_SR_BSY) != 0,
@@ -223,7 +223,7 @@ bool IDEChannel::handle_irq(const RegisterState&)
     // trigger page faults
     g_io_work->queue([this]() {
         MutexLocker locker(m_lock);
-        ScopedSpinLock lock(m_request_lock);
+        ScopedSpinlock lock(m_request_lock);
         if (m_current_request->request_type() == AsyncBlockDeviceRequest::Read) {
             dbgln_if(PATA_DEBUG, "IDEChannel: Read block {}/{}", m_current_request_block_index, m_current_request->block_count());
 
@@ -498,7 +498,7 @@ void IDEChannel::ata_read_sectors(bool slave_request, u16 capabilities)
     VERIFY(!m_current_request.is_null());
     VERIFY(m_current_request->block_count() <= 256);
 
-    ScopedSpinLock m_lock(m_request_lock);
+    ScopedSpinlock m_lock(m_request_lock);
     dbgln_if(PATA_DEBUG, "IDEChannel::ata_read_sectors");
     dbgln_if(PATA_DEBUG, "IDEChannel: Reading {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
     ata_access(Direction::Read, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
@@ -536,7 +536,7 @@ void IDEChannel::ata_write_sectors(bool slave_request, u16 capabilities)
     VERIFY(!m_current_request.is_null());
     VERIFY(m_current_request->block_count() <= 256);
 
-    ScopedSpinLock m_lock(m_request_lock);
+    ScopedSpinlock m_lock(m_request_lock);
     dbgln_if(PATA_DEBUG, "IDEChannel: Writing {} sector(s) @ LBA {}", m_current_request->block_count(), m_current_request->block_index());
     ata_access(Direction::Write, slave_request, m_current_request->block_index(), m_current_request->block_count(), capabilities);
     ata_do_write_sector();

+ 1 - 1
Kernel/Storage/IDEChannel.h

@@ -157,7 +157,7 @@ protected:
     RefPtr<AsyncBlockDeviceRequest> m_current_request;
     u64 m_current_request_block_index { 0 };
     bool m_current_request_flushing_cache { false };
-    SpinLock<u8> m_request_lock;
+    Spinlock<u8> m_request_lock;
     Mutex m_lock { "IDEChannel" };
 
     IOAddressGroup m_io_group;

+ 1 - 1
Kernel/Syscalls/execve.cpp

@@ -682,7 +682,7 @@ KResult Process::do_exec(NonnullRefPtr<FileDescription> main_program_description
     }
 
     {
-        ScopedSpinLock lock(g_scheduler_lock);
+        ScopedSpinlock lock(g_scheduler_lock);
         new_main_thread->set_state(Thread::State::Runnable);
     }
     u32 lock_count_to_restore;

+ 2 - 2
Kernel/Syscalls/fork.cpp

@@ -93,7 +93,7 @@ KResultOr<FlatPtr> Process::sys$fork(RegisterState& regs)
 #endif
 
     {
-        ScopedSpinLock lock(address_space().get_lock());
+        ScopedSpinlock lock(address_space().get_lock());
         for (auto& region : address_space().regions()) {
             dbgln_if(FORK_DEBUG, "fork: cloning Region({}) '{}' @ {}", region, region->name(), region->vaddr());
             auto maybe_region_clone = region->try_clone();
@@ -120,7 +120,7 @@ KResultOr<FlatPtr> Process::sys$fork(RegisterState& regs)
 
     PerformanceManager::add_process_created_event(*child);
 
-    ScopedSpinLock lock(g_scheduler_lock);
+    ScopedSpinlock lock(g_scheduler_lock);
     child_first_thread->set_affinity(Thread::current()->affinity());
     child_first_thread->set_state(Thread::State::Runnable);
 

+ 5 - 5
Kernel/Syscalls/futex.cpp

@@ -13,7 +13,7 @@ namespace Kernel {
 
 void Process::clear_futex_queues_on_exec()
 {
-    ScopedSpinLock lock(m_futex_lock);
+    ScopedSpinlock lock(m_futex_lock);
     for (auto& it : m_futex_queues) {
         bool did_wake_all;
         it.value->wake_all(did_wake_all);
@@ -88,7 +88,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
     auto do_wake = [&](FlatPtr user_address, u32 count, Optional<u32> bitmask) -> int {
         if (count == 0)
             return 0;
-        ScopedSpinLock locker(m_futex_lock);
+        ScopedSpinlock locker(m_futex_lock);
         auto futex_queue = find_futex_queue(user_address, false);
         if (!futex_queue)
             return 0;
@@ -117,7 +117,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
             }
             atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
 
-            ScopedSpinLock locker(m_futex_lock);
+            ScopedSpinlock locker(m_futex_lock);
             did_create = false;
             futex_queue = find_futex_queue(user_address, true, &did_create);
             VERIFY(futex_queue);
@@ -130,7 +130,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
 
         Thread::BlockResult block_result = futex_queue->wait_on(timeout, bitset);
 
-        ScopedSpinLock locker(m_futex_lock);
+        ScopedSpinlock locker(m_futex_lock);
         if (futex_queue->is_empty_and_no_imminent_waits()) {
             // If there are no more waiters, we want to get rid of the futex!
             remove_futex_queue(user_address);
@@ -150,7 +150,7 @@ KResultOr<FlatPtr> Process::sys$futex(Userspace<const Syscall::SC_futex_params*>
         atomic_thread_fence(AK::MemoryOrder::memory_order_acquire);
 
         int woken_or_requeued = 0;
-        ScopedSpinLock locker(m_futex_lock);
+        ScopedSpinlock locker(m_futex_lock);
         if (auto futex_queue = find_futex_queue(user_address, false)) {
             RefPtr<FutexQueue> target_futex_queue;
             bool is_empty, is_target_empty;

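The futex hunks all follow one locking discipline: m_futex_lock guards the per-address queue map, a FutexQueue is created on demand under the lock, and after a wait the lock is retaken so an empty queue can be removed. A hedged sketch of that bookkeeping, reusing the illustrative Spinlock/ScopedSpinlock above (the container and pointer types are stand-ins, FlatPtr becomes uintptr_t and RefPtr becomes std::shared_ptr, and the helper bodies are assumptions; only the names come from the diff):

    #include <cstdint>
    #include <map>
    #include <memory>

    struct FutexQueue {
        bool is_empty_and_no_imminent_waits() const { return waiter_count == 0; }
        int waiter_count { 0 };
    };

    class Process {
    public:
        void cleanup_after_wait(uintptr_t user_address, std::shared_ptr<FutexQueue> queue)
        {
            // Re-acquire m_futex_lock after blocking, as the FUTEX_WAIT hunk
            // does, and drop the queue once the last waiter is gone.
            ScopedSpinlock locker(m_futex_lock);
            if (queue->is_empty_and_no_imminent_waits())
                m_futex_queues.erase(user_address);
        }

    private:
        std::shared_ptr<FutexQueue> find_futex_queue(uintptr_t user_address, bool create, bool* did_create = nullptr)
        {
            // Caller must hold m_futex_lock; every call site above takes a
            // ScopedSpinlock first.
            if (auto it = m_futex_queues.find(user_address); it != m_futex_queues.end())
                return it->second;
            if (!create)
                return nullptr;
            auto queue = std::make_shared<FutexQueue>();
            m_futex_queues.emplace(user_address, queue);
            if (did_create)
                *did_create = true;
            return queue;
        }

        Spinlock<unsigned> m_futex_lock;
        std::map<uintptr_t, std::shared_ptr<FutexQueue>> m_futex_queues;
    };

The emptiness check must happen under the same lock as the erase: a waker can race in between wait_on() returning and the cleanup, so holding m_futex_lock across both the check and the removal is what keeps lookup and teardown consistent.
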
+ 4 - 4
Kernel/Syscalls/profiling.cpp

@@ -31,7 +31,7 @@ KResultOr<FlatPtr> Process::sys$profiling_enable(pid_t pid, u64 event_mask)
         else
             g_global_perf_events = PerformanceEventBuffer::try_create_with_size(32 * MiB).leak_ptr();
 
-        ScopedSpinLock lock(g_profiling_lock);
+        ScopedSpinlock lock(g_profiling_lock);
         if (!TimeManagement::the().enable_profile_timer())
             return ENOTSUP;
         g_profiling_all_threads = true;
@@ -51,7 +51,7 @@ KResultOr<FlatPtr> Process::sys$profiling_enable(pid_t pid, u64 event_mask)
         return ESRCH;
     if (!is_superuser() && process->uid() != euid())
         return EPERM;
-    ScopedSpinLock lock(g_profiling_lock);
+    ScopedSpinlock lock(g_profiling_lock);
     g_profiling_event_mask = PERF_EVENT_PROCESS_CREATE | PERF_EVENT_THREAD_CREATE | PERF_EVENT_MMAP;
     process->set_profiling(true);
     if (!process->create_perf_events_buffer_if_needed()) {
@@ -86,7 +86,7 @@ KResultOr<FlatPtr> Process::sys$profiling_disable(pid_t pid)
         return ESRCH;
     if (!is_superuser() && process->uid() != euid())
         return EPERM;
-    ScopedSpinLock lock(g_profiling_lock);
+    ScopedSpinlock lock(g_profiling_lock);
     if (!process->is_profiling())
         return EINVAL;
     // FIXME: If we enabled the profile timer and it's not supported, how do we disable it now?
@@ -122,7 +122,7 @@ KResultOr<FlatPtr> Process::sys$profiling_free_buffer(pid_t pid)
         return ESRCH;
     if (!is_superuser() && process->uid() != euid())
         return EPERM;
-    ScopedSpinLock lock(g_profiling_lock);
+    ScopedSpinlock lock(g_profiling_lock);
     if (process->is_profiling())
         return EINVAL;
     process->delete_perf_events_buffer();

+ 2 - 2
Kernel/Syscalls/ptrace.cpp

@@ -18,7 +18,7 @@ namespace Kernel {
 
 static KResultOr<u32> handle_ptrace(const Kernel::Syscall::SC_ptrace_params& params, Process& caller)
 {
-    ScopedSpinLock scheduler_lock(g_scheduler_lock);
+    ScopedSpinlock scheduler_lock(g_scheduler_lock);
     if (params.request == PT_TRACE_ME) {
         if (Process::current().tracer())
             return EBUSY;
@@ -55,7 +55,7 @@ static KResultOr<u32> handle_ptrace(const Kernel::Syscall::SC_ptrace_params& par
         auto result = peer_process.start_tracing_from(caller.pid());
         if (result.is_error())
             return result.error();
-        ScopedSpinLock lock(peer->get_lock());
+        ScopedSpinlock lock(peer->get_lock());
         if (peer->state() != Thread::State::Stopped) {
             peer->send_signal(SIGSTOP, &caller);
         }

+ 2 - 2
Kernel/Syscalls/sched.cpp

@@ -28,7 +28,7 @@ KResultOr<FlatPtr> Process::sys$sched_setparam(int pid, Userspace<const struct s
         return EINVAL;
 
     auto* peer = Thread::current();
-    ScopedSpinLock lock(g_scheduler_lock);
+    ScopedSpinlock lock(g_scheduler_lock);
     if (pid != 0)
         peer = Thread::from_tid(pid);
 
@@ -49,7 +49,7 @@ KResultOr<FlatPtr> Process::sys$sched_getparam(pid_t pid, Userspace<struct sched
     int priority;
     {
         auto* peer = Thread::current();
-        ScopedSpinLock lock(g_scheduler_lock);
+        ScopedSpinlock lock(g_scheduler_lock);
         if (pid != 0) {
             // FIXME: PID/TID BUG
             // The entire process is supposed to be affected.

+ 2 - 2
Kernel/Syscalls/thread.cpp

@@ -77,7 +77,7 @@ KResultOr<FlatPtr> Process::sys$create_thread(void* (*entry)(void*), Userspace<c
 
     PerformanceManager::add_thread_created_event(*thread);
 
-    ScopedSpinLock lock(g_scheduler_lock);
+    ScopedSpinlock lock(g_scheduler_lock);
     thread->set_priority(requested_thread_priority);
     thread->set_state(Thread::State::Runnable);
     return thread->tid().value();
@@ -207,7 +207,7 @@ KResultOr<FlatPtr> Process::sys$get_thread_name(pid_t tid, Userspace<char*> buff
     if (!thread || thread->pid() != pid())
         return ESRCH;
 
-    ScopedSpinLock locker(thread->get_lock());
+    ScopedSpinlock locker(thread->get_lock());
     auto thread_name = thread->name();
 
     if (thread_name.is_null()) {

+ 2 - 2
Kernel/TTY/ConsoleManagement.cpp

@@ -59,7 +59,7 @@ UNMAP_AFTER_INIT void ConsoleManagement::initialize()
         PANIC("Switch to tty value is invalid: {} ", tty_number);
     }
     m_active_console = &m_consoles[tty_number];
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     m_active_console->set_active(true);
     if (!m_active_console->is_graphical())
         m_active_console->clear();
@@ -67,7 +67,7 @@ UNMAP_AFTER_INIT void ConsoleManagement::initialize()
 
 void ConsoleManagement::switch_to(unsigned index)
 {
-    ScopedSpinLock lock(m_lock);
+    ScopedSpinlock lock(m_lock);
     VERIFY(m_active_console);
     VERIFY(index < m_consoles.size());
     if (m_active_console->index() == index)

+ 3 - 3
Kernel/TTY/ConsoleManagement.h

@@ -35,13 +35,13 @@ public:
     NonnullRefPtr<VirtualConsole> first_tty() const { return m_consoles[0]; }
     NonnullRefPtr<VirtualConsole> debug_tty() const { return m_consoles[1]; }
 
-    RecursiveSpinLock& tty_write_lock() { return m_tty_write_lock; }
+    RecursiveSpinlock& tty_write_lock() { return m_tty_write_lock; }
 
 private:
     NonnullRefPtrVector<VirtualConsole, s_max_virtual_consoles> m_consoles;
     VirtualConsole* m_active_console { nullptr };
-    SpinLock<u8> m_lock;
-    RecursiveSpinLock m_tty_write_lock;
+    Spinlock<u8> m_lock;
+    RecursiveSpinlock m_tty_write_lock;
 };
 
 };

+ 2 - 2
Kernel/TTY/SlavePTY.cpp

@@ -13,9 +13,9 @@
 
 namespace Kernel {
 
-static Singleton<SpinLockProtected<SlavePTY::List>> s_all_instances;
+static Singleton<SpinlockProtected<SlavePTY::List>> s_all_instances;
 
-SpinLockProtected<SlavePTY::List>& SlavePTY::all_instances()
+SpinlockProtected<SlavePTY::List>& SlavePTY::all_instances()
 {
     return s_all_instances;
 }

+ 1 - 1
Kernel/TTY/SlavePTY.h

@@ -53,7 +53,7 @@ private:
 
 public:
     using List = IntrusiveList<SlavePTY, RawPtr<SlavePTY>, &SlavePTY::m_list_node>;
-    static SpinLockProtected<SlavePTY::List>& all_instances();
+    static SpinlockProtected<SlavePTY::List>& all_instances();
 };
 
 }

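SlavePTY's all_instances() shows the companion rename, SpinLockProtected => SpinlockProtected: a template that couples a value to its lock so the value is only reachable from a locked scope. A rough sketch of the shape, again reusing the illustrative types above (the with() accessor name is an assumption made for illustration, not a confirmed SerenityOS API):

    template<typename T>
    class SpinlockProtected {
    public:
        template<typename Callback>
        decltype(auto) with(Callback callback)
        {
            // The protected value is only ever handed out under the lock.
            ScopedSpinlock locker(m_lock);
            return callback(m_value);
        }

    private:
        T m_value {};
        Spinlock<unsigned> m_lock;
    };

    // Hypothetical usage mirroring SlavePTY::all_instances():
    //     SlavePTY::all_instances().with([&](auto& list) {
    //         for (auto& pty : list) { /* ... */ }
    //     });

Because m_value is private and the only accessor takes the lock first, forgetting to lock before touching the global SlavePTY list becomes a compile error rather than a race.
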
Some files were not shown because too many files were changed