Kernel: Move Arch/x86/Spinlock.h and add stubs for aarch64

The code in Spinlock.h has no architectural specific logic, thus can be
moved to the Arch directory. This contains no functional change.

Also add the Spinlock.cpp file for aarch64 which contains stubs for the
lock and unlock functions.
This commit is contained in:
Timon Kruiper 2022-05-02 23:56:30 +02:00 committed by Andreas Kling
parent 0d6d4508df
commit 9abcb6700c
Notes: sideshowbarker 2024-07-17 12:02:22 +09:00
6 changed files with 103 additions and 177 deletions

View file

@@ -1,17 +1,76 @@
/*
* Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
* Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Platform.h>
#include <Kernel/Arch/Processor.h>
#include <Kernel/Locking/LockRank.h>
#if ARCH(X86_64) || ARCH(I386)
# include <Kernel/Arch/x86/Spinlock.h>
#elif ARCH(AARCH64)
# include <Kernel/Arch/aarch64/Spinlock.h>
#else
# error "Unknown architecture"
#endif
namespace Kernel {
// Non-recursive kernel spinlock. lock()/unlock() are defined in the
// per-architecture Spinlock.cpp (not visible here); lock() returns the
// caller's previous flags state, which must be handed back to unlock().
class Spinlock {
AK_MAKE_NONCOPYABLE(Spinlock);
AK_MAKE_NONMOVABLE(Spinlock);
public:
// NOTE(review): the rank is only stored here; enforcement presumably
// happens in the out-of-view implementation via LockRank -- confirm.
Spinlock(LockRank rank = LockRank::None)
: m_rank(rank)
{
}
// Acquires the lock; returns the previous flags to pass to unlock().
u32 lock();
// Releases the lock, restoring the state captured by lock().
void unlock(u32 prev_flags);
// A non-zero lock word means held; a relaxed load is only a snapshot,
// so this is advisory and not a synchronization point.
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return m_lock.load(AK::memory_order_relaxed) != 0;
}
// Resets the lock word to the unlocked state.
ALWAYS_INLINE void initialize()
{
m_lock.store(0, AK::memory_order_relaxed);
}
private:
Atomic<u8> m_lock { 0 };
const LockRank m_rank;
};
// Spinlock that may be re-acquired by the processor already holding it.
// The lock word stores the owning Processor's address (see
// is_locked_by_current_processor); lock()/unlock() are defined in the
// per-architecture Spinlock.cpp, which is not visible here.
class RecursiveSpinlock {
AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
AK_MAKE_NONMOVABLE(RecursiveSpinlock);
public:
RecursiveSpinlock(LockRank rank = LockRank::None)
: m_rank(rank)
{
}
// Acquires the lock (or re-enters it); returns previous flags for unlock().
u32 lock();
// Releases one level of the lock, restoring the state from lock().
void unlock(u32 prev_flags);
// Non-zero owner pointer means held; relaxed load, advisory only.
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return m_lock.load(AK::memory_order_relaxed) != 0;
}
// True when the lock word equals this processor's Processor address.
[[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
{
return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
}
// Resets the lock word to the unlocked state.
ALWAYS_INLINE void initialize()
{
m_lock.store(0, AK::memory_order_relaxed);
}
private:
Atomic<FlatPtr> m_lock { 0 };
// Presumably counts nested acquisitions by the owner -- maintained by
// the out-of-view implementation; confirm in Spinlock.cpp.
u32 m_recursions { 0 };
const LockRank m_rank;
};
}

View file

@@ -0,0 +1,33 @@
/*
* Copyright (c) 2022, Timon Kruiper <timonkruiper@gmail.com>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/Spinlock.h>
namespace Kernel {
// aarch64 stub: spinlocks are not implemented yet, so reaching this is a bug.
u32 Spinlock::lock()
{
VERIFY_NOT_REACHED();
return 0;
}
// aarch64 stub: spinlocks are not implemented yet, so reaching this is a bug.
void Spinlock::unlock(u32)
{
VERIFY_NOT_REACHED();
}
// aarch64 stub: recursive spinlocks are not implemented yet.
u32 RecursiveSpinlock::lock()
{
VERIFY_NOT_REACHED();
return 0;
}
// aarch64 stub: recursive spinlocks are not implemented yet.
void RecursiveSpinlock::unlock(u32)
{
VERIFY_NOT_REACHED();
}
}

View file

@ -1,88 +0,0 @@
/*
* Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <AK/Noncopyable.h>
#include <AK/Types.h>
#include <Kernel/Locking/LockRank.h>
namespace Kernel {
class Spinlock {
AK_MAKE_NONCOPYABLE(Spinlock);
AK_MAKE_NONMOVABLE(Spinlock);
public:
Spinlock(LockRank rank = LockRank::None)
{
(void)rank;
}
ALWAYS_INLINE u32 lock()
{
VERIFY_NOT_REACHED();
return 0;
}
ALWAYS_INLINE void unlock(u32 /*prev_flags*/)
{
VERIFY_NOT_REACHED();
}
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
VERIFY_NOT_REACHED();
return false;
}
ALWAYS_INLINE void initialize()
{
VERIFY_NOT_REACHED();
}
};
class RecursiveSpinlock {
AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
AK_MAKE_NONMOVABLE(RecursiveSpinlock);
public:
RecursiveSpinlock(LockRank rank = LockRank::None)
{
(void)rank;
VERIFY_NOT_REACHED();
}
ALWAYS_INLINE u32 lock()
{
VERIFY_NOT_REACHED();
return 0;
}
ALWAYS_INLINE void unlock(u32 /*prev_flags*/)
{
VERIFY_NOT_REACHED();
}
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
VERIFY_NOT_REACHED();
return false;
}
[[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
{
VERIFY_NOT_REACHED();
return false;
}
ALWAYS_INLINE void initialize()
{
VERIFY_NOT_REACHED();
}
};
}

View file

@ -1,79 +0,0 @@
/*
* Copyright (c) 2020-2022, Andreas Kling <kling@serenityos.org>
*
* SPDX-License-Identifier: BSD-2-Clause
*/
#pragma once
#include <Kernel/Arch/Processor.h>
#include <Kernel/Locking/LockRank.h>
#include <AK/Platform.h>
VALIDATE_IS_X86()
namespace Kernel {
// x86 non-recursive kernel spinlock. lock()/unlock() are defined in the
// x86 Spinlock.cpp (not visible here); lock() returns the caller's
// previous flags state, which must be handed back to unlock().
class Spinlock {
AK_MAKE_NONCOPYABLE(Spinlock);
AK_MAKE_NONMOVABLE(Spinlock);
public:
// NOTE(review): the rank is only stored here; enforcement presumably
// happens in the out-of-view implementation via LockRank -- confirm.
Spinlock(LockRank rank = LockRank::None)
: m_rank(rank)
{
}
// Acquires the lock; returns the previous flags to pass to unlock().
u32 lock();
// Releases the lock, restoring the state captured by lock().
void unlock(u32 prev_flags);
// A non-zero lock word means held; relaxed load, advisory only.
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return m_lock.load(AK::memory_order_relaxed) != 0;
}
// Resets the lock word to the unlocked state.
ALWAYS_INLINE void initialize()
{
m_lock.store(0, AK::memory_order_relaxed);
}
private:
Atomic<u8> m_lock { 0 };
const LockRank m_rank;
};
// x86 spinlock that may be re-acquired by the processor already holding it.
// The lock word stores the owning Processor's address (see
// is_locked_by_current_processor); lock()/unlock() are defined in the
// x86 Spinlock.cpp, which is not visible here.
class RecursiveSpinlock {
AK_MAKE_NONCOPYABLE(RecursiveSpinlock);
AK_MAKE_NONMOVABLE(RecursiveSpinlock);
public:
RecursiveSpinlock(LockRank rank = LockRank::None)
: m_rank(rank)
{
}
// Acquires the lock (or re-enters it); returns previous flags for unlock().
u32 lock();
// Releases one level of the lock, restoring the state from lock().
void unlock(u32 prev_flags);
// Non-zero owner pointer means held; relaxed load, advisory only.
[[nodiscard]] ALWAYS_INLINE bool is_locked() const
{
return m_lock.load(AK::memory_order_relaxed) != 0;
}
// True when the lock word equals this processor's Processor address.
[[nodiscard]] ALWAYS_INLINE bool is_locked_by_current_processor() const
{
return m_lock.load(AK::memory_order_relaxed) == FlatPtr(&Processor::current());
}
// Resets the lock word to the unlocked state.
ALWAYS_INLINE void initialize()
{
m_lock.store(0, AK::memory_order_relaxed);
}
private:
Atomic<FlatPtr> m_lock { 0 };
// Presumably counts nested acquisitions by the owner -- maintained by
// the out-of-view implementation; confirm in Spinlock.cpp.
u32 m_recursions { 0 };
const LockRank m_rank;
};
}

View file

@@ -4,7 +4,7 @@
* SPDX-License-Identifier: BSD-2-Clause
*/
#include <Kernel/Arch/x86/Spinlock.h>
#include <Kernel/Arch/Spinlock.h>
namespace Kernel {

View file

@@ -421,6 +421,7 @@ else()
Arch/aarch64/SafeMem.cpp
Arch/aarch64/ScopedCritical.cpp
Arch/aarch64/SmapDisabler.cpp
Arch/aarch64/Spinlock.cpp
Arch/aarch64/init.cpp
Arch/aarch64/vector_table.S