
Kernel: Rename some BlockerSets to foo_blocker_set

Cleanup after renaming BlockCondition to BlockerSet.
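
For context: in the SerenityOS kernel, a Thread::Blocker represents one sleeping thread, and a Thread::BlockerSet gathers the blockers waiting on one kind of event so the producing side can wake them. A minimal sketch of the pattern this commit is tidying up after; ExampleBlockerSet and notify() are illustrative names, not from the tree, while add_to_blocker_set and the Singleton usage come from the diffs below:

    // Illustrative only: each concrete set in the diffs below (ARP table,
    // wait, join) exposes its own event-specific wake-up entry point.
    class ExampleBlockerSet : public Thread::BlockerSet {
    public:
        void notify(); // would unblock any registered waiters
    };

    // Naming convention applied by this commit: variables holding a
    // BlockerSet are named foo_blocker_set, not the stale foo_block_condition.
    static Singleton<ExampleBlockerSet> s_example_blocker_set;

    // A Blocker registers itself in its constructor:
    //     if (!add_to_blocker_set(*s_example_blocker_set))
    //         m_should_block = false;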
Andreas Kling, 3 years ago
commit e51a5e2d5d
6 changed files with 14 additions and 14 deletions
  1. Kernel/Net/Routing.cpp (+3 -3)
  2. Kernel/Process.cpp (+4 -4)
  3. Kernel/Process.h (+2 -2)
  4. Kernel/Thread.cpp (+2 -2)
  5. Kernel/Thread.h (+1 -1)
  6. Kernel/ThreadBlockers.cpp (+2 -2)

Kernel/Net/Routing.cpp (+3 -3)

@@ -80,13 +80,13 @@ protected:
     }
 };
 
-static Singleton<ARPTableBlockerSet> s_arp_table_block_condition;
+static Singleton<ARPTableBlockerSet> s_arp_table_blocker_set;
 
 ARPTableBlocker::ARPTableBlocker(IPv4Address ip_addr, Optional<MACAddress>& addr)
     : m_ip_addr(ip_addr)
     , m_addr(addr)
 {
-    if (!add_to_blocker_set(*s_arp_table_block_condition))
+    if (!add_to_blocker_set(*s_arp_table_blocker_set))
         m_should_block = false;
 }
 
@@ -117,7 +117,7 @@ void update_arp_table(const IPv4Address& ip_addr, const MACAddress& addr, Update
         if (update == UpdateArp::Delete)
             table.remove(ip_addr);
     });
-    s_arp_table_block_condition->unblock(ip_addr, addr);
+    s_arp_table_blocker_set->unblock(ip_addr, addr);
 
     if constexpr (ARP_DEBUG) {
         arp_table().with_shared([&](const auto& table) {

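Taken together, these two hunks are both halves of the ARP wait: a blocker parks itself on the global set, and update_arp_table() wakes it. A hedged sketch of the flow; the surrounding scaffolding is paraphrased, and only the blocker-set calls come from the diff:

    // Waiting side: the ARPTableBlocker constructor registers itself with
    // the global set. If add_to_blocker_set(*s_arp_table_blocker_set)
    // returns false, m_should_block is cleared and the thread never sleeps.
    //
    // Waking side: once a reply arrives, update_arp_table() stores the
    // mapping and then calls
    //     s_arp_table_blocker_set->unblock(ip_addr, addr);
    // which wakes every blocker waiting on that IPv4 address.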
Kernel/Process.cpp (+4 -4)

@@ -245,7 +245,7 @@ Process::Process(const String& name, uid_t uid, gid_t gid, ProcessID ppid, bool
     , m_executable(move(executable))
     , m_cwd(move(cwd))
     , m_tty(tty)
-    , m_wait_block_condition(*this)
+    , m_wait_blocker_set(*this)
 {
     // Ensure that we protect the process data when exiting the constructor.
     ProtectedDataMutationScope scope { *this };
@@ -660,18 +660,18 @@ void Process::finalize()
     // reference if there are still waiters around, or whenever the last
     // waitable states are consumed. Unless there is no parent around
     // anymore, in which case we'll just drop it right away.
-    m_wait_block_condition.finalize();
+    m_wait_blocker_set.finalize();
 }
 
 void Process::disowned_by_waiter(Process& process)
 {
-    m_wait_block_condition.disowned_by_waiter(process);
+    m_wait_blocker_set.disowned_by_waiter(process);
 }
 
 void Process::unblock_waiters(Thread::WaitBlocker::UnblockFlags flags, u8 signal)
 {
     if (auto parent = Process::from_pid(ppid()))
-        parent->m_wait_block_condition.unblock(*this, flags, signal);
+        parent->m_wait_blocker_set.unblock(*this, flags, signal);
 }
 
 void Process::die()

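A detail worth noting in these Process.cpp hunks: the WaitBlockerSet lives on the waiting (parent) side, so a child that changes state must look up its parent and poke the parent's set. A condensed sketch of that path; the flag and signal values are illustrative, not taken from this diff:

    // Child side, e.g. on termination (values illustrative):
    //     unblock_waiters(Thread::WaitBlocker::UnblockFlags::Terminated, SIGCHLD);
    // which resolves the parent and unblocks *its* set:
    //     if (auto parent = Process::from_pid(ppid()))
    //         parent->m_wait_blocker_set.unblock(*this, flags, signal);
    // Any WaitBlocker the parent parked there (sys$waitpid) then wakes.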
Kernel/Process.h (+2 -2)

@@ -485,7 +485,7 @@ public:
 
     void disowned_by_waiter(Process& process);
     void unblock_waiters(Thread::WaitBlocker::UnblockFlags, u8 signal = 0);
-    Thread::WaitBlockerSet& wait_blocker_set() { return m_wait_block_condition; }
+    Thread::WaitBlockerSet& wait_blocker_set() { return m_wait_blocker_set; }
 
     template<typename Callback>
     void for_each_coredump_property(Callback callback) const
@@ -787,7 +787,7 @@ private:
     // and wait for a tracer to attach.
     bool m_wait_for_tracer_at_next_execve { false };
 
-    Thread::WaitBlockerSet m_wait_block_condition;
+    Thread::WaitBlockerSet m_wait_blocker_set;
 
     struct CoredumpProperty {
         OwnPtr<KString> key;

Kernel/Thread.cpp (+2 -2)

@@ -386,7 +386,7 @@ void Thread::die_if_needed()
 void Thread::exit(void* exit_value)
 {
     VERIFY(Thread::current() == this);
-    m_join_condition.thread_did_exit(exit_value);
+    m_join_blocker_set.thread_did_exit(exit_value);
     set_should_die();
     u32 unlock_count;
     [[maybe_unused]] auto rc = unlock_process_if_locked(unlock_count);
@@ -511,7 +511,7 @@ void Thread::finalize()
         SpinlockLocker lock(g_scheduler_lock);
         dbgln_if(THREAD_DEBUG, "Finalizing thread {}", *this);
         set_state(Thread::State::Dead);
-        m_join_condition.thread_finalizing();
+        m_join_blocker_set.thread_finalizing();
     }
 
     if (m_dump_backtrace_on_finalization)

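The two Thread.cpp hunks notify the same set at two different points in a thread's death: thread_did_exit() runs on the dying thread itself and publishes the exit value, while thread_finalizing() runs later, during finalization under g_scheduler_lock, after the state flips to Dead. Roughly:

    // On the dying thread (Thread::exit):
    //     m_join_blocker_set.thread_did_exit(exit_value);  // hand off value
    // Later, during finalization (Thread::finalize):
    //     set_state(Thread::State::Dead);
    //     m_join_blocker_set.thread_finalizing();          // final wake-up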
Kernel/Thread.h (+1 -1)

@@ -1329,7 +1329,7 @@ private:
     Vector<HoldingLockInfo> m_holding_locks_list;
 #endif
 
-    JoinBlockerSet m_join_condition;
+    JoinBlockerSet m_join_blocker_set;
     Atomic<bool, AK::MemoryOrder::memory_order_relaxed> m_is_active { false };
     bool m_is_joinable { true };
     bool m_handling_page_fault { false };

Kernel/ThreadBlockers.cpp (+2 -2)

@@ -78,7 +78,7 @@ Thread::JoinBlocker::JoinBlocker(Thread& joinee, KResult& try_join_result, void*
         // but the joinee is joining immediately
         SpinlockLocker lock(m_lock);
         try_join_result = joinee.try_join([&]() {
-            if (!add_to_blocker_set(joinee.m_join_condition))
+            if (!add_to_blocker_set(joinee.m_join_blocker_set))
                 m_should_block = false;
         });
         m_join_error = try_join_result.is_error();
@@ -99,7 +99,7 @@ void Thread::JoinBlocker::not_blocking(bool timeout_in_past)
     // to supply us the information. We cannot hold the lock as unblock
     // could be called by the BlockerSet at any time!
     VERIFY(timeout_in_past);
-    m_joinee->m_join_condition.try_unblock(*this);
+    m_joinee->m_join_blocker_set.try_unblock(*this);
 }
 
 bool Thread::JoinBlocker::unblock(void* value, bool from_add_blocker)
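
Putting the ThreadBlockers.cpp hunks together with the Thread.cpp ones above, the join handshake looks roughly like this (condensed from the diff; the step numbering is ours):

    // 1. Joiner: JoinBlocker's constructor asks the joinee to register it:
    //        joinee.try_join([&] {
    //            if (!add_to_blocker_set(joinee.m_join_blocker_set))
    //                m_should_block = false;
    //        });
    // 2. Joinee: Thread::exit(value) calls
    //        m_join_blocker_set.thread_did_exit(value);
    //    which unblocks the registered JoinBlocker with the exit value.
    // 3. Timeout already expired: not_blocking() instead calls
    //        m_joinee->m_join_blocker_set.try_unblock(*this);
    //    so the blocker can still collect the value without sleeping.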