VMObject.h

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#pragma once

#include <AK/FixedArray.h>
#include <AK/HashTable.h>
#include <AK/IntrusiveList.h>
#include <AK/RefCounted.h>
#include <AK/RefPtr.h>
#include <AK/Vector.h>
#include <AK/Weakable.h>
#include <Kernel/Forward.h>
#include <Kernel/Mutex.h>
#include <Kernel/VM/Region.h>

namespace Kernel {

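// Callback interface: implement this and register below to be notified
// (via vmobject_deleted()) when a VMObject is being destroyed.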
class VMObjectDeletedHandler {
public:
    virtual ~VMObjectDeletedHandler() = default;
    virtual void vmobject_deleted(VMObject&) = 0;
};

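// Base class for all virtual memory objects. A VMObject owns a set of
// physical pages and can be mapped into any number of Regions.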
class VMObject : public RefCounted<VMObject>
    , public Weakable<VMObject> {
    friend class MemoryManager;
    friend class Region;

public:
    virtual ~VMObject();

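    // Fallible clone: returns null on failure. Each subclass decides
    // whether and how it can be cloned.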
    virtual RefPtr<VMObject> try_clone() = 0;

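    // Lightweight type checks; each concrete subclass overrides the ones that apply to it.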
    virtual bool is_anonymous() const { return false; }
    virtual bool is_inode() const { return false; }
    virtual bool is_shared_inode() const { return false; }
    virtual bool is_private_inode() const { return false; }
    virtual bool is_contiguous() const { return false; }

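    // The physical pages backing this object. A slot may be null, e.g. for
    // pages that have not been faulted in yet.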
    size_t page_count() const { return m_physical_pages.size(); }
    Span<RefPtr<PhysicalPage> const> physical_pages() const { return m_physical_pages.span(); }
    Span<RefPtr<PhysicalPage>> physical_pages() { return m_physical_pages.span(); }

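    // Size in bytes; always a whole multiple of PAGE_SIZE.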
    size_t size() const { return m_physical_pages.size() * PAGE_SIZE; }

    virtual StringView class_name() const = 0;

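    // Regions register themselves here while they are mapping this VMObject.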
    ALWAYS_INLINE void add_region(Region& region)
    {
        ScopedSpinLock locker(m_lock);
        m_regions.append(region);
    }

    ALWAYS_INLINE void remove_region(Region& region)
    {
        ScopedSpinLock locker(m_lock);
        m_regions.remove(region);
    }

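    // Register/unregister a handler to be notified when this VMObject is destroyed.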
    void register_on_deleted_handler(VMObjectDeletedHandler& handler)
    {
        ScopedSpinLock locker(m_on_deleted_lock);
        m_on_deleted.set(&handler);
    }

    void unregister_on_deleted_handler(VMObjectDeletedHandler& handler)
    {
        ScopedSpinLock locker(m_on_deleted_lock);
        m_on_deleted.remove(&handler);
    }

protected:
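    // Only subclasses can construct a VMObject.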
    explicit VMObject(size_t);
    explicit VMObject(VMObject const&);

    template<typename Callback>
    void for_each_region(Callback);

    IntrusiveListNode<VMObject> m_list_node;
    FixedArray<RefPtr<PhysicalPage>> m_physical_pages;

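    // Guards the region list below; protected so subclasses can take it as well.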
    mutable RecursiveSpinLock m_lock;

private:
    VMObject& operator=(VMObject const&) = delete;
    VMObject& operator=(VMObject&&) = delete;
    VMObject(VMObject&&) = delete;

    HashTable<VMObjectDeletedHandler*> m_on_deleted;
    SpinLock<u8> m_on_deleted_lock;

    Region::ListInVMObject m_regions;

public:
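    // Intrusive-list type so all VMObjects can be linked through m_list_node.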
    using List = IntrusiveList<VMObject, RawPtr<VMObject>, &VMObject::m_list_node>;
};

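// Invokes callback for each Region that currently maps this VMObject,
// with m_lock held for the duration.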
template<typename Callback>
inline void VMObject::for_each_region(Callback callback)
{
    ScopedSpinLock lock(m_lock);
    for (auto& region : m_regions) {
        callback(region);
    }
}

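// These Region accessors are defined here rather than in Region.h because
// they need the full VMObject definition.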
inline PhysicalPage const* Region::physical_page(size_t index) const
{
    VERIFY(index < page_count());
    return vmobject().physical_pages()[first_page_index() + index];
}

inline RefPtr<PhysicalPage>& Region::physical_page_slot(size_t index)
{
    VERIFY(index < page_count());
    return vmobject().physical_pages()[first_page_index() + index];
}

}