SharedBuffer.cpp

#include <Kernel/Process.h>
#include <Kernel/SharedBuffer.h>

// Global registry of all shared buffers, keyed by shared buffer ID.
// Lazily constructed on first use and never destroyed.
Lockable<HashMap<int, NonnullOwnPtr<SharedBuffer>>>& shared_buffers()
{
    static Lockable<HashMap<int, NonnullOwnPtr<SharedBuffer>>>* map;
    if (!map)
        map = new Lockable<HashMap<int, NonnullOwnPtr<SharedBuffer>>>;
    return *map;
}

// Verify that the per-process reference counts add up to m_total_refs.
void SharedBuffer::sanity_check(const char* what)
{
    LOCKER(shared_buffers().lock());
    unsigned found_refs = 0;
    for (const auto& ref : m_refs)
        found_refs += ref.count;
    if (found_refs != m_total_refs) {
        dbgprintf("%s sanity -- SharedBuffer{%p} id: %d has total refs %d but we found %d\n", what, this, m_shared_buffer_id, m_total_refs, found_refs);
        for (const auto& ref : m_refs) {
            dbgprintf(" ref from pid %d: refcnt %d\n", ref.pid, ref.count);
        }
        ASSERT_NOT_REACHED();
    }
}

// A global buffer is shared with everyone; otherwise look for an explicit reference from peer_pid.
bool SharedBuffer::is_shared_with(pid_t peer_pid)
{
    LOCKER(shared_buffers().lock());
    if (m_global)
        return true;
    for (auto& ref : m_refs) {
        if (ref.pid == peer_pid) {
            return true;
        }
    }
    return false;
}

// Take a reference on behalf of `process` and return the buffer's address in that
// process, mapping it into the process's address space if it isn't mapped yet.
void* SharedBuffer::ref_for_process_and_get_address(Process& process)
{
    LOCKER(shared_buffers().lock());
    ASSERT(is_shared_with(process.pid()));
    if (m_global) {
        // Global buffers get a reference entry created on demand.
        bool found = false;
        for (auto& ref : m_refs) {
            if (ref.pid == process.pid()) {
                found = true;
                break;
            }
        }
        if (!found)
            m_refs.append(Reference(process.pid()));
    }
    for (auto& ref : m_refs) {
        if (ref.pid == process.pid()) {
            ref.count++;
            m_total_refs++;
            if (ref.region == nullptr) {
                ref.region = process.allocate_region_with_vmobject(VirtualAddress(), size(), m_vmobject, 0, "SharedBuffer", PROT_READ | (m_writable ? PROT_WRITE : 0));
                ref.region->set_shared(true);
            }
            sanity_check("ref_for_process_and_get_address");
            return ref.region->vaddr().as_ptr();
        }
    }
    ASSERT_NOT_REACHED();
}

// Allow peer_pid to access this buffer; the actual reference is taken when the peer maps it.
void SharedBuffer::share_with(pid_t peer_pid)
{
    LOCKER(shared_buffers().lock());
    for (auto& ref : m_refs) {
        if (ref.pid == peer_pid) {
            // don't increment the reference count yet; let them get_shared_buffer it first.
            sanity_check("share_with (old ref)");
            return;
        }
    }
    m_refs.append(Reference(peer_pid));
    sanity_check("share_with (new ref)");
}

// Drop one reference held by `process`; when its count hits zero, unmap the buffer
// from that process and remove its entry, destroying the buffer if nobody else uses it.
void SharedBuffer::deref_for_process(Process& process)
{
    LOCKER(shared_buffers().lock());
    for (int i = 0; i < m_refs.size(); ++i) {
        auto& ref = m_refs[i];
        if (ref.pid == process.pid()) {
            ref.count--;
            m_total_refs--;
            if (ref.count == 0) {
#ifdef SHARED_BUFFER_DEBUG
                dbgprintf("Releasing shared buffer reference on %d of size %d by PID %d\n", m_shared_buffer_id, size(), process.pid());
#endif
                process.deallocate_region(*ref.region);
                m_refs.remove(i);
#ifdef SHARED_BUFFER_DEBUG
                dbgprintf("Released shared buffer reference on %d of size %d by PID %d\n", m_shared_buffer_id, size(), process.pid());
#endif
                sanity_check("deref_for_process");
                destroy_if_unused();
                return;
            }
            return;
        }
    }
    ASSERT_NOT_REACHED();
}

// Drop every reference held by `pid` in one step, without touching its region.
void SharedBuffer::disown(pid_t pid)
{
    LOCKER(shared_buffers().lock());
    for (int i = 0; i < m_refs.size(); ++i) {
        auto& ref = m_refs[i];
        if (ref.pid == pid) {
#ifdef SHARED_BUFFER_DEBUG
            dbgprintf("Disowning shared buffer %d of size %d by PID %d\n", m_shared_buffer_id, size(), pid);
#endif
            m_total_refs -= ref.count;
            m_refs.remove(i);
#ifdef SHARED_BUFFER_DEBUG
            dbgprintf("Disowned shared buffer %d of size %d by PID %d\n", m_shared_buffer_id, size(), pid);
#endif
            destroy_if_unused();
            return;
        }
    }
}

// Once nobody holds a reference, remove this buffer from the global map.
// The map owns the SharedBuffer, so removal deletes it.
void SharedBuffer::destroy_if_unused()
{
    LOCKER(shared_buffers().lock());
    sanity_check("destroy_if_unused");
    if (m_total_refs == 0) {
#ifdef SHARED_BUFFER_DEBUG
        kprintf("Destroying unused SharedBuffer{%p} id: %d\n", this, m_shared_buffer_id);
#endif
        auto count_before = shared_buffers().resource().size();
        shared_buffers().resource().remove(m_shared_buffer_id);
        ASSERT(count_before != shared_buffers().resource().size());
    }
}

// Make the buffer read-only for everyone and remap any existing mappings accordingly.
void SharedBuffer::seal()
{
    LOCKER(shared_buffers().lock());
    m_writable = false;
    for (auto& ref : m_refs) {
        if (ref.region) {
            ref.region->set_writable(false);
            ref.region->remap();
        }
    }
}