// Kernel/SharedBuffer.cpp
#include <Kernel/SharedBuffer.h>
#include <Kernel/Process.h>
  3. Lockable<HashMap<int, OwnPtr<SharedBuffer>>>& shared_buffers()
  4. {
  5. static Lockable<HashMap<int, OwnPtr<SharedBuffer>>>* map;
  6. if (!map)
  7. map = new Lockable<HashMap<int, OwnPtr<SharedBuffer>>>;
  8. return *map;
  9. }
  10. bool SharedBuffer::is_shared_with(pid_t peer_pid)
  11. {
  12. LOCKER(shared_buffers().lock());
  13. for (auto& ref : m_refs) {
  14. if (ref.pid == peer_pid) {
  15. return true;
  16. }
  17. }
  18. return false;
  19. }
  20. void* SharedBuffer::ref_for_process_and_get_address(Process& process)
  21. {
  22. LOCKER(shared_buffers().lock());
  23. ASSERT(is_shared_with(process.pid()));
  24. for (auto& ref : m_refs) {
  25. if (ref.pid == process.pid()) {
  26. ref.count++;
  27. m_total_refs++;
  28. if (ref.region == nullptr) {
  29. ref.region = process.allocate_region_with_vmo(VirtualAddress(), size(), m_vmo, 0, "SharedBuffer", PROT_READ | (m_writable ? PROT_WRITE : 0));
  30. ref.region->set_shared(true);
  31. }
  32. return ref.region->vaddr().as_ptr();
  33. }
  34. }
  35. ASSERT_NOT_REACHED();
  36. }
  37. void SharedBuffer::share_with(pid_t peer_pid)
  38. {
  39. LOCKER(shared_buffers().lock());
  40. for (auto& ref : m_refs) {
  41. if (ref.pid == peer_pid) {
  42. // don't increment the reference count yet; let them get_shared_buffer it first.
  43. return;
  44. }
  45. }
  46. m_refs.append(Reference(peer_pid));
  47. }
  48. void SharedBuffer::deref_for_process(Process& process)
  49. {
  50. LOCKER(shared_buffers().lock());
  51. for (int i = 0; i < m_refs.size(); ++i) {
  52. auto& ref = m_refs[i];
  53. if (ref.pid == process.pid()) {
  54. if (--ref.count == 0) {
  55. #ifdef SHARED_BUFFER_DEBUG
  56. dbgprintf("Releasing shared buffer reference on %d of size %d by PID %d\n", m_shared_buffer_id, size(), process.pid());
  57. #endif
  58. process.deallocate_region(*ref.region);
  59. m_refs.remove(i);
  60. #ifdef SHARED_BUFFER_DEBUG
  61. dbgprintf("Released shared buffer reference on %d of size %d by PID %d\n", m_shared_buffer_id, size(), process.pid());
  62. #endif
  63. destroy_if_unused();
  64. return;
  65. }
  66. }
  67. }
  68. }
  69. void SharedBuffer::disown(pid_t pid)
  70. {
  71. LOCKER(shared_buffers().lock());
  72. for (int i = 0; i < m_refs.size(); ++i) {
  73. auto& ref = m_refs[i];
  74. if (ref.pid == pid) {
  75. #ifdef SHARED_BUFFER_DEBUG
  76. dbgprintf("Disowning shared buffer %d of size %d by PID %d\n", m_shared_buffer_id, size(), pid);
  77. #endif
  78. m_refs.remove(i);
  79. #ifdef SHARED_BUFFER_DEBUG
  80. dbgprintf("Disowned shared buffer %d of size %d by PID %d\n", m_shared_buffer_id, size(), pid);
  81. #endif
  82. destroy_if_unused();
  83. return;
  84. }
  85. }
  86. }
  87. void SharedBuffer::destroy_if_unused()
  88. {
  89. LOCKER(shared_buffers().lock());
  90. if (m_total_refs == 0) {
  91. #ifdef SHARED_BUFFER_DEBUG
  92. kprintf("Destroying unused SharedBuffer{%p} id: %d\n", this, m_shared_buffer_id);
  93. #endif
  94. auto count_before = shared_buffers().resource().size();
  95. shared_buffers().resource().remove(m_shared_buffer_id);
  96. ASSERT(count_before != shared_buffers().resource().size());
  97. }
  98. }
  99. void SharedBuffer::seal()
  100. {
  101. LOCKER(shared_buffers().lock());
  102. m_writable = false;
  103. for (auto& ref : m_refs) {
  104. if (ref.region) {
  105. ref.region->set_writable(false);
  106. MM.remap_region(*ref.region->page_directory(), *ref.region);
  107. }
  108. }
  109. }