/*
 * Copyright (c) 2020, the SerenityOS developers.
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
#include <Kernel/Devices/AsyncDeviceRequest.h>
#include <Kernel/Devices/Device.h>
  8. namespace Kernel {
  9. AsyncDeviceRequest::AsyncDeviceRequest(Device& device)
  10. : m_device(device)
  11. , m_process(Process::current())
  12. {
  13. }
  14. AsyncDeviceRequest::~AsyncDeviceRequest()
  15. {
  16. {
  17. SpinlockLocker lock(m_lock);
  18. VERIFY(is_completed_result(m_result));
  19. VERIFY(m_sub_requests_pending.is_empty());
  20. }
  21. // We should not need any locking here anymore. The destructor should
  22. // only be called until either wait() or cancel() (once implemented) returned.
  23. // At that point no sub-request should be adding more requests and all
  24. // sub-requests should be completed (either succeeded, failed, or cancelled).
  25. // Which means there should be no more pending sub-requests and the
  26. // entire AsyncDeviceRequest hierarchy should be immutable.
  27. while (!m_sub_requests_complete.is_empty()) {
  28. // Note: sub_request is ref-counted, and we use this specific pattern
  29. // to allow make sure the refcount is dropped properly.
  30. auto sub_request = m_sub_requests_complete.take_first();
  31. VERIFY(is_completed_result(sub_request->m_result)); // Shouldn't need any locking anymore
  32. VERIFY(sub_request->m_parent_request == this);
  33. sub_request->m_parent_request = nullptr;
  34. }
  35. }
  36. void AsyncDeviceRequest::request_finished()
  37. {
  38. if (m_parent_request)
  39. m_parent_request->sub_request_finished(*this);
  40. // Trigger processing the next request
  41. m_device.process_next_queued_request({}, *this);
  42. // Wake anyone who may be waiting
  43. m_queue.wake_all();
  44. }
  45. auto AsyncDeviceRequest::wait(Time* timeout) -> RequestWaitResult
  46. {
  47. VERIFY(!m_parent_request);
  48. auto request_result = get_request_result();
  49. if (is_completed_result(request_result))
  50. return { request_result, Thread::BlockResult::NotBlocked };
  51. auto wait_result = m_queue.wait_on(Thread::BlockTimeout(false, timeout), name());
  52. return { get_request_result(), wait_result };
  53. }
  54. auto AsyncDeviceRequest::get_request_result() const -> RequestResult
  55. {
  56. SpinlockLocker lock(m_lock);
  57. return m_result;
  58. }
  59. void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request)
  60. {
  61. // Sub-requests cannot be for the same device
  62. VERIFY(&m_device != &sub_request->m_device);
  63. VERIFY(sub_request->m_parent_request == nullptr);
  64. sub_request->m_parent_request = this;
  65. SpinlockLocker lock(m_lock);
  66. VERIFY(!is_completed_result(m_result));
  67. m_sub_requests_pending.append(sub_request);
  68. if (m_result == Started)
  69. sub_request->do_start(move(lock));
  70. }
  71. void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
  72. {
  73. bool all_completed;
  74. {
  75. SpinlockLocker lock(m_lock);
  76. VERIFY(m_result == Started);
  77. if (m_sub_requests_pending.contains(sub_request)) {
  78. // Note: append handles removing from any previous intrusive list internally.
  79. m_sub_requests_complete.append(sub_request);
  80. }
  81. all_completed = m_sub_requests_pending.is_empty();
  82. if (all_completed) {
  83. // Aggregate any errors
  84. bool any_failures = false;
  85. bool any_memory_faults = false;
  86. for (auto& com_sub_request : m_sub_requests_complete) {
  87. auto sub_result = com_sub_request.get_request_result();
  88. VERIFY(is_completed_result(sub_result));
  89. switch (sub_result) {
  90. case Failure:
  91. any_failures = true;
  92. break;
  93. case MemoryFault:
  94. any_memory_faults = true;
  95. break;
  96. default:
  97. break;
  98. }
  99. if (any_failures && any_memory_faults)
  100. break; // Stop checking if all error conditions were found
  101. }
  102. if (any_failures)
  103. m_result = Failure;
  104. else if (any_memory_faults)
  105. m_result = MemoryFault;
  106. else
  107. m_result = Success;
  108. }
  109. }
  110. if (all_completed)
  111. request_finished();
  112. }
  113. void AsyncDeviceRequest::complete(RequestResult result)
  114. {
  115. VERIFY(result == Success || result == Failure || result == MemoryFault);
  116. ScopedCritical critical;
  117. {
  118. SpinlockLocker lock(m_lock);
  119. VERIFY(m_result == Started);
  120. m_result = result;
  121. }
  122. if (Processor::current().in_irq()) {
  123. ref(); // Make sure we don't get freed
  124. Processor::deferred_call_queue([this]() {
  125. request_finished();
  126. unref();
  127. });
  128. } else {
  129. request_finished();
  130. }
  131. }
  132. }