// Kernel/Devices/AsyncDeviceRequest.cpp
  1. /*
  2. * Copyright (c) 2020, The SerenityOS developers.
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright notice, this
  9. * list of conditions and the following disclaimer.
  10. *
  11. * 2. Redistributions in binary form must reproduce the above copyright notice,
  12. * this list of conditions and the following disclaimer in the documentation
  13. * and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  16. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  21. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  22. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  23. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #include <Kernel/Devices/AsyncDeviceRequest.h>
  27. #include <Kernel/Devices/Device.h>
  28. namespace Kernel {
// Construct a request bound to the device it targets and to the process that
// issued it (the current process at construction time).
// NOTE(review): m_process presumably keeps the issuing process (and thus its
// address space) alive while the device works asynchronously — confirm against
// the header.
AsyncDeviceRequest::AsyncDeviceRequest(Device& device)
    : m_device(device)
    , m_process(*Process::current())
{
}
AsyncDeviceRequest::~AsyncDeviceRequest()
{
    {
        // Take the lock only to check our invariants: the request must have
        // reached a terminal result and no sub-request may still be pending.
        ScopedSpinLock lock(m_lock);
        VERIFY(is_completed_result(m_result));
        VERIFY(m_sub_requests_pending.is_empty());
    }

    // We should not need any locking here anymore. The destructor should
    // only be called until either wait() or cancel() (once implemented) returned.
    // At that point no sub-request should be adding more requests and all
    // sub-requests should be completed (either succeeded, failed, or cancelled).
    // Which means there should be no more pending sub-requests and the
    // entire AsyncDeviceRequest hierarchy should be immutable.
    while (!m_sub_requests_complete.is_empty()) {
        // Note: sub_request is ref-counted, and we use this specific pattern
        // (take_first() into a local that drops at the end of each iteration)
        // to make sure the refcount is dropped properly.
        auto sub_request = m_sub_requests_complete.take_first();
        VERIFY(is_completed_result(sub_request->m_result)); // Shouldn't need any locking anymore
        VERIFY(sub_request->m_parent_request == this);
        // Detach the child so it no longer points at us while it dies.
        sub_request->m_parent_request = nullptr;
    }
}
  56. void AsyncDeviceRequest::request_finished()
  57. {
  58. if (m_parent_request)
  59. m_parent_request->sub_request_finished(*this);
  60. // Trigger processing the next request
  61. m_device.process_next_queued_request({}, *this);
  62. // Wake anyone who may be waiting
  63. m_queue.wake_all();
  64. }
  65. auto AsyncDeviceRequest::wait(Time* timeout) -> RequestWaitResult
  66. {
  67. VERIFY(!m_parent_request);
  68. auto request_result = get_request_result();
  69. if (is_completed_result(request_result))
  70. return { request_result, Thread::BlockResult::NotBlocked };
  71. auto wait_result = m_queue.wait_on(Thread::BlockTimeout(false, timeout), name());
  72. return { get_request_result(), wait_result };
  73. }
  74. auto AsyncDeviceRequest::get_request_result() const -> RequestResult
  75. {
  76. ScopedSpinLock lock(m_lock);
  77. return m_result;
  78. }
// Attach a child request to this one. The child becomes part of this
// request's completion: we only finish after all sub-requests finished.
void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request)
{
    // Sub-requests cannot be for the same device
    VERIFY(&m_device != &sub_request->m_device);
    VERIFY(sub_request->m_parent_request == nullptr);
    sub_request->m_parent_request = this;

    ScopedSpinLock lock(m_lock);
    // Can't attach children to a request that already completed.
    VERIFY(!is_completed_result(m_result));
    m_sub_requests_pending.append(sub_request);
    if (m_result == Started)
        // We already started, so start the child immediately. Our held lock
        // is moved into do_start() — NOTE(review): presumably do_start()
        // releases it at the right moment; confirm against its definition.
        sub_request->do_start(move(lock));
}
// Called by a child request (via request_finished()) when it completes.
// Moves the child to the completed list; when the last pending child is
// gone, aggregates the children's results into our own and finishes.
void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
{
    bool all_completed;
    {
        ScopedSpinLock lock(m_lock);
        VERIFY(m_result == Started);
        if (m_sub_requests_pending.contains(sub_request)) {
            // Note: append handles removing from any previous intrusive list internally.
            m_sub_requests_complete.append(sub_request);
        }
        all_completed = m_sub_requests_pending.is_empty();
        if (all_completed) {
            // Aggregate any errors
            bool any_failures = false;
            bool any_memory_faults = false;
            for (auto& com_sub_request : m_sub_requests_complete) {
                auto sub_result = com_sub_request.get_request_result();
                VERIFY(is_completed_result(sub_result));
                switch (sub_result) {
                case Failure:
                    any_failures = true;
                    break;
                case MemoryFault:
                    any_memory_faults = true;
                    break;
                default:
                    break;
                }
                if (any_failures && any_memory_faults)
                    break; // Stop checking if all error conditions were found
            }
            // Failure takes precedence over MemoryFault if both occurred.
            if (any_failures)
                m_result = Failure;
            else if (any_memory_faults)
                m_result = MemoryFault;
            else
                m_result = Success;
        }
    }
    // Finish outside the lock: request_finished() notifies our parent,
    // kicks the device queue, and wakes waiters.
    if (all_completed)
        request_finished();
}
// Mark this request finished with the given terminal result and run the
// completion steps. Safe to call from IRQ context: in that case the
// completion work is deferred out of the interrupt handler.
void AsyncDeviceRequest::complete(RequestResult result)
{
    // Only terminal results may be passed in here.
    VERIFY(result == Success || result == Failure || result == MemoryFault);
    ScopedCritical critical;
    {
        ScopedSpinLock lock(m_lock);
        // A request can only be completed once, and only after it started.
        VERIFY(m_result == Started);
        m_result = result;
    }
    if (Processor::current().in_irq()) {
        // request_finished() wakes waiters and advances the device queue,
        // which we defer out of IRQ context via a deferred call.
        ref(); // Make sure we don't get freed before the deferred call runs
        Processor::deferred_call_queue([this]() {
            request_finished();
            unref(); // Drop the extra ref taken above
        });
    } else {
        request_finished();
    }
}
  152. }