AsyncDeviceRequest.cpp 6.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175
  1. /*
  2. * Copyright (c) 2020, The SerenityOS developers.
  3. * All rights reserved.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions are met:
  7. *
  8. * 1. Redistributions of source code must retain the above copyright notice, this
  9. * list of conditions and the following disclaimer.
  10. *
  11. * 2. Redistributions in binary form must reproduce the above copyright notice,
  12. * this list of conditions and the following disclaimer in the documentation
  13. * and/or other materials provided with the distribution.
  14. *
  15. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  16. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  17. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
  18. * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
  19. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  20. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  21. * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
  22. * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
  23. * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  24. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. #include <Kernel/Devices/AsyncDeviceRequest.h>
  27. #include <Kernel/Devices/Device.h>
  28. namespace Kernel {
// Constructs an asynchronous request bound to the given device. The request
// also captures the currently running process, so that e.g. user-space
// buffers can later be accessed in the correct address space.
AsyncDeviceRequest::AsyncDeviceRequest(Device& device)
    : m_device(device)
    , m_process(*Process::current())
{
}
AsyncDeviceRequest::~AsyncDeviceRequest()
{
    {
        // Take the lock one final time just to validate the terminal state;
        // after this scope we rely on the request being immutable (see below).
        ScopedSpinLock lock(m_lock);
        ASSERT(is_completed_result(m_result));
        ASSERT(m_sub_requests_pending.is_empty());
    }

    // We should not need any locking here anymore. The destructor should
    // only be called after either wait() or cancel() (once implemented) returned.
    // At that point no sub-request should be adding more requests and all
    // sub-requests should be completed (either succeeded, failed, or cancelled).
    // Which means there should be no more pending sub-requests and the
    // entire AsyncDeviceRequest hierarchy should be immutable.
    for (auto& sub_request : m_sub_requests_complete) {
        // Detach the completed sub-requests from us so they don't hold a
        // stale back-pointer once we're gone.
        ASSERT(is_completed_result(sub_request.m_result)); // Shouldn't need any locking anymore
        ASSERT(sub_request.m_parent_request == this);
        sub_request.m_parent_request = nullptr;
    }
}
// Invoked once this request has reached a completed state. Propagates the
// completion to the parent request (if this is a sub-request), lets the
// device start its next queued request, and wakes any threads blocked in
// wait(). The order of these three steps is significant.
void AsyncDeviceRequest::request_finished()
{
    if (m_parent_request)
        m_parent_request->sub_request_finished(*this);

    // Trigger processing the next request
    m_device.process_next_queued_request({}, *this);

    // Wake anyone who may be waiting
    m_queue.wake_all();
}
  62. auto AsyncDeviceRequest::wait(timeval* timeout) -> RequestWaitResult
  63. {
  64. ASSERT(!m_parent_request);
  65. auto request_result = get_request_result();
  66. if (is_completed_result(request_result))
  67. return { request_result, Thread::BlockResult::NotBlocked };
  68. auto wait_result = Thread::current()->wait_on(m_queue, name(), timeout);
  69. return { get_request_result(), wait_result };
  70. }
  71. auto AsyncDeviceRequest::get_request_result() const -> RequestResult
  72. {
  73. ScopedSpinLock lock(m_lock);
  74. return m_result;
  75. }
  76. void AsyncDeviceRequest::add_sub_request(NonnullRefPtr<AsyncDeviceRequest> sub_request)
  77. {
  78. // Sub-requests cannot be for the same device
  79. ASSERT(&m_device != &sub_request->m_device);
  80. ASSERT(sub_request->m_parent_request == nullptr);
  81. sub_request->m_parent_request = this;
  82. bool should_start;
  83. {
  84. ScopedSpinLock lock(m_lock);
  85. ASSERT(!is_completed_result(m_result));
  86. m_sub_requests_pending.append(sub_request);
  87. should_start = (m_result == Started);
  88. }
  89. if (should_start)
  90. sub_request->do_start();
  91. }
  92. void AsyncDeviceRequest::sub_request_finished(AsyncDeviceRequest& sub_request)
  93. {
  94. bool all_completed;
  95. {
  96. ScopedSpinLock lock(m_lock);
  97. ASSERT(m_result == Started);
  98. size_t index;
  99. for (index = 0; index < m_sub_requests_pending.size(); index++) {
  100. if (&m_sub_requests_pending[index] == &sub_request) {
  101. NonnullRefPtr<AsyncDeviceRequest> request(m_sub_requests_pending[index]);
  102. m_sub_requests_pending.remove(index);
  103. m_sub_requests_complete.append(move(request));
  104. break;
  105. }
  106. }
  107. ASSERT(index < m_sub_requests_pending.size());
  108. all_completed = m_sub_requests_pending.is_empty();
  109. if (all_completed) {
  110. // Aggregate any errors
  111. bool any_failures = false;
  112. bool any_memory_faults = false;
  113. for (index = 0; index < m_sub_requests_complete.size(); index++) {
  114. auto& sub_request = m_sub_requests_complete[index];
  115. auto sub_result = sub_request.get_request_result();
  116. ASSERT(is_completed_result(sub_result));
  117. switch (sub_result) {
  118. case Failure:
  119. any_failures = true;
  120. break;
  121. case MemoryFault:
  122. any_memory_faults = true;
  123. break;
  124. default:
  125. break;
  126. }
  127. if (any_failures && any_memory_faults)
  128. break; // Stop checking if all error conditions were found
  129. }
  130. if (any_failures)
  131. m_result = Failure;
  132. else if (any_memory_faults)
  133. m_result = MemoryFault;
  134. else
  135. m_result = Success;
  136. }
  137. }
  138. if (all_completed)
  139. request_finished();
  140. }
  141. void AsyncDeviceRequest::complete(RequestResult result)
  142. {
  143. ASSERT(result == Success || result == Failure || result == MemoryFault);
  144. ScopedCritical critical;
  145. {
  146. ScopedSpinLock lock(m_lock);
  147. ASSERT(m_result == Started);
  148. m_result = result;
  149. }
  150. if (Processor::current().in_irq()) {
  151. ref(); // Make sure we don't get freed
  152. Processor::deferred_call_queue([this]() {
  153. request_finished();
  154. unref();
  155. });
  156. } else {
  157. request_finished();
  158. }
  159. }
  160. }