StorageDevice.cpp

/*
 * Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Memory.h>
#include <AK/StringView.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/Storage/StorageDevice.h>
#include <Kernel/Storage/StorageManagement.h>

namespace Kernel {

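// The first constructor lets StorageManagement assign the device's major/minor
// numbers; the second takes them explicitly from the caller.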
StorageDevice::StorageDevice(const StorageController& controller, size_t sector_size, u64 max_addressable_block)
    : BlockDevice(StorageManagement::major_number(), StorageManagement::minor_number(), sector_size)
    , m_storage_controller(controller)
    , m_max_addressable_block(max_addressable_block)
{
}

StorageDevice::StorageDevice(const StorageController& controller, int major, int minor, size_t sector_size, u64 max_addressable_block)
    : BlockDevice(major, minor, sector_size)
    , m_storage_controller(controller)
    , m_max_addressable_block(max_addressable_block)
{
}

StringView StorageDevice::class_name() const
{
    return "StorageDevice";
}

NonnullRefPtr<StorageController> StorageDevice::controller() const
{
    return m_storage_controller;
}

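// Reads are issued to the device in whole-block units; any sub-block remainder
// is bounced through a kernel buffer below. Transfers are also clamped to one
// page worth of blocks per call, so callers may get back a short count.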
KResultOr<size_t> StorageDevice::read(FileDescription&, u64 offset, UserOrKernelBuffer& outbuf, size_t len)
{
    unsigned index = offset / block_size();
    u16 whole_blocks = len / block_size();
    size_t remaining = len % block_size();
    unsigned blocks_per_page = PAGE_SIZE / block_size();

    // PATAChannel will chuck a wobbly if we try to read more than PAGE_SIZE
    // at a time, because it uses a single page for its DMA buffer.
    if (whole_blocks >= blocks_per_page) {
        whole_blocks = blocks_per_page;
        remaining = 0;
    }

    dbgln_if(STORAGE_DEVICE_DEBUG, "StorageDevice::read() index={}, whole_blocks={}, remaining={}", index, whole_blocks, remaining);

    if (whole_blocks > 0) {
        auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index, whole_blocks, outbuf, whole_blocks * block_size());
        auto result = read_request->wait();
        if (result.wait_result().was_interrupted())
            return EINTR;
        switch (result.request_result()) {
        case AsyncDeviceRequest::Failure:
        case AsyncDeviceRequest::Cancelled:
            return EIO;
        case AsyncDeviceRequest::MemoryFault:
            return EFAULT;
        default:
            break;
        }
    }

    off_t pos = whole_blocks * block_size();

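    // Handle the sub-block tail of the request: read the next block into a
    // kernel bounce buffer and copy only the remaining bytes to the caller.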
    if (remaining > 0) {
        auto data_result = ByteBuffer::create_uninitialized(block_size());
        if (!data_result.has_value())
            return ENOMEM;
        auto data = data_result.release_value();
        auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data.data());
        auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size());
        auto result = read_request->wait();
        if (result.wait_result().was_interrupted())
            return EINTR;
        switch (result.request_result()) {
        case AsyncDeviceRequest::Failure:
            return pos;
        case AsyncDeviceRequest::Cancelled:
            return EIO;
        case AsyncDeviceRequest::MemoryFault:
            // This should never happen, we're writing to a kernel buffer!
            VERIFY_NOT_REACHED();
        default:
            break;
        }

        if (!outbuf.write(data.data(), pos, remaining))
            return EFAULT;
    }

    return pos + remaining;
}

bool StorageDevice::can_read(const FileDescription&, size_t offset) const
{
    return offset < (max_addressable_block() * block_size());
}

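// write() mirrors read(): whole blocks are written directly from the caller's
// buffer, a trailing partial block is handled with a read-modify-write cycle
// below, and the transfer is likewise clamped to one page worth of blocks.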
KResultOr<size_t> StorageDevice::write(FileDescription&, u64 offset, const UserOrKernelBuffer& inbuf, size_t len)
{
    unsigned index = offset / block_size();
    u16 whole_blocks = len / block_size();
    size_t remaining = len % block_size();
    unsigned blocks_per_page = PAGE_SIZE / block_size();

    // PATAChannel will chuck a wobbly if we try to write more than PAGE_SIZE
    // at a time, because it uses a single page for its DMA buffer.
    if (whole_blocks >= blocks_per_page) {
        whole_blocks = blocks_per_page;
        remaining = 0;
    }

    dbgln_if(STORAGE_DEVICE_DEBUG, "StorageDevice::write() index={}, whole_blocks={}, remaining={}", index, whole_blocks, remaining);

    if (whole_blocks > 0) {
        auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index, whole_blocks, inbuf, whole_blocks * block_size());
        auto result = write_request->wait();
        if (result.wait_result().was_interrupted())
            return EINTR;
        switch (result.request_result()) {
        case AsyncDeviceRequest::Failure:
        case AsyncDeviceRequest::Cancelled:
            return EIO;
        case AsyncDeviceRequest::MemoryFault:
            return EFAULT;
        default:
            break;
        }
    }

    off_t pos = whole_blocks * block_size();

    // since we can only write in block_size() increments, if we want to do a
    // partial write, we have to read the block's content first, modify it,
    // then write the whole block back to the disk.
    if (remaining > 0) {
        // FIXME: Do something sensible with this OOM scenario.
        auto data = ByteBuffer::create_zeroed(block_size()).release_value();
        auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data.data());

        {
            auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size());
            auto result = read_request->wait();
            if (result.wait_result().was_interrupted())
                return EINTR;
            switch (result.request_result()) {
            case AsyncDeviceRequest::Failure:
                return pos;
            case AsyncDeviceRequest::Cancelled:
                return EIO;
            case AsyncDeviceRequest::MemoryFault:
                // This should never happen, we're writing to a kernel buffer!
                VERIFY_NOT_REACHED();
            default:
                break;
            }
        }

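        // Overlay the caller's partial data onto the block we just read, then
        // write the merged block back to the device.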
        if (!inbuf.read(data.data(), pos, remaining))
            return EFAULT;

        {
            auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index + whole_blocks, 1, data_buffer, block_size());
            auto result = write_request->wait();
            if (result.wait_result().was_interrupted())
                return EINTR;
            switch (result.request_result()) {
            case AsyncDeviceRequest::Failure:
                return pos;
            case AsyncDeviceRequest::Cancelled:
                return EIO;
            case AsyncDeviceRequest::MemoryFault:
                // This should never happen, we're writing to a kernel buffer!
                VERIFY_NOT_REACHED();
            default:
                break;
            }
        }
    }

    return pos + remaining;
}

bool StorageDevice::can_write(const FileDescription&, size_t offset) const
{
    return offset < (max_addressable_block() * block_size());
}

}