StorageDevice.cpp

/*
 * Copyright (c) 2020, Liav A. <liavalb@hotmail.co.il>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Memory.h>
#include <AK/StringView.h>
#include <Kernel/Debug.h>
#include <Kernel/FileSystem/FileDescription.h>
#include <Kernel/Storage/StorageDevice.h>
#include <Kernel/Storage/StorageManagement.h>

namespace Kernel {

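// The first constructor obtains the device's major/minor numbers from
// StorageManagement; the second overload lets the caller supply them explicitly.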
StorageDevice::StorageDevice(const StorageController& controller, size_t sector_size, u64 max_addressable_block)
    : BlockDevice(StorageManagement::major_number(), StorageManagement::minor_number(), sector_size)
    , m_storage_controller(controller)
    , m_max_addressable_block(max_addressable_block)
{
}

StorageDevice::StorageDevice(const StorageController& controller, int major, int minor, size_t sector_size, u64 max_addressable_block)
    : BlockDevice(major, minor, sector_size)
    , m_storage_controller(controller)
    , m_max_addressable_block(max_addressable_block)
{
}

const char* StorageDevice::class_name() const
{
    return "StorageDevice";
}

NonnullRefPtr<StorageController> StorageDevice::controller() const
{
    return m_storage_controller;
}

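// Reads are performed in two phases: as many whole blocks as possible are
// transferred directly into the caller's buffer with a single
// AsyncBlockDeviceRequest, and a trailing partial block is read into a
// kernel bounce buffer and copied out afterwards.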
KResultOr<size_t> StorageDevice::read(FileDescription&, u64 offset, UserOrKernelBuffer& outbuf, size_t len)
{
    unsigned index = offset / block_size();
    u16 whole_blocks = len / block_size();
    size_t remaining = len % block_size();

    unsigned blocks_per_page = PAGE_SIZE / block_size();

    // PATAChannel will chuck a wobbly if we try to read more than PAGE_SIZE
    // at a time, because it uses a single page for its DMA buffer.
    if (whole_blocks >= blocks_per_page) {
        whole_blocks = blocks_per_page;
        remaining = 0;
    }

    dbgln_if(STORAGE_DEVICE_DEBUG, "StorageDevice::read() index={}, whole_blocks={}, remaining={}", index, whole_blocks, remaining);

    if (whole_blocks > 0) {
        auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index, whole_blocks, outbuf, whole_blocks * block_size());
        auto result = read_request->wait();
        if (result.wait_result().was_interrupted())
            return EINTR;
        switch (result.request_result()) {
        case AsyncDeviceRequest::Failure:
        case AsyncDeviceRequest::Cancelled:
            return EIO;
        case AsyncDeviceRequest::MemoryFault:
            return EFAULT;
        default:
            break;
        }
    }

    off_t pos = whole_blocks * block_size();

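    // Handle the tail that does not fill a whole block: read the block into a
    // kernel bounce buffer, then copy the requested slice to the caller.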
    if (remaining > 0) {
        auto data = ByteBuffer::create_uninitialized(block_size());
        auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data.data());
        auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size());
        auto result = read_request->wait();
        if (result.wait_result().was_interrupted())
            return EINTR;
        switch (result.request_result()) {
        case AsyncDeviceRequest::Failure:
            return pos;
        case AsyncDeviceRequest::Cancelled:
            return EIO;
        case AsyncDeviceRequest::MemoryFault:
            // This should never happen, we're writing to a kernel buffer!
            VERIFY_NOT_REACHED();
        default:
            break;
        }

        if (!outbuf.write(data.data(), pos, remaining))
            return EFAULT;
    }

    return pos + remaining;
}

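// Reading is possible as long as the offset still falls within the device's
// addressable range (max_addressable_block() * block_size() bytes).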
bool StorageDevice::can_read(const FileDescription&, size_t offset) const
{
    return offset < (max_addressable_block() * block_size());
}

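// Writes follow the same shape as read(): whole blocks go straight from the
// caller's buffer to the device, capped at one page per request, and a
// trailing partial block is handled with the read-modify-write cycle below.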
KResultOr<size_t> StorageDevice::write(FileDescription&, u64 offset, const UserOrKernelBuffer& inbuf, size_t len)
{
    unsigned index = offset / block_size();
    u16 whole_blocks = len / block_size();
    size_t remaining = len % block_size();

    unsigned blocks_per_page = PAGE_SIZE / block_size();

    // PATAChannel will chuck a wobbly if we try to write more than PAGE_SIZE
    // at a time, because it uses a single page for its DMA buffer.
    if (whole_blocks >= blocks_per_page) {
        whole_blocks = blocks_per_page;
        remaining = 0;
    }

    dbgln_if(STORAGE_DEVICE_DEBUG, "StorageDevice::write() index={}, whole_blocks={}, remaining={}", index, whole_blocks, remaining);

    if (whole_blocks > 0) {
        auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index, whole_blocks, inbuf, whole_blocks * block_size());
        auto result = write_request->wait();
        if (result.wait_result().was_interrupted())
            return EINTR;
        switch (result.request_result()) {
        case AsyncDeviceRequest::Failure:
        case AsyncDeviceRequest::Cancelled:
            return EIO;
        case AsyncDeviceRequest::MemoryFault:
            return EFAULT;
        default:
            break;
        }
    }

    off_t pos = whole_blocks * block_size();

    // since we can only write in block_size() increments, if we want to do a
    // partial write, we have to read the block's content first, modify it,
    // then write the whole block back to the disk.
    if (remaining > 0) {
        auto data = ByteBuffer::create_zeroed(block_size());
        auto data_buffer = UserOrKernelBuffer::for_kernel_buffer(data.data());

        {
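            // Read the block's current contents into the kernel bounce buffer.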
            auto read_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Read, index + whole_blocks, 1, data_buffer, block_size());
            auto result = read_request->wait();
            if (result.wait_result().was_interrupted())
                return EINTR;
            switch (result.request_result()) {
            case AsyncDeviceRequest::Failure:
                return pos;
            case AsyncDeviceRequest::Cancelled:
                return EIO;
            case AsyncDeviceRequest::MemoryFault:
                // This should never happen, we're writing to a kernel buffer!
                VERIFY_NOT_REACHED();
            default:
                break;
            }
        }

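        // Copy the caller's remaining bytes over the start of the block.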
        if (!inbuf.read(data.data(), pos, remaining))
            return EFAULT;

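        // Write the modified block back to the device.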
        {
            auto write_request = make_request<AsyncBlockDeviceRequest>(AsyncBlockDeviceRequest::Write, index + whole_blocks, 1, data_buffer, block_size());
            auto result = write_request->wait();
            if (result.wait_result().was_interrupted())
                return EINTR;
            switch (result.request_result()) {
            case AsyncDeviceRequest::Failure:
                return pos;
            case AsyncDeviceRequest::Cancelled:
                return EIO;
            case AsyncDeviceRequest::MemoryFault:
                // This should never happen, we're writing to a kernel buffer!
                VERIFY_NOT_REACHED();
            default:
                break;
            }
        }
    }

    return pos + remaining;
}

bool StorageDevice::can_write(const FileDescription&, size_t offset) const
{
    return offset < (max_addressable_block() * block_size());
}

}