/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */
#include <AK/Debug.h>
#include <LibCompress/Gzip.h>
#include <LibCompress/Zlib.h>
#include <LibCore/TCPSocket.h>
#include <LibHTTP/HttpResponse.h>
#include <LibHTTP/Job.h>
#include <stdio.h>
#include <unistd.h>

namespace HTTP {
  15. static ByteBuffer handle_content_encoding(const ByteBuffer& buf, const String& content_encoding)
  16. {
  17. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf has content_encoding={}", content_encoding);
  18. if (content_encoding == "gzip") {
  19. if (!Compress::GzipDecompressor::is_likely_compressed(buf)) {
  20. dbgln("Job::handle_content_encoding: buf is not gzip compressed!");
  21. }
  22. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf is gzip compressed!");
  23. auto uncompressed = Compress::GzipDecompressor::decompress_all(buf);
  24. if (!uncompressed.has_value()) {
  25. dbgln("Job::handle_content_encoding: Gzip::decompress() failed. Returning original buffer.");
  26. return buf;
  27. }
  28. if constexpr (JOB_DEBUG) {
  29. dbgln("Job::handle_content_encoding: Gzip::decompress() successful.");
  30. dbgln(" Input size: {}", buf.size());
  31. dbgln(" Output size: {}", uncompressed.value().size());
  32. }
  33. return uncompressed.value();
  34. } else if (content_encoding == "deflate") {
  35. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf is deflate compressed!");
  36. // Even though the content encoding is "deflate", it's actually deflate with the zlib wrapper.
  37. // https://tools.ietf.org/html/rfc7230#section-4.2.2
  38. auto uncompressed = Compress::Zlib::decompress_all(buf);
  39. if (!uncompressed.has_value()) {
  40. // From the RFC:
  41. // "Note: Some non-conformant implementations send the "deflate"
  42. // compressed data without the zlib wrapper."
  43. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: Zlib::decompress_all() failed. Trying DeflateDecompressor::decompress_all()");
  44. uncompressed = Compress::DeflateDecompressor::decompress_all(buf);
  45. if (!uncompressed.has_value()) {
  46. dbgln("Job::handle_content_encoding: DeflateDecompressor::decompress_all() failed, returning original buffer.");
  47. return buf;
  48. }
  49. }
  50. if constexpr (JOB_DEBUG) {
  51. dbgln("Job::handle_content_encoding: Deflate decompression successful.");
  52. dbgln(" Input size: {}", buf.size());
  53. dbgln(" Output size: {}", uncompressed.value().size());
  54. }
  55. return uncompressed.value();
  56. }
  57. return buf;
  58. }
// Construct a Job for `request`; decoded response bytes are written to
// `output_stream` as they become available (see flush_received_buffers()).
Job::Job(const HttpRequest& request, OutputStream& output_stream)
    : Core::NetworkJob(output_stream)
    , m_request(request)
{
}
// Out-of-line destructor; no explicit cleanup needed beyond the members' own.
Job::~Job()
{
}
  67. void Job::flush_received_buffers()
  68. {
  69. if (!m_can_stream_response || m_buffered_size == 0)
  70. return;
  71. dbgln_if(JOB_DEBUG, "Job: Flushing received buffers: have {} bytes in {} buffers", m_buffered_size, m_received_buffers.size());
  72. for (size_t i = 0; i < m_received_buffers.size(); ++i) {
  73. auto& payload = m_received_buffers[i];
  74. auto written = do_write(payload);
  75. m_buffered_size -= written;
  76. if (written == payload.size()) {
  77. // FIXME: Make this a take-first-friendly object?
  78. m_received_buffers.take_first();
  79. --i;
  80. continue;
  81. }
  82. VERIFY(written < payload.size());
  83. payload = payload.slice(written, payload.size() - written);
  84. break;
  85. }
  86. dbgln_if(JOB_DEBUG, "Job: Flushing received buffers done: have {} bytes in {} buffers", m_buffered_size, m_received_buffers.size());
  87. }
// Wire up the socket callbacks once the TCP connection is established:
// - on ready-to-write: send the raw HTTP request exactly once.
// - on ready-to-read: drive the response state machine
//   (InStatus -> InHeaders -> InBody [-> Trailers, for chunked] -> Finished).
void Job::on_socket_connected()
{
    register_on_ready_to_write([&] {
        // Guard so the request is only transmitted once, however many times
        // the socket reports writability.
        if (m_sent_data)
            return;
        m_sent_data = true;
        auto raw_request = m_request.to_raw_request();
        if constexpr (JOB_DEBUG) {
            dbgln("Job: raw_request:");
            dbgln("{}", String::copy(raw_request));
        }
        bool success = write(raw_request);
        if (!success)
            deferred_invoke([this](auto&) { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
    });
    register_on_ready_to_read([&] {
        if (is_cancelled())
            return;
        if (m_state == State::Finished) {
            // This is probably just a EOF notification, which means we should receive nothing
            // and then get eof() == true.
            [[maybe_unused]] auto payload = receive(64);
            // These assertions are only correct if "Connection: close".
            VERIFY(payload.is_empty());
            VERIFY(eof());
            return;
        }
        if (m_state == State::InStatus) {
            // Parse the status line: "HTTP-version SP status-code SP reason-phrase".
            if (!can_read_line())
                return;
            auto line = read_line(PAGE_SIZE);
            if (line.is_null()) {
                fprintf(stderr, "Job: Expected HTTP status\n");
                return deferred_invoke([this](auto&) { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            }
            auto parts = line.split_view(' ');
            if (parts.size() < 3) {
                warnln("Job: Expected 3-part HTTP status, got '{}'", line);
                return deferred_invoke([this](auto&) { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            auto code = parts[1].to_uint();
            if (!code.has_value()) {
                fprintf(stderr, "Job: Expected numeric HTTP status\n");
                return deferred_invoke([this](auto&) { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            m_code = code.value();
            m_state = State::InHeaders;
            return;
        }
        if (m_state == State::InHeaders || m_state == State::Trailers) {
            // Headers and trailers share the same "Name: value" syntax, so both
            // states are handled here; the Trailers state is just more lenient.
            if (!can_read_line())
                return;
            auto line = read_line(PAGE_SIZE);
            if (line.is_null()) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                fprintf(stderr, "Job: Expected HTTP header\n");
                return did_fail(Core::NetworkJob::Error::ProtocolFailed);
            }
            if (line.is_empty()) {
                // Blank line terminates the header (or trailer) section.
                if (m_state == State::Trailers) {
                    return finish_up();
                } else {
                    // Report the headers to the client before body data arrives.
                    if (on_headers_received)
                        on_headers_received(m_headers, m_code > 0 ? m_code : Optional<u32> {});
                    m_state = State::InBody;
                }
                return;
            }
            auto parts = line.split_view(':');
            if (parts.is_empty()) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                fprintf(stderr, "Job: Expected HTTP header with key/value\n");
                return deferred_invoke([this](auto&) { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            auto name = parts[0];
            // A valid header needs at least "Name: " (name + colon + space)
            // before the value.
            if (line.length() < name.length() + 2) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                warnln("Job: Malformed HTTP header: '{}' ({})", line, line.length());
                return deferred_invoke([this](auto&) { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            // Skip the ": " separator to extract the value.
            auto value = line.substring(name.length() + 2, line.length() - name.length() - 2);
            m_headers.set(name, value);
            if (name.equals_ignoring_case("Content-Encoding")) {
                // Assume that any content-encoding means that we can't decode it as a stream :(
                dbgln_if(JOB_DEBUG, "Content-Encoding {} detected, cannot stream output :(", value);
                m_can_stream_response = false;
            }
            dbgln_if(JOB_DEBUG, "Job: [{}] = '{}'", name, value);
            return;
        }
        VERIFY(m_state == State::InBody);
        VERIFY(can_read());
        read_while_data_available([&] {
            // Default read granularity for non-chunked bodies.
            auto read_size = 64 * KiB;
            if (m_current_chunk_remaining_size.has_value()) {
            // Chunked transfer encoding is active.
            // m_current_chunk_remaining_size == -1 means "a chunk-size line is
            // expected next"; any other value is the number of payload bytes
            // still owed by the current chunk.
            read_chunk_size:;
                auto remaining = m_current_chunk_remaining_size.value();
                if (remaining == -1) {
                    // read size
                    auto size_data = read_line(PAGE_SIZE);
                    if (m_should_read_chunk_ending_line) {
                        // The CRLF terminating the previous chunk wasn't
                        // available earlier; consume it now.
                        VERIFY(size_data.is_empty());
                        m_should_read_chunk_ending_line = false;
                        return IterationDecision::Continue;
                    }
                    auto size_lines = size_data.view().lines();
                    dbgln_if(JOB_DEBUG, "Job: Received a chunk with size '{}'", size_data);
                    if (size_lines.size() == 0) {
                        dbgln("Job: Reached end of stream");
                        finish_up();
                        return IterationDecision::Break;
                    } else {
                        // Chunk-size line: hex size optionally followed by
                        // ";ext-name=ext-value" chunk extensions (ignored).
                        auto chunk = size_lines[0].split_view(';', true);
                        String size_string = chunk[0];
                        char* endptr;
                        auto size = strtoul(size_string.characters(), &endptr, 16);
                        if (*endptr) {
                            // invalid number
                            deferred_invoke([this](auto&) { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
                            return IterationDecision::Break;
                        }
                        if (size == 0) {
                            // This is the last chunk
                            // '0' *[; chunk-ext-name = chunk-ext-value]
                            // We're going to ignore _all_ chunk extensions
                            read_size = 0;
                            m_current_chunk_total_size = 0;
                            m_current_chunk_remaining_size = 0;
                            dbgln_if(JOB_DEBUG, "Job: Received the last chunk with extensions '{}'", size_string.substring_view(1, size_string.length() - 1));
                        } else {
                            m_current_chunk_total_size = size;
                            m_current_chunk_remaining_size = size;
                            read_size = size;
                            dbgln_if(JOB_DEBUG, "Job: Chunk of size '{}' started", size);
                        }
                    }
                } else {
                    // Mid-chunk: only read what the current chunk still owes.
                    read_size = remaining;
                    dbgln_if(JOB_DEBUG, "Job: Resuming chunk with '{}' bytes left over", remaining);
                }
            } else {
                auto transfer_encoding = m_headers.get("Transfer-Encoding");
                if (transfer_encoding.has_value()) {
                    // Note: Some servers add extra spaces around 'chunked', see #6302.
                    auto encoding = transfer_encoding.value().trim_whitespace();
                    dbgln_if(JOB_DEBUG, "Job: This content has transfer encoding '{}'", encoding);
                    if (encoding.equals_ignoring_case("chunked")) {
                        // Enter chunked mode and jump back to parse the first
                        // chunk-size line.
                        m_current_chunk_remaining_size = -1;
                        goto read_chunk_size;
                    } else {
                        dbgln("Job: Unknown transfer encoding '{}', the result will likely be wrong!", encoding);
                    }
                }
            }
            auto payload = receive(read_size);
            if (!payload) {
                if (eof()) {
                    finish_up();
                    return IterationDecision::Break;
                }
                if (should_fail_on_empty_payload()) {
                    deferred_invoke([this](auto&) { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
                    return IterationDecision::Break;
                }
            }
            // Queue the payload and try to stream it out immediately.
            m_received_buffers.append(payload);
            m_buffered_size += payload.size();
            m_received_size += payload.size();
            flush_received_buffers();
            if (m_current_chunk_remaining_size.has_value()) {
                // Update chunked-mode bookkeeping with what we just consumed.
                auto size = m_current_chunk_remaining_size.value() - payload.size();
                dbgln_if(JOB_DEBUG, "Job: We have {} bytes left over in this chunk", size);
                if (size == 0) {
                    dbgln_if(JOB_DEBUG, "Job: Finished a chunk of {} bytes", m_current_chunk_total_size.value());
                    if (m_current_chunk_total_size.value() == 0) {
                        // The zero-size terminator chunk: trailers come next.
                        m_state = State::Trailers;
                        return IterationDecision::Break;
                    }
                    // we've read everything, now let's get the next chunk
                    size = -1;
                    if (can_read_line()) {
                        // Consume the CRLF that terminates the chunk data.
                        auto line = read_line(PAGE_SIZE);
                        VERIFY(line.is_empty());
                    } else {
                        // CRLF not here yet; remember to consume it next time.
                        m_should_read_chunk_ending_line = true;
                    }
                }
                m_current_chunk_remaining_size = size;
            }
            // Progress reporting based on Content-Length, when present.
            auto content_length_header = m_headers.get("Content-Length");
            Optional<u32> content_length {};
            if (content_length_header.has_value()) {
                auto length = content_length_header.value().to_uint();
                if (length.has_value())
                    content_length = length.value();
            }
            deferred_invoke([this, content_length](auto&) { did_progress(content_length, m_received_size); });
            if (content_length.has_value()) {
                auto length = content_length.value();
                if (m_received_size >= length) {
                    // Clamp and finish: we've received the whole declared body.
                    m_received_size = length;
                    finish_up();
                    return IterationDecision::Break;
                }
            }
            return IterationDecision::Continue;
        });
        if (!is_established()) {
            dbgln_if(JOB_DEBUG, "Connection appears to have closed, finishing up");
            finish_up();
        }
    });
}
// Finalize the response: if streaming was disabled (because of a
// Content-Encoding), flatten and decode the buffered payload, then flush
// everything to the client. If the client hasn't drained the output yet,
// re-schedule ourselves until it has, and only then report completion.
void Job::finish_up()
{
    m_state = State::Finished;
    if (!m_can_stream_response) {
        // Flatten all received buffers into one contiguous buffer so the
        // decompressor can see the entire payload at once.
        auto flattened_buffer = ByteBuffer::create_uninitialized(m_received_size);
        u8* flat_ptr = flattened_buffer.data();
        for (auto& received_buffer : m_received_buffers) {
            memcpy(flat_ptr, received_buffer.data(), received_buffer.size());
            flat_ptr += received_buffer.size();
        }
        // Clear before re-appending the (possibly decoded) flattened buffer below.
        m_received_buffers.clear();
        // For the time being, we cannot stream stuff with content-encoding set to _anything_.
        // FIXME: LibCompress exposes a streaming interface, so this can be resolved
        auto content_encoding = m_headers.get("Content-Encoding");
        if (content_encoding.has_value()) {
            flattened_buffer = handle_content_encoding(flattened_buffer, content_encoding.value());
        }
        m_buffered_size = flattened_buffer.size();
        m_received_buffers.append(move(flattened_buffer));
        // Decoding is done; the single remaining buffer can now be streamed out.
        m_can_stream_response = true;
    }
    flush_received_buffers();
    if (m_buffered_size != 0) {
        // We have to wait for the client to consume all the downloaded data
        // before we can actually call `did_finish`. in a normal flow, this should
        // never be hit since the client is reading as we are writing, unless there
        // are too many concurrent downloads going on.
        deferred_invoke([this](auto&) {
            finish_up();
        });
        return;
    }
    // All data delivered: build the response object and notify the client
    // on the next event-loop iteration.
    auto response = HttpResponse::create(m_code, move(m_headers));
    deferred_invoke([this, response](auto&) {
        did_finish(move(response));
    });
}
}