// Job.cpp
  1. /*
  2. * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
  3. * Copyright (c) 2022, the SerenityOS developers.
  4. *
  5. * SPDX-License-Identifier: BSD-2-Clause
  6. */
  7. #include <AK/CharacterTypes.h>
  8. #include <AK/Debug.h>
  9. #include <AK/JsonObject.h>
  10. #include <AK/MemoryStream.h>
  11. #include <AK/Try.h>
  12. #include <LibCompress/Brotli.h>
  13. #include <LibCompress/Gzip.h>
  14. #include <LibCompress/Zlib.h>
  15. #include <LibCore/Event.h>
  16. #include <LibHTTP/HttpResponse.h>
  17. #include <LibHTTP/Job.h>
  18. #include <stdio.h>
  19. #include <unistd.h>
  20. namespace HTTP {
  21. static ErrorOr<ByteBuffer> handle_content_encoding(ByteBuffer const& buf, DeprecatedString const& content_encoding)
  22. {
  23. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf has content_encoding={}", content_encoding);
  24. // FIXME: Actually do the decompression of the data using streams, instead of all at once when everything has been
  25. // received. This will require that some of the decompression algorithms are implemented in a streaming way.
  26. // Gzip and Deflate are implemented using Stream, while Brotli uses the newer Core::Stream. The Gzip and
  27. // Deflate implementations will likely need to be changed to LibCore::Stream for this to work easily.
  28. if (content_encoding == "gzip") {
  29. if (!Compress::GzipDecompressor::is_likely_compressed(buf)) {
  30. dbgln("Job::handle_content_encoding: buf is not gzip compressed!");
  31. }
  32. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf is gzip compressed!");
  33. auto uncompressed = TRY(Compress::GzipDecompressor::decompress_all(buf));
  34. if constexpr (JOB_DEBUG) {
  35. dbgln("Job::handle_content_encoding: Gzip::decompress() successful.");
  36. dbgln(" Input size: {}", buf.size());
  37. dbgln(" Output size: {}", uncompressed.size());
  38. }
  39. return uncompressed;
  40. } else if (content_encoding == "deflate") {
  41. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf is deflate compressed!");
  42. // Even though the content encoding is "deflate", it's actually deflate with the zlib wrapper.
  43. // https://tools.ietf.org/html/rfc7230#section-4.2.2
  44. auto memory_stream = make<FixedMemoryStream>(buf);
  45. auto zlib_decompressor = Compress::ZlibDecompressor::create(move(memory_stream));
  46. Optional<ByteBuffer> uncompressed;
  47. if (zlib_decompressor.is_error()) {
  48. // From the RFC:
  49. // "Note: Some non-conformant implementations send the "deflate"
  50. // compressed data without the zlib wrapper."
  51. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: ZlibDecompressor::decompress_all() failed. Trying DeflateDecompressor::decompress_all()");
  52. uncompressed = TRY(Compress::DeflateDecompressor::decompress_all(buf));
  53. } else {
  54. uncompressed = TRY(zlib_decompressor.value()->read_until_eof());
  55. }
  56. if constexpr (JOB_DEBUG) {
  57. dbgln("Job::handle_content_encoding: Deflate decompression successful.");
  58. dbgln(" Input size: {}", buf.size());
  59. dbgln(" Output size: {}", uncompressed.value().size());
  60. }
  61. return uncompressed.release_value();
  62. } else if (content_encoding == "br") {
  63. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf is brotli compressed!");
  64. FixedMemoryStream bufstream { buf };
  65. auto brotli_stream = Compress::BrotliDecompressionStream { MaybeOwned<Stream>(bufstream) };
  66. auto uncompressed = TRY(brotli_stream.read_until_eof());
  67. if constexpr (JOB_DEBUG) {
  68. dbgln("Job::handle_content_encoding: Brotli::decompress() successful.");
  69. dbgln(" Input size: {}", buf.size());
  70. dbgln(" Output size: {}", uncompressed.size());
  71. }
  72. return uncompressed;
  73. }
  74. return buf;
  75. }
// Constructs a Job for the given request; received response data is streamed
// into `output_stream` by the Core::NetworkJob base class.
Job::Job(HttpRequest&& request, Stream& output_stream)
    : Core::NetworkJob(output_stream)
    , m_request(move(request))
{
}
// Starts the job on an already-connected (possibly reused keep-alive) socket.
// The actual request transmission happens from the event loop via deferred_invoke.
void Job::start(Core::BufferedSocketBase& socket)
{
    // A job may only be started once.
    VERIFY(!m_socket);
    m_socket = &socket;
    dbgln_if(HTTPJOB_DEBUG, "Reusing previous connection for {}", url());
    deferred_invoke([this] {
        dbgln_if(HTTPJOB_DEBUG, "HttpJob: on_connected callback");
        on_socket_connected();
    });
}
  91. void Job::shutdown(ShutdownMode mode)
  92. {
  93. if (!m_socket)
  94. return;
  95. if (mode == ShutdownMode::CloseSocket) {
  96. m_socket->close();
  97. m_socket->on_ready_to_read = nullptr;
  98. } else {
  99. m_socket->on_ready_to_read = nullptr;
  100. m_socket = nullptr;
  101. }
  102. }
// Writes as much buffered response data to the output stream as it will accept.
// Fully-written buffers are dropped from the queue; a partially-written buffer is
// sliced down to its unwritten tail and retried on a later flush.
void Job::flush_received_buffers()
{
    if (!m_can_stream_response || m_buffered_size == 0)
        return;
    dbgln_if(JOB_DEBUG, "Job: Flushing received buffers: have {} bytes in {} buffers for {}", m_buffered_size, m_received_buffers.size(), m_request.url());
    for (size_t i = 0; i < m_received_buffers.size(); ++i) {
        auto& payload = m_received_buffers[i]->pending_flush;
        auto result = do_write(payload);
        if (result.is_error()) {
            if (!result.error().is_errno()) {
                // Unexpected (non-errno) failure: log and move on to the next buffer.
                dbgln_if(JOB_DEBUG, "Job: Failed to flush received buffers: {}", result.error());
                continue;
            }
            if (result.error().code() == EINTR) {
                // Interrupted by a signal: retry the same buffer.
                i--;
                continue;
            }
            // Any other errno (e.g. the stream can't accept more right now): stop for now.
            break;
        }
        auto written = result.release_value();
        m_buffered_size -= written;
        if (written == payload.size()) {
            // Whole buffer flushed; remove it and compensate the loop index.
            // FIXME: Make this a take-first-friendly object?
            (void)m_received_buffers.take_first();
            --i;
            continue;
        }
        // Partial write: keep only the unwritten tail for the next flush attempt.
        VERIFY(written < payload.size());
        payload = payload.slice(written, payload.size() - written);
        break;
    }
    dbgln_if(JOB_DEBUG, "Job: Flushing received buffers done: have {} bytes in {} buffers for {}", m_buffered_size, m_received_buffers.size(), m_request.url());
}
// Installs `callback` as the socket's read-notification handler.
// After each invocation, if the buffered socket still has data that can be read
// without blocking (for which no further OS notification would arrive), the
// handler re-invokes itself via deferred_invoke so buffered data is not stranded.
void Job::register_on_ready_to_read(Function<void()> callback)
{
    m_socket->on_ready_to_read = [this, callback = move(callback)] {
        callback();

        // As `m_socket` is a buffered object, we might not get notifications for data in the buffer
        // so exhaust the buffer to ensure we don't end up waiting forever.
        auto can_read_without_blocking = m_socket->can_read_without_blocking();
        if (can_read_without_blocking.is_error())
            return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
        if (can_read_without_blocking.value() && m_state != State::Finished && !has_error()) {
            deferred_invoke([this] {
                // Re-check both pointers: the job may have shut down in the meantime.
                if (m_socket && m_socket->on_ready_to_read)
                    m_socket->on_ready_to_read();
            });
        }
    };
}
  153. ErrorOr<DeprecatedString> Job::read_line(size_t size)
  154. {
  155. auto buffer = TRY(ByteBuffer::create_uninitialized(size));
  156. auto bytes_read = TRY(m_socket->read_until(buffer, "\r\n"sv));
  157. return DeprecatedString::copy(bytes_read);
  158. }
  159. ErrorOr<ByteBuffer> Job::receive(size_t size)
  160. {
  161. if (size == 0)
  162. return ByteBuffer {};
  163. auto buffer = TRY(ByteBuffer::create_uninitialized(size));
  164. size_t nread;
  165. do {
  166. auto result = m_socket->read_some(buffer);
  167. if (result.is_error() && result.error().is_errno() && result.error().code() == EINTR)
  168. continue;
  169. nread = TRY(result).size();
  170. break;
  171. } while (true);
  172. return buffer.slice(0, nread);
  173. }
  174. void Job::on_socket_connected()
  175. {
  176. auto raw_request = m_request.to_raw_request().release_value_but_fixme_should_propagate_errors();
  177. if constexpr (JOB_DEBUG) {
  178. dbgln("Job: raw_request:");
  179. dbgln("{}", DeprecatedString::copy(raw_request));
  180. }
  181. bool success = !m_socket->write_until_depleted(raw_request).is_error();
  182. if (!success)
  183. deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  184. register_on_ready_to_read([&] {
  185. dbgln_if(JOB_DEBUG, "Ready to read for {}, state = {}, cancelled = {}", m_request.url(), to_underlying(m_state), is_cancelled());
  186. if (is_cancelled())
  187. return;
  188. if (m_state == State::Finished) {
  189. // We have everything we want, at this point, we can either get an EOF, or a bunch of extra newlines
  190. // (unless "Connection: close" isn't specified)
  191. // So just ignore everything after this.
  192. return;
  193. }
  194. if (m_socket->is_eof()) {
  195. dbgln_if(JOB_DEBUG, "Read failure: Actually EOF!");
  196. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
  197. }
  198. while (m_state == State::InStatus) {
  199. auto can_read_line = m_socket->can_read_line();
  200. if (can_read_line.is_error()) {
  201. dbgln_if(JOB_DEBUG, "Job {} could not figure out whether we could read a line", m_request.url());
  202. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  203. }
  204. if (!can_read_line.value()) {
  205. dbgln_if(JOB_DEBUG, "Job {} cannot read a full line", m_request.url());
  206. // TODO: Should we retry here instead of failing instantly?
  207. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  208. }
  209. auto maybe_line = read_line(PAGE_SIZE);
  210. if (maybe_line.is_error()) {
  211. dbgln_if(JOB_DEBUG, "Job {} could not read line: {}", m_request.url(), maybe_line.error());
  212. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  213. }
  214. auto line = maybe_line.release_value();
  215. dbgln_if(JOB_DEBUG, "Job {} read line of length {}", m_request.url(), line.length());
  216. if (line.is_null()) {
  217. dbgln("Job: Expected HTTP status");
  218. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  219. }
  220. auto parts = line.split_view(' ');
  221. if (parts.size() < 2) {
  222. dbgln("Job: Expected 2-part or 3-part HTTP status line, got '{}'", line);
  223. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
  224. }
  225. if (!parts[0].matches("HTTP/?.?"sv, CaseSensitivity::CaseSensitive) || !is_ascii_digit(parts[0][5]) || !is_ascii_digit(parts[0][7])) {
  226. dbgln("Job: Expected HTTP-Version to be of the form 'HTTP/X.Y', got '{}'", parts[0]);
  227. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
  228. }
  229. auto http_major_version = parse_ascii_digit(parts[0][5]);
  230. auto http_minor_version = parse_ascii_digit(parts[0][7]);
  231. m_legacy_connection = http_major_version < 1 || (http_major_version == 1 && http_minor_version == 0);
  232. auto code = parts[1].to_uint();
  233. if (!code.has_value()) {
  234. dbgln("Job: Expected numeric HTTP status");
  235. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
  236. }
  237. m_code = code.value();
  238. m_state = State::InHeaders;
  239. auto can_read_without_blocking = m_socket->can_read_without_blocking();
  240. if (can_read_without_blocking.is_error())
  241. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  242. if (!can_read_without_blocking.value())
  243. return;
  244. }
  245. while (m_state == State::InHeaders || m_state == State::Trailers) {
  246. auto can_read_line = m_socket->can_read_line();
  247. if (can_read_line.is_error()) {
  248. dbgln_if(JOB_DEBUG, "Job {} could not figure out whether we could read a line", m_request.url());
  249. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  250. }
  251. if (!can_read_line.value()) {
  252. dbgln_if(JOB_DEBUG, "Can't read lines anymore :(");
  253. return;
  254. }
  255. // There's no max limit defined on headers, but for our sanity, let's limit it to 32K.
  256. auto maybe_line = read_line(32 * KiB);
  257. if (maybe_line.is_error()) {
  258. dbgln_if(JOB_DEBUG, "Job {} could not read a header line: {}", m_request.url(), maybe_line.error());
  259. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  260. }
  261. auto line = maybe_line.release_value();
  262. if (line.is_null()) {
  263. if (m_state == State::Trailers) {
  264. // Some servers like to send two ending chunks
  265. // use this fact as an excuse to ignore anything after the last chunk
  266. // that is not a valid trailing header.
  267. return finish_up();
  268. }
  269. dbgln("Job: Expected HTTP header");
  270. return did_fail(Core::NetworkJob::Error::ProtocolFailed);
  271. }
  272. if (line.is_empty()) {
  273. if (m_state == State::Trailers) {
  274. return finish_up();
  275. }
  276. if (on_headers_received) {
  277. if (!m_set_cookie_headers.is_empty())
  278. m_headers.set("Set-Cookie", JsonArray { m_set_cookie_headers }.to_deprecated_string());
  279. on_headers_received(m_headers, m_code > 0 ? m_code : Optional<u32> {});
  280. }
  281. m_state = State::InBody;
  282. // We've reached the end of the headers, there's a possibility that the server
  283. // responds with nothing (content-length = 0 with normal encoding); if that's the case,
  284. // quit early as we won't be reading anything anyway.
  285. if (auto result = m_headers.get("Content-Length"sv).value_or(""sv).to_uint(); result.has_value()) {
  286. if (result.value() == 0 && !m_headers.get("Transfer-Encoding"sv).value_or(""sv).view().trim_whitespace().equals_ignoring_ascii_case("chunked"sv))
  287. return finish_up();
  288. }
  289. // There's also the possibility that the server responds with 204 (No Content),
  290. // and manages to set a Content-Length anyway, in such cases ignore Content-Length and quit early;
  291. // As the HTTP spec explicitly prohibits presence of Content-Length when the response code is 204.
  292. if (m_code == 204)
  293. return finish_up();
  294. break;
  295. }
  296. auto parts = line.split_view(':');
  297. if (parts.is_empty()) {
  298. if (m_state == State::Trailers) {
  299. // Some servers like to send two ending chunks
  300. // use this fact as an excuse to ignore anything after the last chunk
  301. // that is not a valid trailing header.
  302. return finish_up();
  303. }
  304. dbgln("Job: Expected HTTP header with key/value");
  305. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
  306. }
  307. auto name = parts[0];
  308. if (line.length() < name.length() + 2) {
  309. if (m_state == State::Trailers) {
  310. // Some servers like to send two ending chunks
  311. // use this fact as an excuse to ignore anything after the last chunk
  312. // that is not a valid trailing header.
  313. return finish_up();
  314. }
  315. dbgln("Job: Malformed HTTP header: '{}' ({})", line, line.length());
  316. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
  317. }
  318. auto value = line.substring(name.length() + 2, line.length() - name.length() - 2);
  319. if (name.equals_ignoring_ascii_case("Set-Cookie"sv)) {
  320. dbgln_if(JOB_DEBUG, "Job: Received Set-Cookie header: '{}'", value);
  321. m_set_cookie_headers.append(move(value));
  322. auto can_read_without_blocking = m_socket->can_read_without_blocking();
  323. if (can_read_without_blocking.is_error())
  324. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  325. if (!can_read_without_blocking.value())
  326. return;
  327. } else if (auto existing_value = m_headers.get(name); existing_value.has_value()) {
  328. StringBuilder builder;
  329. builder.append(existing_value.value());
  330. builder.append(',');
  331. builder.append(value);
  332. m_headers.set(name, builder.to_deprecated_string());
  333. } else {
  334. m_headers.set(name, value);
  335. }
  336. if (name.equals_ignoring_ascii_case("Content-Encoding"sv)) {
  337. // Assume that any content-encoding means that we can't decode it as a stream :(
  338. dbgln_if(JOB_DEBUG, "Content-Encoding {} detected, cannot stream output :(", value);
  339. m_can_stream_response = false;
  340. } else if (name.equals_ignoring_ascii_case("Content-Length"sv)) {
  341. auto length = value.to_uint<u64>();
  342. if (length.has_value())
  343. m_content_length = length.value();
  344. }
  345. dbgln_if(JOB_DEBUG, "Job: [{}] = '{}'", name, value);
  346. auto can_read_without_blocking = m_socket->can_read_without_blocking();
  347. if (can_read_without_blocking.is_error())
  348. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  349. if (!can_read_without_blocking.value()) {
  350. dbgln_if(JOB_DEBUG, "Can't read headers anymore, byebye :(");
  351. return;
  352. }
  353. }
  354. VERIFY(m_state == State::InBody);
  355. while (true) {
  356. auto can_read_without_blocking = m_socket->can_read_without_blocking();
  357. if (can_read_without_blocking.is_error())
  358. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  359. if (!can_read_without_blocking.value())
  360. break;
  361. auto read_size = 64 * KiB;
  362. if (m_current_chunk_remaining_size.has_value()) {
  363. read_chunk_size:;
  364. auto remaining = m_current_chunk_remaining_size.value();
  365. if (remaining == -1) {
  366. // read size
  367. auto maybe_size_data = read_line(PAGE_SIZE);
  368. if (maybe_size_data.is_error()) {
  369. dbgln_if(JOB_DEBUG, "Job: Could not receive chunk: {}", maybe_size_data.error());
  370. }
  371. auto size_data = maybe_size_data.release_value();
  372. if (m_should_read_chunk_ending_line) {
  373. // NOTE: Some servers seem to send an extra \r\n here despite there being no size.
  374. // This makes us tolerate that.
  375. size_data = size_data.trim("\r\n"sv, TrimMode::Right);
  376. VERIFY(size_data.is_empty());
  377. m_should_read_chunk_ending_line = false;
  378. continue;
  379. }
  380. auto size_lines = size_data.view().lines();
  381. dbgln_if(JOB_DEBUG, "Job: Received a chunk with size '{}'", size_data);
  382. if (size_lines.size() == 0) {
  383. if (!m_socket->is_eof())
  384. break;
  385. dbgln("Job: Reached end of stream");
  386. finish_up();
  387. break;
  388. } else {
  389. auto chunk = size_lines[0].split_view(';', SplitBehavior::KeepEmpty);
  390. DeprecatedString size_string = chunk[0];
  391. char* endptr;
  392. auto size = strtoul(size_string.characters(), &endptr, 16);
  393. if (*endptr) {
  394. // invalid number
  395. deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  396. break;
  397. }
  398. if (size == 0) {
  399. // This is the last chunk
  400. // '0' *[; chunk-ext-name = chunk-ext-value]
  401. // We're going to ignore _all_ chunk extensions
  402. read_size = 0;
  403. m_current_chunk_total_size = 0;
  404. m_current_chunk_remaining_size = 0;
  405. dbgln_if(JOB_DEBUG, "Job: Received the last chunk with extensions '{}'", size_string.substring_view(1, size_string.length() - 1));
  406. } else {
  407. m_current_chunk_total_size = size;
  408. m_current_chunk_remaining_size = size;
  409. read_size = size;
  410. dbgln_if(JOB_DEBUG, "Job: Chunk of size '{}' started", size);
  411. }
  412. }
  413. } else {
  414. read_size = remaining;
  415. dbgln_if(JOB_DEBUG, "Job: Resuming chunk with '{}' bytes left over", remaining);
  416. }
  417. } else {
  418. auto transfer_encoding = m_headers.get("Transfer-Encoding");
  419. if (transfer_encoding.has_value()) {
  420. // HTTP/1.1 3.3.3.3:
  421. // If a message is received with both a Transfer-Encoding and a Content-Length header field, the Transfer-Encoding overrides the Content-Length. [...]
  422. // https://httpwg.org/specs/rfc7230.html#message.body.length
  423. m_content_length = {};
  424. // Note: Some servers add extra spaces around 'chunked', see #6302.
  425. auto encoding = transfer_encoding.value().trim_whitespace();
  426. dbgln_if(JOB_DEBUG, "Job: This content has transfer encoding '{}'", encoding);
  427. if (encoding.equals_ignoring_ascii_case("chunked"sv)) {
  428. m_current_chunk_remaining_size = -1;
  429. goto read_chunk_size;
  430. } else {
  431. dbgln("Job: Unknown transfer encoding '{}', the result will likely be wrong!", encoding);
  432. }
  433. }
  434. }
  435. can_read_without_blocking = m_socket->can_read_without_blocking();
  436. if (can_read_without_blocking.is_error())
  437. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  438. if (!can_read_without_blocking.value())
  439. break;
  440. dbgln_if(JOB_DEBUG, "Waiting for payload for {}", m_request.url());
  441. auto maybe_payload = receive(read_size);
  442. if (maybe_payload.is_error()) {
  443. dbgln_if(JOB_DEBUG, "Could not read the payload: {}", maybe_payload.error());
  444. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  445. }
  446. auto payload = maybe_payload.release_value();
  447. if (payload.is_empty() && m_socket->is_eof()) {
  448. finish_up();
  449. break;
  450. }
  451. bool read_everything = false;
  452. if (m_content_length.has_value()) {
  453. auto length = m_content_length.value();
  454. if (m_received_size + payload.size() >= length) {
  455. payload.resize(length - m_received_size);
  456. read_everything = true;
  457. }
  458. }
  459. m_received_buffers.append(make<ReceivedBuffer>(payload));
  460. m_buffered_size += payload.size();
  461. m_received_size += payload.size();
  462. flush_received_buffers();
  463. deferred_invoke([this] { did_progress(m_content_length, m_received_size); });
  464. if (read_everything) {
  465. VERIFY(m_received_size <= m_content_length.value());
  466. finish_up();
  467. break;
  468. }
  469. // Check after reading all the buffered data if we have reached the end of stream
  470. // for cases where the server didn't send a content length, chunked encoding but is
  471. // directly closing the connection.
  472. if (!m_content_length.has_value() && !m_current_chunk_remaining_size.has_value() && m_socket->is_eof()) {
  473. finish_up();
  474. break;
  475. }
  476. if (m_current_chunk_remaining_size.has_value()) {
  477. auto size = m_current_chunk_remaining_size.value() - payload.size();
  478. dbgln_if(JOB_DEBUG, "Job: We have {} bytes left over in this chunk", size);
  479. if (size == 0) {
  480. dbgln_if(JOB_DEBUG, "Job: Finished a chunk of {} bytes", m_current_chunk_total_size.value());
  481. if (m_current_chunk_total_size.value() == 0) {
  482. m_state = State::Trailers;
  483. break;
  484. }
  485. // we've read everything, now let's get the next chunk
  486. size = -1;
  487. auto can_read_line = m_socket->can_read_line();
  488. if (can_read_line.is_error())
  489. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  490. if (can_read_line.value()) {
  491. auto maybe_line = read_line(PAGE_SIZE);
  492. if (maybe_line.is_error()) {
  493. return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
  494. }
  495. VERIFY(maybe_line.value().is_empty());
  496. } else {
  497. m_should_read_chunk_ending_line = true;
  498. }
  499. }
  500. m_current_chunk_remaining_size = size;
  501. }
  502. }
  503. if (!m_socket->is_open()) {
  504. dbgln_if(JOB_DEBUG, "Connection appears to have closed, finishing up");
  505. finish_up();
  506. }
  507. });
  508. }
// Retry timer started by finish_up() when the client hasn't yet consumed all
// buffered data; re-attempts the flush and stops once everything is written.
void Job::timer_event(Core::TimerEvent& event)
{
    event.accept();
    finish_up();
    if (m_buffered_size == 0)
        stop_timer();
}
// Completes the job: for non-streamable responses, flattens all received buffers
// and applies Content-Encoding decompression; then flushes everything to the
// output stream and schedules did_finish(). If the client hasn't drained the data
// yet, a retry timer is started instead (see timer_event), which re-enters here.
void Job::finish_up()
{
    VERIFY(!m_has_scheduled_finish);
    m_state = State::Finished;
    if (!m_can_stream_response) {
        // Concatenate every received buffer into one contiguous buffer so the
        // whole body can be decompressed at once.
        auto maybe_flattened_buffer = ByteBuffer::create_uninitialized(m_buffered_size);
        if (maybe_flattened_buffer.is_error())
            return did_fail(Core::NetworkJob::Error::TransmissionFailed);
        auto flattened_buffer = maybe_flattened_buffer.release_value();

        u8* flat_ptr = flattened_buffer.data();
        for (auto& received_buffer : m_received_buffers) {
            memcpy(flat_ptr, received_buffer->pending_flush.data(), received_buffer->pending_flush.size());
            flat_ptr += received_buffer->pending_flush.size();
        }
        m_received_buffers.clear();

        // For the time being, we cannot stream stuff with content-encoding set to _anything_.
        // FIXME: LibCompress exposes a streaming interface, so this can be resolved
        auto content_encoding = m_headers.get("Content-Encoding");
        if (content_encoding.has_value()) {
            if (auto result = handle_content_encoding(flattened_buffer, content_encoding.value()); !result.is_error())
                flattened_buffer = result.release_value();
            else
                return did_fail(Core::NetworkJob::Error::TransmissionFailed);
        }

        // Re-queue the (possibly decompressed) body as a single buffer and allow flushing.
        m_buffered_size = flattened_buffer.size();
        m_received_buffers.append(make<ReceivedBuffer>(move(flattened_buffer)));
        m_can_stream_response = true;
    }

    flush_received_buffers();
    if (m_buffered_size != 0) {
        // We have to wait for the client to consume all the downloaded data
        // before we can actually call `did_finish`. in a normal flow, this should
        // never be hit since the client is reading as we are writing, unless there
        // are too many concurrent downloads going on.
        dbgln_if(JOB_DEBUG, "Flush finished with {} bytes remaining, will try again later", m_buffered_size);
        if (!has_timer())
            start_timer(50);
        return;
    }

    m_has_scheduled_finish = true;
    auto response = HttpResponse::create(m_code, move(m_headers), m_received_size);
    deferred_invoke([this, response = move(response)] {
        // If the server responded with "Connection: close", close the connection
        // as the server may or may not want to close the socket. Also, if this is
        // a legacy HTTP server (1.0 or older), assume close is the default value.
        if (auto result = response->headers().get("Connection"sv); result.has_value() ? result->equals_ignoring_ascii_case("close"sv) : m_legacy_connection)
            shutdown(ShutdownMode::CloseSocket);
        did_finish(response);
    });
}
  566. }