/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Debug.h>
#include <AK/JsonArray.h>
#include <LibCompress/Gzip.h>
#include <LibCompress/Zlib.h>
#include <LibCore/Event.h>
#include <LibHTTP/HttpResponse.h>
#include <LibHTTP/Job.h>
#include <stdio.h>
#include <unistd.h>

namespace HTTP {

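// Decompresses the response body according to the Content-Encoding header.
// Handles "gzip" and "deflate" (with or without the zlib wrapper); any other
// encoding is passed through unchanged. Returns an empty Optional on failure.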
static Optional<ByteBuffer> handle_content_encoding(const ByteBuffer& buf, const String& content_encoding)
{
    dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf has content_encoding={}", content_encoding);

    if (content_encoding == "gzip") {
        if (!Compress::GzipDecompressor::is_likely_compressed(buf)) {
            dbgln("Job::handle_content_encoding: buf is not gzip compressed!");
        }

        dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf is gzip compressed!");

        auto uncompressed = Compress::GzipDecompressor::decompress_all(buf);
        if (!uncompressed.has_value()) {
            dbgln("Job::handle_content_encoding: Gzip::decompress() failed.");
            return {};
        }

        if constexpr (JOB_DEBUG) {
            dbgln("Job::handle_content_encoding: Gzip::decompress() successful.");
            dbgln(" Input size: {}", buf.size());
            dbgln(" Output size: {}", uncompressed.value().size());
        }

        return uncompressed.release_value();
    } else if (content_encoding == "deflate") {
        dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf is deflate compressed!");

        // Even though the content encoding is "deflate", it's actually deflate with the zlib wrapper.
        // https://tools.ietf.org/html/rfc7230#section-4.2.2
        auto uncompressed = Compress::Zlib::decompress_all(buf);
        if (!uncompressed.has_value()) {
            // From the RFC:
            // "Note: Some non-conformant implementations send the "deflate"
            //  compressed data without the zlib wrapper."
            dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: Zlib::decompress_all() failed. Trying DeflateDecompressor::decompress_all()");
            uncompressed = Compress::DeflateDecompressor::decompress_all(buf);

            if (!uncompressed.has_value()) {
                dbgln("Job::handle_content_encoding: DeflateDecompressor::decompress_all() failed.");
                return {};
            }
        }

        if constexpr (JOB_DEBUG) {
            dbgln("Job::handle_content_encoding: Deflate decompression successful.");
            dbgln(" Input size: {}", buf.size());
            dbgln(" Output size: {}", uncompressed.value().size());
        }

        return uncompressed.release_value();
    }

    return buf;
}

Job::Job(HttpRequest&& request, Core::Stream::Stream& output_stream)
    : Core::NetworkJob(output_stream)
    , m_request(move(request))
{
}

Job::~Job()
{
}

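// Note: start() is handed a socket whose connection is being reused, and the
// cast below relies on the caller passing a buffered socket.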
void Job::start(Core::Stream::Socket& socket)
{
    VERIFY(!m_socket);
    m_socket = static_cast<Core::Stream::BufferedSocketBase*>(&socket);
    dbgln_if(HTTPJOB_DEBUG, "Reusing previous connection for {}", url());
    deferred_invoke([this] {
        dbgln_if(HTTPJOB_DEBUG, "HttpJob: on_connected callback");
        on_socket_connected();
    });
}

void Job::shutdown(ShutdownMode mode)
{
    if (!m_socket)
        return;
    if (mode == ShutdownMode::CloseSocket) {
        m_socket->close();
    } else {
        m_socket->on_ready_to_read = nullptr;
        m_socket = nullptr;
    }
}

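// Writes as much buffered response data as possible to the output stream.
// Partial writes shrink the front buffer in place, EINTR retries the same
// buffer, and fully written buffers are popped off the queue.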
void Job::flush_received_buffers()
{
    if (!m_can_stream_response || m_buffered_size == 0)
        return;
    dbgln_if(JOB_DEBUG, "Job: Flushing received buffers: have {} bytes in {} buffers for {}", m_buffered_size, m_received_buffers.size(), m_request.url());
    for (size_t i = 0; i < m_received_buffers.size(); ++i) {
        auto& payload = m_received_buffers[i].pending_flush;
        auto result = do_write(payload);
        if (result.is_error()) {
            if (!result.error().is_errno()) {
                dbgln_if(JOB_DEBUG, "Job: Failed to flush received buffers: {}", result.error());
                continue;
            }
            if (result.error().code() == EINTR) {
                i--;
                continue;
            }
            break;
        }
        auto written = result.release_value();
        m_buffered_size -= written;
        if (written == payload.size()) {
            // FIXME: Make this a take-first-friendly object?
            (void)m_received_buffers.take_first();
            --i;
            continue;
        }
        VERIFY(written < payload.size());
        payload = payload.slice(written, payload.size() - written);
        break;
    }
    dbgln_if(JOB_DEBUG, "Job: Flushing received buffers done: have {} bytes in {} buffers for {}", m_buffered_size, m_received_buffers.size(), m_request.url());
}

void Job::register_on_ready_to_read(Function<void()> callback)
{
    m_socket->on_ready_to_read = [this, callback = move(callback)] {
        callback();

        // As `m_socket` is a buffered object, we might not get notifications for data in the buffer
        // so exhaust the buffer to ensure we don't end up waiting forever.
        auto can_read_without_blocking = m_socket->can_read_without_blocking();
        if (can_read_without_blocking.is_error())
            return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
        if (can_read_without_blocking.value() && m_state != State::Finished && !has_error()) {
            deferred_invoke([this] {
                if (m_socket && m_socket->on_ready_to_read)
                    m_socket->on_ready_to_read();
            });
        }
    };
}

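// Small read helpers: read_line() reads up to `size` bytes terminated by CRLF
// (used for status, header, and chunk-size lines), and receive() reads up to
// `size` raw bytes, retrying the read if it is interrupted by EINTR.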
ErrorOr<String> Job::read_line(size_t size)
{
    auto buffer = TRY(ByteBuffer::create_uninitialized(size));
    auto nread = TRY(m_socket->read_until(buffer, "\r\n"sv));
    return String::copy(buffer.span().slice(0, nread));
}

ErrorOr<ByteBuffer> Job::receive(size_t size)
{
    if (size == 0)
        return ByteBuffer {};

    auto buffer = TRY(ByteBuffer::create_uninitialized(size));
    size_t nread;
    do {
        auto result = m_socket->read(buffer);
        if (result.is_error() && result.error().is_errno() && result.error().code() == EINTR)
            continue;
        nread = TRY(result);
        break;
    } while (true);
    return buffer.slice(0, nread);
}

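// Drives the response state machine: write the raw request, then parse the
// status line (InStatus), the headers (InHeaders), the body (InBody) and any
// chunked-transfer trailers (Trailers) as data becomes readable, until Finished.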
void Job::on_socket_connected()
{
    auto raw_request = m_request.to_raw_request();

    if constexpr (JOB_DEBUG) {
        dbgln("Job: raw_request:");
        dbgln("{}", String::copy(raw_request));
    }

    bool success = m_socket->write_or_error(raw_request);
    if (!success)
        deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });

    register_on_ready_to_read([&] {
        dbgln_if(JOB_DEBUG, "Ready to read for {}, state = {}, cancelled = {}", m_request.url(), to_underlying(m_state), is_cancelled());
        if (is_cancelled())
            return;

        if (m_state == State::Finished) {
            // We have everything we want; at this point we may still see an EOF
            // (if "Connection: close" was specified) or a bunch of extra newlines,
            // so just ignore everything after this.
            return;
        }
        if (m_socket->is_eof()) {
            dbgln_if(JOB_DEBUG, "Read failure: Actually EOF!");
            return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
        }
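
        // Parse the status line first: read a full line, split it into
        // "HTTP-version status-code reason-phrase", and keep the numeric status code.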
        while (m_state == State::InStatus) {
            auto can_read_line = m_socket->can_read_line();
            if (can_read_line.is_error()) {
                dbgln_if(JOB_DEBUG, "Job {} could not figure out whether we could read a line", m_request.url());
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            }
            if (!can_read_line.value()) {
                dbgln_if(JOB_DEBUG, "Job {} cannot read line", m_request.url());
                auto maybe_buf = receive(64);
                if (maybe_buf.is_error()) {
                    dbgln_if(JOB_DEBUG, "Job {} cannot read any bytes!", m_request.url());
                    return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
                }
                dbgln_if(JOB_DEBUG, "{} bytes were read", maybe_buf.value().bytes().size());
                return;
            }
            auto maybe_line = read_line(PAGE_SIZE);
            if (maybe_line.is_error()) {
                dbgln_if(JOB_DEBUG, "Job {} could not read line: {}", m_request.url(), maybe_line.error());
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            }
            auto line = maybe_line.release_value();

            dbgln_if(JOB_DEBUG, "Job {} read line of length {}", m_request.url(), line.length());
            if (line.is_null()) {
                dbgln("Job: Expected HTTP status");
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            }
            auto parts = line.split_view(' ');
            if (parts.size() < 3) {
                dbgln("Job: Expected 3-part HTTP status, got '{}'", line);
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            auto code = parts[1].to_uint();
            if (!code.has_value()) {
                dbgln("Job: Expected numeric HTTP status");
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            m_code = code.value();
            m_state = State::InHeaders;

            auto can_read_without_blocking = m_socket->can_read_without_blocking();
            if (can_read_without_blocking.is_error())
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            if (!can_read_without_blocking.value())
                return;
        }
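
        // Now parse the response headers (and, after a chunked body, the trailers).
        // Duplicate headers are folded into a comma-separated value, except for
        // Set-Cookie, which is collected separately and emitted as a JSON array.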
        while (m_state == State::InHeaders || m_state == State::Trailers) {
            auto can_read_line = m_socket->can_read_line();
            if (can_read_line.is_error()) {
                dbgln_if(JOB_DEBUG, "Job {} could not figure out whether we could read a line", m_request.url());
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            }
            if (!can_read_line.value()) {
                dbgln_if(JOB_DEBUG, "Can't read lines anymore :(");
                return;
            }

            // There's no max limit defined on headers, but for our sanity, let's limit it to 32K.
            auto maybe_line = read_line(32 * KiB);
            if (maybe_line.is_error()) {
                dbgln_if(JOB_DEBUG, "Job {} could not read a header line: {}", m_request.url(), maybe_line.error());
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            }
            auto line = maybe_line.release_value();
            if (line.is_null()) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks;
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                dbgln("Job: Expected HTTP header");
                return did_fail(Core::NetworkJob::Error::ProtocolFailed);
            }
            if (line.is_empty()) {
                if (m_state == State::Trailers) {
                    return finish_up();
                }
                if (on_headers_received) {
                    if (!m_set_cookie_headers.is_empty())
                        m_headers.set("Set-Cookie", JsonArray { m_set_cookie_headers }.to_string());
                    on_headers_received(m_headers, m_code > 0 ? m_code : Optional<u32> {});
                }
                m_state = State::InBody;

                // We've reached the end of the headers, there's a possibility that the server
                // responds with nothing (content-length = 0 with normal encoding); if that's the case,
                // quit early as we won't be reading anything anyway.
                if (auto result = m_headers.get("Content-Length"sv).value_or(""sv).to_uint(); result.has_value()) {
                    if (result.value() == 0 && !m_headers.get("Transfer-Encoding"sv).value_or(""sv).view().trim_whitespace().equals_ignoring_case("chunked"sv))
                        return finish_up();
                }
                // There's also the possibility that the server responds with 204 (No Content)
                // yet sets a Content-Length anyway; in that case, ignore Content-Length and quit early,
                // since the HTTP spec explicitly prohibits a Content-Length header when the response code is 204.
                if (m_code == 204)
                    return finish_up();

                break;
            }
            auto parts = line.split_view(':');
            if (parts.is_empty()) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks;
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                dbgln("Job: Expected HTTP header with key/value");
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            auto name = parts[0];
            if (line.length() < name.length() + 2) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks;
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                dbgln("Job: Malformed HTTP header: '{}' ({})", line, line.length());
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            auto value = line.substring(name.length() + 2, line.length() - name.length() - 2);
            if (name.equals_ignoring_case("Set-Cookie")) {
                dbgln_if(JOB_DEBUG, "Job: Received Set-Cookie header: '{}'", value);
                m_set_cookie_headers.append(move(value));

                auto can_read_without_blocking = m_socket->can_read_without_blocking();
                if (can_read_without_blocking.is_error())
                    return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
                if (!can_read_without_blocking.value())
                    return;
            } else if (auto existing_value = m_headers.get(name); existing_value.has_value()) {
                StringBuilder builder;
                builder.append(existing_value.value());
                builder.append(',');
                builder.append(value);
                m_headers.set(name, builder.build());
            } else {
                m_headers.set(name, value);
            }
            if (name.equals_ignoring_case("Content-Encoding")) {
                // Assume that any content-encoding means that we can't decode it as a stream :(
                dbgln_if(JOB_DEBUG, "Content-Encoding {} detected, cannot stream output :(", value);
                m_can_stream_response = false;
            } else if (name.equals_ignoring_case("Content-Length")) {
                auto length = value.to_uint();
                if (length.has_value())
                    m_content_length = length.value();
            }
            dbgln_if(JOB_DEBUG, "Job: [{}] = '{}'", name, value);

            auto can_read_without_blocking = m_socket->can_read_without_blocking();
            if (can_read_without_blocking.is_error())
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            if (!can_read_without_blocking.value()) {
                dbgln_if(JOB_DEBUG, "Can't read headers anymore, byebye :(");
                return;
            }
        }
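
        // Headers are done; read the response body. The body is either
        // Content-Length-delimited or, if Transfer-Encoding: chunked is set,
        // made up of hex-sized chunks terminated by a zero-size chunk.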
        VERIFY(m_state == State::InBody);

        auto can_read_without_blocking = m_socket->can_read_without_blocking();
        if (can_read_without_blocking.is_error())
            return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
        if (!can_read_without_blocking.value())
            return;
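
        // Read payload for as long as data is available without blocking; read_size
        // defaults to 64 KiB and is narrowed to the current chunk size for chunked bodies.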
        while (true) {
            auto can_read_without_blocking = m_socket->can_read_without_blocking();
            if (can_read_without_blocking.is_error())
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            if (!can_read_without_blocking.value())
                break;

            auto read_size = 64 * KiB;
            if (m_current_chunk_remaining_size.has_value()) {
            read_chunk_size:;
                auto remaining = m_current_chunk_remaining_size.value();
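                // A remaining size of -1 means the next chunk-size line still has to be read.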
                if (remaining == -1) {
                    // read size
                    auto maybe_size_data = read_line(PAGE_SIZE);
                    if (maybe_size_data.is_error()) {
                        dbgln_if(JOB_DEBUG, "Job: Could not receive chunk: {}", maybe_size_data.error());
                    }
                    auto size_data = maybe_size_data.release_value();

                    if (m_should_read_chunk_ending_line) {
                        VERIFY(size_data.is_empty());
                        m_should_read_chunk_ending_line = false;
                        continue;
                    }

                    auto size_lines = size_data.view().lines();
                    dbgln_if(JOB_DEBUG, "Job: Received a chunk with size '{}'", size_data);
                    if (size_lines.size() == 0) {
                        if (!m_socket->is_eof())
                            break;
                        dbgln("Job: Reached end of stream");
                        finish_up();
                        break;
                    } else {
                        auto chunk = size_lines[0].split_view(';', true);
                        String size_string = chunk[0];
                        char* endptr;
                        auto size = strtoul(size_string.characters(), &endptr, 16);
                        if (*endptr) {
                            // invalid number
                            deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
                            break;
                        }
                        if (size == 0) {
                            // This is the last chunk
                            // '0' *[; chunk-ext-name = chunk-ext-value]
                            // We're going to ignore _all_ chunk extensions
                            read_size = 0;
                            m_current_chunk_total_size = 0;
                            m_current_chunk_remaining_size = 0;

                            dbgln_if(JOB_DEBUG, "Job: Received the last chunk with extensions '{}'", size_string.substring_view(1, size_string.length() - 1));
                        } else {
                            m_current_chunk_total_size = size;
                            m_current_chunk_remaining_size = size;
                            read_size = size;

                            dbgln_if(JOB_DEBUG, "Job: Chunk of size '{}' started", size);
                        }
                    }
                } else {
                    read_size = remaining;

                    dbgln_if(JOB_DEBUG, "Job: Resuming chunk with '{}' bytes left over", remaining);
                }
            } else {
                auto transfer_encoding = m_headers.get("Transfer-Encoding");
                if (transfer_encoding.has_value()) {
                    // RFC 7230, section 3.3.3 (3):
                    // If a message is received with both a Transfer-Encoding and a Content-Length header field, the Transfer-Encoding overrides the Content-Length. [...]
                    // https://httpwg.org/specs/rfc7230.html#message.body.length
                    m_content_length = {};

                    // Note: Some servers add extra spaces around 'chunked', see #6302.
                    auto encoding = transfer_encoding.value().trim_whitespace();

                    dbgln_if(JOB_DEBUG, "Job: This content has transfer encoding '{}'", encoding);
                    if (encoding.equals_ignoring_case("chunked")) {
                        m_current_chunk_remaining_size = -1;
                        goto read_chunk_size;
                    } else {
                        dbgln("Job: Unknown transfer encoding '{}', the result will likely be wrong!", encoding);
                    }
                }
            }
            can_read_without_blocking = m_socket->can_read_without_blocking();
            if (can_read_without_blocking.is_error())
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            if (!can_read_without_blocking.value())
                break;

            dbgln_if(JOB_DEBUG, "Waiting for payload for {}", m_request.url());
            auto maybe_payload = receive(read_size);
            if (maybe_payload.is_error()) {
                dbgln_if(JOB_DEBUG, "Could not read the payload: {}", maybe_payload.error());
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            }
            auto payload = maybe_payload.release_value();

            if (payload.is_empty() && m_socket->is_eof()) {
                finish_up();
                break;
            }

            bool read_everything = false;
            if (m_content_length.has_value()) {
                auto length = m_content_length.value();
                if (m_received_size + payload.size() >= length) {
                    payload.resize(length - m_received_size);
                    read_everything = true;
                }
            }

            m_received_buffers.append(make<ReceivedBuffer>(payload));
            m_buffered_size += payload.size();
            m_received_size += payload.size();
            flush_received_buffers();

            deferred_invoke([this] { did_progress(m_content_length, m_received_size); });

            if (read_everything) {
                VERIFY(m_received_size <= m_content_length.value());
                finish_up();
                break;
            }
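
            // For chunked bodies, account for the bytes just read and, once the chunk
            // is exhausted, consume the CRLF that terminates it (or defer that to the
            // next read via m_should_read_chunk_ending_line).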
            if (m_current_chunk_remaining_size.has_value()) {
                auto size = m_current_chunk_remaining_size.value() - payload.size();

                dbgln_if(JOB_DEBUG, "Job: We have {} bytes left over in this chunk", size);
                if (size == 0) {
                    dbgln_if(JOB_DEBUG, "Job: Finished a chunk of {} bytes", m_current_chunk_total_size.value());

                    if (m_current_chunk_total_size.value() == 0) {
                        m_state = State::Trailers;
                        break;
                    }

                    // we've read everything, now let's get the next chunk
                    size = -1;

                    auto can_read_line = m_socket->can_read_line();
                    if (can_read_line.is_error())
                        return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
                    if (can_read_line.value()) {
                        auto maybe_line = read_line(PAGE_SIZE);
                        if (maybe_line.is_error()) {
                            return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
                        }

                        VERIFY(maybe_line.value().is_empty());
                    } else {
                        m_should_read_chunk_ending_line = true;
                    }
                }
                m_current_chunk_remaining_size = size;
            }
        }

        if (!m_socket->is_open()) {
            dbgln_if(JOB_DEBUG, "Connection appears to have closed, finishing up");
            finish_up();
        }
    });
}

void Job::timer_event(Core::TimerEvent& event)
{
    event.accept();
    finish_up();
    if (m_buffered_size == 0)
        stop_timer();
}

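// Wraps up the response: flattens and, if needed, content-decodes the buffered
// body, flushes it to the client, and schedules did_finish() once everything
// has been consumed (retrying via a timer if the client hasn't caught up yet).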
void Job::finish_up()
{
    VERIFY(!m_has_scheduled_finish);
    m_state = State::Finished;
    if (!m_can_stream_response) {
        auto maybe_flattened_buffer = ByteBuffer::create_uninitialized(m_buffered_size);
        if (maybe_flattened_buffer.is_error())
            return did_fail(Core::NetworkJob::Error::TransmissionFailed);
        auto flattened_buffer = maybe_flattened_buffer.release_value();
        u8* flat_ptr = flattened_buffer.data();
        for (auto& received_buffer : m_received_buffers) {
            memcpy(flat_ptr, received_buffer.pending_flush.data(), received_buffer.pending_flush.size());
            flat_ptr += received_buffer.pending_flush.size();
        }
        m_received_buffers.clear();

        // For the time being, we cannot stream stuff with content-encoding set to _anything_.
        // FIXME: LibCompress exposes a streaming interface, so this can be resolved.
        auto content_encoding = m_headers.get("Content-Encoding");
        if (content_encoding.has_value()) {
            if (auto result = handle_content_encoding(flattened_buffer, content_encoding.value()); result.has_value())
                flattened_buffer = result.release_value();
            else
                return did_fail(Core::NetworkJob::Error::TransmissionFailed);
        }

        m_buffered_size = flattened_buffer.size();
        m_received_buffers.append(make<ReceivedBuffer>(move(flattened_buffer)));
        m_can_stream_response = true;
    }
    flush_received_buffers();
    if (m_buffered_size != 0) {
        // We have to wait for the client to consume all the downloaded data
        // before we can actually call `did_finish`. In a normal flow, this should
        // never be hit since the client is reading as we are writing, unless there
        // are too many concurrent downloads going on.
        dbgln_if(JOB_DEBUG, "Flush finished with {} bytes remaining, will try again later", m_buffered_size);
        if (!has_timer())
            start_timer(50);
        return;
    }
    m_has_scheduled_finish = true;
    auto response = HttpResponse::create(m_code, move(m_headers), m_received_size);
    deferred_invoke([this, response = move(response)] {
        // If the server responded with "Connection: close", close the connection
        // as the server may or may not want to close the socket.
        if (auto result = response->headers().get("Connection"sv); result.has_value() && result.value().equals_ignoring_case("close"sv))
            shutdown(ShutdownMode::CloseSocket);
        did_finish(response);
    });
}

}