Job.cpp

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Debug.h>
#include <AK/JsonArray.h>
#include <LibCompress/Gzip.h>
#include <LibCompress/Zlib.h>
#include <LibCore/Event.h>
#include <LibCore/TCPSocket.h>
#include <LibHTTP/HttpResponse.h>
#include <LibHTTP/Job.h>
#include <stdio.h>
#include <unistd.h>

namespace HTTP {
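
// Decodes a fully buffered response body according to its Content-Encoding header.
// "gzip" and "deflate" (with or without the zlib wrapper) are handled via LibCompress;
// any other value is passed through unchanged. Returns an empty Optional if
// decompression fails.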
static Optional<ByteBuffer> handle_content_encoding(const ByteBuffer& buf, const String& content_encoding)
{
    dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf has content_encoding={}", content_encoding);
    if (content_encoding == "gzip") {
        if (!Compress::GzipDecompressor::is_likely_compressed(buf)) {
            dbgln("Job::handle_content_encoding: buf is not gzip compressed!");
        }
        dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf is gzip compressed!");
        auto uncompressed = Compress::GzipDecompressor::decompress_all(buf);
        if (!uncompressed.has_value()) {
            dbgln("Job::handle_content_encoding: Gzip::decompress() failed.");
            return {};
        }
        if constexpr (JOB_DEBUG) {
            dbgln("Job::handle_content_encoding: Gzip::decompress() successful.");
            dbgln("  Input size: {}", buf.size());
            dbgln("  Output size: {}", uncompressed.value().size());
        }
        return uncompressed.release_value();
    } else if (content_encoding == "deflate") {
        dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf is deflate compressed!");
        // Even though the content encoding is "deflate", it's actually deflate with the zlib wrapper.
        // https://tools.ietf.org/html/rfc7230#section-4.2.2
        auto uncompressed = Compress::Zlib::decompress_all(buf);
        if (!uncompressed.has_value()) {
            // From the RFC:
            // "Note: Some non-conformant implementations send the "deflate"
            //  compressed data without the zlib wrapper."
            dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: Zlib::decompress_all() failed. Trying DeflateDecompressor::decompress_all()");
            uncompressed = Compress::DeflateDecompressor::decompress_all(buf);
            if (!uncompressed.has_value()) {
                dbgln("Job::handle_content_encoding: DeflateDecompressor::decompress_all() failed.");
                return {};
            }
        }
        if constexpr (JOB_DEBUG) {
            dbgln("Job::handle_content_encoding: Deflate decompression successful.");
            dbgln("  Input size: {}", buf.size());
            dbgln("  Output size: {}", uncompressed.value().size());
        }
        return uncompressed.release_value();
    }
    return buf;
}
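
// Note: call sites live outside this file, so the following is only a sketch of the
// expected usage (an assumption, not something this file shows): a caller builds an
// HttpRequest, constructs a Job with an output stream that should receive the body,
// hooks up callbacks such as on_headers_received, and the transport then drives
// on_socket_connected() below once the connection is up.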
Job::Job(HttpRequest&& request, OutputStream& output_stream)
    : Core::NetworkJob(output_stream)
    , m_request(move(request))
{
}

Job::~Job()
{
}
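
// Writes as many buffered payloads as possible to the output stream. Fully written
// buffers are dropped from the queue; a partially written buffer is trimmed to its
// unwritten tail and flushing stops there. Does nothing while streaming is disabled
// (e.g. when a Content-Encoding is present).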
void Job::flush_received_buffers()
{
    if (!m_can_stream_response || m_buffered_size == 0)
        return;
    dbgln_if(JOB_DEBUG, "Job: Flushing received buffers: have {} bytes in {} buffers for {}", m_buffered_size, m_received_buffers.size(), m_request.url());
    for (size_t i = 0; i < m_received_buffers.size(); ++i) {
        auto& payload = m_received_buffers[i];
        auto written = do_write(payload);
        m_buffered_size -= written;
        if (written == payload.size()) {
            // FIXME: Make this a take-first-friendly object?
            m_received_buffers.take_first();
            --i;
            continue;
        }
        VERIFY(written < payload.size());
        payload = payload.slice(written, payload.size() - written);
        break;
    }
    dbgln_if(JOB_DEBUG, "Job: Flushing received buffers done: have {} bytes in {} buffers for {}", m_buffered_size, m_received_buffers.size(), m_request.url());
}
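
// Called once the underlying socket is connected: the write handler sends the raw
// request exactly once, and the read handler incrementally parses the response.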
void Job::on_socket_connected()
{
    register_on_ready_to_write([&] {
        if (m_sent_data)
            return;
        m_sent_data = true;
        auto raw_request = m_request.to_raw_request();
        if constexpr (JOB_DEBUG) {
            dbgln("Job: raw_request:");
            dbgln("{}", String::copy(raw_request));
        }
        bool success = write(raw_request);
        if (!success)
            deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
    });
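
    // The read handler below is a small state machine over m_state:
    // InStatus -> InHeaders -> InBody -> Trailers (for chunked bodies) -> Finished.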
    register_on_ready_to_read([&] {
        dbgln_if(JOB_DEBUG, "Ready to read for {}, state = {}, cancelled = {}", m_request.url(), to_underlying(m_state), is_cancelled());
        if (is_cancelled())
            return;
        if (m_state == State::Finished) {
            // We have everything we want. At this point we can either get an EOF, or a bunch
            // of extra newlines (if "Connection: close" wasn't specified), so just ignore
            // everything after this.
            return;
        }
        if (eof())
            return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
        if (m_state == State::InStatus) {
            if (!can_read_line()) {
                dbgln_if(JOB_DEBUG, "Job {} cannot read line", m_request.url());
                return;
            }
            auto line = read_line(PAGE_SIZE);
            dbgln_if(JOB_DEBUG, "Job {} read line of length {}", m_request.url(), line.length());
            if (line.is_null()) {
                dbgln("Job: Expected HTTP status");
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            }
            auto parts = line.split_view(' ');
            if (parts.size() < 3) {
                dbgln("Job: Expected 3-part HTTP status, got '{}'", line);
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            auto code = parts[1].to_uint();
            if (!code.has_value()) {
                dbgln("Job: Expected numeric HTTP status");
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            m_code = code.value();
            m_state = State::InHeaders;
            return;
        }
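
        // Header and trailer lines are parsed one at a time; an empty line ends the block.
        // Repeated headers are folded into a comma-separated value, except Set-Cookie,
        // which is collected separately and exposed as a JSON array.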
        if (m_state == State::InHeaders || m_state == State::Trailers) {
            if (!can_read_line())
                return;
            // There's no max limit defined on headers, but for our sanity, let's limit it to 32K.
            auto line = read_line(32 * KiB);
            if (line.is_null()) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                dbgln("Job: Expected HTTP header");
                return did_fail(Core::NetworkJob::Error::ProtocolFailed);
            }
            if (line.is_empty()) {
                if (m_state == State::Trailers) {
                    return finish_up();
                } else {
                    if (on_headers_received) {
                        if (!m_set_cookie_headers.is_empty())
                            m_headers.set("Set-Cookie", JsonArray { m_set_cookie_headers }.to_string());
                        on_headers_received(m_headers, m_code > 0 ? m_code : Optional<u32> {});
                    }
                    m_state = State::InBody;
                }
                // We've reached the end of the headers, there's a possibility that the server
                // responds with nothing (content-length = 0 with normal encoding); if that's the case,
                // quit early as we won't be reading anything anyway.
                if (auto result = m_headers.get("Content-Length"sv).value_or(""sv).to_uint(); result.has_value()) {
                    if (result.value() == 0 && !m_headers.get("Transfer-Encoding"sv).value_or(""sv).view().trim_whitespace().equals_ignoring_case("chunked"sv))
                        return finish_up();
                }
                return;
            }
            auto parts = line.split_view(':');
            if (parts.is_empty()) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                dbgln("Job: Expected HTTP header with key/value");
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            auto name = parts[0];
            if (line.length() < name.length() + 2) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                dbgln("Job: Malformed HTTP header: '{}' ({})", line, line.length());
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            auto value = line.substring(name.length() + 2, line.length() - name.length() - 2);
            if (name.equals_ignoring_case("Set-Cookie")) {
                dbgln_if(JOB_DEBUG, "Job: Received Set-Cookie header: '{}'", value);
                m_set_cookie_headers.append(move(value));
                return;
            }
            if (auto existing_value = m_headers.get(name); existing_value.has_value()) {
                StringBuilder builder;
                builder.append(existing_value.value());
                builder.append(',');
                builder.append(value);
                m_headers.set(name, builder.build());
            } else {
                m_headers.set(name, value);
            }
            if (name.equals_ignoring_case("Content-Encoding")) {
                // Assume that any content-encoding means that we can't decode it as a stream :(
                dbgln_if(JOB_DEBUG, "Content-Encoding {} detected, cannot stream output :(", value);
                m_can_stream_response = false;
            } else if (name.equals_ignoring_case("Content-Length")) {
                auto length = value.to_uint();
                if (length.has_value())
                    m_content_length = length.value();
            }
            dbgln_if(JOB_DEBUG, "Job: [{}] = '{}'", name, value);
            return;
        }
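
        // Everything past the headers is body data. For chunked transfer encoding,
        // m_current_chunk_remaining_size tracks progress through the current chunk
        // (-1 means "read the next chunk-size line").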
        VERIFY(m_state == State::InBody);
        VERIFY(can_read());
        read_while_data_available([&] {
            auto read_size = 64 * KiB;
            if (m_current_chunk_remaining_size.has_value()) {
            read_chunk_size:;
                auto remaining = m_current_chunk_remaining_size.value();
                if (remaining == -1) {
                    // read size
                    auto size_data = read_line(PAGE_SIZE);
                    if (m_should_read_chunk_ending_line) {
                        VERIFY(size_data.is_empty());
                        m_should_read_chunk_ending_line = false;
                        return IterationDecision::Continue;
                    }
                    auto size_lines = size_data.view().lines();
                    dbgln_if(JOB_DEBUG, "Job: Received a chunk with size '{}'", size_data);
                    if (size_lines.size() == 0) {
                        if (!eof())
                            return AK::IterationDecision::Break;
                        dbgln("Job: Reached end of stream");
                        finish_up();
                        return IterationDecision::Break;
                    } else {
                        auto chunk = size_lines[0].split_view(';', true);
                        String size_string = chunk[0];
                        char* endptr;
                        auto size = strtoul(size_string.characters(), &endptr, 16);
                        if (*endptr) {
                            // invalid number
                            deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
                            return IterationDecision::Break;
                        }
                        if (size == 0) {
                            // This is the last chunk
                            // '0' *[; chunk-ext-name = chunk-ext-value]
                            // We're going to ignore _all_ chunk extensions
                            read_size = 0;
                            m_current_chunk_total_size = 0;
                            m_current_chunk_remaining_size = 0;
                            dbgln_if(JOB_DEBUG, "Job: Received the last chunk with extensions '{}'", size_string.substring_view(1, size_string.length() - 1));
                        } else {
                            m_current_chunk_total_size = size;
                            m_current_chunk_remaining_size = size;
                            read_size = size;
                            dbgln_if(JOB_DEBUG, "Job: Chunk of size '{}' started", size);
                        }
                    }
                } else {
                    read_size = remaining;
                    dbgln_if(JOB_DEBUG, "Job: Resuming chunk with '{}' bytes left over", remaining);
                }
            } else {
                auto transfer_encoding = m_headers.get("Transfer-Encoding");
                if (transfer_encoding.has_value()) {
                    // HTTP/1.1 3.3.3.3:
                    // If a message is received with both a Transfer-Encoding and a Content-Length header field, the Transfer-Encoding overrides the Content-Length. [...]
                    // https://httpwg.org/specs/rfc7230.html#message.body.length
                    m_content_length = {};
                    // Note: Some servers add extra spaces around 'chunked', see #6302.
                    auto encoding = transfer_encoding.value().trim_whitespace();
                    dbgln_if(JOB_DEBUG, "Job: This content has transfer encoding '{}'", encoding);
                    if (encoding.equals_ignoring_case("chunked")) {
                        m_current_chunk_remaining_size = -1;
                        goto read_chunk_size;
                    } else {
                        dbgln("Job: Unknown transfer encoding '{}', the result will likely be wrong!", encoding);
                    }
                }
            }
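
            // At this point read_size is either the default 64 KiB, the size of a freshly
            // parsed chunk, or however many bytes are still outstanding in the current chunk.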
            dbgln_if(JOB_DEBUG, "Waiting for payload for {}", m_request.url());
            auto payload = receive(read_size);
            dbgln_if(JOB_DEBUG, "Received {} bytes of payload from {}", payload.size(), m_request.url());
            if (payload.is_empty()) {
                if (eof()) {
                    finish_up();
                    return IterationDecision::Break;
                }
                if (should_fail_on_empty_payload()) {
                    deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
                    return IterationDecision::Break;
                }
            }
            bool read_everything = false;
            if (m_content_length.has_value()) {
                auto length = m_content_length.value();
                if (m_received_size + payload.size() >= length) {
                    payload.resize(length - m_received_size);
                    read_everything = true;
                }
            }
            m_received_buffers.append(payload);
            m_buffered_size += payload.size();
            m_received_size += payload.size();
            flush_received_buffers();
            deferred_invoke([this] { did_progress(m_content_length, m_received_size); });
            if (read_everything) {
                VERIFY(m_received_size <= m_content_length.value());
                finish_up();
                return IterationDecision::Break;
            }
            if (m_current_chunk_remaining_size.has_value()) {
                auto size = m_current_chunk_remaining_size.value() - payload.size();
                dbgln_if(JOB_DEBUG, "Job: We have {} bytes left over in this chunk", size);
                if (size == 0) {
                    dbgln_if(JOB_DEBUG, "Job: Finished a chunk of {} bytes", m_current_chunk_total_size.value());
                    if (m_current_chunk_total_size.value() == 0) {
                        m_state = State::Trailers;
                        return IterationDecision::Break;
                    }
                    // we've read everything, now let's get the next chunk
                    size = -1;
                    if (can_read_line()) {
                        auto line = read_line(PAGE_SIZE);
                        VERIFY(line.is_empty());
                    } else {
                        m_should_read_chunk_ending_line = true;
                    }
                }
                m_current_chunk_remaining_size = size;
            }
            return IterationDecision::Continue;
        });

        if (!is_established()) {
            dbgln_if(JOB_DEBUG, "Connection appears to have closed, finishing up");
            finish_up();
        }
    });
}
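
// The timer only runs while finish_up() is waiting for the client to drain buffered
// response data; each tick retries the flush, and the timer stops once the buffers are empty.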
void Job::timer_event(Core::TimerEvent& event)
{
    event.accept();
    finish_up();
    if (m_buffered_size == 0)
        stop_timer();
}
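
// Flattens the received buffers, decodes any Content-Encoding, and hands the complete
// HttpResponse to did_finish() once the client has consumed all buffered data. If data
// is still pending, a 50 ms timer keeps retrying the flush (see timer_event() above).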
void Job::finish_up()
{
    VERIFY(!m_has_scheduled_finish);
    m_state = State::Finished;
    if (!m_can_stream_response) {
        auto flattened_buffer = ByteBuffer::create_uninitialized(m_buffered_size).release_value(); // FIXME: Handle possible OOM situation.
        u8* flat_ptr = flattened_buffer.data();
        for (auto& received_buffer : m_received_buffers) {
            memcpy(flat_ptr, received_buffer.data(), received_buffer.size());
            flat_ptr += received_buffer.size();
        }
        m_received_buffers.clear();
        // For the time being, we cannot stream stuff with content-encoding set to _anything_.
        // FIXME: LibCompress exposes a streaming interface, so this can be resolved
        auto content_encoding = m_headers.get("Content-Encoding");
        if (content_encoding.has_value()) {
            if (auto result = handle_content_encoding(flattened_buffer, content_encoding.value()); result.has_value())
                flattened_buffer = result.release_value();
            else
                return did_fail(Core::NetworkJob::Error::TransmissionFailed);
        }
        m_buffered_size = flattened_buffer.size();
        m_received_buffers.append(move(flattened_buffer));
        m_can_stream_response = true;
    }
    flush_received_buffers();
    if (m_buffered_size != 0) {
        // We have to wait for the client to consume all the downloaded data
        // before we can actually call `did_finish`. In a normal flow, this should
        // never be hit since the client is reading as we are writing, unless there
        // are too many concurrent downloads going on.
        dbgln_if(JOB_DEBUG, "Flush finished with {} bytes remaining, will try again later", m_buffered_size);
        if (!has_timer())
            start_timer(50);
        return;
    }
    m_has_scheduled_finish = true;
    auto response = HttpResponse::create(m_code, move(m_headers));
    deferred_invoke([this, response = move(response)] {
        // If the server responded with "Connection: close", close the connection
        // as the server may or may not want to close the socket.
        if (auto result = response->headers().get("Connection"sv); result.has_value() && result.value().equals_ignoring_case("close"sv))
            shutdown(ShutdownMode::CloseSocket);
        did_finish(response);
    });
}

}