// Job.cpp
  1. /*
  2. * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
  3. *
  4. * SPDX-License-Identifier: BSD-2-Clause
  5. */
  6. #include <AK/Debug.h>
  7. #include <LibCompress/Gzip.h>
  8. #include <LibCompress/Zlib.h>
  9. #include <LibCore/Event.h>
  10. #include <LibCore/TCPSocket.h>
  11. #include <LibHTTP/HttpResponse.h>
  12. #include <LibHTTP/Job.h>
  13. #include <stdio.h>
  14. #include <unistd.h>
  15. namespace HTTP {
  16. static ByteBuffer handle_content_encoding(const ByteBuffer& buf, const String& content_encoding)
  17. {
  18. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf has content_encoding={}", content_encoding);
  19. if (content_encoding == "gzip") {
  20. if (!Compress::GzipDecompressor::is_likely_compressed(buf)) {
  21. dbgln("Job::handle_content_encoding: buf is not gzip compressed!");
  22. }
  23. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf is gzip compressed!");
  24. auto uncompressed = Compress::GzipDecompressor::decompress_all(buf);
  25. if (!uncompressed.has_value()) {
  26. dbgln("Job::handle_content_encoding: Gzip::decompress() failed. Returning original buffer.");
  27. return buf;
  28. }
  29. if constexpr (JOB_DEBUG) {
  30. dbgln("Job::handle_content_encoding: Gzip::decompress() successful.");
  31. dbgln(" Input size: {}", buf.size());
  32. dbgln(" Output size: {}", uncompressed.value().size());
  33. }
  34. return uncompressed.value();
  35. } else if (content_encoding == "deflate") {
  36. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: buf is deflate compressed!");
  37. // Even though the content encoding is "deflate", it's actually deflate with the zlib wrapper.
  38. // https://tools.ietf.org/html/rfc7230#section-4.2.2
  39. auto uncompressed = Compress::Zlib::decompress_all(buf);
  40. if (!uncompressed.has_value()) {
  41. // From the RFC:
  42. // "Note: Some non-conformant implementations send the "deflate"
  43. // compressed data without the zlib wrapper."
  44. dbgln_if(JOB_DEBUG, "Job::handle_content_encoding: Zlib::decompress_all() failed. Trying DeflateDecompressor::decompress_all()");
  45. uncompressed = Compress::DeflateDecompressor::decompress_all(buf);
  46. if (!uncompressed.has_value()) {
  47. dbgln("Job::handle_content_encoding: DeflateDecompressor::decompress_all() failed, returning original buffer.");
  48. return buf;
  49. }
  50. }
  51. if constexpr (JOB_DEBUG) {
  52. dbgln("Job::handle_content_encoding: Deflate decompression successful.");
  53. dbgln(" Input size: {}", buf.size());
  54. dbgln(" Output size: {}", uncompressed.value().size());
  55. }
  56. return uncompressed.value();
  57. }
  58. return buf;
  59. }
// Constructs a Job for the given request; response body bytes are streamed
// into `output_stream` (via the Core::NetworkJob base) as they arrive.
Job::Job(const HttpRequest& request, OutputStream& output_stream)
    : Core::NetworkJob(output_stream)
    , m_request(request)
{
}
  65. Job::~Job()
  66. {
  67. }
  68. void Job::flush_received_buffers()
  69. {
  70. if (!m_can_stream_response || m_buffered_size == 0)
  71. return;
  72. dbgln_if(JOB_DEBUG, "Job: Flushing received buffers: have {} bytes in {} buffers", m_buffered_size, m_received_buffers.size());
  73. for (size_t i = 0; i < m_received_buffers.size(); ++i) {
  74. auto& payload = m_received_buffers[i];
  75. auto written = do_write(payload);
  76. m_buffered_size -= written;
  77. if (written == payload.size()) {
  78. // FIXME: Make this a take-first-friendly object?
  79. m_received_buffers.take_first();
  80. --i;
  81. continue;
  82. }
  83. VERIFY(written < payload.size());
  84. payload = payload.slice(written, payload.size() - written);
  85. break;
  86. }
  87. dbgln_if(JOB_DEBUG, "Job: Flushing received buffers done: have {} bytes in {} buffers", m_buffered_size, m_received_buffers.size());
  88. }
// Called once the underlying socket connects. Installs two callbacks:
//  - ready-to-write: sends the serialized request exactly once;
//  - ready-to-read: drives the response state machine
//    InStatus -> InHeaders -> InBody -> [Trailers ->] Finished,
//    handling both Content-Length and chunked transfer-coding bodies.
void Job::on_socket_connected()
{
    register_on_ready_to_write([&] {
        // Guard so the request is only transmitted once, no matter how many
        // times the socket reports writability.
        if (m_sent_data)
            return;
        m_sent_data = true;
        auto raw_request = m_request.to_raw_request();
        if constexpr (JOB_DEBUG) {
            dbgln("Job: raw_request:");
            dbgln("{}", String::copy(raw_request));
        }
        bool success = write(raw_request);
        if (!success)
            deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
    });
    register_on_ready_to_read([&] {
        if (is_cancelled())
            return;
        if (m_state == State::Finished) {
            // We have everything we want, at this point, we can either get an EOF, or a bunch of extra newlines
            // (unless "Connection: close" isn't specified)
            // So just ignore everything after this.
            return;
        }
        if (m_state == State::InStatus) {
            // Parse the status line, e.g. "HTTP/1.1 200 OK".
            if (!can_read_line())
                return;
            auto line = read_line(PAGE_SIZE);
            if (line.is_null()) {
                dbgln("Job: Expected HTTP status");
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
            }
            auto parts = line.split_view(' ');
            if (parts.size() < 3) {
                dbgln("Job: Expected 3-part HTTP status, got '{}'", line);
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            // parts[1] is the numeric status code; parts[0] (HTTP version) and
            // parts[2..] (reason phrase) are ignored.
            auto code = parts[1].to_uint();
            if (!code.has_value()) {
                dbgln("Job: Expected numeric HTTP status");
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            m_code = code.value();
            m_state = State::InHeaders;
            return;
        }
        if (m_state == State::InHeaders || m_state == State::Trailers) {
            // Header lines and chunked-body trailer lines share "Name: value" syntax.
            if (!can_read_line())
                return;
            auto line = read_line(PAGE_SIZE);
            if (line.is_null()) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                dbgln("Job: Expected HTTP header");
                return did_fail(Core::NetworkJob::Error::ProtocolFailed);
            }
            if (line.is_empty()) {
                // A blank line ends the header (or trailer) section.
                if (m_state == State::Trailers) {
                    return finish_up();
                } else {
                    if (on_headers_received)
                        on_headers_received(m_headers, m_code > 0 ? m_code : Optional<u32> {});
                    m_state = State::InBody;
                }
                return;
            }
            auto parts = line.split_view(':');
            if (parts.is_empty()) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                dbgln("Job: Expected HTTP header with key/value");
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            auto name = parts[0];
            // Require at least name + ": " before any value can exist.
            if (line.length() < name.length() + 2) {
                if (m_state == State::Trailers) {
                    // Some servers like to send two ending chunks
                    // use this fact as an excuse to ignore anything after the last chunk
                    // that is not a valid trailing header.
                    return finish_up();
                }
                dbgln("Job: Malformed HTTP header: '{}' ({})", line, line.length());
                return deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
            }
            // NOTE(review): the +2 offset assumes exactly ": " separates name and
            // value; a header with no space after the colon would lose its first
            // value character — confirm against the servers we care about.
            auto value = line.substring(name.length() + 2, line.length() - name.length() - 2);
            m_headers.set(name, value);
            if (name.equals_ignoring_case("Content-Encoding")) {
                // Assume that any content-encoding means that we can't decode it as a stream :(
                dbgln_if(JOB_DEBUG, "Content-Encoding {} detected, cannot stream output :(", value);
                m_can_stream_response = false;
            }
            dbgln_if(JOB_DEBUG, "Job: [{}] = '{}'", name, value);
            return;
        }
        VERIFY(m_state == State::InBody);
        VERIFY(can_read());
        read_while_data_available([&] {
            // Default read size for non-chunked bodies; chunked bodies override
            // this with the current chunk's (remaining) size.
            auto read_size = 64 * KiB;
            if (m_current_chunk_remaining_size.has_value()) {
            read_chunk_size:;
                auto remaining = m_current_chunk_remaining_size.value();
                // -1 is the sentinel for "a new chunk-size line must be read next"
                // (set below when chunked encoding is first detected, and again
                // after each completed chunk).
                if (remaining == -1) {
                    // read size
                    auto size_data = read_line(PAGE_SIZE);
                    if (m_should_read_chunk_ending_line) {
                        // We still owed the CRLF that terminates the previous
                        // chunk's data; consume it and come back around.
                        VERIFY(size_data.is_empty());
                        m_should_read_chunk_ending_line = false;
                        return IterationDecision::Continue;
                    }
                    auto size_lines = size_data.view().lines();
                    dbgln_if(JOB_DEBUG, "Job: Received a chunk with size '{}'", size_data);
                    if (size_lines.size() == 0) {
                        dbgln("Job: Reached end of stream");
                        finish_up();
                        return IterationDecision::Break;
                    } else {
                        // chunk-size [; chunk-ext] — split off any extensions.
                        auto chunk = size_lines[0].split_view(';', true);
                        String size_string = chunk[0];
                        char* endptr;
                        auto size = strtoul(size_string.characters(), &endptr, 16);
                        if (*endptr) {
                            // invalid number
                            deferred_invoke([this] { did_fail(Core::NetworkJob::Error::TransmissionFailed); });
                            return IterationDecision::Break;
                        }
                        if (size == 0) {
                            // This is the last chunk
                            // '0' *[; chunk-ext-name = chunk-ext-value]
                            // We're going to ignore _all_ chunk extensions
                            read_size = 0;
                            m_current_chunk_total_size = 0;
                            m_current_chunk_remaining_size = 0;
                            dbgln_if(JOB_DEBUG, "Job: Received the last chunk with extensions '{}'", size_string.substring_view(1, size_string.length() - 1));
                        } else {
                            m_current_chunk_total_size = size;
                            m_current_chunk_remaining_size = size;
                            read_size = size;
                            dbgln_if(JOB_DEBUG, "Job: Chunk of size '{}' started", size);
                        }
                    }
                } else {
                    // Mid-chunk: only read up to what's left of this chunk so we
                    // don't swallow the trailing CRLF / next chunk-size line.
                    read_size = remaining;
                    dbgln_if(JOB_DEBUG, "Job: Resuming chunk with '{}' bytes left over", remaining);
                }
            } else {
                auto transfer_encoding = m_headers.get("Transfer-Encoding");
                if (transfer_encoding.has_value()) {
                    // Note: Some servers add extra spaces around 'chunked', see #6302.
                    auto encoding = transfer_encoding.value().trim_whitespace();
                    dbgln_if(JOB_DEBUG, "Job: This content has transfer encoding '{}'", encoding);
                    if (encoding.equals_ignoring_case("chunked")) {
                        // Switch into chunked mode and jump back up to parse the
                        // first chunk-size line immediately.
                        m_current_chunk_remaining_size = -1;
                        goto read_chunk_size;
                    } else {
                        dbgln("Job: Unknown transfer encoding '{}', the result will likely be wrong!", encoding);
                    }
                }
            }
            auto payload = receive(read_size);
            if (payload.is_empty()) {
                if (eof()) {
                    finish_up();
                    return IterationDecision::Break;
                }
                if (should_fail_on_empty_payload()) {
                    deferred_invoke([this] { did_fail(Core::NetworkJob::Error::ProtocolFailed); });
                    return IterationDecision::Break;
                }
            }
            m_received_buffers.append(payload);
            m_buffered_size += payload.size();
            m_received_size += payload.size();
            flush_received_buffers();
            if (m_current_chunk_remaining_size.has_value()) {
                // Account for what we just consumed of the current chunk.
                auto size = m_current_chunk_remaining_size.value() - payload.size();
                dbgln_if(JOB_DEBUG, "Job: We have {} bytes left over in this chunk", size);
                if (size == 0) {
                    dbgln_if(JOB_DEBUG, "Job: Finished a chunk of {} bytes", m_current_chunk_total_size.value());
                    if (m_current_chunk_total_size.value() == 0) {
                        // The terminating zero-size chunk: trailers come next.
                        m_state = State::Trailers;
                        return IterationDecision::Break;
                    }
                    // we've read everything, now let's get the next chunk
                    size = -1;
                    if (can_read_line()) {
                        // Consume the CRLF that terminates the chunk data.
                        auto line = read_line(PAGE_SIZE);
                        VERIFY(line.is_empty());
                    } else {
                        // The CRLF hasn't arrived yet; remember to eat it on the
                        // next iteration before reading the next chunk size.
                        m_should_read_chunk_ending_line = true;
                    }
                }
                m_current_chunk_remaining_size = size;
            }
            // Report progress and, when Content-Length is known, detect completion.
            auto content_length_header = m_headers.get("Content-Length");
            Optional<u32> content_length {};
            if (content_length_header.has_value()) {
                auto length = content_length_header.value().to_uint();
                if (length.has_value())
                    content_length = length.value();
            }
            deferred_invoke([this, content_length] { did_progress(content_length, m_received_size); });
            if (content_length.has_value()) {
                auto length = content_length.value();
                if (m_received_size >= length) {
                    // Clamp in case we read past the advertised length.
                    m_received_size = length;
                    finish_up();
                    return IterationDecision::Break;
                }
            }
            return IterationDecision::Continue;
        });
        if (!is_established()) {
            dbgln_if(JOB_DEBUG, "Connection appears to have closed, finishing up");
            finish_up();
        }
    });
}
  314. void Job::timer_event(Core::TimerEvent& event)
  315. {
  316. event.accept();
  317. finish_up();
  318. if (m_buffered_size == 0)
  319. stop_timer();
  320. }
  321. void Job::finish_up()
  322. {
  323. VERIFY(!m_has_scheduled_finish);
  324. m_state = State::Finished;
  325. if (!m_can_stream_response) {
  326. auto flattened_buffer = ByteBuffer::create_uninitialized(m_received_size).release_value(); // FIXME: Handle possible OOM situation.
  327. u8* flat_ptr = flattened_buffer.data();
  328. for (auto& received_buffer : m_received_buffers) {
  329. memcpy(flat_ptr, received_buffer.data(), received_buffer.size());
  330. flat_ptr += received_buffer.size();
  331. }
  332. m_received_buffers.clear();
  333. // For the time being, we cannot stream stuff with content-encoding set to _anything_.
  334. // FIXME: LibCompress exposes a streaming interface, so this can be resolved
  335. auto content_encoding = m_headers.get("Content-Encoding");
  336. if (content_encoding.has_value()) {
  337. flattened_buffer = handle_content_encoding(flattened_buffer, content_encoding.value());
  338. }
  339. m_buffered_size = flattened_buffer.size();
  340. m_received_buffers.append(move(flattened_buffer));
  341. m_can_stream_response = true;
  342. }
  343. flush_received_buffers();
  344. if (m_buffered_size != 0) {
  345. // We have to wait for the client to consume all the downloaded data
  346. // before we can actually call `did_finish`. in a normal flow, this should
  347. // never be hit since the client is reading as we are writing, unless there
  348. // are too many concurrent downloads going on.
  349. dbgln_if(JOB_DEBUG, "Flush finished with {} bytes remaining, will try again later", m_buffered_size);
  350. if (!has_timer())
  351. start_timer(50);
  352. return;
  353. }
  354. m_has_scheduled_finish = true;
  355. auto response = HttpResponse::create(m_code, move(m_headers));
  356. deferred_invoke([this, response = move(response)] {
  357. did_finish(response);
  358. });
  359. }
  360. }