spec/net_http_parser_spec.lua @ 13652:a08065207ef0

net.server_epoll: Call :shutdown() on TLS sockets when supported

Comment from Matthew: This fixes a potential issue where the Prosody process gets blocked on sockets waiting for them to close. Unlike non-TLS sockets, closing a TLS socket sends layer-7 data, which can cause problems for sockets that are in the process of being cleaned up. This depends on LuaSec changes which are not yet upstream.

From Martijn's original email:

So first my analysis of LuaSec. In ssl.c the socket is put into blocking mode right before calling SSL_shutdown() inside meth_destroy(). My best guess as to why is that meth_destroy() is linked to the __close and __gc methods, which can't exactly be called multiple times, and LuaSec wants to make sure that a TLS session is shut down as cleanly as possible. I can't say I disagree with this reasoning and don't want to change this behaviour.

My solution, which leaves the current behaviour intact, is to introduce a shutdown() method. I am aware that this overlaps in a conflicting way with TCP's shutdown method, but it stays close to the OpenSSL name. This method calls SSL_shutdown() in the current (non)blocking mode of the underlying socket and returns a boolean indicating whether the shutdown is complete (matching SSL_shutdown()'s 0 or 1 return values); on error it returns false plus the familiar ssl_ioerror() strings. That error can then be used to determine whether we have wantread/wantwrite to finalize things. Once meth_shutdown() has been called, a shutdown flag is set, which tells meth_destroy() that SSL_shutdown() has been handled by the application and the socket need not be put into blocking mode. I've left the SSL_shutdown() call in the LSEC_STATE_CONNECTED branch to prevent a TOCTOU if the application reaches a timeout in its shutdown code, which might allow SSL_shutdown() to clean up anyway at the last possible moment.

Another thing I've changed in LuaSec is the call to socket_setblocking() right before calling close(2) in socket_destroy() in usocket.c. According to the latest POSIX[0]: "Note that the requirement for close() on a socket to block for up to the current linger interval is not conditional on the O_NONBLOCK setting." I read this to mean that removing O_NONBLOCK from the socket before close doesn't change the behaviour and only causes noise in system-call tracers. I didn't touch the Windows bits of this, since I don't do Windows.

On the Prosody side I've made the TLS shutdown bits resemble interface:onwritable(), and put them under a combined guard of self._tls and self.conn.shutdown. The self._tls check is there to prevent getting stuck on this condition, and the self.conn.shutdown check is there to prevent the code being called by instances where the patched LuaSec isn't deployed.

The destroy() method can be called from various places, and I read it as the "we give up" error path. To accommodate these unexpected entry points I've added a single call to self.conn:shutdown() to prevent the socket being put into blocking mode; I have no expectation that there is any other use here. As before, the self.conn.shutdown check makes sure it's not called on unpatched LuaSec deployments, and self._tls makes sure we don't call shutdown() on TCP sockets.

I wouldn't recommend logging the conn:shutdown() error inside close(), since a lot of clients simply close the connection before SSL_shutdown() is done.
author Martijn van Duren <martijn@openbsd.org>
date Thu, 06 Feb 2025 15:04:38 +0000
parent 13378:db30ffbf2090
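
A minimal sketch (in Lua) of the shutdown-handling pattern the commit message describes, assuming the patched LuaSec in which conn:shutdown() returns true once SSL_shutdown() has completed, or false plus a "wantread"/"wantwrite" ssl_ioerror() string when it needs more I/O. The function name and the interface methods used here (set(), close(), destroy()) are illustrative stand-ins for net.server_epoll's connection handle, not the actual patch:

local function shutdown_tls(interface)
	if not (interface._tls and interface.conn.shutdown) then
		-- Plain TCP socket, or an unpatched LuaSec without :shutdown():
		-- close as before.
		return interface:close();
	end
	local done, err = interface.conn:shutdown();
	if done then
		interface:close(); -- SSL_shutdown() completed; close without blocking
	elseif err == "wantread" then
		interface:set(true, nil); -- wait until readable, then retry
	elseif err == "wantwrite" then
		interface:set(nil, true); -- wait until writable, then retry
	else
		interface:destroy(); -- any other error: the "we give up" path
	end
end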

local http_parser = require "net.http.parser";
local sha1 = require "util.hashes".sha1;

-- Feed string inputs to the parser a few bytes at a time, to exercise its
-- handling of data split across arbitrary chunk boundaries.
local parser_input_bytes = 3;

-- Convert the bare "\n" line endings in the literals below to the "\r\n"
-- endings required by HTTP. The extra parentheses discard gsub's second
-- return value (the substitution count).
local function CRLF(s)
	return (s:gsub("\n", "\r\n"));
end
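-- For example:
--   CRLF("Host: example.com\n\n")  --> "Host: example.com\r\n\r\n"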

-- Feed `stream` (a whole string, or a table of pre-split chunks) to a new
-- parser and assert that the result matches `expect` (body, chunks, count).
local function test_stream(stream, expect)
	local chunks_processed = 0;
	local success_cb = spy.new(function (packet)
		assert.is_table(packet);
		if packet.body ~= false then
			assert.is_equal(expect.body, packet.body);
		end
		if expect.chunks then
			if chunks_processed == 0 then
				-- The first delivery is a partial packet; attach a body_sink
				-- so subsequent body chunks are streamed to it, not buffered.
				assert.is_true(packet.partial);
				packet.body_sink = {
					write = function (_, data)
						chunks_processed = chunks_processed + 1;
						assert.equal(expect.chunks[chunks_processed], data);
						return true;
					end;
				};
			end
		end
	end);

	local function options_cb()
		return {
			-- Force streaming API mode
			body_size_limit = expect.chunks and 0 or nil;
			buffer_size_limit = 10*1024*2;
		};
	end

	-- Responses begin "HTTP/..."; parse those as a client, requests as a server
	local mode = (stream[1] or stream):sub(1, 4) == "HTTP" and "client" or "server";
	local parser = http_parser.new(success_cb, error, mode, options_cb);
	if type(stream) == "string" then
		-- Iterate over the string in parser_input_bytes-sized pieces
		for chunk in stream:gmatch("."..string.rep(".?", parser_input_bytes-1)) do
			parser:feed(chunk);
		end
	else
		for _, chunk in ipairs(stream) do
			parser:feed(chunk);
		end
	end

	if expect.chunks then
		assert.equal(chunks_processed, #expect.chunks);
	end
	assert.spy(success_cb).was_called(expect.count or 1);
end


describe("net.http.parser", function()
	describe("parser", function()
		it("should handle requests with no content-length or body", function ()
			test_stream(
CRLF[[
GET / HTTP/1.1
Host: example.com

]],
				{
					body = "";
				}
			);
		end);

		it("should handle responses with empty body", function ()
			test_stream(
CRLF[[
HTTP/1.1 200 OK
Content-Length: 0

]],
				{
					body = "";
				}
			);
		end);

		it("should handle simple responses", function ()
			test_stream(
CRLF[[
HTTP/1.1 200 OK
Content-Length: 7

Hello
]],
				{
					body = "Hello\r\n", count = 1;
				}
			);
		end);

		it("should handle chunked encoding in responses", function ()
			test_stream(
CRLF[[
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
H
1
e
2
ll
1
o
0


]],
				{
					body = "Hello", count = 3;
				}
			);
		end);

		it("should handle a stream of responses", function ()
			test_stream(
CRLF[[
HTTP/1.1 200 OK
Content-Length: 5

Hello
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
H
1
e
2
ll
1
o
0


]],
				{
					body = "Hello", count = 4;
				}
			);
		end);

		it("should correctly find chunk boundaries", function ()
			test_stream({
CRLF[[
HTTP/1.1 200 OK
Transfer-Encoding: chunked

]].."3\r\n:)\n\r\n"},
				{
					count = 1; -- success_cb is called once, with a partial packet
					chunks = {
						":)\n"
					};
				}
			);
		end);

		it("should reject very large request heads", function()
			local finished = false;
			local success_cb = spy.new(function()
				finished = true;
			end)
			local error_cb = spy.new(function()
				finished = true;
			end)
			local parser = http_parser.new(success_cb, error_cb, "server", function()
				return { head_size_limit = 1024; body_size_limit = 1024; buffer_size_limit = 2048 };
			end)
			parser:feed("GET / HTTP/1.1\r\n");
			for i = 1, 64 do -- 64 header lines is roughly 2KB, well past head_size_limit
				parser:feed(string.format("Header-%04d: Yet-AnotherValue\r\n", i));
				if finished then
					-- should hit an error around half-way
					break
				end
			end
			if not finished then
				parser:feed("\r\n")
			end
			assert.spy(success_cb).was_called(0);
			assert.spy(error_cb).was_called(1);
			assert.spy(error_cb).was_called_with("header-too-large");
		end)
	end);

	it("should handle large chunked responses", function ()
		local data = assert(io.open("spec/inputs/http/httpstream-chunked-test.txt", "rb")):read("*a");

		-- Just a sanity check... text editors and things may mess with line endings, etc.
		assert.equal("25930f021785ae14053a322c2dbc1897c3769720", sha1(data, true), "test data malformed");

		test_stream(data, {
			body = string.rep("~", 11085), count = 3;
		});
	end);
end);