File

spec/net_http_parser_spec.lua @ 13801:a5d5fefb8b68 13.0

mod_tls: Enable Prosody's certificate checking for incoming s2s connections (fixes #1916) (thanks Damian, Zash)

Various options in Prosody allow control over the behaviour of the certificate verification process. For example, some deployments choose to allow falling back to traditional "dialback" authentication (XEP-0220), while others verify via DANE, hard-coded fingerprints, or other custom plugins.

Implementing this flexibility requires us to override OpenSSL's default certificate verification, to allow Prosody to verify the certificate itself, apply custom policies and make decisions based on the outcome. To enable our custom logic, we have to suppress OpenSSL's default behaviour of aborting the connection with a TLS alert message. With LuaSec, this can be achieved by using the verifyext "lsec_continue" flag. We also need to use the lsec_ignore_purpose flag, because XMPP s2s uses server certificates as "client" certificates (for mutual TLS verification in outgoing s2s connections).

Commit 99d2100d2918 moved these settings out of the defaults and into mod_s2s, because we only really need these changes for s2s, and they should be opt-in rather than automatically applied to all TLS services we offer. That commit was incomplete, because it only added the flags for incoming direct TLS connections. StartTLS connections are handled by mod_tls, which was not applying the lsec_* flags; it previously worked because they were already in the defaults. This resulted in incoming s2s connections with "invalid" certificates being aborted early by OpenSSL, even if settings such as `s2s_secure_auth = false` or DANE were present in the config.

Outgoing s2s connections inherit verify "none" from the defaults, which means OpenSSL will receive the cert but will not terminate the connection when it is deemed invalid. This means we don't need lsec_continue there, and we also don't need lsec_ignore_purpose (because the remote peer is a "server").

Wondering why we can't just use verify "none" for incoming s2s? It's because in that mode, OpenSSL won't request a certificate from the peer for incoming connections. Setting verify "peer" is how you ask OpenSSL to request a certificate from the client, but it is also what triggers OpenSSL's built-in verification.
author Matthew Wild <mwild1@gmail.com>
date Tue, 01 Apr 2025 17:26:56 +0100
parent 13378:db30ffbf2090
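
The changeset description above refers to LuaSec's verify and verifyext context options. As a rough illustration only (a hedged sketch, not code taken from mod_s2s or mod_tls; the file paths and protocol value are made up), an incoming s2s context along the lines it describes might look like this:

local ssl = require "ssl";

-- Sketch of a server-mode LuaSec context for incoming s2s: request the peer's
-- certificate, but let Prosody (rather than OpenSSL) decide whether to accept it.
local incoming_s2s_ctx = assert(ssl.newcontext({
	mode = "server";
	protocol = "tlsv1_2";                                 -- illustrative value
	key = "/etc/prosody/certs/example.com.key";           -- illustrative path
	certificate = "/etc/prosody/certs/example.com.crt";   -- illustrative path
	verify = "peer";           -- ask the connecting server for a certificate
	verifyext = {
		"lsec_continue";       -- don't abort the handshake when verification fails
		"lsec_ignore_purpose"; -- s2s peers present "server" certificates as TLS clients
	};
}));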
line source

local http_parser = require "net.http.parser";
local sha1 = require "util.hashes".sha1;

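-- Number of bytes fed to the parser at a time, to exercise incremental parsing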
local parser_input_bytes = 3;

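-- Expand the plain "\n" line endings in the test data to the "\r\n" that HTTP uses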
local function CRLF(s)
	return (s:gsub("\n", "\r\n"));
end

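-- Feed 'stream' (either a string, split into parser_input_bytes-sized chunks,
-- or a table of pre-split chunks) into a new parser and check the resulting
-- packet(s) against the expectations in 'expect'.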
local function test_stream(stream, expect)
	local chunks_processed = 0;
	local success_cb = spy.new(function (packet)
		assert.is_table(packet);
		if packet.body ~= false then
			assert.is_equal(expect.body, packet.body);
		end
		if expect.chunks then
			if chunks_processed == 0 then
				assert.is_true(packet.partial);
				packet.body_sink = {
					write = function (_, data)
						chunks_processed = chunks_processed + 1;
						assert.equal(expect.chunks[chunks_processed], data);
						return true;
					end;
				};
			end
		end
	end);

	local function options_cb()
		return {
			-- Force streaming API mode
			body_size_limit = expect.chunks and 0 or nil;
			buffer_size_limit = 10*1024*2;
		};
	end

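	-- Streams that start with "HTTP" are responses, so parse as a client; otherwise as a server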
	local parser = http_parser.new(success_cb, error, (stream[1] or stream):sub(1,4) == "HTTP" and "client" or "server", options_cb)
	if type(stream) == "string" then
		for chunk in stream:gmatch("."..string.rep(".?", parser_input_bytes-1)) do
			parser:feed(chunk);
		end
	else
		for _, chunk in ipairs(stream) do
			parser:feed(chunk);
		end
	end

	if expect.chunks then
		assert.equal(chunks_processed, #expect.chunks);
	end
	assert.spy(success_cb).was_called(expect.count or 1);
end


describe("net.http.parser", function()
	describe("parser", function()
		it("should handle requests with no content-length or body", function ()
			test_stream(
CRLF[[
GET / HTTP/1.1
Host: example.com

]],
				{
					body = "";
				}
			);
		end);

		it("should handle responses with empty body", function ()
			test_stream(
CRLF[[
HTTP/1.1 200 OK
Content-Length: 0

]],
				{
					body = "";
				}
			);
		end);

		it("should handle simple responses", function ()
			test_stream(

CRLF[[
HTTP/1.1 200 OK
Content-Length: 7

Hello
]],
				{
					body = "Hello\r\n", count = 1;
				}
			);
		end);

		it("should handle chunked encoding in responses", function ()
			test_stream(

CRLF[[
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
H
1
e
2
ll
1
o
0


]],
				{
					body = "Hello", count = 3;
				}
			);
		end);

		it("should handle a stream of responses", function ()
			test_stream(

CRLF[[
HTTP/1.1 200 OK
Content-Length: 5

Hello
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
H
1
e
2
ll
1
o
0


]],
				{
					body = "Hello", count = 4;
				}
			);
		end);

		it("should correctly find chunk boundaries", function ()
			test_stream({

CRLF[[
HTTP/1.1 200 OK
Transfer-Encoding: chunked

]].."3\r\n:)\n\r\n"},
				{
					count = 1; -- Once (partial)
					chunks = {
						":)\n"
					};
				}
			);
		end);

		it("should reject very large request heads", function()
			local finished = false;
			local success_cb = spy.new(function()
				finished = true;
			end)
			local error_cb = spy.new(function()
				finished = true;
			end)
			local parser = http_parser.new(success_cb, error_cb, "server", function()
				return { head_size_limit = 1024; body_size_limit = 1024; buffer_size_limit = 2048 };
			end)
			parser:feed("GET / HTTP/1.1\r\n");
			for i = 1, 64 do -- 64 * header line length > head_size_limit
				parser:feed(string.format("Header-%04d: Yet-AnotherValue\r\n", i));
				if finished then
					-- should hit an error around half-way
					break
				end
			end
			if not finished then
				parser:feed("\r\n")
			end
			assert.spy(success_cb).was_called(0);
			assert.spy(error_cb).was_called(1);
			assert.spy(error_cb).was_called_with("header-too-large");
		end)
	end);

	it("should handle large chunked responses", function ()
		local data = io.open("spec/inputs/http/httpstream-chunked-test.txt", "rb"):read("*a");

		-- Just a sanity check... text editors and things may mess with line endings, etc.
		assert.equal("25930f021785ae14053a322c2dbc1897c3769720", sha1(data, true), "test data malformed");

		test_stream(data, {
			body = string.rep("~", 11085), count = 3;
		});
	end);
end);