Merge branch 'fix-little-issues' into minor

Author: Lion Kortlepel
Date:   2024-07-28 10:54:55 +02:00
Commit: f3627ce0bf (signed, GPG key ID 4322FF2B4C71259B)

@@ -384,10 +384,16 @@ void SplitString(const std::string& str, const char delim, std::vector<std::string>& out)
         out.push_back(str.substr(start, end - start));
     }
 }
 
+static constexpr size_t STARTING_MAX_DECOMPRESSION_BUFFER_SIZE = 15 * 1024 * 1024;
+static constexpr size_t MAX_DECOMPRESSION_BUFFER_SIZE = 30 * 1024 * 1024;
+
 std::vector<uint8_t> DeComp(std::span<const uint8_t> input) {
     beammp_debugf("got {} bytes of input data", input.size());
-    std::vector<uint8_t> output_buffer(std::min<size_t>(input.size() * 5, 15 * 1024 * 1024));
+    // start with a decompression buffer of 5x the input size, clamped to a maximum of 15 MB.
+    // this buffer can and will grow, but we don't want to start it too large. A 5x compression ratio
+    // is pretty optimistic.
+    std::vector<uint8_t> output_buffer(std::min<size_t>(input.size() * 5, STARTING_MAX_DECOMPRESSION_BUFFER_SIZE));
     uLongf output_size = output_buffer.size();
@@ -398,11 +404,17 @@ std::vector<uint8_t> DeComp(std::span<const uint8_t> input) {
         reinterpret_cast<const Bytef*>(input.data()),
         static_cast<uLongf>(input.size()));
     if (res == Z_BUF_ERROR) {
-        if (output_buffer.size() > 30 * 1024 * 1024) {
-            throw std::runtime_error("decompressed packet size of 30 MB exceeded");
+        // We assume that a reasonable maximum size for decompressed packets exists. We want to avoid
+        // a client effectively "zip bombing" us by sending a lot of small packets which decompress
+        // into huge data.
+        // If this limit were to be an issue, it could be made configurable; however, clients have a similar
+        // limit. For that reason, we just reject packets which decompress into too much data.
+        if (output_buffer.size() >= MAX_DECOMPRESSION_BUFFER_SIZE) {
+            throw std::runtime_error(fmt::format("decompressed packet size of {} bytes exceeded", MAX_DECOMPRESSION_BUFFER_SIZE));
         }
-        beammp_warn("zlib uncompress() failed, trying with 2x buffer size of " + std::to_string(output_buffer.size() * 2));
-        output_buffer.resize(output_buffer.size() * 2);
+        // if decompression fails, we double the buffer size (up to the allowed limit) and try again
+        output_buffer.resize(std::min<size_t>(output_buffer.size() * 2, MAX_DECOMPRESSION_BUFFER_SIZE));
+        beammp_warnf("zlib uncompress() failed, trying with a larger buffer size of {}", output_buffer.size());
         output_size = output_buffer.size();
     } else if (res != Z_OK) {
         beammp_error("zlib uncompress() failed: " + std::to_string(res));
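
Put together, the pattern this commit lands on is: guess an output size, retry on Z_BUF_ERROR with a doubled buffer, and refuse to grow past a hard cap. Below is a minimal self-contained sketch of that pattern, assuming only zlib and C++20; the DecompressCapped name, the empty-input guard, and the exact cap values are illustrative, not part of the BeamMP codebase.

    #include <algorithm>
    #include <cstdint>
    #include <span>
    #include <stdexcept>
    #include <string>
    #include <vector>
    #include <zlib.h>

    // Illustrative caps mirroring the commit's constants.
    static constexpr size_t kStartCap = 15 * 1024 * 1024;
    static constexpr size_t kHardCap = 30 * 1024 * 1024;

    // Hypothetical helper, not the BeamMP function itself.
    std::vector<uint8_t> DecompressCapped(std::span<const uint8_t> input) {
        if (input.empty()) {
            return {}; // nothing to decompress; avoids looping on a zero-size buffer
        }
        // Optimistic first guess: 5x the compressed size, clamped to kStartCap.
        std::vector<uint8_t> out(std::min<size_t>(input.size() * 5, kStartCap));
        for (;;) {
            uLongf out_size = out.size();
            int res = uncompress(reinterpret_cast<Bytef*>(out.data()), &out_size,
                                 reinterpret_cast<const Bytef*>(input.data()),
                                 static_cast<uLong>(input.size()));
            if (res == Z_OK) {
                out.resize(out_size); // uncompress reports the real size via destLen
                return out;
            }
            if (res != Z_BUF_ERROR) {
                throw std::runtime_error("uncompress() failed: " + std::to_string(res));
            }
            // Z_BUF_ERROR: the output buffer was too small. Refuse to grow past
            // the hard cap so a zip bomb cannot force unbounded allocation.
            if (out.size() >= kHardCap) {
                throw std::runtime_error("decompressed packet size limit exceeded");
            }
            out.resize(std::min<size_t>(out.size() * 2, kHardCap));
        }
    }

Shrinking the vector to out_size on success matters because uncompress writes the actual decompressed length back through its destLen argument; without it, callers would see a buffer padded with trailing zeros up to the last guess.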