From 8b21b6cef318afb503cceaf791a06cdcdf56009c Mon Sep 17 00:00:00 2001
From: Lion
Date: Mon, 15 Jul 2024 12:26:08 +0200
Subject: [PATCH 1/3] add comments to DeComp() magic numbers

---
 src/Common.cpp | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/src/Common.cpp b/src/Common.cpp
index 52aea27..ada2dd6 100644
--- a/src/Common.cpp
+++ b/src/Common.cpp
@@ -390,6 +390,9 @@ void SplitString(const std::string& str, const char delim, std::vector<std::string>& out) {
 
 std::vector<uint8_t> DeComp(std::span<const uint8_t> input) {
     beammp_debugf("got {} bytes of input data", input.size());
+    // start with a decompression buffer of 5x the input size, clamped to a maximum of 15 MB.
+    // this buffer can and will grow, but we don't want to start it too large. A 5x compression ratio
+    // is pretty optimistic.
     std::vector<uint8_t> output_buffer(std::min<size_t>(input.size() * 5, 15 * 1024 * 1024));
 
     uLongf output_size = output_buffer.size();
@@ -401,6 +404,9 @@ std::vector<uint8_t> DeComp(std::span<const uint8_t> input) {
         int res = uncompress(reinterpret_cast<Bytef*>(output_buffer.data()), &output_size,
             reinterpret_cast<const Bytef*>(input.data()), static_cast<uLong>(input.size()));
         if (res == Z_BUF_ERROR) {
+            // We assume that a reasonable maximum size for decompressed packets is 30 MB.
+            // If this were to be an issue, this could be made configurable, however clients have a similar
+            // limit. For that reason, we just reject packets which decompress into too much data.
             if (output_buffer.size() > 30 * 1024 * 1024) {
                 throw std::runtime_error("decompressed packet size of 30 MB exceeded");
             }

From 0950d367d4d92ef584285cec0022f0804dfb2ac2 Mon Sep 17 00:00:00 2001
From: Lion
Date: Mon, 15 Jul 2024 12:31:09 +0200
Subject: [PATCH 2/3] refactor decompression limits and resizing

---
 src/Common.cpp | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/src/Common.cpp b/src/Common.cpp
index ada2dd6..7441b05 100644
--- a/src/Common.cpp
+++ b/src/Common.cpp
@@ -387,13 +387,16 @@ void SplitString(const std::string& str, const char delim, std::vector<std::string>& out) {
+
+static constexpr size_t STARTING_MAX_DECOMPRESSION_BUFFER_SIZE = 15 * 1024 * 1024;
+static constexpr size_t MAX_DECOMPRESSION_BUFFER_SIZE = 30 * 1024 * 1024;
 
 std::vector<uint8_t> DeComp(std::span<const uint8_t> input) {
     beammp_debugf("got {} bytes of input data", input.size());
     // start with a decompression buffer of 5x the input size, clamped to a maximum of 15 MB.
     // this buffer can and will grow, but we don't want to start it too large. A 5x compression ratio
     // is pretty optimistic.
-    std::vector<uint8_t> output_buffer(std::min<size_t>(input.size() * 5, 15 * 1024 * 1024));
+    std::vector<uint8_t> output_buffer(std::min<size_t>(input.size() * 5, STARTING_MAX_DECOMPRESSION_BUFFER_SIZE));
 
     uLongf output_size = output_buffer.size();
@@ -404,14 +407,17 @@ std::vector<uint8_t> DeComp(std::span<const uint8_t> input) {
         int res = uncompress(reinterpret_cast<Bytef*>(output_buffer.data()), &output_size,
             reinterpret_cast<const Bytef*>(input.data()), static_cast<uLong>(input.size()));
         if (res == Z_BUF_ERROR) {
-            // We assume that a reasonable maximum size for decompressed packets is 30 MB.
-            // If this were to be an issue, this could be made configurable, however clients have a similar
+            // We assume that a reasonable maximum size for decompressed packets exists. We want to avoid
+            // a client effectively "zip bombing" us by sending a lot of small packets which decompress
+            // into huge data.
+            // If this limit were to be an issue, this could be made configurable, however clients have a similar
             // limit. For that reason, we just reject packets which decompress into too much data.
-            if (output_buffer.size() > 30 * 1024 * 1024) {
-                throw std::runtime_error("decompressed packet size of 30 MB exceeded");
+            if (output_buffer.size() > MAX_DECOMPRESSION_BUFFER_SIZE) {
+                throw std::runtime_error(fmt::format("decompressed packet size of {} bytes exceeded", MAX_DECOMPRESSION_BUFFER_SIZE));
             }
-            beammp_warn("zlib uncompress() failed, trying with 2x buffer size of " + std::to_string(output_buffer.size() * 2));
-            output_buffer.resize(output_buffer.size() * 2);
+            // if decompression fails, we double the buffer size (up to the allowed limit) and try again
+            output_buffer.resize(std::max(output_buffer.size() * 2, MAX_DECOMPRESSION_BUFFER_SIZE));
+            beammp_warnf("zlib uncompress() failed, trying with a larger buffer size of {}", output_buffer.size());
             output_size = output_buffer.size();
         } else if (res != Z_OK) {
             beammp_error("zlib uncompress() failed: " + std::to_string(res));

From baa2c86e25f93bcd22ff1f1fc4e5e6e21b261f52 Mon Sep 17 00:00:00 2001
From: Lion
Date: Mon, 15 Jul 2024 12:35:32 +0200
Subject: [PATCH 3/3] fix typo in DeComp buffer size logic

---
 src/Common.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/Common.cpp b/src/Common.cpp
index 7441b05..597011d 100644
--- a/src/Common.cpp
+++ b/src/Common.cpp
@@ -412,7 +412,7 @@ std::vector<uint8_t> DeComp(std::span<const uint8_t> input) {
             // into huge data.
             // If this limit were to be an issue, this could be made configurable, however clients have a similar
             // limit. For that reason, we just reject packets which decompress into too much data.
-            if (output_buffer.size() > MAX_DECOMPRESSION_BUFFER_SIZE) {
+            if (output_buffer.size() >= MAX_DECOMPRESSION_BUFFER_SIZE) {
                 throw std::runtime_error(fmt::format("decompressed packet size of {} bytes exceeded", MAX_DECOMPRESSION_BUFFER_SIZE));
             }
             // if decompression fails, we double the buffer size (up to the allowed limit) and try again
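
Notes: for reference, with all three patches applied, the decompression path in DeComp() would look roughly like the sketch below. This is an illustrative reconstruction, not a verbatim copy of src/Common.cpp: the exact declarations of the two size constants, the shape of the surrounding retry loop, the uncompress() call formatting, and the handling after the loop are not fully visible in the hunks above and are assumed here; beammp_debugf/beammp_warnf/beammp_error are the project's logging helpers, stubbed out so the sketch compiles on its own.

#include <algorithm>
#include <cstdint>
#include <span>
#include <stdexcept>
#include <string>
#include <vector>

#include <fmt/format.h>
#include <zlib.h>

// stand-ins for the project's logging helpers, just so this sketch is self-contained
#define beammp_debugf(...) (void)fmt::format(__VA_ARGS__)
#define beammp_warnf(...) (void)fmt::format(__VA_ARGS__)
#define beammp_error(msg) (void)(msg)

// assumed declarations; the names and values are taken from the patches above
static constexpr size_t STARTING_MAX_DECOMPRESSION_BUFFER_SIZE = 15 * 1024 * 1024;
static constexpr size_t MAX_DECOMPRESSION_BUFFER_SIZE = 30 * 1024 * 1024;

std::vector<uint8_t> DeComp(std::span<const uint8_t> input) {
    beammp_debugf("got {} bytes of input data", input.size());
    // start small (5x the compressed size, capped) and grow on demand
    std::vector<uint8_t> output_buffer(std::min<size_t>(input.size() * 5, STARTING_MAX_DECOMPRESSION_BUFFER_SIZE));
    uLongf output_size = output_buffer.size();

    while (true) {
        int res = uncompress(reinterpret_cast<Bytef*>(output_buffer.data()), &output_size,
            reinterpret_cast<const Bytef*>(input.data()), static_cast<uLong>(input.size()));
        if (res == Z_BUF_ERROR) {
            // the buffer was too small; give up once the hard limit has been reached,
            // so a "zip bomb" cannot make us allocate unbounded memory
            if (output_buffer.size() >= MAX_DECOMPRESSION_BUFFER_SIZE) {
                throw std::runtime_error(fmt::format("decompressed packet size of {} bytes exceeded", MAX_DECOMPRESSION_BUFFER_SIZE));
            }
            // grow the buffer and retry; with std::max this goes straight to the limit
            output_buffer.resize(std::max(output_buffer.size() * 2, MAX_DECOMPRESSION_BUFFER_SIZE));
            beammp_warnf("zlib uncompress() failed, trying with a larger buffer size of {}", output_buffer.size());
            output_size = output_buffer.size();
        } else if (res != Z_OK) {
            beammp_error("zlib uncompress() failed: " + std::to_string(res));
            throw std::runtime_error("zlib uncompress() failed");
        } else {
            break;
        }
    }
    // uncompress() wrote the real decompressed size into output_size
    output_buffer.resize(output_size);
    return output_buffer;
}

Because the resize uses std::max rather than std::min, the first Z_BUF_ERROR grows the buffer straight to MAX_DECOMPRESSION_BUFFER_SIZE; the >= comparison introduced in PATCH 3/3 is what then guarantees termination, since either the 30 MB buffer is large enough or the next Z_BUF_ERROR rejects the packet. With the earlier > comparison, the loop would have resized once more, to 60 MB, before throwing.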