This option sets the minimum payload size required for compression to be
applied. It helps save CPU on both the server and client sides when a
payload is too small to be worth compressing.
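
As an illustration, a global setting like the one below (the 1k threshold,
backend name and compression directives are only an example, mirroring the
reg-test added by this patch) leaves bodies smaller than 1024 bytes
untouched while larger ones are still compressed:

    global
        tune.comp.minsize 1k

    backend be-gzip
        compression direction both
        compression algo-res gzip
        compression type-res text/plain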
---
 doc/configuration.txt              |   7 ++
 include/haproxy/global-t.h         |   1 +
 reg-tests/compression/min_size.vtc | 131 +++++++++++++++++++++++++++++
 src/cfgparse-global.c              |  12 +++
 src/flt_http_comp.c                |  40 +++++++++
 src/haproxy.c                      |   1 +
 6 files changed, 192 insertions(+)
 create mode 100644 reg-tests/compression/min_size.vtc

diff --git a/doc/configuration.txt b/doc/configuration.txt
index 7f2aa0fbd..bb0a2cffc 100644
--- a/doc/configuration.txt
+++ b/doc/configuration.txt
@@ -1637,6 +1637,7 @@ The following keywords are supported in the "global" section :
    - tune.bufsize
    - tune.bufsize.small
    - tune.comp.maxlevel
+   - tune.comp.minsize
    - tune.disable-fast-forward
    - tune.disable-zero-copy-forwarding
    - tune.epoll.mask-events
@@ -3531,6 +3532,12 @@ tune.comp.maxlevel <number>
   Each stream using compression initializes the compression algorithm with
   this value. The default value is 1.
 
+tune.comp.minsize <size>
+  Sets the minimum payload size in bytes for compression to be applied.
+  Payloads smaller than this size will not be compressed, avoiding unnecessary
+  CPU overhead for data that would not significantly benefit from compression.
+  The default value is 0, which means no minimum size is enforced.
+
 tune.disable-fast-forward [ EXPERIMENTAL ]
   Disables the data fast-forwarding. It is a mechanism to optimize the data
   forwarding by passing data directly from a side to the other one without
diff --git a/include/haproxy/global-t.h b/include/haproxy/global-t.h
index 508ef846a..91cb236f4 100644
--- a/include/haproxy/global-t.h
+++ b/include/haproxy/global-t.h
@@ -186,6 +186,7 @@ struct global {
                int pattern_cache; /* max number of entries in the pattern cache. */
                int sslcachesize;  /* SSL cache size in session, defaults to 20000 */
                int comp_maxlevel;    /* max HTTP compression level */
+               int comp_minsize;     /* min size of HTTP body required to enable compression */
                int pool_low_ratio;   /* max ratio of FDs used before we stop using new idle connections */
                int pool_high_ratio;  /* max ratio of FDs used before we start killing idle connections when creating new connections */
                int pool_low_count;   /* max number of opened fd before we stop using new idle connections */
diff --git a/reg-tests/compression/min_size.vtc b/reg-tests/compression/min_size.vtc
new file mode 100644
index 000000000..02cf710a3
--- /dev/null
+++ b/reg-tests/compression/min_size.vtc
@@ -0,0 +1,131 @@
+varnishtest "Compression ignores small payloads"
+
+#REQUIRE_OPTION=ZLIB|SLZ
+
+feature ignore_unknown_macro
+
+server s1 {
+        rxreq
+        expect req.url == "/response-lower"
+        expect req.http.accept-encoding == "gzip"
+        txresp \
+          -hdr "Content-Type: text/plain" \
+          -hdr "ETag: \"123\"" \
+          -bodylen 50
+
+        rxreq
+        expect req.url == "/response-equal"
+        expect req.http.accept-encoding == "gzip"
+        txresp \
+          -hdr "Content-Type: text/plain" \
+          -hdr "ETag: \"123\"" \
+          -bodylen 1024
+
+        rxreq
+        expect req.url == "/response-greater"
+        expect req.http.accept-encoding == "gzip"
+        txresp \
+          -hdr "Content-Type: text/plain" \
+          -hdr "ETag: \"123\"" \
+          -bodylen 2000
+
+        rxreq
+        expect req.url == "/request-lower"
+        expect req.http.content-encoding == "<undef>"
+        expect req.method == "POST"
+        expect req.bodylen == 50
+        txresp
+
+        rxreq
+        expect req.url == "/request-equal"
+        expect req.http.content-encoding == "gzip"
+        expect req.method == "POST"
+        gunzip
+        expect req.bodylen == 1024
+        txresp
+
+        rxreq
+        expect req.url == "/request-greater"
+        expect req.http.content-encoding == "gzip"
+        expect req.method == "POST"
+        gunzip
+        expect req.bodylen == 2000
+        txresp
+} -start
+
+
+haproxy h1 -conf {
+    global
+        # WT: limit false-positives causing "HTTP header incomplete" due to
+        # idle server connections being randomly used and randomly expiring
+        # under us.
+        tune.idle-pool.shared off
+        tune.comp.minsize 1k
+
+    defaults
+        mode http
+        timeout connect "${HAPROXY_TEST_TIMEOUT-5s}"
+        timeout client  "${HAPROXY_TEST_TIMEOUT-5s}"
+        timeout server  "${HAPROXY_TEST_TIMEOUT-5s}"
+
+    frontend fe-gzip
+        bind "fd@${fe_gzip}"
+        default_backend be-gzip
+
+    backend be-gzip
+        compression direction both
+        compression algo-res gzip
+        compression algo-req gzip
+        compression type-res text/plain
+        compression type-req text/plain
+        server www ${s1_addr}:${s1_port}
+} -start
+
+client c1 -connect ${h1_fe_gzip_sock} {
+        txreq -url "/response-lower" \
+          -hdr "Accept-Encoding: gzip"
+        rxresp
+        expect resp.status == 200
+        expect resp.http.content-encoding == "<undef>"
+        expect resp.http.etag == "\"123\""
+        expect resp.bodylen == 50
+
+        txreq -url "/response-equal" \
+          -hdr "Accept-Encoding: gzip"
+        rxresp
+        expect resp.status == 200
+        expect resp.http.content-encoding == "gzip"
+        expect resp.http.etag == "W/\"123\""
+        gunzip
+        expect resp.bodylen == 1024
+
+        txreq -url "/response-greater" \
+          -hdr "Accept-Encoding: gzip"
+        rxresp
+        expect resp.status == 200
+        expect resp.http.content-encoding == "gzip"
+        expect resp.http.etag == "W/\"123\""
+        gunzip
+        expect resp.bodylen == 2000
+
+        txreq -method POST \
+          -url "/request-lower" \
+          -hdr "Content-Type: text/plain" \
+          -bodylen 50
+        rxresp
+        expect resp.status == 200
+
+        txreq -method POST \
+          -url "/request-equal" \
+          -hdr "Content-Type: text/plain" \
+          -bodylen 1024
+        rxresp
+        expect resp.status == 200
+
+        txreq -method POST \
+          -url "/request-greater" \
+          -hdr "Content-Type: text/plain" \
+          -bodylen 2000
+        rxresp
+        expect resp.status == 200
+} -run
diff --git a/src/cfgparse-global.c b/src/cfgparse-global.c
index 4064b2b9c..893adc646 100644
--- a/src/cfgparse-global.c
+++ b/src/cfgparse-global.c
@@ -1337,6 +1337,17 @@ static int cfg_parse_global_tune_opts(char **args, int section_type,
 
                return 0;
        }
+       else if (strcmp(args[0], "tune.comp.minsize") == 0) {
+               if (*(args[1]) == 0) {
+                       memprintf(err, "'%s' expects an integer argument.", args[0]);
+                       return -1;
+               }
+               res = parse_size_err(args[1], &global.tune.comp_minsize);
+               if (res != NULL)
+                       goto size_err;
+
+               return 0;
+       }
        else if (strcmp(args[0], "tune.pattern.cache-size") == 0) {
                if (*(args[1]) == 0) {
                        memprintf(err, "'%s' expects a positive numeric value", args[0]);
@@ -1715,6 +1726,7 @@ static struct cfg_kw_list cfg_kws = {ILH, {
        { CFG_GLOBAL, "tune.http.logurilen", cfg_parse_global_tune_opts },
        { CFG_GLOBAL, "tune.http.maxhdr", cfg_parse_global_tune_opts },
        { CFG_GLOBAL, "tune.comp.maxlevel", cfg_parse_global_tune_opts },
+       { CFG_GLOBAL, "tune.comp.minsize", cfg_parse_global_tune_opts },
        { CFG_GLOBAL, "tune.pattern.cache-size", cfg_parse_global_tune_opts },
        { CFG_GLOBAL, "tune.disable-fast-forward", 
cfg_parse_global_tune_forward_opts },
        { CFG_GLOBAL, "tune.disable-zero-copy-forwarding", 
cfg_parse_global_tune_forward_opts },
diff --git a/src/flt_http_comp.c b/src/flt_http_comp.c
index e601ff672..582757a14 100644
--- a/src/flt_http_comp.c
+++ b/src/flt_http_comp.c
@@ -137,6 +137,8 @@ comp_prepare_compress_request(struct comp_state *st, struct stream *s, struct ht
        struct http_txn *txn = s->txn;
        struct http_hdr_ctx ctx;
        struct comp_type *comp_type;
+       int32_t pos;
+       unsigned long long len = 0;
 
        ctx.blk = NULL;
        /* Already compressed, don't bother */
@@ -147,6 +149,24 @@ comp_prepare_compress_request(struct comp_state *st, struct stream *s, struct ht
                return;
        comp_type = NULL;
 
+       /* compress only if the body size is >= the minimum size */
+       if (global.tune.comp_minsize > 0) {
+               for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+                       struct htx_blk *blk = htx_get_blk(htx, pos);
+                       enum htx_blk_type type = htx_get_blk_type(blk);
+
+                       if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+                               break;
+                       if (type == HTX_BLK_DATA)
+                               len += htx_get_blksz(blk);
+               }
+               if (htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
+                       len += htx->extra;
+               /* small requests should not be compressed */
+               if (len < global.tune.comp_minsize)
+                       goto fail;
+       }
+
        /*
         * We don't want to compress content-types not listed in the "compression type" directive if any. If no content-type was found but configuration
         * requires one, we don't compress either. Backend has the priority.
@@ -624,6 +644,8 @@ select_compression_response_header(struct comp_state *st, struct stream *s, stru
        struct http_txn *txn = s->txn;
        struct http_hdr_ctx ctx;
        struct comp_type *comp_type;
+       int32_t pos;
+       unsigned long long len = 0;
 
        /* no common compression algorithm was found in request header */
        if (st->comp_algo[COMP_DIR_RES] == NULL)
@@ -650,6 +672,24 @@ select_compression_response_header(struct comp_state *st, struct stream *s, stru
        if (!(msg->flags & HTTP_MSGF_XFER_LEN) || msg->flags & HTTP_MSGF_BODYLESS)
 
+       /* compress only if the body size is >= the minimum size */
+       if (global.tune.comp_minsize > 0) {
+               for (pos = htx_get_first(htx); pos != -1; pos = htx_get_next(htx, pos)) {
+                       struct htx_blk *blk = htx_get_blk(htx, pos);
+                       enum htx_blk_type type = htx_get_blk_type(blk);
+
+                       if (type == HTX_BLK_TLR || type == HTX_BLK_EOT)
+                               break;
+                       if (type == HTX_BLK_DATA)
+                               len += htx_get_blksz(blk);
+               }
+               if (htx->extra != HTX_UNKOWN_PAYLOAD_LENGTH)
+                       len += htx->extra;
+               /* small responses should not be compressed */
+               if (len < global.tune.comp_minsize)
+                       goto fail;
+       }
+
        /* content is already compressed */
        ctx.blk = NULL;
        if (http_find_header(htx, ist("Content-Encoding"), &ctx, 1))
diff --git a/src/haproxy.c b/src/haproxy.c
index 31d20d55f..4e054a043 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -184,6 +184,7 @@ struct global global = {
                .sslcachesize = SSLCACHESIZE,
 #endif
                .comp_maxlevel = 1,
+               .comp_minsize = 0,
 #ifdef DEFAULT_IDLE_TIMER
                .idle_timer = DEFAULT_IDLE_TIMER,
 #else
-- 
2.39.5