Hi,

> Here is a more detailed review.

Thanks for the feedback. I have incorporated the suggestions and attached a new version of the patch (v3-0001). The required documentation changes are also included in the updated patch (v3-0001).

> Interesting approach. This unfortunately has the effect of making that
> test case file look a bit incoherent -- the comment at the top of the
> file isn't really accurate any more, for example, and the plain_format
> flag does more than just cause us to use -Fp; it also causes us NOT to
> use --target server:X. However, that might be something we can figure
> out a way to clean up. Alternatively, we could have a new test case
> file that is structured like 002_algorithm.pl but looping over
> compression methods rather than checksum algorithms, and testing each
> one with --server-compress and -Fp. It might be easier to make that
> look nice (but I'm not 100% sure).

Added a new test case file, "009_extract.pl", to test server-compressed plain-format backups (v3-0002).

> I committed the base backup target patch yesterday, and today I
> updated the remaining code in light of Michael Paquier's commit
> 5c649fe153367cdab278738ee4aebbfd158e0546. Here is the resulting patch.

The v13 patch does not apply on the latest HEAD; it needs a rebase. I have applied it on commit dc43fc9b3aa3e0fa9c84faddad6d301813580f88 to validate the gzip decompression patches.

Thanks,
Dipesh
From 9ec2efcc908e988409cd9ba19ea64a50012163a2 Mon Sep 17 00:00:00 2001 From: Dipesh Pandit <dipesh.pan...@enterprisedb.com> Date: Mon, 24 Jan 2022 15:28:48 +0530 Subject: [PATCH 1/2] Support for extracting gzip compressed archive pg_basebackup can support server side compression using gzip. In order to support plain format backup with option '-Fp' we need to add support for decompressing the compressed blocks at client. This patch addresses the extraction of gzip compressed blocks at client. --- doc/src/sgml/ref/pg_basebackup.sgml | 8 +- src/bin/pg_basebackup/Makefile | 1 + src/bin/pg_basebackup/bbstreamer.h | 1 + src/bin/pg_basebackup/bbstreamer_file.c | 182 ---------------- src/bin/pg_basebackup/bbstreamer_gzip.c | 376 ++++++++++++++++++++++++++++++++ src/bin/pg_basebackup/pg_basebackup.c | 19 +- 6 files changed, 401 insertions(+), 186 deletions(-) create mode 100644 src/bin/pg_basebackup/bbstreamer_gzip.c diff --git a/doc/src/sgml/ref/pg_basebackup.sgml b/doc/src/sgml/ref/pg_basebackup.sgml index 1d0df34..19849be 100644 --- a/doc/src/sgml/ref/pg_basebackup.sgml +++ b/doc/src/sgml/ref/pg_basebackup.sgml @@ -428,8 +428,12 @@ PostgreSQL documentation </para> <para> When the tar format is used, the suffix <filename>.gz</filename> will - automatically be added to all tar filenames. Compression is not - available in plain format. + automatically be added to all tar filenames. + </para> + <para> + Server compression can be specified with plain format backup. It + enables compression of the archive at server and extract the + compressed archive at client. </para> </listitem> </varlistentry> diff --git a/src/bin/pg_basebackup/Makefile b/src/bin/pg_basebackup/Makefile index 5b18851..78d96c6 100644 --- a/src/bin/pg_basebackup/Makefile +++ b/src/bin/pg_basebackup/Makefile @@ -38,6 +38,7 @@ OBJS = \ BBOBJS = \ pg_basebackup.o \ bbstreamer_file.o \ + bbstreamer_gzip.o \ bbstreamer_inject.o \ bbstreamer_tar.o diff --git a/src/bin/pg_basebackup/bbstreamer.h b/src/bin/pg_basebackup/bbstreamer.h index fc88b50..270b0df 100644 --- a/src/bin/pg_basebackup/bbstreamer.h +++ b/src/bin/pg_basebackup/bbstreamer.h @@ -205,6 +205,7 @@ extern bbstreamer *bbstreamer_extractor_new(const char *basepath, const char *(*link_map) (const char *), void (*report_output_file) (const char *)); +extern bbstreamer *bbstreamer_gzip_extractor_new(bbstreamer *next); extern bbstreamer *bbstreamer_tar_parser_new(bbstreamer *next); extern bbstreamer *bbstreamer_tar_terminator_new(bbstreamer *next); extern bbstreamer *bbstreamer_tar_archiver_new(bbstreamer *next); diff --git a/src/bin/pg_basebackup/bbstreamer_file.c b/src/bin/pg_basebackup/bbstreamer_file.c index 77ca222..d721f87 100644 --- a/src/bin/pg_basebackup/bbstreamer_file.c +++ b/src/bin/pg_basebackup/bbstreamer_file.c @@ -11,10 +11,6 @@ #include "postgres_fe.h" -#ifdef HAVE_LIBZ -#include <zlib.h> -#endif - #include <unistd.h> #include "bbstreamer.h" @@ -30,15 +26,6 @@ typedef struct bbstreamer_plain_writer bool should_close_file; } bbstreamer_plain_writer; -#ifdef HAVE_LIBZ -typedef struct bbstreamer_gzip_writer -{ - bbstreamer base; - char *pathname; - gzFile gzfile; -} bbstreamer_gzip_writer; -#endif - typedef struct bbstreamer_extractor { bbstreamer base; @@ -62,22 +49,6 @@ const bbstreamer_ops bbstreamer_plain_writer_ops = { .free = bbstreamer_plain_writer_free }; -#ifdef HAVE_LIBZ -static void bbstreamer_gzip_writer_content(bbstreamer *streamer, - bbstreamer_member *member, - const char *data, int len, - bbstreamer_archive_context context); -static void 
bbstreamer_gzip_writer_finalize(bbstreamer *streamer); -static void bbstreamer_gzip_writer_free(bbstreamer *streamer); -static const char *get_gz_error(gzFile gzf); - -const bbstreamer_ops bbstreamer_gzip_writer_ops = { - .content = bbstreamer_gzip_writer_content, - .finalize = bbstreamer_gzip_writer_finalize, - .free = bbstreamer_gzip_writer_free -}; -#endif - static void bbstreamer_extractor_content(bbstreamer *streamer, bbstreamer_member *member, const char *data, int len, @@ -196,159 +167,6 @@ bbstreamer_plain_writer_free(bbstreamer *streamer) } /* - * Create a bbstreamer that just compresses data using gzip, and then writes - * it to a file. - * - * As in the case of bbstreamer_plain_writer_new, pathname is always used - * for error reporting purposes; if file is NULL, it is also the opened and - * closed so that the data may be written there. - */ -bbstreamer * -bbstreamer_gzip_writer_new(char *pathname, FILE *file, int compresslevel) -{ -#ifdef HAVE_LIBZ - bbstreamer_gzip_writer *streamer; - - streamer = palloc0(sizeof(bbstreamer_gzip_writer)); - *((const bbstreamer_ops **) &streamer->base.bbs_ops) = - &bbstreamer_gzip_writer_ops; - - streamer->pathname = pstrdup(pathname); - - if (file == NULL) - { - streamer->gzfile = gzopen(pathname, "wb"); - if (streamer->gzfile == NULL) - { - pg_log_error("could not create compressed file \"%s\": %m", - pathname); - exit(1); - } - } - else - { - int fd = dup(fileno(file)); - - if (fd < 0) - { - pg_log_error("could not duplicate stdout: %m"); - exit(1); - } - - streamer->gzfile = gzdopen(fd, "wb"); - if (streamer->gzfile == NULL) - { - pg_log_error("could not open output file: %m"); - exit(1); - } - } - - if (gzsetparams(streamer->gzfile, compresslevel, - Z_DEFAULT_STRATEGY) != Z_OK) - { - pg_log_error("could not set compression level %d: %s", - compresslevel, get_gz_error(streamer->gzfile)); - exit(1); - } - - return &streamer->base; -#else - pg_log_error("this build does not support compression"); - exit(1); -#endif -} - -#ifdef HAVE_LIBZ -/* - * Write archive content to gzip file. - */ -static void -bbstreamer_gzip_writer_content(bbstreamer *streamer, - bbstreamer_member *member, const char *data, - int len, bbstreamer_archive_context context) -{ - bbstreamer_gzip_writer *mystreamer; - - mystreamer = (bbstreamer_gzip_writer *) streamer; - - if (len == 0) - return; - - errno = 0; - if (gzwrite(mystreamer->gzfile, data, len) != len) - { - /* if write didn't set errno, assume problem is no disk space */ - if (errno == 0) - errno = ENOSPC; - pg_log_error("could not write to compressed file \"%s\": %s", - mystreamer->pathname, get_gz_error(mystreamer->gzfile)); - exit(1); - } -} - -/* - * End-of-archive processing when writing to a gzip file consists of just - * calling gzclose. - * - * It makes no difference whether we opened the file or the caller did it, - * because libz provides no way of avoiding a close on the underling file - * handle. Notice, however, that bbstreamer_gzip_writer_new() uses dup() to - * work around this issue, so that the behavior from the caller's viewpoint - * is the same as for bbstreamer_plain_writer. 
- */ -static void -bbstreamer_gzip_writer_finalize(bbstreamer *streamer) -{ - bbstreamer_gzip_writer *mystreamer; - - mystreamer = (bbstreamer_gzip_writer *) streamer; - - errno = 0; /* in case gzclose() doesn't set it */ - if (gzclose(mystreamer->gzfile) != 0) - { - pg_log_error("could not close compressed file \"%s\": %m", - mystreamer->pathname); - exit(1); - } - - mystreamer->gzfile = NULL; -} - -/* - * Free memory associated with this bbstreamer. - */ -static void -bbstreamer_gzip_writer_free(bbstreamer *streamer) -{ - bbstreamer_gzip_writer *mystreamer; - - mystreamer = (bbstreamer_gzip_writer *) streamer; - - Assert(mystreamer->base.bbs_next == NULL); - Assert(mystreamer->gzfile == NULL); - - pfree(mystreamer->pathname); - pfree(mystreamer); -} - -/* - * Helper function for libz error reporting. - */ -static const char * -get_gz_error(gzFile gzf) -{ - int errnum; - const char *errmsg; - - errmsg = gzerror(gzf, &errnum); - if (errnum == Z_ERRNO) - return strerror(errno); - else - return errmsg; -} -#endif - -/* * Create a bbstreamer that extracts an archive. * * All pathnames in the archive are interpreted relative to basepath. diff --git a/src/bin/pg_basebackup/bbstreamer_gzip.c b/src/bin/pg_basebackup/bbstreamer_gzip.c new file mode 100644 index 0000000..1144090 --- /dev/null +++ b/src/bin/pg_basebackup/bbstreamer_gzip.c @@ -0,0 +1,376 @@ +/*------------------------------------------------------------------------- + * + * bbstreamer_gzip.c + * + * Portions Copyright (c) 1996-2022, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/bin/pg_basebackup/bbstreamer_gzip.c + *------------------------------------------------------------------------- + */ + +#include "postgres_fe.h" + +#ifdef HAVE_LIBZ +#include <zlib.h> +#endif + +#include "bbstreamer.h" +#include "common/logging.h" +#include "common/file_perm.h" +#include "common/string.h" + +#ifdef HAVE_LIBZ +typedef struct bbstreamer_gzip_writer +{ + bbstreamer base; + char *pathname; + gzFile gzfile; +} bbstreamer_gzip_writer; + +typedef struct bbstreamer_gzip_extractor +{ + bbstreamer base; + z_stream zstream; + size_t bytes_written; +} bbstreamer_gzip_extractor; + +static void bbstreamer_gzip_writer_content(bbstreamer *streamer, + bbstreamer_member *member, + const char *data, int len, + bbstreamer_archive_context context); +static void bbstreamer_gzip_writer_finalize(bbstreamer *streamer); +static void bbstreamer_gzip_writer_free(bbstreamer *streamer); +static const char *get_gz_error(gzFile gzf); + +const bbstreamer_ops bbstreamer_gzip_writer_ops = { + .content = bbstreamer_gzip_writer_content, + .finalize = bbstreamer_gzip_writer_finalize, + .free = bbstreamer_gzip_writer_free +}; + +static void bbstreamer_gzip_extractor_content(bbstreamer *streamer, + bbstreamer_member *member, + const char *data, int len, + bbstreamer_archive_context context); +static void bbstreamer_gzip_extractor_finalize(bbstreamer *streamer); +static void bbstreamer_gzip_extractor_free(bbstreamer *streamer); +static void *gzip_palloc(void *opaque, unsigned items, unsigned size); +static void gzip_pfree(void *opaque, void *address); + +const bbstreamer_ops bbstreamer_gzip_extractor_ops = { + .content = bbstreamer_gzip_extractor_content, + .finalize = bbstreamer_gzip_extractor_finalize, + .free = bbstreamer_gzip_extractor_free +}; +#endif + +/* + * Create a bbstreamer that just compresses data using gzip, and then writes + * it to a file. 
+ * + * As in the case of bbstreamer_plain_writer_new, pathname is always used + * for error reporting purposes; if file is NULL, it is also the opened and + * closed so that the data may be written there. + */ +bbstreamer * +bbstreamer_gzip_writer_new(char *pathname, FILE *file, int compresslevel) +{ +#ifdef HAVE_LIBZ + bbstreamer_gzip_writer *streamer; + + streamer = palloc0(sizeof(bbstreamer_gzip_writer)); + *((const bbstreamer_ops **) &streamer->base.bbs_ops) = + &bbstreamer_gzip_writer_ops; + + streamer->pathname = pstrdup(pathname); + + if (file == NULL) + { + streamer->gzfile = gzopen(pathname, "wb"); + if (streamer->gzfile == NULL) + { + pg_log_error("could not create compressed file \"%s\": %m", + pathname); + exit(1); + } + } + else + { + int fd = dup(fileno(file)); + + if (fd < 0) + { + pg_log_error("could not duplicate stdout: %m"); + exit(1); + } + + streamer->gzfile = gzdopen(fd, "wb"); + if (streamer->gzfile == NULL) + { + pg_log_error("could not open output file: %m"); + exit(1); + } + } + + if (gzsetparams(streamer->gzfile, compresslevel, + Z_DEFAULT_STRATEGY) != Z_OK) + { + pg_log_error("could not set compression level %d: %s", + compresslevel, get_gz_error(streamer->gzfile)); + exit(1); + } + + return &streamer->base; +#else + pg_log_error("this build does not support compression"); + exit(1); +#endif +} + +#ifdef HAVE_LIBZ +/* + * Write archive content to gzip file. + */ +static void +bbstreamer_gzip_writer_content(bbstreamer *streamer, + bbstreamer_member *member, const char *data, + int len, bbstreamer_archive_context context) +{ + bbstreamer_gzip_writer *mystreamer; + + mystreamer = (bbstreamer_gzip_writer *) streamer; + + if (len == 0) + return; + + errno = 0; + if (gzwrite(mystreamer->gzfile, data, len) != len) + { + /* if write didn't set errno, assume problem is no disk space */ + if (errno == 0) + errno = ENOSPC; + pg_log_error("could not write to compressed file \"%s\": %s", + mystreamer->pathname, get_gz_error(mystreamer->gzfile)); + exit(1); + } +} + +/* + * End-of-archive processing when writing to a gzip file consists of just + * calling gzclose. + * + * It makes no difference whether we opened the file or the caller did it, + * because libz provides no way of avoiding a close on the underling file + * handle. Notice, however, that bbstreamer_gzip_writer_new() uses dup() to + * work around this issue, so that the behavior from the caller's viewpoint + * is the same as for bbstreamer_plain_writer. + */ +static void +bbstreamer_gzip_writer_finalize(bbstreamer *streamer) +{ + bbstreamer_gzip_writer *mystreamer; + + mystreamer = (bbstreamer_gzip_writer *) streamer; + + errno = 0; /* in case gzclose() doesn't set it */ + if (gzclose(mystreamer->gzfile) != 0) + { + pg_log_error("could not close compressed file \"%s\": %m", + mystreamer->pathname); + exit(1); + } + + mystreamer->gzfile = NULL; +} + +/* + * Free memory associated with this bbstreamer. + */ +static void +bbstreamer_gzip_writer_free(bbstreamer *streamer) +{ + bbstreamer_gzip_writer *mystreamer; + + mystreamer = (bbstreamer_gzip_writer *) streamer; + + Assert(mystreamer->base.bbs_next == NULL); + Assert(mystreamer->gzfile == NULL); + + pfree(mystreamer->pathname); + pfree(mystreamer); +} + +/* + * Helper function for libz error reporting. 
+ */ +static const char * +get_gz_error(gzFile gzf) +{ + int errnum; + const char *errmsg; + + errmsg = gzerror(gzf, &errnum); + if (errnum == Z_ERRNO) + return strerror(errno); + else + return errmsg; +} +#endif + +/* + * Create a new base backup streamer that performs decompression of gzip + * compressed blocks. + */ +bbstreamer * +bbstreamer_gzip_extractor_new(bbstreamer *next) +{ +#ifdef HAVE_LIBZ + bbstreamer_gzip_extractor *streamer; + z_stream *zs; + + Assert(next != NULL); + + streamer = palloc0(sizeof(bbstreamer_gzip_extractor)); + *((const bbstreamer_ops **) &streamer->base.bbs_ops) = + &bbstreamer_gzip_extractor_ops; + + streamer->base.bbs_next = next; + initStringInfo(&streamer->base.bbs_buffer); + + /* Initialize internal stream state for decompression */ + zs = &streamer->zstream; + zs->zalloc = gzip_palloc; + zs->zfree = gzip_pfree; + zs->next_out = (uint8 *) streamer->base.bbs_buffer.data; + zs->avail_out = streamer->base.bbs_buffer.maxlen; + + /* + * Data compression was initialized using deflateInit2 to request a gzip + * header. Similarly, we are using inflateInit2 to initialize data + * decompression. + * + * Per the documentation of inflateInit2, the second argument is + * "windowBits" and it's value must be greater than or equal to the value + * provided while compressing the data, so we are using the maximum + * possible value for safety. + */ + if (inflateInit2(zs, 15 + 16) != Z_OK) + { + pg_log_error("could not initialize compression library"); + exit(1); + } + + return &streamer->base; +#else + pg_log_error("this build does not support compression"); + exit(1); +#endif +} + +#ifdef HAVE_LIBZ +/* + * Decompress the input data to output buffer until we ran out of the input + * data. Each time the output buffer is full invoke bbstreamer_content to pass + * on the decompressed data to next streamer. + */ +static void +bbstreamer_gzip_extractor_content(bbstreamer *streamer, + bbstreamer_member *member, + const char *data, int len, + bbstreamer_archive_context context) +{ + bbstreamer_gzip_extractor *mystreamer = (bbstreamer_gzip_extractor *) streamer; + z_stream *zs = &mystreamer->zstream; + + + zs->next_in = (uint8 *) data; + zs->avail_in = len; + + /* Process the current chunk */ + while (zs->avail_in > 0) + { + int res; + + Assert(mystreamer->bytes_written < mystreamer->base.bbs_buffer.maxlen); + + zs->next_out = (uint8 *) + mystreamer->base.bbs_buffer.data + mystreamer->bytes_written; + zs->avail_out = mystreamer->base.bbs_buffer.maxlen - mystreamer->bytes_written; + + /* + * Decompresses data starting at zs->next_in and update zs->next_in + * and zs->avail_in, generate output data starting at zs->next_out + * and update zs->next_out and zs->avail_out accordingly. + */ + res = inflate(zs, Z_NO_FLUSH); + + if (res == Z_STREAM_ERROR) + pg_log_error("could not decompress data: %s", zs->msg); + + mystreamer->bytes_written = mystreamer->base.bbs_buffer.maxlen - zs->avail_out; + + /* If output buffer is full then pass on the content to next streamer */ + if (mystreamer->bytes_written >= mystreamer->base.bbs_buffer.maxlen) + { + bbstreamer_content(mystreamer->base.bbs_next, member, + mystreamer->base.bbs_buffer.data, + mystreamer->base.bbs_buffer.maxlen, context); + mystreamer->bytes_written = 0; + } + } +} + +/* + * End-of-stream processing. 
+ */ +static void +bbstreamer_gzip_extractor_finalize(bbstreamer *streamer) +{ + bbstreamer_gzip_extractor *mystreamer = (bbstreamer_gzip_extractor *) streamer; + + /* + * End of the stream, if there is some pending data in output buffers then + * we must forward it to next streamer. + */ + bbstreamer_content(mystreamer->base.bbs_next, NULL, + mystreamer->base.bbs_buffer.data, + mystreamer->base.bbs_buffer.maxlen, + BBSTREAMER_UNKNOWN); + + bbstreamer_finalize(mystreamer->base.bbs_next); +} + +/* + * Free memory. + */ +static void +bbstreamer_gzip_extractor_free(bbstreamer *streamer) +{ + bbstreamer_gzip_extractor *mystreamer = (bbstreamer_gzip_extractor *) streamer; + + bbstreamer_free(mystreamer->base.bbs_next); + pfree(mystreamer->base.bbs_buffer.data); + pfree(streamer); +} + +/* + * Wrapper function to adjust the signature of palloc to match what libz + * expects. + */ +static void * +gzip_palloc(void *opaque, unsigned items, unsigned size) +{ + return palloc(items * size); +} + +/* + * Wrapper function to adjust the signature of pfree to match what libz + * expects. + */ +static void +gzip_pfree(void *opaque, void *address) +{ + pfree(address); +} +#endif diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index 6fdd1b9..6b7cff9 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -1108,7 +1108,8 @@ CreateBackupStreamer(char *archive_name, char *spclocation, bbstreamer *streamer = NULL; bbstreamer *manifest_inject_streamer = NULL; bool inject_manifest; - bool is_tar; + bool is_tar, + is_tar_gz; bool must_parse_archive; int archive_name_len = strlen(archive_name); @@ -1123,6 +1124,10 @@ CreateBackupStreamer(char *archive_name, char *spclocation, is_tar = (archive_name_len > 4 && strcmp(archive_name + archive_name_len - 4, ".tar") == 0); + /* Is this a gzip archive? */ + is_tar_gz = (archive_name_len > 8 && + strcmp(archive_name + archive_name_len - 3, ".gz") == 0); + /* * We have to parse the archive if (1) we're suppose to extract it, or if * (2) we need to inject backup_manifest or recovery configuration into it. @@ -1132,7 +1137,7 @@ CreateBackupStreamer(char *archive_name, char *spclocation, (spclocation == NULL && writerecoveryconf)); /* At present, we only know how to parse tar archives. */ - if (must_parse_archive && !is_tar) + if (must_parse_archive && !is_tar && !is_tar_gz) { pg_log_error("unable to parse archive: %s", archive_name); pg_log_info("only tar archives can be parsed"); @@ -1246,6 +1251,16 @@ CreateBackupStreamer(char *archive_name, char *spclocation, else if (expect_unterminated_tarfile) streamer = bbstreamer_tar_terminator_new(streamer); +#ifdef HAVE_LIBZ + /* + * If the user has requested a server compressed archive along with archive + * extraction at client then we need to decompress it. + */ + if (format == 'p' && compressmethod == COMPRESSION_GZIP && + compressloc == COMPRESS_LOCATION_SERVER) + streamer = bbstreamer_gzip_extractor_new(streamer); +#endif + /* Return the results. */ *manifest_inject_streamer_p = manifest_inject_streamer; return streamer; -- 1.8.3.1
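For anyone who wants to try the decompression path in isolation, outside pg_basebackup, here is a minimal stand-alone sketch of the zlib pattern the gzip extractor above follows: inflateInit2() with windowBits 15 + 16 so that a gzip header is accepted, a streaming inflate() loop that forwards the output buffer as it fills, and inflateEnd() once the stream ends. The file name and build command are only illustrative; this program is not part of the patch. It simply reads a gzip stream from stdin and writes the decompressed bytes to stdout.

/*
 * gunzip_sketch.c -- illustrative only, not part of the patch.
 *
 * Decompress a gzip stream from stdin to stdout using the same zlib
 * calls as bbstreamer_gzip_extractor: inflateInit2() with windowBits
 * 15 + 16, then a streaming inflate() loop.
 *
 * Build with something like: cc -o gunzip_sketch gunzip_sketch.c -lz
 */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define CHUNK 16384

int
main(void)
{
	z_stream	zs;
	unsigned char in[CHUNK];
	unsigned char out[CHUNK];
	int			ret = Z_OK;

	/* zalloc/zfree/opaque left NULL: use zlib's default allocator */
	memset(&zs, 0, sizeof(zs));

	/* 15 = maximum window size, +16 = expect a gzip header and trailer */
	if (inflateInit2(&zs, 15 + 16) != Z_OK)
	{
		fprintf(stderr, "could not initialize decompression library\n");
		return 1;
	}

	while (ret != Z_STREAM_END)
	{
		zs.avail_in = fread(in, 1, CHUNK, stdin);
		if (zs.avail_in == 0)
			break;				/* EOF (or read error) before end of stream */
		zs.next_in = in;

		/* Run inflate() until this input chunk is fully consumed */
		do
		{
			zs.next_out = out;
			zs.avail_out = CHUNK;

			ret = inflate(&zs, Z_NO_FLUSH);
			if (ret == Z_STREAM_ERROR || ret == Z_NEED_DICT ||
				ret == Z_DATA_ERROR || ret == Z_MEM_ERROR)
			{
				fprintf(stderr, "could not decompress data: %s\n",
						zs.msg ? zs.msg : "unknown error");
				inflateEnd(&zs);
				return 1;
			}

			/* Forward whatever the output buffer now holds */
			fwrite(out, 1, CHUNK - zs.avail_out, stdout);
		} while (zs.avail_out == 0);
	}

	inflateEnd(&zs);
	return (ret == Z_STREAM_END) ? 0 : 1;
}

With the patches applied, the same path is exercised end-to-end by a backup taken with -Fp and --compress server-gzip:5, as in the new 009_extract.pl test below.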
From 534ae0b539fe981361a50e1b2794ff88f466b5ff Mon Sep 17 00:00:00 2001 From: Dipesh Pandit <dipesh.pan...@enterprisedb.com> Date: Mon, 24 Jan 2022 18:06:12 +0530 Subject: [PATCH 2/2] Test plain format server compressed gzip backup --- src/bin/pg_verifybackup/t/009_extract.pl | 66 ++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100755 src/bin/pg_verifybackup/t/009_extract.pl diff --git a/src/bin/pg_verifybackup/t/009_extract.pl b/src/bin/pg_verifybackup/t/009_extract.pl new file mode 100755 index 0000000..0eeab46 --- /dev/null +++ b/src/bin/pg_verifybackup/t/009_extract.pl @@ -0,0 +1,66 @@ + +# Copyright (c) 2021-2022, PostgreSQL Global Development Group + +# This test aims to verify server compression for plain format backup. + +use strict; +use warnings; +use Cwd; +use Config; +use File::Path qw(rmtree); +use PostgreSQL::Test::Cluster; +use PostgreSQL::Test::Utils; +use Test::More tests => 7; + +my $primary = PostgreSQL::Test::Cluster->new('primary'); +$primary->init(allows_streaming => 1); +$primary->start; + +my @test_configuration = ( + { + 'compression_method' => 'none', + 'backup_flags' => [], + 'enabled' => 1 + }, + { + 'compression_method' => 'gzip', + 'backup_flags' => ['--compress', 'server-gzip:5'], + 'enabled' => check_pg_config("#define HAVE_LIBZ 1") + } +); + +for my $tc (@test_configuration) +{ + my $backup_path = $primary->backup_dir . '/' . 'extract_backup'; + my $method = $tc->{'compression_method'}; + + SKIP: { + skip "$method compression not supported by this build", 3 + if ! $tc->{'enabled'}; + + # Take backup with server compression enabled. + my @backup = ( + 'pg_basebackup', '-D', $backup_path, + '-Xfetch', '--no-sync', '-cfast', '-Fp'); + push @backup, @{$tc->{'backup_flags'}}; + + my @verify = ('pg_verifybackup', '-e', $backup_path); + + # A backup with a valid compression method should work. + $primary->command_ok(\@backup, "backup ok with compression method \"$method\""); + + # Verify that backup is extracted + if ($method ne 'none') + { + ok (-f "$backup_path/PG_VERSION", "extracted compressed backup, compression method \"$method\""); + } + ok(-f "$backup_path/backup_manifest", "backup manifest exists, compression method \"$method\""); + + # Make sure that it verifies OK. + $primary->command_ok(\@verify, + "verify backup with compression method \"$method\""); + } + + # Remove backup immediately to save disk space. + rmtree($backup_path); +} -- 1.8.3.1