Michael Paquier wrote:
> On Sat, Nov 28, 2015 at 7:53 AM, Alvaro Herrera wrote:

> > Hmm. I just noticed RewindTest sets $ENV{PGDATABASE} outside BEGIN.  Not
> > sure what to think of that.  Could instead pass the database name in
> > $node->getConnStr() calls, like run_pg_rewind() is already doing.
> 
> Yes, let's remove that and pass the database name to getConnStr().

Ok.
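
Something like this is what I have in mind -- just a sketch, assuming
getConnStr() takes an optional database name the way run_pg_rewind()
already passes one:

    # Hypothetical usage: pass the database explicitly when building
    # the connection string, instead of setting $ENV{PGDATABASE}
    # outside BEGIN.
    my $connstr = $node_master->getConnStr('postgres');
    psql $connstr, "SELECT 1";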


> A separate issue, but as long as we are working on this set of tests:
> I have noticed that config_default.pl is missing the flag tap_tests in
> its list. See the patch attached. Could you apply that as well and
> backpatch?

> I have as well noticed that RewindTest.pm is missing "1;" on its last
> line. When this is loaded this would lead to compilation errors.

Sure.
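
For the archives, both fixes are tiny.  Roughly (a sketch only; the
exact placement in config_default.pl may differ):

    # RewindTest.pm must return a true value so that "use RewindTest;"
    # does not fail; add this as its last line:
    1;

    # config_default.pl: add the missing entry to the options list,
    # something like:
    tap_tests => undef,    # TAP test suites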


> > I tried all the t/ tests we have and all of them pass for me.  If I'm
> > able, I will push this on my Sunday late evening, so that I can fix
> > whatever gets red on Monday first thing ...
> 
> I have done as well additional tests on Windows and this patch is
> showing a green status.

Great.

Here's your recovery test patch rebased, for your (and others'!)
perusal.  It passes for me.  (Test 003 is unchanged.)

-- 
Álvaro Herrera                http://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
diff --git a/src/test/Makefile b/src/test/Makefile
index b713c2c..7f7754f 100644
--- a/src/test/Makefile
+++ b/src/test/Makefile
@@ -12,7 +12,7 @@ subdir = src/test
 top_builddir = ../..
 include $(top_builddir)/src/Makefile.global
 
-SUBDIRS = regress isolation modules
+SUBDIRS = regress isolation modules recovery
 
 # We don't build or execute examples/, locale/, or thread/ by default,
 # but we do want "make clean" etc to recurse into them.  Likewise for ssl/,
diff --git a/src/test/recovery/.gitignore b/src/test/recovery/.gitignore
new file mode 100644
index 0000000..499fa7d
--- /dev/null
+++ b/src/test/recovery/.gitignore
@@ -0,0 +1,3 @@
+# Generated by test suite
+/regress_log/
+/tmp_check/
diff --git a/src/test/recovery/Makefile b/src/test/recovery/Makefile
new file mode 100644
index 0000000..16c063a
--- /dev/null
+++ b/src/test/recovery/Makefile
@@ -0,0 +1,17 @@
+#-------------------------------------------------------------------------
+#
+# Makefile for src/test/recovery
+#
+# Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+# Portions Copyright (c) 1994, Regents of the University of California
+#
+# src/test/recovery/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/test/recovery
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+
+check:
+	$(prove_check)
diff --git a/src/test/recovery/README b/src/test/recovery/README
new file mode 100644
index 0000000..20b98e0
--- /dev/null
+++ b/src/test/recovery/README
@@ -0,0 +1,19 @@
+src/test/recovery/README
+
+Regression tests for recovery and replication
+=============================================
+
+This directory contains a test suite for recovery and replication.
+It mainly exercises the interaction of recovery.conf with cluster
+instances, providing a simple set of routines that can be used to
+define a custom cluster for a test, including backup, archiving,
+and streaming configuration.
+
+Running the tests
+=================
+
+    make check
+
+NOTE: This creates a temporary installation, and some tests may
+create one or more nodes, be they master or standby, for the
+purpose of the tests.
diff --git a/src/test/recovery/RecoveryTest.pm b/src/test/recovery/RecoveryTest.pm
new file mode 100644
index 0000000..3706847
--- /dev/null
+++ b/src/test/recovery/RecoveryTest.pm
@@ -0,0 +1,177 @@
+# Set of common routines for recovery regression tests of a PostgreSQL
+# cluster. This includes methods that the various tests can use to set
+# up cluster nodes and configure them according to the scenario each
+# test wants to exercise.
+#
+# This module makes use of PostgresNode for node manipulation, performing
+# higher-level operations to create standby nodes or setting them up
+# for archiving and replication.
+#
+# Nodes are identified by their port number, which is allocated when a
+# node is created and is unique for each node of the cluster since
+# everything runs locally. PGHOST is likewise set to a unique value for
+# the duration of each test.
+
+package RecoveryTest;
+
+use strict;
+use warnings;
+
+use Cwd;
+use Exporter 'import';
+use IPC::Run qw(run start);
+use PostgresNode;
+use RecursiveCopy;
+use TestBase;
+use TestLib;
+use Test::More;
+
+our @EXPORT = qw(
+	enable_archiving
+	enable_restoring
+	enable_streaming
+	make_master
+	make_archive_standby
+	make_stream_standby
+);
+
+# Handy routines to set up nodes with different characteristics.
+# Enable streaming replication
+sub enable_streaming
+{
+	my $node_root = shift; # Instance to link to
+	my $node_standby = shift;
+	my $root_connstr = $node_root->getConnStr();
+	my $applname = $node_standby->getApplName();
+
+	$node_standby->appendConf('recovery.conf', qq(
+primary_conninfo='$root_connstr application_name=$applname'
+standby_mode=on
+recovery_target_timeline='latest'
+));
+}
+
+# Enable the use of restore_command from a node
+sub enable_restoring
+{
+	my $node_root = shift; # Instance to link to
+	my $node_standby = shift;
+	my $path = $node_root->getArchiveDir();
+
+	# Switch path to use slashes on Windows
+	$path =~ tr#\\#/# if ($windows_os);
+	my $copy_command = $windows_os ?
+		"copy \"$path\\\\%f\" \"%p\"" :
+		"cp -i $path/%f %p";
+	$node_standby->appendConf('recovery.conf', qq(
+restore_command='$copy_command'
+standby_mode=on
+));
+}
+
+# Enable WAL archiving on a node
+sub enable_archiving
+{
+	my $node = shift;
+	my $path = $node->getArchiveDir();
+
+	# Switch path to use slashes on Windows
+	$path =~ tr#\\#/# if ($windows_os);
+	my $copy_command = $windows_os ?
+		"copy \"%p\" \"$path\\\\%f\"" :
+		"cp %p $path/%f";
+
+	# Enable archive_mode and archive_command on node
+	$node->appendConf('postgresql.conf', qq(
+archive_mode = on
+archive_command = '$copy_command'
+));
+}
+
+# Master node initialization.
+sub make_master
+{
+	my $node_master = get_new_node();
+	my $port_master = $node_master->getPort();
+	print "# Initializing master node wih port $port_master\n";
+	$node_master->initNode();
+	configure_base_node($node_master);
+	return $node_master;
+}
+
+sub configure_base_node
+{
+	my $node = shift;
+
+	$node->appendConf('postgresql.conf', qq(
+wal_level = hot_standby
+max_wal_senders = 5
+wal_keep_segments = 20
+max_wal_size = 128MB
+shared_buffers = 1MB
+wal_log_hints = on
+hot_standby = on
+autovacuum = off
+));
+}
+
+# Standby node initialization.
+# Node receiving WAL only via streaming.
+sub make_stream_standby
+{
+	my $node_master = shift;
+	my $backup_name = shift;
+	my $node_standby = get_new_node();
+	my $master_port = $node_master->getPort();
+	my $standby_port = $node_standby->getPort();
+
+	print "# Initializing streaming mode for node $standby_port from node $master_port\n";
+	$node_standby->initNodeFromBackup($node_master, $backup_name);
+	configure_base_node($node_standby);
+
+	# Start second node, streaming from first one
+	enable_streaming($node_master, $node_standby);
+	return $node_standby;
+}
+
+# Node getting WAL only from archives
+sub make_archive_standby
+{
+	my $node_master = shift;
+	my $backup_name = shift;
+	my $node_standby = get_new_node();
+	my $master_port = $node_master->getPort();
+	my $standby_port = $node_standby->getPort();
+
+	print "# Initializing archive mode for node $standby_port from node $master_port\n";
+	$node_standby->initNodeFromBackup($node_master, $backup_name);
+	configure_base_node($node_standby);
+
+	# Start second node, restoring from first one
+	enable_restoring($node_master, $node_standby);
+	return $node_standby;
+}
+
+# Wait until a node is able to accept queries. Useful when putting a node
+# into recovery and waiting for it to become usable, particularly on slow
+# machines.
+sub wait_for_node
+{
+	my $node         = shift;
+	my $max_attempts = 30;
+	my $attempts     = 0;
+	while ($attempts < $max_attempts)
+	{
+		if (run_log(['pg_isready', '-p', $node->getPort()]))
+		{
+			return 1;
+		}
+
+		# Wait a second before retrying.
+		sleep 1;
+		$attempts++;
+	}
+	return 0;
+}
+
+1;
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
new file mode 100644
index 0000000..e902d42
--- /dev/null
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -0,0 +1,67 @@
+# Minimal test of streaming replication
+use strict;
+use warnings;
+use TestLib;
+use Test::More tests => 4;
+
+use RecoveryTest;
+
+# Initialize master node
+my $node_master = make_master();
+$node_master->startNode();
+my $backup_name = 'my_backup';
+
+# Take backup
+$node_master->backupNode($backup_name);
+
+# Create streaming standby linking to master
+my $node_standby_1 = make_stream_standby($node_master, $backup_name);
+$node_standby_1->startNode();
+
+# Take backup of standby 1 (not mandatory, but useful to check if
+# pg_basebackup works on a standby).
+$node_standby_1->backupNode($backup_name);
+
+# Create second standby node linking to standby 1
+my $node_standby_2 = make_stream_standby($node_standby_1, $backup_name);
+$node_standby_2->startNode();
+$node_standby_2->backupNode($backup_name);
+
+# Create some content on master and check its presence in standby 1 and 2
+psql $node_master->getConnStr(),
+	"CREATE TABLE tab_int AS SELECT generate_series(1,1002) AS a";
+
+# Wait for standbys to catch up
+my $applname_1 = $node_standby_1->getApplName();
+my $applname_2 = $node_standby_2->getApplName();
+my $caughtup_query = "SELECT pg_current_xlog_location() = write_location FROM pg_stat_replication WHERE application_name = '$applname_1';";
+poll_query_until($node_master, $caughtup_query)
+	or die "Timed out while waiting for standby 1 to catch up";
+$caughtup_query = "SELECT pg_last_xlog_replay_location() = write_location FROM pg_stat_replication WHERE application_name = '$applname_2';";
+poll_query_until($node_standby_1, $caughtup_query)
+	or die "Timed out while waiting for standby 2 to catch up";
+
+my $result = psql $node_standby_1->getConnStr(),
+	"SELECT count(*) FROM tab_int";
+print "standby 1: $result\n";
+is($result, qq(1002), 'check streamed content on standby 1');
+
+$result = psql $node_standby_2->getConnStr(),
+	"SELECT count(*) FROM tab_int";
+print "standby 2: $result\n";
+is($result, qq(1002), 'check streamed content on standby 2');
+
+# Check that only read-only queries can run on standbys
+command_fails(['psql', '-A', '-t',  '--no-psqlrc',
+	'-d', $node_standby_1->getConnStr(), '-c',
+	"INSERT INTO tab_int VALUES (1)"],
+	'Read-only queries on standby 1');
+command_fails(['psql', '-A', '-t',  '--no-psqlrc',
+	'-d', $node_standby_2->getConnStr(), '-c',
+	"INSERT INTO tab_int VALUES (1)"],
+	'Read-only queries on standby 2');
+
+# Cleanup nodes
+teardown_node($node_standby_2);
+teardown_node($node_standby_1);
+teardown_node($node_master);
diff --git a/src/test/recovery/t/002_archiving.pl b/src/test/recovery/t/002_archiving.pl
new file mode 100644
index 0000000..16dbbc1
--- /dev/null
+++ b/src/test/recovery/t/002_archiving.pl
@@ -0,0 +1,51 @@
+# test for archiving with warm standby
+use strict;
+use warnings;
+use TestLib;
+use Test::More tests => 1;
+use File::Copy;
+use RecoveryTest;
+
+# Initialize master node with WAL archiving enabled
+my $node_master = make_master();
+my $backup_name = 'my_backup';
+enable_archiving($node_master);
+
+# Start it
+$node_master->startNode();
+
+# Take backup to be used by the standby
+$node_master->backupNode($backup_name);
+
+# Initialize standby node from backup, fetching WAL from archives
+my $node_standby = make_archive_standby($node_master, $backup_name);
+$node_standby->appendConf('postgresql.conf', qq(
+wal_retrieve_retry_interval = '100ms'
+));
+$node_standby->startNode();
+
+# Create some content on master
+psql $node_master->getConnStr(),
+	"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a";
+my $current_lsn = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+
+# Force archiving of the WAL file so that it becomes available in the archives
+psql $node_master->getConnStr(), "SELECT pg_switch_xlog()";
+
+# Add some more content; it should not yet be present on the standby
+psql $node_master->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(1001,2000))";
+
+# Wait until necessary replay has been done on standby
+my $caughtup_query = "SELECT '$current_lsn'::pg_lsn <= pg_last_xlog_replay_location()";
+poll_query_until($node_standby, $caughtup_query)
+	or die "Timed out while waiting for standby to catch up";
+
+my $result = psql $node_standby->getConnStr(),
+	"SELECT count(*) FROM tab_int";
+is($result, qq(1000), 'check content from archives');
+
+# Cleanup nodes
+teardown_node($node_standby);
+teardown_node($node_master);
diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl
new file mode 100644
index 0000000..6c5021c
--- /dev/null
+++ b/src/test/recovery/t/003_recovery_targets.pl
@@ -0,0 +1,135 @@
+# Test for recovery targets: name, timestamp, XID
+use strict;
+use warnings;
+use TestLib;
+use Test::More tests => 7;
+
+use RecoveryTest;
+
+# Create and test a standby from given backup, with a certain
+# recovery target.
+sub test_recovery_standby
+{
+	my $test_name = shift;
+	my $node_master = shift;
+	my $recovery_params = shift;
+	my $num_rows = shift;
+	my $until_lsn = shift;
+
+	my $node_standby = make_archive_standby($node_master, 'my_backup');
+
+	foreach my $param_item (@$recovery_params)
+	{
+		$node_standby->appendConf('recovery.conf',
+					   qq($param_item
+));
+	}
+
+	$node_standby->startNode();
+
+	# Wait until standby has replayed enough data
+	my $caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()";
+	poll_query_until($node_standby, $caughtup_query)
+		or die "Timed out while waiting for standby to catch up";
+
+	# Check that the standby replayed the expected amount of data
+	my $result = psql $node_standby->getConnStr(),
+		"SELECT count(*) FROM tab_int";
+	is($result, qq($num_rows), "check standby content for $test_name");
+
+	# Stop standby node
+	teardown_node($node_standby);
+}
+
+# Initialize master node
+my $node_master = make_master();
+enable_archiving($node_master);
+
+# Start it
+$node_master->startNode();
+
+# Create data before taking the backup, aimed at testing
+# recovery_target = 'immediate'
+psql $node_master->getConnStr(),
+	"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a";
+my $lsn1 = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+
+# Take backup from which all operations will be run
+$node_master->backupNode('my_backup');
+
+# Insert some data to be used as a replay reference, with a recovery
+# target TXID.
+psql $node_master->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(1001,2000))";
+my $recovery_txid = psql $node_master->getConnStr(),
+	"SELECT txid_current()";
+my $lsn2 = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+
+# More data, with recovery target timestamp
+psql $node_master->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(2001,3000))";
+my $recovery_time = psql $node_master->getConnStr(), "SELECT now()";
+my $lsn3 = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+
+# Even more data, this time with a recovery target name
+psql $node_master->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(3001,4000))";
+my $recovery_name = "my_target";
+my $lsn4 = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+psql $node_master->getConnStr(),
+	"SELECT pg_create_restore_point('$recovery_name')";
+
+# Force archiving of WAL file
+psql $node_master->getConnStr(), "SELECT pg_switch_xlog()";
+
+# Test recovery targets
+my @recovery_params = ( "recovery_target = 'immediate'" );
+test_recovery_standby('immediate target', $node_master,
+					  \@recovery_params,
+					  "1000", $lsn1);
+@recovery_params = ( "recovery_target_xid = '$recovery_txid'" );
+test_recovery_standby('XID', $node_master,
+					  \@recovery_params,
+					  "2000", $lsn2);
+@recovery_params = ( "recovery_target_time = '$recovery_time'" );
+test_recovery_standby('Time', $node_master,
+					  \@recovery_params,
+					  "3000", $lsn3);
+@recovery_params = ( "recovery_target_name = '$recovery_name'" );
+test_recovery_standby('Name', $node_master,
+					  \@recovery_params,
+					  "4000", $lsn4);
+
+# Multiple targets
+# The last entry has priority (note that an array preserves the order of
+# its items, unlike a hash).
+@recovery_params = (
+	"recovery_target_name = '$recovery_name'",
+	"recovery_target_xid  = '$recovery_txid'",
+	"recovery_target_time = '$recovery_time'"
+);
+test_recovery_standby('Name + XID + Time', $node_master,
+					  \@recovery_params,
+					  "3000", $lsn3);
+@recovery_params = (
+	"recovery_target_time = '$recovery_time'",
+	"recovery_target_name = '$recovery_name'",
+	"recovery_target_xid  = '$recovery_txid'"
+);
+test_recovery_standby('Time + Name + XID', $node_master,
+					  \@recovery_params,
+					  "2000", $lsn2);
+@recovery_params = (
+	"recovery_target_xid  = '$recovery_txid'",
+	"recovery_target_time = '$recovery_time'",
+	"recovery_target_name = '$recovery_name'"
+);
+test_recovery_standby('XID + Time + Name', $node_master,
+					  \@recovery_params,
+					  "4000", $lsn4);
+
+teardown_node($node_master);
diff --git a/src/test/recovery/t/004_timeline_switch.pl b/src/test/recovery/t/004_timeline_switch.pl
new file mode 100644
index 0000000..f0ddebb
--- /dev/null
+++ b/src/test/recovery/t/004_timeline_switch.pl
@@ -0,0 +1,76 @@
+# Test for timeline switch
+# Ensure that a standby is able to follow a newly-promoted standby
+# on a new timeline.
+use strict;
+use warnings;
+use File::Path qw(remove_tree);
+use PostgresNode;
+use TestBase;
+use TestLib;
+use Test::More tests => 1;
+
+use RecoveryTest;
+
+$ENV{PGDATABASE} = 'postgres';
+
+# Initialize master node
+my $node_master = make_master();
+$node_master->startNode();
+
+# Take backup
+my $backup_name = 'my_backup';
+$node_master->backupNode($backup_name);
+
+# Create two standbys linking to it
+my $node_standby_1 = make_stream_standby($node_master, $backup_name);
+$node_standby_1->startNode();
+my $node_standby_2 = make_stream_standby($node_master, $backup_name);
+$node_standby_2->startNode();
+
+# Create some content on master
+psql $node_master->getConnStr(),
+	"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a";
+my $until_lsn = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+
+# Wait until standby 1 has replayed enough data
+my $caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()";
+poll_query_until($node_standby_1, $caughtup_query)
+	or die "Timed out while waiting for standby to catch up";
+
+# Stop and remove master, and promote standby 1, switching it to a new timeline
+teardown_node($node_master);
+system_or_bail('pg_ctl', '-w', '-D', $node_standby_1->getDataDir(),
+			   'promote');
+print "# Promoted standby 1\n";
+
+# Switch standby 2 to replay from standby 1
+remove_tree($node_standby_2->getDataDir() . '/recovery.conf');
+my $connstr_1 = $node_standby_1->getConnStr();
+$node_standby_2->appendConf('recovery.conf', qq(
+primary_conninfo='$connstr_1'
+standby_mode=on
+recovery_target_timeline='latest'
+));
+$node_standby_2->restartNode();
+
+# Insert some data in standby 1 and check its presence in standby 2
+# to ensure that the timeline switch has been done. Standby 1 needs
+# to exit recovery first before moving on with the test.
+poll_query_until($node_standby_1, "SELECT pg_is_in_recovery() <> true")
+	or die "Timed out while waiting for standby 1 to exit recovery";
+psql $node_standby_1->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(1001,2000))";
+$until_lsn = psql $node_standby_1->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+$caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()";
+poll_query_until($node_standby_2, $caughtup_query)
+	or die "Timed out while waiting for standby to catch up";
+
+my $result = psql $node_standby_2->getConnStr(),
+	"SELECT count(*) FROM tab_int";
+is($result, qq(2000), 'check content of standby 2');
+
+# Stop nodes
+teardown_node($node_standby_2);
+teardown_node($node_standby_1);
diff --git a/src/test/recovery/t/005_replay_delay.pl b/src/test/recovery/t/005_replay_delay.pl
new file mode 100644
index 0000000..f00e93b
--- /dev/null
+++ b/src/test/recovery/t/005_replay_delay.pl
@@ -0,0 +1,49 @@
+# Checks for recovery_min_apply_delay
+use strict;
+use warnings;
+use TestLib;
+use Test::More tests => 2;
+
+use RecoveryTest;
+
+# Initialize master node
+my $node_master = make_master();
+$node_master->startNode();
+
+# And some content
+psql $node_master->getConnStr(),
+	"CREATE TABLE tab_int AS SELECT generate_series(1,10) AS a";
+
+# Take backup
+my $backup_name = 'my_backup';
+$node_master->backupNode($backup_name);
+
+# Create streaming standby from backup
+my $node_standby = make_stream_standby($node_master, $backup_name);
+$node_standby->appendConf('recovery.conf', qq(
+recovery_min_apply_delay = '2s'
+));
+$node_standby->startNode();
+
+# Make new content on master and check its presence on the standby,
+# given the 2s apply delay configured above.
+psql $node_master->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(11,20))";
+sleep 1;
+# Here we should have only 10 rows
+my $result = psql $node_standby->getConnStr(),
+	"SELECT count(*) FROM tab_int";
+is($result, qq(10), 'check content with delay of 1s');
+
+# Now wait for replay to complete on standby
+my $until_lsn = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+my $caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()";
+poll_query_until($node_standby, $caughtup_query)
+	or die "Timed out while waiting for standby to catch up";
+$result = psql $node_standby->getConnStr(), "SELECT count(*) FROM tab_int";
+is($result, qq(20), 'check content with delay of 2s');
+
+# Stop nodes
+teardown_node($node_standby);
+teardown_node($node_master);