On Tue, Nov 24, 2015 at 2:14 PM, Michael Paquier
<michael.paqu...@gmail.com> wrote:

> I'll rework this patch and will update a new version soon.
>

So, attached is a new patch addressing all the comments received. The new
version has the following changes:
- Print stderr output more verbosely in case of error in psql
- Add recovery test suite to SUBDIRS in src/test/Makefile
- Add strict and warnings to what is used in the new modules of this patch
- Manage node information using the package/class PostgresNode.pm and have
RecoveryTest use it. I have intentionally kept PostgresNode bare-bones and
simple: one can initialize a node, append configuration parameters to it,
and manage it through start/stop/restart (we may want to add reload and
promote later if needed). More complex configuration is left to
RecoveryTest.pm, which is in charge of appending the configuration
dedicated to streaming, archiving, etc. through a set of routines working
on PostgresNode objects (see the sketch below). I have also concluded that
it is not really worth adding a node status flag in PostgresNode, because
the port number saved there is sufficient when doing free port lookup, and
the list of nodes used in a recovery test is saved in an array.
- Add a new module, RecursiveCopy, to be used for base backups. This
removes the dependency on Archive::Tar. PostgresNode makes use of it when
initializing a node from a backup.
- Tests have been updated to use PostgresNode objects instead of raw port
numbers as identifiers. That's more portable.
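
For illustration, here is roughly how a test drives a cluster with these
routines (a condensed sketch of what the attached t/001_stream_rep.pl
does):

    my $node_master = make_master();
    $node_master->startNode();
    $node_master->backupNode('my_backup');
    my $node_standby = make_stream_standby($node_master, 'my_backup');
    $node_standby->startNode();
    psql $node_master->getConnStr(),
        "CREATE TABLE tab_int AS SELECT 1 AS a";
    teardown_node($node_standby);
    teardown_node($node_master);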

Hopefully I have missed nothing.
Regards,
-- 
Michael
diff --git a/src/bin/pg_rewind/RewindTest.pm b/src/bin/pg_rewind/RewindTest.pm
index a4c1737..ea219d7 100644
--- a/src/bin/pg_rewind/RewindTest.pm
+++ b/src/bin/pg_rewind/RewindTest.pm
@@ -125,38 +125,6 @@ sub check_query
 	}
 }
 
-# Run a query once a second, until it returns 't' (i.e. SQL boolean true).
-sub poll_query_until
-{
-	my ($query, $connstr) = @_;
-
-	my $max_attempts = 30;
-	my $attempts     = 0;
-	my ($stdout, $stderr);
-
-	while ($attempts < $max_attempts)
-	{
-		my $cmd = [ 'psql', '-At', '-c', "$query", '-d', "$connstr" ];
-		my $result = run $cmd, '>', \$stdout, '2>', \$stderr;
-
-		chomp($stdout);
-		$stdout =~ s/\r//g if $Config{osname} eq 'msys';
-		if ($stdout eq "t")
-		{
-			return 1;
-		}
-
-		# Wait a second before retrying.
-		sleep 1;
-		$attempts++;
-	}
-
-	# The query result didn't change in 30 seconds. Give up. Print the stderr
-	# from the last attempt, hopefully that's useful for debugging.
-	diag $stderr;
-	return 0;
-}
-
 sub append_to_file
 {
 	my ($filename, $str) = @_;
diff --git a/src/test/Makefile b/src/test/Makefile
index b713c2c..7f7754f 100644
--- a/src/test/Makefile
+++ b/src/test/Makefile
@@ -12,7 +12,7 @@ subdir = src/test
 top_builddir = ../..
 include $(top_builddir)/src/Makefile.global
 
-SUBDIRS = regress isolation modules
+SUBDIRS = regress isolation modules recovery
 
 # We don't build or execute examples/, locale/, or thread/ by default,
 # but we do want "make clean" etc to recurse into them.  Likewise for ssl/,
diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm
new file mode 100644
index 0000000..90ad3bd
--- /dev/null
+++ b/src/test/perl/PostgresNode.pm
@@ -0,0 +1,162 @@
+# PostgresNode, simple node representation for regression tests
+#
+# Regression tests should use this basic class infrastructure to define
+# nodes that are then used in more complex scenarios. This object is kept
+# deliberately simple, with only a basic set of routines to configure,
+# initialize and manage a node.
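+#
+# Minimal usage sketch, with $pghost and $pgport chosen by the caller:
+#   my $node = new PostgresNode($pghost, $pgport);
+#   $node->initNode();
+#   $node->startNode();
+#   $node->stopNode();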
+
+package PostgresNode;
+
+use strict;
+use warnings;
+
+use RecursiveCopy;
+use TestLib;
+
+sub new {
+	my $class = shift;
+	my $pghost = shift;
+	my $pgport = shift;
+	my $self = {
+		_port => undef,
+		_host => undef,
+		_basedir => undef,
+		_connstr => undef,
+		_applname => undef
+			};
+
+	# Set up each field
+	$self->{_port} = $pgport;
+	$self->{_host} = $pghost;
+	$self->{_basedir} = TestLib::tempdir;
+	$self->{_connstr} = "port=$pgport host=$pghost";
+	$self->{_applname} = "node_$pgport";
+	bless $self, $class;
+	return $self;
+}
+
+# Get routines for various variables
+sub getPort {
+	my( $self ) = @_;
+	return $self->{_port};
+}
+sub getHost {
+	my( $self ) = @_;
+	return $self->{_host};
+}
+sub getConnStr {
+	my( $self ) = @_;
+	return $self->{_connstr};
+}
+sub getDataDir {
+	my ( $self ) = @_;
+	return $self->{_basedir} . '/pgdata';
+}
+sub getApplName {
+	my ( $self ) = @_;
+	return $self->{_applname};
+}
+sub getArchiveDir {
+	my ( $self ) = @_;
+	return $self->{_basedir} . '/archives';
+}
+sub getBackupDir {
+	my ( $self ) = @_;
+	return $self->{_basedir} . '/backup';
+}
+
+# Dump node information
+sub dumpNodeInfo {
+	my ( $self ) = @_;
+	print 'Data directory: ' . $self->getDataDir() . "\n";
+	print 'Backup directory: ' . $self->getBackupDir() . "\n";
+	print 'Archive directory: ' . $self->getArchiveDir() . "\n";
+	print 'Connection string: ' . $self->getConnStr() . "\n";
+	print 'Application name: ' . $self->getApplName() . "\n";
+}
+
+# Actions on node
+sub initNode
+{
+	my ( $self ) = @_;
+	my $port = $self->getPort();
+
+	mkdir $self->getBackupDir();
+	mkdir $self->getArchiveDir();
+
+	standard_initdb($self->getDataDir(), $self->getHost());
+	$self->appendConf('postgresql.conf', qq(
+port = $port
+));
+	configure_hba_for_replication($self->getDataDir());
+}
+sub appendConf
+{
+	my ($self, $filename, $str) = @_;
+
+	my $conffile = $self->getDataDir() . '/' . $filename;
+
+	open my $fh, ">>", $conffile or die "could not open file $filename";
+	print $fh $str;
+	close $fh;
+}
+sub backupNode
+{
+	my ($self, $backup_name) = @_;
+	my $backup_path = $self->getBackupDir() . '/' . $backup_name;
+	my $port = $self->getPort();
+
+	print "# Taking backup $backup_name from node with port $port\n";
+	system_or_bail("pg_basebackup -D $backup_path -p $port -x");
+	print "# Backup finished\n";
+}
+sub initNodeFromBackup
+{
+	my ( $self, $root_node, $backup_name ) = @_;
+	my $backup_path = $root_node->getBackupDir() . '/' . $backup_name;
+	my $port = $self->getPort();
+	my $root_port = $root_node->getPort();
+
+	print "Initializing node $port from backup \"$backup_name\" of node $root_port\n";
+	die "Backup $backup_path does not exist" if (! -d $backup_path);
+
+	mkdir $self->getBackupDir();
+	mkdir $self->getArchiveDir();
+
+	my $data_path = $self->getDataDir();
+	rmdir($data_path);
+	RecursiveCopy::copypath($backup_path, $data_path);
+	chmod(0700, $data_path);
+
+	# Base configuration for this node
+	$self->appendConf('postgresql.conf', qq(
+port = $port
+));
+	configure_hba_for_replication($self->getDataDir());
+}
+sub startNode
+{
+	my ( $self ) = @_;
+	my $port = $self->getPort();
+	print "Starting node with port $port\n";
+    system_or_bail('pg_ctl', '-w', '-D', $self->getDataDir(),
+				   '-l', "$log_path/node_$port.log",
+				   'start');
+}
+sub stopNode
+{
+	my ( $self, $mode ) = @_;
+	my $port = $self->getPort();
+	$mode = 'fast' if (!defined($mode));
+	print "Stopping node with port $port with $mode mode\n";
+	system('pg_ctl', '-D', $self->getDataDir(), '-m',
+		   $mode, 'stop');
+}
+sub restartNode
+{
+	my ( $self ) = @_;
+	$self->stopNode();
+	$self->startNode();
+}
+
+1;
diff --git a/src/test/perl/RecoveryTest.pm b/src/test/perl/RecoveryTest.pm
new file mode 100644
index 0000000..1056a1c
--- /dev/null
+++ b/src/test/perl/RecoveryTest.pm
@@ -0,0 +1,251 @@
+# Set of common routines for recovery regression tests for a PostgreSQL
+# cluster. This includes methods that can be used by the various set of
+# tests present to set up cluster nodes and configure them according to
+# the test scenario wanted.
+#
+# This module makes use of PostgresNode for node manipulation, performing
+# the higher-level operations needed to create standby nodes or set them
+# up for archiving and replication.
+#
+# Nodes are identified by their port number, allocated when each node is
+# created; as the cluster runs locally, this port is unique for each of
+# its nodes. PGHOST is likewise set to a unique value for the duration
+# of each test.
+
+package RecoveryTest;
+
+use strict;
+use warnings;
+
+use Cwd;
+use PostgresNode;
+use RecursiveCopy;
+use TestLib;
+use Test::More;
+
+use IPC::Run qw(run start);
+
+use Exporter 'import';
+
+our @EXPORT = qw(
+	@active_nodes
+
+	enable_archiving
+	enable_restoring
+	enable_streaming
+	get_free_port
+	make_master
+	make_archive_standby
+	make_stream_standby
+	teardown_node
+);
+
+# Tracking of last port value assigned to accelerate free port lookup.
+# XXX: Should this part use PG_VERSION_NUM?
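+# (90600 is 9.6devel's PG_VERSION_NUM; folding it into 49152..65535
+# keeps the generated ports within the dynamic/private port range.)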
+my $last_port_assigned = 90600 % 16384 + 49152;
+
+# Value tracking the host value used for a single run.
+my $node_pghost = $windows_os ? "127.0.0.1" : tempdir_short;
+
+
+# Database used for each connection attempt via psql
+$ENV{PGDATABASE} = "postgres";
+
+# Tracker of active nodes
+our @active_nodes = ();
+
+# Set of handy routines able to set up a node with different characteristics
+# Enable streaming replication
+sub enable_streaming
+{
+	my $node_root = shift; # Instance to link to
+	my $node_standby = shift;
+	my $root_connstr = $node_root->getConnStr();
+	my $applname = $node_standby->getApplName();
+
+	$node_standby->appendConf('recovery.conf', qq(
+primary_conninfo='$root_connstr application_name=$applname'
+standby_mode=on
+recovery_target_timeline='latest'
+));
+}
+
+# Enable the use of restore_command from a node
+sub enable_restoring
+{
+	my $node_root = shift; # Instance to link to
+	my $node_standby = shift;
+	my $path = $node_root->getArchiveDir();
+
+	# Switch path to use slashes on Windows
+	$path =~ tr#\\#/# if ($windows_os);
+	my $copy_command = $windows_os ?
+		"copy \"$path\\\\%f\" \"%p\"" :
+		"cp -i $path/%f %p";
+	$node_standby->appendConf('recovery.conf', qq(
+restore_command='$copy_command'
+standby_mode=on
+));
+}
+
+# Enable WAL archiving on a node
+sub enable_archiving
+{
+	my $node = shift;
+	my $path = $node->getArchiveDir();
+
+	# Switch path to use slashes on Windows
+	$path =~ tr#\\#/# if ($windows_os);
+	my $copy_command = $windows_os ?
+		"copy \"%p\" \"$path\\\\%f\"" :
+		"cp %p $path/%f";
+
+	# Enable archive_mode and archive_command on node
+	$node->appendConf('postgresql.conf', qq(
+archive_mode = on
+archive_command = '$copy_command'
+));
+}
+
+# Master node initialization.
+sub make_master
+{
+	my $node_master = get_free_port();
+	my $port_master = $node_master->getPort();
+	print "# Initializing master node wih port $port_master\n";
+	$node_master->initNode();
+	configure_base_node($node_master);
+	return $node_master;
+}
+
+sub configure_base_node
+{
+	my $node = shift;
+
+	$node->appendConf('postgresql.conf', qq(
+wal_level = hot_standby
+max_wal_senders = 5
+wal_keep_segments = 20
+max_wal_size = 128MB
+shared_buffers = 1MB
+wal_log_hints = on
+hot_standby = on
+autovacuum = off
+));
+	configure_hba_for_replication($node->getDataDir());
+}
+
+# Standby node initializations
+# Node using streaming only.
+sub make_stream_standby
+{
+	my $node_master = shift;
+	my $backup_name = shift;
+	my $node_standby = get_free_port();
+	my $master_port = $node_master->getPort();
+	my $standby_port = $node_standby->getPort();
+
+	print "# Initializing streaming mode for node $standby_port from node $master_port\n";
+	$node_standby->initNodeFromBackup($node_master, $backup_name);
+	configure_base_node($node_standby);
+
+	# Configure the new standby to stream from the first node
+	enable_streaming($node_master, $node_standby);
+	return $node_standby;
+}
+
+# Node getting WAL only from archives
+sub make_archive_standby
+{
+	my $node_master = shift;
+	my $backup_name = shift;
+	my $node_standby = get_free_port();
+	my $master_port = $node_master->getPort();
+	my $standby_port = $node_standby->getPort();
+
+	print "# Initializing archive mode for node $standby_port from node $master_port\n";
+	$node_standby->initNodeFromBackup($node_master, $backup_name);
+	configure_base_node($node_standby);
+
+	# Configure the new standby to restore WAL from the first node's archives
+	enable_restoring($node_master, $node_standby);
+	return $node_standby;
+}
+
+# Get a port number not currently in use for a new node.
+# Port lookup relies both on the servers currently running (checked
+# with pg_isready) and on the list of registered nodes, so any node
+# consuming a port number must either be running or already be
+# registered in @active_nodes.
+sub get_free_port
+{
+	my $found = 0;
+	my $port = $last_port_assigned;
+
+	while ($found == 0)
+	{
+		$port++;
+		print "# Checking for port $port\n";
+		if (!run_log(['pg_isready', '-p', $port]))
+		{
+			$found = 1;
+			# Found a potential candidate, check first that it is
+			# not included in the list of registered nodes.
+			foreach my $node (@active_nodes)
+			{
+				$found = 0 if ($node->getPort() == $port);
+			}
+		}
+	}
+
+	print "# Found free port $port\n";
+	# Lock port number found by creating a new node
+	my $node = new PostgresNode($node_pghost, $port);
+
+	# Add node to list of nodes currently in use
+	push(@active_nodes, $node);
+	$last_port_assigned = $port;
+	return $node;
+}
+
+# Wait until a node is able to accept queries. Useful when a node has
+# been put in recovery and we wait for it to become available, which
+# can take time particularly on slow machines.
+sub wait_for_node
+{
+	my $node         = shift;
+	my $max_attempts = 30;
+	my $attempts     = 0;
+	while ($attempts < $max_attempts)
+	{
+		if (run_log(['pg_isready', '-p', $node->getPort()]))
+		{
+			return 1;
+		}
+
+		# Wait a second before retrying.
+		sleep 1;
+		$attempts++;
+	}
+	return 0;
+}
+
+# Remove any traces of given node.
+sub teardown_node
+{
+	my $node = shift;
+
+	$node->stopNode('immediate');
+	@active_nodes = grep { $_ ne $node } @active_nodes;
+}
+
+END
+{
+	foreach my $node (@active_nodes)
+	{
+		teardown_node($node);
+	}
+}
+
+1;
diff --git a/src/test/perl/RecursiveCopy.pm b/src/test/perl/RecursiveCopy.pm
new file mode 100644
index 0000000..f06f975
--- /dev/null
+++ b/src/test/perl/RecursiveCopy.pm
@@ -0,0 +1,42 @@
+# RecursiveCopy, a simple recursive copy implementation
+#
+# Having this implementation in-house avoids having the regression test
+# code rely on any external module for this simple operation.
+
+package RecursiveCopy;
+use strict;
+use warnings;
+
+use File::Basename;
+use File::Copy;
+
+sub copypath {
+	my $srcpath = shift;
+	my $destpath = shift;
+
+	die "Cannot operate on symlinks" if ( -l $srcpath || -l $destpath );
+
+	die "Destination path $destpath exists as file" if ( -f $destpath );
+
+	# If the source path is a file, simply copy it to the destination.
+	if ( -f $srcpath )
+	{
+		copy($srcpath, $destpath)
+			or die "copy $srcpath -> $destpath failed: $!";
+		return 1;
+	}
+
+	die "Destination needs to be a directory" if (! -d $srcpath);
+	mkdir($destpath);
+
+	# Scan existing source directory and recursively copy everything.
+	opendir(my $directory, $srcpath) or die "could not opendir $srcpath: $!";
+	while (my $entry = readdir($directory)) {
+		next if ($entry eq '.' || $entry eq '..');
+		RecursiveCopy::copypath("$srcpath/$entry", "$destpath/$entry");
+	}
+	closedir($directory);
+	return 1;
+}
+
+1;
diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm
index 02533eb..359ef8a 100644
--- a/src/test/perl/TestLib.pm
+++ b/src/test/perl/TestLib.pm
@@ -12,6 +12,7 @@ our @EXPORT = qw(
   configure_hba_for_replication
   start_test_server
   restart_test_server
+  poll_query_until
   psql
   slurp_dir
   slurp_file
@@ -136,11 +137,12 @@ sub tempdir_short
 sub standard_initdb
 {
 	my $pgdata = shift;
+	my $tempdir_short = shift;
+
+	$tempdir_short = tempdir_short if (! defined($tempdir_short));
 	system_or_bail('initdb', '-D', "$pgdata", '-A' , 'trust', '-N');
 	system_or_bail($ENV{PG_REGRESS}, '--config-auth', $pgdata);
 
-	my $tempdir_short = tempdir_short;
-
 	open CONF, ">>$pgdata/postgresql.conf";
 	print CONF "\n# Added by TestLib.pm)\n";
 	print CONF "fsync = off\n";
@@ -220,12 +222,49 @@ END
 	}
 }
 
+sub poll_query_until
+{
+	my ($query, $connstr) = @_;
+
+	my $max_attempts = 30;
+	my $attempts     = 0;
+	my ($stdout, $stderr);
+
+	while ($attempts < $max_attempts)
+	{
+		my $cmd = [ 'psql', '-At', '-c', "$query", '-d', "$connstr" ];
+		my $result = run $cmd, '>', \$stdout, '2>', \$stderr;
+
+		chomp($stdout);
+		$stdout =~ s/\r//g if $Config{osname} eq 'msys';
+		if ($stdout eq "t")
+		{
+			return 1;
+		}
+
+		# Wait a second before retrying.
+		sleep 1;
+		$attempts++;
+	}
+
+	# The query result didn't change in 30 seconds. Give up. Print the stderr
+	# from the last attempt, hopefully that's useful for debugging.
+	diag $stderr;
+	return 0;
+}
+
 sub psql
 {
 	my ($dbname, $sql) = @_;
 	my ($stdout, $stderr);
 	print("# Running SQL command: $sql\n");
 	run [ 'psql', '-X', '-A', '-t', '-q', '-d', $dbname, '-f', '-' ], '<', \$sql, '>', \$stdout, '2>', \$stderr or die;
+	if ($stderr ne "")
+	{
+		print "#### Begin standard error\n";
+		print $stderr;
+		print "#### End standard error\n";
+	}
 	chomp $stdout;
 	$stdout =~ s/\r//g if $Config{osname} eq 'msys';
 	return $stdout;
diff --git a/src/test/recovery/.gitignore b/src/test/recovery/.gitignore
new file mode 100644
index 0000000..499fa7d
--- /dev/null
+++ b/src/test/recovery/.gitignore
@@ -0,0 +1,3 @@
+# Generated by test suite
+/regress_log/
+/tmp_check/
diff --git a/src/test/recovery/Makefile b/src/test/recovery/Makefile
new file mode 100644
index 0000000..16c063a
--- /dev/null
+++ b/src/test/recovery/Makefile
@@ -0,0 +1,17 @@
+#-------------------------------------------------------------------------
+#
+# Makefile for src/test/recovery
+#
+# Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
+# Portions Copyright (c) 1994, Regents of the University of California
+#
+# src/test/recovery/Makefile
+#
+#-------------------------------------------------------------------------
+
+subdir = src/test/recovery
+top_builddir = ../../..
+include $(top_builddir)/src/Makefile.global
+
+check:
+	$(prove_check)
diff --git a/src/test/recovery/README b/src/test/recovery/README
new file mode 100644
index 0000000..20b98e0
--- /dev/null
+++ b/src/test/recovery/README
@@ -0,0 +1,19 @@
+src/test/recovery/README
+
+Regression tests for recovery and replication
+=============================================
+
+This directory contains a test suite for recovery and replication,
+testing mainly the interactions of recovery.conf with cluster
+instances by providing a simple set of routines that can be used
+to define a custom cluster for a test, including backup, archiving,
+and streaming configuration.
+
+Running the tests
+=================
+
+    make check
+
+NOTE: This creates a temporary installation, and some tests may
+create one or more nodes, master or standby, for the purpose of
+the tests.
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
new file mode 100644
index 0000000..aae1026
--- /dev/null
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -0,0 +1,67 @@
+# Minimal test of streaming replication
+use strict;
+use warnings;
+use TestLib;
+use Test::More tests => 4;
+
+use RecoveryTest;
+
+# Initialize master node
+my $node_master = make_master();
+$node_master->startNode();
+my $backup_name = 'my_backup';
+
+# Take backup
+$node_master->backupNode($backup_name);
+
+# Create streaming standby linking to master
+my $node_standby_1 = make_stream_standby($node_master, $backup_name);
+$node_standby_1->startNode();
+
+# Take backup of standby 1 (not mandatory, but useful to check if
+# pg_basebackup works on a standby).
+$node_standby_1->backupNode($backup_name);
+
+# Create second standby node linking to standby 1
+my $node_standby_2 = make_stream_standby($node_standby_1, $backup_name);
+$node_standby_2->startNode();
+$node_standby_2->backupNode($backup_name);
+
+# Create some content on master and check its presence in standby 1 and 2
+psql $node_master->getConnStr(),
+	"CREATE TABLE tab_int AS SELECT generate_series(1,1002) AS a";
+
+# Wait for standbys to catch up
+my $applname_1 = $node_standby_1->getApplName();
+my $applname_2 = $node_standby_2->getApplName();
+my $caughtup_query = "SELECT pg_current_xlog_location() = write_location FROM pg_stat_replication WHERE application_name = '$applname_1';";
+poll_query_until($caughtup_query, $node_master->getConnStr())
+	or die "Timed out while waiting for standby 1 to catch up";
+$caughtup_query = "SELECT pg_last_xlog_replay_location() = write_location FROM pg_stat_replication WHERE application_name = '$applname_2';";
+poll_query_until($caughtup_query, $node_standby_1->getConnStr())
+	or die "Timed out while waiting for standby 2 to catch up";
+
+my $result = psql $node_standby_1->getConnStr(),
+	"SELECT count(*) FROM tab_int";
+print "standby 1: $result\n";
+is($result, qq(1002), 'check streamed content on standby 1');
+
+$result = psql $node_standby_2->getConnStr(),
+	"SELECT count(*) FROM tab_int";
+print "standby 2: $result\n";
+is($result, qq(1002), 'check streamed content on standby 2');
+
+# Check that only read-only queries can run on standbys
+command_fails(['psql', '-A', '-t', '--no-psqlrc',
+	'-d', $node_standby_1->getConnStr(), '-c',
+	"INSERT INTO tab_int VALUES (1)"],
+	'read-only queries on standby 1');
+command_fails(['psql', '-A', '-t', '--no-psqlrc',
+	'-d', $node_standby_2->getConnStr(), '-c',
+	"INSERT INTO tab_int VALUES (1)"],
+	'read-only queries on standby 2');
+
+# Cleanup nodes
+teardown_node($node_standby_2);
+teardown_node($node_standby_1);
+teardown_node($node_master);
diff --git a/src/test/recovery/t/002_archiving.pl b/src/test/recovery/t/002_archiving.pl
new file mode 100644
index 0000000..c3e8465
--- /dev/null
+++ b/src/test/recovery/t/002_archiving.pl
@@ -0,0 +1,51 @@
+# Test for archiving with a warm standby
+use strict;
+use warnings;
+use TestLib;
+use Test::More tests => 1;
+use File::Copy;
+use RecoveryTest;
+
+# Initialize master node, doing archives
+my $node_master = make_master();
+my $backup_name = 'my_backup';
+enable_archiving($node_master);
+
+# Start it
+$node_master->startNode();
+
+# Take a backup from which the standby will be created
+$node_master->backupNode($backup_name);
+
+# Initialize standby node from backup, fetching WAL from archives
+my $node_standby = make_archive_standby($node_master, $backup_name);
+$node_standby->appendConf('postgresql.conf', qq(
+wal_retrieve_retry_interval = '100ms'
+));
+$node_standby->startNode();
+
+# Create some content on master
+psql $node_master->getConnStr(),
+	"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a";
+my $current_lsn = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+
+# Force archiving of the current WAL segment, making it available in the archives
+psql $node_master->getConnStr(), "SELECT pg_switch_xlog()";
+
+# Add some more content, it should not be present on standby
+psql $node_master->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(1001,2000))";
+
+# Wait until necessary replay has been done on standby
+my $caughtup_query = "SELECT '$current_lsn'::pg_lsn <= pg_last_xlog_replay_location()";
+poll_query_until($caughtup_query, $node_standby->getConnStr())
+	or die "Timed out while waiting for standby to catch up";
+
+my $result = psql $node_standby->getConnStr(),
+	"SELECT count(*) FROM tab_int";
+is($result, qq(1000), 'check content from archives');
+
+# Cleanup nodes
+teardown_node($node_standby);
+teardown_node($node_master);
diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl
new file mode 100644
index 0000000..995e8e4
--- /dev/null
+++ b/src/test/recovery/t/003_recovery_targets.pl
@@ -0,0 +1,135 @@
+# Test for recovery targets: name, timestamp, XID
+use strict;
+use warnings;
+use TestLib;
+use Test::More tests => 7;
+
+use RecoveryTest;
+
+# Create and test a standby from given backup, with a certain
+# recovery target.
+sub test_recovery_standby
+{
+	my $test_name = shift;
+	my $node_master = shift;
+	my $recovery_params = shift;
+	my $num_rows = shift;
+	my $until_lsn = shift;
+
+	my $node_standby = make_archive_standby($node_master, 'my_backup');
+
+	foreach my $param_item (@$recovery_params)
+	{
+		$node_standby->appendConf('recovery.conf',
+					   qq($param_item
+));
+	}
+
+	$node_standby->startNode();
+
+	# Wait until standby has replayed enough data
+	my $caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()";
+	poll_query_until($caughtup_query, $node_standby->getConnStr())
+		or die "Timed out while waiting for standby to catch up";
+
+	# Check the presence of the expected content on the standby
+	my $result = psql $node_standby->getConnStr(),
+		"SELECT count(*) FROM tab_int";
+	is($result, qq($num_rows), "check standby content for $test_name");
+
+	# Stop standby node
+	teardown_node($node_standby);
+}
+
+# Initialize master node
+my $node_master = make_master();
+enable_archiving($node_master);
+
+# Start it
+$node_master->startNode();
+
+# Create data before taking the backup, aimed at testing
+# recovery_target = 'immediate'
+psql $node_master->getConnStr(),
+	"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a";
+my $lsn1 = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+
+# Take backup from which all operations will be run
+$node_master->backupNode('my_backup');
+
+# Insert some data and save the current transaction ID, used
+# afterwards as a recovery target XID.
+psql $node_master->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(1001,2000))";
+my $recovery_txid = psql $node_master->getConnStr(),
+	"SELECT txid_current()";
+my $lsn2 = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+
+# More data, with recovery target timestamp
+psql $node_master->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(2001,3000))";
+my $recovery_time = psql $node_master->getConnStr(), "SELECT now()";
+my $lsn3 = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+
+# Even more data, this time with a recovery target name
+psql $node_master->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(3001,4000))";
+my $recovery_name = "my_target";
+my $lsn4 = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+psql $node_master->getConnStr(),
+	"SELECT pg_create_restore_point('$recovery_name')";
+
+# Force archiving of WAL file
+psql $node_master->getConnStr(), "SELECT pg_switch_xlog()";
+
+# Test recovery targets
+my @recovery_params = ( "recovery_target = 'immediate'" );
+test_recovery_standby('immediate target', $node_master,
+					  \@recovery_params,
+					  "1000", $lsn1);
+@recovery_params = ( "recovery_target_xid = '$recovery_txid'" );
+test_recovery_standby('XID', $node_master,
+					  \@recovery_params,
+					  "2000", $lsn2);
+@recovery_params = ( "recovery_target_time = '$recovery_time'" );
+test_recovery_standby('Time', $node_master,
+					  \@recovery_params,
+					  "3000", $lsn3);
+@recovery_params = ( "recovery_target_name = '$recovery_name'" );
+test_recovery_standby('Name', $node_master,
+					  \@recovery_params,
+					  "4000", $lsn4);
+
+# Multiple targets
+# The last entry has priority (note that an array, unlike a hash,
+# preserves the order of its items).
+@recovery_params = (
+	"recovery_target_name = '$recovery_name'",
+	"recovery_target_xid  = '$recovery_txid'",
+	"recovery_target_time = '$recovery_time'"
+);
+test_recovery_standby('Name + XID + Time', $node_master,
+					  \@recovery_params,
+					  "3000", $lsn3);
+@recovery_params = (
+	"recovery_target_time = '$recovery_time'",
+	"recovery_target_name = '$recovery_name'",
+	"recovery_target_xid  = '$recovery_txid'"
+);
+test_recovery_standby('Time + Name + XID', $node_master,
+					  \@recovery_params,
+					  "2000", $lsn2);
+@recovery_params = (
+	"recovery_target_xid  = '$recovery_txid'",
+	"recovery_target_time = '$recovery_time'",
+	"recovery_target_name = '$recovery_name'"
+);
+test_recovery_standby('XID + Time + Name', $node_master,
+					  \@recovery_params,
+					  "4000", $lsn4);
+
+teardown_node($node_master);
diff --git a/src/test/recovery/t/004_timeline_switch.pl b/src/test/recovery/t/004_timeline_switch.pl
new file mode 100644
index 0000000..509571c
--- /dev/null
+++ b/src/test/recovery/t/004_timeline_switch.pl
@@ -0,0 +1,72 @@
+# Test for timeline switch
+# Ensure that a standby is able to follow a newly-promoted standby
+# on a new timeline.
+use strict;
+use warnings;
+use File::Path qw(remove_tree);
+use TestLib;
+use Test::More tests => 1;
+
+use RecoveryTest;
+
+# Initialize master node
+my $node_master = make_master();
+$node_master->startNode();
+
+# Take backup
+my $backup_name = 'my_backup';
+$node_master->backupNode($backup_name);
+
+# Create two standbys linking to it
+my $node_standby_1 = make_stream_standby($node_master, $backup_name);
+$node_standby_1->startNode();
+my $node_standby_2 = make_stream_standby($node_master, $backup_name);
+$node_standby_2->startNode();
+
+# Create some content on master
+psql $node_master->getConnStr(),
+	"CREATE TABLE tab_int AS SELECT generate_series(1,1000) AS a";
+my $until_lsn = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+
+# Wait until standby 1 has replayed enough data
+my $caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()";
+poll_query_until($caughtup_query, $node_standby_1->getConnStr())
+	or die "Timed out while waiting for standby to catch up";
+
+# Stop and remove master, and promote standby 1, switching it to a new timeline
+teardown_node($node_master);
+system_or_bail('pg_ctl', '-w', '-D', $node_standby_1->getDataDir(),
+			   'promote');
+print "# Promoted standby 1\n";
+
+# Switch standby 2 to replay from standby 1
+remove_tree($node_standby_2->getDataDir() . '/recovery.conf');
+my $connstr_1 = $node_standby_1->getConnStr();
+$node_standby_2->appendConf('recovery.conf', qq(
+primary_conninfo='$connstr_1'
+standby_mode=on
+recovery_target_timeline='latest'
+));
+$node_standby_2->restartNode();
+
+# Insert some data in standby 1 and check its presence in standby 2
+# to ensure that the timeline switch has been done. Standby 1 needs
+# to exit recovery first before moving on with the test.
+poll_query_until("SELECT pg_is_in_recovery() <> true",
+				 $node_standby_1->getConnStr());
+psql $node_standby_1->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(1001,2000))";
+$until_lsn = psql $node_standby_1->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+$caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()";
+poll_query_until($caughtup_query, $node_standby_2->getConnStr())
+	or die "Timed out while waiting for standby to catch up";
+
+my $result = psql $node_standby_2->getConnStr(),
+	"SELECT count(*) FROM tab_int";
+is($result, qq(2000), 'check content of standby 2');
+
+# Stop nodes
+teardown_node($node_standby_2);
+teardown_node($node_standby_1);
diff --git a/src/test/recovery/t/005_replay_delay.pl b/src/test/recovery/t/005_replay_delay.pl
new file mode 100644
index 0000000..c209b55
--- /dev/null
+++ b/src/test/recovery/t/005_replay_delay.pl
@@ -0,0 +1,49 @@
+# Checks for recovery_min_apply_delay
+use strict;
+use warnings;
+use TestLib;
+use Test::More tests => 2;
+
+use RecoveryTest;
+
+# Initialize master node
+my $node_master = make_master();
+$node_master->startNode();
+
+# And some content
+psql $node_master->getConnStr(),
+	"CREATE TABLE tab_int AS SELECT generate_series(1,10) AS a";
+
+# Take backup
+my $backup_name = 'my_backup';
+$node_master->backupNode($backup_name);
+
+# Create streaming standby from backup
+my $node_standby = make_stream_standby($node_master, $backup_name);
+$node_standby->appendConf('recovery.conf', qq(
+recovery_min_apply_delay = '2s'
+));
+$node_standby->startNode();
+
+# Insert new content on master and check its visibility on the standby,
+# which applies WAL with the 2s delay configured above.
+psql $node_master->getConnStr(),
+	"INSERT INTO tab_int VALUES (generate_series(11,20))";
+sleep 1;
+# Here we should have only 10 rows
+my $result = psql $node_standby->getConnStr(),
+	"SELECT count(*) FROM tab_int";
+is($result, qq(10), 'check content before the 2s delay has elapsed');
+
+# Now wait for replay to complete on standby
+my $until_lsn = psql $node_master->getConnStr(),
+	"SELECT pg_current_xlog_location();";
+my $caughtup_query = "SELECT '$until_lsn'::pg_lsn <= pg_last_xlog_replay_location()";
+poll_query_until($caughtup_query, $node_standby->getConnStr())
+	or die "Timed out while waiting for standby to catch up";
+$result = psql $node_standby->getConnStr(), "SELECT count(*) FROM tab_int";
+is($result, qq(20), 'check content with delay of 2s');
+
+# Stop nodes
+teardown_node($node_standby);
+teardown_node($node_master);