This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this
push:
new 47edda592ba Fix some conflicts on test
47edda592ba is described below
commit 47edda592ba22b35f93c089368d4a495218dca4b
Author: Jinbao Chen <[email protected]>
AuthorDate: Sun Mar 8 21:36:49 2026 -0400
Fix some conflicts on test
---
src/test/kerberos/t/001_auth.pl | 8 -
src/test/ldap/t/001_auth.pl | 60 ------
src/test/recovery/t/001_stream_rep.pl | 13 +-
src/test/recovery/t/002_archiving.pl | 233 ---------------------
src/test/recovery/t/003_recovery_targets.pl | 21 +-
src/test/recovery/t/006_logical_decoding.pl | 10 -
src/test/recovery/t/009_twophase.pl | 13 --
.../recovery/t/010_logical_decoding_timelines.pl | 9 -
src/test/recovery/t/012_subtransactions.pl | 13 --
src/test/recovery/t/013_crash_restart.pl | 38 ----
src/test/recovery/t/014_unlogged_reinit.pl | 4 -
src/test/recovery/t/016_min_consistency.pl | 8 +-
src/test/recovery/t/017_shm.pl | 17 --
src/test/recovery/t/018_wal_optimize.pl | 110 +++++-----
src/test/recovery/t/019_replslot_limit.pl | 64 +-----
src/test/recovery/t/020_archive_status.pl | 10 -
src/test/recovery/t/021_row_visibility.pl | 94 +++------
src/test/recovery/t/022_crash_temp_files.pl | 62 ------
src/test/recovery/t/023_pitr_prepared_xact.pl | 12 --
src/test/recovery/t/024_archive_recovery.pl | 11 +-
src/test/recovery/t/025_stuck_on_old_timeline.pl | 4 -
src/test/recovery/t/026_overwrite_contrecord.pl | 24 ---
src/test/recovery/t/031_recovery_conflict.pl | 230 --------------------
src/test/regress/expected/compression.out | 6 +-
src/test/regress/expected/largeobject.out | 13 --
src/test/regress/expected/largeobject_1.out | 13 --
src/test/regress/expected/rules.out | 24 +--
src/test/regress/expected/xml_1.out | 5 -
src/test/regress/expected/xmlmap_1.out | 5 -
src/test/regress/sql/largeobject.sql | 13 --
src/test/regress/sql/rules.sql | 8 +-
src/test/ssl/t/001_ssltests.pl | 18 --
src/test/ssl/t/002_scram.pl | 15 --
33 files changed, 94 insertions(+), 1094 deletions(-)
diff --git a/src/test/kerberos/t/001_auth.pl b/src/test/kerberos/t/001_auth.pl
index 01b3b3860a6..14f5f393ee9 100644
--- a/src/test/kerberos/t/001_auth.pl
+++ b/src/test/kerberos/t/001_auth.pl
@@ -24,15 +24,7 @@ use PostgreSQL::Test::Cluster;
use Test::More;
use Time::HiRes qw(usleep);
-<<<<<<< HEAD
-if ($ENV{with_gssapi} eq 'yes')
-{
- plan tests => 45;
-}
-else
-=======
if ($ENV{with_gssapi} ne 'yes')
->>>>>>> REL_16_9
{
plan skip_all => 'GSSAPI/Kerberos not supported by this build';
}
diff --git a/src/test/ldap/t/001_auth.pl b/src/test/ldap/t/001_auth.pl
index f66acd18072..ef8ad1762aa 100644
--- a/src/test/ldap/t/001_auth.pl
+++ b/src/test/ldap/t/001_auth.pl
@@ -41,67 +41,7 @@ my ($ldap_server, $ldap_port, $ldaps_port, $ldap_url,
) = $ldap->prop(qw(server port s_port url s_url basedn rootdn));
# don't bother to check the server's cert (though perhaps we should)
-<<<<<<< HEAD
-append_to_file(
- $ldap_conf,
- qq{TLS_REQCERT never
-});
-
-mkdir $ldap_datadir or die;
-mkdir $slapd_certs or die;
-
-system_or_bail "openssl", "req", "-new", "-nodes", "-keyout",
- "$slapd_certs/ca.key", "-x509", "-out", "$slapd_certs/ca.crt", "-subj",
- "/CN=CA";
-system_or_bail "openssl", "req", "-new", "-nodes", "-keyout",
- "$slapd_certs/server.key", "-out", "$slapd_certs/server.csr", "-subj",
- "/CN=server";
-system_or_bail "openssl", "x509", "-req", "-in", "$slapd_certs/server.csr",
- "-CA", "$slapd_certs/ca.crt", "-CAkey", "$slapd_certs/ca.key",
- "-CAcreateserial", "-out", "$slapd_certs/server.crt";
-
-system_or_bail $slapd, '-f', $slapd_conf, '-h', "$ldap_url $ldaps_url";
-
-END
-{
- kill 'INT', `cat $slapd_pidfile` if -f $slapd_pidfile;
-}
-
-append_to_file($ldap_pwfile, $ldap_rootpw);
-chmod 0600, $ldap_pwfile or die;
-
-# wait until slapd accepts requests
-my $retries = 0;
-while (1)
-{
- last
- if (
- system_log(
- "ldapsearch", "-sbase",
- "-H", $ldap_url,
- "-b", $ldap_basedn,
- "-D", $ldap_rootdn,
- "-y", $ldap_pwfile,
- "-n", "'objectclass=*'") == 0);
- die "cannot connect to slapd" if ++$retries >= 300;
- note "waiting for slapd to accept requests...";
- Time::HiRes::usleep(1000000);
-}
-
-$ENV{'LDAPURI'} = $ldap_url;
-$ENV{'LDAPBINDDN'} = $ldap_rootdn;
-$ENV{'LDAPCONF'} = $ldap_conf;
-
-note "loading LDAP data";
-
-system_or_bail 'ldapadd', '-x', '-y', $ldap_pwfile, '-f', 'authdata.ldif';
-system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret1',
- 'uid=test1,dc=example,dc=net';
-system_or_bail 'ldappasswd', '-x', '-y', $ldap_pwfile, '-s', 'secret2',
- 'uid=test2,dc=example,dc=net';
-=======
$ENV{'LDAPTLS_REQCERT'} = "never";
->>>>>>> REL_16_9
note "setting up PostgreSQL instance";
diff --git a/src/test/recovery/t/001_stream_rep.pl
b/src/test/recovery/t/001_stream_rep.pl
index 468594b394b..710bdd54dab 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -98,15 +98,6 @@ note "testing connection parameter \"target_session_attrs\"";
sub test_target_session_attrs
{
local $Test::Builder::Level = $Test::Builder::Level + 1;
-<<<<<<< HEAD
-
- my $node1 = shift;
- my $node2 = shift;
- my $target_node = shift;
- my $mode = shift;
- my $status = shift;
-=======
->>>>>>> REL_16_9
my $node1 = shift;
my $node2 = shift;
@@ -323,7 +314,6 @@ note "switching to physical replication slot";
# also increase the standby feedback interval to ensure timely updates.
my ($slotname_1, $slotname_2) = ('standby_1', 'standby_2');
$node_primary->append_conf('postgresql.conf', "max_replication_slots = 4");
-$node_primary->append_conf('postgresql.conf', "wal_keep_size = 0");
$node_primary->restart;
is( $node_primary->psql(
'postgres',
@@ -518,8 +508,7 @@ $node_primary->psql(
'postgres', "
CREATE TABLE tab_phys_slot (a int);
INSERT INTO tab_phys_slot VALUES (generate_series(1,10));
- SELECT pg_switch_wal();
- checkpoint;");
+ SELECT pg_switch_wal();");
my $current_lsn =
$node_primary->safe_psql('postgres', "SELECT pg_current_wal_lsn();");
chomp($current_lsn);
diff --git a/src/test/recovery/t/002_archiving.pl
b/src/test/recovery/t/002_archiving.pl
index 2d1615452f4..4aa502bb66d 100644
--- a/src/test/recovery/t/002_archiving.pl
+++ b/src/test/recovery/t/002_archiving.pl
@@ -4,15 +4,9 @@
# test for archiving with hot standby
use strict;
use warnings;
-<<<<<<< HEAD
-use PostgresNode;
-use TestLib;
-use Test::More tests => 14;
-=======
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
->>>>>>> REL_16_9
use File::Copy;
# Initialize primary node, doing archives
@@ -21,7 +15,6 @@ $node_primary->init(
has_archiving => 1,
allows_streaming => 1);
my $backup_name = 'my_backup';
-my $primary_connstr = $node_primary->connstr;
# Start it
$node_primary->start;
@@ -30,11 +23,7 @@ $node_primary->start;
$node_primary->backup($backup_name);
# Initialize standby node from backup, fetching WAL from archives
-<<<<<<< HEAD
-my $node_standby = get_new_node('standby');
-=======
my $node_standby = PostgreSQL::Test::Cluster->new('standby');
->>>>>>> REL_16_9
# Note that this makes the standby store its contents on the archives
# of the primary.
$node_standby->init_from_backup($node_primary, $backup_name,
@@ -84,74 +73,6 @@ my $result =
$node_standby->safe_psql('postgres', "SELECT count(*) FROM tab_int");
is($result, qq(1000), 'check content from archives');
-<<<<<<< HEAD
-$node_standby->append_conf('postgresql.conf',
qq(primary_conninfo='$primary_connstr'));
-$node_standby->restart;
-
-###################### partial wal file tests ############################
-# Test the following scenario:
-# primary is alive but the standby is promoted. In this case, the last wal file
-# on the old timeline in the mirror's pg_wal dir is renamed with the suffix
".partial"
-# This partial file also gets archived. The original wal file only gets
archived once
-# the user runs pg_rewind.
-
-# Consider the following example: Let's assume that 0000100004 is the current
-# wal file on the primary
-
-# start with a primary and standby pair
-# add data to primary
-# contents of pg_wal on primary
-# 0000100001
-# .....
-# 0000100003
-# 0000100004 - current wal file on primary
-#
-# primary is alive but standby gets promoted
-# contents of pg_wal on standby
-# 0000100001
-# ....
-# 0000100003
-# 0000100004.partial (note that 0000100004 does not exist on the
standby)
-# 0000200004 - current wal file on standby
-#
-# Contents of the archive location
-# 0000100003
-# 0000100004
-#
-# stop primary with pg_ctl stop -m fast
-# contents of pg_wal on primary
-# 0000100004 on the primary gets flushed and gets archived
-# 0000100004.done gets created on primary
-# Contents of the archive location
-# 0000100003
-# 0000100004.partial
-# 0000100004
-# pg_rewind
-# copies from standby to primary
-# removes 0000100004 and 0000100004.done from primary's pg_wal dir
-
-$node_primary->safe_psql('postgres',
- "CREATE TABLE test_partial_wal as SELECT generate_series(1,1000)");
-my $latest_wal_filename_old_timeline = $node_primary->safe_psql('postgres',
"SELECT pg_walfile_name(pg_current_wal_lsn());");
-my $latest_done_old_timeline = '/pg_wal/archive_status/' .
$latest_wal_filename_old_timeline . '.done';
-my $latest_wal_filepath_old_timeline = $node_primary->data_dir . '/pg_wal/' .
$latest_wal_filename_old_timeline;
-my $latest_archived_wal_old_timeline = $node_primary->archive_dir . '/' .
$latest_wal_filename_old_timeline;
-
-my $partial_wal_file_path = '/pg_wal/' . $latest_wal_filename_old_timeline .
'.partial';
-my $partial_done_file_path = '/pg_wal/archive_status/' .
$latest_wal_filename_old_timeline . '.partial.done';
-my $archived_partial_wal_file = $node_primary->archive_dir . '/' .
$latest_wal_filename_old_timeline . '.partial';
-
-#assert that 0000100004 exists on primary but it's not archived
-ok(-f "$latest_wal_filepath_old_timeline", 'latest wal file from the old
timeline exists on primary');
-ok(!-f "$latest_archived_wal_old_timeline", 'latest wal file from the old
timeline is not archived yet');
-
-#Only promote standby once the latest wal file from the primary's current
timeline has been streamed to the standby
-my $primary_current_wal_loc = $node_primary->safe_psql('postgres', "SELECT
pg_current_wal_lsn();");
-my $query = "SELECT pg_last_wal_receive_lsn() >=
'$primary_current_wal_loc'::pg_lsn;";
-$node_standby->poll_query_until('postgres', $query)
- or die "Timed out while waiting for standby to receive the latest wal file";
-
-=======
# archive_cleanup_command is executed after generating a restart point,
# with a checkpoint.
$node_standby->safe_psql('postgres', q{CHECKPOINT});
@@ -167,161 +88,8 @@ ok( !-f "$data_dir/$recovery_end_command_file",
# switches, promote the existing standby first. Then create a second
# standby based on the primary, using its archives. Finally, the second
# standby is promoted.
->>>>>>> REL_16_9
$node_standby->promote;
-# Force a checkpoint after the promotion. pg_rewind looks at the control
-# file to determine what timeline the server is on, and that isn't updated
-# immediately at promotion, but only at the next checkpoint. When running
-# pg_rewind in remote mode, it's possible that we complete the test steps
-# after promotion so quickly that when pg_rewind runs, the standby has not
-# performed a checkpoint after promotion yet.
-$node_standby->safe_psql('postgres', "checkpoint");# wait for the partial file
to get archived
-
-<<<<<<< HEAD
-$node_standby->safe_psql('postgres',
- "INSERT INTO test_partial_wal SELECT generate_series(1,1000)");
-# Once we promote the standby, it will be on a new timeline and we want to
assert
-# that the latest file from the old timeline is archived properly
-post_standby_promotion_tests();
-
-$node_primary->stop;
-$node_standby->safe_psql('postgres',
- "INSERT INTO test_partial_wal SELECT generate_series(1,1000)");
-
-post_primary_stop_tests();
-
-my $tmp_check = TestLib::tempdir;
-my $primary_datadir = $node_primary->data_dir;
-# Keep a temporary postgresql.conf for primary node or it would be
-# overwritten during the rewind.
-copy("$primary_datadir/postgresql.conf",
- "$tmp_check/primary-postgresql.conf.tmp");
-
-local $ENV{PGOPTIONS} = '-c gp_role=utility';
-command_ok(['pg_rewind',
- "--debug",
- "--source-server",
- 'port='. $node_standby->port . ' dbname=postgres',
- '--target-pgdata=' . $node_primary->data_dir],
- 'pg_rewind');
-
-post_pg_rewind_tests();
-
-# Now move back postgresql.conf with old settings
-move("$tmp_check/primary-postgresql.conf.tmp",
- "$primary_datadir/postgresql.conf");
-
-# Start the primary
-$node_primary->start;
-$node_primary->safe_psql('postgres',
- "INSERT INTO test_partial_wal SELECT generate_series(1,1000)");
-
-sub wait_until_file_exists
-{
- my ($filepath, $filedesc) = @_;
- my $query = "SELECT size IS NOT NULL FROM pg_stat_file('$filepath')";
- # we aren't querying primary because we stop the primary node for some
of the
- # scenarios
- $node_standby->poll_query_until('postgres', $query)
- or die "Timed out while waiting for $filedesc $filepath";
-}
-
-sub post_standby_promotion_tests
-{
- #assert that 0000100004 exists on primary
- wait_until_file_exists($latest_wal_filepath_old_timeline, "latest wal
file from the old timeline to exist on primary");
- #assert that 0000100004.partial exists on standby
- wait_until_file_exists($node_standby->data_dir .
$partial_wal_file_path, "partial wal file from the old timeline to exist on
standby");
- #assert that 0000100004.partial.done exists on standby
- wait_until_file_exists($node_standby->data_dir .
$partial_done_file_path, "partial done file from the old timeline to exist on
standby");
- #assert that 0000100004.partial got archived
- wait_until_file_exists($archived_partial_wal_file, "latest partial wal
file from the old timeline to be archived");
-
- #assert that 0000100004.partial doesn't exist on primary
- ok(!-f $node_primary->data_dir . $partial_wal_file_path, 'partial wal
file from the old timeline should not exist on primary');
- #assert that 0000100004.partial.done doesn't exist on primary
- ok(!-f $node_primary->data_dir . $partial_done_file_path, 'partial done
file from the old timeline should not exist on primary');
- #assert that 0000100004.done doesn't exist on primary
- ok(!-f $node_primary->data_dir . $latest_done_old_timeline, 'done file
from the old timeline should not exist on primary');
- #assert that 0000100004 hasn't been archived
- ok(!-f $latest_archived_wal_old_timeline, 'wal file from the old
timeline should not be archived');
- #assert that 0000100004 doesn't exist on standby
- ok(!-f $node_standby->data_dir . '/pg_wal/' .
$latest_wal_filename_old_timeline, 'latest wal file from the old timeline
should not exist on the standby');
-
- check_history_files();
-}
-
-sub post_primary_stop_tests
-{
- #assert that 0000100004 still exists on primary
- wait_until_file_exists($latest_wal_filepath_old_timeline, "latest wal
file from the old timeline to exist on primary");
- #assert that 0000100004.done exists on primary
- wait_until_file_exists($node_primary->data_dir .
$latest_done_old_timeline, "done file from the old timeline to exist on
primary");
- #assert that 0000100004 is archived
- wait_until_file_exists($latest_archived_wal_old_timeline, "latest wal
file from the old timeline to be archived");
-}
-
-sub post_pg_rewind_tests
-{
- #assert that 0000100004.partial exists on primary
- wait_until_file_exists($node_primary->data_dir .
$partial_wal_file_path, "latest partial wal file from the old timeline to exist
on primary");
- #assert that 0000100004.partial.done exists on primary
- wait_until_file_exists($node_primary->data_dir .
$partial_done_file_path, "latest partial done file from the old timeline to
exist on primary");
-
- #assert that 0000100004 is still archived
- wait_until_file_exists($latest_archived_wal_old_timeline, "latest wal
file from the old timeline to be archived");
- #partial wal file is still archived
- wait_until_file_exists($archived_partial_wal_file, "latest partial wal
file from the old timeline to be archived");
-
- #assert that 0000100004 does not exist on primary
- ok(!-f "$latest_wal_filepath_old_timeline", 'latest wal file from the
old timeline should not exist on standby');
- #assert that 0000100004.done does not exist on primary
- ok(!-f $node_primary->data_dir . $latest_done_old_timeline, 'latest
done file from the old timeline should not exist on primary');
-
-}
-
-sub check_history_files
-{
- # Check the presence of temporary files specifically generated during
- # archive recovery. To ensure the presence of the temporary history
- # file, switch to a timeline large enough to allow a standby to recover
- # a history file from an archive. As this requires at least two
timeline
- # switches, promote the existing standby first. Then create a second
- # standby based on the primary, using its archives. Finally, the second
- # standby is promoted.
-
- # Wait until the history file has been stored on the archives of the
- # primary once the promotion of the standby completes. This ensures
that
- # the second standby created below will be able to restore this file,
- # creating a RECOVERYHISTORY.
- my $primary_archive = $node_primary->archive_dir;
- wait_until_file_exists("$primary_archive/00000002.history", "history
file to be archived");
-
- my $node_standby2 = get_new_node('standby2');
- $node_standby2->init_from_backup($node_primary, $backup_name,
- has_streaming => 1, has_restoring => 1);
- $node_standby2->start;
-
- my $log_location = -s $node_standby2->logfile;
-
- # Now promote standby2, and check that temporary files specifically
- # generated during archive recovery are removed by the end of recovery.
- $node_standby2->promote;
-
- # Check the logs of the standby to see that the commands have failed.
- my $log_contents = slurp_file($node_standby2->logfile,
$log_location);
- my $node_standby2_data = $node_standby2->data_dir;
- like(
- $log_contents,
- qr/restored log file "00000002.history" from archive/s,
- "00000002.history retrieved from the archives");
- ok( !-f "$node_standby2_data/pg_wal/RECOVERYHISTORY",
- "RECOVERYHISTORY removed after promotion");
- ok( !-f "$node_standby2_data/pg_wal/RECOVERYXLOG",
- "RECOVERYXLOG removed after promotion");
-}
-=======
# Wait until the history file has been stored on the archives of the
# primary once the promotion of the standby completes. This ensures that
# the second standby created below will be able to restore this file,
@@ -374,4 +142,3 @@ like(
"recovery_end_command failure detected in logs after promotion");
done_testing();
->>>>>>> REL_16_9
diff --git a/src/test/recovery/t/003_recovery_targets.pl
b/src/test/recovery/t/003_recovery_targets.pl
index 54cf22d884c..e882ce20773 100644
--- a/src/test/recovery/t/003_recovery_targets.pl
+++ b/src/test/recovery/t/003_recovery_targets.pl
@@ -15,16 +15,6 @@ use Time::HiRes qw(usleep);
sub test_recovery_standby
{
local $Test::Builder::Level = $Test::Builder::Level + 1;
-<<<<<<< HEAD
-
- my $test_name = shift;
- my $node_name = shift;
- my $node_primary = shift;
- my $recovery_params = shift;
- my $num_rows = shift;
- my $until_lsn = shift;
-=======
->>>>>>> REL_16_9
my $test_name = shift;
my $node_name = shift;
@@ -67,7 +57,7 @@ $node_primary->init(has_archiving => 1, allows_streaming =>
1);
# Bump the transaction ID epoch. This is useful to stress the portability
# of recovery_target_xid parsing.
-system('echo yes|pg_resetwal', '--epoch', '1', $node_primary->data_dir);
+system_or_bail('pg_resetwal', '--epoch', '1', $node_primary->data_dir);
# Start it
$node_primary->start;
@@ -178,21 +168,12 @@ $node_standby->append_conf('postgresql.conf',
run_log(
[
-<<<<<<< HEAD
- 'pg_ctl', '-D', $node_standby->data_dir, '-l',
- $node_standby->logfile, 'start', '-o',
"--cluster-name=standby_8 -c gp_role=utility --gp_dbid=9 --gp_contentid=0"
- ]);
-
-# wait up to 180s for postgres to terminate
-foreach my $i (0..1800)
-=======
'pg_ctl', '-D', $node_standby->data_dir, '-l',
$node_standby->logfile, 'start'
]);
# wait for postgres to terminate
foreach my $i (0 .. 10 * $PostgreSQL::Test::Utils::timeout_default)
->>>>>>> REL_16_9
{
last if !-f $node_standby->data_dir . '/postmaster.pid';
usleep(100_000);
diff --git a/src/test/recovery/t/006_logical_decoding.pl
b/src/test/recovery/t/006_logical_decoding.pl
index 7f677cd75ba..96210b8bab7 100644
--- a/src/test/recovery/t/006_logical_decoding.pl
+++ b/src/test/recovery/t/006_logical_decoding.pl
@@ -107,14 +107,9 @@ $node_primary->safe_psql('postgres',
);
my $stdout_recv = $node_primary->pg_recvlogical_upto(
-<<<<<<< HEAD
- 'postgres', 'test_slot', $endpos, $TestLib::timeout_default,
- 'include-xids' => '0',
-=======
'postgres', 'test_slot', $endpos,
$PostgreSQL::Test::Utils::timeout_default,
'include-xids' => '0',
->>>>>>> REL_16_9
'skip-empty-xacts' => '1');
chomp($stdout_recv);
is($stdout_recv, $expected,
@@ -125,14 +120,9 @@ $node_primary->poll_query_until('postgres',
) or die "slot never became inactive";
$stdout_recv = $node_primary->pg_recvlogical_upto(
-<<<<<<< HEAD
- 'postgres', 'test_slot', $endpos, $TestLib::timeout_default,
- 'include-xids' => '0',
-=======
'postgres', 'test_slot', $endpos,
$PostgreSQL::Test::Utils::timeout_default,
'include-xids' => '0',
->>>>>>> REL_16_9
'skip-empty-xacts' => '1');
chomp($stdout_recv);
is($stdout_recv, '', 'pg_recvlogical acknowledged changes');
diff --git a/src/test/recovery/t/009_twophase.pl
b/src/test/recovery/t/009_twophase.pl
index fa791839794..fe7e8e79802 100644
--- a/src/test/recovery/t/009_twophase.pl
+++ b/src/test/recovery/t/009_twophase.pl
@@ -5,22 +5,9 @@
use strict;
use warnings;
-<<<<<<< HEAD
-use PostgresNode;
-use TestLib;
-
-# GPDB: Effectively disable this TAP test. We cannot run PREPARE
-# TRANSACTION in utility-mode. We need at least 1 test so create a
-# dummy one.
-#use Test::More tests => 24;
-use Test::More tests => 1;
-is(-1, -1, "Disable this TAP test");
-exit;
-=======
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
->>>>>>> REL_16_9
my $psql_out = '';
my $psql_rc = '';
diff --git a/src/test/recovery/t/010_logical_decoding_timelines.pl
b/src/test/recovery/t/010_logical_decoding_timelines.pl
index 35544b34f7e..be518e437ae 100644
--- a/src/test/recovery/t/010_logical_decoding_timelines.pl
+++ b/src/test/recovery/t/010_logical_decoding_timelines.pl
@@ -158,11 +158,7 @@ like(
($ret, $stdout, $stderr) = $node_replica->psql(
'postgres',
"SELECT data FROM pg_logical_slot_peek_changes('before_basebackup',
NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');",
-<<<<<<< HEAD
- timeout => $TestLib::timeout_default);
-=======
timeout => $PostgreSQL::Test::Utils::timeout_default);
->>>>>>> REL_16_9
is($ret, 0, 'replay from slot before_basebackup succeeds');
my $final_expected_output_bb = q(BEGIN
@@ -191,13 +187,8 @@ my $endpos = $node_replica->safe_psql('postgres',
$stdout = $node_replica->pg_recvlogical_upto(
'postgres', 'before_basebackup',
-<<<<<<< HEAD
- $endpos, $TestLib::timeout_default,
- 'include-xids' => '0',
-=======
$endpos, $PostgreSQL::Test::Utils::timeout_default,
'include-xids' => '0',
->>>>>>> REL_16_9
'skip-empty-xacts' => '1');
# walsender likes to add a newline
diff --git a/src/test/recovery/t/012_subtransactions.pl
b/src/test/recovery/t/012_subtransactions.pl
index 997df1e4ac7..91ae79dd514 100644
--- a/src/test/recovery/t/012_subtransactions.pl
+++ b/src/test/recovery/t/012_subtransactions.pl
@@ -5,22 +5,9 @@
use strict;
use warnings;
-<<<<<<< HEAD
-use PostgresNode;
-use TestLib;
-
-# GPDB: Effectively disable this TAP test. We cannot run PREPARE
-# TRANSACTION in utility-mode. We need at least 1 test so create a
-# dummy one.
-#use Test::More tests => 12;
-use Test::More tests => 1;
-is(-1, -1, "Disable this TAP test");
-exit;
-=======
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
->>>>>>> REL_16_9
# Setup primary node
my $node_primary = PostgreSQL::Test::Cluster->new("primary");
diff --git a/src/test/recovery/t/013_crash_restart.pl
b/src/test/recovery/t/013_crash_restart.pl
index 379ee0be067..ce57792f312 100644
--- a/src/test/recovery/t/013_crash_restart.pl
+++ b/src/test/recovery/t/013_crash_restart.pl
@@ -19,18 +19,7 @@ use Test::More;
my $psql_timeout = IPC::Run::timer($PostgreSQL::Test::Utils::timeout_default);
-<<<<<<< HEAD
-
-# To avoid hanging while expecting some specific input from a psql
-# instance being driven by us, add a timeout high enough that it
-# should never trigger even on very slow machines, unless something
-# is really wrong.
-my $psql_timeout = IPC::Run::timer($TestLib::timeout_default);
-
-my $node = get_new_node('primary');
-=======
my $node = PostgreSQL::Test::Cluster->new('primary');
->>>>>>> REL_16_9
$node->init(allows_streaming => 1);
$node->start();
@@ -77,12 +66,8 @@ CREATE TABLE alive(status text);
INSERT INTO alive VALUES($$committed-before-sigquit$$);
SELECT pg_backend_pid();
];
-<<<<<<< HEAD
-ok(pump_until($killme, $psql_timeout, \$killme_stdout,
qr/[[:digit:]]+[\r\n]$/m),
-=======
ok( pump_until(
$killme, $psql_timeout, \$killme_stdout,
qr/[[:digit:]]+[\r\n]$/m),
->>>>>>> REL_16_9
'acquired pid for SIGQUIT');
my $pid = $killme_stdout;
chomp($pid);
@@ -94,13 +79,9 @@ $killme_stdin .= q[
BEGIN;
INSERT INTO alive VALUES($$in-progress-before-sigquit$$) RETURNING status;
];
-<<<<<<< HEAD
-ok(pump_until($killme, $psql_timeout, \$killme_stdout,
qr/in-progress-before-sigquit/m),
-=======
ok( pump_until(
$killme, $psql_timeout,
\$killme_stdout, qr/in-progress-before-sigquit/m),
->>>>>>> REL_16_9
'inserted in-progress-before-sigquit');
$killme_stdout = '';
$killme_stderr = '';
@@ -113,12 +94,8 @@ $monitor_stdin .= q[
SELECT $$psql-connected$$;
SELECT pg_sleep(3600);
];
-<<<<<<< HEAD
-ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
-=======
ok( pump_until(
$monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
->>>>>>> REL_16_9
'monitor connected');
$monitor_stdout = '';
$monitor_stderr = '';
@@ -172,12 +149,8 @@ $monitor->run();
$killme_stdin .= q[
SELECT pg_backend_pid();
];
-<<<<<<< HEAD
-ok(pump_until($killme, $psql_timeout, \$killme_stdout,
qr/[[:digit:]]+[\r\n]$/m),
-=======
ok( pump_until(
$killme, $psql_timeout, \$killme_stdout,
qr/[[:digit:]]+[\r\n]$/m),
->>>>>>> REL_16_9
"acquired pid for SIGKILL");
$pid = $killme_stdout;
chomp($pid);
@@ -190,13 +163,9 @@ INSERT INTO alive VALUES($$committed-before-sigkill$$)
RETURNING status;
BEGIN;
INSERT INTO alive VALUES($$in-progress-before-sigkill$$) RETURNING status;
];
-<<<<<<< HEAD
-ok(pump_until($killme, $psql_timeout, \$killme_stdout,
qr/in-progress-before-sigkill/m),
-=======
ok( pump_until(
$killme, $psql_timeout,
\$killme_stdout, qr/in-progress-before-sigkill/m),
->>>>>>> REL_16_9
'inserted in-progress-before-sigkill');
$killme_stdout = '';
$killme_stderr = '';
@@ -208,12 +177,8 @@ $monitor_stdin .= q[
SELECT $$psql-connected$$;
SELECT pg_sleep(3600);
];
-<<<<<<< HEAD
-ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
-=======
ok( pump_until(
$monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
->>>>>>> REL_16_9
'monitor connected');
$monitor_stdout = '';
$monitor_stderr = '';
@@ -281,8 +246,5 @@ is( $node->safe_psql(
'can still write after orderly restart');
$node->stop();
-<<<<<<< HEAD
-=======
done_testing();
->>>>>>> REL_16_9
diff --git a/src/test/recovery/t/014_unlogged_reinit.pl
b/src/test/recovery/t/014_unlogged_reinit.pl
index 3e6970ff7c4..3591b3309e6 100644
--- a/src/test/recovery/t/014_unlogged_reinit.pl
+++ b/src/test/recovery/t/014_unlogged_reinit.pl
@@ -44,12 +44,8 @@ is($node->safe_psql('postgres', "SELECT
nextval('seq_unlogged')"),
my $tablespaceDir = PostgreSQL::Test::Utils::tempdir;
-<<<<<<< HEAD
-$node->safe_psql('postgres', "CREATE TABLESPACE ts1 LOCATION
'$tablespaceDir'");
-=======
$node->safe_psql('postgres',
"CREATE TABLESPACE ts1 LOCATION '$tablespaceDir'");
->>>>>>> REL_16_9
$node->safe_psql('postgres',
'CREATE UNLOGGED TABLE ts1_unlogged (id int) TABLESPACE ts1');
diff --git a/src/test/recovery/t/016_min_consistency.pl
b/src/test/recovery/t/016_min_consistency.pl
index 4287fe25dc4..81f7a43c079 100644
--- a/src/test/recovery/t/016_min_consistency.pl
+++ b/src/test/recovery/t/016_min_consistency.pl
@@ -52,9 +52,7 @@ $primary->init(allows_streaming => 1);
# file. Autovacuum is disabled so as there is no risk of having other
# processes than the checkpointer doing page flushes.
$primary->append_conf("postgresql.conf", <<EOF);
-# The minimum on GPDB is higher
-#shared_buffers = 128kB
-shared_buffers = 512kB
+shared_buffers = 128kB
autovacuum = off
EOF
@@ -68,12 +66,10 @@ $standby->init_from_backup($primary, 'bkp', has_streaming
=> 1);
$standby->start;
# Create base table whose data consistency is checked.
-# Use more data in GPDB, because the block size is larger, and because
-# in GPDB the data will be distributed across segments.
$primary->safe_psql(
'postgres', "
CREATE TABLE test1 (a int) WITH (fillfactor = 10);
-INSERT INTO test1 SELECT generate_series(1, 10000 * 100);");
+INSERT INTO test1 SELECT generate_series(1, 10000);");
# Take a checkpoint and enforce post-checkpoint full page writes
# which makes the startup process replay those pages, updating
diff --git a/src/test/recovery/t/017_shm.pl b/src/test/recovery/t/017_shm.pl
index 2b4e88f1aac..74359e0e388 100644
--- a/src/test/recovery/t/017_shm.pl
+++ b/src/test/recovery/t/017_shm.pl
@@ -131,11 +131,7 @@ my $slow_client = IPC::Run::start(
\$stdout,
'2>',
\$stderr,
-<<<<<<< HEAD
- IPC::Run::timeout(5 * $TestLib::timeout_default));
-=======
IPC::Run::timeout(5 * $PostgreSQL::Test::Utils::timeout_default));
->>>>>>> REL_16_9
ok( $gnat->poll_query_until(
'postgres',
"SELECT 1 FROM pg_stat_activity WHERE query = '$slow_query'",
'1'),
@@ -147,19 +143,11 @@ unlink($gnat->data_dir . '/postmaster.pid');
$gnat->rotate_logfile; # on Windows, can't open old log for writing
log_ipcs();
# Reject ordinary startup. Retry for the same reasons poll_start() does,
-<<<<<<< HEAD
-# every 0.1s for at least $TestLib::timeout_default seconds.
-my $pre_existing_msg = qr/pre-existing shared memory block/;
-{
- my $max_attempts = 10 * $TestLib::timeout_default;
- my $attempts = 0;
-=======
# every 0.1s for at least $PostgreSQL::Test::Utils::timeout_default seconds.
my $pre_existing_msg = qr/pre-existing shared memory block/;
{
my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default;
my $attempts = 0;
->>>>>>> REL_16_9
while ($attempts < $max_attempts)
{
last
@@ -205,13 +193,8 @@ sub poll_start
{
my ($node) = @_;
-<<<<<<< HEAD
- my $max_attempts = 10 * $TestLib::timeout_default;
- my $attempts = 0;
-=======
my $max_attempts = 10 * $PostgreSQL::Test::Utils::timeout_default;
my $attempts = 0;
->>>>>>> REL_16_9
while ($attempts < $max_attempts)
{
diff --git a/src/test/recovery/t/018_wal_optimize.pl
b/src/test/recovery/t/018_wal_optimize.pl
index ec0ae8da608..1d613eaede4 100644
--- a/src/test/recovery/t/018_wal_optimize.pl
+++ b/src/test/recovery/t/018_wal_optimize.pl
@@ -12,19 +12,9 @@
use strict;
use warnings;
-<<<<<<< HEAD
-use PostgresNode;
-use TestLib;
-
-# GPDB: Effectively disable some of these tests. We cannot run
-# PREPARE TRANSACTION in utility-mode.
-# use Test::More tests => 38;
-use Test::More tests => 36;
-=======
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
->>>>>>> REL_16_9
sub check_orphan_relfilenodes
{
@@ -124,23 +114,22 @@ wal_skip_threshold = 0
"SELECT count(*), min(id) FROM trunc_ins;");
is($result, qq(1|2), "wal_level = $wal_level, TRUNCATE INSERT");
- # GPDB: Disable this test.
- # # Same for prepared transaction.
- # # Tuples inserted after the truncation should be seen.
- # $node->safe_psql(
- # 'postgres', "
- # BEGIN;
- # CREATE TABLE twophase (id serial PRIMARY KEY);
- # INSERT INTO twophase VALUES (DEFAULT);
- # TRUNCATE twophase;
- # INSERT INTO twophase VALUES (DEFAULT);
- # PREPARE TRANSACTION 't';
- # COMMIT PREPARED 't';");
- # $node->stop('immediate');
- # $node->start;
- # $result = $node->safe_psql('postgres',
- # "SELECT count(*), min(id) FROM trunc_ins;");
- # is($result, qq(1|2), "wal_level = $wal_level, TRUNCATE INSERT PREPARE");
+ # Same for prepared transaction.
+ # Tuples inserted after the truncation should be seen.
+ $node->safe_psql(
+ 'postgres', "
+ BEGIN;
+ CREATE TABLE twophase (id serial PRIMARY KEY);
+ INSERT INTO twophase VALUES (DEFAULT);
+ TRUNCATE twophase;
+ INSERT INTO twophase VALUES (DEFAULT);
+ PREPARE TRANSACTION 't';
+ COMMIT PREPARED 't';");
+ $node->stop('immediate');
+ $node->start;
+ $result = $node->safe_psql('postgres',
+ "SELECT count(*), min(id) FROM trunc_ins;");
+ is($result, qq(1|2), "wal_level = $wal_level, TRUNCATE INSERT PREPARE");
# Writing WAL at end of xact, instead of syncing.
$node->safe_psql(
@@ -360,41 +349,38 @@ wal_skip_threshold = 0
 # Test consistency of INSERT, COPY and TRUNCATE in same transaction block
# with TRUNCATE triggers.
- SKIP: {
- skip('Triggers for statements are not yet supported', 1);
- $node->safe_psql(
- 'postgres', "
- BEGIN;
- CREATE TABLE trunc_trig (id serial PRIMARY KEY, id2
text);
- CREATE FUNCTION trunc_trig_before_stat_trig() RETURNS
trigger
- LANGUAGE plpgsql as \$\$
- BEGIN
- INSERT INTO trunc_trig VALUES (DEFAULT,
'triggered stat before');
- RETURN NULL;
- END; \$\$;
- CREATE FUNCTION trunc_trig_after_stat_trig() RETURNS
trigger
- LANGUAGE plpgsql as \$\$
- BEGIN
- INSERT INTO trunc_trig VALUES (DEFAULT,
'triggered stat before');
- RETURN NULL;
- END; \$\$;
- CREATE TRIGGER trunc_trig_before_stat_truncate
- BEFORE TRUNCATE ON trunc_trig
- FOR EACH STATEMENT EXECUTE PROCEDURE
trunc_trig_before_stat_trig();
- CREATE TRIGGER trunc_trig_after_stat_truncate
- AFTER TRUNCATE ON trunc_trig
- FOR EACH STATEMENT EXECUTE PROCEDURE
trunc_trig_after_stat_trig();
- INSERT INTO trunc_trig VALUES (DEFAULT, 1);
- TRUNCATE trunc_trig;
- COPY trunc_trig FROM '$copy_file' DELIMITER ',';
- COMMIT;");
- $node->stop('immediate');
- $node->start;
- $result =
- $node->safe_psql('postgres', "SELECT count(*) FROM
trunc_trig;");
- is($result, qq(4),
- "wal_level = $wal_level, TRUNCATE COPY with TRUNCATE
triggers");
- }
+ $node->safe_psql(
+ 'postgres', "
+ BEGIN;
+ CREATE TABLE trunc_trig (id serial PRIMARY KEY, id2 text);
+ CREATE FUNCTION trunc_trig_before_stat_trig() RETURNS trigger
+ LANGUAGE plpgsql as \$\$
+ BEGIN
+ INSERT INTO trunc_trig VALUES (DEFAULT, 'triggered stat
before');
+ RETURN NULL;
+ END; \$\$;
+ CREATE FUNCTION trunc_trig_after_stat_trig() RETURNS trigger
+ LANGUAGE plpgsql as \$\$
+ BEGIN
+ INSERT INTO trunc_trig VALUES (DEFAULT, 'triggered stat
before');
+ RETURN NULL;
+ END; \$\$;
+ CREATE TRIGGER trunc_trig_before_stat_truncate
+ BEFORE TRUNCATE ON trunc_trig
+ FOR EACH STATEMENT EXECUTE PROCEDURE
trunc_trig_before_stat_trig();
+ CREATE TRIGGER trunc_trig_after_stat_truncate
+ AFTER TRUNCATE ON trunc_trig
+ FOR EACH STATEMENT EXECUTE PROCEDURE
trunc_trig_after_stat_trig();
+ INSERT INTO trunc_trig VALUES (DEFAULT, 1);
+ TRUNCATE trunc_trig;
+ COPY trunc_trig FROM '$copy_file' DELIMITER ',';
+ COMMIT;");
+ $node->stop('immediate');
+ $node->start;
+ $result =
+ $node->safe_psql('postgres', "SELECT count(*) FROM trunc_trig;");
+ is($result, qq(4),
+ "wal_level = $wal_level, TRUNCATE COPY with TRUNCATE triggers");
# Test redo of temp table creation.
$node->safe_psql(
diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl
index c1080064f4c..33e50ad933b 100644
--- a/src/test/recovery/t/019_replslot_limit.pl
+++ b/src/test/recovery/t/019_replslot_limit.pl
@@ -7,17 +7,9 @@
use strict;
use warnings;
-<<<<<<< HEAD
-use TestLib;
-use PostgresNode;
-
-use File::Path qw(rmtree);
-use Test::More tests => $TestLib::windows_os ? 16 : 20;
-=======
use PostgreSQL::Test::Utils;
use PostgreSQL::Test::Cluster;
use Test::More;
->>>>>>> REL_16_9
use Time::HiRes qw(usleep);
$ENV{PGDATABASE} = 'postgres';
@@ -54,7 +46,6 @@ $node_standby->append_conf('postgresql.conf',
"primary_slot_name = 'rep1'");
$node_standby->start;
-$node_primary->safe_psql('postgres', "CHECKPOINT;");
# Wait until standby has replayed enough data
$node_primary->wait_for_catchup($node_standby);
@@ -90,13 +81,7 @@ is($result, "reserved|t", 'check that slot is working');
# The standby can reconnect to primary
$node_standby->start;
-<<<<<<< HEAD
-$node_primary->safe_psql('postgres', "CHECKPOINT;");
-$start_lsn = $node_primary->lsn('write');
-$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
-=======
$node_primary->wait_for_catchup($node_standby);
->>>>>>> REL_16_9
$node_standby->stop;
@@ -126,13 +111,7 @@ is($result, "reserved",
# The standby can reconnect to primary
$node_standby->start;
-<<<<<<< HEAD
-$node_primary->safe_psql('postgres', "CHECKPOINT;");
-$start_lsn = $node_primary->lsn('write');
-$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
-=======
$node_primary->wait_for_catchup($node_standby);
->>>>>>> REL_16_9
$node_standby->stop;
# wal_keep_size overrides max_slot_wal_keep_size
@@ -151,13 +130,7 @@ $result = $node_primary->safe_psql('postgres',
# The standby can reconnect to primary
$node_standby->start;
-<<<<<<< HEAD
-$node_primary->safe_psql('postgres', "CHECKPOINT;");
-$start_lsn = $node_primary->lsn('write');
-$node_primary->wait_for_catchup($node_standby, 'replay', $start_lsn);
-=======
$node_primary->wait_for_catchup($node_standby);
->>>>>>> REL_16_9
$node_standby->stop;
# Advance WAL again without checkpoint, reducing remain by 6 MB.
@@ -208,19 +181,10 @@ $node_primary->safe_psql('postgres',
'ALTER SYSTEM RESET max_wal_size; SELECT pg_reload_conf()');
$node_primary->safe_psql('postgres', "CHECKPOINT;");
my $invalidated = 0;
-<<<<<<< HEAD
-for (my $i = 0; $i < 10000; $i++)
-{
- if (find_in_log(
- $node_primary,
- "invalidating slot \"rep1\" because its restart_lsn
[0-9A-F/]+ exceeds max_slot_wal_keep_size",
- $logstart))
-=======
for (my $i = 0; $i < 10 * $PostgreSQL::Test::Utils::timeout_default; $i++)
{
if ($node_primary->log_contains(
'invalidating obsolete replication slot "rep1"',
$logstart))
->>>>>>> REL_16_9
{
$invalidated = 1;
last;
@@ -239,15 +203,9 @@ is($result, "rep1|f|t|lost|",
# Wait until current checkpoint ends
my $checkpoint_ended = 0;
-<<<<<<< HEAD
-for (my $i = 0; $i < 10000; $i++)
-{
- if (find_in_log($node_primary, "checkpoint complete: ", $logstart))
-=======
for (my $i = 0; $i < 10 * $PostgreSQL::Test::Utils::timeout_default; $i++)
{
if ($node_primary->log_contains("checkpoint complete: ", $logstart))
->>>>>>> REL_16_9
{
$checkpoint_ended = 1;
last;
@@ -291,13 +249,8 @@ ok($failed, 'check that replication has been broken');
$node_primary->stop;
$node_standby->stop;
-<<<<<<< HEAD
-my $node_primary2 = get_new_node('primary2');
-$node_primary2->init(allows_streaming => 1, extra => ['--wal-segsize=16']);
-=======
my $node_primary2 = PostgreSQL::Test::Cluster->new('primary2');
$node_primary2->init(allows_streaming => 1);
->>>>>>> REL_16_9
$node_primary2->append_conf(
'postgresql.conf', qq(
min_wal_size = 32MB
@@ -332,11 +285,7 @@ my @result =
SELECT pg_switch_wal();
CHECKPOINT;
SELECT 'finished';",
-<<<<<<< HEAD
- timeout => $TestLib::timeout_default));
-=======
timeout => $PostgreSQL::Test::Utils::timeout_default));
->>>>>>> REL_16_9
is($result[1], 'finished', 'check if checkpoint command is not blocked');
$node_primary2->stop;
@@ -361,7 +310,6 @@ $node_primary3->append_conf(
max_wal_size = 2MB
log_checkpoints = yes
max_slot_wal_keep_size = 1MB
- wal_keep_size = 0
));
$node_primary3->start;
$node_primary3->safe_psql('postgres',
@@ -426,12 +374,8 @@ $logstart = get_log_size($node_primary3);
kill 'STOP', $senderpid, $receiverpid;
advance_wal($node_primary3, 2);
-<<<<<<< HEAD
-my $max_attempts = $TestLib::timeout_default;
-=======
my $msg_logged = 0;
my $max_attempts = $PostgreSQL::Test::Utils::timeout_default;
->>>>>>> REL_16_9
while ($max_attempts-- >= 0)
{
if ($node_primary3->log_contains(
@@ -441,7 +385,7 @@ while ($max_attempts-- >= 0)
$msg_logged = 1;
last;
}
- sleep 5;
+ sleep 1;
}
ok($msg_logged, "walsender termination logged");
@@ -454,12 +398,8 @@ $node_primary3->poll_query_until('postgres',
"lost")
or die "timed out waiting for slot to be lost";
-<<<<<<< HEAD
-$max_attempts = $TestLib::timeout_default;
-=======
$msg_logged = 0;
$max_attempts = $PostgreSQL::Test::Utils::timeout_default;
->>>>>>> REL_16_9
while ($max_attempts-- >= 0)
{
if ($node_primary3->log_contains(
@@ -468,7 +408,7 @@ while ($max_attempts-- >= 0)
$msg_logged = 1;
last;
}
- sleep 5;
+ sleep 1;
}
ok($msg_logged, "slot invalidation logged");
diff --git a/src/test/recovery/t/020_archive_status.pl b/src/test/recovery/t/020_archive_status.pl
index dfedad86be3..fa24153d4b9 100644
--- a/src/test/recovery/t/020_archive_status.pl
+++ b/src/test/recovery/t/020_archive_status.pl
@@ -1,9 +1,5 @@
-<<<<<<< HEAD
-# Copyright (c) 2021, PostgreSQL Global Development Group
-=======
# Copyright (c) 2021-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
#
# Tests related to WAL archiving and recovery.
@@ -144,7 +140,6 @@ $primary->poll_query_until('postgres',
my $standby1 = PostgreSQL::Test::Cluster->new('standby');
$standby1->init_from_backup($primary, 'backup', has_restoring => 1);
$standby1->append_conf('postgresql.conf', "archive_mode = on");
-$standby1->append_conf('postgresql.conf', "wal_keep_size = 0");
my $standby1_data = $standby1->data_dir;
$standby1->start;
@@ -158,14 +153,9 @@ $standby1->safe_psql('postgres', q{CHECKPOINT});
# Recovery with archive_mode=on does not keep .ready signal files inherited
# from backup. Note that this WAL segment existed in the backup.
-# GPDB_13_MERGE_FIXME: disable the following test temporarily
-SKIP:
-{
- skip 'skip the case temporarily in PG 13', 1;
ok( !-f "$standby1_data/$segment_path_1_ready",
".ready file for WAL segment $segment_name_1 present in backup got
removed with archive_mode=on on standby"
);
-}
# Recovery with archive_mode=on should not create .ready files.
# Note that this segment did not exist in the backup.
diff --git a/src/test/recovery/t/021_row_visibility.pl b/src/test/recovery/t/021_row_visibility.pl
index 90e044cc00b..52a6a3509c2 100644
--- a/src/test/recovery/t/021_row_visibility.pl
+++ b/src/test/recovery/t/021_row_visibility.pl
@@ -6,19 +6,9 @@
use strict;
use warnings;
-<<<<<<< HEAD
-use PostgresNode;
-use TestLib;
-# GPDB: Effectively disable some of these tests. We cannot run
-# PREPARE TRANSACTION in utility-mode.
-# use Test::More tests => 10;
-use Test::More tests => 6;
-use Config;
-=======
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
->>>>>>> REL_16_9
# Initialize primary node
my $node_primary = PostgreSQL::Test::Cluster->new('primary');
@@ -42,11 +32,7 @@ $node_standby->append_conf('postgresql.conf',
'max_prepared_transactions=10');
$node_standby->start;
my $psql_timeout =
-<<<<<<< HEAD
- IPC::Run::timer(2 * $TestLib::timeout_default);
-=======
IPC::Run::timer(2 * $PostgreSQL::Test::Utils::timeout_default);
->>>>>>> REL_16_9
# One psql to primary and standby each, for all queries. That allows
# to check uncommitted changes being replicated and such.
@@ -128,59 +114,45 @@ ok( send_query_and_wait(
qr/first update\n\(1 row\)$/m),
'committed update visible');
-# GPDB: Disable this test.
-# #
-# # 5. Check that changes in prepared xacts is invisible
-# #
-# ok( send_query_and_wait(
-# \%psql_primary, q[
-# DELETE from test_visibility; -- delete old data, so we start with clean slate
-# BEGIN;
-# INSERT INTO test_visibility VALUES('inserted in prepared will_commit');
-# PREPARE TRANSACTION 'will_commit';],
-# qr/^PREPARE TRANSACTION$/m),
-# 'prepared will_commit');
-
-# ok( send_query_and_wait(
-# \%psql_primary, q[
-# BEGIN;
-# INSERT INTO test_visibility VALUES('inserted in prepared will_abort');
-# PREPARE TRANSACTION 'will_abort';
-# ],
-# qr/^PREPARE TRANSACTION$/m),
-# 'prepared will_abort');
-
-<<<<<<< HEAD
-# $node_primary->wait_for_catchup($node_standby, 'replay',
-# $node_primary->lsn('insert'));
-=======
+#
+# 5. Check that changes in prepared xacts is invisible
+#
+ok( send_query_and_wait(
+ \%psql_primary, q[
+DELETE from test_visibility; -- delete old data, so we start with clean slate
+BEGIN;
+INSERT INTO test_visibility VALUES('inserted in prepared will_commit');
+PREPARE TRANSACTION 'will_commit';],
+ qr/^PREPARE TRANSACTION$/m),
+ 'prepared will_commit');
+
+ok( send_query_and_wait(
+ \%psql_primary, q[
+BEGIN;
+INSERT INTO test_visibility VALUES('inserted in prepared will_abort');
+PREPARE TRANSACTION 'will_abort';
+ ],
+ qr/^PREPARE TRANSACTION$/m),
+ 'prepared will_abort');
+
$node_primary->wait_for_catchup($node_standby);
->>>>>>> REL_16_9
-
-# ok( send_query_and_wait(
-# \%psql_standby,
-# q[SELECT * FROM test_visibility ORDER BY data;],
-# qr/^\(0 rows\)$/m),
-# 'uncommitted prepared invisible');
-
-<<<<<<< HEAD
-# # For some variation, finish prepared xacts via separate connections
-# $node_primary->safe_psql('postgres', "COMMIT PREPARED 'will_commit';");
-# $node_primary->safe_psql('postgres', "ROLLBACK PREPARED 'will_abort';");
-# $node_primary->wait_for_catchup($node_standby, 'replay',
-# $node_primary->lsn('insert'));
-=======
+
+ok( send_query_and_wait(
+ \%psql_standby,
+ q[SELECT * FROM test_visibility ORDER BY data;],
+ qr/^\(0 rows\)$/m),
+ 'uncommitted prepared invisible');
+
# For some variation, finish prepared xacts via separate connections
$node_primary->safe_psql('postgres', "COMMIT PREPARED 'will_commit';");
$node_primary->safe_psql('postgres', "ROLLBACK PREPARED 'will_abort';");
$node_primary->wait_for_catchup($node_standby);
->>>>>>> REL_16_9
-# ok( send_query_and_wait(
-# \%psql_standby,
-# q[SELECT * FROM test_visibility ORDER BY data;],
-# qr/will_commit.*\n\(1 row\)$/m),
-# 'finished prepared visible');
+ok( send_query_and_wait(
+ \%psql_standby,
+ q[SELECT * FROM test_visibility ORDER BY data;],
+ qr/will_commit.*\n\(1 row\)$/m),
+ 'finished prepared visible');
# explicitly shut down psql instances gracefully - to avoid hangs
# or worse on windows
diff --git a/src/test/recovery/t/022_crash_temp_files.pl b/src/test/recovery/t/022_crash_temp_files.pl
index bc50b23605a..14fd8bfc7fc 100644
--- a/src/test/recovery/t/022_crash_temp_files.pl
+++ b/src/test/recovery/t/022_crash_temp_files.pl
@@ -14,23 +14,10 @@ if ($Config{osname} eq 'MSWin32')
plan skip_all => 'tests hang on Windows';
exit;
}
-<<<<<<< HEAD
-else
-{
- plan tests => 11;
-}
-=======
->>>>>>> REL_16_9
my $psql_timeout = IPC::Run::timer($PostgreSQL::Test::Utils::timeout_default);
-<<<<<<< HEAD
-my $psql_timeout = IPC::Run::timer($TestLib::timeout_default);
-
-my $node = get_new_node('node_crash');
-=======
my $node = PostgreSQL::Test::Cluster->new('node_crash');
->>>>>>> REL_16_9
$node->init();
$node->start();
@@ -66,12 +53,8 @@ my $killme = IPC::Run::start(
$killme_stdin .= q[
SELECT pg_backend_pid();
];
-<<<<<<< HEAD
-ok(pump_until($killme, $psql_timeout, \$killme_stdout,
qr/[[:digit:]]+[\r\n]$/m),
-=======
ok( pump_until(
$killme, $psql_timeout, \$killme_stdout,
qr/[[:digit:]]+[\r\n]$/m),
->>>>>>> REL_16_9
'acquired pid for SIGKILL');
my $pid = $killme_stdout;
chomp($pid);
@@ -100,12 +83,8 @@ BEGIN;
INSERT INTO tab_crash (a) VALUES(1);
SELECT $$insert-tuple-to-lock-next-insert$$;
];
-<<<<<<< HEAD
-pump_until($killme2, $psql_timeout, \$killme_stdout2,
qr/insert-tuple-to-lock-next-insert/m);
-=======
pump_until($killme2, $psql_timeout, \$killme_stdout2,
qr/insert-tuple-to-lock-next-insert/m);
->>>>>>> REL_16_9
$killme_stdout2 = '';
$killme_stderr2 = '';
@@ -118,13 +97,9 @@ BEGIN;
SELECT $$in-progress-before-sigkill$$;
INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
];
-<<<<<<< HEAD
-ok(pump_until($killme, $psql_timeout, \$killme_stdout,
qr/in-progress-before-sigkill/m),
-=======
ok( pump_until(
$killme, $psql_timeout,
\$killme_stdout, qr/in-progress-before-sigkill/m),
->>>>>>> REL_16_9
'insert in-progress-before-sigkill');
$killme_stdout = '';
$killme_stderr = '';
@@ -146,12 +121,8 @@ END; $c$;
SELECT $$insert-tuple-lock-waiting$$;
];
-<<<<<<< HEAD
-pump_until($killme2, $psql_timeout, \$killme_stdout2,
qr/insert-tuple-lock-waiting/m);
-=======
pump_until($killme2, $psql_timeout, \$killme_stdout2,
qr/insert-tuple-lock-waiting/m);
->>>>>>> REL_16_9
$killme_stdout2 = '';
$killme_stderr2 = '';
@@ -165,18 +136,11 @@ $killme->finish;
# Wait till the other session reports failure, ensuring that the postmaster
# has noticed its dead child and begun a restart cycle.
$killme_stdin2 .= qq[
-<<<<<<< HEAD
-SELECT pg_sleep($TestLib::timeout_default);
-];
-ok( pump_until(
- $killme2,
-=======
SELECT pg_sleep($PostgreSQL::Test::Utils::timeout_default);
];
ok( pump_until(
$killme2,
$psql_timeout,
->>>>>>> REL_16_9
\$killme_stderr2,
qr/WARNING: terminating connection because of crash of another
server process|server closed the connection unexpectedly|connection to server
was lost|could not send data to server/m
),
@@ -208,12 +172,8 @@ $killme->run();
$killme_stdin .= q[
SELECT pg_backend_pid();
];
-<<<<<<< HEAD
-ok(pump_until($killme, $psql_timeout, \$killme_stdout,
qr/[[:digit:]]+[\r\n]$/m),
-=======
ok( pump_until(
$killme, $psql_timeout, \$killme_stdout,
qr/[[:digit:]]+[\r\n]$/m),
->>>>>>> REL_16_9
'acquired pid for SIGKILL');
$pid = $killme_stdout;
chomp($pid);
@@ -230,12 +190,8 @@ BEGIN;
INSERT INTO tab_crash (a) VALUES(1);
SELECT $$insert-tuple-to-lock-next-insert$$;
];
-<<<<<<< HEAD
-pump_until($killme2, $psql_timeout, \$killme_stdout2,
qr/insert-tuple-to-lock-next-insert/m);
-=======
pump_until($killme2, $psql_timeout, \$killme_stdout2,
qr/insert-tuple-to-lock-next-insert/m);
->>>>>>> REL_16_9
$killme_stdout2 = '';
$killme_stderr2 = '';
@@ -248,13 +204,9 @@ BEGIN;
SELECT $$in-progress-before-sigkill$$;
INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
];
-<<<<<<< HEAD
-ok(pump_until($killme, $psql_timeout, \$killme_stdout,
qr/in-progress-before-sigkill/m),
-=======
ok( pump_until(
$killme, $psql_timeout,
\$killme_stdout, qr/in-progress-before-sigkill/m),
->>>>>>> REL_16_9
'insert in-progress-before-sigkill');
$killme_stdout = '';
$killme_stderr = '';
@@ -276,12 +228,8 @@ END; $c$;
SELECT $$insert-tuple-lock-waiting$$;
];
-<<<<<<< HEAD
-pump_until($killme2, $psql_timeout, \$killme_stdout2,
qr/insert-tuple-lock-waiting/m);
-=======
pump_until($killme2, $psql_timeout, \$killme_stdout2,
qr/insert-tuple-lock-waiting/m);
->>>>>>> REL_16_9
$killme_stdout2 = '';
$killme_stderr2 = '';
@@ -295,18 +243,11 @@ $killme->finish;
# Wait till the other session reports failure, ensuring that the postmaster
# has noticed its dead child and begun a restart cycle.
$killme_stdin2 .= qq[
-<<<<<<< HEAD
-SELECT pg_sleep($TestLib::timeout_default);
-];
-ok( pump_until(
- $killme2,
-=======
SELECT pg_sleep($PostgreSQL::Test::Utils::timeout_default);
];
ok( pump_until(
$killme2,
$psql_timeout,
->>>>>>> REL_16_9
\$killme_stderr2,
qr/WARNING: terminating connection because of crash of another
server process|server closed the connection unexpectedly|connection to server
was lost|could not send data to server/m
),
@@ -332,8 +273,5 @@ is( $node->safe_psql(
'temporary file was removed');
$node->stop();
-<<<<<<< HEAD
-=======
done_testing();
->>>>>>> REL_16_9
diff --git a/src/test/recovery/t/023_pitr_prepared_xact.pl b/src/test/recovery/t/023_pitr_prepared_xact.pl
index 7cb80916257..a8cdf4efdd4 100644
--- a/src/test/recovery/t/023_pitr_prepared_xact.pl
+++ b/src/test/recovery/t/023_pitr_prepared_xact.pl
@@ -4,21 +4,9 @@
# Test for point-in-time recovery (PITR) with prepared transactions
use strict;
use warnings;
-<<<<<<< HEAD
-use PostgresNode;
-use TestLib;
-
-# GPDB: Effectively disable this TAP test. We cannot run PREPARE
-# TRANSACTION in utility-mode.
-use Test::More tests => 1;
-is(-1, -1, "Disable this TAP test");
-exit;
-
-=======
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
->>>>>>> REL_16_9
use File::Compare;
# Initialize and start primary node with WAL archiving
diff --git a/src/test/recovery/t/024_archive_recovery.pl b/src/test/recovery/t/024_archive_recovery.pl
index 9a7a4280335..d594332b18d 100644
--- a/src/test/recovery/t/024_archive_recovery.pl
+++ b/src/test/recovery/t/024_archive_recovery.pl
@@ -78,20 +78,11 @@ sub test_recovery_wal_level_minimal
[
'pg_ctl', '-D',
$recovery_node->data_dir, '-l',
-<<<<<<< HEAD
- $recovery_node->logfile, 'start'
- , '-o', "--cluster-name=$node_name -c gp_role=utility
--gp_dbid=1 --gp_contentid=0"
- ]);
-
- # wait for postgres to terminate
- foreach my $i (0 .. 10 * $TestLib::timeout_default)
-=======
$recovery_node->logfile, 'start'
]);
# wait for postgres to terminate
foreach my $i (0 .. 10 * $PostgreSQL::Test::Utils::timeout_default)
->>>>>>> REL_16_9
{
last if !-f $recovery_node->data_dir . '/postmaster.pid';
usleep(100_000);
@@ -100,7 +91,7 @@ sub test_recovery_wal_level_minimal
# Confirm that the archive recovery fails with an expected error
my $logfile = slurp_file($recovery_node->logfile());
ok( $logfile =~
- qr/WARNING: .* WAL was generated with wal_level=minimal,
cannot continue recovering/,
+ qr/FATAL: .* WAL was generated with wal_level=minimal, cannot
continue recovering/,
"$node_text ends with an error because it finds WAL generated
with wal_level=minimal"
);
}
diff --git a/src/test/recovery/t/025_stuck_on_old_timeline.pl b/src/test/recovery/t/025_stuck_on_old_timeline.pl
index 9da4627d2f1..91309030df9 100644
--- a/src/test/recovery/t/025_stuck_on_old_timeline.pl
+++ b/src/test/recovery/t/025_stuck_on_old_timeline.pl
@@ -29,11 +29,7 @@ $node_primary->init(allows_streaming => 1, has_archiving => 1);
# that arise from use of backslashes. That means we need to double-quote all
# the paths in the archive_command
my $perlbin = $^X;
-<<<<<<< HEAD
-$perlbin =~ s!\\!/!g if $TestLib::windows_os;
-=======
$perlbin =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os;
->>>>>>> REL_16_9
my $archivedir_primary = $node_primary->archive_dir;
$archivedir_primary =~ s!\\!/!g if $PostgreSQL::Test::Utils::windows_os;
$node_primary->append_conf(
diff --git a/src/test/recovery/t/026_overwrite_contrecord.pl b/src/test/recovery/t/026_overwrite_contrecord.pl
index fbc9c0bbdbf..fad1811ca8d 100644
--- a/src/test/recovery/t/026_overwrite_contrecord.pl
+++ b/src/test/recovery/t/026_overwrite_contrecord.pl
@@ -1,8 +1,4 @@
-<<<<<<< HEAD
-# Copyright (c) 2021, PostgreSQL Global Development Group
-=======
# Copyright (c) 2021-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
# Tests for already-propagated WAL segments ending in incomplete WAL records.
@@ -10,29 +6,16 @@ use strict;
use warnings;
use FindBin;
-<<<<<<< HEAD
-use PostgresNode;
-use TestLib;
-use Test::More;
-
-plan tests => 3;
-
-=======
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
->>>>>>> REL_16_9
# Test: Create a physical replica that's missing the last WAL file,
# then restart the primary to create a divergent WAL file and observe
# that the replica replays the "overwrite contrecord" from that new
# file and the standby promotes successfully.
-<<<<<<< HEAD
-my $node = PostgresNode->get_new_node('primary');
-=======
my $node = PostgreSQL::Test::Cluster->new('primary');
->>>>>>> REL_16_9
$node->init(allows_streaming => 1);
# We need these settings for stability of WAL behavior.
$node->append_conf(
@@ -90,11 +73,7 @@ unlink $node->basedir . "/pgdata/pg_wal/$endfile"
# OK, create a standby at this spot.
$node->backup_fs_cold('backup');
-<<<<<<< HEAD
-my $node_standby = PostgresNode->get_new_node('standby');
-=======
my $node_standby = PostgreSQL::Test::Cluster->new('standby');
->>>>>>> REL_16_9
$node_standby->init_from_backup($node, 'backup', has_streaming => 1);
$node_standby->start;
@@ -126,8 +105,5 @@ $node_standby->promote;
$node->stop;
$node_standby->stop;
-<<<<<<< HEAD
-=======
done_testing();
->>>>>>> REL_16_9
diff --git a/src/test/recovery/t/031_recovery_conflict.pl b/src/test/recovery/t/031_recovery_conflict.pl
index 399d7eb7a9c..b3e7972599b 100644
--- a/src/test/recovery/t/031_recovery_conflict.pl
+++ b/src/test/recovery/t/031_recovery_conflict.pl
@@ -1,8 +1,4 @@
-<<<<<<< HEAD
-# Copyright (c) 2021-2022, PostgreSQL Global Development Group
-=======
# Copyright (c) 2021-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
# Test that connections to a hot standby are correctly canceled when a
# recovery conflict is detected Also, test that statistics in
@@ -14,11 +10,7 @@ use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
-<<<<<<< HEAD
-plan skip_all => "disabled until after minor releases, due to instability";
-=======
plan skip_all => "disabled due to instability";
->>>>>>> REL_16_9
# Set up nodes
my $node_primary = PostgreSQL::Test::Cluster->new('primary');
@@ -28,15 +20,7 @@ my $tablespace1 = "test_recovery_conflict_tblspc";
$node_primary->append_conf(
'postgresql.conf', qq[
-<<<<<<< HEAD
-
-# Doesn't currently exist pre 15, but might be backpatched later
-#allow_in_place_tablespaces = on
-#temp_tablespaces = $tablespace1
-
-=======
allow_in_place_tablespaces = on
->>>>>>> REL_16_9
log_temp_files = 0
# for deadlock test
@@ -45,10 +29,7 @@ max_prepared_transactions = 10
# wait some to test the wait paths as well, but not long for obvious reasons
max_standby_streaming_delay = 50ms
-<<<<<<< HEAD
-=======
temp_tablespaces = $tablespace1
->>>>>>> REL_16_9
# Some of the recovery conflict logging code only gets exercised after
# deadlock_timeout. The test doesn't rely on that additional output, but it's
# nice to get some minimal coverage of that code.
@@ -59,14 +40,8 @@ $node_primary->start;
my $backup_name = 'my_backup';
-<<<<<<< HEAD
-# See allow_in_place_tablespaces comment above
-#$node_primary->safe_psql('postgres',
-# qq[CREATE TABLESPACE $tablespace1 LOCATION '']);
-=======
$node_primary->safe_psql('postgres',
qq[CREATE TABLESPACE $tablespace1 LOCATION '']);
->>>>>>> REL_16_9
$node_primary->backup($backup_name);
my $node_standby = PostgreSQL::Test::Cluster->new('standby');
@@ -89,28 +64,12 @@ CREATE TABLE ${table1}(a int, b int);
INSERT INTO $table1 SELECT i % 3, 0 FROM generate_series(1,20) i;
CREATE TABLE ${table2}(a int, b int);
]);
-<<<<<<< HEAD
-my $primary_lsn = $node_primary->lsn('flush');
-$node_primary->wait_for_catchup($node_standby, 'replay', $primary_lsn);
-
-
-# a longrunning psql that we can use to trigger conflicts
-my $psql_timeout = IPC::Run::timer($PostgreSQL::Test::Utils::timeout_default);
-my %psql_standby = ('stdin' => '', 'stdout' => '');
-$psql_standby{run} =
- $node_standby->background_psql($test_db, \$psql_standby{stdin},
- \$psql_standby{stdout},
- $psql_timeout);
-$psql_standby{stdout} = '';
-
-=======
$node_primary->wait_for_replay_catchup($node_standby);
# a longrunning psql that we can use to trigger conflicts
my $psql_standby =
$node_standby->background_psql($test_db, on_error_stop => 0);
->>>>>>> REL_16_9
my $expected_conflicts = 0;
@@ -132,28 +91,12 @@ $node_primary->safe_psql(
BEGIN; LOCK $table1; COMMIT;
]);
-<<<<<<< HEAD
-$primary_lsn = $node_primary->lsn('flush');
-$node_primary->wait_for_catchup($node_standby, 'replay', $primary_lsn);
-=======
$node_primary->wait_for_replay_catchup($node_standby);
->>>>>>> REL_16_9
my $cursor1 = "test_recovery_conflict_cursor";
# DECLARE and use a cursor on standby, causing buffer with the only block of
# the relation to be pinned on the standby
-<<<<<<< HEAD
-$psql_standby{stdin} .= qq[
- BEGIN;
- DECLARE $cursor1 CURSOR FOR SELECT b FROM $table1;
- FETCH FORWARD FROM $cursor1;
- ];
-# FETCH FORWARD should have returned a 0 since all values of b in the table
-# are 0
-ok(pump_until_standby(qr/^0$/m),
- "$sect: cursor with conflicting pin established");
-=======
my $res = $psql_standby->query_safe(
qq[
BEGIN;
@@ -163,7 +106,6 @@ my $res = $psql_standby->query_safe(
# FETCH FORWARD should have returned a 0 since all values of b in the table
# are 0
like($res, qr/^0$/m, "$sect: cursor with conflicting pin established");
->>>>>>> REL_16_9
# to check the log starting now for recovery conflict messages
my $log_location = -s $node_standby->logfile;
@@ -175,18 +117,10 @@ $node_primary->safe_psql($test_db, qq[VACUUM $table1;]);
# finished, so waiting for catchup ensures that there is no race between
# encountering the recovery conflict which causes the disconnect and checking
# the logfile for the terminated connection.
-<<<<<<< HEAD
-$primary_lsn = $node_primary->lsn('flush');
-$node_primary->wait_for_catchup($node_standby, 'replay', $primary_lsn);
-
-check_conflict_log("User was holding shared buffer pin for too long");
-reconnect_and_clear();
-=======
$node_primary->wait_for_replay_catchup($node_standby);
check_conflict_log("User was holding shared buffer pin for too long");
$psql_standby->reconnect_and_clear();
->>>>>>> REL_16_9
check_conflict_stat("bufferpin");
@@ -196,21 +130,6 @@ $expected_conflicts++;
$node_primary->safe_psql($test_db,
qq[INSERT INTO $table1 SELECT i, 0 FROM generate_series(1,20) i]);
-<<<<<<< HEAD
-$primary_lsn = $node_primary->lsn('flush');
-$node_primary->wait_for_catchup($node_standby, 'replay', $primary_lsn);
-
-# DECLARE and FETCH from cursor on the standby
-$psql_standby{stdin} .= qq[
- BEGIN;
- DECLARE $cursor1 CURSOR FOR SELECT b FROM $table1;
- FETCH FORWARD FROM $cursor1;
- ];
-ok( pump_until(
- $psql_standby{run}, $psql_timeout,
- \$psql_standby{stdout}, qr/^0$/m,),
- "$sect: cursor with conflicting snapshot established");
-=======
$node_primary->wait_for_replay_catchup($node_standby);
# DECLARE and FETCH from cursor on the standby
@@ -221,7 +140,6 @@ $res = $psql_standby->query_safe(
FETCH FORWARD FROM $cursor1;
]);
like($res, qr/^0$/m, "$sect: cursor with conflicting snapshot established");
->>>>>>> REL_16_9
# Do some HOT updates
$node_primary->safe_psql($test_db,
@@ -231,20 +149,11 @@ $node_primary->safe_psql($test_db,
$node_primary->safe_psql($test_db, qq[VACUUM $table1;]);
# Wait for attempted replay of PRUNE records
-<<<<<<< HEAD
-$primary_lsn = $node_primary->lsn('flush');
-$node_primary->wait_for_catchup($node_standby, 'replay', $primary_lsn);
-
-check_conflict_log(
- "User query might have needed to see row versions that must be
removed");
-reconnect_and_clear();
-=======
$node_primary->wait_for_replay_catchup($node_standby);
check_conflict_log(
"User query might have needed to see row versions that must be
removed");
$psql_standby->reconnect_and_clear();
->>>>>>> REL_16_9
check_conflict_stat("snapshot");
@@ -253,14 +162,6 @@ $sect = "lock conflict";
$expected_conflicts++;
# acquire lock to conflict with
-<<<<<<< HEAD
-$psql_standby{stdin} .= qq[
- BEGIN;
- LOCK TABLE $table1 IN ACCESS SHARE MODE;
- SELECT 1;
- ];
-ok(pump_until_standby(qr/^1$/m), "$sect: conflicting lock acquired");
-=======
$res = $psql_standby->query_safe(
qq[
BEGIN;
@@ -268,60 +169,10 @@ $res = $psql_standby->query_safe(
SELECT 1;
]);
like($res, qr/^1$/m, "$sect: conflicting lock acquired");
->>>>>>> REL_16_9
# DROP TABLE containing block which standby has in a pinned buffer
$node_primary->safe_psql($test_db, qq[DROP TABLE $table1;]);
-<<<<<<< HEAD
-$primary_lsn = $node_primary->lsn('flush');
-$node_primary->wait_for_catchup($node_standby, 'replay', $primary_lsn);
-
-check_conflict_log("User was holding a relation lock for too long");
-reconnect_and_clear();
-check_conflict_stat("lock");
-
-
-# See allow_in_place_tablespaces comment above
-### RECOVERY CONFLICT 4: Tablespace conflict
-#$sect = "tablespace conflict";
-#$expected_conflicts++;
-#
-## DECLARE a cursor for a query which, with sufficiently low work_mem, will
-## spill tuples into temp files in the temporary tablespace created during
-## setup.
-#$psql_standby{stdin} .= qq[
-# BEGIN;
-# SET work_mem = '64kB';
-# DECLARE $cursor1 CURSOR FOR
-# SELECT count(*) FROM generate_series(1,6000);
-# FETCH FORWARD FROM $cursor1;
-# ];
-#ok(pump_until_standby(qr/^6000$/m),
-# "$sect: cursor with conflicting temp file established");
-#
-## Drop the tablespace currently containing spill files for the query on the
-## standby
-#$node_primary->safe_psql($test_db, qq[DROP TABLESPACE $tablespace1;]);
-#
-#$primary_lsn = $node_primary->lsn('flush');
-#$node_primary->wait_for_catchup($node_standby, 'replay', $primary_lsn);
-#
-#check_conflict_log(
-# "User was or might have been using tablespace that must be dropped");
-#reconnect_and_clear();
-#check_conflict_stat("tablespace");
-
-
-## RECOVERY CONFLICT 5: Deadlock
-SKIP:
-{
- skip "disabled until after minor releases, due to instability";
-
-$sect = "startup deadlock";
-$expected_conflicts++;
-
-=======
$node_primary->wait_for_replay_catchup($node_standby);
check_conflict_log("User was holding a relation lock for too long");
@@ -373,7 +224,6 @@ $node_standby->adjust_conf(
$node_standby->restart();
$psql_standby->reconnect_and_clear();
->>>>>>> REL_16_9
# Generate a few dead rows, to later be cleaned up by vacuum. Then acquire a
# lock on another relation in a prepared xact, so it's held continuously by
# the startup process. The standby psql will block acquiring that lock while
@@ -393,32 +243,18 @@ INSERT INTO $table1(a) VALUES (170);
SELECT txid_current();
]);
-<<<<<<< HEAD
-$primary_lsn = $node_primary->lsn('flush');
-$node_primary->wait_for_catchup($node_standby, 'replay', $primary_lsn);
-
-$psql_standby{stdin} .= qq[
-=======
$node_primary->wait_for_replay_catchup($node_standby);
$res = $psql_standby->query_until(
qr/^1$/m, qq[
->>>>>>> REL_16_9
BEGIN;
-- hold pin
DECLARE $cursor1 CURSOR FOR SELECT a FROM $table1;
FETCH FORWARD FROM $cursor1;
-- wait for lock held by prepared transaction
SELECT * FROM $table2;
-<<<<<<< HEAD
- ];
-ok( pump_until(
- $psql_standby{run}, $psql_timeout,
- \$psql_standby{stdout}, qr/^1$/m,),
-=======
]);
ok(1,
->>>>>>> REL_16_9
"$sect: cursor holding conflicting pin, also waiting for lock,
established"
);
@@ -432,30 +268,18 @@ SELECT 'waiting' FROM pg_locks WHERE locktype =
'relation' AND NOT granted;
# VACUUM will prune away rows, causing a buffer pin conflict, while standby
# psql is waiting on lock
$node_primary->safe_psql($test_db, qq[VACUUM $table1;]);
-<<<<<<< HEAD
-$primary_lsn = $node_primary->lsn('flush');
-$node_primary->wait_for_catchup($node_standby, 'replay', $primary_lsn);
-
-check_conflict_log("User transaction caused buffer deadlock with recovery.");
-reconnect_and_clear();
-=======
$node_primary->wait_for_replay_catchup($node_standby);
check_conflict_log("User transaction caused buffer deadlock with recovery.");
$psql_standby->reconnect_and_clear();
->>>>>>> REL_16_9
check_conflict_stat("deadlock");
# clean up for next tests
$node_primary->safe_psql($test_db, qq[ROLLBACK PREPARED 'lock';]);
-<<<<<<< HEAD
-}
-=======
$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay',
'50ms');
$node_standby->restart();
$psql_standby->reconnect_and_clear();
->>>>>>> REL_16_9
# Check that expected number of conflicts show in pg_stat_database. Needs to
@@ -472,24 +296,14 @@ $sect = "database conflict";
$node_primary->safe_psql('postgres', qq[DROP DATABASE $test_db;]);
-<<<<<<< HEAD
-$primary_lsn = $node_primary->lsn('flush');
-$node_primary->wait_for_catchup($node_standby, 'replay', $primary_lsn);
-=======
$node_primary->wait_for_replay_catchup($node_standby);
->>>>>>> REL_16_9
check_conflict_log("User was connected to a database that must be dropped");
# explicitly shut down psql instances gracefully - to avoid hangs or worse on
# windows
-<<<<<<< HEAD
-$psql_standby{stdin} .= "\\q\n";
-$psql_standby{run}->finish;
-=======
$psql_standby->quit;
->>>>>>> REL_16_9
$node_standby->stop();
$node_primary->stop();
@@ -497,46 +311,9 @@ $node_primary->stop();
done_testing();
-<<<<<<< HEAD
-
-sub pump_until_standby
-{
- my $match = shift;
-
- return pump_until($psql_standby{run}, $psql_timeout,
- \$psql_standby{stdout}, $match);
-}
-
-sub reconnect_and_clear
-{
- # If psql isn't dead already, tell it to quit as \q, when already dead,
- # causes IPC::Run to unhelpfully error out with "ack Broken pipe:".
- $psql_standby{run}->pump_nb();
- if ($psql_standby{run}->pumpable())
- {
- $psql_standby{stdin} .= "\\q\n";
- }
- $psql_standby{run}->finish;
-
- # restart
- $psql_standby{run}->run();
- $psql_standby{stdin} = '';
- $psql_standby{stdout} = '';
-
- # Run query to ensure connection has finished re-establishing
- $psql_standby{stdin} .= qq[SELECT 1;\n];
- die unless pump_until_standby(qr/^1$/m);
- $psql_standby{stdout} = '';
-}
-
-sub check_conflict_log
-{
- my $message = shift;
-=======
sub check_conflict_log
{
my $message = shift;
->>>>>>> REL_16_9
my $old_log_location = $log_location;
$log_location = $node_standby->wait_for_log(qr/$message/,
$log_location);
@@ -548,17 +325,10 @@ sub check_conflict_log
sub check_conflict_stat
{
-<<<<<<< HEAD
- # Stats can't easily be checked before 15, requires waiting for stats to
- # be reported to stats collector and then those messages need to be
- # processed. Dealt with here to reduce intra-branch difference in the
- # tests.
-=======
my $conflict_type = shift;
my $count = $node_standby->safe_psql($test_db,
qq[SELECT confl_$conflict_type FROM pg_stat_database_conflicts
WHERE datname='$test_db';]
);
is($count, 1, "$sect: stats show conflict on standby");
->>>>>>> REL_16_9
}
diff --git a/src/test/regress/expected/compression.out
b/src/test/regress/expected/compression.out
index b55f1a95704..ca5da903806 100644
--- a/src/test/regress/expected/compression.out
+++ b/src/test/regress/expected/compression.out
@@ -317,13 +317,9 @@ SELECT pg_column_compression(f1) FROM cmdata;
DROP TABLE cmdata2;
CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4);
CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2));
-<<<<<<< HEAD
DETAIL: Distribution key column "f1" is not included in the constraint.
ERROR: UNIQUE index must contain all columns in the table's distribution key
-INSERT INTO cmdata2 VALUES((SELECT array_agg(md5(g::TEXT))::TEXT FROM
-=======
-INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM
->>>>>>> REL_16_9
+INSERT INTO cmdata2 VALUES((SELECT array_agg(md5(g::TEXT))::TEXT FROM
generate_series(1, 50) g), VERSION());
-- check data is ok
SELECT length(f1) FROM cmdata;
diff --git a/src/test/regress/expected/largeobject.out
b/src/test/regress/expected/largeobject.out
index 4faf0b50c7e..4921dd79aee 100644
--- a/src/test/regress/expected/largeobject.out
+++ b/src/test/regress/expected/largeobject.out
@@ -182,18 +182,6 @@ SELECT lo_open(loid, x'40000'::int) from
lotest_stash_values;
(1 row)
ABORT;
-<<<<<<< HEAD:src/test/regress/output/largeobject.source
-DO $$
-DECLARE
- loid oid;
-BEGIN
- SELECT tbl.loid INTO loid FROM lotest_stash_values tbl;
- PERFORM lo_export(loid, '@abs_builddir@/results/invalid/path');
-EXCEPTION
- WHEN UNDEFINED_FILE THEN RAISE NOTICE 'could not open file, as expected';
-END;
-$$;
-=======
\set filename :abs_builddir '/results/invalid/path'
\set dobody 'DECLARE loid oid; BEGIN '
\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; '
@@ -201,7 +189,6 @@ $$;
\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN '
\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END'
DO :'dobody';
->>>>>>> REL_16_9:src/test/regress/expected/largeobject.out
NOTICE: could not open file, as expected
-- Test truncation.
BEGIN;
diff --git a/src/test/regress/expected/largeobject_1.out
b/src/test/regress/expected/largeobject_1.out
index 93ae04eed48..7172ddb39bb 100644
--- a/src/test/regress/expected/largeobject_1.out
+++ b/src/test/regress/expected/largeobject_1.out
@@ -182,18 +182,6 @@ SELECT lo_open(loid, x'40000'::int) from
lotest_stash_values;
(1 row)
ABORT;
-<<<<<<< HEAD:src/test/regress/output/largeobject_1.source
-DO $$
-DECLARE
- loid oid;
-BEGIN
- SELECT tbl.loid INTO loid FROM lotest_stash_values tbl;
- PERFORM lo_export(loid, '@abs_builddir@/results/invalid/path');
-EXCEPTION
- WHEN UNDEFINED_FILE THEN RAISE NOTICE 'could not open file, as expected';
-END;
-$$;
-=======
\set filename :abs_builddir '/results/invalid/path'
\set dobody 'DECLARE loid oid; BEGIN '
\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; '
@@ -201,7 +189,6 @@ $$;
\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN '
\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END'
DO :'dobody';
->>>>>>> REL_16_9:src/test/regress/expected/largeobject_1.out
NOTICE: could not open file, as expected
-- Test truncation.
BEGIN;
diff --git a/src/test/regress/expected/rules.out
b/src/test/regress/expected/rules.out
index 3daecde9e2d..614ffea1784 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -1869,16 +1869,7 @@ pg_stat_database_conflicts| SELECT oid AS datid,
pg_stat_get_db_conflict_startup_deadlock(oid) AS confl_deadlock,
pg_stat_get_db_conflict_logicalslot(oid) AS confl_active_logicalslot
FROM pg_database d;
-<<<<<<< HEAD
-pg_stat_gssapi| SELECT s.pid,
- s.gss_auth AS gss_authenticated,
- s.gss_princ AS principal,
- s.gss_enc AS encrypted
- FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid,
application_name, state, query, wait_event_type, wait_event, xact_start,
query_start, backend_start, state_change, client_addr, client_hostname,
client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion,
sslcipher, sslbits, ssl_client_dn, ssl_client_serial, ssl_issuer_dn, gss_auth,
gss_princ, gss_enc, leader_pid, query_id)
- WHERE (s.client_port IS NOT NULL);
-pg_stat_progress_analyze| SELECT d.gp_segment_id,
- s.pid,
-=======
+
pg_stat_gssapi| SELECT pid,
gss_auth AS gss_authenticated,
gss_princ AS principal,
@@ -1905,8 +1896,8 @@ pg_stat_io| SELECT backend_type,
fsync_time,
stats_reset
FROM pg_stat_get_io() b(backend_type, object, context, reads, read_time,
writes, write_time, writebacks, writeback_time, extends, extend_time, op_bytes,
hits, evictions, reuses, fsyncs, fsync_time, stats_reset);
-pg_stat_progress_analyze| SELECT s.pid,
->>>>>>> REL_16_9
+pg_stat_progress_analyze| SELECT d.gp_segment_id,
+ s.pid,
s.datid,
d.datname,
s.relid,
@@ -1928,14 +1919,9 @@ pg_stat_progress_analyze| SELECT s.pid,
(s.param8)::oid AS current_child_table_relid
FROM (pg_stat_get_progress_info('ANALYZE'::text) s(pid, datid, relid,
param1, param2, param3, param4, param5, param6, param7, param8, param9,
param10, param11, param12, param13, param14, param15, param16, param17,
param18, param19, param20)
LEFT JOIN pg_database d ON ((s.datid = d.oid)));
-<<<<<<< HEAD
pg_stat_progress_basebackup| SELECT gp_execution_segment() AS gp_segment_id,
s.pid,
CASE s.param1
-=======
-pg_stat_progress_basebackup| SELECT pid,
- CASE param1
->>>>>>> REL_16_9
WHEN 0 THEN 'initializing'::text
WHEN 1 THEN 'waiting for checkpoint to finish'::text
WHEN 2 THEN 'estimating backup size'::text
@@ -3742,12 +3728,11 @@ SELECT * FROM ruletest2;
DROP TABLE ruletest1;
DROP TABLE ruletest2;
-<<<<<<< HEAD
-- test rule for select-for-update
create table t_test_rules_select_for_update (c int) distributed randomly;
create rule myrule as on insert to t_test_rules_select_for_update
do instead select * from t_test_rules_select_for_update for update;
-=======
+
--
-- Test non-SELECT rule on security invoker view.
-- Should use view owner's permissions.
@@ -3792,4 +3777,3 @@ DROP TABLE ruletest_t3;
DROP TABLE ruletest_t2;
DROP TABLE ruletest_t1;
DROP USER regress_rule_user1;
->>>>>>> REL_16_9
diff --git a/src/test/regress/expected/xml_1.out
b/src/test/regress/expected/xml_1.out
index 104da85c92c..95022478134 100644
--- a/src/test/regress/expected/xml_1.out
+++ b/src/test/regress/expected/xml_1.out
@@ -479,12 +479,7 @@ ERROR: unsupported XML feature
LINE 1: SELECT '<>' IS NOT DOCUMENT;
^
DETAIL: This functionality requires the server to be built with libxml
support.
-<<<<<<< HEAD
-HINT: You need to rebuild PostgreSQL using --with-libxml.
SELECT xmlagg(data order by id) FROM xmltest;
-=======
-SELECT xmlagg(data) FROM xmltest;
->>>>>>> REL_16_9
xmlagg
--------
diff --git a/src/test/regress/expected/xmlmap_1.out
b/src/test/regress/expected/xmlmap_1.out
index c041d5ddda5..09777d9cecd 100644
--- a/src/test/regress/expected/xmlmap_1.out
+++ b/src/test/regress/expected/xmlmap_1.out
@@ -72,12 +72,7 @@ DETAIL: This functionality requires the server to be built
with libxml support.
MOVE BACKWARD ALL IN xc;
ERROR: backward scan is not supported in this version of Apache Cloudberry
SELECT cursor_to_xml('xc'::refcursor, 5, true, false, '');
-<<<<<<< HEAD
ERROR: portal "xc" cannot be run
-=======
-ERROR: unsupported XML feature
-DETAIL: This functionality requires the server to be built with libxml
support.
->>>>>>> REL_16_9
SELECT cursor_to_xmlschema('xc'::refcursor, true, false, '');
ERROR: unsupported XML feature
DETAIL: This functionality requires the server to be built with libxml
support.
diff --git a/src/test/regress/sql/largeobject.sql
b/src/test/regress/sql/largeobject.sql
index e90ff6b7b05..f5a8abb3492 100644
--- a/src/test/regress/sql/largeobject.sql
+++ b/src/test/regress/sql/largeobject.sql
@@ -132,18 +132,6 @@ BEGIN;
SELECT lo_open(loid, x'40000'::int) from lotest_stash_values;
ABORT;
-<<<<<<< HEAD:src/test/regress/input/largeobject.source
-DO $$
-DECLARE
- loid oid;
-BEGIN
- SELECT tbl.loid INTO loid FROM lotest_stash_values tbl;
- PERFORM lo_export(loid, '@abs_builddir@/results/invalid/path');
-EXCEPTION
- WHEN UNDEFINED_FILE THEN RAISE NOTICE 'could not open file, as expected';
-END;
-$$;
-=======
\set filename :abs_builddir '/results/invalid/path'
\set dobody 'DECLARE loid oid; BEGIN '
\set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; '
@@ -151,7 +139,6 @@ $$;
\set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN '
\set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END'
DO :'dobody';
->>>>>>> REL_16_9:src/test/regress/sql/largeobject.sql
-- Test truncation.
BEGIN;
diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql
index 6a6270f8e57..bc9320c158f 100644
--- a/src/test/regress/sql/rules.sql
+++ b/src/test/regress/sql/rules.sql
@@ -776,11 +776,7 @@ drop table cchild;
\a\t
SELECT viewname, definition FROM pg_views
-<<<<<<< HEAD
-WHERE schemaname <> 'information_schema' AND viewname <> 'pg_roles' AND
viewname <> 'gp_pgdatabase' AND viewname <> 'pg_locks' AND viewname <>
'gp_max_external_files' AND viewname <> 'pg_resqueue_status' AND viewname <>
'pg_stat_resqueues'
-=======
WHERE schemaname = 'pg_catalog'
->>>>>>> REL_16_9
ORDER BY viewname;
SELECT tablename, rulename, definition FROM pg_rules
@@ -1368,12 +1364,11 @@ SELECT * FROM ruletest2;
DROP TABLE ruletest1;
DROP TABLE ruletest2;
-<<<<<<< HEAD
-- test rule for select-for-update
create table t_test_rules_select_for_update (c int) distributed randomly;
create rule myrule as on insert to t_test_rules_select_for_update
do instead select * from t_test_rules_select_for_update for update;
-=======
+
--
-- Test non-SELECT rule on security invoker view.
-- Should use view owner's permissions.
@@ -1416,4 +1411,3 @@ DROP TABLE ruletest_t2;
DROP TABLE ruletest_t1;
DROP USER regress_rule_user1;
->>>>>>> REL_16_9
diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl
index 523a714f46e..f7bf7ebddd4 100644
--- a/src/test/ssl/t/001_ssltests.pl
+++ b/src/test/ssl/t/001_ssltests.pl
@@ -17,16 +17,12 @@ if ($ENV{with_ssl} ne 'openssl')
{
plan skip_all => 'OpenSSL not supported by this build';
}
-<<<<<<< HEAD
elsif (!$ENV{PG_TEST_EXTRA} || $ENV{PG_TEST_EXTRA} !~ /\bssl\b/)
{
plan skip_all =>
'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA';
}
-else
-=======
elsif ($ENV{PG_TEST_EXTRA} !~ /\bssl\b/)
->>>>>>> REL_16_9
{
plan skip_all =>
'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA';
@@ -285,8 +281,6 @@ switch_server_cert($node, certfile => 'server-ip-cn-only');
$common_connstr =
"$default_ssl_connstr user=ssltestuser dbname=trustdb
sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR
sslmode=verify-full";
-<<<<<<< HEAD
-=======
$node->connect_ok("$common_connstr host=192.0.2.1",
"IP address in the Common Name");
@@ -310,7 +304,6 @@ switch_server_cert($node, certfile =>
'server-multiple-alt-names');
$common_connstr =
"$default_ssl_connstr user=ssltestuser dbname=trustdb
sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR
sslmode=verify-full";
->>>>>>> REL_16_9
$node->connect_ok(
"$common_connstr host=dns1.alt-name.pg-ssltest.test",
@@ -799,14 +792,11 @@ $node->connect_fails(
. sslkey('client-revoked.key'),
"certificate authorization fails with revoked client cert",
expected_stderr => qr|SSL error: ssl[a-z0-9/]* alert certificate
revoked|,
-<<<<<<< HEAD
-=======
# temporarily(?) skip this check due to timing issue
# log_like => [
# qr{Client certificate verification failed at depth 0:
certificate revoked},
# qr{Failed certificate data \(unverified\): subject
"/CN=ssltestuser", serial number 2315134995201656577, issuer "/CN=Test CA for
PostgreSQL SSL regression test client certs"},
# ],
->>>>>>> REL_16_9
# revoked certificates should not authenticate the user
log_unlike => [qr/connection authenticated:/],);
@@ -844,13 +834,9 @@ $node->connect_ok(
# intermediate client_ca.crt is provided by client, and isn't in server's
ssl_ca_file
switch_server_cert($node, certfile => 'server-cn-only', cafile => 'root_ca');
$common_connstr =
-<<<<<<< HEAD
- "$default_ssl_connstr user=ssltestuser dbname=certdb
sslkey=ssl/client_tmp.key sslrootcert=ssl/root+server_ca.crt
hostaddr=$SERVERHOSTADDR host=localhost";
-=======
"$default_ssl_connstr user=ssltestuser dbname=certdb "
. sslkey('client.key')
. " sslrootcert=ssl/root+server_ca.crt hostaddr=$SERVERHOSTADDR
host=localhost";
->>>>>>> REL_16_9
$node->connect_ok(
"$common_connstr sslmode=require sslcert=ssl/client+client_ca.crt",
@@ -910,9 +896,6 @@ $node->connect_fails(
"$common_connstr user=ssltestuser sslcert=ssl/client-revoked.crt "
. sslkey('client-revoked.key'),
"certificate authorization fails with revoked client cert with
server-side CRL directory",
-<<<<<<< HEAD
- expected_stderr => qr|SSL error: ssl[a-z0-9/]* alert certificate
revoked|);
-=======
expected_stderr => qr|SSL error: ssl[a-z0-9/]* alert certificate
revoked|,
# temporarily(?) skip this check due to timing issue
# log_like => [
@@ -920,7 +903,6 @@ $node->connect_fails(
# qr{Failed certificate data \(unverified\): subject
"/CN=ssltestuser", serial number 2315134995201656577, issuer "/CN=Test CA for
PostgreSQL SSL regression test client certs"},
# ]
);
->>>>>>> REL_16_9
# revoked client cert, non-ASCII subject
$node->connect_fails(
diff --git a/src/test/ssl/t/002_scram.pl b/src/test/ssl/t/002_scram.pl
index 030739a4eb6..b75b5863fc5 100644
--- a/src/test/ssl/t/002_scram.pl
+++ b/src/test/ssl/t/002_scram.pl
@@ -20,17 +20,11 @@ if ($ENV{with_ssl} ne 'openssl')
{
plan skip_all => 'OpenSSL not supported by this build';
}
-<<<<<<< HEAD
-elsif (!$ENV{PG_TEST_EXTRA} || $ENV{PG_TEST_EXTRA} !~ /\bssl\b/)
-=======
elsif ($ENV{PG_TEST_EXTRA} !~ /\bssl\b/)
->>>>>>> REL_16_9
{
plan skip_all =>
'Potentially unsafe test SSL not enabled in PG_TEST_EXTRA';
}
-<<<<<<< HEAD
-=======
my $ssl_server = SSL::Server->new();
@@ -44,8 +38,6 @@ sub switch_server_cert
$ssl_server->switch_server_cert(@_);
}
->>>>>>> REL_16_9
-
# This is the hostname used to connect to the server.
my $SERVERHOSTADDR = '127.0.0.1';
# This is the pattern to use in pg_hba.conf to match incoming connections.
@@ -176,12 +168,6 @@ else
);
}
-<<<<<<< HEAD
-# clean up
-unlink($client_tmp_key);
-
-done_testing($number_of_tests);
-=======
# Now test with a server certificate that uses the RSA-PSS algorithm.
# This checks that the certificate can be loaded and that channel binding
# works. (see bug #17760)
@@ -196,4 +182,3 @@ if ($supports_rsapss_certs)
]);
}
done_testing();
->>>>>>> REL_16_9
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]