This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this
push:
new a81b84484f1 Fix conflicts for pg_basebackup
a81b84484f1 is described below
commit a81b84484f1212670f94ed3db11c54120dda9e09
Author: Jinbao Chen <[email protected]>
AuthorDate: Thu Jan 29 00:10:34 2026 +0800
Fix conflicts for pg_basebackup
---
gpAux/gpdemo/gpdemo-defaults.sh | 2 +
src/bin/Makefile | 1 +
src/bin/pg_basebackup/nls.mk | 6 -
src/bin/pg_basebackup/pg_basebackup.c | 409 +---------------------
src/bin/pg_basebackup/pg_receivewal.c | 4 -
src/bin/pg_basebackup/pg_recvlogical.c | 7 -
src/bin/pg_basebackup/receivelog.c | 33 --
src/bin/pg_basebackup/t/010_pg_basebackup.pl | 496 ++++++++-------------------
src/bin/pg_basebackup/walmethods.c | 401 ----------------------
9 files changed, 155 insertions(+), 1204 deletions(-)
diff --git a/gpAux/gpdemo/gpdemo-defaults.sh b/gpAux/gpdemo/gpdemo-defaults.sh
index 84126cbf2a2..12ffa40b2d3 100755
--- a/gpAux/gpdemo/gpdemo-defaults.sh
+++ b/gpAux/gpdemo/gpdemo-defaults.sh
@@ -32,5 +32,7 @@ export with_openssl
export DEMO_PORT_BASE="$PORT_BASE"
export NUM_PRIMARY_MIRROR_PAIRS
+export WITH_MIRRORS
+export WITH_STANDBY
export BLDWRAP_POSTGRES_CONF_ADDONS
export DEFAULT_QD_MAX_CONNECT=150
\ No newline at end of file
diff --git a/src/bin/Makefile b/src/bin/Makefile
index 85b61526cab..80adb09debb 100644
--- a/src/bin/Makefile
+++ b/src/bin/Makefile
@@ -44,6 +44,7 @@ SUBDIRS = \
gpfts \
pg_amcheck \
pg_archivecleanup \
+ pg_basebackup \
pg_checksums \
pg_config \
pg_controldata \
diff --git a/src/bin/pg_basebackup/nls.mk b/src/bin/pg_basebackup/nls.mk
index 301ed2975ca..fc475003e8e 100644
--- a/src/bin/pg_basebackup/nls.mk
+++ b/src/bin/pg_basebackup/nls.mk
@@ -1,10 +1,5 @@
# src/bin/pg_basebackup/nls.mk
CATALOG_NAME = pg_basebackup
-<<<<<<< HEAD
-AVAIL_LANGUAGES = cs de el es fr ja ko ru sv tr uk zh_CN
-GETTEXT_FILES = $(FRONTEND_COMMON_GETTEXT_FILES) pg_basebackup.c
pg_receivewal.c pg_recvlogical.c receivelog.c streamutil.c walmethods.c
../../common/fe_memutils.c ../../common/file_utils.c
../../fe_utils/recovery_gen.c
-GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS) simple_prompt
tar_set_error
-=======
GETTEXT_FILES = $(FRONTEND_COMMON_GETTEXT_FILES) \
bbstreamer_file.c \
bbstreamer_gzip.c \
@@ -24,5 +19,4 @@ GETTEXT_FILES = $(FRONTEND_COMMON_GETTEXT_FILES) \
../../fe_utils/option_utils.c \
../../fe_utils/recovery_gen.c
GETTEXT_TRIGGERS = $(FRONTEND_COMMON_GETTEXT_TRIGGERS) simple_prompt
->>>>>>> REL_16_9
GETTEXT_FLAGS = $(FRONTEND_COMMON_GETTEXT_FLAGS)
diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c
index 1ce53913762..10c4cfd2991 100644
--- a/src/bin/pg_basebackup/pg_basebackup.c
+++ b/src/bin/pg_basebackup/pg_basebackup.c
@@ -72,22 +72,6 @@ typedef struct WriteTarState
bbstreamer *streamer;
} WriteTarState;
-<<<<<<< HEAD
-typedef struct UnpackTarState
-{
- int tablespacenum;
- char current_path[MAXPGPATH];
- char filename[MAXPGPATH];
- const char *mapped_tblspc_path;
- pgoff_t current_len_left;
- int current_padding;
- FILE *file;
- char gp_tablespace_filename[MAXPGPATH];
- bool basetablespace;
-} UnpackTarState;
-
-=======
->>>>>>> REL_16_9
typedef struct WriteManifestState
{
char filename[MAXPGPATH];
@@ -420,27 +404,20 @@ usage(void)
" (in kB/s, or use suffix
\"k\" or \"M\")\n"));
printf(_(" -R, --write-recovery-conf\n"
" write configuration for
replication\n"));
-<<<<<<< HEAD
printf(_(" -o, --write-conf-files-only\n"
" write configuration files
only\n"));
-=======
printf(_(" -t, --target=TARGET[:DETAIL]\n"
" backup target (if other than
client)\n"));
->>>>>>> REL_16_9
printf(_(" -T, --tablespace-mapping=OLDDIR=NEWDIR\n"
" relocate tablespace in
OLDDIR to NEWDIR\n"));
printf(_(" --waldir=WALDIR location for the write-ahead log
directory\n"));
printf(_(" -X, --wal-method=none|fetch|stream\n"
" include required WAL files
with specified method\n"));
printf(_(" -z, --gzip compress tar output\n"));
-<<<<<<< HEAD
- printf(_(" -Z, --compress=0-9 compress tar output with given
compression level\n"));
printf(_(" --target-gp-dbid create tablespace subdirectories
with given dbid\n"));
-=======
printf(_(" -Z, --compress=[{client|server}-]METHOD[:DETAIL]\n"
" compress on client or server
as specified\n"));
printf(_(" -Z, --compress=none do not compress tar output\n"));
->>>>>>> REL_16_9
printf(_("\nGeneral options:\n"));
printf(_(" -c, --checkpoint=fast|spread\n"
" set fast or spread
checkpointing\n"));
@@ -1669,36 +1646,7 @@ ReceiveTarFile(PGconn *conn, char *archive_name, char
*spclocation,
bbstreamer_finalize(state.streamer);
bbstreamer_free(state.streamer);
-<<<<<<< HEAD
-#ifdef HAVE_LIBZ
- if (state.ztarfile != NULL)
- {
- errno = 0; /* in case gzclose()
doesn't set it */
- if (gzclose(state.ztarfile) != 0)
- {
- pg_log_error("could not close compressed file \"%s\":
%m",
- state.filename);
- exit(1);
- }
- }
- else
-#endif
- {
- if (strcmp(basedir, "-") != 0)
- {
- if (fclose(state.tarfile) != 0)
- {
- pg_log_error("could not close file \"%s\": %m",
- state.filename);
- exit(1);
- }
- }
- }
-
- progress_report(rownum, state.filename, true, false);
-=======
progress_report(tablespacenum, true, false);
->>>>>>> REL_16_9
/*
* Do not sync the resulting tar file yet, all files are synced once at
@@ -1742,318 +1690,6 @@ get_tablespace_mapping(const char *dir)
return dir;
}
-<<<<<<< HEAD
-
-/*
- * Receive a tar format stream from the connection to the server, and unpack
- * the contents of it into a directory. Only files, directories and
- * symlinks are supported, no other kinds of special files.
- *
- * If the data is for the main data directory, it will be restored in the
- * specified directory. If it's for another tablespace, it will be restored
- * in the original or mapped directory.
- */
-static void
-ReceiveAndUnpackTarFile(PGconn *conn, PGresult *res, int rownum)
-{
- UnpackTarState state;
- bool basetablespace;
-
- memset(&state, 0, sizeof(state));
- state.tablespacenum = rownum;
-
- basetablespace = PQgetisnull(res, rownum, 0);
- state.basetablespace = basetablespace;
- if (basetablespace)
- strlcpy(state.current_path, basedir,
sizeof(state.current_path));
- else
- {
- strlcpy(state.current_path,
- get_tablespace_mapping(PQgetvalue(res, rownum,
1)),
- sizeof(state.current_path));
-
- if (target_gp_dbid < 1)
- {
- pg_log_error("cannot restore user-defined tablespaces
without the --target-gp-dbid option");
- exit(1);
- }
-
- /*
- * Construct the new tablespace path using the given target gp
dbid
- */
- snprintf(state.gp_tablespace_filename,
sizeof(state.gp_tablespace_filename),
- "%s/%d/%s",
- state.current_path,
- target_gp_dbid,
- GP_TABLESPACE_VERSION_DIRECTORY);
- }
-
- ReceiveCopyData(conn, ReceiveTarAndUnpackCopyChunk, &state);
-
-
- if (state.file)
- fclose(state.file);
-
- progress_report(rownum, state.filename, true, false);
-
- if (state.file != NULL)
- {
- pg_log_error("COPY stream ended before last file was finished");
- exit(1);
- }
-
- if (basetablespace && writerecoveryconf)
- WriteRecoveryConfig(conn, basedir, recoveryconfcontents);
-
- if (basetablespace)
- WriteInternalConfFile();
- /*
- * No data is synced here, everything is done for all tablespaces at the
- * end.
- */
-}
-
-static void
-ReceiveTarAndUnpackCopyChunk(size_t r, char *copybuf, void *callback_data)
-{
- UnpackTarState *state = callback_data;
-
- if (state->file == NULL)
- {
-#ifndef WIN32
- int filemode;
-#endif
-
- /*
- * No current file, so this must be the header for a new file
- */
- if (r != TAR_BLOCK_SIZE)
- {
- pg_log_error("invalid tar block header size: %zu", r);
- exit(1);
- }
- totaldone += TAR_BLOCK_SIZE;
-
- state->current_len_left = read_tar_number(&copybuf[124], 12);
-
-#ifndef WIN32
- /* Set permissions on the file */
- filemode = read_tar_number(&copybuf[100], 8);
-#endif
-
- /*
- * All files are padded up to a multiple of TAR_BLOCK_SIZE
- */
- state->current_padding =
- tarPaddingBytesRequired(state->current_len_left);
-
- /*
- * First part of header is zero terminated filename
- */
- if (state->basetablespace)
- {
- snprintf(state->filename, sizeof(state->filename),
- "%s/%s", state->current_path, copybuf);
- }
- else
- {
- /*
- * Append relfile path to --target-gp-dbid tablespace
path.
- *
- * For example, copybuf can be
- *
"<GP_TABLESPACE_VERSION_DIRECTORY>_db<dbid>/16384/16385".
- * We create a pointer to the dbid and relfile
"/16384/16385",
- * construct the new tablespace with provided dbid, and
append
- * the dbid and relfile on top.
- */
- char *copybuf_dbid_relfile = strstr(copybuf, "/");
-
- snprintf(state->filename, sizeof(state->filename),
"%s%s",
- state->gp_tablespace_filename,
- copybuf_dbid_relfile);
- }
- if (state->filename[strlen(state->filename) - 1] == '/')
- {
- /*
- * Ends in a slash means directory or symlink to
directory
- */
- if (copybuf[156] == '5')
- {
- /*
- * Directory. Remove trailing slash first.
- */
- state->filename[strlen(state->filename) - 1] =
'\0';
- /*
- * Since the forceoverwrite flag is being used,
the
- * directories still exist. Remove them so that
- * pg_basebackup can create them. Skip when we
detect
- * pg_log because we want to retain its
contents.
- */
- if (forceoverwrite &&
pg_check_dir(state->filename) != 0)
- {
- /*
- * We want to retain the contents of
pg_log. And for
- * pg_xlog we assume is deleted at the
start of
- * pg_basebackup. We cannot delete
pg_xlog because if
- * streammode was used then it may have
already copied
- * new xlog files into pg_xlog
directory.
- */
- if (pg_str_endswith(state->filename,
"/pg_log") ||
-
pg_str_endswith(state->filename, "/log") ||
-
pg_str_endswith(state->filename, "/pg_wal") ||
-
pg_str_endswith(state->filename, "/pg_xlog"))
- return;
-
- rmtree(state->filename, true);
- }
-
- bool is_gp_tablespace_directory =
strncmp(state->gp_tablespace_filename,
-
state->filename, strlen(state->filename)) == 0;
- if (is_gp_tablespace_directory &&
!forceoverwrite) {
- /*
- * This directory has already been
created during beginning of BaseBackup().
- */
- return;
- }
- if (mkdir(state->filename, pg_dir_create_mode)
!= 0)
- {
- /*
- * When streaming WAL, pg_wal (or
pg_xlog for pre-9.6
- * clusters) will have been created by
the wal receiver
- * process. Also, when the WAL
directory location was
- * specified, pg_wal (or pg_xlog) has
already been created
- * as a symbolic link before starting
the actual backup.
- * So just ignore creation failures on
related
- * directories.
- */
- if (!((pg_str_endswith(state->filename,
"/pg_wal") ||
-
pg_str_endswith(state->filename, "/pg_xlog") ||
-
pg_str_endswith(state->filename, "/archive_status")) &&
- errno == EEXIST))
- {
- pg_log_error("could not create
directory \"%s\": %m",
-
state->filename);
- exit(1);
- }
- }
-#ifndef WIN32
- if (chmod(state->filename, (mode_t) filemode))
- pg_log_error("could not set permissions
on directory \"%s\": %m",
-
state->filename);
-#endif
- }
- else if (copybuf[156] == '2')
- {
- /*
- * Symbolic link
- *
- * It's most likely a link in pg_tblspc
directory, to the
- * location of a tablespace. Apply any
tablespace mapping
- * given on the command line
(--tablespace-mapping). (We
- * blindly apply the mapping without checking
that the link
- * really is inside pg_tblspc. We don't expect
there to be
- * other symlinks in a data directory, but if
there are, you
- * can call it an undocumented feature that you
can map them
- * too.)
- */
- state->filename[strlen(state->filename) - 1] =
'\0'; /* Remove trailing slash */
-
- state->mapped_tblspc_path =
- get_tablespace_mapping(&copybuf[157]);
- char *mapped_tblspc_path_with_dbid =
psprintf("%s/%d", state->mapped_tblspc_path, target_gp_dbid);
- if (symlink(mapped_tblspc_path_with_dbid,
state->filename) != 0)
- {
- pg_log_error("could not create symbolic
link from \"%s\" to \"%s\": %m",
-
state->filename, state->mapped_tblspc_path);
- exit(1);
- }
- pfree(mapped_tblspc_path_with_dbid);
- }
- else
- {
- pg_log_error("unrecognized link indicator
\"%c\"",
- copybuf[156]);
- exit(1);
- }
- return; /* directory or link
handled */
- }
-
- /*
- * regular file
- *
- * In GPDB, we may need to remove the file first if we are
forcing
- * an overwrite instead of starting with a blank directory. Some
- * files may have had their permissions changed to read only.
- * Remove the file instead of literally overwriting them.
- */
- if (forceoverwrite)
- remove(state->filename);
- state->file = fopen(state->filename, "wb");
- if (!state->file)
- {
- pg_log_error("could not create file \"%s\": %m",
state->filename);
- exit(1);
- }
-
-#ifndef WIN32
- if (chmod(state->filename, (mode_t) filemode))
- pg_log_error("could not set permissions on file \"%s\":
%m",
- state->filename);
-#endif
-
- if (state->current_len_left == 0)
- {
- /*
- * Done with this file, next one will be a new tar
header
- */
- fclose(state->file);
- state->file = NULL;
- return;
- }
- } /* new file */
- else
- {
- /*
- * Continuing blocks in existing file
- */
- if (state->current_len_left == 0 && r == state->current_padding)
- {
- /*
- * Received the padding block for this file, ignore it
and close
- * the file, then move on to the next tar header.
- */
- fclose(state->file);
- state->file = NULL;
- totaldone += r;
- return;
- }
-
- errno = 0;
- if (fwrite(copybuf, r, 1, state->file) != 1)
- {
- /* if write didn't set errno, assume problem is no disk
space */
- if (errno == 0)
- errno = ENOSPC;
- pg_log_error("could not write to file \"%s\": %m",
state->filename);
- exit(1);
- }
- totaldone += r;
- progress_report(state->tablespacenum, state->filename, false,
false);
-
- state->current_len_left -= r;
- if (state->current_len_left == 0 && state->current_padding == 0)
- {
- /*
- * Received the last block, and there is no padding to
be
- * expected. Close the file and move on to the next tar
header.
- */
- fclose(state->file);
- state->file = NULL;
- return;
- }
- } /* continuing
data in existing file */
-}
-
static void
add_to_exclude_list(PQExpBufferData *buf, const char *exclude)
{
@@ -2128,8 +1764,6 @@ build_exclude_list(void)
return buf.data;
}
-=======
->>>>>>> REL_16_9
/*
* Receive the backup manifest file and write it out to a file.
*/
@@ -2360,29 +1994,13 @@ BaseBackup(char *compression_algorithm, char
*compression_detail,
fprintf(stderr, "\n");
}
-<<<<<<< HEAD
- basebkp =
- psprintf("BASE_BACKUP LABEL '%s' %s %s %s %s %s %s %s %s %s %s",
- escaped_label,
- estimatesize ? "PROGRESS" : "",
- includewal == FETCH_WAL ? "WAL" : "",
- fastcheckpoint ? "FAST" : "",
- includewal == NO_WAL ? "" : "NOWAIT",
- maxrate_clause ? maxrate_clause : "",
- format == 't' ? "TABLESPACE_MAP" : "",
- verify_checksums ? "" : "NOVERIFY_CHECKSUMS",
- manifest_clause ? manifest_clause : "",
- manifest_checksums_clause,
- exclude_list);
-
if (exclude_list[0] != '\0')
free(exclude_list);
-=======
+
if (use_new_option_syntax && buf.len > 0)
basebkp = psprintf("BASE_BACKUP (%s)", buf.data);
else
basebkp = psprintf("BASE_BACKUP %s", buf.data);
->>>>>>> REL_16_9
if (PQsendQuery(conn, basebkp) == 0)
pg_fatal("could not send replication command \"%s\": %s",
@@ -2450,10 +2068,7 @@ BaseBackup(char *compression_algorithm, char
*compression_detail,
*/
if (backup_target == NULL && format == 'p' && !PQgetisnull(res,
i, 1))
{
-<<<<<<< HEAD
- char *path = unconstify(char *,
get_tablespace_mapping(PQgetvalue(res, i, 1)));
char path_with_subdir[MAXPGPATH];
-=======
char *path = PQgetvalue(res, i, 1);
if (is_absolute_path(path))
@@ -2463,7 +2078,6 @@ BaseBackup(char *compression_algorithm, char
*compression_detail,
/* This is an in-place tablespace, so prepend
basedir. */
path = psprintf("%s/%s", basedir, path);
}
->>>>>>> REL_16_9
snprintf(path_with_subdir, MAXPGPATH, "%s/%d/%s", path,
target_gp_dbid, GP_TABLESPACE_VERSION_DIRECTORY);
@@ -2829,11 +2443,7 @@ main(int argc, char **argv)
num_exclude_from = 0;
atexit(cleanup_directories_atexit);
-<<<<<<< HEAD
- while ((c = getopt_long(argc, argv,
"CD:F:o:r:RT:xX:l:zZ:d:c:h:p:U:s:S:wWvPE:",
-=======
- while ((c = getopt_long(argc, argv,
"c:Cd:D:F:h:l:nNp:Pr:Rs:S:t:T:U:vwWX:zZ:",
->>>>>>> REL_16_9
+ while ((c = getopt_long(argc, argv,
"c:Cd:D:E:F:h:l:nNo:p:Pr:Rs:S:t:T:U:vwWxX:zZ:",
long_options,
&option_index)) != -1)
{
switch (c)
@@ -3216,7 +2826,6 @@ main(int argc, char **argv)
}
}
-<<<<<<< HEAD
if (writeconffilesonly)
{
if(create_slot)
@@ -3236,19 +2845,9 @@ main(int argc, char **argv)
}
}
-#ifndef HAVE_LIBZ
- if (compresslevel != 0)
- {
- pg_log_error("this build does not support compression");
- exit(1);
- }
-#endif
-
-=======
/*
* Sanity checks for progress reporting options.
*/
->>>>>>> REL_16_9
if (showprogress && !estimatesize)
{
pg_log_error("%s and %s are incompatible options",
@@ -3285,7 +2884,6 @@ main(int argc, char **argv)
}
atexit(disconnect_atexit);
-<<<<<<< HEAD
/* To only write recovery.conf and internal.auto.conf files,
one of the usecase is gprecoverseg differential recovery (there can
be others in future)
*/
@@ -3296,7 +2894,7 @@ main(int argc, char **argv)
success = true;
return 0;
}
-=======
+
#ifndef WIN32
/*
@@ -3309,7 +2907,6 @@ main(int argc, char **argv)
*/
pqsignal(SIGCHLD, sigchld_handler);
#endif
->>>>>>> REL_16_9
/*
* Set umask so that directories/files are created with the same
diff --git a/src/bin/pg_basebackup/pg_receivewal.c b/src/bin/pg_basebackup/pg_receivewal.c
index 8d124df3814..6450af77195 100644
--- a/src/bin/pg_basebackup/pg_receivewal.c
+++ b/src/bin/pg_basebackup/pg_receivewal.c
@@ -839,12 +839,8 @@ main(int argc, char **argv)
* if one is needed, in GetConnection.)
*/
#ifndef WIN32
-<<<<<<< HEAD
- pqsignal(SIGINT, sigint_handler);
-=======
pqsignal(SIGINT, sigexit_handler);
pqsignal(SIGTERM, sigexit_handler);
->>>>>>> REL_16_9
#endif
/*
diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c
index c3c43059e9b..769c57deaf1 100644
--- a/src/bin/pg_basebackup/pg_recvlogical.c
+++ b/src/bin/pg_basebackup/pg_recvlogical.c
@@ -900,8 +900,6 @@ main(int argc, char **argv)
exit(1);
}
-<<<<<<< HEAD
-=======
if (two_phase && !do_create_slot)
{
pg_log_error("--two-phase may only be specified with
--create-slot");
@@ -909,7 +907,6 @@ main(int argc, char **argv)
exit(1);
}
->>>>>>> REL_16_9
/*
* Obtain a connection to server. Notably, if we need a password, we
want
* to collect it from the user immediately.
@@ -925,12 +922,8 @@ main(int argc, char **argv)
* if one is needed, in GetConnection.)
*/
#ifndef WIN32
-<<<<<<< HEAD
- pqsignal(SIGINT, sigint_handler);
-=======
pqsignal(SIGINT, sigexit_handler);
pqsignal(SIGTERM, sigexit_handler);
->>>>>>> REL_16_9
pqsignal(SIGHUP, sighup_handler);
#endif
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index 1f5941e0d26..e69ad912a25 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -68,17 +68,10 @@ mark_file_as_archived(StreamCtl *stream, const char *fname)
return false;
}
-<<<<<<< HEAD
- if (stream->walmethod->close(f, CLOSE_NORMAL) != 0)
- {
- pg_log_error("could not close archive status file \"%s\": %s",
- tmppath,
stream->walmethod->getlasterror());
-=======
if (stream->walmethod->ops->close(f, CLOSE_NORMAL) != 0)
{
pg_log_error("could not close archive status file \"%s\": %s",
tmppath,
GetLastWalMethodError(stream->walmethod));
->>>>>>> REL_16_9
return false;
}
@@ -106,14 +99,9 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint)
XLogFileName(walfile_name, stream->timeline, segno, WalSegSz);
/* Note that this considers the compression used if necessary */
-<<<<<<< HEAD
- fn = stream->walmethod->get_file_name(current_walfile_name,
-
stream->partial_suffix);
-=======
fn = stream->walmethod->ops->get_file_name(stream->walmethod,
walfile_name,
stream->partial_suffix);
->>>>>>> REL_16_9
/*
* When streaming to files, if an existing file exists we verify that
it's
@@ -125,23 +113,14 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint)
* When streaming to tar, no file with this name will exist before, so
we
* never have to verify a size.
*/
-<<<<<<< HEAD
- if (stream->walmethod->compression() == 0 &&
- stream->walmethod->existsfile(fn))
-=======
if (stream->walmethod->compression_algorithm == PG_COMPRESSION_NONE &&
stream->walmethod->ops->existsfile(stream->walmethod, fn))
->>>>>>> REL_16_9
{
size = stream->walmethod->ops->get_file_size(stream->walmethod,
fn);
if (size < 0)
{
pg_log_error("could not get size of write-ahead log
file \"%s\": %s",
-<<<<<<< HEAD
- fn,
stream->walmethod->getlasterror());
-=======
fn,
GetLastWalMethodError(stream->walmethod));
->>>>>>> REL_16_9
pg_free(fn);
return false;
}
@@ -152,11 +131,7 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint)
if (f == NULL)
{
pg_log_error("could not open existing
write-ahead log file \"%s\": %s",
-<<<<<<< HEAD
- fn,
stream->walmethod->getlasterror());
-=======
fn,
GetLastWalMethodError(stream->walmethod));
->>>>>>> REL_16_9
pg_free(fn);
return false;
}
@@ -182,11 +157,7 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint)
pg_log_error(ngettext("write-ahead log file \"%s\" has
%zd byte, should be 0 or %d",
"write-ahead
log file \"%s\" has %zd bytes, should be 0 or %d",
size),
-<<<<<<< HEAD
- fn, (int) size, WalSegSz);
-=======
fn, size, WalSegSz);
->>>>>>> REL_16_9
pg_free(fn);
return false;
}
@@ -202,11 +173,7 @@ open_walfile(StreamCtl *stream, XLogRecPtr startpoint)
if (f == NULL)
{
pg_log_error("could not open write-ahead log file \"%s\": %s",
-<<<<<<< HEAD
- fn, stream->walmethod->getlasterror());
-=======
fn,
GetLastWalMethodError(stream->walmethod));
->>>>>>> REL_16_9
pg_free(fn);
return false;
}
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 0fd5cca0d58..c8b291d0c43 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -5,17 +5,10 @@ use strict;
use warnings;
use Config;
use File::Basename qw(basename dirname);
-<<<<<<< HEAD
-use File::Path qw(rmtree);
-use PostgresNode;
-use TestLib;
-use Test::More tests => 109 + 21;
-=======
use File::Path qw(rmtree);
use PostgreSQL::Test::Cluster;
use PostgreSQL::Test::Utils;
use Test::More;
->>>>>>> REL_16_9
program_help_ok('pg_basebackup');
program_version_ok('pg_basebackup');
@@ -65,18 +58,12 @@ if (open my $badchars, '>>',
"$tempdir/pgdata/FOO\xe0\xe0\xe0BAR")
$node->set_replication_conf();
$node->reload;
-<<<<<<< HEAD
command_fails(['pg_basebackup', '-D', "$tempdir/backup" ],
'pg_basebackup fails without specifiying the target cloudberry db id');
$node->command_fails(
- [ 'pg_basebackup', '-D', "$tempdir/backup", '--target-gp-dbid', '123' ],
-=======
-
-$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup" ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '-D', "$tempdir/backup", '--target-gp-dbid',
'123' ],
'pg_basebackup fails because of WAL configuration');
ok(!-d "$tempdir/backup", 'backup directory was cleaned up');
@@ -235,12 +222,8 @@ foreach my $filename (@tempRelationFiles)
}
# Run base backup.
-<<<<<<< HEAD
-$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backup", '-X', 'none',
'--target-gp-dbid', '123' ],
-=======
$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none',
'--target-gp-dbid', '123' ],
'pg_basebackup runs');
ok(-f "$tempdir/backup/PG_VERSION", 'backup was created');
ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included');
@@ -312,12 +295,8 @@ $node->command_ok(
[
@pg_basebackup_defs, '-D',
"$tempdir/backup2", '--no-manifest',
-<<<<<<< HEAD
- '--waldir', "$tempdir/xlog2",
+ '--waldir', "$tempdir/xlog2",
'--target-gp-dbid', '123'
-=======
- '--waldir', "$tempdir/xlog2"
->>>>>>> REL_16_9
],
'separate xlog directory');
ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created');
@@ -326,57 +305,31 @@ ok(-d "$tempdir/xlog2/", 'xlog directory was created');
rmtree("$tempdir/backup2");
rmtree("$tempdir/xlog2");
-<<<<<<< HEAD
-$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/tarbackup",
'--target-gp-dbid', '123', , '-Ft' ],
-=======
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup", '-Ft' ],
->>>>>>> REL_16_9
+$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup",
'--target-gp-dbid', '123', , '-Ft' ],
'tar format');
ok(-f "$tempdir/tarbackup/base.tar", 'backup tar was created');
rmtree("$tempdir/tarbackup");
$node->command_fails(
-<<<<<<< HEAD
- [ 'pg_basebackup', '-D', "$tempdir/backup_foo", '--target-gp-dbid',
'123', '-Fp', "-T=/foo" ],
- '-T with empty old directory fails');
-$node->command_fails(
- [ 'pg_basebackup', '-D', "$tempdir/backup_foo", '--target-gp-dbid',
'123', '-Fp', "-T/foo=" ],
- '-T with empty new directory fails');
-$node->command_fails(
- [
- 'pg_basebackup', '-D', "$tempdir/backup_foo", '-Fp',
- "-T/foo=/bar=/baz", '--target-gp-dbid', '123'
- ],
- '-T with multiple = fails');
-$node->command_fails(
- [ 'pg_basebackup', '-D', "$tempdir/backup_foo", '--target-gp-dbid',
'123', '-Fp', "-Tfoo=/bar" ],
- '-T with old directory not absolute fails');
-$node->command_fails(
- [ 'pg_basebackup', '-D', "$tempdir/backup_foo", '--target-gp-dbid',
'123', '-Fp', "-T/foo=bar" ],
- '-T with new directory not absolute fails');
-$node->command_fails(
- [ 'pg_basebackup', '-D', "$tempdir/backup_foo", '--target-gp-dbid',
'123', '-Fp', "-Tfoo" ],
-=======
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T=/foo" ],
+ [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '--target-gp-dbid',
'123', '-Fp', "-T=/foo" ],
'-T with empty old directory fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=" ],
+ [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '--target-gp-dbid',
'123', '-Fp', "-T/foo=" ],
'-T with empty new directory fails');
$node->command_fails(
[
@pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp',
- "-T/foo=/bar=/baz"
+ "-T/foo=/bar=/baz", '--target-gp-dbid', '123'
],
'-T with multiple = fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo=/bar"
],
+ [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '--target-gp-dbid',
'123', '-Fp', "-Tfoo=/bar" ],
'-T with old directory not absolute fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=bar"
],
+ [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '--target-gp-dbid',
'123', '-Fp', "-T/foo=bar" ],
'-T with new directory not absolute fails');
$node->command_fails(
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo" ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '--target-gp-dbid',
'123', '-Fp', "-Tfoo" ],
'-T with invalid format fails');
my $superlongname = "superlongname_" . ("x" x 100);
@@ -385,15 +338,6 @@ SKIP:
{
my $superlongpath = "$pgdata/$superlongname";
-<<<<<<< HEAD
-open my $file, '>', "$superlongpath"
- or die "unable to create file $superlongpath";
-close $file;
-$node->command_fails(
- [ 'pg_basebackup', '-D', "$tempdir/tarbackup_l1", '--target-gp-dbid',
'123', '-Ft' ],
- 'pg_basebackup tar with long name fails');
-unlink "$pgdata/$superlongname";
-=======
skip "File path too long", 1
if $windows_os && length($superlongpath) > 255;
@@ -405,7 +349,6 @@ unlink "$pgdata/$superlongname";
'pg_basebackup tar with long name fails');
unlink "$superlongpath";
}
->>>>>>> REL_16_9
# The following tests are for symlinks.
@@ -433,26 +376,14 @@ dir_symlink("$sys_tempdir/pg_replslot",
"$pgdata/pg_replslot")
$node->start;
# Test backup of a tablespace using tar format.
-<<<<<<< HEAD
-# Create a temporary directory in the system location and symlink it
-# to our physical temp location. That way we can use shorter names
-# for the tablespace directories, which hopefully won't run afoul of
-# the 99 character length limit.
-my $sys_tempdir = TestLib::tempdir_short;
-=======
# Symlink the system located tempdir to our physical temp location.
# That way we can use shorter names for the tablespace directories,
# which hopefully won't run afoul of the 99 character length limit.
->>>>>>> REL_16_9
my $real_sys_tempdir = "$sys_tempdir/tempdir";
dir_symlink "$tempdir", $real_sys_tempdir;
mkdir "$tempdir/tblspc1";
-<<<<<<< HEAD
-my $realTsDir = "$real_sys_tempdir/tblspc1";
-=======
my $realTsDir = "$real_sys_tempdir/tblspc1";
->>>>>>> REL_16_9
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc1 LOCATION '$realTsDir';");
$node->safe_psql('postgres',
@@ -474,11 +405,7 @@ is(scalar(@tblspc_tars), 1, 'one tablespace tar was
created');
SKIP:
{
my $tar = $ENV{TAR};
-<<<<<<< HEAD
- # don't check for a working tar here, to accomodate various odd
-=======
# don't check for a working tar here, to accommodate various odd
->>>>>>> REL_16_9
# cases such as AIX. If tar doesn't work the init_from_backup below
# will fail.
skip "no tar program available", 1
@@ -490,11 +417,7 @@ SKIP:
$node2->init_from_backup($node, 'tarbackup2', tar_program => $tar);
# Recover tablespace into a new directory (not where it was!)
-<<<<<<< HEAD
- my $repTsDir = "$tempdir/tblspc1replica";
-=======
my $repTsDir = "$tempdir/tblspc1replica";
->>>>>>> REL_16_9
my $realRepTsDir = "$real_sys_tempdir/tblspc1replica";
mkdir $repTsDir;
PostgreSQL::Test::Utils::system_or_bail($tar, 'xf', $tblspc_tars[0],
@@ -541,32 +464,18 @@ my $tblSpc1Id = basename(
foreach my $filename (@tempRelationFiles)
{
append_to_file(
-<<<<<<< HEAD
"$real_sys_tempdir/tblspc1/$node_dbid/$tblSpc1Id/$postgresOid/$filename",
-=======
- "$real_sys_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename",
->>>>>>> REL_16_9
'TEMP_RELATION');
}
$node->command_fails(
-<<<<<<< HEAD
- [ 'pg_basebackup', '-D', "$tempdir/backup1", '-Fp', '--target-gp-dbid',
'-1' ],
+ [ @pg_basebackup_defs, '-D', "$tempdir/backup1", '-Fp',
'--target-gp-dbid', '-1' ],
'plain format with tablespaces fails without tablespace mapping and
target-gp-dbid as the test server dbid');
$node->command_ok(
[
- 'pg_basebackup', '-D', "$tempdir/backup1", '-Fp',
+ @pg_basebackup_defs, '-D', "$tempdir/backup1", '-Fp',
'--target-gp-dbid', '1',
-=======
- [ @pg_basebackup_defs, '-D', "$tempdir/backup1", '-Fp' ],
- 'plain format with tablespaces fails without tablespace mapping');
-
-$node->command_ok(
- [
- @pg_basebackup_defs, '-D',
- "$tempdir/backup1", '-Fp',
->>>>>>> REL_16_9
"-T$realTsDir=$tempdir/tbackup/tblspc1",
],
'plain format with tablespaces succeeds with tablespace mapping');
@@ -615,11 +524,7 @@ foreach my $filename (@tempRelationFiles)
# Also remove temp relation files or tablespace drop will fail.
my $filepath =
-<<<<<<< HEAD
"$real_sys_tempdir/tblspc1/$node_dbid/$tblSpc1Id/$postgresOid/$filename";
-=======
- "$real_sys_tempdir/tblspc1/$tblSpc1Id/$postgresOid/$filename";
->>>>>>> REL_16_9
unlink($filepath)
or BAIL_OUT("unable to unlink $filepath");
@@ -639,13 +544,8 @@ $node->safe_psql('postgres',
$realTsDir =~ s/=/\\=/;
$node->command_ok(
[
-<<<<<<< HEAD
- 'pg_basebackup', '-D',
- "$tempdir/backup3", '--target-gp-dbid', '123', '-Fp',
-=======
@pg_basebackup_defs, '-D',
- "$tempdir/backup3", '-Fp',
->>>>>>> REL_16_9
+ "$tempdir/backup3", '--target-gp-dbid', '123', '-Fp',
"-T$realTsDir=$tempdir/tbackup/tbl\\=spc2",
],
'mapping tablespace with = sign in path');
@@ -657,23 +557,13 @@ mkdir "$tempdir/$superlongname";
$realTsDir = "$real_sys_tempdir/$superlongname";
$node->safe_psql('postgres',
"CREATE TABLESPACE tblspc3 LOCATION '$realTsDir';");
-<<<<<<< HEAD
# skip test since gpdb doesn't support tar file for output
-#$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/tarbackup_l3",
'--target-gp-dbid', '123', '-Ft' ],
+#$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3",
'--target-gp-dbid', '123', '-Ft' ],
# 'pg_basebackup tar with long symlink target');
$node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
rmtree("$tempdir/tarbackup_l3");
-$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backupR",
'--target-gp-dbid', '123', '-R' ],
-=======
-$node->command_ok(
- [ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ],
- 'pg_basebackup tar with long symlink target');
-$node->safe_psql('postgres', "DROP TABLESPACE tblspc3;");
-rmtree("$tempdir/tarbackup_l3");
-
-$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backupR", '-R' ],
->>>>>>> REL_16_9
+$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backupR",
'--target-gp-dbid', '123', '-R' ],
'pg_basebackup -R runs');
ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists');
ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created');
@@ -687,45 +577,26 @@ like(
'postgresql.auto.conf sets primary_conninfo');
$node->command_ok(
-<<<<<<< HEAD
- [ 'pg_basebackup', '-D', "$tempdir/backupxd", '--target-gp-dbid', '123'
],
-=======
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxd" ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '-D', "$tempdir/backupxd", '--target-gp-dbid',
'123' ],
'pg_basebackup runs in default xlog mode');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxd/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxd");
$node->command_ok(
-<<<<<<< HEAD
- [ 'pg_basebackup', '-D', "$tempdir/backupxf", '--target-gp-dbid',
'123', '-X', 'fetch' ],
-=======
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '-X', 'fetch' ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '--target-gp-dbid',
'123', '-X', 'fetch' ],
'pg_basebackup -X fetch runs');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxf/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxf");
$node->command_ok(
-<<<<<<< HEAD
- [ 'pg_basebackup', '-D', "$tempdir/backupxs", '--target-gp-dbid',
'123', '-X', 'stream' ],
-=======
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs", '-X', 'stream' ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '-D', "$tempdir/backupxs", '--target-gp-dbid',
'123', '-X', 'stream' ],
'pg_basebackup -X stream runs');
ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxs/pg_wal")),
'WAL files copied');
rmtree("$tempdir/backupxs");
$node->command_ok(
-<<<<<<< HEAD
- [ 'pg_basebackup', '-D', "$tempdir/backupxst", '--target-gp-dbid',
'123', '-X', 'stream', '-Ft' ],
-=======
- [
- @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream',
- '-Ft'
- ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '-D', "$tempdir/backupxst", '--target-gp-dbid',
'123', '-X', 'stream', '-Ft' ],
'pg_basebackup -X stream runs in tar mode');
ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created");
rmtree("$tempdir/backupxst");
@@ -733,12 +604,8 @@ $node->command_ok(
[
@pg_basebackup_defs, '-D',
"$tempdir/backupnoslot", '-X',
-<<<<<<< HEAD
- 'stream', '--no-slot',
- '--target-gp-dbid', '123',
-=======
- 'stream', '--no-slot'
->>>>>>> REL_16_9
+ 'stream', '--no-slot',
+ '--target-gp-dbid', '123'
],
'pg_basebackup -X stream runs with --no-slot');
rmtree("$tempdir/backupnoslot");
@@ -800,35 +667,23 @@ $node->command_fails(
[
@pg_basebackup_defs, '-D',
"$tempdir/backupxs_sl_fail", '-X',
-<<<<<<< HEAD
'stream', '-S',
'slot0',
'--target-gp-dbid', '123',
-=======
- 'stream', '-S',
- 'slot0'
->>>>>>> REL_16_9
],
'pg_basebackup fails with nonexistent replication slot');
$node->command_fails(
-<<<<<<< HEAD
- [ 'pg_basebackup', '--target-gp-dbid', '123', '-D',
"$tempdir/backupxs_slot", '-C' ],
-=======
- [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C' ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '--target-gp-dbid', '123', '-D',
"$tempdir/backupxs_slot", '-C' ],
'pg_basebackup -C fails without slot name');
$node->command_fails(
[
@pg_basebackup_defs, '-D',
"$tempdir/backupxs_slot", '-C',
-<<<<<<< HEAD
- '-S', 'slot0',
- '--target-gp-dbid', '123',
-=======
'-S', 'slot0',
- '--no-slot'
+ '--no-slot',
+ '--target-gp-dbid', '123',
],
'pg_basebackup fails with -C -S --no-slot');
$node->command_fails_like(
@@ -861,21 +716,12 @@ $node->command_fails(
@pg_basebackup_defs, '-D',
"$tempdir/backupxs_slot", '-C',
'-S', 'slot0',
->>>>>>> REL_16_9
'--no-slot'
],
'pg_basebackup fails with -C -S --no-slot');
$node->command_ok(
-<<<<<<< HEAD
- [ 'pg_basebackup', '--target-gp-dbid', '123', '-D',
"$tempdir/backupxs_slot", '-C', '-S', 'slot0' ],
-=======
- [
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_slot", '-C',
- '-S', 'slot0'
- ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '--target-gp-dbid', '123', '-D',
"$tempdir/backupxs_slot", '-C', '-S', 'slot0' ],
'pg_basebackup -C runs');
rmtree("$tempdir/backupxs_slot");
@@ -894,15 +740,7 @@ isnt(
'restart LSN of new slot is not null');
$node->command_fails(
-<<<<<<< HEAD
- [ 'pg_basebackup', '--target-gp-dbid', '123', '-D',
"$tempdir/backupxs_slot1", '-v', '-C', '-S', 'slot0' ],
-=======
- [
- @pg_basebackup_defs, '-D',
- "$tempdir/backupxs_slot1", '-C',
- '-S', 'slot0'
- ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '--target-gp-dbid', '123', '-D',
"$tempdir/backupxs_slot1", '-v', '-C', '-S', 'slot0' ],
'pg_basebackup fails with -C -S and a previously existing slot');
$node->safe_psql('postgres',
@@ -912,24 +750,12 @@ my $lsn = $node->safe_psql('postgres',
);
is($lsn, '', 'restart LSN of new slot is null');
$node->command_fails(
-<<<<<<< HEAD
- [ 'pg_basebackup', '--target-gp-dbid', '123', '-D', "$tempdir/fail",
'-S', 'slot1', '-X', 'none' ],
+ [ @pg_basebackup_defs, '--target-gp-dbid', '123', '-D',
"$tempdir/fail", '-S', 'slot1', '-X', 'none' ],
'pg_basebackup with replication slot fails without WAL streaming');
$node->command_ok(
[
- 'pg_basebackup', '-D', "$tempdir/backupxs_sl",
'--target-gp-dbid', '123', '-X',
+ @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl",
'--target-gp-dbid', '123', '-X',
'stream', '-S', 'slot1'
-=======
- [
- @pg_basebackup_defs, '-D', "$tempdir/fail", '-S',
- 'slot1', '-X', 'none'
- ],
- 'pg_basebackup with replication slot fails without WAL streaming');
-$node->command_ok(
- [
- @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl", '-X',
- 'stream', '-S', 'slot1'
->>>>>>> REL_16_9
],
'pg_basebackup -X stream with replication slot runs');
$lsn = $node->safe_psql('postgres',
@@ -940,14 +766,9 @@ rmtree("$tempdir/backupxs_sl");
$node->command_ok(
[
-<<<<<<< HEAD
- 'pg_basebackup', '-D', "$tempdir/backupxs_sl_R", '-X',
+ @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_R", '-X',
'stream', '-S', 'slot1', '-R',
'--target-gp-dbid', '123'
-=======
- @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_R", '-X',
- 'stream', '-S', 'slot1', '-R',
->>>>>>> REL_16_9
],
'pg_basebackup with replication slot and -R runs');
like(
@@ -976,11 +797,7 @@ $node->corrupt_page_checksum($file_corrupt1, 0);
$node->start;
$node->command_checks_all(
-<<<<<<< HEAD
- [ 'pg_basebackup', '--target-gp-dbid', '123', '-D',
"$tempdir/backup_corrupt" ],
-=======
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt" ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '--target-gp-dbid', '123', '-D',
"$tempdir/backup_corrupt" ],
1,
[qr{^$}],
[qr/^WARNING.*checksum verification failed/s],
@@ -996,11 +813,7 @@ for my $i (1 .. 5)
$node->start;
$node->command_checks_all(
-<<<<<<< HEAD
- [ 'pg_basebackup', '--target-gp-dbid', '123', '-D',
"$tempdir/backup_corrupt2" ],
-=======
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt2" ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '--target-gp-dbid', '123', '-D',
"$tempdir/backup_corrupt2" ],
1,
[qr{^$}],
[qr/^WARNING.*further.*failures.*will.not.be.reported/s],
@@ -1013,11 +826,7 @@ $node->corrupt_page_checksum($file_corrupt2, 0);
$node->start;
$node->command_checks_all(
-<<<<<<< HEAD
- [ 'pg_basebackup', '--target-gp-dbid', '123', '-D',
"$tempdir/backup_corrupt3" ],
-=======
- [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt3" ],
->>>>>>> REL_16_9
+ [ @pg_basebackup_defs, '--target-gp-dbid', '123', '-D',
"$tempdir/backup_corrupt3" ],
1,
[qr{^$}],
[qr/^WARNING.*7 total checksum verification failures/s],
@@ -1027,13 +836,8 @@ rmtree("$tempdir/backup_corrupt3");
# do not verify checksums, should return ok
$node->command_ok(
[
-<<<<<<< HEAD
- 'pg_basebackup', '--target-gp-dbid', '123', '-D',
+ @pg_basebackup_defs, '--target-gp-dbid', '123', '-D',
"$tempdir/backup_corrupt4", '--no-verify-checksums'
-=======
- @pg_basebackup_defs, '-D',
- "$tempdir/backup_corrupt4", '--no-verify-checksums',
->>>>>>> REL_16_9
],
'pg_basebackup with -k does not report checksum mismatch');
rmtree("$tempdir/backup_corrupt4");
@@ -1041,117 +845,13 @@ rmtree("$tempdir/backup_corrupt4");
$node->safe_psql('postgres', "DROP TABLE corrupt1;");
$node->safe_psql('postgres', "DROP TABLE corrupt2;");
-<<<<<<< HEAD
-# Some additional GPDB tests
-my $twenty_characters = '11111111112222222222';
-my $longer_tempdir =
"$tempdir/some_long_directory_path_$twenty_characters$twenty_characters$twenty_characters$twenty_characters$twenty_characters";
-my $some_backup_dir = "$tempdir/backup_dir";
-my $some_other_backup_dir = "$tempdir/other_backup_dir";
-
-mkdir "$longer_tempdir";
-mkdir "$some_backup_dir";
-$node->psql('postgres', "CREATE TABLESPACE too_long_tablespace LOCATION
'$longer_tempdir';");
-$node->command_checks_all(
- [ 'pg_basebackup', '-D', "$some_backup_dir", '--target-gp-dbid', '99'],
- 1,
- [qr{^$}],
- [qr/symbolic link ".*" target is too long and will not be added to the
backup/],
- 'basebackup with a tablespace that has a very long location should
error out with target is too long.');
-
-mkdir "$some_other_backup_dir";
-$node->command_checks_all(
- ['pg_basebackup', '-D', "$some_other_backup_dir", '--target-gp-dbid',
'99'],
- 1,
- [qr{^$}],
- [qr/The symbolic link with target ".*" is too long. Symlink targets
with length greater than 100 characters would be truncated./],
- 'basebackup with a tablespace that has a very long location should
error out link not added to the backup.');
-
-$node->command_checks_all(
- ['ls', "$some_other_backup_dir/pg_tblspc/*"],
- 2,
- [qr{^$}],
- [qr/No such file/],
- 'tablespace directory should be empty');
-
-$node->psql('postgres', "DROP TABLESPACE too_long_tablespace;");
-
-#
-# GPDB: Exclude some files with the --exclude-from option
-#
-
-my $exclude_tempdir = "$tempdir/backup_exclude";
-my $excludelist = "$tempdir/exclude.list";
-
-mkdir "$exclude_tempdir";
-mkdir "$pgdata/exclude";
-
-open EXCLUDELIST, ">$excludelist";
-
-# Put a large amount of non-exist patterns in the exclude-from file,
-# the pattern matching is efficient enough to handle them.
-for my $i (1..1000000) {
- print EXCLUDELIST "./exclude/non_exist.$i\n";
-}
-
-# Create some files to exclude
-for my $i (1..1000) {
- print EXCLUDELIST "./exclude/$i\n";
-
- open FILE, ">$pgdata/exclude/$i";
- close FILE;
-}
-
-# Below file should not be excluded
-open FILE, ">$pgdata/exclude/keep";
-close FILE;
-
-close EXCLUDELIST;
-
-$node->command_ok(
- [ 'pg_basebackup',
- '-D', "$exclude_tempdir",
- '--target-gp-dbid', '123',
- '--exclude-from', "$excludelist" ],
- 'pg_basebackup runs with exclude-from file');
-ok(! -f "$exclude_tempdir/exclude/0", 'excluded files were not created');
-ok(-f "$exclude_tempdir/exclude/keep", 'other files were created');
-
-# GPDB: Exclude gpbackup default directory
-my $gpbackup_test_dir = "$tempdir/gpbackup_test_dir";
-mkdir "$pgdata/backups";
-append_to_file("$pgdata/backups/random_backup_file", "some random backup
data");
-
-$node->command_ok([ 'pg_basebackup', '-D', $gpbackup_test_dir,
'--target-gp-dbid', '123' ],
- 'pg_basebackup does not copy over \'backups/\' directory created by
gpbackup');
-
-ok(! -d "$gpbackup_test_dir/backups", 'gpbackup default backup directory
should be excluded');
-rmtree($gpbackup_test_dir);
-
-#GPDB: write config files only
-mkdir("$tempdir/backup");
-
-$node->command_fails([ 'pg_basebackup', '-D', "$tempdir/backup",
'--target-gp-dbid', '123',
- '--write-conf-files-only', '--create-slot',
'--slot', "wal_replication_slot"],
- 'pg_basebackup --write-conf-files-only fails with
--create_slot');
-
-$node->command_fails([ 'pg_basebackup', '-D', "$tempdir/backup",
'--target-gp-dbid', '123',
- '--write-conf-files-only', '--write-recovery-conf' ],
- 'pg_basebackup --write-conf-files-only fails with
--write-recovery-conf');
-
-$node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backup",
'--target-gp-dbid', '123', '--write-conf-files-only' ],
- 'pg_basebackup runs with write-conf-files-only');
-ok(-f "$tempdir/backup/internal.auto.conf", 'internal.auto.conf was created');
-ok(-f "$tempdir/backup/postgresql.auto.conf", 'postgresql.auto.conf was
created');
-ok(-f "$tempdir/backup/standby.signal", 'standby.signal was created');
-rmtree("$tempdir/backup");
-=======
note "Testing pg_basebackup with compression methods";
# Check ZLIB compression if available.
SKIP:
{
skip "postgres was not built with ZLIB support", 7
- if (!check_pg_config("#define HAVE_LIBZ 1"));
+ if (!check_pg_config("#define HAVE_LIBZ 1"));
$node->command_ok(
[
@@ -1194,11 +894,11 @@ SKIP:
# Check the integrity of the files generated.
my $gzip = $ENV{GZIP_PROGRAM};
skip "program gzip is not found in your system", 1
- if (!defined $gzip
- || $gzip eq '');
+ if (!defined $gzip
+ || $gzip eq '');
my $gzip_is_valid =
- system_log($gzip, '--test', @zlib_files, @zlib_files2, @zlib_files3);
+ system_log($gzip, '--test', @zlib_files, @zlib_files2,
@zlib_files3);
is($gzip_is_valid, 0, "gzip verified the integrity of compressed data");
rmtree("$tempdir/backup_gzip");
rmtree("$tempdir/backup_gzip2");
@@ -1213,7 +913,7 @@ $node->safe_psql('postgres',
q{CREATE TABLE t AS SELECT a FROM generate_series(1,10000) AS a;});
my $sigchld_bb_timeout =
- IPC::Run::timer($PostgreSQL::Test::Utils::timeout_default);
+ IPC::Run::timer($PostgreSQL::Test::Utils::timeout_default);
my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', '');
my $sigchld_bb = IPC::Run::start(
[
@@ -1231,16 +931,16 @@ my $sigchld_bb = IPC::Run::start(
$sigchld_bb_timeout);
is( $node->poll_query_until(
- 'postgres',
- "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE "
- . "application_name = '010_pg_basebackup.pl' AND wait_event =
'WalSenderMain' "
- . "AND backend_type = 'walsender' AND query ~
'START_REPLICATION'"),
+ 'postgres',
+ "SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE "
+ . "application_name = '010_pg_basebackup.pl' AND wait_event =
'WalSenderMain' "
+ . "AND backend_type = 'walsender' AND query ~
'START_REPLICATION'"),
"1",
"Walsender killed");
ok( pump_until(
- $sigchld_bb, $sigchld_bb_timeout,
- \$sigchld_bb_stderr, qr/background process terminated
unexpectedly/),
+ $sigchld_bb, $sigchld_bb_timeout,
+ \$sigchld_bb_stderr, qr/background process terminated unexpectedly/),
'background process exit message');
$sigchld_bb->finish();
@@ -1249,8 +949,8 @@ $node->safe_psql('postgres',
"SET allow_in_place_tablespaces = on; CREATE TABLESPACE tblspc2
LOCATION '';"
);
$node->safe_psql('postgres',
- "CREATE TABLE test2 (a int) TABLESPACE tblspc2;"
- . "INSERT INTO test2 VALUES (1234);");
+ "CREATE TABLE test2 (a int) TABLESPACE tblspc2;"
+ . "INSERT INTO test2 VALUES (1234);");
my $tblspc_oid = $node->safe_psql('postgres',
"SELECT oid FROM pg_tablespace WHERE spcname = 'tblspc2';");
$node->backup('backup3');
@@ -1263,4 +963,106 @@ my @dst_tblspc = glob
"$backupdir/pg_tblspc/$tblspc_oid/PG_*";
is(@dst_tblspc, 1, 'tblspc directory copied');
done_testing();
->>>>>>> REL_16_9
+
+# Some additional GPDB tests
+my $twenty_characters = '11111111112222222222';
+my $longer_tempdir =
"$tempdir/some_long_directory_path_$twenty_characters$twenty_characters$twenty_characters$twenty_characters$twenty_characters";
+my $some_backup_dir = "$tempdir/backup_dir";
+my $some_other_backup_dir = "$tempdir/other_backup_dir";
+
+mkdir "$longer_tempdir";
+mkdir "$some_backup_dir";
+$node->psql('postgres', "CREATE TABLESPACE too_long_tablespace LOCATION
'$longer_tempdir';");
+$node->command_checks_all(
+ [ @pg_basebackup_defs, '-D', "$some_backup_dir", '--target-gp-dbid',
'99'],
+ 1,
+ [qr{^$}],
+ [qr/symbolic link ".*" target is too long and will not be added to the
backup/],
+ 'basebackup with a tablespace that has a very long location should
error out with target is too long.');
+
+mkdir "$some_other_backup_dir";
+$node->command_checks_all(
+ [@pg_basebackup_defs, '-D', "$some_other_backup_dir",
'--target-gp-dbid', '99'],
+ 1,
+ [qr{^$}],
+ [qr/The symbolic link with target ".*" is too long. Symlink targets
with length greater than 100 characters would be truncated./],
+ 'basebackup with a tablespace that has a very long location should
error out link not added to the backup.');
+
+$node->command_checks_all(
+ ['ls', "$some_other_backup_dir/pg_tblspc/*"],
+ 2,
+ [qr{^$}],
+ [qr/No such file/],
+ 'tablespace directory should be empty');
+
+$node->psql('postgres', "DROP TABLESPACE too_long_tablespace;");
+
+#
+# GPDB: Exclude some files with the --exclude-from option
+#
+
+my $exclude_tempdir = "$tempdir/backup_exclude";
+my $excludelist = "$tempdir/exclude.list";
+
+mkdir "$exclude_tempdir";
+mkdir "$pgdata/exclude";
+
+open EXCLUDELIST, ">$excludelist";
+
+# Put a large amount of non-exist patterns in the exclude-from file,
+# the pattern matching is efficient enough to handle them.
+for my $i (1..1000000) {
+ print EXCLUDELIST "./exclude/non_exist.$i\n";
+}
+
+# Create some files to exclude
+for my $i (1..1000) {
+ print EXCLUDELIST "./exclude/$i\n";
+
+ open FILE, ">$pgdata/exclude/$i";
+ close FILE;
+}
+
+# Below file should not be excluded
+open FILE, ">$pgdata/exclude/keep";
+close FILE;
+
+close EXCLUDELIST;
+
+$node->command_ok(
+ [ @pg_basebackup_defs,
+ '-D', "$exclude_tempdir",
+ '--target-gp-dbid', '123',
+ '--exclude-from', "$excludelist" ],
+ 'pg_basebackup runs with exclude-from file');
+ok(! -f "$exclude_tempdir/exclude/0", 'excluded files were not created');
+ok(-f "$exclude_tempdir/exclude/keep", 'other files were created');
+
+# GPDB: Exclude gpbackup default directory
+my $gpbackup_test_dir = "$tempdir/gpbackup_test_dir";
+mkdir "$pgdata/backups";
+append_to_file("$pgdata/backups/random_backup_file", "some random backup
data");
+
+$node->command_ok([ @pg_basebackup_defs, '-D', $gpbackup_test_dir,
'--target-gp-dbid', '123' ],
+ 'pg_basebackup does not copy over \'backups/\' directory created by
gpbackup');
+
+ok(! -d "$gpbackup_test_dir/backups", 'gpbackup default backup directory
should be excluded');
+rmtree($gpbackup_test_dir);
+
+#GPDB: write config files only
+mkdir("$tempdir/backup");
+
+$node->command_fails([ @pg_basebackup_defs, '-D', "$tempdir/backup",
'--target-gp-dbid', '123',
+ '--write-conf-files-only', '--create-slot',
'--slot', "wal_replication_slot"],
+ 'pg_basebackup --write-conf-files-only fails with
--create_slot');
+
+$node->command_fails([ @pg_basebackup_defs, '-D', "$tempdir/backup",
'--target-gp-dbid', '123',
+ '--write-conf-files-only', '--write-recovery-conf' ],
+ 'pg_basebackup --write-conf-files-only fails with
--write-recovery-conf');
+
+$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backup",
'--target-gp-dbid', '123', '--write-conf-files-only' ],
+ 'pg_basebackup runs with write-conf-files-only');
+ok(-f "$tempdir/backup/internal.auto.conf", 'internal.auto.conf was created');
+ok(-f "$tempdir/backup/postgresql.auto.conf", 'postgresql.auto.conf was
created');
+ok(-f "$tempdir/backup/standby.signal", 'standby.signal was created');
+rmtree("$tempdir/backup");
diff --git a/src/bin/pg_basebackup/walmethods.c b/src/bin/pg_basebackup/walmethods.c
index 403d6274389..c4685f8c147 100644
--- a/src/bin/pg_basebackup/walmethods.c
+++ b/src/bin/pg_basebackup/walmethods.c
@@ -74,13 +74,6 @@ typedef struct DirectoryMethodData
{
WalWriteMethod base;
char *basedir;
-<<<<<<< HEAD
- int compression;
- bool sync;
- const char *lasterrstring; /* if set, takes precedence over
lasterrno */
- int lasterrno;
-=======
->>>>>>> REL_16_9
} DirectoryMethodData;
/*
@@ -102,28 +95,6 @@ typedef struct DirectoryMethodFile
#endif
} DirectoryMethodFile;
-<<<<<<< HEAD
-#define dir_clear_error() \
- (dir_data->lasterrstring = NULL, dir_data->lasterrno = 0)
-#define dir_set_error(msg) \
- (dir_data->lasterrstring = _(msg))
-
-static const char *
-dir_getlasterror(void)
-{
- if (dir_data->lasterrstring)
- return dir_data->lasterrstring;
- return strerror(dir_data->lasterrno);
-}
-
-static char *
-dir_get_file_name(const char *pathname, const char *temp_suffix)
-{
- char *filename = pg_malloc0(MAXPGPATH * sizeof(char));
-
- snprintf(filename, MAXPGPATH, "%s%s%s",
- pathname, dir_data->compression > 0 ? ".gz" : "",
-=======
#define clear_error(wwmethod) \
((wwmethod)->lasterrstring = NULL, (wwmethod)->lasterrno = 0)
@@ -137,7 +108,6 @@ dir_get_file_name(WalWriteMethod *wwmethod,
pathname,
wwmethod->compression_algorithm == PG_COMPRESSION_GZIP
? ".gz" :
wwmethod->compression_algorithm == PG_COMPRESSION_LZ4
? ".lz4" : "",
->>>>>>> REL_16_9
temp_suffix ? temp_suffix : "");
return filename;
@@ -147,10 +117,7 @@ static Walfile *
dir_open_for_write(WalWriteMethod *wwmethod, const char *pathname,
const char *temp_suffix, size_t pad_to_size)
{
-<<<<<<< HEAD
-=======
DirectoryMethodData *dir_data = (DirectoryMethodData *) wwmethod;
->>>>>>> REL_16_9
char tmppath[MAXPGPATH];
char *filename;
int fd;
@@ -164,15 +131,9 @@ dir_open_for_write(WalWriteMethod *wwmethod, const char
*pathname,
void *lz4buf = NULL;
#endif
-<<<<<<< HEAD
- dir_clear_error();
-
- filename = dir_get_file_name(pathname, temp_suffix);
-=======
clear_error(wwmethod);
filename = dir_get_file_name(wwmethod, pathname, temp_suffix);
->>>>>>> REL_16_9
snprintf(tmppath, sizeof(tmppath), "%s/%s",
dir_data->basedir, filename);
pg_free(filename);
@@ -186,11 +147,7 @@ dir_open_for_write(WalWriteMethod *wwmethod, const char
*pathname,
fd = open(tmppath, O_WRONLY | O_CREAT | PG_BINARY, pg_file_create_mode);
if (fd < 0)
{
-<<<<<<< HEAD
- dir_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return NULL;
}
@@ -200,11 +157,7 @@ dir_open_for_write(WalWriteMethod *wwmethod, const char
*pathname,
gzfp = gzdopen(fd, "wb");
if (gzfp == NULL)
{
-<<<<<<< HEAD
- dir_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
close(fd);
return NULL;
}
@@ -212,11 +165,7 @@ dir_open_for_write(WalWriteMethod *wwmethod, const char
*pathname,
if (gzsetparams(gzfp, wwmethod->compression_level,
Z_DEFAULT_STRATEGY) != Z_OK)
{
-<<<<<<< HEAD
- dir_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
gzclose(gzfp);
return NULL;
}
@@ -277,20 +226,9 @@ dir_open_for_write(WalWriteMethod *wwmethod, const char
*pathname,
if (rc < 0)
{
-<<<<<<< HEAD
- errno = 0;
- if (write(fd, zerobuf.data, XLOG_BLCKSZ) != XLOG_BLCKSZ)
- {
- /* If write didn't set errno, assume problem is
no disk space */
- dir_data->lasterrno = errno ? errno : ENOSPC;
- close(fd);
- return NULL;
- }
-=======
wwmethod->lasterrno = errno;
close(fd);
return NULL;
->>>>>>> REL_16_9
}
/*
@@ -299,11 +237,7 @@ dir_open_for_write(WalWriteMethod *wwmethod, const char
*pathname,
*/
if (lseek(fd, 0, SEEK_SET) != 0)
{
-<<<<<<< HEAD
- dir_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
close(fd);
return NULL;
}
@@ -320,11 +254,7 @@ dir_open_for_write(WalWriteMethod *wwmethod, const char
*pathname,
if (fsync_fname(tmppath, false) != 0 ||
fsync_parent_path(tmppath) != 0)
{
-<<<<<<< HEAD
- dir_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
#ifdef HAVE_LIBZ
if (wwmethod->compression_algorithm ==
PG_COMPRESSION_GZIP)
gzclose(gzfp);
@@ -377,34 +307,21 @@ dir_write(Walfile *f, const void *buf, size_t count)
DirectoryMethodFile *df = (DirectoryMethodFile *) f;
Assert(f != NULL);
-<<<<<<< HEAD
- dir_clear_error();
-
-#ifdef HAVE_LIBZ
- if (dir_data->compression > 0)
-=======
clear_error(f->wwmethod);
#ifdef HAVE_LIBZ
if (f->wwmethod->compression_algorithm == PG_COMPRESSION_GZIP)
->>>>>>> REL_16_9
{
errno = 0;
r = (ssize_t) gzwrite(df->gzfp, buf, count);
if (r != count)
{
/* If write didn't set errno, assume problem is no disk
space */
-<<<<<<< HEAD
- dir_data->lasterrno = errno ? errno : ENOSPC;
-=======
f->wwmethod->lasterrno = errno ? errno : ENOSPC;
->>>>>>> REL_16_9
}
}
else
#endif
-<<<<<<< HEAD
-=======
#ifdef USE_LZ4
if (f->wwmethod->compression_algorithm == PG_COMPRESSION_LZ4)
{
@@ -450,18 +367,13 @@ dir_write(Walfile *f, const void *buf, size_t count)
}
else
#endif
->>>>>>> REL_16_9
{
errno = 0;
r = write(df->fd, buf, count);
if (r != count)
{
/* If write didn't set errno, assume problem is no disk
space */
-<<<<<<< HEAD
- dir_data->lasterrno = errno ? errno : ENOSPC;
-=======
f->wwmethod->lasterrno = errno ? errno : ENOSPC;
->>>>>>> REL_16_9
}
}
if (r > 0)
@@ -469,49 +381,24 @@ dir_write(Walfile *f, const void *buf, size_t count)
return r;
}
-<<<<<<< HEAD
-static off_t
-dir_get_current_pos(Walfile f)
-{
- Assert(f != NULL);
- dir_clear_error();
-
- /* Use a cached value to prevent lots of reseeks */
- return ((DirectoryMethodFile *) f)->currpos;
-}
-
-=======
->>>>>>> REL_16_9
static int
dir_close(Walfile *f, WalCloseMethod method)
{
int r;
DirectoryMethodFile *df = (DirectoryMethodFile *) f;
-<<<<<<< HEAD
-=======
DirectoryMethodData *dir_data = (DirectoryMethodData *) f->wwmethod;
->>>>>>> REL_16_9
char tmppath[MAXPGPATH];
char tmppath2[MAXPGPATH];
Assert(f != NULL);
-<<<<<<< HEAD
- dir_clear_error();
-
-#ifdef HAVE_LIBZ
- if (dir_data->compression > 0)
-=======
clear_error(f->wwmethod);
#ifdef HAVE_LIBZ
if (f->wwmethod->compression_algorithm == PG_COMPRESSION_GZIP)
->>>>>>> REL_16_9
{
errno = 0; /* in case gzclose() doesn't set it */
r = gzclose(df->gzfp);
}
-<<<<<<< HEAD
-=======
else
#endif
#ifdef USE_LZ4
@@ -539,7 +426,6 @@ dir_close(Walfile *f, WalCloseMethod method)
r = close(df->fd);
}
->>>>>>> REL_16_9
else
#endif
r = close(df->fd);
@@ -556,24 +442,13 @@ dir_close(Walfile *f, WalCloseMethod method)
* If we have a temp prefix, normal operation is to rename the
* file.
*/
-<<<<<<< HEAD
- filename = dir_get_file_name(df->pathname, df->temp_suffix);
-=======
filename = dir_get_file_name(f->wwmethod, df->base.pathname,
df->temp_suffix);
->>>>>>> REL_16_9
snprintf(tmppath, sizeof(tmppath), "%s/%s",
dir_data->basedir, filename);
pg_free(filename);
/* permanent name, so no need for the prefix */
-<<<<<<< HEAD
- filename2 = dir_get_file_name(df->pathname, NULL);
- snprintf(tmppath2, sizeof(tmppath2), "%s/%s",
- dir_data->basedir, filename2);
- pg_free(filename2);
- r = durable_rename(tmppath, tmppath2);
-=======
filename2 = dir_get_file_name(f->wwmethod, df->base.pathname, NULL);
snprintf(tmppath2, sizeof(tmppath2), "%s/%s",
dir_data->basedir, filename2);
@@ -589,19 +464,14 @@ dir_close(Walfile *f, WalCloseMethod method)
r = -1;
}
}
->>>>>>> REL_16_9
}
else if (method == CLOSE_UNLINK)
{
char *filename;
/* Unlink the file once it's closed */
-<<<<<<< HEAD
- filename = dir_get_file_name(df->pathname, df->temp_suffix);
-=======
filename = dir_get_file_name(f->wwmethod, df->base.pathname,
df->temp_suffix);
->>>>>>> REL_16_9
snprintf(tmppath, sizeof(tmppath), "%s/%s",
dir_data->basedir, filename);
pg_free(filename);
@@ -624,11 +494,6 @@ dir_close(Walfile *f, WalCloseMethod method)
}
if (r != 0)
-<<<<<<< HEAD
- dir_data->lasterrno = errno;
-
- pg_free(df->pathname);
-=======
f->wwmethod->lasterrno = errno;
#ifdef USE_LZ4
@@ -638,7 +503,6 @@ dir_close(Walfile *f, WalCloseMethod method)
#endif
pg_free(df->base.pathname);
->>>>>>> REL_16_9
pg_free(df->fullpath);
pg_free(df->temp_suffix);
pg_free(df);
@@ -650,12 +514,6 @@ static int
dir_sync(Walfile *f)
{
int r;
-<<<<<<< HEAD
-
- Assert(f != NULL);
- dir_clear_error();
-=======
->>>>>>> REL_16_9
Assert(f != NULL);
clear_error(f->wwmethod);
@@ -668,11 +526,6 @@ dir_sync(Walfile *f)
{
if (gzflush(((DirectoryMethodFile *) f)->gzfp, Z_SYNC_FLUSH) != Z_OK)
{
-<<<<<<< HEAD
- dir_data->lasterrno = errno;
- return -1;
- }
-=======
f->wwmethod->lasterrno = errno;
return -1;
}
@@ -699,17 +552,12 @@ dir_sync(Walfile *f)
f->wwmethod->lasterrno = errno ? errno : ENOSPC;
return -1;
}
->>>>>>> REL_16_9
}
#endif
r = fsync(((DirectoryMethodFile *) f)->fd);
if (r < 0)
-<<<<<<< HEAD
- dir_data->lasterrno = errno;
-=======
f->wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return r;
}
@@ -725,11 +573,7 @@ dir_get_file_size(WalWriteMethod *wwmethod, const char *pathname)
if (stat(tmppath, &statbuf) != 0)
{
-<<<<<<< HEAD
- dir_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return -1;
}
@@ -745,18 +589,11 @@ dir_compression(void)
static bool
dir_existsfile(WalWriteMethod *wwmethod, const char *pathname)
{
-<<<<<<< HEAD
- char tmppath[MAXPGPATH];
- int fd;
-
- dir_clear_error();
-=======
DirectoryMethodData *dir_data = (DirectoryMethodData *) wwmethod;
char tmppath[MAXPGPATH];
int fd;
clear_error(wwmethod);
->>>>>>> REL_16_9
snprintf(tmppath, sizeof(tmppath), "%s/%s",
dir_data->basedir, pathname);
@@ -771,15 +608,9 @@ dir_existsfile(WalWriteMethod *wwmethod, const char *pathname)
static bool
dir_finish(WalWriteMethod *wwmethod)
{
-<<<<<<< HEAD
- dir_clear_error();
-
- if (dir_data->sync)
-=======
clear_error(wwmethod);
if (wwmethod->sync)
->>>>>>> REL_16_9
{
DirectoryMethodData *dir_data = (DirectoryMethodData *) wwmethod;
@@ -789,11 +620,7 @@ dir_finish(WalWriteMethod *wwmethod)
*/
if (fsync_fname(dir_data->basedir, true) != 0)
{
-<<<<<<< HEAD
- dir_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return false;
}
}
@@ -805,30 +632,8 @@ dir_free(WalWriteMethod *wwmethod)
{
DirectoryMethodData *dir_data = (DirectoryMethodData *) wwmethod;
-<<<<<<< HEAD
- method = pg_malloc0(sizeof(WalWriteMethod));
- method->open_for_write = dir_open_for_write;
- method->write = dir_write;
- method->get_current_pos = dir_get_current_pos;
- method->get_file_size = dir_get_file_size;
- method->get_file_name = dir_get_file_name;
- method->compression = dir_compression;
- method->close = dir_close;
- method->sync = dir_sync;
- method->existsfile = dir_existsfile;
- method->finish = dir_finish;
- method->getlasterror = dir_getlasterror;
-
- dir_data = pg_malloc0(sizeof(DirectoryMethodData));
- dir_data->compression = compression;
- dir_data->basedir = pg_strdup(basedir);
- dir_data->sync = sync;
-
- return method;
-=======
pg_free(dir_data->basedir);
pg_free(wwmethod);
->>>>>>> REL_16_9
}
@@ -837,11 +642,6 @@ CreateWalDirectoryMethod(const char *basedir,
pg_compress_algorithm compression_algorithm,
int compression_level, bool sync)
{
-<<<<<<< HEAD
- pg_free(dir_data->basedir);
- pg_free(dir_data);
- dir_data = NULL;
-=======
DirectoryMethodData *wwmethod;
wwmethod = pg_malloc0(sizeof(DirectoryMethodData));
@@ -854,7 +654,6 @@ CreateWalDirectoryMethod(const char *basedir,
wwmethod->basedir = pg_strdup(basedir);
return &wwmethod->base;
->>>>>>> REL_16_9
}
@@ -904,33 +703,11 @@ typedef struct TarMethodData
char *tarfilename;
int fd;
TarMethodFile *currentfile;
-<<<<<<< HEAD
- const char *lasterrstring; /* if set, takes precedence over lasterrno */
- int lasterrno;
-=======
->>>>>>> REL_16_9
#ifdef HAVE_LIBZ
z_streamp zp;
void *zlibOut;
#endif
} TarMethodData;
-<<<<<<< HEAD
-static TarMethodData *tar_data = NULL;
-
-#define tar_clear_error() \
- (tar_data->lasterrstring = NULL, tar_data->lasterrno = 0)
-#define tar_set_error(msg) \
- (tar_data->lasterrstring = _(msg))
-
-static const char *
-tar_getlasterror(void)
-{
- if (tar_data->lasterrstring)
- return tar_data->lasterrstring;
- return strerror(tar_data->lasterrno);
-}
-=======
->>>>>>> REL_16_9
#ifdef HAVE_LIBZ
static bool
@@ -959,11 +736,7 @@ tar_write_compressed_data(TarMethodData *tar_data, void *buf, size_t count,
if (write(tar_data->fd, tar_data->zlibOut, len) != len)
{
/* If write didn't set errno, assume problem is no disk space */
-<<<<<<< HEAD
- tar_data->lasterrno = errno ? errno : ENOSPC;
-=======
tar_data->base.lasterrno = errno ? errno : ENOSPC;
->>>>>>> REL_16_9
return false;
}
@@ -1006,17 +779,10 @@ tar_write(Walfile *f, const void *buf, size_t count)
if (r != count)
{
/* If write didn't set errno, assume problem is no disk space */
-<<<<<<< HEAD
- tar_data->lasterrno = errno ? errno : ENOSPC;
- return -1;
- }
- ((TarMethodFile *) f)->currpos += r;
-=======
f->wwmethod->lasterrno = errno ? errno : ENOSPC;
return -1;
}
f->currpos += r;
->>>>>>> REL_16_9
return r;
}
#ifdef HAVE_LIBZ
@@ -1028,16 +794,6 @@ tar_write(Walfile *f, const void *buf, size_t count)
f->currpos += count;
return count;
}
-<<<<<<< HEAD
-#else
- else
- {
- /* Can't happen - compression enabled with no libz */
- tar_data->lasterrno = ENOSYS;
- return -1;
- }
-=======
->>>>>>> REL_16_9
#endif
else
{
@@ -1068,27 +824,10 @@ tar_write_padding_data(TarMethodFile *f, size_t bytes)
}
static char *
-<<<<<<< HEAD
-tar_get_file_name(const char *pathname, const char *temp_suffix)
-{
- char *filename = pg_malloc0(MAXPGPATH * sizeof(char));
-
- snprintf(filename, MAXPGPATH, "%s%s",
- pathname, temp_suffix ? temp_suffix : "");
-
- return filename;
-}
-
-static Walfile
-tar_open_for_write(const char *pathname, const char *temp_suffix, size_t pad_to_size)
-{
- char *tmppath;
-=======
tar_get_file_name(WalWriteMethod *wwmethod, const char *pathname,
const char *temp_suffix)
{
char *filename = pg_malloc0(MAXPGPATH * sizeof(char));
->>>>>>> REL_16_9
snprintf(filename, MAXPGPATH, "%s%s",
pathname, temp_suffix ? temp_suffix : "");
@@ -1115,11 +854,7 @@ tar_open_for_write(WalWriteMethod *wwmethod, const char *pathname,
pg_file_create_mode);
if (tar_data->fd < 0)
{
-<<<<<<< HEAD
- tar_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return NULL;
}
@@ -1163,11 +898,7 @@ tar_open_for_write(WalWriteMethod *wwmethod, const char *pathname,
tar_data->currentfile = pg_malloc0(sizeof(TarMethodFile));
tar_data->currentfile->base.wwmethod = wwmethod;
-<<<<<<< HEAD
- tmppath = tar_get_file_name(pathname, temp_suffix);
-=======
tmppath = tar_get_file_name(wwmethod, pathname, temp_suffix);
->>>>>>> REL_16_9
/* Create a header with size set to 0 - we will fill out the size on close */
if (tarCreateHeader(tar_data->currentfile->header, tmppath, NULL, 0,
S_IRUSR | S_IWUSR, 0, 0, time(NULL)) != TAR_OK)
@@ -1201,11 +932,7 @@ tar_open_for_write(WalWriteMethod *wwmethod, const char *pathname,
tar_data->currentfile->ofs_start = lseek(tar_data->fd, 0, SEEK_CUR);
if (tar_data->currentfile->ofs_start == -1)
{
-<<<<<<< HEAD
- tar_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
pg_free(tar_data->currentfile);
tar_data->currentfile = NULL;
return NULL;
@@ -1219,11 +946,7 @@ tar_open_for_write(WalWriteMethod *wwmethod, const char *pathname,
TAR_BLOCK_SIZE) != TAR_BLOCK_SIZE)
{
/* If write didn't set errno, assume problem is no disk space */
-<<<<<<< HEAD
- tar_data->lasterrno = errno ? errno : ENOSPC;
-=======
wwmethod->lasterrno = errno ? errno : ENOSPC;
->>>>>>> REL_16_9
pg_free(tar_data->currentfile);
tar_data->currentfile = NULL;
return NULL;
@@ -1271,11 +994,7 @@ tar_open_for_write(WalWriteMethod *wwmethod, const char *pathname,
tar_data->currentfile->ofs_start + TAR_BLOCK_SIZE,
SEEK_SET) != tar_data->currentfile->ofs_start + TAR_BLOCK_SIZE)
{
-<<<<<<< HEAD
- tar_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return NULL;
}
@@ -1292,44 +1011,15 @@ tar_get_file_size(WalWriteMethod *wwmethod, const char *pathname)
clear_error(wwmethod);
/* Currently not used, so not supported */
-<<<<<<< HEAD
- tar_data->lasterrno = ENOSYS;
- return -1;
-}
-
-static int
-tar_compression(void)
-{
- return tar_data->compression;
-}
-
-static off_t
-tar_get_current_pos(Walfile f)
-{
- Assert(f != NULL);
- tar_clear_error();
-
- return ((TarMethodFile *) f)->currpos;
-}
-
-=======
wwmethod->lasterrno = ENOSYS;
return -1;
}
->>>>>>> REL_16_9
static int
tar_sync(Walfile *f)
{
-<<<<<<< HEAD
- int r;
-
- Assert(f != NULL);
- tar_clear_error();
-=======
TarMethodData *tar_data = (TarMethodData *) f->wwmethod;
int r;
->>>>>>> REL_16_9
Assert(f != NULL);
clear_error(f->wwmethod);
@@ -1346,11 +1036,7 @@ tar_sync(Walfile *f)
r = fsync(tar_data->fd);
if (r < 0)
-<<<<<<< HEAD
- tar_data->lasterrno = errno;
-=======
f->wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return r;
}
@@ -1380,11 +1066,7 @@ tar_close(Walfile *f, WalCloseMethod method)
*/
if (ftruncate(tar_data->fd, tf->ofs_start) != 0)
{
-<<<<<<< HEAD
- tar_data->lasterrno = errno;
-=======
f->wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return -1;
}
@@ -1445,11 +1127,7 @@ tar_close(Walfile *f, WalCloseMethod method)
if (f->wwmethod->compression_algorithm == PG_COMPRESSION_GZIP)
{
/* Flush the current buffer */
-<<<<<<< HEAD
- if (!tar_write_compressed_data(NULL, 0, true))
-=======
if (!tar_write_compressed_data(tar_data, NULL, 0, true))
->>>>>>> REL_16_9
return -1;
}
#endif
@@ -1472,27 +1150,16 @@ tar_close(Walfile *f, WalCloseMethod method)
print_tar_number(&(tf->header[148]), 8, tarChecksum(((TarMethodFile *) f)->header));
if (lseek(tar_data->fd, tf->ofs_start, SEEK_SET) != ((TarMethodFile *) f)->ofs_start)
{
-<<<<<<< HEAD
- tar_data->lasterrno = errno;
- return -1;
- }
- if (!tar_data->compression)
-=======
f->wwmethod->lasterrno = errno;
return -1;
}
if (f->wwmethod->compression_algorithm == PG_COMPRESSION_NONE)
->>>>>>> REL_16_9
{
errno = 0;
if (write(tar_data->fd, tf->header, TAR_BLOCK_SIZE) != TAR_BLOCK_SIZE)
{
/* If write didn't set errno, assume problem is no disk space */
-<<<<<<< HEAD
- tar_data->lasterrno = errno ? errno : ENOSPC;
-=======
f->wwmethod->lasterrno = errno ? errno : ENOSPC;
->>>>>>> REL_16_9
return -1;
}
}
@@ -1529,11 +1196,7 @@ tar_close(Walfile *f, WalCloseMethod method)
/* Move file pointer back down to end, so we can write the next file */
if (lseek(tar_data->fd, 0, SEEK_END) < 0)
{
-<<<<<<< HEAD
- tar_data->lasterrno = errno;
-=======
f->wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return -1;
}
@@ -1541,14 +1204,8 @@ tar_close(Walfile *f, WalCloseMethod method)
if (tar_sync(f) < 0)
{
/* XXX this seems pretty bogus; why is only this case fatal? */
-<<<<<<< HEAD
- pg_log_fatal("could not fsync file \"%s\": %s",
- tf->pathname, tar_getlasterror());
- exit(1);
-=======
pg_fatal("could not fsync file \"%s\": %s",
tf->base.pathname, GetLastWalMethodError(f->wwmethod));
->>>>>>> REL_16_9
}
/* Clean up and done */
@@ -1588,11 +1245,7 @@ tar_finish(WalWriteMethod *wwmethod)
if (write(tar_data->fd, zerobuf, sizeof(zerobuf)) != sizeof(zerobuf))
{
/* If write didn't set errno, assume problem is no disk space */
-<<<<<<< HEAD
- tar_data->lasterrno = errno ? errno : ENOSPC;
-=======
wwmethod->lasterrno = errno ? errno : ENOSPC;
->>>>>>> REL_16_9
return false;
}
}
@@ -1628,11 +1281,7 @@ tar_finish(WalWriteMethod *wwmethod)
* If write didn't set errno, assume problem is no disk
* space.
*/
-<<<<<<< HEAD
- tar_data->lasterrno = errno ? errno : ENOSPC;
-=======
wwmethod->lasterrno = errno ? errno : ENOSPC;
->>>>>>> REL_16_9
return false;
}
}
@@ -1658,22 +1307,14 @@ tar_finish(WalWriteMethod *wwmethod)
{
if (fsync(tar_data->fd) != 0)
{
-<<<<<<< HEAD
- tar_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return false;
}
}
if (close(tar_data->fd) != 0)
{
-<<<<<<< HEAD
- tar_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return false;
}
@@ -1684,11 +1325,7 @@ tar_finish(WalWriteMethod *wwmethod)
if (fsync_fname(tar_data->tarfilename, false) != 0 ||
fsync_parent_path(tar_data->tarfilename) != 0)
{
-<<<<<<< HEAD
- tar_data->lasterrno = errno;
-=======
wwmethod->lasterrno = errno;
->>>>>>> REL_16_9
return false;
}
}
@@ -1701,48 +1338,11 @@ tar_free(WalWriteMethod *wwmethod)
{
TarMethodData *tar_data = (TarMethodData *) wwmethod;
-<<<<<<< HEAD
- method = pg_malloc0(sizeof(WalWriteMethod));
- method->open_for_write = tar_open_for_write;
- method->write = tar_write;
- method->get_current_pos = tar_get_current_pos;
- method->get_file_size = tar_get_file_size;
- method->get_file_name = tar_get_file_name;
- method->compression = tar_compression;
- method->close = tar_close;
- method->sync = tar_sync;
- method->existsfile = tar_existsfile;
- method->finish = tar_finish;
- method->getlasterror = tar_getlasterror;
-
- tar_data = pg_malloc0(sizeof(TarMethodData));
- tar_data->tarfilename = pg_malloc0(strlen(tarbase) + strlen(suffix) + 1);
- sprintf(tar_data->tarfilename, "%s%s", tarbase, suffix);
- tar_data->fd = -1;
- tar_data->compression = compression;
- tar_data->sync = sync;
-#ifdef HAVE_LIBZ
- if (compression)
- tar_data->zlibOut = (char *) pg_malloc(ZLIB_OUT_SIZE + 1);
-#endif
-
- return method;
-}
-
-void
-FreeWalTarMethod(void)
-{
-=======
->>>>>>> REL_16_9
pg_free(tar_data->tarfilename);
#ifdef HAVE_LIBZ
if (wwmethod->compression_algorithm == PG_COMPRESSION_GZIP)
pg_free(tar_data->zlibOut);
#endif
-<<<<<<< HEAD
- pg_free(tar_data);
- tar_data = NULL;
-=======
pg_free(wwmethod);
}
@@ -1786,5 +1386,4 @@ GetLastWalMethodError(WalWriteMethod *wwmethod)
if (wwmethod->lasterrstring)
return wwmethod->lasterrstring;
return strerror(wwmethod->lasterrno);
->>>>>>> REL_16_9
}
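
For context on the resolution kept above: the REL_16_9 side stores error state on the shared WalWriteMethod base (lasterrno, with lasterrstring taking precedence) rather than in the per-method statics dir_data/tar_data that the HEAD side used, and callers retrieve the message through GetLastWalMethodError(). The C program below is a minimal, self-contained sketch of that pattern under those assumptions, not the real walmethods.c code; demo_open() and the trimmed-down struct fields are illustrative stand-ins.

/*
 * Minimal sketch of the error-reporting pattern kept by this merge: each
 * write method embeds a shared WalWriteMethod base that owns lasterrno and
 * lasterrstring, a failing call records errno on the base, and callers
 * format it with GetLastWalMethodError().  Simplified stand-in types; not
 * the real walmethods.c definitions.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

typedef struct WalWriteMethod
{
    const char *lasterrstring;  /* if set, takes precedence over lasterrno */
    int         lasterrno;
} WalWriteMethod;

typedef struct DirectoryMethodData
{
    WalWriteMethod base;        /* shared error state lives here */
    const char *basedir;
} DirectoryMethodData;

static const char *
GetLastWalMethodError(WalWriteMethod *wwmethod)
{
    if (wwmethod->lasterrstring)
        return wwmethod->lasterrstring;
    return strerror(wwmethod->lasterrno);
}

/* Hypothetical operation: on failure, record errno on the shared base. */
static int
demo_open(WalWriteMethod *wwmethod, const char *path)
{
    FILE *fp = fopen(path, "rb");

    if (fp == NULL)
    {
        wwmethod->lasterrno = errno;
        return -1;
    }
    fclose(fp);
    return 0;
}

int
main(void)
{
    DirectoryMethodData dir = {{NULL, 0}, "/tmp"};

    if (demo_open(&dir.base, "/nonexistent/file") != 0)
        printf("open failed: %s\n", GetLastWalMethodError(&dir.base));
    return 0;
}
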
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]