This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
new 913684841d1 Fix conflicts in commands
913684841d1 is described below
commit 913684841d1fae12f7d053800242673dfac58880
Author: Jinbao Chen <[email protected]>
AuthorDate: Sun Jul 20 15:18:44 2025 +0800
Fix conflicts in commands
---
src/backend/commands/analyze.c | 73 +---------
src/backend/commands/collationcmds.c | 40 +-----
src/backend/commands/comment.c | 5 -
src/backend/commands/copyfrom.c | 48 -------
src/backend/commands/copyfromparse.c | 17 ---
src/backend/commands/copyto.c | 89 +------------
src/backend/commands/dbcommands.c | 235 +++------------------------------
src/backend/commands/dropcmds.c | 14 +-
src/backend/commands/explain.c | 45 +------
src/backend/commands/indexcmds.c | 111 ++--------------
src/backend/commands/lockcmds.c | 5 -
src/backend/commands/opclasscmds.c | 4 -
src/backend/commands/prepare.c | 11 +-
src/backend/commands/trigger.c | 36 +----
src/backend/commands/variable.c | 3 -
src/backend/commands/view.c | 10 +-
src/include/commands/copyto_internal.h | 1 +
17 files changed, 52 insertions(+), 695 deletions(-)
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 75b29b58e61..eb2f367c8e2 100644
--- a/src/backend/commands/analyze.c
+++ b/src/backend/commands/analyze.c
@@ -3,7 +3,6 @@
* analyze.c
* the Postgres statistics generator
*
-<<<<<<< HEAD
*
* There are a few things in Cloudberry that make this more complicated
* than in upstream:
@@ -54,10 +53,7 @@
*
* TODO: explain how this works.
*
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -187,17 +183,15 @@ static void compute_index_stats(Relation onerel, double totalrows,
 					 HeapTuple *rows, int numrows,
 					 MemoryContext col_context);
 static VacAttrStats *examine_attribute(Relation onerel, int attnum,
-<<<<<<< HEAD
 					   Node *index_expr, int elevel);
 static int	acquire_sample_rows_dispatcher(Relation onerel, bool inh, int elevel,
-=======
-					   Node *index_expr);
+					   HeapTuple *rows, int targrows,
+					   double *totalrows, double *totaldeadrows);
 static int	acquire_sample_rows(Relation onerel, int elevel,
 					   HeapTuple *rows, int targrows,
 					   double *totalrows, double *totaldeadrows);
 static int	compare_rows(const void *a, const void *b, void *arg);
 static int	acquire_inherited_sample_rows(Relation onerel, int elevel,
->>>>>>> REL_16_9
 					   HeapTuple *rows, int targrows,
 					   double *totalrows, double *totaldeadrows);
 static BlockNumber acquire_index_number_of_blocks(Relation indexrel, Relation tablerel);
@@ -628,11 +622,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
*/
if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
{
-<<<<<<< HEAD
- List *idxs = RelationGetIndexList(onerel);
-=======
List *idxs = RelationGetIndexList(onerel);
->>>>>>> REL_16_9
Irel = NULL;
nindexes = 0;
@@ -1014,14 +1004,11 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
MemoryContextResetAndDeleteChildren(col_context);
}
-<<<<<<< HEAD
 	/*
 	 * Datums exceeding WIDTH_THRESHOLD are masked as NULL in the sample, and
 	 * are used as is to evaluate index statistics. It is less likely to have
 	 * indexes on very wide columns, so the effect will be minimal.
 	 */
-=======
->>>>>>> REL_16_9
if (nindexes > 0)
compute_index_stats(onerel, totalrows,
indexdata,
nindexes,
@@ -1047,36 +1034,9 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
thisdata->attr_cnt,
thisdata->vacattrstats);
}
-<<<<<<< HEAD
-		/*
-		 * Should we build extended statistics for this relation?
-		 *
-		 * The extended statistics catalog does not include an inheritance
-		 * flag, so we can't store statistics built both with and without
-		 * data from child relations. We can store just one set of statistics
-		 * per relation. For plain relations that's fine, but for inheritance
-		 * trees we have to pick whether to store statistics for just the
-		 * one relation or the whole tree. For plain inheritance we store
-		 * the (!inh) version, mostly for backwards compatibility reasons.
-		 * For partitioned tables that's pointless (the non-leaf tables are
-		 * always empty), so we store stats representing the whole tree.
-		 */
-		build_ext_stats = (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) ? inh : (!inh);
-
-		/*
-		 * Build extended statistics (if there are any).
-		 *
-		 * For now we only build extended statistics on individual relations,
-		 * not for relations representing inheritance trees.
-		 */
-		if (build_ext_stats)
-			BuildRelationExtStatistics(onerel, totalrows, numrows, rows,
-									   attr_cnt, vacattrstats);
-=======
 		/* Build extended statistics (if there are any). */
 		BuildRelationExtStatistics(onerel, inh, totalrows, numrows, rows,
 								   attr_cnt, vacattrstats);
->>>>>>> REL_16_9
}
pgstat_progress_update_param(PROGRESS_ANALYZE_PHASE,
@@ -1119,7 +1079,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
hasindex,
InvalidTransactionId,
InvalidMultiXactId,
-<<<<<<< HEAD
+ NULL, NULL,
in_outer_xact,
false /* isVacuum */);
@@ -1152,10 +1112,6 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
}
ReleaseSysCache(aotup);
}
-=======
- NULL, NULL,
- in_outer_xact);
->>>>>>> REL_16_9
/* Same for indexes */
for (ind = 0; ind < nindexes; ind++)
@@ -1195,13 +1151,9 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
false,
InvalidTransactionId,
InvalidMultiXactId,
-<<<<<<< HEAD
+ NULL, NULL,
in_outer_xact,
false /*
isVacuum */);
-=======
- NULL, NULL,
- in_outer_xact);
->>>>>>> REL_16_9
}
}
else if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
@@ -1214,13 +1166,9 @@ do_analyze_rel(Relation onerel, VacuumParams *params,
vac_update_relstats(onerel, -1, totalrows,
0, hasindex,
InvalidTransactionId,
InvalidMultiXactId,
-<<<<<<< HEAD
+ NULL, NULL,
in_outer_xact,
false /* isVacuum */);
-=======
- NULL, NULL,
- in_outer_xact);
->>>>>>> REL_16_9
}
/*
@@ -1765,7 +1713,6 @@ acquire_sample_rows(Relation onerel, int elevel,
nblocks = BlockSampler_Init(&bs, totalblocks, targrows, randseed);
#ifdef USE_PREFETCH
-<<<<<<< HEAD
/*
* GPDB_14_MERGE_FIXME: pre-fetching is introduced in PG14.
* It's not suitable for AO tables, will get error when PrefetchBuffer
@@ -1783,12 +1730,6 @@ acquire_sample_rows(Relation onerel, int elevel,
if (prefetch_maximum)
(void) BlockSampler_Init(&prefetch_bs, totalblocks,
targrows, randseed);
}
-=======
-	prefetch_maximum = get_tablespace_maintenance_io_concurrency(onerel->rd_rel->reltablespace);
-	/* Create another BlockSampler, using the same seed, for prefetching */
-	if (prefetch_maximum)
-		(void) BlockSampler_Init(&prefetch_bs, totalblocks, targrows, randseed);
->>>>>>> REL_16_9
#endif
/* Report sampling block numbers */
@@ -4468,7 +4409,6 @@ compute_scalar_stats(VacAttrStatsP stats,
}
/*
-<<<<<<< HEAD
* merge_leaf_stats() -- merge leaf stats for the root
*
* We use this when we can find "=" and "<" operators for the datatype.
@@ -4975,9 +4915,6 @@ merge_leaf_stats(VacAttrStatsP stats,
/*
* qsort_arg comparator for sorting ScalarItems
-=======
- * Comparator for sorting ScalarItems
->>>>>>> REL_16_9
*
* Aside from sorting the items, we update the tupnoLink[] array
* whenever two ScalarItems are found to contain equal datums. The array
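
A note on the net effect of the analyze.c hunks above: the GPDB-specific gating of extended statistics is dropped in favor of the upstream call that passes inh directly, and every vac_update_relstats() call now carries the PostgreSQL 16 out-parameters alongside the GPDB-only isVacuum flag. A minimal sketch of the merged call shape; argument names not visible in the hunks (relpages, relallvisible) are assumed from upstream:

    vac_update_relstats(onerel,
                        relpages, totalrows, relallvisible,  /* names assumed from upstream */
                        hasindex,
                        InvalidTransactionId, InvalidMultiXactId,
                        NULL, NULL,          /* frozenxid/minmulti "updated" out-params, new in PG 16 */
                        in_outer_xact,
                        false /* isVacuum, GPDB-specific trailing argument */);
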
diff --git a/src/backend/commands/collationcmds.c b/src/backend/commands/collationcmds.c
index fcbabbc904f..19525961573 100644
--- a/src/backend/commands/collationcmds.c
+++ b/src/backend/commands/collationcmds.c
@@ -922,12 +922,6 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
enc = create_collation_from_locale(localebuf, nspid,
&nvalid, &ncreated);
if (enc < 0)
continue;
-<<<<<<< HEAD
- }
- if (!PG_VALID_BE_ENCODING(enc))
- continue; /* ignore locales for
client-only encodings */
- if (enc == PG_SQL_ASCII)
- continue; /* C/POSIX are already
in the catalog */
/*
* Greenplum specific behavior: this function in
Greenplum can only be called after a full cluster is
* built, this is different from Postgres which might
call this function during initdb. When reaching
@@ -935,35 +929,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
* encoding because they cannot be used in this
database.
*/
if (enc != GetDatabaseEncoding())
- continue; /* Ignore collations
incompatible with database encoding */
-
- /* count valid locales found in operating system */
- nvalid++;
-
- /*
- * Create a collation named the same as the locale, but
quietly
- * doing nothing if it already exists. This is the
behavior we
- * need even at initdb time, because some versions of
"locale -a"
- * can report the same locale name more than once. And
it's
- * convenient for later import runs, too, since you
just about
- * always want to add on new locales without a lot of
chatter
- * about existing ones.
- */
- collid = CollationCreate(localebuf, nspid, GetUserId(),
-
COLLPROVIDER_LIBC, true, enc,
-
localebuf, localebuf,
-
get_collation_actual_version(COLLPROVIDER_LIBC, localebuf),
- true,
true);
- if (OidIsValid(collid))
- {
- DispatchCollationCreate(localebuf, localebuf,
nspid, "libc");
- ncreated++;
-
- /* Must do CCI between inserts to handle
duplicates correctly */
- CommandCounterIncrement();
- }
-=======
->>>>>>> REL_16_9
+ continue; /* Ignore collations
incompatible with database encoding */
/*
* Generate aliases such as "en_US" in addition to
"en_US.utf8"
@@ -1063,11 +1029,7 @@ pg_import_system_collations(PG_FUNCTION_ARGS)
const char *name;
char *langtag;
char *icucomment;
-<<<<<<< HEAD
- const char *collcollate;
char *collname;
-=======
->>>>>>> REL_16_9
Oid collid;
if (i == -1)
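
For context on the pg_import_system_collations() hunks above: the resolution keeps only the Greenplum-specific database-encoding filter and lets upstream's create_collation_from_locale() do the catalog work, instead of the older open-coded CollationCreate()/DispatchCollationCreate() sequence. A rough sketch of the retained loop body, using only calls visible in the hunks:

    enc = create_collation_from_locale(localebuf, nspid, &nvalid, &ncreated);
    if (enc < 0)
        continue;
    /* Greenplum-specific: skip collations this database cannot use */
    if (enc != GetDatabaseEncoding())
        continue;            /* Ignore collations incompatible with database encoding */
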
diff --git a/src/backend/commands/comment.c b/src/backend/commands/comment.c
index 20bfb2b65f3..17e9f5d25f5 100644
--- a/src/backend/commands/comment.c
+++ b/src/backend/commands/comment.c
@@ -102,14 +102,9 @@ CommentObject(CommentStmt *stmt)
relation->rd_rel->relkind !=
RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-<<<<<<< HEAD
-				 errmsg("\"%s\" is not a table, directory table, view, materialized view, composite type, or foreign table",
-						RelationGetRelationName(relation))));
-=======
 				 errmsg("cannot set comment on relation \"%s\"",
 						RelationGetRelationName(relation)),
 				 errdetail_relkind_not_supported(relation->rd_rel->relkind)));
->>>>>>> REL_16_9
break;
default:
break;
diff --git a/src/backend/commands/copyfrom.c b/src/backend/commands/copyfrom.c
index 0b0eddb44d0..30a1f6c3af6 100644
--- a/src/backend/commands/copyfrom.c
+++ b/src/backend/commands/copyfrom.c
@@ -481,31 +481,7 @@ CopyMultiInsertBufferFlush(CopyMultiInsertInfo *miinfo,
TupleTableSlot **slots = buffer->slots;
int i;
-<<<<<<< HEAD
- /*
- * Print error context information correctly, if one of the operations
- * below fails.
- */
- cstate->line_buf_valid = false;
- save_cur_lineno = cstate->cur_lineno;
-
- /*
- * table_multi_insert may leak memory, so switch to short-lived memory
- * context before calling it.
- */
- oldcontext = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
- table_multi_insert(resultRelInfo->ri_RelationDesc,
- slots,
- nused,
- mycid,
- ti_options,
- buffer->bistate);
- MemoryContextSwitchTo(oldcontext);
-
- for (i = 0; i < nused; i++)
-=======
if (resultRelInfo->ri_FdwRoutine)
->>>>>>> REL_16_9
{
int batch_size =
resultRelInfo->ri_BatchSize;
int sent = 0;
@@ -692,7 +668,6 @@ CopyMultiInsertBufferCleanup(CopyMultiInsertInfo *miinfo,
for (i = 0; i < MAX_BUFFERED_TUPLES && buffer->slots[i] != NULL; i++)
ExecDropSingleTupleTableSlot(buffer->slots[i]);
-<<<<<<< HEAD
if (RelationIsNonblockRelation(buffer->resultRelInfo->ri_RelationDesc))
{
/*
@@ -706,13 +681,9 @@ CopyMultiInsertBufferCleanup(CopyMultiInsertInfo *miinfo,
return;
}
- table_finish_bulk_insert(buffer->resultRelInfo->ri_RelationDesc,
- miinfo->ti_options);
-=======
if (resultRelInfo->ri_FdwRoutine == NULL)
table_finish_bulk_insert(resultRelInfo->ri_RelationDesc,
miinfo->ti_options);
->>>>>>> REL_16_9
pfree(buffer);
}
@@ -2931,18 +2902,7 @@ BeginCopyFrom(ParseState *pstate,
else
{
cstate->need_transcoding = true;
-<<<<<<< HEAD
cstate->conversion_proc =
FindDefaultConversionProc(cstate->file_encoding, GetDatabaseEncoding());
-=======
- cstate->conversion_proc =
FindDefaultConversionProc(cstate->file_encoding,
-
GetDatabaseEncoding());
- if (!OidIsValid(cstate->conversion_proc))
- ereport(ERROR,
- (errcode(ERRCODE_UNDEFINED_FUNCTION),
- errmsg("default conversion function
for encoding \"%s\" to \"%s\" does not exist",
-
pg_encoding_to_char(cstate->file_encoding),
-
pg_encoding_to_char(GetDatabaseEncoding()))));
->>>>>>> REL_16_9
}
/*
@@ -2959,7 +2919,6 @@ BeginCopyFrom(ParseState *pstate,
cstate->whereClause = whereClause;
-<<<<<<< HEAD
/*
* Determine the mode
*/
@@ -2981,13 +2940,6 @@ BeginCopyFrom(ParseState *pstate,
cstate->dispatch_mode = COPY_EXECUTOR;
else
cstate->dispatch_mode = COPY_DIRECT;
-
- MemoryContextSwitchTo(oldcontext);
-
- oldcontext = MemoryContextSwitchTo(cstate->copycontext);
-
-=======
->>>>>>> REL_16_9
/* Initialize state variables */
// cstate->eol_type = EOL_UNKNOWN; /* GPDB: don't overwrite value set
in ProcessCopyOptions */
cstate->cur_relname = RelationGetRelationName(cstate->rel);
diff --git a/src/backend/commands/copyfromparse.c b/src/backend/commands/copyfromparse.c
index 8e499568d81..24e38ca6868 100644
--- a/src/backend/commands/copyfromparse.c
+++ b/src/backend/commands/copyfromparse.c
@@ -72,11 +72,8 @@
#include "miscadmin.h"
#include "pgstat.h"
#include "port/pg_bswap.h"
-<<<<<<< HEAD
#include "utils/elog.h"
-=======
#include "utils/builtins.h"
->>>>>>> REL_16_9
#include "utils/memutils.h"
#include "utils/rel.h"
@@ -760,8 +757,6 @@ CopyReadBinaryData(CopyFromState cstate, char *dest, int nbytes)
}
/*
-<<<<<<< HEAD
-=======
* Read raw fields in the next line for COPY FROM in text or csv mode.
* Return false if no more lines.
*
@@ -1066,7 +1061,6 @@ NextCopyFrom(CopyFromState cstate, ExprContext *econtext,
}
/*
->>>>>>> REL_16_9
* Read the next input line and stash it in line_buf.
*
* Result is true if read was terminated by EOF, false if terminated
@@ -1206,23 +1200,12 @@ CopyReadLineText(CopyFromState cstate)
char c;
/*
-<<<<<<< HEAD
-	 * Load more data if needed.  Ideally we would just force four bytes
-	 * of read-ahead and avoid the many calls to
-	 * IF_NEED_REFILL_AND_NOT_EOF_CONTINUE(), but the COPY_OLD_FE protocol
-	 * does not allow us to read too far ahead or we might read into the
-	 * next data, so we read-ahead only as far we know we can.  One
-	 * optimization would be to read-ahead four byte here if
-	 * cstate->copy_src != COPY_FRONTEND, but it hardly seems worth it,
-	 * considering the size of the buffer.
-=======
 	 * Load more data if needed.
 	 *
 	 * TODO: We could just force four bytes of read-ahead and avoid the
 	 * many calls to IF_NEED_REFILL_AND_NOT_EOF_CONTINUE().  That was
 	 * unsafe with the old v2 COPY protocol, but we don't support that
 	 * anymore.
->>>>>>> REL_16_9
*/
if (input_buf_ptr >= copy_buf_len || need_data)
{
diff --git a/src/backend/commands/copyto.c b/src/backend/commands/copyto.c
index 13eb82ccf49..c29020ce365 100644
--- a/src/backend/commands/copyto.c
+++ b/src/backend/commands/copyto.c
@@ -52,10 +52,9 @@
#include "utils/rel.h"
#include "utils/snapmgr.h"
-<<<<<<< HEAD
#include "cdb/cdbdisp_query.h"
#include "cdb/cdbvars.h"
-=======
+
/*
* Represents the different dest cases we need to worry about at
* the bottom level
@@ -66,46 +65,10 @@ typedef enum CopyDest
COPY_FRONTEND, /* to frontend */
COPY_CALLBACK /* to callback function */
} CopyDest;
->>>>>>> REL_16_9
#define EXEC_DATA_P 0
-<<<<<<< HEAD
extern CopyStmt *glob_copystmt;
-=======
-	int			file_encoding;	/* file or remote side's character encoding */
-	bool		need_transcoding;	/* file encoding diff from server? */
-	bool		encoding_embeds_ascii;	/* ASCII can be non-first byte? */
-
-	/* parameters from the COPY command */
-	Relation	rel;			/* relation to copy to */
-	QueryDesc  *queryDesc;		/* executable query to copy from */
-	List	   *attnumlist;		/* integer list of attnums to copy */
-	char	   *filename;		/* filename, or NULL for STDOUT */
-	bool		is_program;		/* is 'filename' a program to popen? */
-	copy_data_dest_cb data_dest_cb; /* function for writing data */
-
-	CopyFormatOptions opts;
-	Node	   *whereClause;	/* WHERE condition (or NULL) */
-
-	/*
-	 * Working state
-	 */
-	MemoryContext copycontext;	/* per-copy execution context */
-
-	FmgrInfo   *out_functions;	/* lookup info for output functions */
-	MemoryContext rowcontext;	/* per-row evaluation context */
-	uint64		bytes_processed;	/* number of bytes processed so far */
-} CopyToStateData;
-
-/* DestReceiver for COPY (query) TO */
-typedef struct
-{
-	DestReceiver pub;			/* publicly-known function pointers */
-	CopyToState cstate;			/* CopyToStateData for the command */
-	uint64		processed;		/* # of tuples processed */
-} DR_copy;
->>>>>>> REL_16_9
/* NOTE: there's a copy of this in copyfromparse.c */
static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0";
@@ -113,15 +76,10 @@ static const char BinarySignature[11] = "PGCOPY\n\377\r\n\0";
/* non-export function prototypes */
static void EndCopy(CopyToState cstate);
-<<<<<<< HEAD
-static void CopyAttributeOutText(CopyToState cstate, char *string);
-static void CopyAttributeOutCSV(CopyToState cstate, char *string,
-=======
static void ClosePipeToProgram(CopyToState cstate);
static void CopyOneRowTo(CopyToState cstate, TupleTableSlot *slot);
static void CopyAttributeOutText(CopyToState cstate, const char *string);
static void CopyAttributeOutCSV(CopyToState cstate, const char *string,
->>>>>>> REL_16_9
bool use_quote,
bool single_attr);
static uint64 CopyToDispatch(CopyToState cstate);
static void CopyToDispatchFlush(CopyToState cstate);
@@ -255,19 +213,8 @@ void CopySendEndOfRow(CopyToState cstate)
(void) pq_putmessage('d', fe_msgbuf->data,
fe_msgbuf->len);
break;
case COPY_CALLBACK:
-<<<<<<< HEAD
-			/* we don't actually do the write here, we let the caller do it */
-#ifndef WIN32
- CopySendChar(cstate, '\n');
-#else
- CopySendString(cstate, "\r\n");
-#endif
- return; /* don't want to reset msgbuf quite yet */
-
-=======
cstate->data_dest_cb(fe_msgbuf->data, fe_msgbuf->len);
break;
->>>>>>> REL_16_9
}
/* Update the progress */
@@ -331,13 +278,7 @@ BeginCopyTo(ParseState *pstate,
List *options)
{
CopyToState cstate;
-<<<<<<< HEAD
bool pipe;
-=======
- bool pipe = (filename == NULL && data_dest_cb == NULL);
- TupleDesc tupDesc;
- int num_phys_attrs;
->>>>>>> REL_16_9
MemoryContext oldcontext;
const int progress_cols[] = {
PROGRESS_COPY_COMMAND,
@@ -1225,15 +1166,9 @@ BeginCopy(ParseState *pstate,
* function and is executed repeatedly. (See also the same
hack in
* DECLARE CURSOR and PREPARE.) XXX FIXME someday.
*/
-<<<<<<< HEAD
-		rewritten = pg_analyze_and_rewrite(copyObject(raw_query),
-										   pstate->p_sourcetext, NULL, 0,
-										   NULL);
-=======
-		rewritten = pg_analyze_and_rewrite_fixedparams(raw_query,
+		rewritten = pg_analyze_and_rewrite_fixedparams(copyObject(raw_query),
 													   pstate->p_sourcetext, NULL, 0,
 													   NULL);
->>>>>>> REL_16_9
/* check that we got back something we can work with */
if (rewritten == NIL)
@@ -1435,22 +1370,10 @@ BeginCopy(ParseState *pstate,
cstate->copy_dest = COPY_FILE; /* default */
-<<<<<<< HEAD
MemoryContextSwitchTo(oldcontext);
return cstate;
}
-=======
- if (data_dest_cb)
- {
- progress_vals[1] = PROGRESS_COPY_TYPE_CALLBACK;
- cstate->copy_dest = COPY_CALLBACK;
- cstate->data_dest_cb = data_dest_cb;
- }
- else if (pipe)
- {
- progress_vals[1] = PROGRESS_COPY_TYPE_PIPE;
->>>>>>> REL_16_9
CopyToState
BeginCopyToOnSegment(QueryDesc *queryDesc)
@@ -1945,11 +1868,6 @@ CopyToDispatchDirectoryTable(CopyToState cstate)
static uint64
CopyTo(CopyToState cstate)
{
-<<<<<<< HEAD
-=======
- bool pipe = (cstate->filename == NULL &&
cstate->data_dest_cb == NULL);
- bool fe_copy = (pipe && whereToSendOutput == DestRemote);
->>>>>>> REL_16_9
TupleDesc tupDesc;
int num_phys_attrs;
ListCell *cur;
@@ -2477,8 +2395,6 @@ BeginCopyToDirectoryTable(ParseState *pstate,
return cstate;
}
-<<<<<<< HEAD
-=======
/*
* Send text representation of one attribute, with conversion and escaping
*/
@@ -2788,4 +2704,3 @@ CreateCopyDestReceiver(void)
return (DestReceiver *) self;
}
->>>>>>> REL_16_9
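
Two things worth noting in the copyto.c hunks above: the CopyToStateData struct and DR_copy receiver move out of this file (see the copyto_internal.h hunk at the end of this patch), and BeginCopy() adopts the PostgreSQL 16 entry point while keeping the GPDB copy of the raw parse tree. A sketch of the resolved rewrite call, as the hunk adds it:

    rewritten = pg_analyze_and_rewrite_fixedparams(copyObject(raw_query),
                                                   pstate->p_sourcetext, NULL, 0,
                                                   NULL);

The COPY_CALLBACK destination likewise now writes through the callback rather than appending a newline: cstate->data_dest_cb(fe_msgbuf->data, fe_msgbuf->len).
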
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
index 1199891056c..5ccfeffa134 100644
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -8,13 +8,9 @@
* stepping on each others' toes. Formerly we used table-level locks
* on pg_database, but that's too coarse-grained.
*
-<<<<<<< HEAD
* Portions Copyright (c) 2005-2010, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -46,12 +42,9 @@
#include "catalog/indexing.h"
#include "catalog/objectaccess.h"
#include "catalog/pg_authid.h"
-<<<<<<< HEAD
#include "catalog/pg_class.h"
#include "catalog/pg_namespace.h"
-=======
#include "catalog/pg_collation.h"
->>>>>>> REL_16_9
#include "catalog/pg_database.h"
#include "catalog/pg_db_role_setting.h"
#include "catalog/pg_subscription.h"
@@ -63,11 +56,8 @@
#include "commands/defrem.h"
#include "commands/seclabel.h"
#include "commands/tablespace.h"
-<<<<<<< HEAD
#include "commands/tag.h"
-=======
#include "common/file_perm.h"
->>>>>>> REL_16_9
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "pgstat.h"
@@ -89,7 +79,6 @@
#include "utils/snapmgr.h"
#include "utils/syscache.h"
-<<<<<<< HEAD
#include "catalog/oid_dispatch.h"
#include "cdb/cdbdisp_query.h"
#include "cdb/cdbdispatchresult.h"
@@ -98,7 +87,7 @@
#include "cdb/cdbvars.h"
#include "utils/pg_rusage.h"
-=======
+
/*
* Create database strategy.
*
@@ -114,7 +103,6 @@ typedef enum CreateDBStrategy
CREATEDB_WAL_LOG,
CREATEDB_FILE_COPY
} CreateDBStrategy;
->>>>>>> REL_16_9
typedef struct
{
@@ -644,6 +632,13 @@ CreateDatabaseUsingFileCopy(Oid src_dboid, Oid dst_dboid, Oid src_tsid,
dstpath = GetDatabasePath(dst_dboid, dsttablespace);
+ /*
+ * Register the database directory to PendingDBDelete link list
+ * for cleanup in txn abort.
+ */
+ ScheduleDbDirDelete(dst_dboid, dsttablespace, false);
+
+
/*
* Copy this subdirectory to the new location
*
@@ -710,13 +705,7 @@ CreateDatabaseUsingFileCopy(Oid src_dboid, Oid dst_dboid, Oid src_tsid,
Oid
createdb(ParseState *pstate, const CreatedbStmt *stmt)
{
-<<<<<<< HEAD
- TableScanDesc scan;
- Relation rel;
Oid src_dboid = InvalidOid;
-=======
- Oid src_dboid;
->>>>>>> REL_16_9
Oid src_owner;
int src_encoding = -1;
char *src_collate = NULL;
@@ -1382,12 +1371,6 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
*/
pg_database_rel = table_open(DatabaseRelationId, RowExclusiveLock);
-<<<<<<< HEAD
- if (Gp_role == GP_ROLE_EXECUTE)
- dboid = GetPreassignedOidForDatabase(dbname);
- else
- {
-=======
/*
* If database OID is configured, check if the OID is already in use or
* data directory already exists.
@@ -1410,20 +1393,21 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
else
{
 		/* Select an OID for the new database if is not explicitly configured. */
->>>>>>> REL_16_9
-		do
+		if (Gp_role == GP_ROLE_EXECUTE)
+			dboid = GetPreassignedOidForDatabase(dbname);
+		else
 		{
-			dboid = GetNewOidWithIndex(pg_database_rel, DatabaseOidIndexId,
-									   Anum_pg_database_oid);
-		} while (check_db_file_conflict(dboid));
-<<<<<<< HEAD
-
-		if (Gp_role == GP_ROLE_DISPATCH)
-			RememberAssignedOidForDatabase(dbname, dboid);
-=======
->>>>>>> REL_16_9
+			do
+			{
+				dboid = GetNewOidWithIndex(pg_database_rel, DatabaseOidIndexId,
+										   Anum_pg_database_oid);
+			} while (check_db_file_conflict(dboid));
+		}
 	}
+	if (Gp_role == GP_ROLE_DISPATCH)
+		RememberAssignedOidForDatabase(dbname, dboid);
+
/*
* Insert a new tuple into pg_database. This establishes our ownership
of
* the new database name (anyone else trying to insert the same name
will
@@ -1446,8 +1430,6 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
new_record[Anum_pg_database_datfrozenxid - 1] =
TransactionIdGetDatum(src_frozenxid);
new_record[Anum_pg_database_datminmxid - 1] =
TransactionIdGetDatum(src_minmxid);
new_record[Anum_pg_database_dattablespace - 1] =
ObjectIdGetDatum(dst_deftablespace);
-<<<<<<< HEAD
-=======
new_record[Anum_pg_database_datcollate - 1] =
CStringGetTextDatum(dbcollate);
new_record[Anum_pg_database_datctype - 1] =
CStringGetTextDatum(dbctype);
if (dbiculocale)
@@ -1463,7 +1445,6 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
else
new_record_nulls[Anum_pg_database_datcollversion - 1] = true;
->>>>>>> REL_16_9
/*
* We deliberately set datacl to default (NULL), rather than copying it
* from the template database. Copying it would be a bad idea when the
@@ -1565,117 +1546,12 @@ createdb(ParseState *pstate, const CreatedbStmt *stmt)
* Otherwise, call CreateDatabaseUsingFileCopy that will copy
the
* database file by file.
*/
-<<<<<<< HEAD
- rel = table_open(TableSpaceRelationId, AccessShareLock);
- scan = table_beginscan_catalog(rel, 0, NULL);
- while ((tuple = heap_getnext(scan, ForwardScanDirection)) !=
NULL)
- {
- Form_pg_tablespace spaceform = (Form_pg_tablespace)
GETSTRUCT(tuple);
- Oid srctablespace = spaceform->oid;
- Oid dsttablespace;
- char *srcpath;
- char *dstpath;
- struct stat st;
-
- /* No need to copy global tablespace */
- if (srctablespace == GLOBALTABLESPACE_OID)
- continue;
-
- srcpath = GetDatabasePath(src_dboid, srctablespace);
-
- if (stat(srcpath, &st) < 0 || !S_ISDIR(st.st_mode) ||
- directory_is_empty(srcpath))
- {
- /* Assume we can ignore it */
- pfree(srcpath);
- continue;
- }
-
- if (srctablespace == src_deftablespace)
- dsttablespace = dst_deftablespace;
- else
- dsttablespace = srctablespace;
-
- dstpath = GetDatabasePath(dboid, dsttablespace);
-
- /*
- * Register the database directory to PendingDBDelete
link list
- * for cleanup in txn abort.
- */
- ScheduleDbDirDelete(dboid, dsttablespace, false);
-
- /*
- * Copy this subdirectory to the new location
- *
- * We don't need to copy subdirectories
- */
- copydir(srcpath, dstpath, false);
-
- SIMPLE_FAULT_INJECTOR("create_db_after_file_copy");
-
- /* Record the filesystem change in XLOG */
- {
- xl_dbase_create_rec xlrec;
-
- xlrec.db_id = dboid;
- xlrec.tablespace_id = dsttablespace;
- xlrec.src_db_id = src_dboid;
- xlrec.src_tablespace_id = srctablespace;
-
- XLogBeginInsert();
- XLogRegisterData((char *) &xlrec,
sizeof(xl_dbase_create_rec));
-
- (void) XLogInsert(RM_DBASE_ID,
-
XLOG_DBASE_CREATE | XLR_SPECIAL_REL_UPDATE);
- }
-
- pfree(srcpath);
- pfree(dstpath);
- }
-
- SIMPLE_FAULT_INJECTOR("after_xlog_create_database");
-
- table_endscan(scan);
- table_close(rel, AccessShareLock);
-
- /*
- * We force a checkpoint before committing. This effectively
means
- * that committed XLOG_DBASE_CREATE operations will never need
to be
- * replayed (at least not in ordinary crash recovery; we still
have to
- * make the XLOG entry for the benefit of PITR operations). This
- * avoids two nasty scenarios:
- *
- * #1: When PITR is off, we don't XLOG the contents of newly
created
- * indexes; therefore the drop-and-recreate-whole-directory
behavior
- * of DBASE_CREATE replay would lose such indexes.
- *
- * #2: Since we have to recopy the source database during
DBASE_CREATE
- * replay, we run the risk of copying changes in it that were
- * committed after the original CREATE DATABASE command but
before the
- * system crash that led to the replay. This is at least
unexpected
- * and at worst could lead to inconsistencies, eg duplicate
table
- * names.
- *
- * (Both of these were real bugs in releases 8.0 through 8.0.3.)
- *
- * In PITR replay, the first of these isn't an issue, and the
second
- * is only a risk if the CREATE DATABASE and subsequent template
- * database change both occur while a base backup is being
taken.
- * There doesn't seem to be much we can do about that except
document
- * it as a limitation.
- *
- * Perhaps if we ever implement CREATE DATABASE in a less
cheesy way,
- * we can avoid this.
- */
- RequestCheckpoint(CHECKPOINT_IMMEDIATE | CHECKPOINT_FORCE |
CHECKPOINT_WAIT);
-=======
if (dbstrategy == CREATEDB_WAL_LOG)
CreateDatabaseUsingWalLog(src_dboid, dboid,
src_deftablespace,
dst_deftablespace);
else
CreateDatabaseUsingFileCopy(src_dboid, dboid,
src_deftablespace,
dst_deftablespace);
->>>>>>> REL_16_9
/*
* Close pg_database, but keep lock till commit.
@@ -1824,11 +1700,7 @@ dropdb(const char *dbname, bool missing_ok, bool force)
pgdbrel = table_open(DatabaseRelationId, RowExclusiveLock);
 	if (!get_db_info(dbname, AccessExclusiveLock, &db_id, NULL, NULL,
-<<<<<<< HEAD
-					 &db_istemplate, NULL, NULL, NULL, NULL, &defaultTablespace, NULL, NULL))
-=======
 					 &db_istemplate, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL))
->>>>>>> REL_16_9
{
if (!missing_ok)
{
@@ -1943,7 +1815,6 @@ dropdb(const char *dbname, bool missing_ok, bool force)
errdetail_busy_db(notherbackends,
npreparedxacts)));
/*
-<<<<<<< HEAD
* Free the database on the segDBs
*/
if (Gp_role == GP_ROLE_DISPATCH)
@@ -1977,8 +1848,6 @@ dropdb(const char *dbname, bool missing_ok, bool force)
ReleaseSysCache(tup);
/*
-=======
->>>>>>> REL_16_9
* Delete any comments or security labels associated with the database.
*/
DeleteSharedComments(db_id, DatabaseRelationId);
@@ -2049,12 +1918,6 @@ dropdb(const char *dbname, bool missing_ok, bool force)
*/
DropDatabaseBuffers(db_id);
- /*
-<<<<<<< HEAD
- * Tell the stats collector to forget it immediately, too.
- */
- pgstat_drop_database(db_id);
-
/* MPP-6929: metadata tracking */
if (Gp_role == GP_ROLE_DISPATCH)
MetaTrackDropObject(DatabaseRelationId, db_id);
@@ -2065,8 +1928,6 @@ dropdb(const char *dbname, bool missing_ok, bool force)
pgstat_drop_database(db_id);
/*
-=======
->>>>>>> REL_16_9
* Tell checkpointer to forget any pending fsync and unlink requests for
* files in the database; else the fsyncs will fail at next checkpoint,
or
* worse, it will delete files that belong to a newly created database
@@ -2415,14 +2276,12 @@ movedb(const char *dbname, const char *tblspcname)
PointerGetDatum(&fparms));
#endif
{
-<<<<<<< HEAD
- ScheduleDbDirDelete(db_id, dst_tblspcoid, false);
-=======
Datum new_record[Natts_pg_database] = {0};
bool new_record_nulls[Natts_pg_database] = {0};
bool new_record_repl[Natts_pg_database] = {0};
->>>>>>> REL_16_9
+ ScheduleDbDirDelete(db_id, dst_tblspcoid, false);
+
/*
* Copy files from the old tablespace to the new one
*/
@@ -2536,7 +2395,6 @@ movedb(const char *dbname, const char *tblspcname)
MoveDbSessionLockRelease();
}
-<<<<<<< HEAD
/*
* register the db_id with pending deletes list to schedule removing
database
* directory on transaction commit.
@@ -2546,14 +2404,6 @@ movedb(const char *dbname, const char *tblspcname)
pfree(src_dbpath);
pfree(dst_dbpath);
SIMPLE_FAULT_INJECTOR("inside_move_db_transaction");
-=======
- /* Now it's safe to release the database lock */
- UnlockSharedObjectForSession(DatabaseRelationId, db_id, 0,
-
AccessExclusiveLock);
-
- pfree(src_dbpath);
- pfree(dst_dbpath);
->>>>>>> REL_16_9
}
/*
@@ -2787,12 +2637,6 @@ AlterDatabase(ParseState *pstate, AlterDatabaseStmt *stmt, bool isTopLevel)
newtuple = heap_modify_tuple(tuple, RelationGetDescr(rel), new_record,
new_record_nulls, new_record_repl);
CatalogTupleUpdate(rel, &tuple->t_self, newtuple);
-<<<<<<< HEAD
-
-=======
- UnlockTuple(rel, &tuple->t_self, InplaceUpdateTupleLock);
-
->>>>>>> REL_16_9
InvokeObjectPostAlterHook(DatabaseRelationId, dboid, 0);
systable_endscan(scan);
@@ -3618,11 +3462,7 @@ dbase_redo(XLogReaderState *record)
(xl_dbase_create_file_copy_rec *)
XLogRecGetData(record);
char *src_path;
char *dst_path;
-<<<<<<< HEAD
char *parentdir;
-=======
- char *parent_path;
->>>>>>> REL_16_9
struct stat st;
src_path = GetDatabasePath(xlrec->src_db_id,
xlrec->src_tablespace_id);
@@ -3643,7 +3483,6 @@ dbase_redo(XLogReaderState *record)
}
/*
-<<<<<<< HEAD
* It is possible that the tablespace was later dropped, but we
are
* re-redoing database create before that. In that case,
* either src_path or dst_path is probably missing here and
needs to
@@ -3666,33 +3505,6 @@ dbase_redo(XLogReaderState *record)
parentdir)));
}
pfree(parentdir);
-=======
- * If the parent of the target path doesn't exist, create it
now. This
- * enables us to create the target underneath later.
- */
- parent_path = pstrdup(dst_path);
- get_parent_directory(parent_path);
- if (stat(parent_path, &st) < 0)
- {
- if (errno != ENOENT)
- ereport(FATAL,
- errmsg("could not stat
directory \"%s\": %m",
- dst_path));
-
- /* create the parent directory if needed and valid */
- recovery_create_dbdir(parent_path, true);
- }
- pfree(parent_path);
-
- /*
- * There's a case where the copy source directory is missing
for the
- * same reason above. Create the empty source directory so that
- * copydir below doesn't fail. The directory will be dropped
soon by
- * recovery.
- */
- if (stat(src_path, &st) < 0 && errno == ENOENT)
- recovery_create_dbdir(src_path, false);
->>>>>>> REL_16_9
/*
* Force dirty buffers out to disk, to ensure source database is
@@ -3712,8 +3524,6 @@ dbase_redo(XLogReaderState *record)
pfree(src_path);
pfree(dst_path);
-<<<<<<< HEAD
-=======
}
else if (info == XLOG_DBASE_CREATE_WAL_LOG)
{
@@ -3733,7 +3543,6 @@ dbase_redo(XLogReaderState *record)
CreateDirAndVersionFile(dbpath, xlrec->db_id,
xlrec->tablespace_id,
true);
pfree(dbpath);
->>>>>>> REL_16_9
}
else if (info == XLOG_DBASE_DROP)
{
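
The createdb() hunks above fold the GPDB OID pre-assignment into the upstream strategy-aware code path, and the old per-tablespace copy loop plus forced checkpoint are replaced by CreateDatabaseUsingWalLog()/CreateDatabaseUsingFileCopy(), with ScheduleDbDirDelete() registering the new directory for cleanup on transaction abort. A sketch of the merged OID selection, reassembled from the wrapped hunk (QD/QE roles per Greenplum terminology):

    if (Gp_role == GP_ROLE_EXECUTE)
        dboid = GetPreassignedOidForDatabase(dbname);   /* QE: use the OID chosen on the QD */
    else
    {
        do
        {
            dboid = GetNewOidWithIndex(pg_database_rel, DatabaseOidIndexId,
                                       Anum_pg_database_oid);
        } while (check_db_file_conflict(dboid));
    }

    if (Gp_role == GP_ROLE_DISPATCH)
        RememberAssignedOidForDatabase(dbname, dboid);  /* QD: remember it for dispatch */
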
diff --git a/src/backend/commands/dropcmds.c b/src/backend/commands/dropcmds.c
index 4fa28969be5..b8d1a10bc68 100644
--- a/src/backend/commands/dropcmds.c
+++ b/src/backend/commands/dropcmds.c
@@ -488,14 +488,6 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
 			msg = gettext_noop("publication \"%s\" does not exist, skipping");
name = strVal(object);
break;
-<<<<<<< HEAD
-		case OBJECT_EXTPROTOCOL:
-			msg = gettext_noop("protocol \"%s\" does not exist, skipping");
-			name = strVal((Value *) object);
-			break;
-		default:
-			elog(ERROR, "unrecognized object type: %d", (int) objtype);
-=======
case OBJECT_COLUMN:
case OBJECT_DATABASE:
@@ -514,7 +506,6 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
* is probably wrong or should be revisited.
*/
elog(ERROR, "unsupported object type: %d", (int)
objtype);
->>>>>>> REL_16_9
break;
case OBJECT_AMOP:
@@ -532,7 +523,10 @@ does_not_exist_skipping(ObjectType objtype, Node *object)
 			/* These are currently not used or needed. */
 			elog(ERROR, "unsupported object type: %d", (int) objtype);
 			break;
-
+		case OBJECT_EXTPROTOCOL:
+			msg = gettext_noop("protocol \"%s\" does not exist, skipping");
+			name = strVal((Value *) object);
+			break;
 			/* no default, to let compiler warn about missing case */
}
if (!msg)
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index a7fcbdd9f42..e1a42e5ad86 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -3,13 +3,9 @@
* explain.c
* Explain query execution plans
*
-<<<<<<< HEAD
* Portions Copyright (c) 2005-2010, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994-5, Regents of the University of California
*
* IDENTIFICATION
@@ -141,7 +137,6 @@ static void show_windowagg_keys(WindowAggState *waggstate, List *ancestors, Expl
static void show_incremental_sort_info(IncrementalSortState *incrsortstate,
ExplainState *es);
static void show_hash_info(HashState *hashstate, ExplainState *es);
-<<<<<<< HEAD
static void show_runtime_filter_info(RuntimeFilterState *rfstate,
ExplainState *es);
static void show_pushdown_runtime_filter_info(const char *qlabel,
@@ -150,11 +145,6 @@ static void show_pushdown_runtime_filter_info(const char *qlabel,
static void show_memoize_info(MemoizeState *mstate, List *ancestors,
ExplainState *es);
static void show_hashagg_info(AggState *hashstate, ExplainState *es);
-=======
-static void show_memoize_info(MemoizeState *mstate, List *ancestors,
- ExplainState *es);
-static void show_hashagg_info(AggState *aggstate, ExplainState *es);
->>>>>>> REL_16_9
static void show_tidbitmap_info(BitmapHeapScanState *planstate,
ExplainState
*es);
static void show_instrumentation_count(const char *qlabel, int which,
@@ -828,28 +818,9 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
/* Create textual dump of plan tree */
ExplainPrintPlan(es, queryDesc);
-<<<<<<< HEAD
if (cursorOptions & CURSOR_OPT_PARALLEL_RETRIEVE)
ExplainParallelRetrieveCursor(es, queryDesc);
-	/*
-	 * COMPUTE_QUERY_ID_REGRESS means COMPUTE_QUERY_ID_AUTO, but we don't show
-	 * the queryid in any of the EXPLAIN plans to keep stable the results
-	 * generated by regression test suites.
-	 */
-	if (es->verbose && plannedstmt->queryId != UINT64CONST(0) &&
-		compute_query_id != COMPUTE_QUERY_ID_REGRESS)
-	{
-		/*
-		 * Output the queryid as an int64 rather than a uint64 so we match
-		 * what would be seen in the BIGINT pg_stat_statements.queryid column.
-		 */
-		ExplainPropertyInteger("Query Identifier", NULL, (int64)
-							   plannedstmt->queryId, es);
-	}
-
-=======
->>>>>>> REL_16_9
/* Show buffer usage in planning */
if (bufusage)
{
@@ -1071,7 +1042,6 @@ ExplainPrintPlan(ExplainState *es, QueryDesc *queryDesc)
* If requested, include information about GUC parameters with values
that
* don't match the built-in defaults.
*/
-<<<<<<< HEAD
ExplainPrintSettings(es, queryDesc->plannedstmt->planGen);
}
@@ -1139,8 +1109,6 @@ ExplainPrintSliceTable(ExplainState *es, QueryDesc *queryDesc)
}
ExplainCloseGroup("Slice Table", "Slice Table", false, es);
-=======
- ExplainPrintSettings(es);
/*
* COMPUTE_QUERY_ID_REGRESS means COMPUTE_QUERY_ID_AUTO, but we don't
show
@@ -1157,7 +1125,6 @@ ExplainPrintSliceTable(ExplainState *es, QueryDesc *queryDesc)
ExplainPropertyInteger("Query Identifier", NULL, (int64)
queryDesc->plannedstmt->queryId, es);
}
->>>>>>> REL_16_9
}
/*
@@ -2210,13 +2177,11 @@ ExplainNode(PlanState *planstate, List *ancestors,
case JOIN_ANTI:
jointype = "Anti";
break;
-<<<<<<< HEAD
 			case JOIN_LASJ_NOTIN:
 				jointype = "Left Anti Semi (Not-In)";
-=======
+				break;
 			case JOIN_RIGHT_ANTI:
 				jointype = "Right Anti";
->>>>>>> REL_16_9
break;
default:
jointype = "???";
@@ -2816,9 +2781,6 @@ ExplainNode(PlanState *planstate, List *ancestors,
show_instrumentation_count("Rows Removed by
Filter", 1,
planstate, es);
break;
-<<<<<<< HEAD
-#if 0 /* Group node has been disabled in GPDB */
-=======
case T_WindowAgg:
show_upper_qual(plan->qual, "Filter", planstate,
ancestors, es);
if (plan->qual)
@@ -2827,7 +2789,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
show_upper_qual(((WindowAgg *) plan)->runConditionOrig,
"Run Condition",
planstate, ancestors, es);
break;
->>>>>>> REL_16_9
+#if 0 /* Group node has been disabled in GPDB */
case T_Group:
show_group_keys(castNode(GroupState, planstate),
ancestors, es);
show_upper_qual(plan->qual, "Filter", planstate,
ancestors, es);
@@ -2880,7 +2842,6 @@ ExplainNode(PlanState *planstate, List *ancestors,
case T_Hash:
show_hash_info(castNode(HashState, planstate), es);
break;
-<<<<<<< HEAD
case T_RuntimeFilter:
show_runtime_filter_info(castNode(RuntimeFilterState,
planstate),
es);
@@ -2912,8 +2873,6 @@ ExplainNode(PlanState *planstate, List *ancestors,
case T_Append:
show_join_pruning_info(((Append *)
plan)->join_prune_paramids, es);
break;
-=======
->>>>>>> REL_16_9
case T_Memoize:
show_memoize_info(castNode(MemoizeState, planstate),
ancestors,
es);
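
One subtle fix in the explain.c hunks above: JOIN_LASJ_NOTIN previously ran into the REL_16_9 conflict block, so the resolution adds an explicit break before the new JOIN_RIGHT_ANTI case. The merged switch arm reads roughly as follows (labels taken from the hunk):

    case JOIN_LASJ_NOTIN:
        jointype = "Left Anti Semi (Not-In)";   /* GPDB-specific join type */
        break;
    case JOIN_RIGHT_ANTI:
        jointype = "Right Anti";                /* new in PostgreSQL 16 */
        break;
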
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index 3a75eb85ace..bbcbb2b2da3 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -3,13 +3,9 @@
* indexcmds.c
* POSTGRES define and remove index code.
*
-<<<<<<< HEAD
* Portions Copyright (c) 2005-2010, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -35,11 +31,8 @@
#include "catalog/pg_am.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_constraint.h"
-<<<<<<< HEAD
#include "catalog/pg_directory_table.h"
-=======
#include "catalog/pg_database.h"
->>>>>>> REL_16_9
#include "catalog/pg_inherits.h"
#include "catalog/pg_namespace.h"
#include "catalog/pg_opclass.h"
@@ -124,16 +117,9 @@ static void RangeVarCallbackForReindexIndex(const RangeVar *relation,
Oid relId, Oid oldRelId, void *arg);
static Oid ReindexTable(ReindexStmt *stmt, ReindexParams *params,
bool isTopLevel);
-<<<<<<< HEAD
 static void ReindexMultipleTables(ReindexStmt *stmt, ReindexParams *params);
 static void reindex_error_callback(void *args);
 static void ReindexPartitions(ReindexStmt *stmt, Oid relid, ReindexParams *params,
-=======
-static void ReindexMultipleTables(const char *objectName,
-								  ReindexObjectType objectKind, ReindexParams *params);
-static void reindex_error_callback(void *arg);
-static void ReindexPartitions(Oid relid, ReindexParams *params,
->>>>>>> REL_16_9
 							  bool isTopLevel);
static void ReindexMultipleInternal(ReindexStmt *stmt, List *relids,
ReindexParams *params);
@@ -765,7 +751,7 @@ DefineIndex(Oid relationId,
LockRelId heaprelid;
LOCKTAG heaplocktag;
LOCKMODE lockmode;
-<<<<<<< HEAD
+ Snapshot snapshot;
Oid root_save_userid;
int root_save_sec_context;
int root_save_nestlevel;
@@ -785,12 +771,6 @@ DefineIndex(Oid relationId,
*/
shouldDispatch = false;
}
-=======
- Snapshot snapshot;
- Oid root_save_userid;
- int root_save_sec_context;
- int root_save_nestlevel;
->>>>>>> REL_16_9
root_save_nestlevel = NewGUCNestLevel();
@@ -992,13 +972,8 @@ DefineIndex(Oid relationId,
{
AclResult aclresult;
-<<<<<<< HEAD
-		aclresult = pg_namespace_aclcheck(namespaceId, root_save_userid,
-										  ACL_CREATE);
-=======
 		aclresult = object_aclcheck(NamespaceRelationId, namespaceId, root_save_userid,
 									ACL_CREATE);
->>>>>>> REL_16_9
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_SCHEMA,
get_namespace_name(namespaceId));
@@ -1041,13 +1016,8 @@ DefineIndex(Oid relationId,
{
AclResult aclresult;
-<<<<<<< HEAD
-		aclresult = pg_tablespace_aclcheck(tablespaceId, root_save_userid,
-										   ACL_CREATE);
-=======
 		aclresult = object_aclcheck(TableSpaceRelationId, tablespaceId, root_save_userid,
 									ACL_CREATE);
->>>>>>> REL_16_9
if (aclresult != ACLCHECK_OK)
aclcheck_error(aclresult, OBJECT_TABLESPACE,
get_tablespace_name(tablespaceId));
@@ -1656,7 +1626,6 @@ DefineIndex(Oid relationId,
}
/*
-<<<<<<< HEAD
* Create tag description.
*/
if (stmt->tags)
@@ -1685,12 +1654,7 @@ DefineIndex(Oid relationId,
* some index function changed a behavior-affecting GUC, e.g. xmloption,
* that affects subsequent steps. This improves bug-compatibility with
* older PostgreSQL versions. They did the AtEOXact_GUC() here for the
- * purpose of clearing the above default_tablespace change.
-=======
- * Roll back any GUC changes executed by index functions, and keep
- * subsequent changes local to this command. This is essential if some
- * index function changed a behavior-affecting GUC, e.g. search_path.
->>>>>>> REL_16_9
+ * purpose of clearing the above default_tablespace change
*/
AtEOXact_GUC(false, root_save_nestlevel);
root_save_nestlevel = NewGUCNestLevel();
@@ -1964,36 +1928,16 @@ DefineIndex(Oid relationId,
Assert(GetUserId() ==
child_save_userid);
SetUserIdAndSecContext(root_save_userid,
root_save_sec_context);
-<<<<<<< HEAD
DefineIndex(childRelid, childStmt,
- InvalidOid, /*
no predefined OID */
-
indexRelationId, /* this is our child */
-
createdConstraintId,
- is_alter_table,
check_rights, check_not_in_use,
- skip_build,
quiet);
-
SetUserIdAndSecContext(child_save_userid,
-
child_save_sec_context);
-=======
- childAddr =
- DefineIndex(childRelid,
childStmt,
-
InvalidOid, /* no predefined OID */
-
indexRelationId, /* this is our child */
-
createdConstraintId,
- -1,
-
is_alter_table, check_rights,
-
check_not_in_use,
-
skip_build, quiet);
+ InvalidOid, /* no
predefined OID */
+ indexRelationId,
/* this is our child */
+ createdConstraintId,
+ -1,
+ is_alter_table,
check_rights,
+ check_not_in_use,
+ skip_build, quiet);
SetUserIdAndSecContext(child_save_userid,
child_save_sec_context);
-
- /*
- * Check if the index just created is
valid or not, as it
- * could be possible that it has been
switched as invalid
- * when recursing across multiple
partition levels.
- */
- if
(!get_index_isvalid(childAddr.objectId))
- invalidate_parent = true;
->>>>>>> REL_16_9
}
free_attrmap(attmap);
@@ -2067,7 +2011,6 @@ DefineIndex(Oid relationId,
return address;
}
-<<<<<<< HEAD
stmt->idxname = indexRelationName;
if (shouldDispatch)
{
@@ -2091,12 +2034,6 @@ DefineIndex(Oid relationId,
SetUserIdAndSecContext(root_save_userid, root_save_sec_context);
if (!concurrent || Gp_role == GP_ROLE_EXECUTE)
-=======
- AtEOXact_GUC(false, root_save_nestlevel);
- SetUserIdAndSecContext(root_save_userid, root_save_sec_context);
-
- if (!concurrent)
->>>>>>> REL_16_9
{
/* Close the heap and we're done, in the non-concurrent case */
table_close(rel, NoLock);
@@ -3647,11 +3584,7 @@ ReindexMultipleTables(ReindexStmt *stmt, ReindexParams
*params)
bool concurrent_warning = false;
bool tablespace_warning = false;
-<<<<<<< HEAD
Assert(Gp_role != GP_ROLE_EXECUTE);
- AssertArg(objectName);
-=======
->>>>>>> REL_16_9
Assert(objectKind == REINDEX_OBJECT_SCHEMA ||
objectKind == REINDEX_OBJECT_SYSTEM ||
objectKind == REINDEX_OBJECT_DATABASE);
@@ -4275,14 +4208,8 @@ ReindexRelationConcurrently(ReindexStmt *stmt, Oid
relationOid, ReindexParams *p
{
ReindexIndexInfo *idx;
-<<<<<<< HEAD
/* Save the list of
relation OIDs in private context */
oldcontext =
MemoryContextSwitchTo(private_context);
-=======
- idx =
palloc_object(ReindexIndexInfo);
- idx->indexId = cellOid;
- /* other fields set later */
->>>>>>> REL_16_9
idx =
makeNode(ReindexIndexInfo);
idx->indexId = cellOid;
@@ -4332,18 +4259,11 @@ ReindexRelationConcurrently(ReindexStmt *stmt, Oid
relationOid, ReindexParams *p
{
ReindexIndexInfo *idx;
-<<<<<<< HEAD
/*
* Save the list
of relation OIDs in private
* context
*/
oldcontext =
MemoryContextSwitchTo(private_context);
-=======
- idx =
palloc_object(ReindexIndexInfo);
- idx->indexId = cellOid;
- indexIds =
lappend(indexIds, idx);
- /* other fields set
later */
->>>>>>> REL_16_9
idx =
makeNode(ReindexIndexInfo);
idx->indexId =
cellOid;
@@ -4430,11 +4350,7 @@ ReindexRelationConcurrently(ReindexStmt *stmt, Oid
relationOid, ReindexParams *p
* Save the list of relation OIDs in private
context. Note
* that invalid indexes are allowed here.
*/
-<<<<<<< HEAD
idx = makeNode(ReindexIndexInfo);
-=======
- idx = palloc_object(ReindexIndexInfo);
->>>>>>> REL_16_9
idx->indexId = relationOid;
indexIds = lappend(indexIds, idx);
/* other fields set later */
@@ -4944,11 +4860,7 @@ reindex_concurrently_create_indexes(MemoryContext
oldcontext,
*/
oldcontext = MemoryContextSwitchTo(private_context);
-<<<<<<< HEAD
newidx = makeNode(ReindexIndexInfo);
-=======
- newidx = palloc_object(ReindexIndexInfo);
->>>>>>> REL_16_9
newidx->indexId = newIndexId;
newidx->safe = idx->safe;
newidx->tableId = idx->tableId;
@@ -4964,13 +4876,8 @@ reindex_concurrently_create_indexes(MemoryContext
oldcontext,
*/
lockrelid = palloc_object(LockRelId);
*lockrelid = indexRel->rd_lockInfo.lockRelId;
-<<<<<<< HEAD
*relationLocks = lappend(*relationLocks, lockrelid);
lockrelid = palloc(sizeof(*lockrelid));
-=======
- relationLocks = lappend(relationLocks, lockrelid);
- lockrelid = palloc_object(LockRelId);
->>>>>>> REL_16_9
*lockrelid = newIndexRel->rd_lockInfo.lockRelId;
*relationLocks = lappend(*relationLocks, lockrelid);
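
In the indexcmds.c hunks above, the permission checks move to the PostgreSQL 16 object_aclcheck() form, and the recursive DefineIndex() call for child partitions keeps the GPDB shape (no childAddr/invalidate_parent handling) while gaining the extra partition-count argument added upstream (the -1 literal in the hunk; the parameter name total_parts is assumed from upstream). The resolved call, reassembled from the wrapped lines:

    DefineIndex(childRelid, childStmt,
                InvalidOid,          /* no predefined OID */
                indexRelationId,     /* this is our child */
                createdConstraintId,
                -1,                  /* partition count not known here (assumed total_parts) */
                is_alter_table, check_rights,
                check_not_in_use,
                skip_build, quiet);
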
diff --git a/src/backend/commands/lockcmds.c b/src/backend/commands/lockcmds.c
index c7f0e41f014..0b9588312ed 100644
--- a/src/backend/commands/lockcmds.c
+++ b/src/backend/commands/lockcmds.c
@@ -111,14 +111,9 @@ RangeVarCallbackForLockTable(const RangeVar *rv, Oid relid, Oid oldrelid,
relkind != RELKIND_VIEW && relkind != RELKIND_DIRECTORY_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-<<<<<<< HEAD
-				 errmsg("\"%s\" is not a table, directory table or view",
-						rv->relname)));
-=======
 				 errmsg("cannot lock relation \"%s\"",
 						rv->relname),
 				 errdetail_relkind_not_supported(relkind)));
->>>>>>> REL_16_9
#if 0 /* Upstream code not applicable to GPDB */
/*
diff --git a/src/backend/commands/opclasscmds.c b/src/backend/commands/opclasscmds.c
index 55b584988a9..ee7919791aa 100644
--- a/src/backend/commands/opclasscmds.c
+++ b/src/backend/commands/opclasscmds.c
@@ -822,7 +822,6 @@ DefineOpFamily(CreateOpFamilyStmt *stmt)
errmsg("must be superuser to create an
operator family")));
/* Insert pg_opfamily catalog entry */
-<<<<<<< HEAD
ObjectAddress objAddr;
objAddr = CreateOpFamily(stmt, opfname, namespaceoid, amoid);
@@ -837,9 +836,6 @@ DefineOpFamily(CreateOpFamilyStmt *stmt)
}
return objAddr;
-=======
- return CreateOpFamily(stmt, opfname, namespaceoid, amoid);
->>>>>>> REL_16_9
}
diff --git a/src/backend/commands/prepare.c b/src/backend/commands/prepare.c
index 0946d931e88..871b84bf37b 100644
--- a/src/backend/commands/prepare.c
+++ b/src/backend/commands/prepare.c
@@ -63,12 +63,10 @@ PrepareQuery(ParseState *pstate, PrepareStmt *stmt,
CachedPlanSource *plansource;
Oid *argtypes = NULL;
int nargs;
+ Query *query;
List *query_list;
-<<<<<<< HEAD
- int i;
+ int i;
NodeTag srctag; /* GPDB */
-=======
->>>>>>> REL_16_9
/*
* Disallow empty-string statement name (conflicts with protocol-level
@@ -121,7 +119,6 @@ PrepareQuery(ParseState *pstate, PrepareStmt *stmt,
* information about unknown parameters to be deduced from context.
* Rewrite the query. The result could be 0, 1, or many queries.
*/
-<<<<<<< HEAD
query = parse_analyze_varparams(rawstmt, pstate->p_sourcetext,
&argtypes, &nargs);
@@ -166,10 +163,6 @@ PrepareQuery(ParseState *pstate, PrepareStmt *stmt,
/* Rewrite the query. The result could be 0, 1, or many queries. */
query_list = QueryRewrite(query);
-=======
-	query_list = pg_analyze_and_rewrite_varparams(rawstmt, pstate->p_sourcetext,
-												  &argtypes, &nargs, NULL);
->>>>>>> REL_16_9
/* Finish filling in the CachedPlanSource */
CompleteCachedPlan(plansource,
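
PrepareQuery() keeps the GPDB two-step flow rather than upstream's single pg_analyze_and_rewrite_varparams() call, since GPDB examines the analyzed Query (e.g. to record srctag) before rewriting. A minimal sketch of the retained path, using only the calls visible in the hunks:

    query = parse_analyze_varparams(rawstmt, pstate->p_sourcetext,
                                    &argtypes, &nargs);
    /* GPDB inspects the analyzed Query here (srctag, etc.) before rewriting */
    query_list = QueryRewrite(query);
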
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index e84321a79e6..09732b2b6d9 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -929,8 +929,7 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
CStringGetDatum(trigname));
values[Anum_pg_trigger_tgfoid - 1] = ObjectIdGetDatum(funcoid);
values[Anum_pg_trigger_tgtype - 1] = Int16GetDatum(tgtype);
-<<<<<<< HEAD
-
+
/*
* Special for Apache Cloudberry: Ignore foreign keys for now. Create
* the triggers to back them as 'disabled'.
@@ -953,12 +952,7 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
elog(WARNING, "unrecognized internal trigger function
%u", funcoid);
}
values[Anum_pg_trigger_tgenabled - 1] = CharGetDatum(tgenabled);
-
- values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal ||
in_partition);
-=======
- values[Anum_pg_trigger_tgenabled - 1] = trigger_fires_when;
values[Anum_pg_trigger_tgisinternal - 1] = BoolGetDatum(isInternal);
->>>>>>> REL_16_9
values[Anum_pg_trigger_tgconstrrelid - 1] =
ObjectIdGetDatum(constrrelid);
values[Anum_pg_trigger_tgconstrindid - 1] = ObjectIdGetDatum(indexOid);
values[Anum_pg_trigger_tgconstraint - 1] =
ObjectIdGetDatum(constraintOid);
@@ -1274,11 +1268,7 @@ CreateTriggerFiringOn(CreateTrigStmt *stmt, const char *queryString,
CreateTriggerFiringOn(childStmt, queryString,
partdesc->oids[i], refRelOid,
-<<<<<<< HEAD
- InvalidOid,
indexOnChild,
-=======
InvalidOid,
InvalidOid,
->>>>>>> REL_16_9
funcoid,
trigoid, qual,
isInternal,
true, trigger_fires_when);
@@ -1526,14 +1516,9 @@ RangeVarCallbackForRenameTrigger(const RangeVar *rv, Oid relid, Oid oldrelid,
form->relkind != RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
-<<<<<<< HEAD
-				 errmsg("\"%s\" is not a table, directory table, view, or foreign table",
-						rv->relname)));
-=======
 				 errmsg("relation \"%s\" cannot have triggers",
 						rv->relname),
 				 errdetail_relkind_not_supported(form->relkind)));
->>>>>>> REL_16_9
/* you must own the table to rename one of its triggers */
if (!object_ownercheck(RelationRelationId, relid, GetUserId()))
@@ -2640,21 +2625,12 @@ ExecARInsertTriggers(EState *estate, ResultRelInfo *relinfo,
if ((trigdesc && trigdesc->trig_insert_after_row) ||
(transition_capture &&
transition_capture->tcs_insert_new_table))
-<<<<<<< HEAD
- {
- AfterTriggerSaveEvent(estate, relinfo, TRIGGER_EVENT_INSERT,
- true, NULL, slot,
- recheckIndexes, NULL,
- transition_capture);
- }
-=======
AfterTriggerSaveEvent(estate, relinfo, NULL, NULL,
TRIGGER_EVENT_INSERT,
true, NULL, slot,
recheckIndexes, NULL,
transition_capture,
false);
->>>>>>> REL_16_9
}
bool
@@ -5387,23 +5363,17 @@ AfterTriggerFreeQuery(AfterTriggersQueryData *qs)
ts = table->old_upd_tuplestore;
table->old_upd_tuplestore = NULL;
if (ts)
-<<<<<<< HEAD
release_or_prolong_tuplestore(ts, table->prolonged);
- ts = table->new_tuplestore;
- table->new_tuplestore = NULL;
-=======
- tuplestore_end(ts);
ts = table->new_upd_tuplestore;
table->new_upd_tuplestore = NULL;
if (ts)
- tuplestore_end(ts);
+ release_or_prolong_tuplestore(ts, table->prolonged);
ts = table->old_del_tuplestore;
table->old_del_tuplestore = NULL;
if (ts)
- tuplestore_end(ts);
+ release_or_prolong_tuplestore(ts, table->prolonged);
ts = table->new_ins_tuplestore;
table->new_ins_tuplestore = NULL;
->>>>>>> REL_16_9
if (ts)
release_or_prolong_tuplestore(ts, table->prolonged);
if (table->storeslot)
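
In AfterTriggerFreeQuery(), the REL_16_9 side split the transition tuplestores into old_upd/new_upd/old_del/new_ins variants; the resolution keeps that split but routes every one of them through the GPDB helper instead of tuplestore_end(). The per-store pattern, as merged:

    ts = table->new_upd_tuplestore;
    table->new_upd_tuplestore = NULL;
    if (ts)
        release_or_prolong_tuplestore(ts, table->prolonged);
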
diff --git a/src/backend/commands/variable.c b/src/backend/commands/variable.c
index 474a97763dd..a872ce5028a 100644
--- a/src/backend/commands/variable.c
+++ b/src/backend/commands/variable.c
@@ -24,12 +24,9 @@
#include "access/xlog.h"
#include "access/xlogprefetcher.h"
#include "catalog/pg_authid.h"
-<<<<<<< HEAD
#include "cdb/cdbvars.h"
#include "commands/variable.h"
-=======
#include "common/string.h"
->>>>>>> REL_16_9
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "postmaster/postmaster.h"
diff --git a/src/backend/commands/view.c b/src/backend/commands/view.c
index ed7a47a97cd..5b96c37956e 100644
--- a/src/backend/commands/view.c
+++ b/src/backend/commands/view.c
@@ -3,13 +3,9 @@
* view.c
* use rewrite rules to construct views
*
-<<<<<<< HEAD
* Portions Copyright (c) 2006-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -402,15 +398,11 @@ DefineView(ViewStmt *stmt, const char *queryString,
rawstmt->stmt_location = stmt_location;
rawstmt->stmt_len = stmt_len;
-<<<<<<< HEAD
-		viewParse = parse_analyze(rawstmt, queryString, NULL, 0, NULL);
+		viewParse = parse_analyze_fixedparams(rawstmt, queryString, NULL, 0, NULL);
}
else
viewParse = (Query *) stmt->query;
viewParse_orig = copyObject(viewParse);
-=======
-	viewParse = parse_analyze_fixedparams(rawstmt, queryString, NULL, 0, NULL);
->>>>>>> REL_16_9
/*
* The grammar should ensure that the result is a single SELECT Query.
diff --git a/src/include/commands/copyto_internal.h b/src/include/commands/copyto_internal.h
index 35a2985878c..120a459f2d5 100644
--- a/src/include/commands/copyto_internal.h
+++ b/src/include/commands/copyto_internal.h
@@ -53,6 +53,7 @@ typedef struct CopyToStateData
char *filename; /* filename, or NULL for STDOUT */
 	char	   *dirfilename;	/* filename of directory table, not NULL for copy directory table to */
 	bool		is_program;		/* is 'filename' a program to popen? */
+ copy_data_dest_cb data_dest_cb; /* function for writing data */
CopyFormatOptions opts;
Node *whereClause; /* WHERE condition (or NULL) */
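
The new data_dest_cb field in CopyToStateData backs the COPY_CALLBACK destination now handled in copyto.c; CopySendEndOfRow() simply hands the accumulated row buffer to it, per the earlier hunk:

    case COPY_CALLBACK:
        cstate->data_dest_cb(fe_msgbuf->data, fe_msgbuf->len);
        break;
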