This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this
push:
new 5d62caf454e Fix all conflicts for executor
5d62caf454e is described below
commit 5d62caf454ebb0f99164fbe34dc7ef0ec0b26755
Author: Jinbao Chen <[email protected]>
AuthorDate: Mon Jul 21 16:28:29 2025 +0800
Fix all conflicts for executor
---
src/backend/executor/execMain.c | 73 ++-----
src/backend/executor/execUtils.c | 7 -
src/backend/executor/nodeAgg.c | 203 +-----------------
src/backend/executor/nodeHash.c | 117 ++--------
src/backend/executor/nodeHashjoin.c | 93 +-------
src/backend/executor/nodeModifyTable.c | 382 ++++-----------------------------
src/backend/executor/nodeWindowAgg.c | 86 --------
7 files changed, 85 insertions(+), 876 deletions(-)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 1c789d8020d..c182a406cb3 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -26,13 +26,9 @@
* before ExecutorEnd. This can be omitted only in case of EXPLAIN,
* which should also omit ExecutorRun.
*
-<<<<<<< HEAD
* Portions Copyright (c) 2005-2010, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -64,11 +60,8 @@
#include "jit/jit.h"
#include "mb/pg_wchar.h"
#include "miscadmin.h"
-<<<<<<< HEAD
#include "nodes/plannodes.h"
-=======
#include "parser/parse_relation.h"
->>>>>>> REL_16_9
#include "parser/parsetree.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
@@ -180,11 +173,13 @@ static void CheckValidRowMarkRel(Relation rel,
RowMarkType markType);
static void ExecPostprocessPlan(EState *estate);
static void ExecEndPlan(PlanState *planstate, EState *estate);
static void ExecutePlan(QueryDesc *queryDesc,
+ bool use_parallel_mode,
CmdType operation,
bool sendTuples,
uint64 numberTuples,
ScanDirection direction,
- DestReceiver *dest);
+ DestReceiver *dest,
+ bool execute_once);
static bool ExecCheckOneRelPerms(RTEPermissionInfo *perminfo);
static bool ExecCheckPermissionsModified(Oid relOid, Oid userid,
Bitmapset *modifiedCols,
@@ -931,26 +926,17 @@ standard_ExecutorRun(QueryDesc *queryDesc,
* run plan
*/
if (!ScanDirectionIsNoMovement(direction))
-<<<<<<< HEAD
{
if (execute_once && queryDesc->already_executed)
			elog(ERROR, "can't re-execute query flagged for single execution");
queryDesc->already_executed = true;
}
-=======
- ExecutePlan(queryDesc,
- operation,
- sendTuples,
- count,
- direction,
- dest);
/*
* Update es_total_processed to keep track of the number of tuples
* processed across multiple ExecutorRun() calls.
*/
estate->es_total_processed += estate->es_processed;
->>>>>>> REL_16_9
/*
* Need a try/catch block here so that if an ereport is called from
@@ -999,8 +985,7 @@ standard_ExecutorRun(QueryDesc *queryDesc,
Assert(motionState);
- ExecutePlan(estate,
- (PlanState *) motionState,
+ ExecutePlan(queryDesc,
amIParallel,
CMD_SELECT,
sendTuples,
@@ -1036,8 +1021,7 @@ standard_ExecutorRun(QueryDesc *queryDesc,
*/
EndpointNotifyQD(ENDPOINT_READY_ACK_MSG);
- ExecutePlan(estate,
- queryDesc->planstate,
+ ExecutePlan(queryDesc,
amIParallel,
operation,
true,
@@ -1055,8 +1039,7 @@ standard_ExecutorRun(QueryDesc *queryDesc,
		 * motion nodes at the fringe of the top slice to return
* without ever calling nodes below them.
*/
- ExecutePlan(estate,
- queryDesc->planstate,
+ ExecutePlan(queryDesc,
amIParallel,
operation,
sendTuples,
@@ -1541,13 +1524,8 @@ ExecCheckPermissions(List *rangeTable, List *rteperminfos,
* ExecCheckOneRelPerms
* Check access permissions for a single relation.
*/
-<<<<<<< HEAD
-bool
-ExecCheckRTEPerms(RangeTblEntry *rte)
-=======
static bool
ExecCheckOneRelPerms(RTEPermissionInfo *perminfo)
->>>>>>> REL_16_9
{
AclMode requiredPerms;
AclMode relPerms;
@@ -1751,26 +1729,19 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
{
RTEPermissionInfo *perminfo = lfirst_node(RTEPermissionInfo, l);
-<<<<<<< HEAD
rti++;
- if (rte->rtekind != RTE_RELATION)
- continue;
-
- if ((rte->requiredPerms & (~ACL_SELECT)) == 0)
- continue;
-
/*
	 * External and foreign tables don't need two phase commit which is for
* local mpp tables
*/
- if (get_rel_relkind(rte->relid) == RELKIND_FOREIGN_TABLE)
-=======
+ if (get_rel_relkind(perminfo->relid) == RELKIND_FOREIGN_TABLE)
+ continue;
+
if ((perminfo->requiredPerms & (~ACL_SELECT)) == 0)
continue;
if (isTempNamespace(get_rel_namespace(perminfo->relid)))
->>>>>>> REL_16_9
continue;
if (isTempNamespace(get_rel_namespace(rte->relid)))
@@ -1782,7 +1753,7 @@ ExecCheckXactReadOnly(PlannedStmt *plannedstmt)
/* CDB: Allow SELECT FOR SHARE/UPDATE *
*
*/
- if ((rte->requiredPerms & ~(ACL_SELECT | ACL_SELECT_FOR_UPDATE)) == 0)
+	if ((perminfo->requiredPerms & ~(ACL_SELECT | ACL_SELECT_FOR_UPDATE)) == 0)
{
ListCell *cell;
bool foundRTI = false;
@@ -1850,14 +1821,10 @@ InitPlan(QueryDesc *queryDesc, int eflags)
/*
* Do permissions checks
*/
-<<<<<<< HEAD
if (operation != CMD_SELECT || Gp_role != GP_ROLE_EXECUTE)
{
- ExecCheckRTPerms(rangeTable, true);
+ ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true);
}
-=======
- ExecCheckPermissions(rangeTable, plannedstmt->permInfos, true);
->>>>>>> REL_16_9
/*
* initialize the node's execution state
@@ -1976,7 +1943,6 @@ InitPlan(QueryDesc *queryDesc, int eflags)
	 * We may not have any motion in the current slice, e.g., in insert query
* the root may not have any motion.
*/
-<<<<<<< HEAD
if (NULL != m)
{
start_plan_node = (Plan *) m;
@@ -2016,12 +1982,7 @@ InitPlan(QueryDesc *queryDesc, int eflags)
			 * GPDB: We always set the REWIND flag, to delay eagerfree.
*/
sp_eflags = eflags
-				& (EXEC_FLAG_EXPLAIN_ONLY | EXEC_FLAG_WITH_NO_DATA);
-=======
-			sp_eflags = eflags
-				& ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
-			if (bms_is_member(i, plannedstmt->rewindPlanIDs))
->>>>>>> REL_16_9
+				& ~(EXEC_FLAG_REWIND | EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK);
sp_eflags |= EXEC_FLAG_REWIND;
/* set our global sliceid variable for elog. */
@@ -2871,11 +2832,13 @@ ExecCloseRangeTableRelations(EState *estate)
*/
static void
ExecutePlan(QueryDesc *queryDesc,
+ bool use_parallel_mode,
CmdType operation,
bool sendTuples,
uint64 numberTuples,
ScanDirection direction,
- DestReceiver *dest)
+ DestReceiver *dest,
+ bool execute_once)
{
EState *estate = queryDesc->estate;
PlanState *planstate = queryDesc->planstate;
@@ -2907,14 +2870,8 @@ ExecutePlan(QueryDesc *queryDesc,
	 * already partially executed it, or if the caller asks us to exit early,
* we must force the plan to run without parallelism.
*/
-<<<<<<< HEAD
if (!execute_once || GP_ROLE_DISPATCH == Gp_role)
-=======
- if (queryDesc->already_executed || numberTuples != 0)
->>>>>>> REL_16_9
use_parallel_mode = false;
- else
- use_parallel_mode = queryDesc->plannedstmt->parallelModeNeeded;
queryDesc->already_executed = true;
estate->es_use_parallel_mode = use_parallel_mode;
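
Reviewer note on the execMain.c hunks: all three GPDB call sites of
ExecutePlan() (motion-node, endpoint, and normal paths) now go through the
queryDesc-based REL_16_9 signature, while the parallel-mode decision keeps the
GPDB gate instead of upstream's already_executed/numberTuples test. A
condensed sketch of the resolved gate -- illustrative only, names as in the
hunks above:

    /* Multi-call execution (e.g. cursors) and the QD dispatcher never run
     * the plan tree with PG-style parallel workers. */
    if (!execute_once || GP_ROLE_DISPATCH == Gp_role)
        use_parallel_mode = false;
    queryDesc->already_executed = true;
    estate->es_use_parallel_mode = use_parallel_mode;
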
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 633496383bb..f5631dfc938 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -3,13 +3,9 @@
* execUtils.c
* miscellaneous executor utility routines
*
-<<<<<<< HEAD
* Portions Copyright (c) 2005-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -105,12 +101,9 @@
static bool tlist_matches_tupdesc(PlanState *ps, List *tlist, int varno,
TupleDesc tupdesc);
static void ShutdownExprContext(ExprContext *econtext, bool isCommit);
-<<<<<<< HEAD
static List *flatten_logic_exprs(Node *node);
ProcessDispatchResult_hook_type ProcessDispatchResult_hook = NULL;
-=======
static RTEPermissionInfo *GetResultRTEPermissionInfo(ResultRelInfo *relinfo,
EState *estate);
->>>>>>> REL_16_9
/* ----------------------------------------------------------------
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index df71ea805e8..46ac1416aa2 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -237,17 +237,13 @@
* to filter expressions having to be evaluated early, and allows to JIT
* the entire expression into one native function.
*
-<<<<<<< HEAD
* GPDB: Note that statement_mem is used to decide the operator memory
* instead of the work_mem, but to keep minimal change with postgres we keep
* the word "work_mem" in comments.
*
* Portions Copyright (c) 2007-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
@@ -422,7 +418,6 @@ static void build_hash_table(AggState *aggstate, int setno,
long nbuckets);
static void hashagg_recompile_expressions(AggState *aggstate, bool minslot,
bool nullcheck);
static long hash_choose_num_buckets(double hashentrysize,
-<<<<<<< HEAD
									long estimated_nbuckets,
									Size memory);
static int hash_choose_num_partitions(AggState *aggstate,
@@ -430,13 +425,6 @@ static int hash_choose_num_partitions(AggState *aggstate,
									  double hashentrysize,
									  int used_bits,
									  int *log2_npartittions);
-=======
-									long ngroups, Size memory);
-static int hash_choose_num_partitions(double input_groups,
-									  double hashentrysize,
-									  int used_bits,
-									  int *log2_npartitions);
->>>>>>> REL_16_9
static void initialize_hash_entry(AggState *aggstate,
TupleHashTable hashtable,
TupleHashEntry entry);
@@ -456,12 +444,8 @@ static HashAggBatch *hashagg_batch_new(LogicalTape *input_tape, int setno,
									   int64 input_tuples, double input_card,
									   int used_bits);
static MinimalTuple hashagg_batch_read(HashAggBatch *batch, uint32 *hashp);
-<<<<<<< HEAD
static void hashagg_spill_init(AggState *aggstate,
-							   HashAggSpill *spill, HashTapeInfo *tapeinfo,
-=======
-static void hashagg_spill_init(HashAggSpill *spill, LogicalTapeSet *tapeset,
->>>>>>> REL_16_9
+							   HashAggSpill *spill, LogicalTapeSet *tapeset,
							   int used_bits,
							   double input_groups,
							   double hashentrysize);
static Size hashagg_spill_tuple(AggState *aggstate, HashAggSpill *spill,
@@ -559,13 +543,8 @@ initialize_phase(AggState *aggstate, int newphase)
sortnode->sortOperators,
sortnode->collations,
sortnode->nullsFirst,
-<<<<<<< HEAD
PlanStateOperatorMemKB((PlanState *) aggstate),
-											NULL, false);
-=======
-											work_mem,
NULL, TUPLESORT_NONE);
->>>>>>> REL_16_9
}
aggstate->current_phase = newphase;
@@ -642,11 +621,7 @@ initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans,
										  pertrans->sortOperators[0],
										  pertrans->sortCollations[0],
										  pertrans->sortNullsFirst[0],
-<<<<<<< HEAD
-										  PlanStateOperatorMemKB((PlanState *) aggstate), NULL, false);
-=======
-										  work_mem, NULL, TUPLESORT_NONE);
->>>>>>> REL_16_9
+										  PlanStateOperatorMemKB((PlanState *) aggstate), NULL, TUPLESORT_NONE);
}
else
pertrans->sortstates[aggstate->current_set] =
@@ -656,11 +631,7 @@ initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans,
										  pertrans->sortOperators,
										  pertrans->sortCollations,
										  pertrans->sortNullsFirst,
-<<<<<<< HEAD
-										  PlanStateOperatorMemKB((PlanState *) aggstate), NULL, false);
-=======
-										  work_mem, NULL, TUPLESORT_NONE);
->>>>>>> REL_16_9
+										  PlanStateOperatorMemKB((PlanState *) aggstate), NULL, TUPLESORT_NONE);
}
/*
@@ -1206,19 +1177,6 @@ finalize_aggregate(AggState *aggstate,
*resultIsNull = pergroupstate->transValueIsNull;
}
-<<<<<<< HEAD
- /*
- * If result is pass-by-ref, make sure it is in the right context.
- */
- if (!peragg->resulttypeByVal && !*resultIsNull &&
- !MemoryContextContainsGenericAllocation(CurrentMemoryContext,
-											DatumGetPointer(*resultVal)))
-		*resultVal = datumCopy(*resultVal,
-							   peragg->resulttypeByVal,
-							   peragg->resulttypeLen);
-
-=======
->>>>>>> REL_16_9
MemoryContextSwitchTo(oldContext);
}
@@ -1279,17 +1237,6 @@ finalize_partialaggregate(AggState *aggstate,
*resultIsNull = pergroupstate->transValueIsNull;
}
-<<<<<<< HEAD
- /* If result is pass-by-ref, make sure it is in the right context. */
- if (!peragg->resulttypeByVal && !*resultIsNull &&
- !MemoryContextContainsGenericAllocation(CurrentMemoryContext,
-											DatumGetPointer(*resultVal)))
-		*resultVal = datumCopy(*resultVal,
-							   peragg->resulttypeByVal,
-							   peragg->resulttypeLen);
-
-=======
->>>>>>> REL_16_9
MemoryContextSwitchTo(oldContext);
}
@@ -1904,7 +1851,6 @@ hash_agg_set_limits(AggState *aggstate, double hashentrysize, double input_group
{
int npartitions;
Size partition_mem;
-<<<<<<< HEAD
uint64 strict_memlimit = work_mem;
if (aggstate)
@@ -1921,17 +1867,6 @@ hash_agg_set_limits(AggState *aggstate, double hashentrysize, double input_group
*num_partitions = 0;
*mem_limit = strict_memlimit * 1024L;
*ngroups_limit = *mem_limit / hashentrysize;
-=======
- Size hash_mem_limit = get_hash_memory_limit();
-
- /* if not expected to spill, use all of hash_mem */
- if (input_groups * hashentrysize <= hash_mem_limit)
- {
- if (num_partitions != NULL)
- *num_partitions = 0;
- *mem_limit = hash_mem_limit;
- *ngroups_limit = hash_mem_limit / hashentrysize;
->>>>>>> REL_16_9
return;
}
@@ -1957,17 +1892,10 @@ hash_agg_set_limits(AggState *aggstate, double hashentrysize, double input_group
	 * minimum number of partitions, so we aren't going to dramatically exceed
* work mem anyway.
*/
-<<<<<<< HEAD
if (strict_memlimit * 1024L > 4 * partition_mem)
*mem_limit = strict_memlimit * 1024L - partition_mem;
else
*mem_limit = strict_memlimit * 1024L * 0.75;
-=======
- if (hash_mem_limit > 4 * partition_mem)
- *mem_limit = hash_mem_limit - partition_mem;
- else
- *mem_limit = hash_mem_limit * 0.75;
->>>>>>> REL_16_9
if (*mem_limit > hashentrysize)
*ngroups_limit = *mem_limit / hashentrysize;
@@ -2037,11 +1965,7 @@ hash_agg_enter_spill_mode(AggState *aggstate)
AggStatePerHash perhash = &aggstate->perhash[setno];
HashAggSpill *spill = &aggstate->hash_spills[setno];
-<<<<<<< HEAD
-			hashagg_spill_init(aggstate, spill, aggstate->hash_tapeinfo, 0,
-=======
-			hashagg_spill_init(spill, aggstate->hash_tapeset, 0,
->>>>>>> REL_16_9
+			hashagg_spill_init(aggstate, spill, aggstate->hash_tapeset, 0,
perhash->aggnode->numGroups,
aggstate->hashentrysize);
}
@@ -2144,21 +2068,16 @@ static int
hash_choose_num_partitions(AggState *aggstate, double input_groups, double hashentrysize,
						   int used_bits, int *log2_npartitions)
{
-<<<<<<< HEAD
/* GPDB_14_MERGE_FIXME: no use in GPDB, work_mem instead */
#if 0
Size hash_mem_limit = get_hash_memory_limit();
#endif
-=======
- Size hash_mem_limit = get_hash_memory_limit();
->>>>>>> REL_16_9
double partition_limit;
double mem_wanted;
double dpartitions;
int npartitions;
int partition_bits;
-<<<<<<< HEAD
uint64 strict_memlimit = work_mem;
	// GPDB_14_MERGE_FIXME: PG14 applies `hash_mem_multiplier` to increase the memory
@@ -2170,29 +2089,19 @@ hash_choose_num_partitions(AggState *aggstate, double input_groups, double hashe
if (operator_mem < strict_memlimit)
strict_memlimit = operator_mem;
}
-=======
->>>>>>> REL_16_9
/*
* Avoid creating so many partitions that the memory requirements of the
* open partition files are greater than 1/4 of hash_mem.
*/
partition_limit =
-<<<<<<< HEAD
(strict_memlimit * 1024L * 0.25 - HASHAGG_READ_BUFFER_SIZE) /
-=======
- (hash_mem_limit * 0.25 - HASHAGG_READ_BUFFER_SIZE) /
->>>>>>> REL_16_9
HASHAGG_WRITE_BUFFER_SIZE;
mem_wanted = HASHAGG_PARTITION_FACTOR * input_groups * hashentrysize;
/* make enough partitions so that each one is likely to fit in memory */
-<<<<<<< HEAD
dpartitions = 1 + (mem_wanted / (strict_memlimit * 1024L));
-=======
- dpartitions = 1 + (mem_wanted / hash_mem_limit);
->>>>>>> REL_16_9
if (dpartitions > partition_limit)
dpartitions = partition_limit;
@@ -2315,11 +2224,7 @@ lookup_hash_entries(AggState *aggstate)
	TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
if (spill->partitions == NULL)
-<<<<<<< HEAD
-				hashagg_spill_init(aggstate, spill, aggstate->hash_tapeinfo, 0,
-=======
-				hashagg_spill_init(spill, aggstate->hash_tapeset, 0,
->>>>>>> REL_16_9
+				hashagg_spill_init(aggstate, spill, aggstate->hash_tapeset, 0,
								   perhash->aggnode->numGroups,
								   aggstate->hashentrysize);
@@ -2902,11 +2807,7 @@ agg_refill_hash_table(AggState *aggstate)
					 * that we don't assign tapes that will never be used.
					 */
					spill_initialized = true;
-<<<<<<< HEAD
-					hashagg_spill_init(aggstate, &spill, tapeinfo, batch->used_bits,
-=======
-					hashagg_spill_init(&spill, tapeset, batch->used_bits,
->>>>>>> REL_16_9
+					hashagg_spill_init(aggstate, &spill, tapeset, batch->used_bits,
batch->input_card, aggstate->hashentrysize);
}
/* no memory for a new group, spill */
@@ -3135,96 +3036,13 @@ agg_retrieve_hash_table_in_memory(AggState *aggstate)
}
/*
-<<<<<<< HEAD
- * Initialize HashTapeInfo
- */
-static void
-hashagg_tapeinfo_init(AggState *aggstate)
-{
- HashTapeInfo *tapeinfo = palloc(sizeof(HashTapeInfo));
-	int			init_tapes = 16;	/* expanded dynamically */
-
-	tapeinfo->tapeset = LogicalTapeSetCreate(init_tapes, true, NULL, NULL, -1);
- tapeinfo->ntapes = init_tapes;
- tapeinfo->nfreetapes = init_tapes;
- tapeinfo->freetapes_alloc = init_tapes;
- tapeinfo->freetapes = palloc(init_tapes * sizeof(int));
- for (int i = 0; i < init_tapes; i++)
- tapeinfo->freetapes[i] = i;
-
- aggstate->hash_tapeinfo = tapeinfo;
-
-#ifdef FAULT_INJECTOR
-	if (SIMPLE_FAULT_INJECTOR("hashagg_spill_temp_files") == FaultInjectorTypeSkip) {
-		const char *filename = LogicalTapeGetBufFilename(tapeinfo->tapeset);
-		if (!filename)
-			ereport(NOTICE, (errmsg("hashagg: buffilename is null")));
-		else if (strstr(filename, "base/" PG_TEMP_FILES_DIR) == filename)
-			ereport(NOTICE, (errmsg("hashagg: Use default tablespace")));
-		else if (strstr(filename, "pg_tblspc/") == filename)
-			ereport(NOTICE, (errmsg("hashagg: Use temp tablespace")));
-		else
-			ereport(NOTICE, (errmsg("hashagg: Unexpected prefix of the tablespace path")));
-
- }
-#endif
-}
-
-/*
- * Assign unused tapes to spill partitions, extending the tape set if
- * necessary.
- */
-static void
-hashagg_tapeinfo_assign(HashTapeInfo *tapeinfo, int *partitions,
- int npartitions)
-{
- int partidx = 0;
-
- /* use free tapes if available */
- while (partidx < npartitions && tapeinfo->nfreetapes > 0)
-		partitions[partidx++] = tapeinfo->freetapes[--tapeinfo->nfreetapes];
-
- if (partidx < npartitions)
- {
- LogicalTapeSetExtend(tapeinfo->tapeset, npartitions - partidx);
-
- while (partidx < npartitions)
- partitions[partidx++] = tapeinfo->ntapes++;
- }
-}
-
-/*
- * After a tape has already been written to and then read, this function
- * rewinds it for writing and adds it to the free list.
- */
-static void
-hashagg_tapeinfo_release(HashTapeInfo *tapeinfo, int tapenum)
-{
- /* rewinding frees the buffer while not in use */
- LogicalTapeRewindForWrite(tapeinfo->tapeset, tapenum);
- if (tapeinfo->freetapes_alloc == tapeinfo->nfreetapes)
- {
- tapeinfo->freetapes_alloc <<= 1;
- tapeinfo->freetapes = repalloc(tapeinfo->freetapes,
-									   tapeinfo->freetapes_alloc * sizeof(int));
- }
- tapeinfo->freetapes[tapeinfo->nfreetapes++] = tapenum;
-}
-
-/*
-=======
->>>>>>> REL_16_9
* hashagg_spill_init
*
* Called after we determined that spilling is necessary. Chooses the number
* of partitions to create, and initializes them.
*/
static void
-<<<<<<< HEAD
-hashagg_spill_init(AggState *aggstate, HashAggSpill *spill, HashTapeInfo *tapeinfo, int used_bits,
-=======
-hashagg_spill_init(HashAggSpill *spill, LogicalTapeSet *tapeset, int used_bits,
->>>>>>> REL_16_9
+hashagg_spill_init(AggState *aggstate, HashAggSpill *spill, LogicalTapeSet *tapeset, int used_bits,
double input_groups, double hashentrysize)
{
int npartitions;
@@ -4448,12 +4266,6 @@ build_pertrans_for_aggref(AggStatePerTrans pertrans,
	 * Set up infrastructure for calling the transfn. Note that invtransfn is
* not needed here.
*/
-<<<<<<< HEAD
- if (DO_AGGSPLIT_COMBINE(pertrans->aggref->aggsplit))
- {
- Expr *combinefnexpr;
- size_t numTransArgs;
-=======
build_aggregate_transfn_expr(inputTypes,
numArguments,
numDirectArgs,
@@ -4464,7 +4276,6 @@ build_pertrans_for_aggref(AggStatePerTrans pertrans,
InvalidOid,
&transfnexpr,
NULL);
->>>>>>> REL_16_9
fmgr_info(transfn_oid, &pertrans->transfn);
fmgr_info_set_expr((Node *) transfnexpr, &pertrans->transfn);
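
Reviewer note on the nodeAgg.c hunks: hash_agg_set_limits() keeps its spill
threshold derived from the operator's statement_mem budget (strict_memlimit,
in kB) rather than upstream's get_hash_memory_limit(). A worked sketch of the
resolved arithmetic -- the 125000 kB budget and 200-byte entry size are
illustrative assumptions, not values taken from this commit:

    uint64 strict_memlimit = 125000;                  /* kB, operator budget */
    double hashentrysize   = 200.0;                   /* bytes per group     */

    uint64 mem_limit     = strict_memlimit * 1024L;   /* = 128,000,000 bytes */
    uint64 ngroups_limit = mem_limit / hashentrysize; /* = 640,000 groups    */

Once spilling is expected, the same budget is reduced by partition_mem (or
scaled to 75% when the budget is small), exactly as in the hunk above.
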
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index acfdbaf5ecf..22a8f0b3589 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -3,13 +3,9 @@
* nodeHash.c
* Routines to hash relations for hashjoin
*
-<<<<<<< HEAD
* Portions Copyright (c) 2006-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -601,11 +597,7 @@ ExecHashTableCreate(HashState *state, HashJoinState *hjstate,
* per-query memory context. Everything else should be kept inside the
* subsidiary hashCxt, batchCxt or spillCxt.
*/
-<<<<<<< HEAD
- hashtable = (HashJoinTable) palloc0(sizeof(HashJoinTableData));
-=======
hashtable = palloc_object(HashJoinTableData);
->>>>>>> REL_16_9
hashtable->nbuckets = nbuckets;
hashtable->nbuckets_original = nbuckets;
hashtable->nbuckets_optimal = nbuckets;
@@ -663,16 +655,14 @@ ExecHashTableCreate(HashState *state, HashJoinState *hjstate,
"HashBatchContext",
ALLOCSET_DEFAULT_SIZES);
-<<<<<<< HEAD
/* CDB: track temp buf file allocations in separate context */
hashtable->bfCxt = AllocSetContextCreate(CurrentMemoryContext,
"hbbfcxt",
ALLOCSET_DEFAULT_SIZES);
-=======
+
hashtable->spillCxt = AllocSetContextCreate(hashtable->hashCxt,
"HashSpillContext",
ALLOCSET_DEFAULT_SIZES);
->>>>>>> REL_16_9
/* Allocate data that will live for the life of the hashjoin */
@@ -852,32 +842,20 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
/*
* Compute in-memory hashtable size limit from GUCs.
*/
-<<<<<<< HEAD
hash_table_bytes = operatorMemKB * 1024L;
-=======
- hash_table_bytes = get_hash_memory_limit();
->>>>>>> REL_16_9
/*
* Parallel Hash tries to use the combined hash_mem of all workers to
	 * avoid the need to batch. If that won't work, it falls back to hash_mem
* per worker and tries to process batches in parallel.
*/
-<<<<<<< HEAD
if (try_combined_hash_mem && parallel_workers > 0)
-=======
- if (try_combined_hash_mem)
->>>>>>> REL_16_9
{
/* Careful, this could overflow size_t */
double newlimit;
-<<<<<<< HEAD
		/* CBDB_PARALLEL_FIXME: if we enable pg style parallel some day, we should reconsider it. */
		newlimit = (double) hash_table_bytes * (double) parallel_workers;
-=======
-		newlimit = (double) hash_table_bytes * (double) (parallel_workers + 1);
->>>>>>> REL_16_9
newlimit = Min(newlimit, (double) SIZE_MAX);
hash_table_bytes = (size_t) newlimit;
}
@@ -998,15 +976,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
	 * gp_hashjoin_tuples_per_bucket tuples, whose projected size already includes
* overhead for the hash code, pointer to the next tuple, etc.
*/
-<<<<<<< HEAD
		bucket_size = (tupsize * gp_hashjoin_tuples_per_bucket + sizeof(HashJoinTuple));
		if (hash_table_bytes < bucket_size)
			sbuckets = 1;
-=======
-		bucket_size = (tupsize * NTUP_PER_BUCKET + sizeof(HashJoinTuple));
-		if (hash_table_bytes <= bucket_size)
-			sbuckets = 1;		/* avoid pg_nextpower2_size_t(0) */
->>>>>>> REL_16_9
		else
			sbuckets = pg_nextpower2_size_t(hash_table_bytes / bucket_size);
sbuckets = Min(sbuckets, max_pointers);
@@ -1222,7 +1194,6 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
		hashtable->outerBatchFile = repalloc0_array(hashtable->outerBatchFile, BufFile *, oldnbatch, nbatch);
}
-<<<<<<< HEAD
/* EXPLAIN ANALYZE batch statistics */
if (stats && stats->nbatchstats < nbatch)
{
@@ -1234,11 +1205,6 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
memset(stats->batchstats + stats->nbatchstats, 0, sz);
stats->nbatchstats = nbatch;
}
-
- MemoryContextSwitchTo(oldcxt);
-
-=======
->>>>>>> REL_16_9
hashtable->nbatch = nbatch;
/*
@@ -1311,14 +1277,9 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
				ExecHashJoinSaveTuple(NULL, HJTUPLE_MINTUPLE(hashTuple),
									  hashTuple->hashvalue,
-<<<<<<< HEAD
									  hashtable, &hashtable->innerBatchFile[batchno],
									  hashtable->bfCxt);
-=======
-									  &hashtable->innerBatchFile[batchno],
-									  hashtable);
->>>>>>> REL_16_9
hashtable->spaceUsed -= hashTupleSize;
spaceFreed += hashTupleSize;
@@ -1467,23 +1428,9 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
* array.
*/
					dtuples = (old_batch0->ntuples * 2.0) / new_nbatch;
-<<<<<<< HEAD
					dbuckets = ceil(dtuples / gp_hashjoin_tuples_per_bucket);
					dbuckets = Min(dbuckets, MaxAllocSize / sizeof(dsa_pointer_atomic));
-=======
-
-					/*
-					 * We need to calculate the maximum number of buckets to
-					 * stay within the MaxAllocSize boundary. Round the
-					 * maximum number to the previous power of 2 given that
-					 * later we round the number to the next power of 2.
-					 */
-					max_buckets = pg_prevpower2_32((uint32)
-												   (MaxAllocSize / sizeof(dsa_pointer_atomic)));
-					dbuckets = ceil(dtuples / NTUP_PER_BUCKET);
-					dbuckets = Min(dbuckets, max_buckets);
->>>>>>> REL_16_9
					new_nbuckets = (int) dbuckets;
					new_nbuckets = Max(new_nbuckets, 1024);
					new_nbuckets = pg_nextpower2_32(new_nbuckets);
@@ -2023,14 +1970,9 @@ ExecHashTableInsert(HashState *hashState, HashJoinTable hashtable,
		Assert(batchno > hashtable->curbatch);
		ExecHashJoinSaveTuple(ps, tuple,
							  hashvalue,
-<<<<<<< HEAD
							  hashtable, &hashtable->innerBatchFile[batchno],
							  hashtable->bfCxt);
-=======
-							  &hashtable->innerBatchFile[batchno],
-							  hashtable);
->>>>>>> REL_16_9
}
if (shouldFree)
@@ -2731,16 +2673,11 @@ ExecReScanHash(HashState *node)
* if chgParam of subnode is not null then plan will be re-scanned by
* first ExecProcNode.
*/
-<<<<<<< HEAD
- if (node->ps.lefttree->chgParam == NULL)
- ExecReScan(node->ps.lefttree);
+ if (outerPlan->chgParam == NULL)
+ ExecReScan(outerPlan);
if (gp_enable_runtime_filter_pushdown && node->filters)
ResetRuntimeFilter(node);
-=======
- if (outerPlan->chgParam == NULL)
- ExecReScan(outerPlan);
->>>>>>> REL_16_9
}
@@ -3435,16 +3372,10 @@ ExecHashRemoveNextSkewBucket(HashState *hashState, HashJoinTable hashtable)
	{
		/* Put the tuple into a temp file for later batches */
		Assert(batchno > hashtable->curbatch);
-<<<<<<< HEAD
		ExecHashJoinSaveTuple(ps, tuple,
							  hashvalue,
							  hashtable, &hashtable->innerBatchFile[batchno], hashtable->bfCxt);
-=======
-		ExecHashJoinSaveTuple(tuple, hashvalue,
-							  &hashtable->innerBatchFile[batchno],
-							  hashtable);
->>>>>>> REL_16_9
pfree(hashTuple);
hashtable->spaceUsed -= tupleSize;
hashtable->spaceUsedSkew -= tupleSize;
@@ -4057,15 +3988,6 @@ ExecHashTableDetachBatch(HashJoinTable hashtable)
sts_end_parallel_scan(hashtable->batches[curbatch].inner_tuples);
sts_end_parallel_scan(hashtable->batches[curbatch].outer_tuples);
-<<<<<<< HEAD
- /* Detach from the batch we were last working on. */
- /*
-	 * CBDB_PARALLEL: Parallel Hash Left Anti Semi (Not-In) Join(parallel-aware)
-	 * If phs_lasj_has_null is true, that means we have found null when building hash table,
-	 * there were no batches to detach.
-	 */
-	if (!hashtable->parallel_state->phs_lasj_has_null && BarrierArriveAndDetach(&batch->batch_barrier))
-=======
/* After attaching we always get at least to PHJ_BATCH_PROBE. */
Assert(BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE ||
			   BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_SCAN);
@@ -4083,7 +4005,6 @@ ExecHashTableDetachBatch(HashJoinTable hashtable)
*/
if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE &&
!hashtable->batches[curbatch].outer_eof)
->>>>>>> REL_16_9
{
/*
			 * This flag may be written to by multiple backends during
@@ -4100,7 +4021,13 @@ ExecHashTableDetachBatch(HashJoinTable hashtable)
*/
if (BarrierPhase(&batch->batch_barrier) == PHJ_BATCH_PROBE)
			attached = BarrierArriveAndDetachExceptLast(&batch->batch_barrier);
-		if (attached && BarrierArriveAndDetach(&batch->batch_barrier))
+
+		/*
+		 * CBDB_PARALLEL: Parallel Hash Left Anti Semi (Not-In) Join(parallel-aware)
+		 * If phs_lasj_has_null is true, that means we have found null when building hash table,
+		 * there were no batches to detach.
+		 */
+		if (!hashtable->parallel_state->phs_lasj_has_null && attached && BarrierArriveAndDetach(&batch->batch_barrier))
{
/*
			 * We are not longer attached to the batch barrier, but we're the
@@ -4399,16 +4326,15 @@ get_hash_memory_limit(void)
size_t
get_hash_memory_limit(void)
{
-<<<<<<< HEAD
- size_t mem_limit = get_hash_memory_limit();
+ double mem_limit;
- /* Remove the kilobyte factor */
- mem_limit /= 1024;
+ /* Do initial calculation in double arithmetic */
+ mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
- /* Clamp to MAX_KILOBYTES, like work_mem */
- mem_limit = Min(mem_limit, (size_t) MAX_KILOBYTES);
+ /* Clamp in case it doesn't fit in size_t */
+ mem_limit = Min(mem_limit, (double) SIZE_MAX);
- return (int) mem_limit;
+ return (size_t) mem_limit;
}
/*
@@ -4548,15 +4474,4 @@ ResetRuntimeFilter(HashState *node)
attr_filter->min = LONG_MAX;
attr_filter->max = LONG_MIN;
}
-=======
- double mem_limit;
-
- /* Do initial calculation in double arithmetic */
- mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
-
- /* Clamp in case it doesn't fit in size_t */
- mem_limit = Min(mem_limit, (double) SIZE_MAX);
-
- return (size_t) mem_limit;
->>>>>>> REL_16_9
}
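
Reviewer note on the nodeHash.c hunks: the resolved get_hash_memory_limit()
is the plain REL_16_9 body; the old GPDB wrapper that re-divided by 1024 and
clamped to MAX_KILOBYTES is gone. With the stock defaults work_mem = 4096 kB
and hash_mem_multiplier = 2.0 (PostgreSQL defaults, not values this commit
changes), the resolved arithmetic works out to:

    double mem_limit = (double) work_mem * hash_mem_multiplier * 1024.0;
    /* 4096 * 2.0 * 1024.0 = 8,388,608 bytes (8 MB) */
    mem_limit = Min(mem_limit, (double) SIZE_MAX); /* clamp before the cast */
    return (size_t) mem_limit;
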
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index 92eda332d26..6c97a4bd48e 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -3,13 +3,9 @@
* nodeHashjoin.c
* Routines to handle hash join nodes
*
-<<<<<<< HEAD
* Portions Copyright (c) 2005-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -322,13 +318,6 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
Assert(hashtable == NULL);
/*
-<<<<<<< HEAD
-				 * MPP-4165: My fix for MPP-3300 was correct in that we avoided
-				 * the *deadlock* but had very unexpected (and painful)
-				 * performance characteristics: we basically de-pipeline and
-				 * de-parallelize execution of any query which has motion below
-				 * us.
-=======
				 * If the outer relation is completely empty, and it's not
				 * right/right-anti/full join, we can quit without building
				 * the hash table. However, for an inner join it is only a
@@ -339,7 +328,6 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
				 * should always make this check, since we aren't going to be
				 * able to skip the join on the strength of an empty inner
				 * relation anyway.)
->>>>>>> REL_16_9
				 *
				 * So now prefetch_inner is set (see createplan.c) if we have *any* motion
				 * below us. If we don't have any motion, it doesn't matter.
@@ -643,17 +631,11 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
*/
Assert(parallel_state == NULL);
Assert(batchno > hashtable->curbatch);
-<<<<<<< HEAD
					ExecHashJoinSaveTuple(&node->js.ps, mintuple,
										  hashvalue,
										  hashtable, &hashtable->outerBatchFile[batchno],
										  hashtable->bfCxt);
-=======
-					ExecHashJoinSaveTuple(mintuple, hashvalue,
-										  &hashtable->outerBatchFile[batchno],
-										  hashtable);
->>>>>>> REL_16_9
if (shouldFree)
heap_free_minimal_tuple(mintuple);
@@ -1517,34 +1499,8 @@ ExecHashJoinNewBatch(HashJoinState *hjstate)
if (!ExecHashJoinReloadHashTable(hjstate))
{
-<<<<<<< HEAD
/* We no longer continue as we couldn't load the batch */
return false;
-=======
-		if (BufFileSeek(innerFile, 0, 0, SEEK_SET))
-			ereport(ERROR,
-					(errcode_for_file_access(),
-					 errmsg("could not rewind hash-join temporary file")));
-
-		while ((slot = ExecHashJoinGetSavedTuple(hjstate,
-												 innerFile,
-												 &hashvalue,
-												 hjstate->hj_HashTupleSlot)))
-		{
-			/*
-			 * NOTE: some tuples may be sent to future batches. Also, it is
-			 * possible for hashtable->nbatch to be increased here!
-			 */
-			ExecHashTableInsert(hashtable, slot, hashvalue);
-		}
-
-		/*
-		 * after we build the hash table, the inner batch file is no longer
-		 * needed
-		 */
-		BufFileClose(innerFile);
-		hashtable->innerBatchFile[curbatch] = NULL;
->>>>>>> REL_16_9
}
/*
@@ -1603,16 +1559,11 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
{
SharedTuplestoreAccessor *inner_tuples;
Barrier *batch_barrier =
-<<<<<<< HEAD
&hashtable->batches[batchno].shared->batch_barrier;
		int			phase = BarrierAttach(batch_barrier);
-=======
-			&hashtable->batches[batchno].shared->batch_barrier;
->>>>>>> REL_16_9
if (hashtable->nbatch == 1 && batchno == 0 &&
((HashJoin *)hjstate->js.ps.plan)->batch0_barrier)
{
-<<<<<<< HEAD
Assert(phase == PHJ_BATCH_PROBING);
batch0_barrier = &pstate->batch0_barrier;
@@ -1622,10 +1573,7 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
switch (phase)
{
- case PHJ_BATCH_ELECTING:
-=======
case PHJ_BATCH_ELECT:
->>>>>>> REL_16_9
				/* One backend allocates the hash table. */
if (BarrierArriveAndWait(batch_barrier,
@@ -1729,7 +1677,6 @@ ExecParallelHashJoinNewBatch(HashJoinState *hjstate)
* created for the hashtable.
*/
void
-<<<<<<< HEAD
ExecHashJoinSaveTuple(PlanState *ps, MinimalTuple tuple, uint32 hashvalue,
					  HashJoinTable hashtable, BufFile **fileptr,
					  MemoryContext bfCxt)
@@ -1770,35 +1717,6 @@ ExecHashJoinSaveTuple(PlanState *ps, MinimalTuple tuple, uint32 hashvalue,
BufFileGetFilename(file));
MemoryContextSwitchTo(oldcxt);
-=======
-ExecHashJoinSaveTuple(MinimalTuple tuple, uint32 hashvalue,
-					  BufFile **fileptr, HashJoinTable hashtable)
-{
-	BufFile    *file = *fileptr;
-
-	/*
-	 * The batch file is lazily created. If this is the first tuple written to
-	 * this batch, the batch file is created and its buffer is allocated in
-	 * the spillCxt context, NOT in the batchCxt.
-	 *
-	 * During the build phase, buffered files are created for inner batches.
-	 * Each batch's buffered file is closed (and its buffer freed) after the
-	 * batch is loaded into memory during the outer side scan. Therefore, it
-	 * is necessary to allocate the batch file buffer in a memory context
-	 * which outlives the batch itself.
-	 *
-	 * Also, we use spillCxt instead of hashCxt for a better accounting of the
-	 * spilling memory consumption.
-	 */
-	if (file == NULL)
-	{
-		MemoryContext oldctx = MemoryContextSwitchTo(hashtable->spillCxt);
-
-		file = BufFileCreateTemp(false);
-		*fileptr = file;
-
-		MemoryContextSwitchTo(oldctx);
->>>>>>> REL_16_9
}
BufFileWrite(file, &hashvalue, sizeof(uint32));
@@ -1866,15 +1784,10 @@ ExecReScanHashJoin(HashJoinState *node)
*/
if (node->hj_HashTable != NULL)
{
-<<<<<<< HEAD
node->hj_HashTable->first_pass = false;
if (node->js.ps.righttree->chgParam == NULL &&
!node->hj_HashTable->eagerlyReleased)
-=======
- if (node->hj_HashTable->nbatch == 1 &&
- innerPlan->chgParam == NULL)
->>>>>>> REL_16_9
{
/*
			 * Okay to reuse the hash table; needn't rescan inner, either.
@@ -1913,13 +1826,9 @@ ExecReScanHashJoin(HashJoinState *node)
else
{
/* must destroy and rebuild hash table */
-<<<<<<< HEAD
if (!node->hj_HashTable->eagerlyReleased)
{
-			HashState  *hashNode = castNode(HashState, innerPlanState(node));
-=======
-			HashState  *hashNode = castNode(HashState, innerPlan);
->>>>>>> REL_16_9
+			HashState  *hashNode = castNode(HashState, innerPlan);
			Assert(hashNode->hashtable == node->hj_HashTable);
			/* accumulate stats from old hash table, if wanted */
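
Reviewer note on the nodeHashjoin.c hunks: ExecHashJoinSaveTuple() keeps the
GPDB signature (PlanState and the hashtable's bfCxt travel with every call),
and the upstream variant that lazily created the batch file in spillCxt is
removed. Both variants share the same write path; a sketch of it, assuming
the resolved signature and eliding GPDB-only bookkeeping:

    if (file == NULL)
    {
        /* Batch files are created lazily, in a context that outlives the
         * per-batch reset (bfCxt here; spillCxt in the removed variant). */
        MemoryContext oldcxt = MemoryContextSwitchTo(hashtable->bfCxt);

        file = BufFileCreateTemp(false);
        *fileptr = file;
        MemoryContextSwitchTo(oldcxt);
    }
    /* Each saved tuple is its hash value followed by the minimal tuple. */
    BufFileWrite(file, &hashvalue, sizeof(uint32));
    BufFileWrite(file, tuple, tuple->t_len);
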
diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c
index 8a281a55e33..99597dcdbcf 100644
--- a/src/backend/executor/nodeModifyTable.c
+++ b/src/backend/executor/nodeModifyTable.c
@@ -173,7 +172,6 @@ static TupleTableSlot *ExecPrepareTupleRouting(ModifyTableState *mtstate,
TupleTableSlot *slot,
ResultRelInfo **partRelInfo);
-<<<<<<< HEAD
static void
send_subtag(StringInfoData *buf, ExtendProtocolSubTag subtag, Bitmapset* relids);
@@ -185,7 +184,7 @@ notify_modified_relations_local(ModifyTableState *node);
static void
epd_add_subtag_data(ExtendProtocolSubTag subtag, Bitmapset *relids);
-=======
+
static TupleTableSlot *ExecMerge(ModifyTableContext *context,
								 ResultRelInfo *resultRelInfo,
								 ItemPointer tupleid,
@@ -199,7 +198,6 @@ static void ExecMergeNotMatched(ModifyTableContext *context,
								ResultRelInfo *resultRelInfo,
bool canSetTag);
->>>>>>> REL_16_9
/*
* Verify that the tuples to be produced by INSERT match the
@@ -827,16 +825,12 @@ static TupleTableSlot *
ExecInsert(ModifyTableContext *context,
ResultRelInfo *resultRelInfo,
TupleTableSlot *slot,
-<<<<<<< HEAD
TupleTableSlot *planSlot,
EState *estate,
- bool canSetTag,
- bool splitUpdate)
-=======
bool canSetTag,
TupleTableSlot **inserted_tuple,
- ResultRelInfo **insert_destrel)
->>>>>>> REL_16_9
+		   ResultRelInfo **insert_destrel,
+		   bool splitUpdate)
{
ModifyTableState *mtstate = context->mtstate;
EState *estate = context->estate;
@@ -1459,14 +1453,15 @@ ExecPendingInserts(EState *estate)
static bool
ExecDeletePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
ItemPointer tupleid, HeapTuple oldtuple,
-				   TupleTableSlot **epqreturnslot, TM_Result *result)
+				   TupleTableSlot **epqreturnslot, TM_Result *result, bool splitUpdate)
{
if (result)
*result = TM_Ok;
/* BEFORE ROW DELETE triggers */
if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->trig_delete_before_row)
+ resultRelInfo->ri_TrigDesc->trig_delete_before_row &&
+ !splitUpdate)
{
		/* Flush any pending inserts, so rows are visible to the triggers */
		if (context->estate->es_insert_pending_result_relations != NIL)
@@ -1511,7 +1506,7 @@ ExecDeleteAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
*/
static void
ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
-				   ItemPointer tupleid, HeapTuple oldtuple, bool changingPart)
+				   ItemPointer tupleid, HeapTuple oldtuple, bool changingPart, bool splitUpdate)
{
ModifyTableState *mtstate = context->mtstate;
EState *estate = context->estate;
@@ -1541,8 +1536,9 @@ ExecDeleteEpilogue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
}
/* AFTER ROW DELETE Triggers */
- ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
-						 ar_delete_trig_tcs, changingPart);
+	if (!RelationIsNonblockRelation(resultRelInfo->ri_RelationDesc) && !splitUpdate)
+		ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
+							 ar_delete_trig_tcs, changingPart);
}
/* ----------------------------------------------------------------
@@ -1575,23 +1571,13 @@ ExecDelete(ModifyTableContext *context,
ResultRelInfo *resultRelInfo,
ItemPointer tupleid,
HeapTuple oldtuple,
-<<<<<<< HEAD
- TupleTableSlot *planSlot,
- EPQState *epqstate,
- EState *estate,
int32 segid,
-=======
->>>>>>> REL_16_9
bool processReturning,
bool changingPart,
-<<<<<<< HEAD
- bool splitUpdate,
-=======
bool canSetTag,
TM_Result *tmresult,
->>>>>>> REL_16_9
bool *tupleDeleted,
- TupleTableSlot **epqreturnslot)
+ TupleTableSlot **epqreturnslot, bool splitUpdate)
{
EState *estate = context->estate;
Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
@@ -1602,7 +1588,6 @@ ExecDelete(ModifyTableContext *context,
*tupleDeleted = false;
/*
-<<<<<<< HEAD
* Sanity check the distribution of the tuple to prevent
* potential data corruption in case users manipulate data
* incorrectly (e.g. insert data on incorrect segment through
@@ -1616,30 +1601,13 @@ ExecDelete(ModifyTableContext *context,
tupleid->ip_posid,
segid);
- /* BEFORE ROW DELETE Triggers */
/*
-	 * Disallow DELETE triggers on a split UPDATE. See comments in ExecInsert().
- */
- if (resultRelInfo->ri_TrigDesc &&
- resultRelInfo->ri_TrigDesc->trig_delete_before_row &&
- !splitUpdate)
- {
- bool dodelete;
-
- dodelete = ExecBRDeleteTriggers(estate, epqstate, resultRelInfo,
-										tupleid, oldtuple, epqreturnslot);
-
- if (!dodelete) /* "do nothing" */
- return NULL;
- }
-=======
* Prepare for the delete. This includes BEFORE ROW triggers, so we're
* done if it says we are.
*/
if (!ExecDeletePrologue(context, resultRelInfo, tupleid, oldtuple,
-							epqreturnslot, tmresult))
+							epqreturnslot, tmresult, splitUpdate))
return NULL;
->>>>>>> REL_16_9
/* INSTEAD OF ROW DELETE Triggers */
if (resultRelInfo->ri_TrigDesc &&
@@ -1690,22 +1658,11 @@ ExecDelete(ModifyTableContext *context,
	 * special-case behavior needed for referential integrity updates in
* transaction-snapshot mode transactions.
*/
-<<<<<<< HEAD
-ldelete:;
-	result = table_tuple_delete(resultRelationDesc, tupleid,
-								estate->es_output_cid,
-								estate->es_snapshot,
-								estate->es_crosscheck_snapshot,
-								true /* wait for commit */ ,
-								&tmfd,
-								changingPart || splitUpdate);
-=======
ldelete:
-	result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart);
+	result = ExecDeleteAct(context, resultRelInfo, tupleid, changingPart || splitUpdate);
if (tmresult)
*tmresult = result;
->>>>>>> REL_16_9
switch (result)
{
@@ -1927,43 +1884,7 @@ ldelete:
if (tupleDeleted)
*tupleDeleted = true;
-<<<<<<< HEAD
- /*
- * If this delete is the result of a partition key update that moved the
- * tuple to a new partition, put this row into the transition OLD TABLE,
- * if there is one. We need to do this separately for DELETE and INSERT
- * because they happen on different tables.
- */
- ar_delete_trig_tcs = mtstate->mt_transition_capture;
- if (mtstate->operation == CMD_UPDATE && mtstate->mt_transition_capture
- && mtstate->mt_transition_capture->tcs_update_old_table)
- {
- ExecARUpdateTriggers(estate, resultRelInfo,
- tupleid,
- oldtuple,
- NULL,
- NULL,
-							 mtstate->mt_transition_capture);
-
- /*
- * We've already captured the NEW TABLE row, so make sure any AR
- * DELETE trigger fired below doesn't capture it again.
- */
- ar_delete_trig_tcs = NULL;
- }
-
- /* AFTER ROW DELETE Triggers */
- /*
-	 * Disallow DELETE triggers on a split UPDATE. See comments in ExecInsert().
- */
- if (!RelationIsNonblockRelation(resultRelationDesc) && !splitUpdate)
- {
- ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
- ar_delete_trig_tcs);
- }
-=======
-	ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart);
->>>>>>> REL_16_9
+	ExecDeleteEpilogue(context, resultRelInfo, tupleid, oldtuple, changingPart, splitUpdate);
/* Process RETURNING if present and if requested */
/*
@@ -2037,15 +1958,11 @@ static bool
ExecCrossPartitionUpdate(ModifyTableContext *context,
ResultRelInfo *resultRelInfo,
						 ItemPointer tupleid, HeapTuple oldtuple,
-<<<<<<< HEAD
-						 TupleTableSlot *slot, TupleTableSlot *planSlot,
-						 EPQState *epqstate, int32 segid, bool canSetTag,
-=======
TupleTableSlot *slot,
+ int32 segid,
bool canSetTag,
UpdateContext *updateCxt,
TM_Result *tmresult,
->>>>>>> REL_16_9
TupleTableSlot **retry_slot,
						 TupleTableSlot **inserted_tuple,
ResultRelInfo **insert_destrel)
@@ -2104,22 +2021,12 @@ ExecCrossPartitionUpdate(ModifyTableContext *context,
	 * Row movement, part 1. Delete the tuple, but skip RETURNING processing.
* We want to return rows from INSERT.
*/
-<<<<<<< HEAD
- ExecDelete(mtstate, resultRelInfo, tupleid, oldtuple, planSlot,
- epqstate, estate, segid,
-=======
ExecDelete(context, resultRelInfo,
- tupleid, oldtuple,
->>>>>>> REL_16_9
+ tupleid, oldtuple, segid,
false, /* processReturning */
true, /* changingPart */
-<<<<<<< HEAD
- false, /* splitUpdate */
- &tuple_deleted, &epqslot);
-=======
false, /* canSetTag */
- tmresult, &tuple_deleted, &epqslot);
->>>>>>> REL_16_9
+ tmresult, &tuple_deleted, &epqslot, false);
/*
	 * For some reason if DELETE didn't happen (e.g. trigger prevented it, or
@@ -2187,14 +2094,9 @@ ExecCrossPartitionUpdate(ModifyTableContext *context,
mtstate->mt_root_tuple_slot);
/* Tuple routing starts from the root table. */
-<<<<<<< HEAD
- *inserted_tuple = ExecInsert(mtstate, mtstate->rootResultRelInfo, slot,
-								 planSlot, estate, canSetTag, false /* splitUpdate */);
-=======
context->cpUpdateReturningSlot =
ExecInsert(context, mtstate->rootResultRelInfo, slot, canSetTag,
- inserted_tuple, insert_destrel);
->>>>>>> REL_16_9
+ inserted_tuple, insert_destrel, false);
/*
* Reset the transition state that may possibly have been written by
@@ -2214,24 +2116,10 @@ ExecCrossPartitionUpdate(ModifyTableContext *context,
* triggers. We return false if one of them makes the update a no-op;
* otherwise, return true.
*/
-<<<<<<< HEAD
-static TupleTableSlot *
-ExecUpdate(ModifyTableState *mtstate,
- ResultRelInfo *resultRelInfo,
- ItemPointer tupleid,
- HeapTuple oldtuple,
- TupleTableSlot *slot,
- TupleTableSlot *planSlot,
- EPQState *epqstate,
- EState *estate,
-		   int32 segid,	/* gpdb specific parameter, check if tuple to update is from local */
- bool canSetTag)
-=======
static bool
ExecUpdatePrologue(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
				   ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
TM_Result *result)
->>>>>>> REL_16_9
{
Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
@@ -2322,7 +2210,7 @@ ExecUpdatePrepareSlot(ResultRelInfo *resultRelInfo,
static TM_Result
ExecUpdateAct(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
			  ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
- bool canSetTag, UpdateContext *updateCxt)
+ bool canSetTag, UpdateContext *updateCxt, int32 segid)
{
EState *estate = context->estate;
Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
@@ -2384,7 +2272,7 @@ lreplace:
		 * if the tuple has been concurrently updated, a retry is needed.
		 */
		if (ExecCrossPartitionUpdate(context, resultRelInfo,
-									 tupleid, oldtuple, slot,
+									 tupleid, oldtuple, slot, segid,
canSetTag, updateCxt,
&result,
&retry_slot,
@@ -2490,14 +2378,15 @@ ExecUpdateEpilogue(ModifyTableContext *context, UpdateContext *updateCxt,
(updateCxt->updateIndexes == TU_Summarizing));
/* AFTER ROW UPDATE Triggers */
-	ExecARUpdateTriggers(context->estate, resultRelInfo,
-						 NULL, NULL,
-						 tupleid, oldtuple, slot,
-						 recheckIndexes,
-						 mtstate->operation == CMD_INSERT ?
-						 mtstate->mt_oc_transition_capture :
-						 mtstate->mt_transition_capture,
-						 false);
+	if (!RelationIsNonblockRelation(resultRelInfo->ri_RelationDesc))
+		ExecARUpdateTriggers(context->estate, resultRelInfo,
+							 NULL, NULL,
+							 tupleid, oldtuple, slot,
+							 recheckIndexes,
+							 mtstate->operation == CMD_INSERT ?
+							 mtstate->mt_oc_transition_capture :
+							 mtstate->mt_transition_capture,
+							 false);
list_free(recheckIndexes);
@@ -2615,7 +2504,7 @@ ExecCrossPartitionUpdateForeignKey(ModifyTableContext *context,
static TupleTableSlot *
ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
		   ItemPointer tupleid, HeapTuple oldtuple, TupleTableSlot *slot,
- bool canSetTag)
+ int32 segid, bool canSetTag)
{
EState *estate = context->estate;
Relation resultRelationDesc = resultRelInfo->ri_RelationDesc;
@@ -2680,87 +2569,16 @@ ExecUpdate(ModifyTableContext *context, ResultRelInfo *resultRelInfo,
redo_act:
lockedtid = *tupleid;
		result = ExecUpdateAct(context, resultRelInfo, tupleid, oldtuple, slot,
-							   canSetTag, &updateCxt);
+							   canSetTag, &updateCxt, segid);
		/*
		 * If ExecUpdateAct reports that a cross-partition update was done,
		 * then the RETURNING tuple (if any) has been projected and there's
* nothing else for us to do.
*/
-<<<<<<< HEAD
- partition_constraint_failed =
- resultRelationDesc->rd_rel->relispartition &&
- !ExecPartitionCheck(resultRelInfo, slot, estate, false);
- if (!partition_constraint_failed &&
- resultRelInfo->ri_WithCheckOptions != NIL)
- {
- /*
-			 * ExecWithCheckOptions() will skip any WCOs which are not of the
-			 * kind we are looking for at this point.
-			 */
-			ExecWithCheckOptions(WCO_RLS_UPDATE_CHECK,
-								 resultRelInfo, slot, estate);
- }
-
- /*
-		 * If a partition check failed, try to move the row into the right
- * partition.
- */
- if (partition_constraint_failed)
- {
- TupleTableSlot *inserted_tuple,
- *retry_slot;
- bool retry;
-
- /*
-			 * ExecCrossPartitionUpdate will first DELETE the row from the
-			 * partition it's currently in and then insert it back into the
-			 * root table, which will re-route it to the correct partition.
-			 * The first part may have to be repeated if it is detected that
-			 * the tuple we're trying to move has been concurrently updated.
-			 */
-			retry = !ExecCrossPartitionUpdate(mtstate, resultRelInfo, tupleid,
-											  oldtuple, slot, planSlot,
-											  epqstate, segid, canSetTag,
-											  &retry_slot, &inserted_tuple);
- if (retry)
- {
- slot = retry_slot;
- goto lreplace;
- }
-
- return inserted_tuple;
- }
-
- /*
-		 * Check the constraints of the tuple. We've already checked the
-		 * partition constraint above; however, we must still ensure the tuple
-		 * passes all other constraints, so we will call ExecConstraints() and
- * have it validate all remaining checks.
- */
- if (resultRelationDesc->rd_att->constr)
- ExecConstraints(resultRelInfo, slot, estate);
-
- /*
- * replace the heap tuple
- *
-		 * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check
-		 * that the row to be updated is visible to that snapshot, and throw a
-		 * can't-serialize error if not. This is a special-case behavior
-		 * needed for referential integrity updates in transaction-snapshot
- * mode transactions.
- */
-		result = table_tuple_update(resultRelationDesc, tupleid, slot,
-									estate->es_output_cid,
-									estate->es_snapshot,
-									estate->es_crosscheck_snapshot,
-									true /* wait for commit */ ,
-									&tmfd, &lockmode, &update_indexes);
-=======
if (updateCxt.crossPartUpdate)
return context->cpUpdateReturningSlot;
->>>>>>> REL_16_9
switch (result)
{
@@ -2795,12 +2613,8 @@ redo_act:
					 * AO case, as visimap update within same command happens at end
* of command.
*/
-<<<<<<< HEAD
				if (!RelationIsNonblockRelation(resultRelationDesc) &&
-					tmfd.cmax != estate->es_output_cid)
-=======
-				if (context->tmfd.cmax != estate->es_output_cid)
->>>>>>> REL_16_9
+					context->tmfd.cmax != estate->es_output_cid)
ereport(ERROR,
(errcode(ERRCODE_TRIGGERED_DATA_CHANGE_VIOLATION),
errmsg("tuple to be
updated was already modified by an operation triggered by the current command"),
@@ -2926,7 +2740,6 @@ redo_act:
if (canSetTag)
(estate->es_processed)++;
-<<<<<<< HEAD
if (resultRelationDesc->rd_rel->relispartition)
{
mtstate->mt_leaf_relids_updated =
@@ -2934,32 +2747,8 @@ redo_act:
mtstate->has_leaf_changed = true;
}
- /* AFTER ROW UPDATE Triggers */
- /* GPDB: AO and AOCO tables don't support triggers */
- if (!RelationIsNonblockRelation(resultRelationDesc))
-		ExecARUpdateTriggers(estate, resultRelInfo, tupleid, oldtuple, slot,
-							 recheckIndexes,
-							 mtstate->operation == CMD_INSERT ?
-							 mtstate->mt_oc_transition_capture :
-							 mtstate->mt_transition_capture);
-
- list_free(recheckIndexes);
-
- /*
- * Check any WITH CHECK OPTION constraints from parent views. We are
- * required to do this after testing all constraints and uniqueness
- * violations per the SQL spec, so we do it after actually updating the
- * record in the heap and all indexes.
- *
-	 * ExecWithCheckOptions() will skip any WCOs which are not of the kind we
- * are looking for at this point.
- */
- if (resultRelInfo->ri_WithCheckOptions != NIL)
-		ExecWithCheckOptions(WCO_VIEW_CHECK, resultRelInfo, slot, estate);
-=======
	ExecUpdateEpilogue(context, &updateCxt, resultRelInfo, tupleid, oldtuple,
					   slot);
->>>>>>> REL_16_9
/* Process RETURNING if present */
if (resultRelInfo->ri_projectReturning)
@@ -3185,12 +2974,7 @@ ExecOnConflictUpdate(ModifyTableContext *context,
*returning = ExecUpdate(context, resultRelInfo,
conflictTid, NULL,
resultRelInfo->ri_onConflict->oc_ProjSlot,
-<<<<<<< HEAD
- planSlot,
-							&mtstate->mt_epqstate, mtstate->ps.state,
GpIdentity.segindex,
-=======
->>>>>>> REL_16_9
canSetTag);
/*
@@ -4447,19 +4231,12 @@ ExecModifyTable(PlanState *pstate)
				/* Initialize projection info if first time for this table */
				if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
					ExecInitInsertProjection(node, resultRelInfo);
-<<<<<<< HEAD
-				slot = ExecGetInsertNewTuple(resultRelInfo, planSlot);
-				slot = ExecInsert(node, resultRelInfo, slot, planSlot,
-								  estate, node->canSetTag, false /* splitUpdate */);
-=======
				slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
				slot = ExecInsert(&context, resultRelInfo, slot,
-								  node->canSetTag, NULL, NULL);
->>>>>>> REL_16_9
+								  node->canSetTag, NULL, NULL, false /* splitUpdate */);
break;
case CMD_UPDATE:
-<<<<<<< HEAD
if (!AttributeNumberIsValid(action_attno))
{
/* normal non-split UPDATE */
@@ -4492,9 +4269,8 @@ ExecModifyTable(PlanState *pstate)
oldSlot);
/* Now apply the update. */
-						slot = ExecUpdate(node, resultRelInfo, tupleid, oldtuple, slot,
-										  planSlot, &node->mt_epqstate, estate,
-										  segid, node->canSetTag);
+						slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
+										  slot, segid, node->canSetTag);
}
else if (action == DML_INSERT)
{
@@ -4503,83 +4279,27 @@ ExecModifyTable(PlanState *pstate)
					/* Initialize projection info if first time for this table */
					if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
						ExecInitInsertProjection(node, resultRelInfo);
-					slot = ExecGetInsertNewTuple(resultRelInfo, planSlot);
-					slot = ExecInsert(node, resultRelInfo, slot, planSlot,
-									  estate, node->canSetTag, true /* splitUpdate */);
+					slot = ExecGetInsertNewTuple(resultRelInfo, context.planSlot);
+					slot = ExecInsert(&context, resultRelInfo, slot, context.planSlot,
+									  estate, node->canSetTag, NULL, NULL, true /* splitUpdate */);
resultRelInfo = old;
}
else if (action == DML_DELETE)
{
-					slot = ExecDelete(node, resultRelInfo, tupleid, oldtuple,
-									  planSlot, &node->mt_epqstate, estate, segid,
+					slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple, segid,
									  false,	/* processReturning */
-									  false,	/* canSetTag */
									  true,		/* changingPart */
-									  true,		/* splitUpdate */
-									  NULL, NULL);
+									  false,	/* canSetTag */
+									  NULL, NULL, NULL,
+									  true		/* splitUpdate */);
				}
				else
					ereport(ERROR, (errmsg("unknown action = %d", action)));
-=======
- tuplock = false;
-
-				/* Initialize projection info if first time for this table */
-				if (unlikely(!resultRelInfo->ri_projectNewInfoValid))
-					ExecInitUpdateProjection(node, resultRelInfo);
-
- /*
-				 * Make the new tuple by combining plan's output tuple with
- * the old tuple being updated.
- */
- oldSlot = resultRelInfo->ri_oldTupleSlot;
- if (oldtuple != NULL)
- {
-					Assert(!resultRelInfo->ri_needLockTagTuple);
-					/* Use the wholerow junk attr as the old tuple. */
-					ExecForceStoreHeapTuple(oldtuple, oldSlot, false);
- }
- else
- {
-					/* Fetch the most recent version of old tuple. */
-					Relation	relation = resultRelInfo->ri_RelationDesc;
-
- if (resultRelInfo->ri_needLockTagTuple)
- {
-						LockTuple(relation, tupleid, InplaceUpdateTupleLock);
-						tuplock = true;
-					}
-					if (!table_tuple_fetch_row_version(relation, tupleid,
-													   SnapshotAny,
-													   oldSlot))
-						elog(ERROR, "failed to fetch tuple being updated");
- }
-				slot = ExecGetUpdateNewTuple(resultRelInfo, context.planSlot,
-											 oldSlot);
-				context.relaction = NULL;
-
-				/* Now apply the update. */
-				slot = ExecUpdate(&context, resultRelInfo, tupleid, oldtuple,
-								  slot, node->canSetTag);
-				if (tuplock)
-					UnlockTuple(resultRelInfo->ri_RelationDesc, tupleid,
-								InplaceUpdateTupleLock);
->>>>>>> REL_16_9
break;
case CMD_DELETE:
-<<<<<<< HEAD
-				slot = ExecDelete(node, resultRelInfo, tupleid, oldtuple,
-								  planSlot, &node->mt_epqstate, estate,
-								  segid,
-								  true,		/* processReturning */
-								  node->canSetTag,
-								  false,	/* changingPart */
-								  false,	/* splitUpdate */
-								  NULL, NULL);
-=======
-				slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple,
-								  true, false, node->canSetTag, NULL, NULL, NULL);
->>>>>>> REL_16_9
+				slot = ExecDelete(&context, resultRelInfo, tupleid, oldtuple, segid,
+								  true, false, node->canSetTag, NULL, NULL, NULL, false);
break;
case CMD_MERGE:
@@ -4602,7 +4322,6 @@ ExecModifyTable(PlanState *pstate)
/*
* Insert remaining tuples for batch insert.
*/
-<<<<<<< HEAD
if (proute)
relinfos = estate->es_tuple_routing_result_relations;
else
@@ -4625,10 +4344,6 @@ ExecModifyTable(PlanState *pstate)
resultRelInfo->ri_NumSlots,
							estate, node->canSetTag);
}
-=======
- if (estate->es_insert_pending_result_relations != NIL)
- ExecPendingInserts(estate);
->>>>>>> REL_16_9
/*
* We're done, but fire AFTER STATEMENT triggers before exiting.
@@ -4782,17 +4497,12 @@ ExecInitModifyTable(ModifyTable *node, EState *estate, int eflags)
}
/* set up epqstate with dummy subplan data for the moment */
-<<<<<<< HEAD
-	EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL, node->epqParam);
+ EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
+ node->epqParam, node->resultRelations);
/* GPDB: Don't fire statement-triggers in QE reader processes */
if (Gp_role != GP_ROLE_EXECUTE || Gp_is_writer)
mtstate->fireBSTriggers = true;
-=======
- EvalPlanQualInit(&mtstate->mt_epqstate, estate, NULL, NIL,
- node->epqParam, node->resultRelations);
- mtstate->fireBSTriggers = true;
->>>>>>> REL_16_9
/*
* Build state for collecting transition tuples. This requires having a
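
Reviewer note on the nodeModifyTable.c hunks: the recurring splitUpdate flag
exists because a distribution-key UPDATE is executed as a DELETE on the old
segment plus an INSERT on the new one, and row-level DELETE/INSERT triggers
must not fire for the two halves of what is logically one UPDATE. The
resolution threads the flag into the upstream prologue/epilogue helpers
instead of open-coding the checks; the AFTER ROW guard, as in the
ExecDeleteEpilogue() hunk above (the BEFORE ROW guard in ExecDeletePrologue()
has the same shape):

    /* Skipped for AO/AOCO relations (no trigger support) and for the
     * DELETE half of a split UPDATE. */
    if (!RelationIsNonblockRelation(resultRelInfo->ri_RelationDesc) && !splitUpdate)
        ExecARDeleteTriggers(estate, resultRelInfo, tupleid, oldtuple,
                             ar_delete_trig_tcs, changingPart);
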
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
index 4367627e658..af988032f31 100644
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -43,11 +43,8 @@
#include "nodes/execnodes.h"
#include "nodes/makefuncs.h"
#include "nodes/nodeFuncs.h"
-<<<<<<< HEAD
-=======
#include "optimizer/clauses.h"
#include "optimizer/optimizer.h"
->>>>>>> REL_16_9
#include "parser/parse_agg.h"
#include "parser/parse_coerce.h"
#include "parser/parse_oper.h"
@@ -844,18 +841,6 @@ finalize_windowaggregate(WindowAggState *winstate,
*isnull = peraggstate->transValueIsNull;
}
-<<<<<<< HEAD
- /*
- * If result is pass-by-ref, make sure it is in the right context.
- */
- if (!peraggstate->resulttypeByVal && !*isnull &&
- !MemoryContextContainsGenericAllocation(CurrentMemoryContext,
-											DatumGetPointer(*result)))
-		*result = datumCopy(*result,
-							peraggstate->resulttypeByVal,
-							peraggstate->resulttypeLen);
-=======
->>>>>>> REL_16_9
MemoryContextSwitchTo(oldContext);
}
@@ -1337,13 +1322,7 @@ eval_windowfunction(WindowAggState *winstate, WindowStatePerFunc perfuncstate,
* ensure it's not clobbered by later window functions.
*/
if (!perfuncstate->resulttypeByVal && !fcinfo->isnull &&
-<<<<<<< HEAD
- !MemoryContextContainsGenericAllocation(CurrentMemoryContext,
-												DatumGetPointer(*result))
-		)
-=======
winstate->numfuncs > 1)
->>>>>>> REL_16_9
*result = datumCopy(*result,
perfuncstate->resulttypeByVal,
perfuncstate->resulttypeLen);
@@ -2461,76 +2440,14 @@ ExecWindowAgg(PlanState *pstate)
winstate->start_offset_var_free &&
winstate->end_offset_var_free)
{
-<<<<<<< HEAD
compute_start_end_offsets(winstate);
-=======
- int frameOptions = winstate->frameOptions;
- Datum value;
- bool isnull;
- int16 len;
- bool byval;
-
- econtext = winstate->ss.ps.ps_ExprContext;
-
- if (frameOptions & FRAMEOPTION_START_OFFSET)
- {
- Assert(winstate->startOffset != NULL);
-			value = ExecEvalExprSwitchContext(winstate->startOffset,
-											  econtext,
-											  &isnull);
-			if (isnull)
-				ereport(ERROR,
-						(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
-						 errmsg("frame starting offset must not be null")));
-			/* copy value into query-lifespan context */
-			get_typlenbyval(exprType((Node *) winstate->startOffset->expr),
-							&len, &byval);
-			winstate->startOffsetValue = datumCopy(value, byval, len);
-			if (frameOptions & (FRAMEOPTION_ROWS | FRAMEOPTION_GROUPS))
-			{
-				/* value is known to be int8 */
-				int64		offset = DatumGetInt64(value);
-
-				if (offset < 0)
-					ereport(ERROR,
-							(errcode(ERRCODE_INVALID_PRECEDING_OR_FOLLOWING_SIZE),
-							 errmsg("frame starting offset must not be negative")));
-			}
-		}
-		if (frameOptions & FRAMEOPTION_END_OFFSET)
-		{
-			Assert(winstate->endOffset != NULL);
-			value = ExecEvalExprSwitchContext(winstate->endOffset,
-											  econtext,
-											  &isnull);
-			if (isnull)
-				ereport(ERROR,
-						(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
-						 errmsg("frame ending offset must not be null")));
-			/* copy value into query-lifespan context */
-			get_typlenbyval(exprType((Node *) winstate->endOffset->expr),
-							&len, &byval);
-			winstate->endOffsetValue = datumCopy(value, byval, len);
-			if (frameOptions & (FRAMEOPTION_ROWS | FRAMEOPTION_GROUPS))
-			{
-				/* value is known to be int8 */
-				int64		offset = DatumGetInt64(value);
-
-				if (offset < 0)
-					ereport(ERROR,
-							(errcode(ERRCODE_INVALID_PRECEDING_OR_FOLLOWING_SIZE),
-							 errmsg("frame ending offset must not be negative")));
- }
- }
->>>>>>> REL_16_9
winstate->all_first = false;
}
/* We need to loop as the runCondition or qual may filter out tuples */
for (;;)
{
-<<<<<<< HEAD
/* Initialize for first partition and set current row = 0 */
begin_partition(winstate);
		/* If there are no input rows, we'll detect that and exit below */
@@ -2591,9 +2508,6 @@ ExecWindowAgg(PlanState *pstate)
release_partition(winstate);
if (winstate->more_partitions)
-=======
- if (winstate->buffer == NULL)
->>>>>>> REL_16_9
{
			/* Initialize for first partition and set current row = 0 */
begin_partition(winstate);
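
Reviewer note on the nodeWindowAgg.c hunks: in eval_windowfunction() the
upstream rule for protecting pass-by-ref results wins. Instead of asking the
memory-context machinery whether the Datum lives in the current context (the
removed MemoryContextContainsGenericAllocation() test), the result is copied
whenever more than one window function runs, since a later function evaluated
in the same short-lived context could clobber it:

    if (!perfuncstate->resulttypeByVal && !fcinfo->isnull &&
        winstate->numfuncs > 1)
        *result = datumCopy(*result,
                            perfuncstate->resulttypeByVal,
                            perfuncstate->resulttypeLen);
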
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]