This is an automated email from the ASF dual-hosted git repository.
chenjinbao1989 pushed a commit to branch cbdb-postgres-merge
in repository https://gitbox.apache.org/repos/asf/cloudberry.git
The following commit(s) were added to refs/heads/cbdb-postgres-merge by this push:
new 0b3470da9d0 Fix conflict for path in optimizer
0b3470da9d0 is described below
commit 0b3470da9d02529753a43ef7ba98c173f459e1f7
Author: Jinbao Chen <[email protected]>
AuthorDate: Tue Jul 22 11:16:17 2025 +0800
Fix conflict for path in optimizer
---
src/backend/optimizer/path/allpaths.c | 205 +++++++-------------------------
src/backend/optimizer/path/clausesel.c | 4 -
src/backend/optimizer/path/costsize.c | 102 +---------------
src/backend/optimizer/path/equivclass.c | 10 +-
src/backend/optimizer/path/indxpath.c | 4 -
src/backend/optimizer/path/joinpath.c | 35 ------
src/backend/optimizer/path/joinrels.c | 12 +-
src/backend/optimizer/path/pathkeys.c | 4 -
src/backend/optimizer/path/tidpath.c | 4 -
9 files changed, 50 insertions(+), 330 deletions(-)
diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index 41eb5728d91..3efb15af077 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -3,13 +3,9 @@
* allpaths.c
* Routines to find possible search paths for processing a query
*
-<<<<<<< HEAD
* Portions Copyright (c) 2005-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -2276,11 +2272,8 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
NULL,
0,
false,
-<<<<<<< HEAD
-1),
root);
-=======
- -1));
if (fractional_subpaths)
add_path(rel, (Path *) create_append_path(root,
@@ -2291,8 +2284,8 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
NULL,
0,
false,
- -1));
->>>>>>> REL_16_9
+ -1),
+ root);
}
else
{
@@ -2308,19 +2301,16 @@ generate_orderedappend_paths(PlannerInfo *root, RelOptInfo *rel,
rel,
total_subpaths,
pathkeys,
-<<<<<<< HEAD
NULL),
root);
-=======
- NULL));
if (fractional_subpaths)
add_path(rel, (Path *) create_merge_append_path(root,
rel,
fractional_subpaths,
pathkeys,
- NULL));
->>>>>>> REL_16_9
+ NULL),
+ root);
}
}
}
@@ -2854,6 +2844,7 @@ static void
set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
Index rti, RangeTblEntry *rte)
{
+ Query *parse = root->parse;
Query *subquery = rte->subquery;
bool trivial_pathtarget;
Relids required_outer;
@@ -2902,64 +2893,15 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
/* if IS_SINGLENODE then role must be GP_ROLE_UTILITY */
forceDistRand = rte->forceDistRandom && Gp_role != GP_ROLE_UTILITY;
+
/* CDB: Could be a preplanned subquery from window_planner. */
- if (rte->subquery_root == NULL)
+ if (rel->baserestrictinfo != NIL &&
+ subquery_is_pushdown_safe(subquery, subquery, &safetyInfo))
{
- /*
- * push down quals if possible. Note subquery might be
- * different pointer from original one.
- */
- subquery = push_down_restrict(root, rel, rte, rti, subquery);
-
-<<<<<<< HEAD
- /*
- * The upper query might not use all the subquery's output columns; if
- * not, we can simplify.
- */
- remove_unused_subquery_outputs(subquery, rel);
-
- /*
- * We can safely pass the outer tuple_fraction down to the subquery if the
- * outer level has no joining, aggregation, or sorting to do. Otherwise
- * we'd better tell the subquery to plan for full retrieval. (XXX This
- * could probably be made more intelligent ...)
- */
- if (subquery->hasAggs ||
- subquery->groupClause ||
- subquery->groupingSets ||
- subquery->havingQual ||
- subquery->distinctClause ||
- subquery->sortClause ||
- has_multiple_baserels(root))
- tuple_fraction = 0.0; /* default case */
- else
- tuple_fraction = root->tuple_fraction;
-
- /* Generate a subroot and Paths for the subquery */
- config = CopyPlannerConfig(root->config);
- config->honor_order_by = false; /* partial order is enough */
-
- /*
- * CDB: if this subquery is the inner plan of a lateral
- * join and if it contains a limit, we can only gather
- * it to singleQE and materialize the data because we
- * cannot pass params across motion.
- */
- if ((!bms_is_empty(required_outer)) &&
- is_query_contain_limit_groupby(subquery))
- config->force_singleQE = true;
-
- /*
- * Cloudberry specific behavior:
- * config->may_rescan is used to guide if
- * we should add materialize path over motion
- * in the left tree of a join.
- */
- config->may_rescan = config->may_rescan || !bms_is_empty(required_outer);
+ /* OK to consider pushing down individual quals */
+ List *upperrestrictlist = NIL;
+ ListCell *l;
- /* plan_params should not be in use in current query level */
- Assert(root->plan_params == NIL);
-=======
foreach(l, rel->baserestrictinfo)
{
RestrictInfo *rinfo = (RestrictInfo *) lfirst(l);
@@ -3034,18 +2976,35 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
tuple_fraction = 0.0; /* default case */
else
tuple_fraction = root->tuple_fraction;
->>>>>>> REL_16_9
- rel->subroot = subquery_planner(root->glob, subquery,
- root,
- false, tuple_fraction,
- config);
- }
- else
- {
- /* This is a preplanned sub-query RTE. */
- rel->subroot = rte->subquery_root;
- }
+
+ /* Generate a subroot and Paths for the subquery */
+ config = CopyPlannerConfig(root->config);
+ config->honor_order_by = false; /* partial order is enough */
+
+ /*
+ * CDB: if this subquery is the inner plan of a lateral
+ * join and if it contains a limit, we can only gather
+ * it to singleQE and materialize the data because we
+ * cannot pass params across motion.
+ */
+ if ((!bms_is_empty(required_outer)) &&
+ is_query_contain_limit_groupby(subquery))
+ config->force_singleQE = true;
+
+ /*
+ * Cloudberry specific behavior:
+ * config->may_rescan is used to guide if
+ * we should add materialize path over motion
+ * in the left tree of a join.
+ */
+ config->may_rescan = config->may_rescan || !bms_is_empty(required_outer);
+
+
+ rel->subroot = subquery_planner(root->glob, subquery,
+ root,
+ false, tuple_fraction,
+ config);
/* Isolate the params needed by this specific subplan */
rel->subplan_params = root->plan_params;
@@ -3139,14 +3098,7 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
(Node *) l);
/* Generate outer path using this subpath */
-<<<<<<< HEAD
add_path(rel, path, root);
-=======
- add_path(rel, (Path *)
- create_subqueryscan_path(root, rel, subpath,
- trivial_pathtarget,
- pathkeys, required_outer));
->>>>>>> REL_16_9
}
/* If outer rel allows parallelism, do same for partial paths. */
@@ -3177,13 +3129,8 @@ set_subquery_pathlist(PlannerInfo *root, RelOptInfo *rel,
make_tlist_from_pathtarget(subpath->pathtarget));
/* Generate outer path using this subpath */
-<<<<<<< HEAD
path = (Path *) create_subqueryscan_path(root, rel,
subpath,
-=======
- add_partial_path(rel, (Path *)
- create_subqueryscan_path(root, rel, subpath,
- trivial_pathtarget,
->>>>>>> REL_16_9
+ trivial_pathtarget,
pathkeys,
locus,
required_outer);
@@ -3441,7 +3388,6 @@ set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
}
if (lc == NULL) /* shouldn't happen */
elog(ERROR, "could not find CTE \"%s\"", rte->ctename);
-<<<<<<< HEAD
Assert(IsA(cte->ctequery, Query));
/*
@@ -3623,14 +3569,6 @@ set_cte_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
}
pathkeys = subroot->query_pathkeys;
-=======
- if (ndx >= list_length(cteroot->cte_plan_ids))
- elog(ERROR, "could not find plan for CTE \"%s\"", rte->ctename);
- plan_id = list_nth_int(cteroot->cte_plan_ids, ndx);
- if (plan_id <= 0)
- elog(ERROR, "no plan was made for CTE \"%s\"", rte->ctename);
- cteplan = (Plan *) list_nth(root->glob->subplans, plan_id - 1);
->>>>>>> REL_16_9
/* Mark rel with estimated output rows, width, etc */
{
@@ -4039,35 +3977,12 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r
*/
if (presorted_keys == 0 || !enable_incremental_sort)
{
-<<<<<<< HEAD
- Path *tmp;
-
- tmp = (Path *) create_sort_path(root,
- rel,
- subpath,
- useful_pathkeys,
- -1.0);
-
- rows = tmp->rows * tmp->parallel_workers;
-
- path = create_gather_merge_path(root, rel,
- tmp,
- rel->reltarget,
- tmp->pathkeys,
- NULL,
- rowsp);
-
- add_path(rel, &path->path, root);
-
- /* Fall through */
-=======
subpath = (Path *) create_sort_path(root,
rel,
subpath,
useful_pathkeys,
-1.0);
rows = subpath->rows * subpath->parallel_workers;
->>>>>>> REL_16_9
}
else
subpath = (Path *) create_incremental_sort_path(root,
@@ -4083,41 +3998,7 @@ generate_useful_gather_paths(PlannerInfo *root, RelOptInfo *rel, bool override_r
NULL,
rowsp);
-<<<<<<< HEAD
- /*
- * Consider incremental sort, but only when the subpath is already
- * partially sorted on a pathkey prefix.
- */
- if (enable_incremental_sort && presorted_keys > 0)
- {
- Path *tmp;
-
- /*
- * We should have already excluded pathkeys of length 1
- * because then presorted_keys > 0 would imply is_sorted was
- * true.
- */
- Assert(list_length(useful_pathkeys) != 1);
-
- tmp = (Path *) create_incremental_sort_path(root,
- rel,
- subpath,
- useful_pathkeys,
- presorted_keys,
- -1);
-
- path = create_gather_merge_path(root, rel,
- tmp,
- rel->reltarget,
- tmp->pathkeys,
- NULL,
- rowsp);
-
- add_path(rel, &path->path, root);
- }
-=======
- add_path(rel, &path->path);
->>>>>>> REL_16_9
+ add_path(rel, &path->path, root);
}
}
}
@@ -4416,12 +4297,8 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
* partial paths. We'll do the same for the topmost scan/join rel
* once we know the final targetlist (see grouping_planner).
*/
-<<<<<<< HEAD
#if 0
- if (lev < levels_needed)
-=======
if (!bms_equal(rel->relids, root->all_query_rels))
->>>>>>> REL_16_9
generate_useful_gather_paths(root, rel, false);
#endif
diff --git a/src/backend/optimizer/path/clausesel.c b/src/backend/optimizer/path/clausesel.c
index fdf884510fd..2ae20ae2aab 100644
--- a/src/backend/optimizer/path/clausesel.c
+++ b/src/backend/optimizer/path/clausesel.c
@@ -3,13 +3,9 @@
* clausesel.c
* Routines to compute clause selectivities
*
-<<<<<<< HEAD
* Portions Copyright (c) 2006-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index b70a3266d6d..72abd7280bb 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -60,13 +60,9 @@
* values.
*
*
-<<<<<<< HEAD
* Portions Copyright (c) 2005-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
@@ -157,11 +153,7 @@ bool enable_hashagg_disk = true;
bool enable_nestloop = false;
bool enable_material = true;
bool enable_memoize = true;
-<<<<<<< HEAD
bool enable_mergejoin = false;
-=======
-bool enable_mergejoin = true;
->>>>>>> REL_16_9
bool enable_hashjoin = true;
bool enable_gathermerge = true;
bool enable_partitionwise_join = false;
@@ -206,11 +198,7 @@ static Selectivity get_foreign_key_join_selectivity(PlannerInfo *root,
List **restrictlist);
static Cost append_nonpartial_cost(List *subpaths, int numpaths,
int parallel_workers);
-<<<<<<< HEAD
-=======
-static void set_rel_width(PlannerInfo *root, RelOptInfo *rel);
static int32 get_expr_width(PlannerInfo *root, const Node *expr);
->>>>>>> REL_16_9
static double relation_byte_size(double tuples, int width);
static double page_size(double tuples, int width);
static double get_parallel_divisor(Path *path);
@@ -314,7 +302,6 @@ adjust_reloptinfo(RelOptInfoPerSegment *basescan, RelOptInfo *baserel_orig,
}
/*
-<<<<<<< HEAD
* ADJUST_BASESCAN initializes the proxy structs for RelOptInfo and ParamPathInfo,
* adjusting them by # of segments as needed.
*/
@@ -324,7 +311,8 @@ adjust_reloptinfo(RelOptInfoPerSegment *basescan, RelOptInfo *baserel_orig,
RelOptInfoPerSegment *baserel = &baserel_adjusted; \
ParamPathInfoPerSegment *param_info = adjust_reloptinfo(&baserel_adjusted, baserel_orig, \
&param_info_adjusted, param_info_orig)
-=======
+
+/*
* clamp_cardinality_to_long
* Cast a Cardinality value to a sane long value.
*/
@@ -349,7 +337,6 @@ clamp_cardinality_to_long(Cardinality x)
*/
return (x < (double) LONG_MAX) ? (long) x : LONG_MAX;
}
->>>>>>> REL_16_9
/*
@@ -1586,7 +1573,6 @@ cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
Assert(baserel->relid > 0);
Assert(baserel->rtekind == RTE_SUBQUERY);
-<<<<<<< HEAD
/* Adjust row count if this runs in multiple segments and parallel model */
if (CdbPathLocus_IsPartitioned(path->path.locus))
{
@@ -1595,23 +1581,16 @@ cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
else
numsegments = 1;
- /* Mark the path with the correct row estimate */
-=======
/*
* We compute the rowcount estimate as the subplan's estimate times the
* selectivity of relevant restriction clauses. In simple cases this will
* come out the same as baserel->rows; but when dealing with parallelized
* paths we must do it like this to get the right answer.
*/
->>>>>>> REL_16_9
if (param_info)
qpquals = list_concat_copy(param_info->ppi_clauses,
baserel->baserestrictinfo);
else
-<<<<<<< HEAD
- path->path.rows = baserel->rows;
- path->path.rows = clamp_row_est(path->path.rows / numsegments);
-=======
qpquals = baserel->baserestrictinfo;
path->path.rows = clamp_row_est(path->subpath->rows *
@@ -1620,7 +1599,6 @@ cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
0,
JOIN_INNER,
NULL));
->>>>>>> REL_16_9
/*
* Cost of path is cost of evaluating the subplan, plus cost of evaluating
@@ -1651,11 +1629,7 @@ cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
startup_cost = qpqual_cost.startup;
cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple;
-<<<<<<< HEAD
- run_cost = cpu_per_tuple * clamp_row_est(baserel->tuples / numsegments);
-=======
- run_cost = cpu_per_tuple * path->subpath->rows;
->>>>>>> REL_16_9
+ run_cost = cpu_per_tuple * clamp_row_est(path->subpath->rows / numsegments);
/* tlist eval costs are paid per output row, not per tuple scanned */
startup_cost += path->path.pathtarget->cost.startup;
@@ -2703,10 +2677,7 @@ cost_memoize_rescan(PlannerInfo *root, MemoizePath *mpath,
Cost *rescan_startup_cost, Cost *rescan_total_cost)
{
EstimationInfo estinfo;
-<<<<<<< HEAD
-=======
ListCell *lc;
->>>>>>> REL_16_9
Cost input_startup_cost = mpath->subpath->startup_cost;
Cost input_total_cost = mpath->subpath->total_cost;
double tuples = mpath->subpath->rows;
@@ -3366,12 +3337,8 @@ final_cost_nestloop(PlannerInfo *root, NestPath *path,
if (path->jpath.path.param_info)
path->jpath.path.rows = path->jpath.path.param_info->ppi_rows;
else
-<<<<<<< HEAD
- path->path.rows = path->path.parent->rows;
- path->path.rows /= numsegments;
-=======
path->jpath.path.rows = path->jpath.path.parent->rows;
->>>>>>> REL_16_9
+ path->jpath.path.rows /= numsegments;
/* For partial paths, scale row estimate. */
if (path->jpath.path.parallel_workers > 0)
@@ -4248,10 +4215,6 @@ final_cost_hashjoin(PlannerInfo *root, HashPath *path,
Cost run_cost = workspace->run_cost;
int numbuckets = workspace->numbuckets;
int numbatches = workspace->numbatches;
-<<<<<<< HEAD
-=======
- Cost cpu_per_tuple;
->>>>>>> REL_16_9
QualCost hash_qual_cost;
QualCost qp_qual_cost;
double hashjointuples;
@@ -6745,63 +6708,6 @@ set_pathtarget_cost_width(PlannerInfo *root, PathTarget *target)
/* For non-Vars, account for evaluation cost */
if (!IsA(node, Var))
{
-<<<<<<< HEAD
- Var *var = (Var *) node;
- int32 item_width;
-
- /* We should not see any upper-level Vars here */
- Assert(var->varlevelsup == 0);
-
- /* Try to get data from RelOptInfo cache */
- if (var->varno < root->simple_rel_array_size)
- {
- RelOptInfo *rel = root->simple_rel_array[var->varno];
-
- if (rel != NULL &&
- var->varattno >= rel->min_attr &&
- var->varattno <= rel->max_attr)
- {
- int ndx = var->varattno - rel->min_attr;
-
- if (rel->attr_widths[ndx] > 0)
- {
- tuple_width += rel->attr_widths[ndx];
- continue;
- }
- }
- }
-
- /*
- * No cached data available, so estimate using just the type info.
- */
- item_width = get_typavgwidth(var->vartype, var->vartypmod);
- Assert(item_width > 0);
- tuple_width += item_width;
- }
- else if (IsA(node, Aggref))
- {
- int32 item_width;
-
- /*
- * If the target is evaluated by AggPath, it'll care of cost
- * estimate. If the target is above AggPath (typically target of a
- * join relation that contains grouped relation), the cost of
- * Aggref should not be accounted for again.
- *
- * On the other hand, width is always needed.
- */
- item_width = get_typavgwidth(exprType(node), exprTypmod(node));
- Assert(item_width > 0);
- tuple_width += item_width;
- }
- else
- {
- /*
- * Handle general expressions using type info.
- */
- int32 item_width;
-=======
->>>>>>> REL_16_9
QualCost cost;
cost_qual_eval_node(&cost, node, root);
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index 63258e09850..4c7e6866cfd 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -725,19 +725,15 @@ get_eclass_for_sort_expr(PlannerInfo *root,
{
RelOptInfo *rel = root->simple_rel_array[i];
-<<<<<<< HEAD
- Assert(rel->reloptkind == RELOPT_BASEREL ||
- rel->reloptkind == RELOPT_DEADREL ||
- rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
-=======
if (rel == NULL) /* must be an outer join */
{
Assert(bms_is_member(i, root->outer_join_rels));
continue;
}
- Assert(rel->reloptkind == RELOPT_BASEREL);
->>>>>>> REL_16_9
+ Assert(rel->reloptkind == RELOPT_BASEREL ||
+ rel->reloptkind == RELOPT_DEADREL ||
+ rel->reloptkind == RELOPT_OTHER_MEMBER_REL);
rel->eclass_indexes = bms_add_member(rel->eclass_indexes,
ec_index);
diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c
index dc7a44ce0fd..66ec59a87c2 100644
--- a/src/backend/optimizer/path/indxpath.c
+++ b/src/backend/optimizer/path/indxpath.c
@@ -4,13 +4,9 @@
* Routines to determine which indexes are usable for scanning a
* given relation, and create Paths accordingly.
*
-<<<<<<< HEAD
* Portions Copyright (c) 2006-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index 175d42a47bf..95f5da829a3 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -3,13 +3,9 @@
* joinpath.c
* Routines to find all possible paths for processing a set of joins
*
-<<<<<<< HEAD
* Portions Copyright (c) 2005-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -30,12 +26,9 @@
#include "optimizer/pathnode.h"
#include "optimizer/paths.h"
#include "optimizer/planmain.h"
-<<<<<<< HEAD
#include "optimizer/tlist.h"
#include "utils/lsyscache.h"
-=======
#include "optimizer/restrictinfo.h"
->>>>>>> REL_16_9
#include "utils/typcache.h"
#include "utils/guc.h"
@@ -397,7 +390,6 @@ add_paths_to_join_relation(PlannerInfo *root,
* permissions as, give the FDW a chance to push down joins.
*/
if (joinrel->fdwroutine &&
-<<<<<<< HEAD
joinrel->fdwroutine->GetForeignJoinPaths)
{
List *foreignRestrictlist = NIL;
@@ -432,10 +424,6 @@ add_paths_to_join_relation(PlannerInfo *root,
extra.restrictlist = foreignRestrictlist;
-=======
- joinrel->fdwroutine->GetForeignJoinPaths &&
- consider_join_pushdown)
->>>>>>> REL_16_9
joinrel->fdwroutine->GetForeignJoinPaths(root, joinrel,
outerrel, innerrel,
jointype, &extra);
@@ -515,11 +503,6 @@ allow_star_schema_join(PlannerInfo *root,
* to use a clause involving a Var with nonempty varnullingrels at
* a join level where that value isn't yet computable.
*
-<<<<<<< HEAD
- * Additionally we also collect the outer exprs and the hash operators for
- * each parameter to innerrel. These set in 'param_exprs', 'operators' and
- * 'binary_mode' when we return true.
-=======
* In practice, this test never finds a problem because earlier join order
* restrictions prevent us from attempting a join that would cause a problem.
* (That's unsurprising, because the code worked before we ever added
@@ -575,7 +558,6 @@ have_unsafe_outer_join_ref(PlannerInfo *root,
* These are returned in parallel lists in *param_exprs and *operators.
* We also set *binary_mode to indicate whether strict binary matching is
* required.
->>>>>>> REL_16_9
*/
static bool
paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
@@ -602,10 +584,6 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
Node *expr;
Oid hasheqoperator;
-<<<<<<< HEAD
- /* can't use a memoize node without a valid hash equals operator */
- if (!OidIsValid(rinfo->hasheqoperator) ||
-=======
opexpr = (OpExpr *) rinfo->clause;
/*
@@ -613,7 +591,6 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
* with 2 args.
*/
if (!IsA(opexpr, OpExpr) || list_length(opexpr->args) != 2 ||
->>>>>>> REL_16_9
!clause_sides_match_join(rinfo, outerrel, innerrel))
{
list_free(*operators);
@@ -676,11 +653,7 @@ paraminfo_get_equal_hashops(PlannerInfo *root, ParamPathInfo *param_info,
typentry = lookup_type_cache(exprType(expr),
TYPECACHE_HASH_PROC | TYPECACHE_EQ_OPR);
-<<<<<<< HEAD
- /* can't use a memoize node without a valid hash equals operator */
-=======
/* can't use memoize without a valid hash proc and equals operator */
->>>>>>> REL_16_9
if (!OidIsValid(typentry->hash_proc) ||
!OidIsValid(typentry->eq_opr))
{
list_free(*operators);
@@ -1563,11 +1536,7 @@ sort_inner_and_outer(PlannerInfo *root,
foreach(l, all_pathkeys)
{
-<<<<<<< HEAD
- PathKey *front_pathkey = (PathKey *) lfirst(l);
-=======
PathKey *front_pathkey = (PathKey *) lfirst(l);
->>>>>>> REL_16_9
List *cur_mergeclauses;
List *outerkeys;
List *innerkeys;
@@ -2281,11 +2250,7 @@ consider_parallel_nestloop(PlannerInfo *root,
extra);
if (mpath != NULL)
try_partial_nestloop_path(root, joinrel, outerpath, mpath,
-<<<<<<< HEAD
pathkeys, jointype, save_jointype, extra);
-=======
- pathkeys, jointype, extra);
->>>>>>> REL_16_9
}
}
}
diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c
index 01847dfa477..4ea5a36da96 100644
--- a/src/backend/optimizer/path/joinrels.c
+++ b/src/backend/optimizer/path/joinrels.c
@@ -3,13 +3,10 @@
* joinrels.c
* Routines to determine which relations should be joined
*
-<<<<<<< HEAD
+
* Portions Copyright (c) 2006-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
@@ -781,14 +778,9 @@ make_join_relation(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2)
* Find or build the join RelOptInfo, and compute the restrictlist that
* goes with this particular joining.
*/
-<<<<<<< HEAD
- joinrel = build_join_rel(root, joinrelids, rel1, rel2, sjinfo,
- &restrictlist, NULL);
-=======
joinrel = build_join_rel(root, joinrelids, rel1, rel2,
sjinfo,
pushed_down_joins,
- &restrictlist);
->>>>>>> REL_16_9
+ &restrictlist, NULL);
/*
* If we've already proven this join is empty, we needn't consider any
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index cc1f6cb3d6e..c22bcbbbecb 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -7,13 +7,9 @@
* the nature and use of path keys.
*
*
-<<<<<<< HEAD
* Portions Copyright (c) 2005-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
diff --git a/src/backend/optimizer/path/tidpath.c b/src/backend/optimizer/path/tidpath.c
index 29730c749e8..20512905245 100644
--- a/src/backend/optimizer/path/tidpath.c
+++ b/src/backend/optimizer/path/tidpath.c
@@ -27,13 +27,9 @@
* "CTID relop pseudoconstant", where relop is one of >,>=,<,<=, and
* AND-clauses composed of such conditions.
*
-<<<<<<< HEAD
* Portions Copyright (c) 2007-2008, Greenplum inc
* Portions Copyright (c) 2012-Present VMware, Inc. or its affiliates.
- * Portions Copyright (c) 1996-2021, PostgreSQL Global Development Group
-=======
* Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
->>>>>>> REL_16_9
* Portions Copyright (c) 1994, Regents of the University of California
*
*
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]