Hi,

I stumbled across a few places that depend on the inheritance appends being applied later on, so I quickly abandoned that idea. I also thought a bit about reusing the indexlist, in particular for the inhparent case, but I am not sure what depends on get_relation_info working the way it currently does.

I therefore propose a new RelOptInfo attribute, partIndexlist, to carry uniqueness information for the case where the executor can't use the index that provides the uniqueness in the first place. relation_has_unique_index_for and rel_supports_distinctness can then consult this attribute. The attached patch takes that route.
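To make the intended effect concrete, here is a minimal sketch of the join pruning this enables, modeled on the partition_join regression test in the attached patch; the table and index names are illustrative, and the expected plan assumes the patch is applied:

CREATE TABLE t (a int, b int) PARTITION BY RANGE (a);
CREATE TABLE t_p1 PARTITION OF t FOR VALUES FROM (0) TO (100);
CREATE TABLE t_p2 PARTITION OF t FOR VALUES FROM (100) TO (200);

-- The executor never scans the partitioned index itself, but it still
-- proves that t.a is unique across the whole partition tree.
CREATE UNIQUE INDEX t_a_idx ON t (a);

-- The left self-join can neither duplicate nor filter rows of t1, so
-- the planner should now remove it entirely, leaving a plain Append
-- over the partitions of t1.
EXPLAIN (COSTS OFF)
SELECT t1.a FROM t t1 LEFT JOIN t t2 USING (a);

I'd appreciate feedback!

Regards
Arne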
diff --git a/src/backend/access/heap/vacuumlazy.c b/src/backend/access/heap/vacuumlazy.c index 05221cc1d6..e3dca4b465 100644 --- a/src/backend/access/heap/vacuumlazy.c +++ b/src/backend/access/heap/vacuumlazy.c @@ -2375,7 +2375,7 @@ lazy_vacuum_heap_rel(LVRelState *vacrel) ereport(elevel, (errmsg("table \"%s\": removed %lld dead item identifiers in %u pages", - vacrel->relname, (long long ) tupindex, vacuumed_pages), + vacrel->relname, (long long) tupindex, vacuumed_pages), errdetail_internal("%s", pg_rusage_show(&ru0)))); /* Revert to the previous phase information for error traceback */ diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index f547efd294..94fca8cf8b 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -906,7 +906,7 @@ static void checkTimeLineSwitch(XLogRecPtr lsn, TimeLineID newTLI, TimeLineID prevTLI); static void VerifyOverwriteContrecord(xl_overwrite_contrecord *xlrec, XLogReaderState *state); -static int LocalSetXLogInsertAllowed(void); +static int LocalSetXLogInsertAllowed(void); static void CreateEndOfRecoveryRecord(void); static XLogRecPtr CreateOverwriteContrecordRecord(XLogRecPtr aborted_lsn); static void CheckPointGuts(XLogRecPtr checkPointRedo, int flags); @@ -5753,38 +5753,38 @@ CleanupAfterArchiveRecovery(TimeLineID EndOfLogTLI, XLogRecPtr EndOfLog) * We switched to a new timeline. Clean up segments on the old timeline. * * If there are any higher-numbered segments on the old timeline, remove - * them. They might contain valid WAL, but they might also be pre-allocated - * files containing garbage. In any case, they are not part of the new - * timeline's history so we don't need them. + * them. They might contain valid WAL, but they might also be + * pre-allocated files containing garbage. In any case, they are not part + * of the new timeline's history so we don't need them. */ RemoveNonParentXlogFiles(EndOfLog, ThisTimeLineID); /* * If the switch happened in the middle of a segment, what to do with the * last, partial segment on the old timeline? If we don't archive it, and - * the server that created the WAL never archives it either (e.g. because it - * was hit by a meteor), it will never make it to the archive. That's OK - * from our point of view, because the new segment that we created with the - * new TLI contains all the WAL from the old timeline up to the switch + * the server that created the WAL never archives it either (e.g. because + * it was hit by a meteor), it will never make it to the archive. That's + * OK from our point of view, because the new segment that we created with + * the new TLI contains all the WAL from the old timeline up to the switch * point. But if you later try to do PITR to the "missing" WAL on the old - * timeline, recovery won't find it in the archive. It's physically present - * in the new file with new TLI, but recovery won't look there when it's - * recovering to the older timeline. On the other hand, if we archive the - * partial segment, and the original server on that timeline is still - * running and archives the completed version of the same segment later, it - * will fail. (We used to do that in 9.4 and below, and it caused such - * problems). + * timeline, recovery won't find it in the archive. It's physically + * present in the new file with new TLI, but recovery won't look there + * when it's recovering to the older timeline. 
On the other hand, if we + * archive the partial segment, and the original server on that timeline + * is still running and archives the completed version of the same segment + * later, it will fail. (We used to do that in 9.4 and below, and it + * caused such problems). * - * As a compromise, we rename the last segment with the .partial suffix, and - * archive it. Archive recovery will never try to read .partial segments, so - * they will normally go unused. But in the odd PITR case, the administrator - * can copy them manually to the pg_wal directory (removing the suffix). - * They can be useful in debugging, too. + * As a compromise, we rename the last segment with the .partial suffix, + * and archive it. Archive recovery will never try to read .partial + * segments, so they will normally go unused. But in the odd PITR case, + * the administrator can copy them manually to the pg_wal directory + * (removing the suffix). They can be useful in debugging, too. * * If a .done or .ready file already exists for the old timeline, however, - * we had already determined that the segment is complete, so we can let it - * be archived normally. (In particular, if it was restored from the archive - * to begin with, it's expected to have a .done file). + * we had already determined that the segment is complete, so we can let + * it be archived normally. (In particular, if it was restored from the + * archive to begin with, it's expected to have a .done file). */ if (XLogSegmentOffset(EndOfLog, wal_segment_size) != 0 && XLogArchivingActive()) @@ -8100,10 +8100,10 @@ StartupXLOG(void) * Emit checkpoint or end-of-recovery record in XLOG, if required. * * XLogCtl->lastReplayedEndRecPtr will be a valid LSN if and only if we - * entered recovery. Even if we ultimately replayed no WAL records, it will - * have been initialized based on where replay was due to start. We don't - * need a lock to access this, since this can't change any more by the time - * we reach this code. + * entered recovery. Even if we ultimately replayed no WAL records, it + * will have been initialized based on where replay was due to start. We + * don't need a lock to access this, since this can't change any more by + * the time we reach this code. */ if (!XLogRecPtrIsInvalid(XLogCtl->lastReplayedEndRecPtr)) promoted = PerformRecoveryXLogAction(); @@ -8292,15 +8292,15 @@ PerformRecoveryXLogAction(void) /* * Perform a checkpoint to update all our recovery activity to disk. * - * Note that we write a shutdown checkpoint rather than an on-line one. This - * is not particularly critical, but since we may be assigning a new TLI, - * using a shutdown checkpoint allows us to have the rule that TLI only - * changes in shutdown checkpoints, which allows some extra error checking - * in xlog_redo. + * Note that we write a shutdown checkpoint rather than an on-line one. + * This is not particularly critical, but since we may be assigning a new + * TLI, using a shutdown checkpoint allows us to have the rule that TLI + * only changes in shutdown checkpoints, which allows some extra error + * checking in xlog_redo. * - * In promotion, only create a lightweight end-of-recovery record instead of - * a full checkpoint. A checkpoint is requested later, after we're fully out - * of recovery mode and already accepting queries. + * In promotion, only create a lightweight end-of-recovery record instead + * of a full checkpoint. A checkpoint is requested later, after we're + * fully out of recovery mode and already accepting queries. 
*/ if (ArchiveRecoveryRequested && IsUnderPostmaster && LocalPromoteIsTriggered) @@ -8310,11 +8310,11 @@ PerformRecoveryXLogAction(void) /* * Insert a special WAL record to mark the end of recovery, since we * aren't doing a checkpoint. That means that the checkpointer process - * may likely be in the middle of a time-smoothed restartpoint and could - * continue to be for minutes after this. That sounds strange, but the - * effect is roughly the same and it would be stranger to try to come - * out of the restartpoint and then checkpoint. We request a checkpoint - * later anyway, just for safety. + * may likely be in the middle of a time-smoothed restartpoint and + * could continue to be for minutes after this. That sounds strange, + * but the effect is roughly the same and it would be stranger to try + * to come out of the restartpoint and then checkpoint. We request a + * checkpoint later anyway, just for safety. */ CreateEndOfRecoveryRecord(); } @@ -8486,7 +8486,7 @@ XLogInsertAllowed(void) static int LocalSetXLogInsertAllowed(void) { - int oldXLogAllowed = LocalXLogInsertAllowed; + int oldXLogAllowed = LocalXLogInsertAllowed; LocalXLogInsertAllowed = 1; diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c index 81cc39fb70..97ddd2b24a 100644 --- a/src/backend/catalog/heap.c +++ b/src/backend/catalog/heap.c @@ -3550,9 +3550,8 @@ restart: /* * If this constraint has a parent constraint which we have not seen * yet, keep track of it for the second loop, below. Tracking parent - * constraints allows us to climb up to the top-level constraint - * and look for all possible relations referencing the partitioned - * table. + * constraints allows us to climb up to the top-level constraint and + * look for all possible relations referencing the partitioned table. */ if (OidIsValid(con->conparentid) && !list_member_oid(parent_cons, con->conparentid)) diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 2bae3fbb17..86b9813300 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -856,7 +856,7 @@ const ObjectAddress InvalidObjectAddress = }; static ObjectAddress get_object_address_unqualified(ObjectType objtype, - String *strval, bool missing_ok); + String * strval, bool missing_ok); static ObjectAddress get_relation_by_qualified_name(ObjectType objtype, List *object, Relation *relp, LOCKMODE lockmode, bool missing_ok); @@ -1255,7 +1255,7 @@ get_object_address_rv(ObjectType objtype, RangeVar *rel, List *object, */ static ObjectAddress get_object_address_unqualified(ObjectType objtype, - String *strval, bool missing_ok) + String * strval, bool missing_ok) { const char *name; ObjectAddress address; diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c index 4928702aec..9b7bb1b3be 100644 --- a/src/backend/commands/analyze.c +++ b/src/backend/commands/analyze.c @@ -428,7 +428,7 @@ do_analyze_rel(Relation onerel, VacuumParams *params, */ if (onerel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { - List *idxs = RelationGetIndexList(onerel); + List *idxs = RelationGetIndexList(onerel); Irel = NULL; nindexes = 0; diff --git a/src/backend/commands/cluster.c b/src/backend/commands/cluster.c index 9d22f648a8..ffba47926d 100644 --- a/src/backend/commands/cluster.c +++ b/src/backend/commands/cluster.c @@ -1529,8 +1529,8 @@ finish_heap_swap(Oid OIDOldHeap, Oid OIDNewHeap, /* * Reset the relrewrite for the toast. 
The command-counter - * increment is required here as we are about to update - * the tuple that is updated as part of RenameRelationInternal. + * increment is required here as we are about to update the tuple + * that is updated as part of RenameRelationInternal. */ CommandCounterIncrement(); ResetRelRewrite(newrel->rd_rel->reltoastrelid); diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c index d1fff13d2e..21bf50a81c 100644 --- a/src/backend/commands/publicationcmds.c +++ b/src/backend/commands/publicationcmds.c @@ -813,7 +813,7 @@ RemovePublicationById(Oid pubid) if (!HeapTupleIsValid(tup)) elog(ERROR, "cache lookup failed for publication %u", pubid); - pubform = (Form_pg_publication)GETSTRUCT(tup); + pubform = (Form_pg_publication) GETSTRUCT(tup); /* Invalidate relcache so that publication info is rebuilt. */ if (pubform->puballtables) diff --git a/src/backend/commands/statscmds.c b/src/backend/commands/statscmds.c index 8f1550ec80..e2fe1e174e 100644 --- a/src/backend/commands/statscmds.c +++ b/src/backend/commands/statscmds.c @@ -261,9 +261,9 @@ CreateStatistics(CreateStatsStmt *stmt) nattnums++; ReleaseSysCache(atttuple); } - else if (IsA(selem->expr, Var)) /* column reference in parens */ + else if (IsA(selem->expr, Var)) /* column reference in parens */ { - Var *var = (Var *) selem->expr; + Var *var = (Var *) selem->expr; TypeCacheEntry *type; /* Disallow use of system attributes in extended stats */ @@ -300,10 +300,11 @@ CreateStatistics(CreateStatsStmt *stmt) while ((k = bms_next_member(attnums, k)) >= 0) { AttrNumber attnum = k + FirstLowInvalidHeapAttributeNumber; + if (attnum <= 0) ereport(ERROR, - (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("statistics creation on system columns is not supported"))); + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("statistics creation on system columns is not supported"))); } /* diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 857cc5ce6e..28a051f4ff 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -17525,12 +17525,12 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd, /* * If the partition we just attached is partitioned itself, invalidate * relcache for all descendent partitions too to ensure that their - * rd_partcheck expression trees are rebuilt; partitions already locked - * at the beginning of this function. + * rd_partcheck expression trees are rebuilt; partitions already locked at + * the beginning of this function. */ if (attachrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { - ListCell *l; + ListCell *l; foreach(l, attachrel_children) { @@ -18232,13 +18232,13 @@ DetachPartitionFinalize(Relation rel, Relation partRel, bool concurrent, /* * If the partition we just detached is partitioned itself, invalidate * relcache for all descendent partitions too to ensure that their - * rd_partcheck expression trees are rebuilt; must lock partitions - * before doing so, using the same lockmode as what partRel has been - * locked with by the caller. + * rd_partcheck expression trees are rebuilt; must lock partitions before + * doing so, using the same lockmode as what partRel has been locked with + * by the caller. 
*/ if (partRel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE) { - List *children; + List *children; children = find_all_inheritors(RelationGetRelid(partRel), AccessExclusiveLock, NULL); diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index d328856ae5..d13fbf4590 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -715,7 +715,7 @@ ExecInsert(ModifyTableState *mtstate, { TupleDesc tdesc = CreateTupleDescCopy(slot->tts_tupleDescriptor); TupleDesc plan_tdesc = - CreateTupleDescCopy(planSlot->tts_tupleDescriptor); + CreateTupleDescCopy(planSlot->tts_tupleDescriptor); resultRelInfo->ri_Slots[resultRelInfo->ri_NumSlots] = MakeSingleTupleTableSlot(tdesc, slot->tts_ops); diff --git a/src/backend/jit/llvm/llvmjit.c b/src/backend/jit/llvm/llvmjit.c index 169dad96d7..4e7212e7bf 100644 --- a/src/backend/jit/llvm/llvmjit.c +++ b/src/backend/jit/llvm/llvmjit.c @@ -890,8 +890,8 @@ llvm_shutdown(int code, Datum arg) * has occurred in the middle of LLVM code. It is not safe to call back * into LLVM (which is why a FATAL error was thrown). * - * We do need to shutdown LLVM in other shutdown cases, otherwise - * e.g. profiling data won't be written out. + * We do need to shutdown LLVM in other shutdown cases, otherwise e.g. + * profiling data won't be written out. */ if (llvm_in_fatal_on_oom()) { diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 82464c9889..232cf37d22 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -4925,9 +4925,9 @@ _copyExtensibleNode(const ExtensibleNode *from) * **************************************************************** */ static Integer * -_copyInteger(const Integer *from) +_copyInteger(const Integer * from) { - Integer *newnode = makeNode(Integer); + Integer *newnode = makeNode(Integer); COPY_SCALAR_FIELD(val); @@ -4935,7 +4935,7 @@ _copyInteger(const Integer *from) } static Float * -_copyFloat(const Float *from) +_copyFloat(const Float * from) { Float *newnode = makeNode(Float); @@ -4945,7 +4945,7 @@ _copyFloat(const Float *from) } static String * -_copyString(const String *from) +_copyString(const String * from) { String *newnode = makeNode(String); @@ -4955,9 +4955,9 @@ _copyString(const String *from) } static BitString * -_copyBitString(const BitString *from) +_copyBitString(const BitString * from) { - BitString *newnode = makeNode(BitString); + BitString *newnode = makeNode(BitString); COPY_STRING_FIELD(val); diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c index f537d3eb96..fca72c8e45 100644 --- a/src/backend/nodes/equalfuncs.c +++ b/src/backend/nodes/equalfuncs.c @@ -2437,8 +2437,7 @@ static bool _equalA_Const(const A_Const *a, const A_Const *b) { /* - * Hack for in-line val field. Also val is not valid is isnull is - * true. + * Hack for in-line val field. Also val is not valid is isnull is true. 
*/ if (!a->isnull && !b->isnull && !equal(&a->val, &b->val)) @@ -3122,7 +3121,7 @@ _equalList(const List *a, const List *b) */ static bool -_equalInteger(const Integer *a, const Integer *b) +_equalInteger(const Integer * a, const Integer * b) { COMPARE_SCALAR_FIELD(val); @@ -3130,7 +3129,7 @@ _equalInteger(const Integer *a, const Integer *b) } static bool -_equalFloat(const Float *a, const Float *b) +_equalFloat(const Float * a, const Float * b) { COMPARE_STRING_FIELD(val); @@ -3138,7 +3137,7 @@ _equalFloat(const Float *a, const Float *b) } static bool -_equalString(const String *a, const String *b) +_equalString(const String * a, const String * b) { COMPARE_STRING_FIELD(val); @@ -3146,7 +3145,7 @@ _equalString(const String *a, const String *b) } static bool -_equalBitString(const BitString *a, const BitString *b) +_equalBitString(const BitString * a, const BitString * b) { COMPARE_STRING_FIELD(val); diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index 2e5ed77e18..bd5aba02ef 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -3414,27 +3414,27 @@ _outA_Expr(StringInfo str, const A_Expr *node) } static void -_outInteger(StringInfo str, const Integer *node) +_outInteger(StringInfo str, const Integer * node) { appendStringInfo(str, "%d", node->val); } static void -_outFloat(StringInfo str, const Float *node) +_outFloat(StringInfo str, const Float * node) { /* - * We assume the value is a valid numeric literal and so does not - * need quoting. + * We assume the value is a valid numeric literal and so does not need + * quoting. */ appendStringInfoString(str, node->val); } static void -_outString(StringInfo str, const String *node) +_outString(StringInfo str, const String * node) { /* - * We use outToken to provide escaping of the string's content, - * but we don't want it to do anything with an empty string. + * We use outToken to provide escaping of the string's content, but we + * don't want it to do anything with an empty string. */ appendStringInfoChar(str, '"'); if (node->val[0] != '\0') @@ -3443,7 +3443,7 @@ _outString(StringInfo str, const String *node) } static void -_outBitString(StringInfo str, const BitString *node) +_outBitString(StringInfo str, const BitString * node) { /* internal representation already has leading 'b' */ appendStringInfoString(str, node->val); diff --git a/src/backend/nodes/value.c b/src/backend/nodes/value.c index 515f93c223..980245ed5d 100644 --- a/src/backend/nodes/value.c +++ b/src/backend/nodes/value.c @@ -22,7 +22,7 @@ Integer * makeInteger(int i) { - Integer *v = makeNode(Integer); + Integer *v = makeNode(Integer); v->val = i; return v; diff --git a/src/backend/optimizer/path/indxpath.c b/src/backend/optimizer/path/indxpath.c index 0e4e00eaf0..8475c18560 100644 --- a/src/backend/optimizer/path/indxpath.c +++ b/src/backend/optimizer/path/indxpath.c @@ -3517,7 +3517,7 @@ relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel, Assert(list_length(exprlist) == list_length(oprlist)); /* Short-circuit if no indexes... */ - if (rel->indexlist == NIL) + if (rel->indexlist == NIL && rel->partIndexlist == NIL) return false; /* @@ -3562,7 +3562,7 @@ relation_has_unique_index_for(PlannerInfo *root, RelOptInfo *rel, return false; /* Examine each index of the relation ... 
*/ - foreach(ic, rel->indexlist) + foreach(ic, list_concat_copy(rel->indexlist, rel->partIndexlist)) { IndexOptInfo *ind = (IndexOptInfo *) lfirst(ic); int c; diff --git a/src/backend/optimizer/plan/analyzejoins.c b/src/backend/optimizer/plan/analyzejoins.c index 37eb64bcef..3ea53d520f 100644 --- a/src/backend/optimizer/plan/analyzejoins.c +++ b/src/backend/optimizer/plan/analyzejoins.c @@ -23,6 +23,7 @@ #include "postgres.h" #include "nodes/nodeFuncs.h" +#include "nodes/nodes.h" #include "optimizer/clauses.h" #include "optimizer/joininfo.h" #include "optimizer/optimizer.h" @@ -598,7 +599,7 @@ rel_supports_distinctness(PlannerInfo *root, RelOptInfo *rel) */ ListCell *lc; - foreach(lc, rel->indexlist) + foreach(lc, list_concat_copy(rel->indexlist, rel->partIndexlist)) { IndexOptInfo *ind = (IndexOptInfo *) lfirst(lc); diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index c5194fdbbf..71c996582d 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -117,6 +117,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, Relation relation; bool hasindex; List *indexinfos = NIL; + List *partIndexinfos = NIL; /* * We need not lock the relation since it was already locked, either by @@ -163,7 +164,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, else hasindex = relation->rd_rel->relhasindex; - if (hasindex) + if (!(IgnoreSystemIndexes && IsSystemRelation(relation)) && relation->rd_rel->relhasindex) { List *indexoidlist; LOCKMODE lmode; @@ -212,10 +213,12 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, } /* - * Ignore partitioned indexes, since they are not usable for - * queries. + * Don't add partitioned indexes to the indexlist, since they are + * not usable by the executor. If they are unique, add them to the + * partIndexlist instead, for use in further join pruning. If they + * aren't unique, simply skip them. */ - if (indexRelation->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) + if (inhparent && (!index->indisunique || indexRelation->rd_rel->relkind != RELKIND_PARTITIONED_INDEX)) { index_close(indexRelation, NoLock); continue; } @@ -263,7 +266,40 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, info->indexcollations[i] = indexRelation->rd_indcollation[i]; } - info->relam = indexRelation->rd_rel->relam; + /* + * Fetch the index expressions and predicate, if any. We must + * modify the copies we obtain from the relcache to have the + * correct varno for the parent relation, so that they match up + * correctly against qual clauses. + */ + info->indexprs = RelationGetIndexExpressions(indexRelation); + info->indpred = RelationGetIndexPredicate(indexRelation); + if (info->indexprs && varno != 1) + ChangeVarNodes((Node *) info->indexprs, 1, varno, 0); + if (info->indpred && varno != 1) + ChangeVarNodes((Node *) info->indpred, 1, varno, 0); + + info->unique = index->indisunique; + info->immediate = index->indimmediate; + + /* + * Don't add partitioned indexes to the indexlist, add them to the + * partIndexlist instead, since they are not usable by the + * executor.
+ */ + if (indexRelation->rd_rel->relkind == RELKIND_PARTITIONED_INDEX) + { + index_close(indexRelation, NoLock); + partIndexinfos = lappend(partIndexinfos, info); + continue; + } + + info->hypothetical = false; + info->indrestrictinfo = NIL; /* set later, in indxpath.c */ + info->predOK = false; /* set later, in indxpath.c */ + + /* Build targetlist using the completed indexprs data */ + info->indextlist = build_index_tlist(root, info, relation); /* We copy just the fields we need, not all of rd_indam */ amroutine = indexRelation->rd_indam; @@ -283,6 +319,8 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, /* Fetch index opclass options */ info->opclassoptions = RelationGetIndexAttOptions(indexRelation, true); + info->relam = indexRelation->rd_rel->relam; + /* * Fetch the ordering information for the index, if any. */ @@ -369,28 +407,6 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, info->nulls_first = NULL; } - /* - * Fetch the index expressions and predicate, if any. We must - * modify the copies we obtain from the relcache to have the - * correct varno for the parent relation, so that they match up - * correctly against qual clauses. - */ - info->indexprs = RelationGetIndexExpressions(indexRelation); - info->indpred = RelationGetIndexPredicate(indexRelation); - if (info->indexprs && varno != 1) - ChangeVarNodes((Node *) info->indexprs, 1, varno, 0); - if (info->indpred && varno != 1) - ChangeVarNodes((Node *) info->indpred, 1, varno, 0); - - /* Build targetlist using the completed indexprs data */ - info->indextlist = build_index_tlist(root, info, relation); - - info->indrestrictinfo = NIL; /* set later, in indxpath.c */ - info->predOK = false; /* set later, in indxpath.c */ - info->unique = index->indisunique; - info->immediate = index->indimmediate; - info->hypothetical = false; - /* * Estimate the index size. If it's not a partial index, we lock * the number-of-tuples estimate to equal the parent table; if it @@ -440,6 +456,7 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, } rel->indexlist = indexinfos; + rel->partIndexlist = partIndexinfos; rel->statlist = get_relation_statistics(rel, relation); diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c index 146ee8dd1e..f165531a73 100644 --- a/src/backend/parser/analyze.c +++ b/src/backend/parser/analyze.c @@ -2039,8 +2039,8 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt, ListCell *ltl; ListCell *rtl; const char *context; - bool recursive = (pstate->p_parent_cte && - pstate->p_parent_cte->cterecursive); + bool recursive = (pstate->p_parent_cte && + pstate->p_parent_cte->cterecursive); context = (stmt->op == SETOP_UNION ? "UNION" : (stmt->op == SETOP_INTERSECT ? "INTERSECT" : @@ -2194,7 +2194,10 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt, setup_parser_errposition_callback(&pcbstate, pstate, bestlocation); - /* If it's a recursive union, we need to require hashing support. */ + /* + * If it's a recursive union, we need to require hashing + * support. 
+ */ op->groupClauses = lappend(op->groupClauses, makeSortGroupClauseForSetOp(rescoltype, recursive)); diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c index 078029ba1f..3e450053c5 100644 --- a/src/backend/parser/parse_clause.c +++ b/src/backend/parser/parse_clause.c @@ -1998,7 +1998,7 @@ findTargetlistEntrySQL92(ParseState *pstate, Node *node, List **tlist, } if (IsA(node, A_Const)) { - A_Const *aconst = castNode(A_Const, node); + A_Const *aconst = castNode(A_Const, node); int targetlist_pos = 0; int target_pos; diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c index fb49747f6e..3220d4808d 100644 --- a/src/backend/partitioning/partdesc.c +++ b/src/backend/partitioning/partdesc.c @@ -91,8 +91,8 @@ RelationGetPartitionDesc(Relation rel, bool omit_detached) * cached descriptor too. We determine that based on the pg_inherits.xmin * that was saved alongside that descriptor: if the xmin that was not in * progress for that active snapshot is also not in progress for the - * current active snapshot, then we can use it. Otherwise build one - * from scratch. + * current active snapshot, then we can use it. Otherwise build one from + * scratch. */ if (omit_detached && rel->rd_partdesc_nodetached && diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c index c05f500639..9d63ab4b1f 100644 --- a/src/backend/postmaster/bgworker.c +++ b/src/backend/postmaster/bgworker.c @@ -826,9 +826,9 @@ StartBackgroundWorker(void) /* * Create a per-backend PGPROC struct in shared memory, except in the - * EXEC_BACKEND case where this was done in SubPostmasterMain. We must - * do this before we can use LWLocks (and in the EXEC_BACKEND case we - * already had to do some stuff with LWLocks). + * EXEC_BACKEND case where this was done in SubPostmasterMain. We must do + * this before we can use LWLocks (and in the EXEC_BACKEND case we already + * had to do some stuff with LWLocks). */ #ifndef EXEC_BACKEND InitProcess(); diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index b7d0fbaefd..2d1d7e3ac5 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -263,7 +263,7 @@ typedef struct TwoPhasePgStatRecord PgStat_Counter deleted_pre_truncdrop; Oid t_id; /* table's OID */ bool t_shared; /* is it a shared catalog? */ - bool t_truncdropped; /* was the relation truncated/dropped? */ + bool t_truncdropped; /* was the relation truncated/dropped? */ } TwoPhasePgStatRecord; /* @@ -361,7 +361,7 @@ static void pgstat_recv_vacuum(PgStat_MsgVacuum *msg, int len); static void pgstat_recv_analyze(PgStat_MsgAnalyze *msg, int len); static void pgstat_recv_archiver(PgStat_MsgArchiver *msg, int len); static void pgstat_recv_bgwriter(PgStat_MsgBgWriter *msg, int len); -static void pgstat_recv_checkpointer(PgStat_MsgCheckpointer *msg, int len); +static void pgstat_recv_checkpointer(PgStat_MsgCheckpointer * msg, int len); static void pgstat_recv_wal(PgStat_MsgWal *msg, int len); static void pgstat_recv_slru(PgStat_MsgSLRU *msg, int len); static void pgstat_recv_funcstat(PgStat_MsgFuncstat *msg, int len); @@ -2530,11 +2530,11 @@ AtEOSubXact_PgStat_Relations(PgStat_SubXactStatus *xact_state, bool isCommit, in { /* * When there isn't an immediate parent state, we can just - * reuse the record instead of going through a - * palloc/pfree pushup (this works since it's all in - * TopTransactionContext anyway). 
We have to re-link it - * into the parent level, though, and that might mean - * pushing a new entry into the pgStatXactStack. + * reuse the record instead of going through a palloc/pfree + * pushup (this works since it's all in TopTransactionContext + * anyway). We have to re-link it into the parent level, + * though, and that might mean pushing a new entry into the + * pgStatXactStack. */ PgStat_SubXactStatus *upper_xact_state; @@ -3244,9 +3244,9 @@ pgstat_send_wal(bool force) WalUsage walusage; /* - * Calculate how much WAL usage counters were increased by - * subtracting the previous counters from the current ones. Fill the - * results in WAL stats message. + * Calculate how much WAL usage counters were increased by subtracting + * the previous counters from the current ones. Fill the results in + * WAL stats message. */ MemSet(&walusage, 0, sizeof(WalUsage)); WalUsageAccumDiff(&walusage, &pgWalUsage, &prevWalUsage); @@ -4077,7 +4077,7 @@ pgstat_read_statsfiles(Oid onlydb, bool permanent, bool deep) bool found; const char *statfile = permanent ? PGSTAT_STAT_PERMANENT_FILENAME : pgstat_stat_filename; int i; - TimestampTz ts; + TimestampTz ts; /* * The tables will live in pgStatLocalContext. @@ -5059,6 +5059,7 @@ pgstat_recv_tabstat(PgStat_MsgTabstat *msg, int len) tabentry->tuples_updated += tabmsg->t_counts.t_tuples_updated; tabentry->tuples_deleted += tabmsg->t_counts.t_tuples_deleted; tabentry->tuples_hot_updated += tabmsg->t_counts.t_tuples_hot_updated; + /* * If table was truncated/dropped, first reset the live/dead * counters. @@ -5221,7 +5222,10 @@ pgstat_recv_resetsharedcounter(PgStat_MsgResetsharedcounter *msg, int len) { if (msg->m_resettarget == RESET_BGWRITER) { - /* Reset the global, bgwriter and checkpointer statistics for the cluster. */ + /* + * Reset the global, bgwriter and checkpointer statistics for the + * cluster. + */ memset(&globalStats, 0, sizeof(globalStats)); globalStats.bgwriter.stat_reset_timestamp = GetCurrentTimestamp(); } @@ -5501,7 +5505,7 @@ pgstat_recv_bgwriter(PgStat_MsgBgWriter *msg, int len) * ---------- */ static void -pgstat_recv_checkpointer(PgStat_MsgCheckpointer *msg, int len) +pgstat_recv_checkpointer(PgStat_MsgCheckpointer * msg, int len) { globalStats.checkpointer.timed_checkpoints += msg->m_timed_checkpoints; globalStats.checkpointer.requested_checkpoints += msg->m_requested_checkpoints; diff --git a/src/backend/postmaster/startup.c b/src/backend/postmaster/startup.c index 28e68dd871..1ddc942991 100644 --- a/src/backend/postmaster/startup.c +++ b/src/backend/postmaster/startup.c @@ -73,7 +73,7 @@ static volatile sig_atomic_t startup_progress_timer_expired = false; /* * Time between progress updates for long-running startup operations. 
*/ -int log_startup_progress_interval = 10000; /* 10 sec */ +int log_startup_progress_interval = 10000; /* 10 sec */ /* Signal handlers */ static void StartupProcTriggerHandler(SIGNAL_ARGS); diff --git a/src/backend/statistics/dependencies.c b/src/backend/statistics/dependencies.c index 8bf80db8e4..abb0a095bf 100644 --- a/src/backend/statistics/dependencies.c +++ b/src/backend/statistics/dependencies.c @@ -354,7 +354,7 @@ statext_dependencies_build(StatsBuildData *data) /* result */ MVDependencies *dependencies = NULL; - MemoryContext cxt; + MemoryContext cxt; Assert(data->nattnums >= 2); diff --git a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c index b350fc5f7b..18c7be8195 100644 --- a/src/backend/statistics/mcv.c +++ b/src/backend/statistics/mcv.c @@ -1619,7 +1619,7 @@ mcv_get_match_bitmap(PlannerInfo *root, List *clauses, Assert(mcvlist->nitems <= STATS_MCVLIST_MAX_ITEMS); matches = palloc(sizeof(bool) * mcvlist->nitems); - memset(matches, !is_or, sizeof(bool) * mcvlist->nitems); + memset(matches, !is_or, sizeof(bool) * mcvlist->nitems); /* * Loop through the list of clauses, and for each of them evaluate all the diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c index 08ebabfe96..6003431e2c 100644 --- a/src/backend/storage/buffer/bufmgr.c +++ b/src/backend/storage/buffer/bufmgr.c @@ -666,9 +666,8 @@ ReadRecentBuffer(RelFileNode rnode, ForkNumber forkNum, BlockNumber blockNum, { /* * It's now safe to pin the buffer. We can't pin first and ask - * questions later, because it might confuse code paths - * like InvalidateBuffer() if we pinned a random non-matching - * buffer. + * questions later, because it might confuse code paths like + * InvalidateBuffer() if we pinned a random non-matching buffer. */ if (have_private_ref) PinBuffer(bufHdr, NULL); /* bump pin count */ diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c index cb1a8dd34f..40dc4d570f 100644 --- a/src/backend/storage/file/fd.c +++ b/src/backend/storage/file/fd.c @@ -911,7 +911,7 @@ InitFileAccess(void) void InitTemporaryFileAccess(void) { - Assert(SizeVfdCache != 0); /* InitFileAccess() needs to have run*/ + Assert(SizeVfdCache != 0); /* InitFileAccess() needs to have run */ Assert(!temporary_files_allowed); /* call me only once */ /* diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c index bd3c7a47fe..eda80c8d97 100644 --- a/src/backend/storage/ipc/procarray.c +++ b/src/backend/storage/ipc/procarray.c @@ -255,7 +255,7 @@ typedef enum GlobalVisHorizonKind VISHORIZON_CATALOG, VISHORIZON_DATA, VISHORIZON_TEMP -} GlobalVisHorizonKind; +} GlobalVisHorizonKind; static ProcArrayStruct *procArray; diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c index b4ce9629d4..9bcdb5bfa5 100644 --- a/src/backend/storage/ipc/shm_mq.c +++ b/src/backend/storage/ipc/shm_mq.c @@ -534,9 +534,9 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait, } /* - * If the caller has requested force flush or we have written more than 1/4 - * of the ring size, mark it as written in shared memory and notify the - * receiver. + * If the caller has requested force flush or we have written more than + * 1/4 of the ring size, mark it as written in shared memory and notify + * the receiver. 
*/ if (force_flush || mqh->mqh_send_pending > (mq->mq_ring_size >> 2)) { diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c index c25af7fe09..653b371206 100644 --- a/src/backend/storage/lmgr/lock.c +++ b/src/backend/storage/lmgr/lock.c @@ -3237,7 +3237,7 @@ CheckForSessionAndXactLocks(void) LOCKTAG lock; /* identifies the lockable object */ bool sessLock; /* is any lockmode held at session level? */ bool xactLock; /* is any lockmode held at xact level? */ - } PerLockTagEntry; + } PerLockTagEntry; HASHCTL hash_ctl; HTAB *lockhtab; diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c index 660e854e93..58d49179cc 100644 --- a/src/backend/utils/adt/arrayfuncs.c +++ b/src/backend/utils/adt/arrayfuncs.c @@ -3996,7 +3996,8 @@ hash_array(PG_FUNCTION_ARGS) /* * Make fake type cache entry structure. Note that we can't just - * modify typentry, since that points directly into the type cache. + * modify typentry, since that points directly into the type + * cache. */ record_typentry = palloc0(sizeof(*record_typentry)); record_typentry->type_id = element_type; diff --git a/src/backend/utils/adt/multirangetypes.c b/src/backend/utils/adt/multirangetypes.c index 7773215564..f3d2e52dbd 100644 --- a/src/backend/utils/adt/multirangetypes.c +++ b/src/backend/utils/adt/multirangetypes.c @@ -2651,7 +2651,7 @@ multirange_unnest(PG_FUNCTION_ARGS) MultirangeType *mr; TypeCacheEntry *typcache; int index; - } multirange_unnest_fctx; + } multirange_unnest_fctx; FuncCallContext *funcctx; multirange_unnest_fctx *fctx; diff --git a/src/backend/utils/adt/rangetypes_spgist.c b/src/backend/utils/adt/rangetypes_spgist.c index 912b43f083..9395b32576 100644 --- a/src/backend/utils/adt/rangetypes_spgist.c +++ b/src/backend/utils/adt/rangetypes_spgist.c @@ -608,8 +608,8 @@ spg_range_quad_inner_consistent(PG_FUNCTION_ARGS) /* * Non-empty range A contains non-empty range B if lower * bound of A is lower or equal to lower bound of range B - * and upper bound of range A is greater than or equal to upper - * bound of range A. + * and upper bound of range A is greater than or equal to + * upper bound of range A. * * All non-empty ranges contain an empty range. */ diff --git a/src/backend/utils/cache/inval.c b/src/backend/utils/cache/inval.c index 777fab4915..24972e3a71 100644 --- a/src/backend/utils/cache/inval.c +++ b/src/backend/utils/cache/inval.c @@ -166,7 +166,7 @@ typedef struct InvalMessageArray { SharedInvalidationMessage *msgs; /* palloc'd array (can be expanded) */ int maxmsgs; /* current allocated size of array */ -} InvalMessageArray; +} InvalMessageArray; static InvalMessageArray InvalMessageArrays[2]; @@ -175,7 +175,7 @@ typedef struct InvalidationMsgsGroup { int firstmsg[2]; /* first index in relevant array */ int nextmsg[2]; /* last+1 index */ -} InvalidationMsgsGroup; +} InvalidationMsgsGroup; /* Macros to help preserve InvalidationMsgsGroup abstraction */ #define SetSubGroupToFollow(targetgroup, priorgroup, subgroup) \ @@ -287,7 +287,7 @@ static int relcache_callback_count = 0; * subgroup must be CatCacheMsgs or RelCacheMsgs. */ static void -AddInvalidationMessage(InvalidationMsgsGroup *group, int subgroup, +AddInvalidationMessage(InvalidationMsgsGroup * group, int subgroup, const SharedInvalidationMessage *msg) { InvalMessageArray *ima = &InvalMessageArrays[subgroup]; @@ -327,8 +327,8 @@ AddInvalidationMessage(InvalidationMsgsGroup *group, int subgroup, * the source subgroup to empty. 
*/ static void -AppendInvalidationMessageSubGroup(InvalidationMsgsGroup *dest, - InvalidationMsgsGroup *src, +AppendInvalidationMessageSubGroup(InvalidationMsgsGroup * dest, + InvalidationMsgsGroup * src, int subgroup) { /* Messages must be adjacent in main array */ @@ -392,7 +392,7 @@ AppendInvalidationMessageSubGroup(InvalidationMsgsGroup *dest, * Add a catcache inval entry */ static void -AddCatcacheInvalidationMessage(InvalidationMsgsGroup *group, +AddCatcacheInvalidationMessage(InvalidationMsgsGroup * group, int id, uint32 hashValue, Oid dbId) { SharedInvalidationMessage msg; @@ -420,7 +420,7 @@ AddCatcacheInvalidationMessage(InvalidationMsgsGroup *group, * Add a whole-catalog inval entry */ static void -AddCatalogInvalidationMessage(InvalidationMsgsGroup *group, +AddCatalogInvalidationMessage(InvalidationMsgsGroup * group, Oid dbId, Oid catId) { SharedInvalidationMessage msg; @@ -438,7 +438,7 @@ AddCatalogInvalidationMessage(InvalidationMsgsGroup *group, * Add a relcache inval entry */ static void -AddRelcacheInvalidationMessage(InvalidationMsgsGroup *group, +AddRelcacheInvalidationMessage(InvalidationMsgsGroup * group, Oid dbId, Oid relId) { SharedInvalidationMessage msg; @@ -470,7 +470,7 @@ AddRelcacheInvalidationMessage(InvalidationMsgsGroup *group, * We put these into the relcache subgroup for simplicity. */ static void -AddSnapshotInvalidationMessage(InvalidationMsgsGroup *group, +AddSnapshotInvalidationMessage(InvalidationMsgsGroup * group, Oid dbId, Oid relId) { SharedInvalidationMessage msg; @@ -497,8 +497,8 @@ AddSnapshotInvalidationMessage(InvalidationMsgsGroup *group, * the source group to empty. */ static void -AppendInvalidationMessages(InvalidationMsgsGroup *dest, - InvalidationMsgsGroup *src) +AppendInvalidationMessages(InvalidationMsgsGroup * dest, + InvalidationMsgsGroup * src) { AppendInvalidationMessageSubGroup(dest, src, CatCacheMsgs); AppendInvalidationMessageSubGroup(dest, src, RelCacheMsgs); @@ -511,7 +511,7 @@ AppendInvalidationMessages(InvalidationMsgsGroup *dest, * catcache entries are processed first, for reasons mentioned above. */ static void -ProcessInvalidationMessages(InvalidationMsgsGroup *group, +ProcessInvalidationMessages(InvalidationMsgsGroup * group, void (*func) (SharedInvalidationMessage *msg)) { ProcessMessageSubGroup(group, CatCacheMsgs, func(msg)); @@ -523,7 +523,7 @@ ProcessInvalidationMessages(InvalidationMsgsGroup *group, * rather than just one at a time. */ static void -ProcessInvalidationMessagesMulti(InvalidationMsgsGroup *group, +ProcessInvalidationMessagesMulti(InvalidationMsgsGroup * group, void (*func) (const SharedInvalidationMessage *msgs, int n)) { ProcessMessageSubGroupMulti(group, CatCacheMsgs, func(msgs, n)); diff --git a/src/bin/pg_amcheck/pg_amcheck.c b/src/bin/pg_amcheck/pg_amcheck.c index d4a53c8e63..a3c13fa27e 100644 --- a/src/bin/pg_amcheck/pg_amcheck.c +++ b/src/bin/pg_amcheck/pg_amcheck.c @@ -1100,17 +1100,17 @@ verify_btree_slot_handler(PGresult *res, PGconn *conn, void *context) if (PQresultStatus(res) == PGRES_TUPLES_OK) { - int ntups = PQntuples(res); + int ntups = PQntuples(res); if (ntups > 1) { /* * We expect the btree checking functions to return one void row * each, or zero rows if the check was skipped due to the object - * being in the wrong state to be checked, so we should output some - * sort of warning if we get anything more, not because it - * indicates corruption, but because it suggests a mismatch between - * amcheck and pg_amcheck versions. 
+ * being in the wrong state to be checked, so we should output + * some sort of warning if we get anything more, not because it + * indicates corruption, but because it suggests a mismatch + * between amcheck and pg_amcheck versions. * * In conjunction with --progress, anything written to stderr at * this time would present strangely to the user without an extra diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index 27ee6394cf..19b7b1d412 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -1888,26 +1888,26 @@ BaseBackup(void) } if (maxrate > 0) AppendIntegerCommandOption(&buf, use_new_option_syntax, "MAX_RATE", - maxrate); + maxrate); if (format == 't') AppendPlainCommandOption(&buf, use_new_option_syntax, "TABLESPACE_MAP"); if (!verify_checksums) { if (use_new_option_syntax) AppendIntegerCommandOption(&buf, use_new_option_syntax, - "VERIFY_CHECKSUMS", 0); + "VERIFY_CHECKSUMS", 0); else AppendPlainCommandOption(&buf, use_new_option_syntax, - "NOVERIFY_CHECKSUMS"); + "NOVERIFY_CHECKSUMS"); } if (manifest) { AppendStringCommandOption(&buf, use_new_option_syntax, "MANIFEST", - manifest_force_encode ? "force-encode" : "yes"); + manifest_force_encode ? "force-encode" : "yes"); if (manifest_checksums != NULL) AppendStringCommandOption(&buf, use_new_option_syntax, - "MANIFEST_CHECKSUMS", manifest_checksums); + "MANIFEST_CHECKSUMS", manifest_checksums); } if (verbose) diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c index 2a3e0c688f..0690393913 100644 --- a/src/bin/pg_basebackup/streamutil.c +++ b/src/bin/pg_basebackup/streamutil.c @@ -625,7 +625,7 @@ CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, /* pg_recvlogical doesn't use an exported snapshot, so suppress */ if (use_new_option_syntax) AppendStringCommandOption(query, use_new_option_syntax, - "SNAPSHOT", "nothing"); + "SNAPSHOT", "nothing"); else AppendPlainCommandOption(query, use_new_option_syntax, "NOEXPORT_SNAPSHOT"); diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c index 7fbbe7022e..7fd1d62d02 100644 --- a/src/bin/pg_ctl/pg_ctl.c +++ b/src/bin/pg_ctl/pg_ctl.c @@ -1748,7 +1748,7 @@ typedef BOOL (WINAPI * __QueryInformationJobObject) (HANDLE, JOBOBJECTINFOCLASS, * achieves the goal of postmaster running in a similar environment as pg_ctl. */ static void -InheritStdHandles(STARTUPINFO* si) +InheritStdHandles(STARTUPINFO *si) { si->dwFlags |= STARTF_USESTDHANDLES; si->hStdInput = GetStdHandle(STD_INPUT_HANDLE); @@ -1800,8 +1800,8 @@ CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, bool as_ser si.cb = sizeof(si); /* - * Set stdin/stdout/stderr handles to be inherited in the child - * process. That allows postmaster and the processes it starts to perform + * Set stdin/stdout/stderr handles to be inherited in the child process. + * That allows postmaster and the processes it starts to perform * additional checks to see if running in a service (otherwise they get * the default console handles - which point to "somewhere"). 
*/ diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c index ecab0a9e4e..3e091a754e 100644 --- a/src/bin/pg_dump/common.c +++ b/src/bin/pg_dump/common.c @@ -59,7 +59,7 @@ typedef struct _catalogIdMapEntry uint32 hashval; /* hash code for the CatalogId */ DumpableObject *dobj; /* the associated DumpableObject, if any */ ExtensionInfo *ext; /* owning extension, if any */ -} CatalogIdMapEntry; +} CatalogIdMapEntry; #define SH_PREFIX catalogid #define SH_ELEMENT_TYPE CatalogIdMapEntry @@ -77,7 +77,7 @@ typedef struct _catalogIdMapEntry #define CATALOGIDHASH_INITIAL_SIZE 10000 -static catalogid_hash *catalogIdHash = NULL; +static catalogid_hash * catalogIdHash = NULL; static void flagInhTables(Archive *fout, TableInfo *tbinfo, int numTables, InhInfo *inhinfo, int numInherits); diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index d17f69333f..2e34516d3e 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -4158,7 +4158,7 @@ initGenerateDataClientSide(PGconn *con) PGresult *res; int i; int64 k; - char *copy_statement; + char *copy_statement; /* used to track elapsed time and estimate of the remaining time */ pg_time_usec_t start; diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h index fef9945ed8..6d609819a7 100644 --- a/src/include/catalog/pg_class.h +++ b/src/include/catalog/pg_class.h @@ -198,7 +198,7 @@ DECLARE_INDEX(pg_class_tblspc_relfilenode_index, 3455, ClassTblspcRelfilenodeInd (relkind) == RELKIND_TOASTVALUE || \ (relkind) == RELKIND_MATVIEW) -extern int errdetail_relkind_not_supported(char relkind); +extern int errdetail_relkind_not_supported(char relkind); #endif /* EXPOSE_TO_CLIENT_CODE */ diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index 7c657c1241..4cd0004a11 100644 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -670,7 +670,8 @@ extern bool equal(const void *a, const void *b); */ typedef double Selectivity; /* fraction of tuples a qualifier will pass */ typedef double Cost; /* execution cost (in page-access units) */ -typedef double Cardinality; /* (estimated) number of rows or other integer count */ +typedef double Cardinality; /* (estimated) number of rows or other integer + * count */ /* diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index 49123e28a4..8e8efc08ea 100644 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -295,6 +295,7 @@ typedef struct A_Expr typedef struct A_Const { NodeTag type; + /* * Value nodes are inline for performance. You can treat 'val' as a node, * as in IsA(&val, Integer). 'val' is not valid if isnull is true. 
@@ -756,7 +757,8 @@ typedef struct DefElem NodeTag type; char *defnamespace; /* NULL if unqualified name */ char *defname; - Node *arg; /* typically Integer, Float, String, or TypeName */ + Node *arg; /* typically Integer, Float, String, or + * TypeName */ DefElemAction defaction; /* unspecified action, or SET/ADD/DROP */ int location; /* token location, or -1 if unknown */ } DefElem; @@ -1144,7 +1146,7 @@ typedef struct RangeTblEntry * Fields valid for ENR RTEs (else NULL/zero): */ char *enrname; /* name of ephemeral named relation */ - Cardinality enrtuples; /* estimated or actual from caller */ + Cardinality enrtuples; /* estimated or actual from caller */ /* * Fields valid in all RTEs: diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h index 2a53a6e344..66389185cf 100644 --- a/src/include/nodes/pathnodes.h +++ b/src/include/nodes/pathnodes.h @@ -334,11 +334,11 @@ struct PlannerInfo MemoryContext planner_cxt; /* context holding PlannerInfo */ - Cardinality total_table_pages; /* # of pages in all non-dummy tables of + Cardinality total_table_pages; /* # of pages in all non-dummy tables of * query */ - Selectivity tuple_fraction; /* tuple_fraction passed to query_planner */ - Cardinality limit_tuples; /* limit_tuples passed to query_planner */ + Selectivity tuple_fraction; /* tuple_fraction passed to query_planner */ + Cardinality limit_tuples; /* limit_tuples passed to query_planner */ Index qual_security_level; /* minimum security_level for quals */ /* Note: qual_security_level is zero if there are no securityQuals */ @@ -681,7 +681,7 @@ typedef struct RelOptInfo Relids relids; /* set of base relids (rangetable indexes) */ /* size estimates generated by planner */ - Cardinality rows; /* estimated number of result tuples */ + Cardinality rows; /* estimated number of result tuples */ /* per-relation planner control flags */ bool consider_startup; /* keep cheap-startup-cost paths? 
*/ @@ -716,9 +716,10 @@ typedef struct RelOptInfo List *lateral_vars; /* LATERAL Vars and PHVs referenced by rel */ Relids lateral_referencers; /* rels that reference me laterally */ List *indexlist; /* list of IndexOptInfo */ + List *partIndexlist; /* list of IndexOptInfo */ List *statlist; /* list of StatisticExtInfo */ BlockNumber pages; /* size estimates derived from pg_class */ - Cardinality tuples; + Cardinality tuples; double allvisfrac; Bitmapset *eclass_indexes; /* Indexes in PlannerInfo's eq_classes list of * ECs that mention this rel */ @@ -841,7 +842,7 @@ struct IndexOptInfo /* index-size statistics (from pg_class and elsewhere) */ BlockNumber pages; /* number of disk pages in index */ - Cardinality tuples; /* number of index tuples in index */ + Cardinality tuples; /* number of index tuples in index */ int tree_height; /* index tree height, or -1 if unknown */ /* index descriptor information */ @@ -1139,7 +1140,7 @@ typedef struct ParamPathInfo NodeTag type; Relids ppi_req_outer; /* rels supplying parameters used by path */ - Cardinality ppi_rows; /* estimated number of result tuples */ + Cardinality ppi_rows; /* estimated number of result tuples */ List *ppi_clauses; /* join clauses available from outer rels */ } ParamPathInfo; @@ -1189,7 +1190,7 @@ typedef struct Path int parallel_workers; /* desired # of workers; 0 = not parallel */ /* estimated size/costs for path (see costsize.c for more info) */ - Cardinality rows; /* estimated number of result tuples */ + Cardinality rows; /* estimated number of result tuples */ Cost startup_cost; /* cost expended before fetching any tuples */ Cost total_cost; /* total cost (assuming all tuples fetched) */ @@ -1452,7 +1453,7 @@ typedef struct AppendPath List *subpaths; /* list of component Paths */ /* Index of first partial path in subpaths; list_length(subpaths) if none */ int first_partial_path; - Cardinality limit_tuples; /* hard limit on output tuples, or -1 */ + Cardinality limit_tuples; /* hard limit on output tuples, or -1 */ } AppendPath; #define IS_DUMMY_APPEND(p) \ @@ -1474,7 +1475,7 @@ typedef struct MergeAppendPath { Path path; List *subpaths; /* list of component Paths */ - Cardinality limit_tuples; /* hard limit on output tuples, or -1 */ + Cardinality limit_tuples; /* hard limit on output tuples, or -1 */ } MergeAppendPath; /* @@ -1515,7 +1516,7 @@ typedef struct MemoizePath List *param_exprs; /* cache keys */ bool singlerow; /* true if the cache entry is to be marked as * complete after caching the first record. 
*/ - Cardinality calls; /* expected number of rescans */ + Cardinality calls; /* expected number of rescans */ uint32 est_entries; /* The maximum number of entries that the * planner expects will fit in the cache, or 0 * if unknown */ @@ -1667,7 +1668,7 @@ typedef struct HashPath JoinPath jpath; List *path_hashclauses; /* join clauses used for hashing */ int num_batches; /* number of batches expected */ - Cardinality inner_rows_total; /* total inner rows expected */ + Cardinality inner_rows_total; /* total inner rows expected */ } HashPath; /* @@ -1770,7 +1771,7 @@ typedef struct AggPath Path *subpath; /* path representing input source */ AggStrategy aggstrategy; /* basic strategy, see nodes.h */ AggSplit aggsplit; /* agg-splitting mode, see nodes.h */ - Cardinality numGroups; /* estimated number of groups in input */ + Cardinality numGroups; /* estimated number of groups in input */ uint64 transitionSpace; /* for pass-by-ref transition data */ List *groupClause; /* a list of SortGroupClause's */ List *qual; /* quals (HAVING quals), if any */ @@ -1784,7 +1785,7 @@ typedef struct GroupingSetData { NodeTag type; List *set; /* grouping set as list of sortgrouprefs */ - Cardinality numGroups; /* est. number of result groups */ + Cardinality numGroups; /* est. number of result groups */ } GroupingSetData; typedef struct RollupData @@ -1793,7 +1794,7 @@ typedef struct RollupData List *groupClause; /* applicable subset of parse->groupClause */ List *gsets; /* lists of integer indexes into groupClause */ List *gsets_data; /* list of GroupingSetData */ - Cardinality numGroups; /* est. number of result groups */ + Cardinality numGroups; /* est. number of result groups */ bool hashable; /* can be hashed */ bool is_hashed; /* to be implemented as a hashagg */ } RollupData; @@ -1844,7 +1845,7 @@ typedef struct SetOpPath List *distinctList; /* SortGroupClauses identifying target cols */ AttrNumber flagColIdx; /* where is the flag column, if any */ int firstFlag; /* flag value for first input relation */ - Cardinality numGroups; /* estimated number of groups in input */ + Cardinality numGroups; /* estimated number of groups in input */ } SetOpPath; /* @@ -1857,7 +1858,7 @@ typedef struct RecursiveUnionPath Path *rightpath; List *distinctList; /* SortGroupClauses identifying target cols */ int wtParam; /* ID of Param representing work table */ - Cardinality numGroups; /* estimated number of groups in input */ + Cardinality numGroups; /* estimated number of groups in input */ } RecursiveUnionPath; /* @@ -2612,7 +2613,7 @@ typedef struct typedef struct { bool limit_needed; - Cardinality limit_tuples; + Cardinality limit_tuples; int64 count_est; int64 offset_est; } FinalPathExtraData; @@ -2643,15 +2644,15 @@ typedef struct JoinCostWorkspace Cost inner_rescan_run_cost; /* private for cost_mergejoin code */ - Cardinality outer_rows; - Cardinality inner_rows; - Cardinality outer_skip_rows; - Cardinality inner_skip_rows; + Cardinality outer_rows; + Cardinality inner_rows; + Cardinality outer_skip_rows; + Cardinality inner_skip_rows; /* private for cost_hashjoin code */ int numbuckets; int numbatches; - Cardinality inner_rows_total; + Cardinality inner_rows_total; } JoinCostWorkspace; /* diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index 01a246d50e..f0e15e4907 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -120,7 +120,7 @@ typedef struct Plan /* * planner's estimate of result size of this plan step */ - Cardinality plan_rows; /* number of rows plan 
is expected to emit */ + Cardinality plan_rows; /* number of rows plan is expected to emit */ int plan_width; /* average row width in bytes */ /* @@ -976,7 +976,7 @@ typedef struct Hash AttrNumber skewColumn; /* outer join key's column #, or zero */ bool skewInherit; /* is outer join rel an inheritance tree? */ /* all other info is in the parent HashJoin node */ - Cardinality rows_total; /* estimate total rows if parallel_aware */ + Cardinality rows_total; /* estimate total rows if parallel_aware */ } Hash; /* ---------------- diff --git a/src/include/nodes/value.h b/src/include/nodes/value.h index 8b71b510eb..67b5f66b58 100644 --- a/src/include/nodes/value.h +++ b/src/include/nodes/value.h @@ -29,7 +29,7 @@ typedef struct Integer { NodeTag type; int val; -} Integer; +} Integer; /* * Float is internally represented as string. Using T_Float as the node type @@ -46,27 +46,27 @@ typedef struct Float { NodeTag type; char *val; -} Float; +} Float; typedef struct String { NodeTag type; char *val; -} String; +} String; typedef struct BitString { NodeTag type; char *val; -} BitString; +} BitString; #define intVal(v) (castNode(Integer, v)->val) #define floatVal(v) atof(castNode(Float, v)->val) #define strVal(v) (castNode(String, v)->val) -extern Integer *makeInteger(int i); -extern Float *makeFloat(char *numericStr); -extern String *makeString(char *str); -extern BitString *makeBitString(char *str); +extern Integer * makeInteger(int i); +extern Float * makeFloat(char *numericStr); +extern String * makeString(char *str); +extern BitString * makeBitString(char *str); #endif /* VALUE_H */ diff --git a/src/include/pgstat.h b/src/include/pgstat.h index bcd3588ea2..17785f7e57 100644 --- a/src/include/pgstat.h +++ b/src/include/pgstat.h @@ -186,8 +186,8 @@ typedef struct PgStat_TableXactStatus PgStat_Counter tuples_inserted; /* tuples inserted in (sub)xact */ PgStat_Counter tuples_updated; /* tuples updated in (sub)xact */ PgStat_Counter tuples_deleted; /* tuples deleted in (sub)xact */ - bool truncdropped; /* relation truncated/dropped in this - * (sub)xact */ + bool truncdropped; /* relation truncated/dropped in this + * (sub)xact */ /* tuples i/u/d prior to truncate/drop */ PgStat_Counter inserted_pre_truncdrop; PgStat_Counter updated_pre_truncdrop; @@ -477,7 +477,7 @@ typedef struct PgStat_MsgCheckpointer PgStat_Counter m_buf_fsync_backend; PgStat_Counter m_checkpoint_write_time; /* times in milliseconds */ PgStat_Counter m_checkpoint_sync_time; -} PgStat_MsgCheckpointer; +} PgStat_MsgCheckpointer; /* ---------- * PgStat_MsgWal Sent by backends and background processes to update WAL statistics. 
@@ -853,7 +853,7 @@ typedef struct PgStat_BgWriterStats PgStat_Counter maxwritten_clean; PgStat_Counter buf_alloc; TimestampTz stat_reset_timestamp; -} PgStat_BgWriterStats; +} PgStat_BgWriterStats; /* * Checkpointer statistics kept in the stats collector @@ -868,7 +868,7 @@ typedef struct PgStat_CheckpointerStats PgStat_Counter buf_written_checkpoints; PgStat_Counter buf_written_backend; PgStat_Counter buf_fsync_backend; -} PgStat_CheckpointerStats; +} PgStat_CheckpointerStats; /* * Global statistics kept in the stats collector @@ -1130,8 +1130,8 @@ extern PgStat_StatDBEntry *pgstat_fetch_stat_dbentry(Oid dbid); extern PgStat_StatTabEntry *pgstat_fetch_stat_tabentry(Oid relid); extern PgStat_StatFuncEntry *pgstat_fetch_stat_funcentry(Oid funcid); extern PgStat_ArchiverStats *pgstat_fetch_stat_archiver(void); -extern PgStat_BgWriterStats *pgstat_fetch_stat_bgwriter(void); -extern PgStat_CheckpointerStats *pgstat_fetch_stat_checkpointer(void); +extern PgStat_BgWriterStats * pgstat_fetch_stat_bgwriter(void); +extern PgStat_CheckpointerStats * pgstat_fetch_stat_checkpointer(void); extern PgStat_GlobalStats *pgstat_fetch_global(void); extern PgStat_WalStats *pgstat_fetch_stat_wal(void); extern PgStat_SLRUStats *pgstat_fetch_slru(void); diff --git a/src/include/postmaster/startup.h b/src/include/postmaster/startup.h index 2fb208bdb5..db7fb0db7f 100644 --- a/src/include/postmaster/startup.h +++ b/src/include/postmaster/startup.h @@ -23,7 +23,7 @@ ereport(LOG, errmsg(msg, secs, (usecs / 10000), __VA_ARGS__ )); \ } while(0) -extern int log_startup_progress_interval; +extern int log_startup_progress_interval; extern void HandleStartupProcInterrupts(void); extern void StartupProcessMain(void) pg_attribute_noreturn(); diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out index 27f7525b3e..01d6b26759 100644 --- a/src/test/regress/expected/partition_join.out +++ b/src/test/regress/expected/partition_join.out @@ -459,6 +459,19 @@ SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL 550 | | (12 rows) +-- join pruning +CREATE UNIQUE INDEX prt1_p1_a_idx ON prt1 (a); +EXPLAIN (COSTS OFF) +SELECT t1.a FROM prt1 t1 LEFT JOIN prt1 USING (a); + QUERY PLAN +-------------------------------- + Append + -> Seq Scan on prt1_p1 t1_1 + -> Seq Scan on prt1_p2 t1_2 + -> Seq Scan on prt1_p3 t1_3 +(4 rows) + +DROP INDEX prt1_p1_a_idx; -- bug with inadequate sort key representation SET enable_partitionwise_aggregate TO true; SET enable_hashjoin TO false; diff --git a/src/test/regress/sql/partition_join.sql b/src/test/regress/sql/partition_join.sql index d97b5b69ff..e39c05231a 100644 --- a/src/test/regress/sql/partition_join.sql +++ b/src/test/regress/sql/partition_join.sql @@ -90,6 +90,13 @@ SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL (SELECT t2.a AS t2a, t3.a AS t3a, t2.b t2b, t2.c t2c, least(t1.a,t2.a,t3.a) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss ON t1.c = ss.t2c WHERE (t1.b + coalesce(ss.t2b, 0)) = 0 ORDER BY t1.a; +-- join pruning +CREATE UNIQUE INDEX prt1_p1_a_idx ON prt1 (a); + +EXPLAIN (COSTS OFF) +SELECT t1.a FROM prt1 t1 LEFT JOIN prt1 USING (a); + +DROP INDEX prt1_p1_a_idx; -- bug with inadequate sort key representation SET enable_partitionwise_aggregate TO true;