Hello hackers,

Please consider fixing the following typos and inconsistencies living in
the source code starting from v11:
3.1 add_placeholders_to_child_joinrel -> remove (orphaned after 7cfdc770)
3.2 AttachIndexInfo -> IndexAttachInfo (an internal inconsistency)
3.3 BlockRecordInfo -> BlockInfoRecord (an internal inconsistency)
3.4 bount -> bound (a typo)
3.5 CopyBoth -> CopyBothResponse (an internal inconsistency)
3.6 directy -> directory (a typo)
3.7 ExecCreateSlotFromOuterPlan -> ExecCreateScanSlotFromOuterPlan (an
internal inconsistency)
3.8 es_epqTuple -> es_epqTupleSlot (an internal inconsistency)
3.9 ExecHashTableParallelInsert -> ExecParallelHashTableInsert (an
internal inconsistency)
3.10 ExecMakeFunctionResult -> ExecMakeFunctionResultSet (an internal
inconsistency)
3.11 fmgr_builtins_oid_index -> fmgr_builtin_oid_index (an internal
inconsistency)
3.12 freeScanStack -> remove (irrelevant after 2a636834, v12 only)
3.13 GetTupleTrigger -> GetTupleForTrigger (an internal inconsistency)
3.14 grow_barrier -> grow_batches_barrier (an internal inconsistency)
3.15 HAVE__BUIILTIN_CLZ -> HAVE__BUILTIN_CLZ (a typo, v12 only)
3.16 ignored_killed_tuples -> ignore_killed_tuples (a typo)
3.17 intset_tests_stats -> intset_test_stats (an internal inconsistency,
v12 only)
3.18 is_aggregate -> objtype (needed to determine error handling and
required result type) (an internal inconsistency)
3.19 iterate_json_string_values -> iterate_json_values (renamed in 1c1791e0)
3.20 $log_number -> remove (not used since introduction in ed8a7c6f)
3.21 mechinism -> mechanism (a typo)
3.22 new_node, new_node_item -> child, child_key (an internal
inconsistency, v12 only)
3.23 new_part_constaints -> new_part_constraints (a typo)
3.24 parentIndexOid -> parentIndexId (for the sake of consistency, but
this argument is still unused since 8b08f7d4)
3.25 particiant -> participant (a typo)
3.26 PathNameCreateShared -> SharedFileSetCreate (an internal inconsistency)
3.27 PathnameCreateTemporaryDir -> PathNameCreateTemporaryDir (an
inconsistent case)
3.28 pg_access_server_files -> pg_read_server_files or
pg_write_server_files (non-existing role referenced)
3.29 pg_beginmessage_reuse -> pq_beginmessage_reuse (a typo)
3.30 Form_pg_fdw & pg_fdw -> Form_pg_foreign_data_wrapper &
pg_foreign_data_wrapper (an internal inconsistency)
3.31 PG_MCV_LIST -> pg_mcv_list (an internal inconsistency, v12 only)
3.32 pg_partition_table -> pg_partitioned_table (an internal inconsistency)
3.33 pg_write -> pg_pwrite (an internal inconsistency, v12 only)
3.34 PLyObject_FromJsonb -> PLyObject_FromJsonbContainer (an internal
inconsistency)
3.35 port_win32.h -> win32_port.h (an internal inconsistency)
3.36 PruneCtxStateIdx -> PruneCxtStateIdx (an internal inconsistency)
3.37 SetErrormode -> SetErrorMode (an internal inconsistency)
3.38 SharedRecordTypemodRegistry -> SharedRecordTypmodRegistry (an
internal inconsistency)
3.39 SharedTupleStore -> SharedTuplestore (an internal inconsistency)
3.40 shm_mq_get_receive_bytes -> shm_mq_receive_bytes (an internal
inconsistency)
3.41 t_natts -> number-of-attributes (questionable) (renamed in
storage.sgml with 3e23b68d, but one reference is left)
3.42 tts_buffer -> remove (orphaned after 4da597ed, v12 only)
3.43 tts_flag -> tts_flags (an internal inconsistency, v12 only)
3.44 tts_off -> remove (orphaned after 4da597ed, v12 only)
3.45 _vaues -> _values (a typo)
3.46 wait_event_class -> wait_event_type (an internal inconsistency)
3.47 WarnNoTranactionBlock -> WarnNoTransactionBlock (a typo)
3.48 with-wal-segsize -> remove (orphaned after fc49e24f)
3.49 XLOG_SEG_SIZE -> WAL segment size (orphaned after fc49e24f)

Two summary patches for REL_11_STABLE and master are attached.

Best regards,
Alexander

diff --git a/contrib/jsonb_plpython/jsonb_plpython.c b/contrib/jsonb_plpython/jsonb_plpython.c
index 1bc984d5c4..1c93a80fbc 100644
--- a/contrib/jsonb_plpython/jsonb_plpython.c
+++ b/contrib/jsonb_plpython/jsonb_plpython.c
@@ -133,7 +133,7 @@ PLyObject_FromJsonbValue(JsonbValue *jsonbValue)
 }
 
 /*
- * PLyObject_FromJsonb
+ * PLyObject_FromJsonbContainer
  *
  * Transform JsonbContainer to PyObject.
  */
diff --git a/contrib/pg_prewarm/autoprewarm.c b/contrib/pg_prewarm/autoprewarm.c
index 3bd0010bf8..1fd65f30df 100644
--- a/contrib/pg_prewarm/autoprewarm.c
+++ b/contrib/pg_prewarm/autoprewarm.c
@@ -360,7 +360,7 @@ apw_load_buffers(void)
 		Oid			current_db = blkinfo[j].database;
 
 		/*
-		 * Advance the prewarm_stop_idx to the first BlockRecordInfo that does
+		 * Advance the prewarm_stop_idx to the first BlockInfoRecord that does
 		 * not belong to this database.
 		 */
 		j++;
@@ -369,7 +369,7 @@ apw_load_buffers(void)
 			if (current_db != blkinfo[j].database)
 			{
 				/*
-				 * Combine BlockRecordInfos for global objects with those of
+				 * Combine BlockInfoRecords for global objects with those of
 				 * the database.
 				 */
 				if (current_db != InvalidOid)
@@ -382,7 +382,7 @@ apw_load_buffers(void)
 
 		/*
 		 * If we reach this point with current_db == InvalidOid, then only
-		 * BlockRecordInfos belonging to global objects exist.  We can't
+		 * BlockInfoRecords belonging to global objects exist.  We can't
 		 * prewarm without a database connection, so just bail out.
 		 */
 		if (current_db == InvalidOid)
diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c
index 589dbb87ad..0d4614e9c8 100644
--- a/contrib/pg_trgm/trgm_op.c
+++ b/contrib/pg_trgm/trgm_op.c
@@ -500,7 +500,7 @@ iterate_word_similarity(int *trg2indexes,
 		word_similarity_threshold;
 
 	/*
-	 * Consider first trigram as initial lower bount for strict word
+	 * Consider first trigram as initial lower bound for strict word
 	 * similarity, or initialize it later with first trigram present for plain
 	 * word similarity.
 	 */
diff --git a/doc/src/sgml/storage.sgml b/doc/src/sgml/storage.sgml
index ea9640f76d..c969bb606b 100644
--- a/doc/src/sgml/storage.sgml
+++ b/doc/src/sgml/storage.sgml
@@ -934,9 +934,9 @@ data. Empty in ordinary tables.</entry>
   only present if the <firstterm>HEAP_HASNULL</firstterm> bit is set in
   <structfield>t_infomask</structfield>. If it is present it begins just after
   the fixed header and occupies enough bytes to have one bit per data column
-  (that is, <structfield>t_natts</structfield> bits altogether). In this list of bits, a
-  1 bit indicates not-null, a 0 bit is a null.  When the bitmap is not
-  present, all columns are assumed not-null.
+  (that is, <replaceable>number-of-attributes</replaceable> bits altogether).
+  In this list of bits, a 1 bit indicates not-null, a 0 bit is a null.
+  When the bitmap is not present, all columns are assumed not-null.
   The object ID is only present if the <firstterm>HEAP_HASOID</firstterm> bit
   is set in <structfield>t_infomask</structfield>.  If present, it appears just
   before the <structfield>t_hoff</structfield> boundary.  Any padding needed to make
diff --git a/src/backend/access/common/session.c b/src/backend/access/common/session.c
index ffa7432a3c..543080e561 100644
--- a/src/backend/access/common/session.c
+++ b/src/backend/access/common/session.c
@@ -133,7 +133,7 @@ GetSessionDsmHandle(void)
 	 * If we got this far, we can pin the shared memory so it stays mapped for
 	 * the rest of this backend's life.  If we don't make it this far, cleanup
 	 * callbacks for anything we installed above (ie currently
-	 * SharedRecordTypemodRegistry) will run when the DSM segment is detached
+	 * SharedRecordTypmodRegistry) will run when the DSM segment is detached
 	 * by CurrentResourceOwner so we aren't left with a broken CurrentSession.
 	 */
 	dsm_pin_mapping(seg);
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index 0002df30c0..9a365a97f3 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -333,7 +333,7 @@ hashgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
 
 		/*
 		 * _hash_first and _hash_next handle eliminate dead index entries
-		 * whenever scan->ignored_killed_tuples is true.  Therefore, there's
+		 * whenever scan->ignore_killed_tuples is true.  Therefore, there's
 		 * nothing to do here except add the results to the TIDBitmap.
 		 */
 		tbm_add_tuples(tbm, &(currItem->heapTid), 1, true);
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index e958dbc6af..a5bc7601d2 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -3239,7 +3239,7 @@ PreventInTransactionBlock(bool isTopLevel, const char *stmtType)
 }
 
 /*
- *	WarnNoTranactionBlock
+ *	WarnNoTransactionBlock
  *	RequireTransactionBlock
  *
  *	These two functions allow for warnings or errors if a command is executed
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index d959b7cc81..998dbd5fac 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -587,7 +587,7 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
 static void
 UpdateIndexRelation(Oid indexoid,
 					Oid heapoid,
-					Oid parentIndexOid,
+					Oid parentIndexId,
 					IndexInfo *indexInfo,
 					Oid *collationOids,
 					Oid *classOids,
diff --git a/src/backend/catalog/partition.c b/src/backend/catalog/partition.c
index 558022647c..54966458bd 100644
--- a/src/backend/catalog/partition.c
+++ b/src/backend/catalog/partition.c
@@ -301,7 +301,7 @@ get_default_partition_oid(Oid parentId)
 /*
  * update_default_partition_oid
  *
- * Update pg_partition_table.partdefid with a new default partition OID.
+ * Update pg_partitioned_table.partdefid with a new default partition OID.
  */
 void
 update_default_partition_oid(Oid parentId, Oid defaultPartId)
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index f4653485e2..aa2a995bb6 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -776,8 +776,8 @@ CopyLoadRawBuf(CopyState cstate)
  * input/output stream. The latter could be either stdin/stdout or a
  * socket, depending on whether we're running under Postmaster control.
  *
- * Do not allow a Postgres user without the 'pg_access_server_files' role to
- * read from or write to a file.
+ * Do not allow a Postgres user without the 'pg_read_server_files' or
+ * 'pg_write_server_files' role to read from or write to a file.
  *
  * Do not allow the copy if user doesn't have proper permission to access
  * the table or the specifically requested columns.
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 1802cacbc9..15ba4224a5 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -171,7 +171,7 @@ compute_return_type(TypeName *returnType, Oid languageOid,
  * Input parameters:
  * parameters: list of FunctionParameter structs
  * languageOid: OID of function language (InvalidOid if it's CREATE AGGREGATE)
- * is_aggregate: needed only to determine error handling
+ * objtype: needed only to determine error handling and required result type
  *
  * Results are stored into output parameters.  parameterTypes must always
  * be created, but the other arrays are set to NULL if not needed.
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index 9c0dd84836..dd650c473a 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -3230,7 +3230,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
 
 	/*
 	 * Each EState must have its own es_epqScanDone state, but if we have
-	 * nested EPQ checks they should share es_epqTuple arrays.  This allows
+	 * nested EPQ checks they should share es_epqTupleSlot arrays.  This allows
 	 * sub-rechecks to inherit the values being examined by an outer recheck.
 	 */
 	estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 5b3eaec80b..4a20b015a9 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -599,7 +599,7 @@ ExecAssignScanType(ScanState *scanstate, TupleDesc tupDesc)
 }
 
 /* ----------------
- *		ExecCreateSlotFromOuterPlan
+ *		ExecCreateScanSlotFromOuterPlan
  * ----------------
  */
 void
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 6ffaa751f2..a013cdf16d 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -1046,8 +1046,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 
 /*
  * ExecParallelHashIncreaseNumBatches
- *		Every participant attached to grow_barrier must run this function
- *		when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
+ *		Every participant attached to grow_batches_barrier must run this
+ *		function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
  */
 static void
 ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
@@ -1103,7 +1103,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 					 * The combined work_mem of all participants wasn't
 					 * enough. Therefore one batch per participant would be
 					 * approximately equivalent and would probably also be
-					 * insufficient.  So try two batches per particiant,
+					 * insufficient.  So try two batches per participant,
 					 * rounded up to a power of two.
 					 */
 					new_nbatch = 1 << my_log2(pstate->nparticipants * 2);
@@ -1667,7 +1667,7 @@ ExecHashTableInsert(HashJoinTable hashtable,
 }
 
 /*
- * ExecHashTableParallelInsert
+ * ExecParallelHashTableInsert
  *		insert a tuple into a shared hash table or shared batch tuplestore
  */
 void
diff --git a/src/backend/executor/nodeProjectSet.c b/src/backend/executor/nodeProjectSet.c
index 6d6ed38cee..5c3af4de56 100644
--- a/src/backend/executor/nodeProjectSet.c
+++ b/src/backend/executor/nodeProjectSet.c
@@ -297,7 +297,7 @@ ExecInitProjectSet(ProjectSet *node, EState *estate, int eflags)
 	Assert(node->plan.qual == NIL);
 
 	/*
-	 * Create a memory context that ExecMakeFunctionResult can use to evaluate
+	 * Create a memory context that ExecMakeFunctionResultSet can use to evaluate
 	 * function arguments in.  We can't use the per-tuple context for this
 	 * because it gets reset too often; but we don't want to leak evaluation
 	 * results into the query-lifespan context either.  We use one context for
diff --git a/src/backend/jit/llvm/Makefile b/src/backend/jit/llvm/Makefile
index e2db4cea65..17ff0691f3 100644
--- a/src/backend/jit/llvm/Makefile
+++ b/src/backend/jit/llvm/Makefile
@@ -22,7 +22,7 @@ endif
 PGFILEDESC = "llvmjit - JIT using LLVM"
 NAME = llvmjit
 
-# All files in this directy use LLVM.
+# All files in this directory use LLVM.
 CFLAGS += $(LLVM_CFLAGS)
 CXXFLAGS += $(LLVM_CXXFLAGS)
 override CPPFLAGS := $(LLVM_CPPFLAGS) $(CPPFLAGS)
diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c
index 1c7e99019d..901ae137ff 100644
--- a/src/backend/libpq/pqformat.c
+++ b/src/backend/libpq/pqformat.c
@@ -308,7 +308,7 @@ pq_endmessage(StringInfo buf)
  *		pq_endmessage_reuse	- send the completed message to the frontend
  *
  * The data buffer is *not* freed, allowing to reuse the buffer with
- * pg_beginmessage_reuse.
+ * pq_beginmessage_reuse.
  --------------------------------
  */
 
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index c9ff4a2b3d..7cc4f74677 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -1068,7 +1068,8 @@ StartLogicalReplication(StartReplicationCmd *cmd)
 	 * Create our decoding context, making it start at the previously ack'ed
 	 * position.
 	 *
-	 * Do this before sending CopyBoth, so that any errors are reported early.
+	 * Do this before sending a CopyBothResponse message,
+	 * so that any errors are reported early.
 	 */
 	logical_decoding_ctx =
 		CreateDecodingContext(cmd->startpoint, cmd->options, false,
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 8d2b8aacfe..c11cf18ce6 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -1641,7 +1641,7 @@ OpenTemporaryFileInTablespace(Oid tblspcOid, bool rejectError)
  * If the file is inside the top-level temporary directory, its name should
  * begin with PG_TEMP_FILE_PREFIX so that it can be identified as temporary
  * and deleted at startup by RemovePgTempFiles().  Alternatively, it can be
- * inside a directory created with PathnameCreateTemporaryDir(), in which case
+ * inside a directory created with PathNameCreateTemporaryDir(), in which case
  * the prefix isn't needed.
  */
 File
diff --git a/src/backend/storage/file/sharedfileset.c b/src/backend/storage/file/sharedfileset.c
index d41b39a177..f63e3aadd1 100644
--- a/src/backend/storage/file/sharedfileset.c
+++ b/src/backend/storage/file/sharedfileset.c
@@ -141,7 +141,7 @@ SharedFileSetOpen(SharedFileSet *fileset, const char *name)
 }
 
 /*
- * Delete a file that was created with PathNameCreateShared().
+ * Delete a file that was created with SharedFileSetCreate().
  * Return true if the file existed, false if didn't.
  */
 bool
diff --git a/src/backend/storage/ipc/barrier.c b/src/backend/storage/ipc/barrier.c
index 00ab57c0f6..464552450e 100644
--- a/src/backend/storage/ipc/barrier.c
+++ b/src/backend/storage/ipc/barrier.c
@@ -113,7 +113,7 @@ BarrierInit(Barrier *barrier, int participants)
  * too and then return.  Increments the current phase.  The caller must be
  * attached.
  *
- * While waiting, pg_stat_activity shows a wait_event_class and wait_event
+ * While waiting, pg_stat_activity shows a wait_event_type and wait_event
  * controlled by the wait_event_info passed in, which should be a value from
  * from one of the WaitEventXXX enums defined in pgstat.h.
  *
diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c
index 9c227c0a64..7b5fe4cfe5 100644
--- a/src/backend/storage/ipc/shm_mq.c
+++ b/src/backend/storage/ipc/shm_mq.c
@@ -1235,7 +1235,7 @@ shm_mq_inc_bytes_written(shm_mq *mq, Size n)
 	/*
 	 * Separate prior reads of mq_ring from the write of mq_bytes_written
 	 * which we're about to do.  Pairs with the read barrier found in
-	 * shm_mq_get_receive_bytes.
+	 * shm_mq_receive_bytes.
 	 */
 	pg_write_barrier();
 
diff --git a/src/backend/utils/Gen_fmgrtab.pl b/src/backend/utils/Gen_fmgrtab.pl
index fa30436895..28c092bf3f 100644
--- a/src/backend/utils/Gen_fmgrtab.pl
+++ b/src/backend/utils/Gen_fmgrtab.pl
@@ -250,7 +250,7 @@ const int fmgr_nbuiltins = (sizeof(fmgr_builtins) / sizeof(FmgrBuiltin));
 |;
 
 
-# Create fmgr_builtins_oid_index table.
+# Create fmgr_builtin_oid_index table.
 #
 # Note that the array has to be filled up to FirstBootstrapObjectId,
 # as we can't rely on zero initialization as 0 is a valid mapping.
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 17de89d69c..3c67f3d8cb 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -53,7 +53,7 @@ typedef struct OkeysState
 	int			sent_count;
 } OkeysState;
 
-/* state for iterate_json_string_values function */
+/* state for iterate_json_values function */
 typedef struct IterateJsonStringValuesState
 {
 	JsonLexContext *lex;
diff --git a/src/backend/utils/sort/sharedtuplestore.c b/src/backend/utils/sort/sharedtuplestore.c
index 265c04b3d3..89bd0ed166 100644
--- a/src/backend/utils/sort/sharedtuplestore.c
+++ b/src/backend/utils/sort/sharedtuplestore.c
@@ -173,7 +173,7 @@ sts_initialize(SharedTuplestore *sts, int participants,
 }
 
 /*
- * Attach to a SharedTupleStore that has been initialized by another backend,
+ * Attach to a SharedTuplestore that has been initialized by another backend,
  * so that this backend can read and write tuples.
  */
 SharedTuplestoreAccessor *
diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c
index 9b5869add8..58e2d2fb8e 100644
--- a/src/bin/pg_dump/common.c
+++ b/src/bin/pg_dump/common.c
@@ -366,7 +366,7 @@ flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
 
 /*
  * flagInhIndexes -
- *	 Create AttachIndexInfo objects for partitioned indexes, and add
+ *	 Create IndexAttachInfo objects for partitioned indexes, and add
  *	 appropriate dependency links.
  */
 static void
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index 5b2d3de4ff..d6204d286a 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -826,7 +826,6 @@ sub check_pgbench_logs
 	ok(@logs == $nb, "number of log files");
 	ok(grep(/\/$prefix\.\d+(\.\d+)?$/, @logs) == $nb, "file name format");
 
-	my $log_number = 0;
 	for my $log (sort @logs)
 	{
 		eval {
diff --git a/src/include/access/xlogdefs.h b/src/include/access/xlogdefs.h
index 0a48d1cfb4..2821f2cbf9 100644
--- a/src/include/access/xlogdefs.h
+++ b/src/include/access/xlogdefs.h
@@ -22,7 +22,7 @@ typedef uint64 XLogRecPtr;
 
 /*
  * Zero is used indicate an invalid pointer. Bootstrap skips the first possible
- * WAL segment, initializing the first WAL page at XLOG_SEG_SIZE, so no XLOG
+ * WAL segment, initializing the first WAL page at WAL segment size, so no XLOG
  * record can begin at zero.
  */
 #define InvalidXLogRecPtr	0
diff --git a/src/include/catalog/partition.h b/src/include/catalog/partition.h
index 1f49e5d3a9..321f5ffd15 100644
--- a/src/include/catalog/partition.h
+++ b/src/include/catalog/partition.h
@@ -41,6 +41,6 @@ extern bool has_partition_attrs(Relation rel, Bitmapset *attnums,
 extern Oid	get_default_oid_from_partdesc(PartitionDesc partdesc);
 extern Oid	get_default_partition_oid(Oid parentId);
 extern void update_default_partition_oid(Oid parentId, Oid defaultPartId);
-extern List *get_proposed_default_constraint(List *new_part_constaints);
+extern List *get_proposed_default_constraint(List *new_part_constraints);
 
 #endif							/* PARTITION_H */
diff --git a/src/include/catalog/pg_foreign_data_wrapper.h b/src/include/catalog/pg_foreign_data_wrapper.h
index 3e6191e3e2..0a8c6ca805 100644
--- a/src/include/catalog/pg_foreign_data_wrapper.h
+++ b/src/include/catalog/pg_foreign_data_wrapper.h
@@ -40,8 +40,8 @@ CATALOG(pg_foreign_data_wrapper,2328,ForeignDataWrapperRelationId)
 } FormData_pg_foreign_data_wrapper;
 
 /* ----------------
- *		Form_pg_fdw corresponds to a pointer to a tuple with
- *		the format of pg_fdw relation.
+ *		Form_pg_foreign_data_wrapper corresponds to a pointer to a tuple with
+ *		the format of pg_foreign_data_wrapper relation.
  * ----------------
  */
 typedef FormData_pg_foreign_data_wrapper *Form_pg_foreign_data_wrapper;
diff --git a/src/include/optimizer/placeholder.h b/src/include/optimizer/placeholder.h
index 91ebdb90fc..cc862a94b6 100644
--- a/src/include/optimizer/placeholder.h
+++ b/src/include/optimizer/placeholder.h
@@ -28,7 +28,5 @@ extern void fix_placeholder_input_needed_levels(PlannerInfo *root);
 extern void add_placeholders_to_base_rels(PlannerInfo *root);
 extern void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel,
 							RelOptInfo *outer_rel, RelOptInfo *inner_rel);
-extern void add_placeholders_to_child_joinrel(PlannerInfo *root,
-								  RelOptInfo *childrel, RelOptInfo *parentrel);
 
 #endif							/* PLACEHOLDER_H */
diff --git a/src/include/partitioning/partprune.h b/src/include/partitioning/partprune.h
index b20839ae6e..126da7486d 100644
--- a/src/include/partitioning/partprune.h
+++ b/src/include/partitioning/partprune.h
@@ -40,7 +40,7 @@
  *					subsidiary data, such as the FmgrInfos.
  * planstate		Points to the parent plan node's PlanState when called
  *					during execution; NULL when called from the planner.
- * exprstates		Array of ExprStates, indexed as per PruneCtxStateIdx; one
+ * exprstates		Array of ExprStates, indexed as per PruneCxtStateIdx; one
  *					for each partition key in each pruning step.  Allocated if
  *					planstate is non-NULL, otherwise NULL.
  */
diff --git a/src/include/utils/jsonapi.h b/src/include/utils/jsonapi.h
index 6b483a15a6..d45188284c 100644
--- a/src/include/utils/jsonapi.h
+++ b/src/include/utils/jsonapi.h
@@ -145,7 +145,7 @@ typedef enum JsonToIndex
 	jtiAll = jtiKey | jtiString | jtiNumeric | jtiBool
 } JsonToIndex;
 
-/* an action that will be applied to each value in iterate_json(b)_vaues functions */
+/* an action that will be applied to each value in iterate_json(b)_values functions */
 typedef void (*JsonIterateStringValuesAction) (void *state, char *elem_value, int elem_len);
 
 /* an action that will be applied to each value in transform_json(b)_values functions */
diff --git a/src/include/utils/sharedtuplestore.h b/src/include/utils/sharedtuplestore.h
index 834773511d..02fcb7771c 100644
--- a/src/include/utils/sharedtuplestore.h
+++ b/src/include/utils/sharedtuplestore.h
@@ -1,7 +1,7 @@
 /*-------------------------------------------------------------------------
  *
  * sharedtuplestore.h
- *	  Simple mechinism for sharing tuples between backends.
+ *	  Simple mechanism for sharing tuples between backends.
  *
  * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
diff --git a/src/pl/plperl/plperl.h b/src/pl/plperl/plperl.h
index e6241f03ea..8dbf902271 100644
--- a/src/pl/plperl/plperl.h
+++ b/src/pl/plperl/plperl.h
@@ -71,7 +71,7 @@
  */
 #ifdef PG_NEED_PERL_XSUB_H
 /*
- * On Windows, port_win32.h defines macros for a lot of these same functions.
+ * On Windows, win32_port.h defines macros for a lot of these same functions.
  * To avoid compiler warnings when XSUB.h redefines them, #undef our versions.
  */
 #ifdef WIN32
diff --git a/src/test/isolation/specs/partition-key-update-4.spec b/src/test/isolation/specs/partition-key-update-4.spec
index 1d53a7d0c6..3d1579b244 100644
--- a/src/test/isolation/specs/partition-key-update-4.spec
+++ b/src/test/isolation/specs/partition-key-update-4.spec
@@ -67,7 +67,7 @@ step "s2c"	 { COMMIT; }
 # in the new partition should contain the changes made by session s2.
 permutation "s1b" "s2b" "s2u1" "s1u" "s2c" "s1c" "s1s"
 
-# Same as above, except, session s1 is waiting in GetTupleTrigger().
+# Same as above, except, session s1 is waiting in GetTupleForTrigger().
 permutation "s1b" "s2b" "s2ut1" "s1ut" "s2c" "s1c" "s1st" "s1stl"
 
 # Below two cases are similar to the above two; except that the session s1
diff --git a/src/tools/msvc/config_default.pl b/src/tools/msvc/config_default.pl
index d7a9fc5039..2553636dc1 100644
--- a/src/tools/msvc/config_default.pl
+++ b/src/tools/msvc/config_default.pl
@@ -11,7 +11,6 @@ our $config = {
 
 	# blocksize => 8,         # --with-blocksize, 8kB by default
 	# wal_blocksize => 8,     # --with-wal-blocksize, 8kB by default
-	# wal_segsize => 16,      # --with-wal-segsize, 16MB by default
 	ldap      => 1,        # --with-ldap
 	extraver  => undef,    # --with-extra-version=<string>
 	gss       => undef,    # --with-gssapi=<path>
diff --git a/src/tools/msvc/dummylib/Win32API/File.pm b/src/tools/msvc/dummylib/Win32API/File.pm
index bfba9cc7d6..ccd271ef54 100644
--- a/src/tools/msvc/dummylib/Win32API/File.pm
+++ b/src/tools/msvc/dummylib/Win32API/File.pm
@@ -4,7 +4,7 @@ use strict;
 use warnings;
 
 use constant { SEM_FAILCRITICALERRORS => 1, SEM_NOGPFAULTERRORBOX => 2 };
-sub SetErrormode { }
+sub SetErrorMode { }
 use Exporter;
 our (@ISA, @EXPORT_OK, %EXPORT_TAGS);
 @ISA         = qw(Exporter);
diff --git a/contrib/jsonb_plpython/jsonb_plpython.c b/contrib/jsonb_plpython/jsonb_plpython.c
index ea8fd5b3e6..776cf7c8b9 100644
--- a/contrib/jsonb_plpython/jsonb_plpython.c
+++ b/contrib/jsonb_plpython/jsonb_plpython.c
@@ -133,7 +133,7 @@ PLyObject_FromJsonbValue(JsonbValue *jsonbValue)
 }
 
 /*
- * PLyObject_FromJsonb
+ * PLyObject_FromJsonbContainer
  *
  * Transform JsonbContainer to PyObject.
  */
diff --git a/contrib/pg_prewarm/autoprewarm.c b/contrib/pg_prewarm/autoprewarm.c
index 5378ff0141..38ae240c55 100644
--- a/contrib/pg_prewarm/autoprewarm.c
+++ b/contrib/pg_prewarm/autoprewarm.c
@@ -356,7 +356,7 @@ apw_load_buffers(void)
 		Oid			current_db = blkinfo[j].database;
 
 		/*
-		 * Advance the prewarm_stop_idx to the first BlockRecordInfo that does
+		 * Advance the prewarm_stop_idx to the first BlockInfoRecord that does
 		 * not belong to this database.
 		 */
 		j++;
@@ -365,7 +365,7 @@ apw_load_buffers(void)
 			if (current_db != blkinfo[j].database)
 			{
 				/*
-				 * Combine BlockRecordInfos for global objects with those of
+				 * Combine BlockInfoRecords for global objects with those of
 				 * the database.
 				 */
 				if (current_db != InvalidOid)
@@ -378,7 +378,7 @@ apw_load_buffers(void)
 
 		/*
 		 * If we reach this point with current_db == InvalidOid, then only
-		 * BlockRecordInfos belonging to global objects exist.  We can't
+		 * BlockInfoRecords belonging to global objects exist.  We can't
 		 * prewarm without a database connection, so just bail out.
 		 */
 		if (current_db == InvalidOid)
diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c
index 589dbb87ad..0d4614e9c8 100644
--- a/contrib/pg_trgm/trgm_op.c
+++ b/contrib/pg_trgm/trgm_op.c
@@ -500,7 +500,7 @@ iterate_word_similarity(int *trg2indexes,
 		word_similarity_threshold;
 
 	/*
-	 * Consider first trigram as initial lower bount for strict word
+	 * Consider first trigram as initial lower bound for strict word
 	 * similarity, or initialize it later with first trigram present for plain
 	 * word similarity.
 	 */
diff --git a/doc/src/sgml/storage.sgml b/doc/src/sgml/storage.sgml
index 1047c77a63..0170d86670 100644
--- a/doc/src/sgml/storage.sgml
+++ b/doc/src/sgml/storage.sgml
@@ -957,9 +957,9 @@ data. Empty in ordinary tables.</entry>
   only present if the <firstterm>HEAP_HASNULL</firstterm> bit is set in
   <structfield>t_infomask</structfield>. If it is present it begins just after
   the fixed header and occupies enough bytes to have one bit per data column
-  (that is, <structfield>t_natts</structfield> bits altogether). In this list of bits, a
-  1 bit indicates not-null, a 0 bit is a null.  When the bitmap is not
-  present, all columns are assumed not-null.
+  (that is, <replaceable>number-of-attributes</replaceable> bits altogether).
+  In this list of bits, a 1 bit indicates not-null, a 0 bit is a null.
+  When the bitmap is not present, all columns are assumed not-null.
   The object ID is only present if the <firstterm>HEAP_HASOID_OLD</firstterm> bit
   is set in <structfield>t_infomask</structfield>.  If present, it appears just
   before the <structfield>t_hoff</structfield> boundary.  Any padding needed to make
diff --git a/src/backend/access/common/session.c b/src/backend/access/common/session.c
index d3e73d223b..8bcb90b9c6 100644
--- a/src/backend/access/common/session.c
+++ b/src/backend/access/common/session.c
@@ -133,7 +133,7 @@ GetSessionDsmHandle(void)
 	 * If we got this far, we can pin the shared memory so it stays mapped for
 	 * the rest of this backend's life.  If we don't make it this far, cleanup
 	 * callbacks for anything we installed above (ie currently
-	 * SharedRecordTypemodRegistry) will run when the DSM segment is detached
+	 * SharedRecordTypmodRegistry) will run when the DSM segment is detached
 	 * by CurrentResourceOwner so we aren't left with a broken CurrentSession.
 	 */
 	dsm_pin_mapping(seg);
diff --git a/src/backend/access/hash/hash.c b/src/backend/access/hash/hash.c
index e9f2c84af1..5cc30dac42 100644
--- a/src/backend/access/hash/hash.c
+++ b/src/backend/access/hash/hash.c
@@ -340,7 +340,7 @@ hashgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
 
 		/*
 		 * _hash_first and _hash_next handle eliminate dead index entries
-		 * whenever scan->ignored_killed_tuples is true.  Therefore, there's
+		 * whenever scan->ignore_killed_tuples is true.  Therefore, there's
 		 * nothing to do here except add the results to the TIDBitmap.
 		 */
 		tbm_add_tuples(tbm, &(currItem->heapTid), 1, true);
diff --git a/src/backend/access/spgist/spgscan.c b/src/backend/access/spgist/spgscan.c
index 7bc5ec09bf..debf83b3da 100644
--- a/src/backend/access/spgist/spgscan.c
+++ b/src/backend/access/spgist/spgscan.c
@@ -146,11 +146,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
 {
 	MemoryContext oldCtx;
 
-	/*
-	 * clear traversal context before proceeding to the next scan; this must
-	 * not happen before the freeScanStack above, else we get double-free
-	 * crashes.
-	 */
+	/* clear traversal context before proceeding to the next scan */
 	MemoryContextReset(so->traversalCxt);
 
 	oldCtx = MemoryContextSwitchTo(so->traversalCxt);
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 821652b25b..d7930c077d 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -3364,7 +3364,7 @@ PreventInTransactionBlock(bool isTopLevel, const char *stmtType)
 }
 
 /*
- *	WarnNoTranactionBlock
+ *	WarnNoTransactionBlock
  *	RequireTransactionBlock
  *
  *	These two functions allow for warnings or errors if a command is executed
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index d2e4f53a80..587b717242 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -528,7 +528,7 @@ AppendAttributeTuples(Relation indexRelation, int numatts)
 static void
 UpdateIndexRelation(Oid indexoid,
 					Oid heapoid,
-					Oid parentIndexOid,
+					Oid parentIndexId,
 					IndexInfo *indexInfo,
 					Oid *collationOids,
 					Oid *classOids,
diff --git a/src/backend/catalog/partition.c b/src/backend/catalog/partition.c
index 8134098fc1..e96620e401 100644
--- a/src/backend/catalog/partition.c
+++ b/src/backend/catalog/partition.c
@@ -319,7 +319,7 @@ get_default_partition_oid(Oid parentId)
 /*
  * update_default_partition_oid
  *
- * Update pg_partition_table.partdefid with a new default partition OID.
+ * Update pg_partitioned_table.partdefid with a new default partition OID.
  */
 void
 update_default_partition_oid(Oid parentId, Oid defaultPartId)
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
index ac86f3d5be..f1161f0fee 100644
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -829,8 +829,8 @@ CopyLoadRawBuf(CopyState cstate)
  * input/output stream. The latter could be either stdin/stdout or a
  * socket, depending on whether we're running under Postmaster control.
  *
- * Do not allow a Postgres user without the 'pg_access_server_files' role to
- * read from or write to a file.
+ * Do not allow a Postgres user without the 'pg_read_server_files' or
+ * 'pg_write_server_files' role to read from or write to a file.
  *
  * Do not allow the copy if user doesn't have proper permission to access
  * the table or the specifically requested columns.
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
index 4f62e48d98..40f1f9a1b6 100644
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -170,7 +170,7 @@ compute_return_type(TypeName *returnType, Oid languageOid,
  * Input parameters:
  * parameters: list of FunctionParameter structs
  * languageOid: OID of function language (InvalidOid if it's CREATE AGGREGATE)
- * is_aggregate: needed only to determine error handling
+ * objtype: needed only to determine error handling and required result type
  *
  * Results are stored into output parameters.  parameterTypes must always
  * be created, but the other arrays are set to NULL if not needed.
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index a2bd9a7859..1dfb855ebf 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -2876,7 +2876,7 @@ EvalPlanQualStart(EPQState *epqstate, EState *parentestate, Plan *planTree)
 
 	/*
 	 * Each EState must have its own es_epqScanDone state, but if we have
-	 * nested EPQ checks they should share es_epqTuple arrays.  This allows
+	 * nested EPQ checks they should share es_epqTupleSlot arrays.  This allows
 	 * sub-rechecks to inherit the values being examined by an outer recheck.
 	 */
 	estate->es_epqScanDone = (bool *) palloc0(rtsize * sizeof(bool));
diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c
index 2835a01e15..9b866a5dd6 100644
--- a/src/backend/executor/execUtils.c
+++ b/src/backend/executor/execUtils.c
@@ -642,7 +642,7 @@ ExecAssignScanType(ScanState *scanstate, TupleDesc tupDesc)
 }
 
 /* ----------------
- *		ExecCreateSlotFromOuterPlan
+ *		ExecCreateScanSlotFromOuterPlan
  * ----------------
  */
 void
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 3c33ce74e0..d16120b9c4 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -1049,8 +1049,8 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 
 /*
  * ExecParallelHashIncreaseNumBatches
- *		Every participant attached to grow_barrier must run this function
- *		when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
+ *		Every participant attached to grow_batches_barrier must run this
+ *		function when it observes growth == PHJ_GROWTH_NEED_MORE_BATCHES.
  */
 static void
 ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
@@ -1106,7 +1106,7 @@ ExecParallelHashIncreaseNumBatches(HashJoinTable hashtable)
 					 * The combined work_mem of all participants wasn't
 					 * enough. Therefore one batch per participant would be
 					 * approximately equivalent and would probably also be
-					 * insufficient.  So try two batches per particiant,
+					 * insufficient.  So try two batches per participant,
 					 * rounded up to a power of two.
 					 */
 					new_nbatch = 1 << my_log2(pstate->nparticipants * 2);
@@ -1674,7 +1674,7 @@ ExecHashTableInsert(HashJoinTable hashtable,
 }
 
 /*
- * ExecHashTableParallelInsert
+ * ExecParallelHashTableInsert
  *		insert a tuple into a shared hash table or shared batch tuplestore
  */
 void
diff --git a/src/backend/executor/nodeProjectSet.c b/src/backend/executor/nodeProjectSet.c
index 515dd61f07..42774424ac 100644
--- a/src/backend/executor/nodeProjectSet.c
+++ b/src/backend/executor/nodeProjectSet.c
@@ -297,7 +297,7 @@ ExecInitProjectSet(ProjectSet *node, EState *estate, int eflags)
 	Assert(node->plan.qual == NIL);
 
 	/*
-	 * Create a memory context that ExecMakeFunctionResult can use to evaluate
+	 * Create a memory context that ExecMakeFunctionResultSet can use to evaluate
 	 * function arguments in.  We can't use the per-tuple context for this
 	 * because it gets reset too often; but we don't want to leak evaluation
 	 * results into the query-lifespan context either.  We use one context for
diff --git a/src/backend/jit/llvm/Makefile b/src/backend/jit/llvm/Makefile
index e2db4cea65..17ff0691f3 100644
--- a/src/backend/jit/llvm/Makefile
+++ b/src/backend/jit/llvm/Makefile
@@ -22,7 +22,7 @@ endif
 PGFILEDESC = "llvmjit - JIT using LLVM"
 NAME = llvmjit
 
-# All files in this directy use LLVM.
+# All files in this directory use LLVM.
 CFLAGS += $(LLVM_CFLAGS)
 CXXFLAGS += $(LLVM_CXXFLAGS)
 override CPPFLAGS := $(LLVM_CPPFLAGS) $(CPPFLAGS)
diff --git a/src/backend/lib/integerset.c b/src/backend/lib/integerset.c
index 6d51c7903e..58b0be1f32 100644
--- a/src/backend/lib/integerset.c
+++ b/src/backend/lib/integerset.c
@@ -261,7 +261,7 @@ struct IntegerSet
  * Prototypes for internal functions.
  */
 static void intset_update_upper(IntegerSet *intset, int level,
-								intset_node *new_node, uint64 new_node_item);
+								intset_node *child, uint64 child_key);
 static void intset_flush_buffered_values(IntegerSet *intset);
 
 static int	intset_binsrch_uint64(uint64 value, uint64 *arr, int arr_elems,
diff --git a/src/backend/libpq/pqformat.c b/src/backend/libpq/pqformat.c
index 805b996d6d..aca44a0aec 100644
--- a/src/backend/libpq/pqformat.c
+++ b/src/backend/libpq/pqformat.c
@@ -308,7 +308,7 @@ pq_endmessage(StringInfo buf)
  *		pq_endmessage_reuse	- send the completed message to the frontend
  *
  * The data buffer is *not* freed, allowing to reuse the buffer with
- * pg_beginmessage_reuse.
+ * pq_beginmessage_reuse.
  --------------------------------
  */
 
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index 3f31368022..c16c990332 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -1087,7 +1087,8 @@ StartLogicalReplication(StartReplicationCmd *cmd)
 	 * Create our decoding context, making it start at the previously ack'ed
 	 * position.
 	 *
-	 * Do this before sending CopyBoth, so that any errors are reported early.
+	 * Do this before sending a CopyBothResponse message, so that any errors
+	 * are reported early.
 	 */
 	logical_decoding_ctx =
 		CreateDecodingContext(cmd->startpoint, cmd->options, false,
diff --git a/src/backend/statistics/mcv.c b/src/backend/statistics/mcv.c
index d1f0fd55e8..35d887aa6f 100644
--- a/src/backend/statistics/mcv.c
+++ b/src/backend/statistics/mcv.c
@@ -1314,7 +1314,7 @@ pg_mcv_list_in(PG_FUNCTION_ARGS)
 
 
 /*
- * pg_mcv_list_out		- output routine for type PG_MCV_LIST.
+ * pg_mcv_list_out		- output routine for type pg_mcv_list.
  *
  * MCV lists are serialized into a bytea value, so we simply call byteaout()
  * to serialize the value into text. But it'd be nice to serialize that into
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index 73c455e66f..6bfd4fa13e 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -1592,7 +1592,7 @@ OpenTemporaryFileInTablespace(Oid tblspcOid, bool rejectError)
  * If the file is inside the top-level temporary directory, its name should
  * begin with PG_TEMP_FILE_PREFIX so that it can be identified as temporary
  * and deleted at startup by RemovePgTempFiles().  Alternatively, it can be
- * inside a directory created with PathnameCreateTemporaryDir(), in which case
+ * inside a directory created with PathNameCreateTemporaryDir(), in which case
  * the prefix isn't needed.
  */
 File
diff --git a/src/backend/storage/file/sharedfileset.c b/src/backend/storage/file/sharedfileset.c
index 3cfc0c385c..26e5091172 100644
--- a/src/backend/storage/file/sharedfileset.c
+++ b/src/backend/storage/file/sharedfileset.c
@@ -143,7 +143,7 @@ SharedFileSetOpen(SharedFileSet *fileset, const char *name)
 }
 
 /*
- * Delete a file that was created with PathNameCreateShared().
+ * Delete a file that was created with SharedFileSetCreate().
  * Return true if the file existed, false if didn't.
  */
 bool
diff --git a/src/backend/storage/ipc/barrier.c b/src/backend/storage/ipc/barrier.c
index ff376b574c..69ed034e52 100644
--- a/src/backend/storage/ipc/barrier.c
+++ b/src/backend/storage/ipc/barrier.c
@@ -113,7 +113,7 @@ BarrierInit(Barrier *barrier, int participants)
  * too and then return.  Increments the current phase.  The caller must be
  * attached.
  *
- * While waiting, pg_stat_activity shows a wait_event_class and wait_event
+ * While waiting, pg_stat_activity shows a wait_event_type and wait_event
  * controlled by the wait_event_info passed in, which should be a value from
  * one of the WaitEventXXX enums defined in pgstat.h.
  *
diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c
index 91f7714650..4c245d1f85 100644
--- a/src/backend/storage/ipc/shm_mq.c
+++ b/src/backend/storage/ipc/shm_mq.c
@@ -1238,7 +1238,7 @@ shm_mq_inc_bytes_written(shm_mq *mq, Size n)
 	/*
 	 * Separate prior reads of mq_ring from the write of mq_bytes_written
 	 * which we're about to do.  Pairs with the read barrier found in
-	 * shm_mq_get_receive_bytes.
+	 * shm_mq_receive_bytes.
 	 */
 	pg_write_barrier();
 
diff --git a/src/backend/utils/Gen_fmgrtab.pl b/src/backend/utils/Gen_fmgrtab.pl
index 0f57fd352a..80e99189e4 100644
--- a/src/backend/utils/Gen_fmgrtab.pl
+++ b/src/backend/utils/Gen_fmgrtab.pl
@@ -230,7 +230,7 @@ const Oid fmgr_last_builtin_oid = %u;
 |, $last_builtin_oid;
 
 
-# Create fmgr_builtins_oid_index table.
+# Create fmgr_builtin_oid_index table.
 printf $tfh qq|
 const uint16 fmgr_builtin_oid_index[%u] = {
 |, $last_builtin_oid + 1;
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index 9e7035c71a..fe351edb2b 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -53,7 +53,7 @@ typedef struct OkeysState
 	int			sent_count;
 } OkeysState;
 
-/* state for iterate_json_string_values function */
+/* state for iterate_json_values function */
 typedef struct IterateJsonStringValuesState
 {
 	JsonLexContext *lex;
diff --git a/src/backend/utils/sort/sharedtuplestore.c b/src/backend/utils/sort/sharedtuplestore.c
index 8df18eb2ee..8f74e8d40f 100644
--- a/src/backend/utils/sort/sharedtuplestore.c
+++ b/src/backend/utils/sort/sharedtuplestore.c
@@ -173,7 +173,7 @@ sts_initialize(SharedTuplestore *sts, int participants,
 }
 
 /*
- * Attach to a SharedTupleStore that has been initialized by another backend,
+ * Attach to a SharedTuplestore that has been initialized by another backend,
  * so that this backend can read and write tuples.
  */
 SharedTuplestoreAccessor *
diff --git a/src/bin/pg_dump/common.c b/src/bin/pg_dump/common.c
index 8fbaeccf49..02a865f456 100644
--- a/src/bin/pg_dump/common.c
+++ b/src/bin/pg_dump/common.c
@@ -327,7 +327,7 @@ flagInhTables(Archive *fout, TableInfo *tblinfo, int numTables,
 
 /*
  * flagInhIndexes -
- *	 Create AttachIndexInfo objects for partitioned indexes, and add
+ *	 Create IndexAttachInfo objects for partitioned indexes, and add
  *	 appropriate dependency links.
  */
 static void
diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl
index dc2c72fa92..2a40d26ee2 100644
--- a/src/bin/pgbench/t/001_pgbench_with_server.pl
+++ b/src/bin/pgbench/t/001_pgbench_with_server.pl
@@ -874,7 +874,6 @@ sub check_pgbench_logs
 	ok(@logs == $nb, "number of log files");
 	ok(grep(/\/$prefix\.\d+(\.\d+)?$/, @logs) == $nb, "file name format");
 
-	my $log_number = 0;
 	for my $log (sort @logs)
 	{
 		eval {
diff --git a/src/include/access/xlogdefs.h b/src/include/access/xlogdefs.h
index cadecab721..daded3dca0 100644
--- a/src/include/access/xlogdefs.h
+++ b/src/include/access/xlogdefs.h
@@ -22,7 +22,7 @@ typedef uint64 XLogRecPtr;
 
 /*
  * Zero is used indicate an invalid pointer. Bootstrap skips the first possible
- * WAL segment, initializing the first WAL page at XLOG_SEG_SIZE, so no XLOG
+ * WAL segment, initializing the first WAL page at WAL segment size, so no XLOG
  * record can begin at zero.
  */
 #define InvalidXLogRecPtr	0
diff --git a/src/include/catalog/partition.h b/src/include/catalog/partition.h
index e234b3c0ef..5c3565ce36 100644
--- a/src/include/catalog/partition.h
+++ b/src/include/catalog/partition.h
@@ -30,6 +30,6 @@ extern bool has_partition_attrs(Relation rel, Bitmapset *attnums,
 
 extern Oid	get_default_partition_oid(Oid parentId);
 extern void update_default_partition_oid(Oid parentId, Oid defaultPartId);
-extern List *get_proposed_default_constraint(List *new_part_constaints);
+extern List *get_proposed_default_constraint(List *new_part_constraints);
 
 #endif							/* PARTITION_H */
diff --git a/src/include/catalog/pg_foreign_data_wrapper.h b/src/include/catalog/pg_foreign_data_wrapper.h
index 9ee9afd97c..3f0cef33b4 100644
--- a/src/include/catalog/pg_foreign_data_wrapper.h
+++ b/src/include/catalog/pg_foreign_data_wrapper.h
@@ -41,8 +41,8 @@ CATALOG(pg_foreign_data_wrapper,2328,ForeignDataWrapperRelationId)
 } FormData_pg_foreign_data_wrapper;
 
 /* ----------------
- *		Form_pg_fdw corresponds to a pointer to a tuple with
- *		the format of pg_fdw relation.
+ *		Form_pg_foreign_data_wrapper corresponds to a pointer to a tuple with
+ *		the format of pg_foreign_data_wrapper relation.
  * ----------------
  */
 typedef FormData_pg_foreign_data_wrapper *Form_pg_foreign_data_wrapper;
diff --git a/src/include/executor/tuptable.h b/src/include/executor/tuptable.h
index 0710a7dd38..203b1ab7dc 100644
--- a/src/include/executor/tuptable.h
+++ b/src/include/executor/tuptable.h
@@ -68,8 +68,8 @@
  * A TupleTableSlot can also be "empty", indicated by flag TTS_FLAG_EMPTY set
  * in tts_flags, holding no valid data.  This is the only valid state for a
  * freshly-created slot that has not yet had a tuple descriptor assigned to
- * it.  In this state, TTS_SHOULDFREE should not be set in tts_flag, tts_tuple
- * must be NULL, tts_buffer InvalidBuffer, and tts_nvalid zero.
+ * it.  In this state, TTS_SHOULDFREE should not be set in tts_flags, tts_tuple
+ * must be NULL and tts_nvalid zero.
  *
  * The tupleDescriptor is simply referenced, not copied, by the TupleTableSlot
  * code.  The caller of ExecSetSlotDescriptor() is responsible for providing
@@ -87,7 +87,7 @@
  * the descriptor is provided), or when a descriptor is assigned to the slot;
  * they are of length equal to the descriptor's natts.
  *
- * The TTS_FLAG_SLOW flag and tts_off are saved state for
+ * The TTS_FLAG_SLOW flag is saved state for
  * slot_deform_heap_tuple, and should not be touched by any other code.
  *----------
  */
diff --git a/src/include/optimizer/placeholder.h b/src/include/optimizer/placeholder.h
index be27751974..08f7187923 100644
--- a/src/include/optimizer/placeholder.h
+++ b/src/include/optimizer/placeholder.h
@@ -28,7 +28,5 @@ extern void fix_placeholder_input_needed_levels(PlannerInfo *root);
 extern void add_placeholders_to_base_rels(PlannerInfo *root);
 extern void add_placeholders_to_joinrel(PlannerInfo *root, RelOptInfo *joinrel,
 										RelOptInfo *outer_rel, RelOptInfo *inner_rel);
-extern void add_placeholders_to_child_joinrel(PlannerInfo *root,
-											  RelOptInfo *childrel, RelOptInfo *parentrel);
 
 #endif							/* PLACEHOLDER_H */
diff --git a/src/include/partitioning/partprune.h b/src/include/partitioning/partprune.h
index 06080b19cc..81318c785a 100644
--- a/src/include/partitioning/partprune.h
+++ b/src/include/partitioning/partprune.h
@@ -41,7 +41,7 @@ struct RelOptInfo;
  *					subsidiary data, such as the FmgrInfos.
  * planstate		Points to the parent plan node's PlanState when called
  *					during execution; NULL when called from the planner.
- * exprstates		Array of ExprStates, indexed as per PruneCtxStateIdx; one
+ * exprstates		Array of ExprStates, indexed as per PruneCxtStateIdx; one
  *					for each partition key in each pruning step.  Allocated if
  *					planstate is non-NULL, otherwise NULL.
  */
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index fe7c3d0ffc..5197926696 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -67,7 +67,7 @@ pg_leftmost_one_pos64(uint64 word)
 		shift -= 8;
 
 	return shift + pg_leftmost_one_pos[(word >> shift) & 255];
-#endif							/* HAVE__BUIILTIN_CLZ */
+#endif							/* HAVE__BUILTIN_CLZ */
 }
 
 /*
diff --git a/src/include/utils/jsonapi.h b/src/include/utils/jsonapi.h
index 35830df8c3..5f4d479a7b 100644
--- a/src/include/utils/jsonapi.h
+++ b/src/include/utils/jsonapi.h
@@ -145,7 +145,7 @@ typedef enum JsonToIndex
 	jtiAll = jtiKey | jtiString | jtiNumeric | jtiBool
 } JsonToIndex;
 
-/* an action that will be applied to each value in iterate_json(b)_vaues functions */
+/* an action that will be applied to each value in iterate_json(b)_values functions */
 typedef void (*JsonIterateStringValuesAction) (void *state, char *elem_value, int elem_len);
 
 /* an action that will be applied to each value in transform_json(b)_values functions */
diff --git a/src/include/utils/sharedtuplestore.h b/src/include/utils/sharedtuplestore.h
index 863c6e4f90..9dea626e84 100644
--- a/src/include/utils/sharedtuplestore.h
+++ b/src/include/utils/sharedtuplestore.h
@@ -1,7 +1,7 @@
 /*-------------------------------------------------------------------------
  *
  * sharedtuplestore.h
- *	  Simple mechinism for sharing tuples between backends.
+ *	  Simple mechanism for sharing tuples between backends.
  *
  * Portions Copyright (c) 1996-2019, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
diff --git a/src/pl/plperl/plperl.h b/src/pl/plperl/plperl.h
index e94a4c345b..3748158a86 100644
--- a/src/pl/plperl/plperl.h
+++ b/src/pl/plperl/plperl.h
@@ -84,7 +84,7 @@
  */
 #ifdef PG_NEED_PERL_XSUB_H
 /*
- * On Windows, port_win32.h defines macros for a lot of these same functions.
+ * On Windows, win32_port.h defines macros for a lot of these same functions.
  * To avoid compiler warnings when XSUB.h redefines them, #undef our versions.
  */
 #ifdef WIN32
diff --git a/src/port/pwrite.c b/src/port/pwrite.c
index 164b4f6197..a44e991277 100644
--- a/src/port/pwrite.c
+++ b/src/port/pwrite.c
@@ -9,7 +9,7 @@
  *	  src/port/pwrite.c
  *
  * Note that this implementation changes the current file position, unlike
- * the POSIX function, so we use the name pg_write().
+ * the POSIX function, so we use the name pg_pwrite().
  *
  *-------------------------------------------------------------------------
  */
diff --git a/src/test/isolation/specs/partition-key-update-4.spec b/src/test/isolation/specs/partition-key-update-4.spec
index 1d53a7d0c6..3d1579b244 100644
--- a/src/test/isolation/specs/partition-key-update-4.spec
+++ b/src/test/isolation/specs/partition-key-update-4.spec
@@ -67,7 +67,7 @@ step "s2c"	 { COMMIT; }
 # in the new partition should contain the changes made by session s2.
 permutation "s1b" "s2b" "s2u1" "s1u" "s2c" "s1c" "s1s"
 
-# Same as above, except, session s1 is waiting in GetTupleTrigger().
+# Same as above, except, session s1 is waiting in GetTupleForTrigger().
 permutation "s1b" "s2b" "s2ut1" "s1ut" "s2c" "s1c" "s1st" "s1stl"
 
 # Below two cases are similar to the above two; except that the session s1
diff --git a/src/test/modules/test_integerset/README b/src/test/modules/test_integerset/README
index 6fd7e3c0ca..a8b271869a 100644
--- a/src/test/modules/test_integerset/README
+++ b/src/test/modules/test_integerset/README
@@ -2,6 +2,6 @@ test_integerset contains unit tests for testing the integer set implementation
 in src/backend/lib/integerset.c.
 
 The tests verify the correctness of the implementation, but they can also be
-used as a micro-benchmark.  If you set the 'intset_tests_stats' flag in
+used as a micro-benchmark.  If you set the 'intset_test_stats' flag in
 test_integerset.c, the tests will print extra information about execution time
 and memory usage.
diff --git a/src/tools/msvc/config_default.pl b/src/tools/msvc/config_default.pl
index d7a9fc5039..2553636dc1 100644
--- a/src/tools/msvc/config_default.pl
+++ b/src/tools/msvc/config_default.pl
@@ -11,7 +11,6 @@ our $config = {
 
 	# blocksize => 8,         # --with-blocksize, 8kB by default
 	# wal_blocksize => 8,     # --with-wal-blocksize, 8kB by default
-	# wal_segsize => 16,      # --with-wal-segsize, 16MB by default
 	ldap      => 1,        # --with-ldap
 	extraver  => undef,    # --with-extra-version=<string>
 	gss       => undef,    # --with-gssapi=<path>
diff --git a/src/tools/msvc/dummylib/Win32API/File.pm b/src/tools/msvc/dummylib/Win32API/File.pm
index bfba9cc7d6..ccd271ef54 100644
--- a/src/tools/msvc/dummylib/Win32API/File.pm
+++ b/src/tools/msvc/dummylib/Win32API/File.pm
@@ -4,7 +4,7 @@ use strict;
 use warnings;
 
 use constant { SEM_FAILCRITICALERRORS => 1, SEM_NOGPFAULTERRORBOX => 2 };
-sub SetErrormode { }
+sub SetErrorMode { }
 use Exporter;
 our (@ISA, @EXPORT_OK, %EXPORT_TAGS);
 @ISA         = qw(Exporter);

Reply via email to