From 735f6574d77e0f3f8a2ff95ce4aa74f04db29541 Mon Sep 17 00:00:00 2001
From: Daniel Gustafsson <daniel@yesql.se>
Date: Thu, 11 Apr 2024 14:27:35 +0200
Subject: [PATCH v1] Fix typos

---
 contrib/amcheck/expected/check_btree.out                  | 2 +-
 contrib/amcheck/sql/check_btree.sql                       | 2 +-
 src/backend/access/gin/ginbtree.c                         | 2 +-
 src/backend/access/nbtree/nbtutils.c                      | 2 +-
 src/backend/commands/copyfrom.c                           | 2 +-
 src/backend/commands/tablecmds.c                          | 5 +++--
 src/backend/optimizer/path/pathkeys.c                     | 2 +-
 src/backend/optimizer/prep/prepunion.c                    | 2 +-
 src/backend/parser/parse_utilcmd.c                        | 4 ++--
 src/backend/replication/walsender.c                       | 2 +-
 src/backend/storage/buffer/bufmgr.c                       | 2 +-
 src/backend/storage/lmgr/lock.c                           | 2 +-
 src/backend/storage/lmgr/proc.c                           | 2 +-
 src/backend/utils/adt/jsonpath_exec.c                     | 4 ++--
 src/backend/utils/adt/selfuncs.c                          | 2 +-
 src/common/unicode_category.c                             | 2 +-
 src/include/access/tableam.h                              | 2 +-
 src/include/lib/radixtree.h                               | 8 ++++----
 src/test/isolation/expected/temp-schema-cleanup.out       | 4 ++--
 src/test/isolation/specs/temp-schema-cleanup.spec         | 2 +-
 src/test/modules/test_json_parser/README                  | 2 +-
 .../test_json_parser/test_json_parser_incremental.c       | 4 ++--
 src/test/modules/test_json_parser/test_json_parser_perf.c | 2 +-
 src/test/regress/expected/foreign_key.out                 | 2 +-
 src/test/regress/expected/publication.out                 | 4 ++--
 src/test/regress/expected/tsdicts.out                     | 2 +-
 src/test/regress/sql/foreign_key.sql                      | 2 +-
 src/test/regress/sql/publication.sql                      | 4 ++--
 src/test/regress/sql/tsdicts.sql                          | 2 +-
 src/test/subscription/t/004_sync.pl                       | 2 +-
 src/test/subscription/t/026_stats.pl                      | 2 +-
 31 files changed, 42 insertions(+), 41 deletions(-)

diff --git a/contrib/amcheck/expected/check_btree.out b/contrib/amcheck/expected/check_btree.out
index cf8284fe12..e7fb5f5515 100644
--- a/contrib/amcheck/expected/check_btree.out
+++ b/contrib/amcheck/expected/check_btree.out
@@ -2,7 +2,7 @@ CREATE TABLE bttest_a(id int8);
 CREATE TABLE bttest_b(id int8);
 CREATE TABLE bttest_multi(id int8, data int8);
 CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint);
--- Stabalize tests
+-- Stabilize tests
 ALTER TABLE bttest_a SET (autovacuum_enabled = false);
 ALTER TABLE bttest_b SET (autovacuum_enabled = false);
 ALTER TABLE bttest_multi SET (autovacuum_enabled = false);
diff --git a/contrib/amcheck/sql/check_btree.sql b/contrib/amcheck/sql/check_btree.sql
index 68bd71b064..0793dbfeeb 100644
--- a/contrib/amcheck/sql/check_btree.sql
+++ b/contrib/amcheck/sql/check_btree.sql
@@ -3,7 +3,7 @@ CREATE TABLE bttest_b(id int8);
 CREATE TABLE bttest_multi(id int8, data int8);
 CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint);
 
--- Stabalize tests
+-- Stabilize tests
 ALTER TABLE bttest_a SET (autovacuum_enabled = false);
 ALTER TABLE bttest_b SET (autovacuum_enabled = false);
 ALTER TABLE bttest_multi SET (autovacuum_enabled = false);
diff --git a/src/backend/access/gin/ginbtree.c b/src/backend/access/gin/ginbtree.c
index 86f938686c..b7a5013896 100644
--- a/src/backend/access/gin/ginbtree.c
+++ b/src/backend/access/gin/ginbtree.c
@@ -766,7 +766,7 @@ ginFinishSplit(GinBtree btree, GinBtreeStack *stack, bool freestack,
 /*
  * An entry point to ginFinishSplit() that is used when we stumble upon an
  * existing incompletely split page in the tree, as opposed to completing a
- * split that we just made outselves. The difference is that stack->buffer may
+ * split that we just made ourselves. The difference is that stack->buffer may
  * be merely share-locked on entry, and will be upgraded to exclusive mode.
  *
  * Note: Upgrading the lock momentarily releases it. Doing that in a scan
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 2eff34c4aa..052f693138 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -4126,7 +4126,7 @@ _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate,
 	else
 	{
 		/*
-		 * Failure -- "ahead" tuple is too far ahead (we were too aggresive).
+		 * Failure -- "ahead" tuple is too far ahead (we were too aggressive).
 		 *
 		 * Reset the number of rechecks, and aggressively reduce the target
 		 * distance (we're much more aggressive here than we were when the
diff --git a/src/backend/commands/copyfrom.c b/src/backend/commands/copyfrom.c
index 9d2900041e..a99589c7d8 100644
--- a/src/backend/commands/copyfrom.c
+++ b/src/backend/commands/copyfrom.c
@@ -998,7 +998,7 @@ CopyFrom(CopyFromState cstate)
 			cstate->escontext->error_occurred)
 		{
 			/*
-			 * Soft error occured, skip this tuple and deal with error
+			 * Soft error occurred, skip this tuple and deal with error
 			 * information according to ON_ERROR.
 			 */
 			if (cstate->opts.on_error == COPY_ON_ERROR_IGNORE)
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 8a98a0af48..fdc4917420 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -20888,7 +20888,8 @@ GetAttributeStorage(Oid atttypid, const char *storagemode)
 }
 
 /*
- * Struct with context of new partition for insert rows from splited partition
+ * Struct holding the context of a new partition, used when inserting rows
+ * from a partition being split.
  */
 typedef struct SplitPartitionContext
 {
@@ -21486,7 +21487,7 @@ ATExecMergePartitions(List **wqueue, AlteredTableInfo *tab, Relation rel,
 	/* Create table for new partition, use partitioned table as model. */
 	if (isSameName)
 	{
-		/* Create partition table with generated temparary name. */
+		/* Create the partition table with a generated temporary name. */
 		sprintf(tmpRelName, "merge-%u-%X-tmp", RelationGetRelid(rel), MyProcPid);
 		mergePartName = makeRangeVar(get_namespace_name(RelationGetNamespace(rel)),
 									 tmpRelName, -1);
diff --git a/src/backend/optimizer/path/pathkeys.c b/src/backend/optimizer/path/pathkeys.c
index 1d61881a6b..8b258cbef9 100644
--- a/src/backend/optimizer/path/pathkeys.c
+++ b/src/backend/optimizer/path/pathkeys.c
@@ -384,7 +384,7 @@ group_keys_reorder_by_pathkeys(List *pathkeys, List **group_pathkeys,
 	 * *group_pathkeys containing grouping pathkeys altogether with aggregate
 	 * pathkeys.  If we process aggregate pathkeys we could get an invalid
 	 * result of get_sortgroupref_clause_noerr(), because their
-	 * pathkey->pk_eclass->ec_sortref doesn't referece query targetlist.  So,
+	 * pathkey->pk_eclass->ec_sortref doesn't reference query targetlist.  So,
 	 * we allocate a separate list of pathkeys for lookups.
 	 */
 	grouping_pathkeys = list_copy_head(*group_pathkeys, num_groupby_pathkeys);
diff --git a/src/backend/optimizer/prep/prepunion.c b/src/backend/optimizer/prep/prepunion.c
index afcb5c0f0f..75655ee43c 100644
--- a/src/backend/optimizer/prep/prepunion.c
+++ b/src/backend/optimizer/prep/prepunion.c
@@ -214,7 +214,7 @@ set_operation_ordered_results_useful(SetOperationStmt *setop)
  *
  * Returns a RelOptInfo for the subtree, as well as these output parameters:
  * *pTargetList: receives the fully-fledged tlist for the subtree's top plan
- * *istrivial_tlist: true iif datatypes between parent and child match.
+ * *istrivial_tlist: true if datatypes between parent and child match.
  *
  * The pTargetList output parameter is mostly redundant with the pathtarget
  * of the returned RelOptInfo, but for the moment we need it because much of
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index ceba069905..6f2be56def 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -3451,7 +3451,7 @@ checkPartition(Relation rel, Oid partRelOid)
 
 /*
  * transformPartitionCmdForSplit
- *		Analyze the ALTER TABLLE ... SPLIT PARTITION command
+ *		Analyze the ALTER TABLE ... SPLIT PARTITION command
  *
  * For each new partition sps->bound is set to the transformed value of bound.
  * Does checks for bounds of new partitions.
@@ -3490,7 +3490,7 @@ transformPartitionCmdForSplit(CreateStmtContext *cxt, PartitionCmd *partcmd)
 
 /*
  * transformPartitionCmdForMerge
- *		Analyze the ALTER TABLLE ... MERGE PARTITIONS command
+ *		Analyze the ALTER TABLE ... MERGE PARTITIONS command
  *
  * Does simple checks for merged partitions. Calculates bound of result
  * partition.
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index bc40c454de..9bf7c67f37 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -3493,7 +3493,7 @@ WalSndDone(WalSndSendDataCallback send_data)
  * Returns the latest point in WAL that has been safely flushed to disk.
  * This should only be called when in recovery.
  *
- * This is called either by cascading walsender to find WAL postion to be sent
+ * This is called either by a cascading walsender to find the WAL position to be sent
  * to a cascaded standby or by slot synchronization operation to validate remote
  * slot's lsn before syncing it locally.
  *
diff --git a/src/backend/storage/buffer/bufmgr.c b/src/backend/storage/buffer/bufmgr.c
index 901b7230fb..49637284f9 100644
--- a/src/backend/storage/buffer/bufmgr.c
+++ b/src/backend/storage/buffer/bufmgr.c
@@ -1073,7 +1073,7 @@ PinBufferForBlock(Relation rel,
 
 	/*
 	 * If there is no Relation it usually implies recovery and thus permanent,
-	 * but we take an argmument because CreateAndCopyRelationData can reach us
+	 * but we take an argument because CreateAndCopyRelationData can reach us
 	 * with only an SMgrRelation for an unlogged relation that we don't want
 	 * to flag with BM_PERMANENT.
 	 */
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
index 5022a50dd7..5154353c84 100644
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -1032,7 +1032,7 @@ LockAcquireExtended(const LOCKTAG *locktag,
 
 		/*
 		 * Sleep till someone wakes me up. We do this even in the dontWait
-		 * case, beause while trying to go to sleep, we may discover that we
+		 * case, because while trying to go to sleep, we may discover that we
 		 * can acquire the lock immediately after all.
 		 */
 
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 4b830dc3c8..bd682dd810 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -1053,7 +1053,7 @@ AuxiliaryPidGetProc(int pid)
  * called, because it could be that when we try to find a position at which
  * to insert ourself into the wait queue, we discover that we must be inserted
  * ahead of everyone who wants a lock that conflict with ours. In that case,
- * we get the lock immediately. Beause of this, it's sensible for this function
+ * we get the lock immediately. Because of this, it's sensible for this function
  * to have a dontWait argument, despite the name.
  *
  * The lock table's partition lock must be held at entry, and will be held
diff --git a/src/backend/utils/adt/jsonpath_exec.c b/src/backend/utils/adt/jsonpath_exec.c
index 103572ed93..ad6a5369d9 100644
--- a/src/backend/utils/adt/jsonpath_exec.c
+++ b/src/backend/utils/adt/jsonpath_exec.c
@@ -4200,7 +4200,7 @@ JsonTableSetDocument(TableFuncScanState *state, Datum value)
 }
 
 /*
- * Evaluate a JsonTablePlan's jsonpath to get a new row pattren from
+ * Evaluate a JsonTablePlan's jsonpath to get a new row pattern from
  * the given context item
  */
 static void
@@ -4318,7 +4318,7 @@ JsonTablePlanScanNextRow(JsonTablePlanState *planstate)
 		/*
 		 * Now fetch the nested plan's current row to be joined against the
 		 * parent row.  Any further nested plans' paths will be re-evaluated
-		 * reursively, level at a time, after setting each nested plan's
+		 * recursively, level at a time, after setting each nested plan's
 		 * current row.
 		 */
 		(void) JsonTablePlanNextRow(planstate->nested);
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 35f8f306ee..5f5d7959d8 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -6968,7 +6968,7 @@ btcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
 		 * btree scans, making the top-level scan look like a continuous scan
 		 * (as opposed to num_sa_scans-many primitive index scans).  After
 		 * all, btree scans mostly work like that at runtime.  However, such a
-		 * scheme would badly bias genericcostestimate's simplistic appraoch
+		 * scheme would badly bias genericcostestimate's simplistic approach
 		 * to calculating numIndexPages through prorating.
 		 *
 		 * Stick with the approach taken by non-native SAOP scans for now.
diff --git a/src/common/unicode_category.c b/src/common/unicode_category.c
index bece7334f5..359e82ec31 100644
--- a/src/common/unicode_category.c
+++ b/src/common/unicode_category.c
@@ -23,7 +23,7 @@
 /*
  * Create bitmasks from pg_unicode_category values for efficient comparison of
  * multiple categories. For instance, PG_U_MN_MASK is a bitmask representing
- * the general cateogry Mn; and PG_U_M_MASK represents general categories Mn,
+ * the general category Mn; and PG_U_M_MASK represents general categories Mn,
  * Me, and Mc.
  *
  * The number of Unicode General Categories should never grow, so a 32-bit
diff --git a/src/include/access/tableam.h b/src/include/access/tableam.h
index d1cd71b7a1..5f7cdd0e2b 100644
--- a/src/include/access/tableam.h
+++ b/src/include/access/tableam.h
@@ -1872,7 +1872,7 @@ table_index_validate_scan(Relation table_rel,
 }
 
 /*
- * table_relation_analyze - fill the infromation for a sampling statistics
+ * table_relation_analyze - fill the information for a sampling statistics
  *							acquisition
  *
  * The pointer to a function that will collect sample rows from the table
diff --git a/src/include/lib/radixtree.h b/src/include/lib/radixtree.h
index dc4c00d38a..e780b8d323 100644
--- a/src/include/lib/radixtree.h
+++ b/src/include/lib/radixtree.h
@@ -64,7 +64,7 @@
  * small enough.
  *
  * There are two other techniques described in the paper that are not
- * impemented here:
+ * implemented here:
  * - path compression "...removes all inner nodes that have only a single child."
  * - lazy path expansion "...inner nodes are only created if they are required
  *   to distinguish at least two leaf nodes."
@@ -385,7 +385,7 @@ typedef struct RT_NODE
 
 	/*
 	 * Number of children. uint8 is sufficient for all node kinds, because
-	 * nodes shrink when this number gets lower than some thresold. Since
+	 * nodes shrink when this number gets lower than some threshold. Since
 	 * node256 cannot possibly have zero children, we let the counter overflow
 	 * and we interpret zero as "256" for this node kind.
 	 */
@@ -1581,7 +1581,7 @@ RT_EXTEND_UP(RT_RADIX_TREE * tree, uint64 key)
 
 	Assert(shift < target_shift);
 
-	/* Grow tree upwards until start shift can accomodate the key */
+	/* Grow tree upwards until start shift can accommodate the key */
 	while (shift < target_shift)
 	{
 		RT_CHILD_PTR node;
@@ -1869,7 +1869,7 @@ RT_CREATE(MemoryContext ctx)
 	 */
 	if (sizeof(RT_VALUE_TYPE) > sizeof(RT_PTR_ALLOC))
 		tree->leaf_context = SlabContextCreate(ctx,
-											   RT_STR(RT_PREFIX) "radix_tree leaf contex",
+											   RT_STR(RT_PREFIX) "radix_tree leaf context",
 											   RT_SLAB_BLOCK_SIZE(sizeof(RT_VALUE_TYPE)),
 											   sizeof(RT_VALUE_TYPE));
 #endif							/* !RT_VARLEN_VALUE_SIZE */
diff --git a/src/test/isolation/expected/temp-schema-cleanup.out b/src/test/isolation/expected/temp-schema-cleanup.out
index 35b91d9e45..d10aee53a8 100644
--- a/src/test/isolation/expected/temp-schema-cleanup.out
+++ b/src/test/isolation/expected/temp-schema-cleanup.out
@@ -9,7 +9,7 @@ step s1_create_temp_objects:
         CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
 	(SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
 
-    -- The above bug requirs function removal to happen after a catalog
+    -- The above bug requires function removal to happen after a catalog
     -- invalidation. dependency.c sorts objects in descending oid order so
     -- that newer objects are deleted before older objects, so create a
     -- table after.
@@ -66,7 +66,7 @@ step s1_create_temp_objects:
         CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
 	(SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
 
-    -- The above bug requirs function removal to happen after a catalog
+    -- The above bug requires function removal to happen after a catalog
     -- invalidation. dependency.c sorts objects in descending oid order so
     -- that newer objects are deleted before older objects, so create a
     -- table after.
diff --git a/src/test/isolation/specs/temp-schema-cleanup.spec b/src/test/isolation/specs/temp-schema-cleanup.spec
index a9417b7e90..72decba6cb 100644
--- a/src/test/isolation/specs/temp-schema-cleanup.spec
+++ b/src/test/isolation/specs/temp-schema-cleanup.spec
@@ -30,7 +30,7 @@ step s1_create_temp_objects {
         CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
 	(SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
 
-    -- The above bug requirs function removal to happen after a catalog
+    -- The above bug requires function removal to happen after a catalog
     -- invalidation. dependency.c sorts objects in descending oid order so
     -- that newer objects are deleted before older objects, so create a
     -- table after.
diff --git a/src/test/modules/test_json_parser/README b/src/test/modules/test_json_parser/README
index 7e410db24b..b07b3e8799 100644
--- a/src/test/modules/test_json_parser/README
+++ b/src/test/modules/test_json_parser/README
@@ -11,7 +11,7 @@ This module contains two programs for testing the json parsers.
 - `test_json_parser_perf` is for speed testing both the standard
   recursive descent parser and the non-recursive incremental
   parser. If given the `-i` flag it uses the non-recursive parser,
-  otherwise the stardard parser. The remaining flags are the number of
+  otherwise the standard parser. The remaining flags are the number of
   parsing iterations and the file containing the input. Even when
   using the non-recursive parser, the input is passed to the parser in a
   single chunk. The results are thus comparable to those of the
diff --git a/src/test/modules/test_json_parser/test_json_parser_incremental.c b/src/test/modules/test_json_parser/test_json_parser_incremental.c
index c28db05647..383be7b888 100644
--- a/src/test/modules/test_json_parser/test_json_parser_incremental.c
+++ b/src/test/modules/test_json_parser/test_json_parser_incremental.c
@@ -8,10 +8,10 @@
  * IDENTIFICATION
  *    src/test/modules/test_json_parser/test_json_parser_incremental.c
  *
- * This progam tests incremental parsing of json. The input is fed into
+ * This program tests incremental parsing of json. The input is fed into
  * the parser in very small chunks. In practice you would normally use
  * much larger chunks, but doing this makes it more likely that the
- * full range of incement handling, especially in the lexer, is exercised.
+ * full range of increment handling, especially in the lexer, is exercised.
  * If the "-c SIZE" option is provided, that chunk size is used instead.
  *
  * The argument specifies the file containing the JSON input.
diff --git a/src/test/modules/test_json_parser/test_json_parser_perf.c b/src/test/modules/test_json_parser/test_json_parser_perf.c
index 517dc8529a..85b615a96e 100644
--- a/src/test/modules/test_json_parser/test_json_parser_perf.c
+++ b/src/test/modules/test_json_parser/test_json_parser_perf.c
@@ -8,7 +8,7 @@
  * IDENTIFICATION
  *    src/test/modules/test_json_parser/test_json_parser_perf.c
  *
- * This progam tests either the standard (recursive descent) JSON parser
+ * This program tests either the standard (recursive descent) JSON parser
  * or the incremental (table driven) parser, but without breaking the input
  * into chunks in the latter case. Thus it can be used to compare the pure
  * parsing speed of the two parsers. If the "-i" option is used, then the
diff --git a/src/test/regress/expected/foreign_key.out b/src/test/regress/expected/foreign_key.out
index af2a878dd6..0b55167ac8 100644
--- a/src/test/regress/expected/foreign_key.out
+++ b/src/test/regress/expected/foreign_key.out
@@ -2734,7 +2734,7 @@ UPDATE fkpart10.tbl1 SET f1 = 2 WHERE f1 = 1;
 INSERT INTO fkpart10.tbl1 VALUES (0), (1);
 COMMIT;
 -- test that cross-partition updates correctly enforces the foreign key
--- restriction (specifically testing INITIAILLY DEFERRED)
+-- restriction (specifically testing INITIALLY DEFERRED)
 BEGIN;
 UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0;
 UPDATE fkpart10.tbl3 SET f1 = f1 * -1;
diff --git a/src/test/regress/expected/publication.out b/src/test/regress/expected/publication.out
index 0c5521d2aa..09a8d8221c 100644
--- a/src/test/regress/expected/publication.out
+++ b/src/test/regress/expected/publication.out
@@ -945,10 +945,10 @@ ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUE
 SET client_min_messages = 'ERROR';
 CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk (a, b);
 RESET client_min_messages;
--- ok - (a,b) coverts all PK cols
+-- ok - (a,b) covers all PK cols
 UPDATE rf_tbl_abcd_pk SET a = 1;
 ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c);
--- ok - (a,b,c) coverts all PK cols
+-- ok - (a,b,c) covers all PK cols
 UPDATE rf_tbl_abcd_pk SET a = 1;
 ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
 -- fail - "b" is missing from the column list
diff --git a/src/test/regress/expected/tsdicts.out b/src/test/regress/expected/tsdicts.out
index 4eff85da79..0bbf2ff4ca 100644
--- a/src/test/regress/expected/tsdicts.out
+++ b/src/test/regress/expected/tsdicts.out
@@ -689,7 +689,7 @@ CREATE TEXT SEARCH DICTIONARY tsdict_case
 ERROR:  unrecognized Ispell parameter: "DictFile"
 -- Test grammar for configurations
 CREATE TEXT SEARCH CONFIGURATION dummy_tst (COPY=english);
--- Overriden mapping change with duplicated tokens.
+-- Overridden mapping change with duplicated tokens.
 ALTER TEXT SEARCH CONFIGURATION dummy_tst
   ALTER MAPPING FOR word, word WITH ispell;
 -- Not a token supported by the configuration's parser, fails.
diff --git a/src/test/regress/sql/foreign_key.sql b/src/test/regress/sql/foreign_key.sql
index 22e177f89b..f5e0938999 100644
--- a/src/test/regress/sql/foreign_key.sql
+++ b/src/test/regress/sql/foreign_key.sql
@@ -1943,7 +1943,7 @@ INSERT INTO fkpart10.tbl1 VALUES (0), (1);
 COMMIT;
 
 -- test that cross-partition updates correctly enforces the foreign key
--- restriction (specifically testing INITIAILLY DEFERRED)
+-- restriction (specifically testing INITIALLY DEFERRED)
 BEGIN;
 UPDATE fkpart10.tbl1 SET f1 = 3 WHERE f1 = 0;
 UPDATE fkpart10.tbl3 SET f1 = f1 * -1;
diff --git a/src/test/regress/sql/publication.sql b/src/test/regress/sql/publication.sql
index 8ba8036bfb..479d4f3264 100644
--- a/src/test/regress/sql/publication.sql
+++ b/src/test/regress/sql/publication.sql
@@ -603,10 +603,10 @@ ALTER TABLE rf_tbl_abcd_part_pk ATTACH PARTITION rf_tbl_abcd_part_pk_1 FOR VALUE
 SET client_min_messages = 'ERROR';
 CREATE PUBLICATION testpub6 FOR TABLE rf_tbl_abcd_pk (a, b);
 RESET client_min_messages;
--- ok - (a,b) coverts all PK cols
+-- ok - (a,b) covers all PK cols
 UPDATE rf_tbl_abcd_pk SET a = 1;
 ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a, b, c);
--- ok - (a,b,c) coverts all PK cols
+-- ok - (a,b,c) covers all PK cols
 UPDATE rf_tbl_abcd_pk SET a = 1;
 ALTER PUBLICATION testpub6 SET TABLE rf_tbl_abcd_pk (a);
 -- fail - "b" is missing from the column list
diff --git a/src/test/regress/sql/tsdicts.sql b/src/test/regress/sql/tsdicts.sql
index 6a2b00369c..cf08410bb2 100644
--- a/src/test/regress/sql/tsdicts.sql
+++ b/src/test/regress/sql/tsdicts.sql
@@ -254,7 +254,7 @@ CREATE TEXT SEARCH DICTIONARY tsdict_case
 
 -- Test grammar for configurations
 CREATE TEXT SEARCH CONFIGURATION dummy_tst (COPY=english);
--- Overriden mapping change with duplicated tokens.
+-- Overridden mapping change with duplicated tokens.
 ALTER TEXT SEARCH CONFIGURATION dummy_tst
   ALTER MAPPING FOR word, word WITH ispell;
 -- Not a token supported by the configuration's parser, fails.
diff --git a/src/test/subscription/t/004_sync.pl b/src/test/subscription/t/004_sync.pl
index e077e255fc..a2d9462395 100644
--- a/src/test/subscription/t/004_sync.pl
+++ b/src/test/subscription/t/004_sync.pl
@@ -165,7 +165,7 @@ $node_subscriber->safe_psql('postgres', "DROP SUBSCRIPTION tap_sub");
 
 # When DROP SUBSCRIPTION tries to drop the tablesync slot, the slot may not
 # have been created, which causes the slot to be created after the DROP
-# SUSCRIPTION finishes. Such slots eventually get dropped at walsender exit
+# SUBSCRIPTION finishes. Such slots eventually get dropped at walsender exit
 # time. So, to prevent being affected by such ephemeral tablesync slots, we
 # wait until all the slots have been cleaned.
 ok( $node_publisher->poll_query_until(
diff --git a/src/test/subscription/t/026_stats.pl b/src/test/subscription/t/026_stats.pl
index bac1cf3983..d1d68fad9a 100644
--- a/src/test/subscription/t/026_stats.pl
+++ b/src/test/subscription/t/026_stats.pl
@@ -271,7 +271,7 @@ is( $node_subscriber->safe_psql(
 my $sub2_oid = $node_subscriber->safe_psql($db,
 	qq(SELECT oid FROM pg_subscription WHERE subname = '$sub2_name'));
 
-# Diassociate the subscription 2 from its replication slot and drop it
+# Disassociate the subscription 2 from its replication slot and drop it
 $node_subscriber->safe_psql(
 	$db,
 	qq(
-- 
2.39.3 (Apple Git-146)

