It's now split up more or less as you suggested:
https://github.com/jsoref/postgres/commits/spelling
diff --git a/configure b/configure
--- a/configure
+++ b/configure
@@ -7088,7 +7088,7 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCR
 test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
 
 # When Autoconf chooses install-sh as install program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $INSTALL in
   *install-sh*) install_bin='';;
@@ -7232,7 +7232,7 @@ fi
 $as_echo "$MKDIR_P" >&6; }
 
 # When Autoconf chooses install-sh as mkdir -p program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $MKDIR_P in
   *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;
diff --git a/configure.in b/configure.in
--- a/configure.in
+++ b/configure.in
@@ -887,7 +887,7 @@ fi
 
 AC_PROG_INSTALL
 # When Autoconf chooses install-sh as install program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $INSTALL in
   *install-sh*) install_bin='';;
@@ -900,7 +900,7 @@ AC_PROG_LN_S
 AC_PROG_AWK
 AC_PROG_MKDIR_P
 # When Autoconf chooses install-sh as mkdir -p program it tries to generate
-# a relative path to it in each makefile where it subsitutes it. This clashes
+# a relative path to it in each makefile where it substitutes it. This clashes
 # with our Makefile.global concept. This workaround helps.
 case $MKDIR_P in
   *install-sh*) MKDIR_P='\${SHELL} \${top_srcdir}/config/install-sh -c -d';;
diff --git a/contrib/bloom/blvacuum.c b/contrib/bloom/blvacuum.c
--- a/contrib/bloom/blvacuum.c
+++ b/contrib/bloom/blvacuum.c
@@ -51,7 +51,7 @@ blbulkdelete(IndexVacuumInfo *info, Inde
        initBloomState(&state, index);
 
        /*
-        * Interate over the pages. We don't care about concurrently added pages,
+        * Iterate over the pages. We don't care about concurrently added pages,
         * they can't contain tuples to delete.
         */
        npages = RelationGetNumberOfBlocks(index);
diff --git a/contrib/cube/sql/cube.sql b/contrib/cube/sql/cube.sql
--- a/contrib/cube/sql/cube.sql
+++ b/contrib/cube/sql/cube.sql
@@ -256,7 +256,7 @@ SELECT cube_dim('(0,0,0)'::cube);
 SELECT cube_dim('(42,42,42),(42,42,42)'::cube);
 SELECT cube_dim('(4,8,15,16,23),(4,8,15,16,23)'::cube);
 
--- Test of cube_ll_coord function (retrieves LL coodinate values)
+-- Test of cube_ll_coord function (retrieves LL coordinate values)
 --
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 1);
 SELECT cube_ll_coord('(-1,1),(2,-2)'::cube, 2);
@@ -268,7 +268,7 @@ SELECT cube_ll_coord('(42,137)'::cube, 1
 SELECT cube_ll_coord('(42,137)'::cube, 2);
 SELECT cube_ll_coord('(42,137)'::cube, 3);
 
--- Test of cube_ur_coord function (retrieves UR coodinate values)
+-- Test of cube_ur_coord function (retrieves UR coordinate values)
 --
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 1);
 SELECT cube_ur_coord('(-1,1),(2,-2)'::cube, 2);
diff --git a/contrib/earthdistance/earthdistance--1.1.sql b/contrib/earthdistance/earthdistance--1.1.sql
--- a/contrib/earthdistance/earthdistance--1.1.sql
+++ b/contrib/earthdistance/earthdistance--1.1.sql
@@ -11,7 +11,7 @@ CREATE FUNCTION earth() RETURNS float8
 LANGUAGE SQL IMMUTABLE PARALLEL SAFE
 AS 'SELECT ''6378168''::float8';
 
--- Astromers may want to change the earth function so that distances will be
+-- Astronomers may want to change the earth function so that distances will be
 -- returned in degrees. To do this comment out the above definition and
 -- uncomment the one below. Note that doing this will break the regression
 -- tests.
diff --git a/contrib/isn/ISSN.h b/contrib/isn/ISSN.h
--- a/contrib/isn/ISSN.h
+++ b/contrib/isn/ISSN.h
@@ -23,7 +23,7 @@
  * Product             9 + 21 + 7 + 3 + 1 + 12 + 4 + 24 + 7 + 15 + 0 + 0 = 103
  *                             103 / 10 = 10 remainder 3
  * Check digit 10 - 3 = 7
- * => 977-1144875-00-7 ??  <- suplemental number (number of the week, month, etc.)
+ * => 977-1144875-00-7 ??  <- supplemental number (number of the week, month, etc.)
  *                               ^^ 00 for non-daily publications (01=Monday, 02=Tuesday, ...)
  *
  * The hyphenation is always in after the four digits of the ISSN code.
diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c
--- a/contrib/isn/isn.c
+++ b/contrib/isn/isn.c
@@ -160,7 +160,7 @@ dehyphenate(char *bufO, char *bufI)
  *                               into bufO using the given hyphenation range TABLE.
  *                               Assumes the input string to be used is of only digits.
  *
- * Returns the number of characters acctually hyphenated.
+ * Returns the number of characters actually hyphenated.
  */
 static unsigned
 hyphenate(char *bufO, char *bufI, const char *(*TABLE)[2], const unsigned TABLE_index[10][2])
@@ -748,7 +748,7 @@ string2ean(const char *str, bool errorOK
                }
                else if (*aux2 == '!' && *(aux2 + 1) == '\0')
                {
-                       /* the invalid check digit sufix was found, set it */
+                       /* the invalid check digit suffix was found, set it */
                        if (!magic)
                                valid = false;
                        magic = true;
diff --git a/contrib/ltree/ltxtquery_io.c b/contrib/ltree/ltxtquery_io.c
--- a/contrib/ltree/ltxtquery_io.c
+++ b/contrib/ltree/ltxtquery_io.c
@@ -197,7 +197,7 @@ pushval_asis(QPRS_STATE *state, int type
 
 #define STACKDEPTH             32
 /*
- * make polish notaion of query
+ * make polish notation of query
  */
 static int32
 makepol(QPRS_STATE *state)
diff --git a/contrib/ltree/sql/ltree.sql b/contrib/ltree/sql/ltree.sql
--- a/contrib/ltree/sql/ltree.sql
+++ b/contrib/ltree/sql/ltree.sql
@@ -209,7 +209,7 @@ SELECT 'a.b.c.d.e'::ltree ? '{A.b.c.d.e,
 SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e}';
 SELECT '{a.b.c.d.e,B.df}'::ltree[] ? '{A.b.c.d.e,*.df}';
 
---exractors
+--extractors
 SELECT ('{3456,1.2.3.34}'::ltree[] ?@> '1.2.3.4') is null;
 SELECT '{3456,1.2.3}'::ltree[] ?@> '1.2.3.4';
 SELECT '{3456,1.2.3.4}'::ltree[] ?<@ '1.2.3';
diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c
--- a/contrib/pg_standby/pg_standby.c
+++ b/contrib/pg_standby/pg_standby.c
@@ -779,7 +779,7 @@ main(int argc, char **argv)
                {
                        /*
                         * Once we have restored this file successfully we can remove some
-                        * prior WAL files. If this restore fails we musn't remove any
+                        * prior WAL files. If this restore fails we mustn't remove any
                         * file because some of them will be requested again immediately
                         * after the failed restore, or when we restart recovery.
                         */
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -139,7 +139,7 @@ typedef struct Counters
 {
        int64           calls;                  /* # of times executed */
        double          total_time;             /* total execution time, in msec */
-       double          min_time;               /* minimim execution time in msec */
+       double          min_time;               /* minimum execution time in msec */
        double          max_time;               /* maximum execution time in msec */
        double          mean_time;              /* mean execution time in msec */
        double          sum_var_time;   /* sum of variances in execution time in msec */
diff --git a/contrib/pg_trgm/trgm_op.c b/contrib/pg_trgm/trgm_op.c
--- a/contrib/pg_trgm/trgm_op.c
+++ b/contrib/pg_trgm/trgm_op.c
@@ -413,7 +413,7 @@ comp_ptrgm(const void *v1, const void *v
  * ulen1: count of unique trigrams of array "trg1".
  * len2: length of array "trg2" and array "trg2indexes".
  * len: length of the array "found".
- * check_only: if true then only check existaince of similar search pattern in
+ * check_only: if true then only check existence of similar search pattern in
  *                        text.
  *
  * Returns word similarity.
@@ -456,7 +456,7 @@ iterate_word_similarity(int *trg2indexes
                        lastpos[trgindex] = i;
                }
 
-               /* Adjust lower bound if this trigram is present in required substing */
+               /* Adjust lower bound if this trigram is present in required substring */
                if (found[trgindex])
                {
                        int                     prev_lower,
@@ -547,7 +547,7 @@ iterate_word_similarity(int *trg2indexes
  *
  * str1: search pattern string, of length slen1 bytes.
  * str2: text in which we are looking for a word, of length slen2 bytes.
- * check_only: if true then only check existaince of similar search pattern in
+ * check_only: if true then only check existence of similar search pattern in
  *                        text.
  *
  * Returns word similarity.
diff --git a/contrib/pg_trgm/trgm_regexp.c b/contrib/pg_trgm/trgm_regexp.c
--- a/contrib/pg_trgm/trgm_regexp.c
+++ b/contrib/pg_trgm/trgm_regexp.c
@@ -844,7 +844,7 @@ convertPgWchar(pg_wchar c, trgm_mb_char 
        if (c == 0)
                return false;
 
-       /* Do the conversion, making sure the result is NUL-terminated */
+       /* Do the conversion, making sure the result is NULL-terminated */
        memset(s, 0, sizeof(s));
        pg_wchar2mb_with_len(&c, s, 1);
 
diff --git a/contrib/pgcrypto/crypt-des.c b/contrib/pgcrypto/crypt-des.c
--- a/contrib/pgcrypto/crypt-des.c
+++ b/contrib/pgcrypto/crypt-des.c
@@ -416,7 +416,7 @@ des_setkey(const char *key)
                && rawkey1 == old_rawkey1)
        {
                /*
-                * Already setup for this key. This optimisation fails on a zero key
+                * Already setup for this key. This optimization fails on a zero key
                 * (which is weak and has bad parity anyway) in order to simplify the
                 * starting conditions.
                 */
diff --git a/contrib/pgcrypto/imath.h b/contrib/pgcrypto/imath.h
--- a/contrib/pgcrypto/imath.h
+++ b/contrib/pgcrypto/imath.h
@@ -163,7 +163,7 @@ mp_result   mp_int_sqrt(mp_int a, mp_int c
 /* Convert to an int, if representable (returns MP_RANGE if not). */
 mp_result      mp_int_to_int(mp_int z, int *out);
 
-/* Convert to nul-terminated string with the specified radix, writing at
+/* Convert to null-terminated string with the specified radix, writing at
    most limit characters including the nul terminator  */
 mp_result mp_int_to_string(mp_int z, mp_size radix,
                                 char *str, int limit);
diff --git a/contrib/pgcrypto/mbuf.c b/contrib/pgcrypto/mbuf.c
--- a/contrib/pgcrypto/mbuf.c
+++ b/contrib/pgcrypto/mbuf.c
@@ -311,7 +311,7 @@ pullf_read_max(PullFilter *pf, int len, 
 }
 
 /*
- * caller wants exatly len bytes and dont bother with references
+ * caller wants exactly len bytes and dont bother with references
  */
 int
 pullf_read_fixed(PullFilter *src, int len, uint8 *dst)
diff --git a/contrib/pgcrypto/pgp-mpi-internal.c b/contrib/pgcrypto/pgp-mpi-internal.c
--- a/contrib/pgcrypto/pgp-mpi-internal.c
+++ b/contrib/pgcrypto/pgp-mpi-internal.c
@@ -141,7 +141,7 @@ bn_to_mpi(mpz_t *bn)
 }
 
 /*
- * Decide the number of bits in the random componont k
+ * Decide the number of bits in the random component k
  *
  * It should be in the same range as p for signing (which
  * is deprecated), but can be much smaller for encrypting.
@@ -149,8 +149,8 @@ bn_to_mpi(mpz_t *bn)
  * Until I research it further, I just mimic gpg behaviour.
  * It has a special mapping table, for values <= 5120,
  * above that it uses 'arbitrary high number'.  Following
- * algorihm hovers 10-70 bits above gpg values.  And for
- * larger p, it uses gpg's algorihm.
+ * algorithm hovers 10-70 bits above gpg values.  And for
+ * larger p, it uses gpg's algorithm.
  *
  * The point is - if k gets large, encryption will be
  * really slow.  It does not matter for decryption.
diff --git a/contrib/pgcrypto/pgp-mpi-openssl.c b/contrib/pgcrypto/pgp-mpi-openssl.c
--- a/contrib/pgcrypto/pgp-mpi-openssl.c
+++ b/contrib/pgcrypto/pgp-mpi-openssl.c
@@ -74,7 +74,7 @@ bn_to_mpi(BIGNUM *bn)
 }
 
 /*
- * Decide the number of bits in the random componont k
+ * Decide the number of bits in the random component k
  *
  * It should be in the same range as p for signing (which
  * is deprecated), but can be much smaller for encrypting.
@@ -82,8 +82,8 @@ bn_to_mpi(BIGNUM *bn)
  * Until I research it further, I just mimic gpg behaviour.
  * It has a special mapping table, for values <= 5120,
  * above that it uses 'arbitrary high number'.  Following
- * algorihm hovers 10-70 bits above gpg values.  And for
- * larger p, it uses gpg's algorihm.
+ * algorithm hovers 10-70 bits above gpg values.  And for
+ * larger p, it uses gpg's algorithm.
  *
  * The point is - if k gets large, encryption will be
  * really slow.  It does not matter for decryption.
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -493,7 +493,7 @@ EXPLAIN (VERBOSE, COSTS OFF)
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
 SELECT t1."C 1" FROM "S 1"."T 1" t1, LATERAL (SELECT DISTINCT t2.c1, t3.c1 FROM ft1 t2, ft2 t3 WHERE t2.c1 = t3.c1 AND t2.c2 = t1.c2) q ORDER BY t1."C 1" OFFSET 10 LIMIT 10;
 
--- non-Var items in targelist of the nullable rel of a join preventing
+-- non-Var items in targetlist of the nullable rel of a join preventing
 -- push-down in some cases
 -- unable to push {ft1, ft2}
 EXPLAIN (VERBOSE, COSTS OFF)
diff --git a/contrib/seg/seg.c b/contrib/seg/seg.c
--- a/contrib/seg/seg.c
+++ b/contrib/seg/seg.c
@@ -888,7 +888,7 @@ restore(char *result, float val, int n)
                if (Abs(exp) <= 4)
                {
                        /*
-                        * remove the decimal point from the mantyssa and write the digits
+                        * remove the decimal point from the mantissa and write the digits
                         * to the buf array
                         */
                        for (p = result + sign, i = 10, dp = 0; *p != 'e'; p++, i++)
diff --git a/contrib/sepgsql/selinux.c b/contrib/sepgsql/selinux.c
--- a/contrib/sepgsql/selinux.c
+++ b/contrib/sepgsql/selinux.c
@@ -23,7 +23,7 @@
  * When we ask SELinux whether the required privileges are allowed or not,
  * we use security_compute_av(3). It needs us to represent object classes
  * and access vectors using 'external' codes defined in the security policy.
- * It is determinded in the runtime, not build time. So, it needs an internal
+ * It is determined in the runtime, not build time. So, it needs an internal
  * service to translate object class/access vectors which we want to check
  * into the code which kernel want to be given.
  */
diff --git a/contrib/sepgsql/sql/label.sql b/contrib/sepgsql/sql/label.sql
--- a/contrib/sepgsql/sql/label.sql
+++ b/contrib/sepgsql/sql/label.sql
@@ -206,7 +206,7 @@ SELECT * FROM auth_tbl;     -- failed
 SELECT sepgsql_setcon(NULL);   -- end of session
 SELECT sepgsql_getcon();
 
--- the pooler cannot touch these tables directry
+-- the pooler cannot touch these tables directly
 SELECT * FROM foo_tbl; -- failed
 
 SELECT * FROM var_tbl; -- failed
diff --git a/contrib/spi/refint.c b/contrib/spi/refint.c
--- a/contrib/spi/refint.c
+++ b/contrib/spi/refint.c
@@ -89,7 +89,7 @@ check_primary_key(PG_FUNCTION_ARGS)
                /* internal error */
                elog(ERROR, "check_primary_key: cannot process DELETE events");
 
-       /* If UPDATion the must check new Tuple, not old one */
+       /* If UPDATE, then must check new Tuple, not old one */
        else
                tuple = trigdata->tg_newtuple;
 
diff --git a/contrib/spi/timetravel.c b/contrib/spi/timetravel.c
--- a/contrib/spi/timetravel.c
+++ b/contrib/spi/timetravel.c
@@ -51,7 +51,7 @@ static EPlan *find_plan(char *ident, EPl
  *                     and stop_date eq INFINITY [ and update_user eq current user ]
  *                     and all other column values as in new tuple, and insert tuple
  *                     with old data and stop_date eq current date
- *                     ELSE - skip updation of tuple.
+ *                     ELSE - skip UPDATE of tuple.
  *             2.  IF a delete affects tuple with stop_date eq INFINITY
  *                     then insert the same tuple with stop_date eq current date
  *                     [ and delete_user eq current user ]
diff --git a/contrib/start-scripts/osx/PostgreSQL b/contrib/start-scripts/osx/PostgreSQL
--- a/contrib/start-scripts/osx/PostgreSQL
+++ b/contrib/start-scripts/osx/PostgreSQL
@@ -29,7 +29,7 @@
 # modified by Ray Aspeitia 12-03-2003 :
 # added log rotation script to db startup
 # modified StartupParameters.plist "Provides" parameter to make it easier to
-# start and stop with the SystemStarter utitlity
+# start and stop with the SystemStarter utility
 
 # use the below command in order to correctly start/stop/restart PG with log rotation script:
 # SystemStarter [start|stop|restart] PostgreSQL
diff --git a/contrib/tsearch2/tsearch2--1.0.sql b/contrib/tsearch2/tsearch2--1.0.sql
--- a/contrib/tsearch2/tsearch2--1.0.sql
+++ b/contrib/tsearch2/tsearch2--1.0.sql
@@ -414,7 +414,7 @@ CREATE FUNCTION stat(text,text)
        LANGUAGE INTERNAL
        RETURNS NULL ON NULL INPUT;
 
---reset - just for debuging
+--reset - just for debugging
 CREATE FUNCTION reset_tsearch()
         RETURNS void
         as 'MODULE_PATHNAME', 'tsa_reset_tsearch'
diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c
--- a/contrib/xml2/xpath.c
+++ b/contrib/xml2/xpath.c
@@ -610,7 +610,7 @@ xpath_table(PG_FUNCTION_ARGS)
 
        /*
         * At the moment we assume that the returned attributes make sense for 
the
-        * XPath specififed (i.e. we trust the caller). It's not fatal if they 
get
+        * XPath specified (i.e. we trust the caller). It's not fatal if they 
get
         * it wrong - the input function for the column type will raise an error
         * if the path result can't be converted into the correct binary
         * representation.
diff --git a/src/Makefile.shlib b/src/Makefile.shlib
--- a/src/Makefile.shlib
+++ b/src/Makefile.shlib
@@ -377,7 +377,7 @@ DLL_DEFFILE = lib$(NAME)dll.def
        $(CC) $(CFLAGS)  -shared -static-libgcc -o $@  $(OBJS) $(DLL_DEFFILE) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK) $(LIBS) -Wl,--out-implib=$(stlib)
 endif
 
-endif # PORTNAME == cgywin
+endif # PORTNAME == cygwin
 endif # PORTNAME == cygwin || PORTNAME == win32
 
 
diff --git a/src/backend/access/gist/README b/src/backend/access/gist/README
--- a/src/backend/access/gist/README
+++ b/src/backend/access/gist/README
@@ -28,7 +28,7 @@ The current implementation of GiST suppo
 
 The support for concurrency implemented in PostgreSQL was developed based on
 the paper "Access Methods for Next-Generation Database Systems" by
-Marcel Kornaker:
+Marcel Kornacker:
 
     http://www.sai.msu.su/~megera/postgres/gist/papers/concurrency/access-methods-for-next-generation.pdf.gz
 
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -1077,7 +1077,7 @@ static void
  * already moved before the split operation was previously interrupted.
  *
  * The caller must hold a pin, but no lock, on the metapage and old bucket's
- * primay page buffer.  The buffers are returned in the same state.  (The
+ * primary page buffer.  The buffers are returned in the same state.  (The
  * metapage is only touched if it becomes necessary to add or remove overflow
  * pages.)
  */
diff --git a/src/backend/access/heap/rewriteheap.c b/src/backend/access/heap/rewriteheap.c
--- a/src/backend/access/heap/rewriteheap.c
+++ b/src/backend/access/heap/rewriteheap.c
@@ -209,7 +209,7 @@ typedef struct RewriteMappingFile
 } RewriteMappingFile;
 
 /*
- * A single In-Memeory logical rewrite mapping, hanging of
+ * A single In-Memory logical rewrite mapping, hanging of
  * RewriteMappingFile->mappings.
  */
 typedef struct RewriteMappingDataEntry
diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c
--- a/src/backend/access/transam/commit_ts.c
+++ b/src/backend/access/transam/commit_ts.c
@@ -615,7 +615,7 @@ CommitTsParameterChange(bool newvalue, b
 
 /*
  * Activate this module whenever necessary.
- *             This must happen during postmaster or standalong-backend startup,
+ *             This must happen during postmaster or standalone-backend startup,
 *             or during WAL replay anytime the track_commit_timestamp setting is
  *             changed in the master.
  *
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -1613,7 +1613,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horiz
         *
         * It's also possible to move I/O out of the lock, but on every error we
         * should check whether somebody committed our transaction in different
-        * backend. Let's leave this optimisation for future, if somebody will
+        * backend. Let's leave this optimization for future, if somebody will
         * spot that this place cause bottleneck.
         *
         * Note that it isn't possible for there to be a GXACT with a
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -2752,7 +2752,7 @@ CommitTransactionCommand(void)
                         * These shouldn't happen.  TBLOCK_DEFAULT means the previous
                         * StartTransactionCommand didn't set the STARTED state
                         * appropriately, while TBLOCK_PARALLEL_INPROGRESS should be ended
-                        * by EndParallelWorkerTranaction(), not this function.
+                        * by EndParallelWorkerTransaction(), not this function.
                         */
                case TBLOCK_DEFAULT:
                case TBLOCK_PARALLEL_INPROGRESS:
diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c
--- a/src/backend/catalog/objectaddress.c
+++ b/src/backend/catalog/objectaddress.c
@@ -770,7 +770,7 @@ static void getRelationIdentity(StringIn
  *
  * Note: If the object is not found, we don't give any indication of the
  * reason.  (It might have been a missing schema if the name was qualified, or
- * an inexistant type name in case of a cast, function or operator; etc).
+ * a nonexistent type name in case of a cast, function or operator; etc).
  * Currently there is only one caller that might be interested in such info, so
  * we don't spend much effort here.  If more callers start to care, it might be
  * better to add some support for that in this function.
diff --git a/src/backend/commands/amcmds.c b/src/backend/commands/amcmds.c
--- a/src/backend/commands/amcmds.c
+++ b/src/backend/commands/amcmds.c
@@ -34,7 +34,7 @@ static const char *get_am_type_string(ch
 
 
 /*
- * CreateAcessMethod
+ * CreateAccessMethod
  *             Registers a new access method.
  */
 ObjectAddress
diff --git a/src/backend/commands/async.c b/src/backend/commands/async.c
--- a/src/backend/commands/async.c
+++ b/src/backend/commands/async.c
@@ -313,7 +313,7 @@ typedef enum
 typedef struct
 {
        ListenActionKind action;
-       char            channel[FLEXIBLE_ARRAY_MEMBER]; /* nul-terminated string */
+       char            channel[FLEXIBLE_ARRAY_MEMBER]; /* null-terminated string */
 } ListenAction;
 
 static List *pendingActions = NIL;             /* list of ListenAction */
diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c
--- a/src/backend/commands/copy.c
+++ b/src/backend/commands/copy.c
@@ -2405,7 +2405,7 @@ CopyFrom(CopyState cstate)
         * earlier scan or command. This ensures that if this subtransaction
         * aborts then the frozen rows won't be visible after xact cleanup. Note
         * that the stronger test of exactly which subtransaction created it is
-        * crucial for correctness of this optimisation.
+        * crucial for correctness of this optimization.
         */
        if (cstate->freeze)
        {
@@ -2972,7 +2972,7 @@ BeginCopyFrom(ParseState *pstate,
                                 * the special case of when the default expression is the
                                 * nextval() of a sequence which in this specific case is
                                 * known to be safe for use with the multi-insert
-                                * optimisation. Hence we use this special case function
+                                * optimization. Hence we use this special case function
                                 * checker rather than the standard check for
                                 * contain_volatile_functions().
                                 */
diff --git a/src/backend/commands/dbcommands.c b/src/backend/commands/dbcommands.c
--- a/src/backend/commands/dbcommands.c
+++ b/src/backend/commands/dbcommands.c
@@ -685,7 +685,7 @@ createdb(ParseState *pstate, const Creat
 
                /*
                 * Force synchronous commit, thus minimizing the window between
-                * creation of the database files and commital of the transaction. If
+                * creation of the database files and committal of the transaction. If
                 * we crash before committing, we'll have a DB that's taking up disk
                 * space but is not in pg_database, which is not good.
                 */
@@ -955,7 +955,7 @@ dropdb(const char *dbname, bool missing_
 
        /*
         * Force synchronous commit, thus minimizing the window between removal of
-        * the database files and commital of the transaction. If we crash before
+        * the database files and committal of the transaction. If we crash before
         * committing, we'll have a DB that's gone on disk but still there
         * according to pg_database, which is not good.
         */
@@ -1309,7 +1309,7 @@ movedb(const char *dbname, const char *t
 
                /*
                 * Force synchronous commit, thus minimizing the window between
-                * copying the database files and commital of the transaction. If we
+                * copying the database files and committal of the transaction. If we
                 * crash before committing, we'll leave an orphaned set of files on
                 * disk, which is not fatal but not good either.
                 */
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -3401,7 +3401,7 @@ ExplainYAMLLineStarting(ExplainState *es
 }
 
 /*
- * YAML is a superset of JSON; unfortuantely, the YAML quoting rules are
+ * YAML is a superset of JSON; unfortunately, the YAML quoting rules are
  * ridiculously complicated -- as documented in sections 5.3 and 7.3.3 of
  * http://yaml.org/spec/1.2/spec.html -- so we chose to just quote everything.
  * Empty strings, strings with leading or trailing whitespace, and strings
diff --git a/src/backend/commands/functioncmds.c b/src/backend/commands/functioncmds.c
--- a/src/backend/commands/functioncmds.c
+++ b/src/backend/commands/functioncmds.c
@@ -1040,7 +1040,7 @@ CreateFunction(ParseState *pstate, Creat
        }
        else
        {
-               /* store SQL NULL instead of emtpy array */
+               /* store SQL NULL instead of empty array */
                trftypes = NULL;
        }
 
@@ -1441,7 +1441,7 @@ CreateCast(CreateCastStmt *stmt)
                                (errcode(ERRCODE_WRONG_OBJECT_TYPE),
                                 errmsg("cast will be ignored because the target data type is a domain")));
 
-       /* Detemine the cast method */
+       /* Determine the cast method */
        if (stmt->func != NULL)
                castmethod = COERCION_METHOD_FUNCTION;
        else if (stmt->inout)
diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -99,7 +99,7 @@ static void RangeVarCallbackForReindexIn
  * Errors arising from the attribute list still apply.
  *
  * Most column type changes that can skip a table rewrite do not invalidate
- * indexes.  We ackowledge this when all operator classes, collations and
+ * indexes.  We acknowledge this when all operator classes, collations and
  * exclusion operators match.  Though we could further permit intra-opfamily
  * changes for btree and hash indexes, that adds subtle complexity with no
  * concrete benefit for core types.
@@ -965,7 +965,7 @@ CheckMutability(Expr *expr)
  * indxpath.c could do something with.  However, that seems overly
  * restrictive.  One useful application of partial indexes is to apply
  * a UNIQUE constraint across a subset of a table, and in that scenario
- * any evaluatable predicate will work.  So accept any predicate here
+ * any evaluable predicate will work.  So accept any predicate here
  * (except ones requiring a plan), and let indxpath.c fend for itself.
  */
 static void
diff --git a/src/backend/commands/publicationcmds.c b/src/backend/commands/publicationcmds.c
--- a/src/backend/commands/publicationcmds.c
+++ b/src/backend/commands/publicationcmds.c
@@ -525,7 +525,7 @@ OpenTableList(List *tables)
                myrelid = RelationGetRelid(rel);
                /*
                 * filter out duplicates when user specifies "foo, foo"
-                * Note that this algrithm is know to not be very effective (O(N^2))
+                * Note that this algorithm is know to not be very effective (O(N^2))
                 * but given that it only works on list of tables given to us by user
                 * it's deemed acceptable.
                 */
diff --git a/src/backend/commands/subscriptioncmds.c b/src/backend/commands/subscriptioncmds.c
--- a/src/backend/commands/subscriptioncmds.c
+++ b/src/backend/commands/subscriptioncmds.c
@@ -474,7 +474,7 @@ DropSubscription(DropSubscriptionStmt *s
        InvokeObjectDropHook(SubscriptionRelationId, subid, 0);
 
        /*
-        * Lock the subscription so noboby else can do anything with it
+        * Lock the subscription so nobody else can do anything with it
         * (including the replication workers).
         */
        LockSharedObject(SubscriptionRelationId, subid, 0, AccessExclusiveLock);
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -3167,7 +3167,7 @@ AlterTableGetLockLevel(List *cmds)
                                break;
 
                                /*
-                                * Changing foreign table options may affect optimisation.
+                                * Changing foreign table options may affect optimization.
                                 */
                        case AT_GenericOptions:
                        case AT_AlterColumnGenericOptions:
@@ -6630,7 +6630,7 @@ ATAddCheckConstraint(List **wqueue, Alte
 
        /*
         * Check if ONLY was specified with ALTER TABLE.  If so, allow the
-        * contraint creation only if there are no children currently.  Error out
+        * constraint creation only if there are no children currently.  Error out
         * otherwise.
         */
        if (!recurse && children != NIL)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -1261,7 +1261,7 @@ InitResultRelInfo(ResultRelInfo *resultR
        resultRelInfo->ri_projectReturning = NULL;
 
        /*
-        * If partition_root has been specified, that means we are builiding the
+        * If partition_root has been specified, that means we are building the
         * ResultRelationInfo for one of its leaf partitions.  In that case, we
         * need *not* initialize the leaf partition's constraint, but rather the
         * the partition_root's (if any).  We must do that explicitly like this,
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -533,7 +533,7 @@ ExecParallelRetrieveInstrumentation(Plan
        int                     plan_node_id = planstate->plan->plan_node_id;
        MemoryContext oldcontext;
 
-       /* Find the instumentation for this node. */
+       /* Find the instrumentation for this node. */
        for (i = 0; i < instrumentation->num_plan_nodes; ++i)
                if (instrumentation->plan_node_id[i] == plan_node_id)
                        break;
diff --git a/src/backend/executor/execReplication.c b/src/backend/executor/execReplication.c
--- a/src/backend/executor/execReplication.c
+++ b/src/backend/executor/execReplication.c
@@ -391,7 +391,7 @@ ExecSimpleRelationInsert(EState *estate,
                if (rel->rd_att->constr)
                        ExecConstraints(resultRelInfo, slot, slot, estate);
 
-               /* Store the slot into tuple that we can insett. */
+               /* Store the slot into tuple that we can inspect. */
                tuple = ExecMaterializeSlot(slot);
 
                /* OK, store the tuple and create index entries for it */
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -304,7 +304,7 @@ typedef struct AggStatePerTransData
        /*
         * Slots for holding the evaluated input arguments.  These are set up
         * during ExecInitAgg() and then used for each input row requiring
-        * procesessing besides what's done in AggState->evalproj.
+        * processing besides what's done in AggState->evalproj.
         */
        TupleTableSlot *sortslot;       /* current input tuple */
        TupleTableSlot *uniqslot;       /* used for multi-column DISTINCT */
diff --git a/src/backend/executor/nodeWindowAgg.c b/src/backend/executor/nodeWindowAgg.c
--- a/src/backend/executor/nodeWindowAgg.c
+++ b/src/backend/executor/nodeWindowAgg.c
@@ -354,7 +354,7 @@ advance_windowaggregate(WindowAggState *
 
        /*
         * We must track the number of rows included in transValue, since to
-        * remove the last input, advance_windowaggregate_base() musn't call the
+        * remove the last input, advance_windowaggregate_base() mustn't call the
         * inverse transition function, but simply reset transValue back to its
         * initial value.
         */
diff --git a/src/backend/foreign/foreign.c b/src/backend/foreign/foreign.c
--- a/src/backend/foreign/foreign.c
+++ b/src/backend/foreign/foreign.c
@@ -724,7 +724,7 @@ GetExistingLocalJoinPath(RelOptInfo *joi
                Path       *path = (Path *) lfirst(lc);
                JoinPath   *joinpath = NULL;
 
-               /* Skip parameterised paths. */
+               /* Skip parameterized paths. */
                if (path->param_info != NULL)
                        continue;
 
diff --git a/src/backend/libpq/hba.c b/src/backend/libpq/hba.c
--- a/src/backend/libpq/hba.c
+++ b/src/backend/libpq/hba.c
@@ -109,7 +109,7 @@ static MemoryContext parsed_hba_context 
  *
  * NOTE: the IdentLine structs can contain pre-compiled regular expressions
  * that live outside the memory context. Before destroying or resetting the
- * memory context, they need to be expliticly free'd.
+ * memory context, they need to be explicitly free'd.
  */
 static List *parsed_ident_lines = NIL;
 static MemoryContext parsed_ident_context = NULL;
diff --git a/src/backend/optimizer/geqo/geqo_erx.c b/src/backend/optimizer/geqo/geqo_erx.c
--- a/src/backend/optimizer/geqo/geqo_erx.c
+++ b/src/backend/optimizer/geqo/geqo_erx.c
@@ -111,7 +111,7 @@ gimme_edge_table(PlannerInfo *root, Gene
        for (index1 = 0; index1 < num_gene; index1++)
        {
                /*
-                * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operaton
+                * presume the tour is circular, i.e. 1->2, 2->3, 3->1 this operation
                 * maps n back to 1
                 */
 
@@ -314,7 +314,7 @@ gimme_gene(PlannerInfo *root, Edge edge,
                /*
                 * give priority to candidates with fewest remaining unused edges;
                 * find out what the minimum number of unused edges is
-                * (minimum_edges); if there is more than one cadidate with the
+                * (minimum_edges); if there is more than one candidate with the
                 * minimum number of unused edges keep count of this number
                 * (minimum_count);
                 */
diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -1618,7 +1618,7 @@ select_mergejoin_clauses(PlannerInfo *ro
                /*
                 * Insist that each side have a non-redundant eclass.  This
                 * restriction is needed because various bits of the planner expect
-                * that each clause in a merge be associatable with some pathkey in a
+                * that each clause in a merge be associable with some pathkey in a
                 * canonical pathkey list, but redundant eclasses can't appear in
                 * canonical sort orderings.  (XXX it might be worth relaxing this,
                 * but not enough time to address it for 8.3.)
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -4467,7 +4467,7 @@ fix_indexqual_operand(Node *node, IndexO
                }
        }
 
-       /* Ooops... */
+       /* Oops... */
        elog(ERROR, "index key does not match expected index column");
        return NULL;                            /* keep compiler quiet */
 }
diff --git a/src/backend/optimizer/plan/planmain.c b/src/backend/optimizer/plan/planmain.c
--- a/src/backend/optimizer/plan/planmain.c
+++ b/src/backend/optimizer/plan/planmain.c
@@ -195,7 +195,7 @@ query_planner(PlannerInfo *root, List *t
        /*
         * Now distribute "placeholders" to base rels as needed.  This has to be
         * done after join removal because removal could change whether a
-        * placeholder is evaluatable at a base rel.
+        * placeholder is evaluable at a base rel.
         */
        add_placeholders_to_base_rels(root);
 
diff --git a/src/backend/optimizer/util/joininfo.c b/src/backend/optimizer/util/joininfo.c
--- a/src/backend/optimizer/util/joininfo.c
+++ b/src/backend/optimizer/util/joininfo.c
@@ -24,7 +24,7 @@
  *             Detect whether there is a joinclause that involves
  *             the two given relations.
  *
- * Note: the joinclause does not have to be evaluatable with only these two
+ * Note: the joinclause does not have to be evaluable with only these two
  * relations.  This is intentional.  For example consider
  *             SELECT * FROM a, b, c WHERE a.x = (b.y + c.z)
  * If a is much larger than the other tables, it may be worthwhile to
diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c
--- a/src/backend/optimizer/util/restrictinfo.c
+++ b/src/backend/optimizer/util/restrictinfo.c
@@ -515,7 +515,7 @@ join_clause_is_movable_into(RestrictInfo
                                                        Relids currentrelids,
                                                        Relids current_and_outer)
 {
-       /* Clause must be evaluatable given available context */
+       /* Clause must be evaluable given available context */
        if (!bms_is_subset(rinfo->clause_relids, current_and_outer))
                return false;
 
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -11312,7 +11312,7 @@ table_ref:      relation_expr opt_alias_claus
                                        n->lateral = true;
                                        n->subquery = $2;
                                        n->alias = $3;
-                                       /* same coment as above */
+                                       /* same comment as above */
                                        if ($3 == NULL)
                                        {
                                                if (IsA($2, SelectStmt) &&
diff --git a/src/backend/parser/parse_collate.c b/src/backend/parser/parse_collate.c
--- a/src/backend/parser/parse_collate.c
+++ b/src/backend/parser/parse_collate.c
@@ -805,7 +805,7 @@ merge_collation_state(Oid collation,
                                        else if (collation != DEFAULT_COLLATION_OID)
                                        {
                                                /*
-                                                * Ooops, we have a conflict.  We cannot throw error
+                                                * Oops, we have a conflict.  We cannot throw error
                                                 * here, since the conflict could be resolved by a
                                                 * later sibling CollateExpr, or the parent might not
                                                 * care about collation anyway.  Return enough info to
@@ -824,7 +824,7 @@ merge_collation_state(Oid collation,
                                if (collation != context->collation)
                                {
                                        /*
-                                        * Ooops, we have a conflict of explicit COLLATE clauses.
+                                        * Oops, we have a conflict of explicit COLLATE clauses.
                                         * Here we choose to throw error immediately; that is what
                                         * the SQL standard says to do, and there's no good reason
                                         * to be less strict.
diff --git a/src/backend/parser/parse_param.c b/src/backend/parser/parse_param.c
--- a/src/backend/parser/parse_param.c
+++ b/src/backend/parser/parse_param.c
@@ -210,7 +210,7 @@ variable_coerce_param_hook(ParseState *p
                }
                else
                {
-                       /* Ooops */
+                       /* Oops */
                        ereport(ERROR,
                                        (errcode(ERRCODE_AMBIGUOUS_PARAMETER),
                                         errmsg("inconsistent types deduced for parameter $%d",
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -3050,7 +3050,7 @@ transformAttachPartition(CreateStmtConte
                                 errmsg("\"%s\" is not partitioned",
                                                RelationGetRelationName(parentRel))));
 
-       /* tranform the values */
+       /* transform the values */
        Assert(RelationGetPartitionKey(parentRel) != NULL);
        cxt->partbound = transformPartitionBound(cxt->pstate, parentRel,
                                                                                         cmd->bound);
diff --git a/src/backend/postmaster/bgworker.c b/src/backend/postmaster/bgworker.c
--- a/src/backend/postmaster/bgworker.c
+++ b/src/backend/postmaster/bgworker.c
@@ -321,7 +321,7 @@ BackgroundWorkerStateChange(void)
 
                /*
                 * Copy strings in a paranoid way.  If shared memory is corrupted, the
-                * source data might not even be NUL-terminated.
+                * source data might not even be NULL-terminated.
                 */
                ascii_safe_strlcpy(rw->rw_worker.bgw_name,
                                                   slot->worker.bgw_name, BGW_MAXLEN);
diff --git a/src/backend/postmaster/bgwriter.c b/src/backend/postmaster/bgwriter.c
--- a/src/backend/postmaster/bgwriter.c
+++ b/src/backend/postmaster/bgwriter.c
@@ -211,7 +211,7 @@ BackgroundWriterMain(void)
                /* Flush any leaked data in the top-level context */
                MemoryContextResetAndDeleteChildren(bgwriter_context);
 
-               /* re-initilialize to avoid repeated errors causing problems */
+               /* re-initialize to avoid repeated errors causing problems */
                WritebackContextInit(&wb_context, &bgwriter_flush_after);
 
                /* Now we can allow interrupts again */
diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c
--- a/src/backend/postmaster/postmaster.c
+++ b/src/backend/postmaster/postmaster.c
@@ -5156,7 +5156,7 @@ RandomCancelKey(int32 *cancel_key)
 }
 
 /*
- * Count up number of child processes of specified types (dead_end chidren
+ * Count up number of child processes of specified types (dead_end children
  * are always excluded).
  */
 static int
diff --git a/src/backend/regex/regc_pg_locale.c b/src/backend/regex/regc_pg_locale.c
--- a/src/backend/regex/regc_pg_locale.c
+++ b/src/backend/regex/regc_pg_locale.c
@@ -224,7 +224,7 @@ static const unsigned char pg_char_prope
  * pg_set_regex_collation: set collation for these functions to obey
  *
  * This is called when beginning compilation or execution of a regexp.
- * Since there's no need for re-entrancy of regexp operations, it's okay
+ * Since there's no need for reentrancy of regexp operations, it's okay
  * to store the results in static variables.
  */
 void
diff --git a/src/backend/replication/logical/launcher.c b/src/backend/replication/logical/launcher.c
--- a/src/backend/replication/logical/launcher.c
+++ b/src/backend/replication/logical/launcher.c
@@ -170,7 +170,7 @@ WaitForReplicationWorkerAttach(LogicalRe
 
                /*
                 * Worker started and attached to our shmem. This check is safe
-                * because only laucher ever starts the workers, so nobody can steal
+                * because only launcher ever starts the workers, so nobody can steal
                 * the worker slot.
                 */
                if (status == BGWH_STARTED && worker->proc)
@@ -180,7 +180,7 @@ WaitForReplicationWorkerAttach(LogicalRe
                        return false;
 
                /*
-                * We need timeout because we generaly don't get notified via latch
+                * We need timeout because we generally don't get notified via latch
                 * about the worker attach.
                 */
                rc = WaitLatch(MyLatch,
@@ -533,7 +533,7 @@ AtCommit_ApplyLauncher(void)
 /*
  * Request wakeup of the launcher on commit of the transaction.
  *
- * This is used to send launcher signal to stop sleeping and proccess the
+ * This is used to send launcher signal to stop sleeping and process the
  * subscriptions when current transaction commits. Should be used when new
  * tuple was added to the pg_subscription catalog.
 */
@@ -638,7 +638,7 @@ ApplyLauncherMain(Datum main_arg)
                else
                {
                        /*
-                        * The wait in previous cycle was interruped in less than
+                        * The wait in previous cycle was interrupted in less than
                         * wal_retrieve_retry_interval since last worker was started,
                         * this usually means crash of the worker, so we should retry
                         * in wal_retrieve_retry_interval again.
diff --git a/src/backend/replication/logical/origin.c b/src/backend/replication/logical/origin.c
--- a/src/backend/replication/logical/origin.c
+++ b/src/backend/replication/logical/origin.c
@@ -1250,7 +1250,7 @@ pg_replication_origin_session_is_setup(P
  * Return the replication progress for origin setup in the current session.
  *
  * If 'flush' is set to true it is ensured that the returned value corresponds
- * to a local transaction that has been flushed. this is useful if asychronous
+ * to a local transaction that has been flushed. this is useful if asynchronous
  * commits are used when replaying replicated transactions.
  */
 Datum
@@ -1336,7 +1336,7 @@ pg_replication_origin_advance(PG_FUNCTIO
  * Return the replication progress for an individual replication origin.
  *
  * If 'flush' is set to true it is ensured that the returned value corresponds
- * to a local transaction that has been flushed. this is useful if asychronous
+ * to a local transaction that has been flushed. this is useful if asynchronous
  * commits are used when replaying replicated transactions.
  */
 Datum
diff --git a/src/backend/replication/logical/proto.c b/src/backend/replication/logical/proto.c
--- a/src/backend/replication/logical/proto.c
+++ b/src/backend/replication/logical/proto.c
@@ -539,7 +539,7 @@ logicalrep_write_attrs(StringInfo out, R
                if (att->attisdropped)
                        continue;
 
-               /* REPLICA IDENTITY FULL means all colums are sent as part of key. */
+               /* REPLICA IDENTITY FULL means all columns are sent as part of key. */
                if (replidentfull ||
                        bms_is_member(att->attnum - FirstLowInvalidHeapAttributeNumber,
                                                  idattrs))
diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c
--- a/src/backend/replication/logical/reorderbuffer.c
+++ b/src/backend/replication/logical/reorderbuffer.c
@@ -1714,7 +1714,7 @@ ReorderBufferCommit(ReorderBuffer *rb, T
  *
  * NB: Transactions handled here have to have actively aborted (i.e. have
  * produced an abort record). Implicitly aborted transactions are handled via
- * ReorderBufferAbortOld(); transactions we're just not interesteded in, but
+ * ReorderBufferAbortOld(); transactions we're just not interested in, but
  * which have committed are handled in ReorderBufferForget().
  *
  * This function purges this transaction and its contents from memory and
@@ -1782,7 +1782,7 @@ ReorderBufferAbortOld(ReorderBuffer *rb,
  * toplevel xid.
  *
  * This is significantly different to ReorderBufferAbort() because
- * transactions that have committed need to be treated differenly from aborted
+ * transactions that have committed need to be treated differently from aborted
  * ones since they may have modified the catalog.
  *
  * Note that this is only allowed to be called in the moment a transaction
@@ -2660,7 +2660,7 @@ StartupReorderBuffer(void)
 
                /*
                 * ok, has to be a surviving logical slot, iterate and delete
-                * everythign starting with xid-*
+                * everything starting with xid-*
                 */
                sprintf(path, "pg_replslot/%s", logical_de->d_name);
 
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -614,7 +614,7 @@ SnapBuildGetOrBuildSnapshot(SnapBuild *b
        if (builder->snapshot == NULL)
        {
                builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
-               /* inrease refcount for the snapshot builder */
+               /* increase refcount for the snapshot builder */
                SnapBuildSnapIncRefcount(builder->snapshot);
        }
 
@@ -678,7 +678,7 @@ SnapBuildProcessChange(SnapBuild *builde
                if (builder->snapshot == NULL)
                {
                        builder->snapshot = SnapBuildBuildSnapshot(builder, xid);
-                       /* inrease refcount for the snapshot builder */
+                       /* increase refcount for the snapshot builder */
                        SnapBuildSnapIncRefcount(builder->snapshot);
                }
 
@@ -911,7 +911,7 @@ SnapBuildEndTxn(SnapBuild *builder, XLog
                {
                        /*
                         * None of the originally running transaction is running anymore,
-                        * so our incrementaly built snapshot now is consistent.
+                        * so our incrementally built snapshot now is consistent.
                         */
                        ereport(LOG,
                                  (errmsg("logical decoding found consistent point at %X/%X",
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -327,7 +327,7 @@ slot_store_cstrings(TupleTableSlot *slot
 /*
  * Modify slot with user data provided as C strigs.
  * This is somewhat similar to heap_modify_tuple but also calls the type
- * input fuction on the user data as the input is the text representation
+ * input function on the user data as the input is the text representation
  * of the types.
  */
 static void
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -172,7 +172,7 @@ pgoutput_startup(LogicalDecodingContext 
                                                                &data->protocol_version,
                                                                &data->publication_names);
 
-               /* Check if we support requested protol */
+               /* Check if we support requested protocol */
                if (data->protocol_version != LOGICALREP_PROTO_VERSION_NUM)
                        ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
@@ -424,7 +424,7 @@ publication_invalidation_cb(Datum arg, i
 /*
  * Initialize the relation schema sync cache for a decoding session.
  *
- * The hash table is destoyed at the end of a decoding session. While
+ * The hash table is destroyed at the end of a decoding session. While
  * relcache invalidations still exist and will still be invoked, they
  * will just see the null hash table global and take no action.
  */
@@ -540,7 +540,7 @@ rel_sync_cache_relation_cb(Datum arg, Oi
 
        /*
         * We can get here if the plugin was used in SQL interface as the
-        * RelSchemaSyncCache is detroyed when the decoding finishes, but there
+        * RelSchemaSyncCache is destroyed when the decoding finishes, but there
         * is no way to unregister the relcache invalidation callback.
         */
        if (RelationSyncCache == NULL)
@@ -580,7 +580,7 @@ rel_sync_cache_publication_cb(Datum arg,
 
        /*
         * We can get here if the plugin was used in SQL interface as the
-        * RelSchemaSyncCache is detroyed when the decoding finishes, but there
+        * RelSchemaSyncCache is destroyed when the decoding finishes, but there
         * is no way to unregister the relcache invalidation callback.
         */
        if (RelationSyncCache == NULL)
diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c
--- a/src/backend/storage/ipc/latch.c
+++ b/src/backend/storage/ipc/latch.c
@@ -860,7 +860,7 @@ WaitEventAdjustWin32(WaitEventSet *set, 
  * reached.  At most nevents occurred events are returned.
  *
  * If timeout = -1, block until an event occurs; if 0, check sockets for
- * readiness, but don't block; if > 0, block for at most timeout miliseconds.
+ * readiness, but don't block; if > 0, block for at most timeout milliseconds.
  *
  * Returns the number of events occurred, or 0 if the timeout was reached.
  *
diff --git a/src/backend/storage/ipc/procarray.c b/src/backend/storage/ipc/procarray.c
--- a/src/backend/storage/ipc/procarray.c
+++ b/src/backend/storage/ipc/procarray.c
@@ -280,7 +280,7 @@ ProcArrayAdd(PGPROC *proc)
        if (arrayP->numProcs >= arrayP->maxProcs)
        {
                /*
-                * Ooops, no room.  (This really shouldn't happen, since there is a
+                * Oops, no room.  (This really shouldn't happen, since there is a
                 * fixed supply of PGPROC structs too, and so we should have failed
                 * earlier.)
                 */
@@ -370,7 +370,7 @@ ProcArrayRemove(PGPROC *proc, Transactio
                }
        }
 
-       /* Ooops */
+       /* Oops */
        LWLockRelease(ProcArrayLock);
 
        elog(LOG, "failed to find proc %p in ProcArray", proc);
diff --git a/src/backend/storage/ipc/shm_mq.c b/src/backend/storage/ipc/shm_mq.c
--- a/src/backend/storage/ipc/shm_mq.c
+++ b/src/backend/storage/ipc/shm_mq.c
@@ -501,7 +501,7 @@ shm_mq_sendv(shm_mq_handle *mqh, shm_mq_
  * it will point to a temporary buffer.  This mostly avoids data copying in
  * the hoped-for case where messages are short compared to the buffer size,
  * while still allowing longer messages.  In either case, the return value
- * remains valid until the next receive operation is perfomed on the queue.
+ * remains valid until the next receive operation is performed on the queue.
  *
  * When nowait = false, we'll wait on our process latch when the ring buffer
  * is empty and we have not yet received a full message.  The sender will
diff --git a/src/backend/storage/ipc/standby.c b/src/backend/storage/ipc/standby.c
--- a/src/backend/storage/ipc/standby.c
+++ b/src/backend/storage/ipc/standby.c
@@ -967,7 +967,7 @@ LogStandbySnapshot(void)
  * similar. We keep them separate because xl_xact_running_xacts is a
  * contiguous chunk of memory and never exists fully until it is assembled in
  * WAL. The inserted records are marked as not being important for durability,
- * to avoid triggering superflous checkpoint / archiving activity.
+ * to avoid triggering superfluous checkpoint / archiving activity.
  */
 static XLogRecPtr
 LogCurrentRunningXacts(RunningTransactions CurrRunningXacts)
diff --git a/src/backend/storage/lmgr/lock.c b/src/backend/storage/lmgr/lock.c
--- a/src/backend/storage/lmgr/lock.c
+++ b/src/backend/storage/lmgr/lock.c
@@ -1125,7 +1125,7 @@ SetupLockInTable(LockMethod lockMethodTa
                                                                                                                &found);
        if (!proclock)
        {
-               /* Ooops, not enough shmem for the proclock */
+               /* Oops, not enough shmem for the proclock */
                if (lock->nRequested == 0)
                {
                        /*
@@ -2778,7 +2778,7 @@ GetLockConflicts(const LOCKTAG *locktag,
                vxids = (VirtualTransactionId *)
                        palloc0(sizeof(VirtualTransactionId) * (MaxBackends + 1));
 
-       /* Compute hash code and partiton lock, and look up conflicting modes. */
+       /* Compute hash code and partition lock, and look up conflicting modes. */
        hashcode = LockTagHashCode(locktag);
        partitionLock = LockHashPartitionLock(hashcode);
        conflictMask = lockMethodTable->conflictTab[lockmode];
@@ -4046,7 +4046,7 @@ lock_twophase_recover(TransactionId xid,
                                                                                                                &found);
        if (!proclock)
        {
-               /* Ooops, not enough shmem for the proclock */
+               /* Oops, not enough shmem for the proclock */
                if (lock->nRequested == 0)
                {
                        /*
diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c
--- a/src/backend/storage/lmgr/lwlock.c
+++ b/src/backend/storage/lmgr/lwlock.c
@@ -781,7 +781,7 @@ LWLockAttemptLock(LWLock *lock, LWLockMo
                                return false;
                        }
                        else
-                               return true;    /* someobdy else has the lock */
+                               return true;    /* somebody else has the lock */
                }
        }
        pg_unreachable();
@@ -953,7 +953,7 @@ LWLockWakeup(LWLock *lock)
                 * that happens before the list unlink happens, the list would end up
                 * being corrupted.
                 *
-                * The barrier pairs with the LWLockWaitListLock() when enqueueing for
+                * The barrier pairs with the LWLockWaitListLock() when enqueuing for
                 * another lock.
                 */
                pg_write_barrier();
@@ -1029,7 +1029,7 @@ LWLockDequeueSelf(LWLock *lock)
 
        /*
         * Can't just remove ourselves from the list, but we need to iterate over
-        * all entries as somebody else could have unqueued us.
+        * all entries as somebody else could have dequeued us.
         */
        proclist_foreach_modify(iter, &lock->waiters, lwWaitLink)
        {
diff --git a/src/backend/storage/lmgr/predicate.c b/src/backend/storage/lmgr/predicate.c
--- a/src/backend/storage/lmgr/predicate.c
+++ b/src/backend/storage/lmgr/predicate.c
@@ -3193,7 +3193,7 @@ ReleasePredicateLocks(bool isCommit)
        /*
         * We can't trust XactReadOnly here, because a transaction which started
         * as READ WRITE can show as READ ONLY later, e.g., within
-        * substransactions.  We want to flag a transaction as READ ONLY if it
+        * subtransactions.  We want to flag a transaction as READ ONLY if it
         * commits without writing so that de facto READ ONLY transactions get the
         * benefit of some RO optimizations, so we will use this local variable to
         * get some cleanup logic right which is based on whether the transaction
diff --git a/src/backend/storage/smgr/md.c b/src/backend/storage/smgr/md.c
--- a/src/backend/storage/smgr/md.c
+++ b/src/backend/storage/smgr/md.c
@@ -1728,7 +1728,7 @@ static void
        else
        {
                /*
-                * It doesn't seem worthwile complicating the code by having a more
+                * It doesn't seem worthwhile complicating the code by having a more
                 * aggressive growth strategy here; the number of segments doesn't
                 * grow that fast, and the memory context internally will sometimes
                 * avoid doing an actual reallocation.
diff --git a/src/backend/tsearch/dict_thesaurus.c b/src/backend/tsearch/dict_thesaurus.c
--- a/src/backend/tsearch/dict_thesaurus.c
+++ b/src/backend/tsearch/dict_thesaurus.c
@@ -23,7 +23,7 @@
 
 
 /*
- * Temporay we use TSLexeme.flags for inner use...
+ * Temporarily we use TSLexeme.flags for inner use...
  */
 #define DT_USEASIS             0x1000
 
diff --git a/src/backend/tsearch/spell.c b/src/backend/tsearch/spell.c
--- a/src/backend/tsearch/spell.c
+++ b/src/backend/tsearch/spell.c
@@ -37,7 +37,7 @@
  *       Spell field. The AffixData field is initialized if AF parameter is not
  *       defined.
  *     - NISortAffixes():
- *       - builds a list of compond affixes from the affix list and stores it
+ *       - builds a list of compound affixes from the affix list and stores it
  *             in the CompoundAffix.
  *       - builds prefix trees (Trie) from the affix list for prefixes and suffixes
  *             and stores them in Suffix and Prefix fields.
diff --git a/src/backend/tsearch/ts_parse.c b/src/backend/tsearch/ts_parse.c
--- a/src/backend/tsearch/ts_parse.c
+++ b/src/backend/tsearch/ts_parse.c
@@ -179,7 +179,7 @@ LexizeExec(LexizeData *ld, ParsedLex **c
        if (ld->curDictId == InvalidOid)
        {
                /*
-                * usial mode: dictionary wants only one word, but we should keep in
+                * usual mode: dictionary wants only one word, but we should keep in
                 * mind that we should go through all stack
                 */
 
@@ -272,7 +272,7 @@ LexizeExec(LexizeData *ld, ParsedLex **c
 
                                /*
                                 * We should be sure that current type of lexeme is recognized
-                                * by our dictinonary: we just check is it exist in list of
+                                * by our dictionary: we just check is it exist in list of
                                 * dictionaries ?
                                 */
                                for (i = 0; i < map->len && !dictExists; i++)
@@ -627,7 +627,7 @@ generateHeadline(HeadlineParsedText *prs
                                /* start of a new fragment */
                                infrag = 1;
                                numfragments++;
-                               /* add a fragment delimitor if this is after the first one */
+                               /* add a fragment delimiter if this is after the first one */
                                if (numfragments > 1)
                                {
                                        memcpy(ptr, prs->fragdelim, prs->fragdelimlen);
diff --git a/src/backend/tsearch/wparser_def.c b/src/backend/tsearch/wparser_def.c
--- a/src/backend/tsearch/wparser_def.c
+++ b/src/backend/tsearch/wparser_def.c
@@ -2445,7 +2445,7 @@ mark_hl_words(HeadlineParsedText *prs, T
                                                break;
                                }
                                if (curlen < min_words && i >= prs->curwords)
-                               {                               /* got end of text and our cover is shoter
+                               {                               /* got end of text and our cover is shorter
                                                                 * than min_words */
                                        for (i = p - 1; i >= 0; i--)
                                        {
diff --git a/src/backend/utils/adt/datetime.c b/src/backend/utils/adt/datetime.c
--- a/src/backend/utils/adt/datetime.c
+++ b/src/backend/utils/adt/datetime.c
@@ -85,7 +85,7 @@ const char *const days[] = {"Sunday", "M
  * Note that this table must be strictly alphabetically ordered to allow an
  * O(ln(N)) search algorithm to be used.
  *
- * The token field must be NUL-terminated; we truncate entries to TOKMAXLEN
+ * The token field must be NULL-terminated; we truncate entries to TOKMAXLEN
  * characters to fit.
  *
  * The static table contains no TZ, DTZ, or DYNTZ entries; rather those
diff --git a/src/backend/utils/adt/formatting.c b/src/backend/utils/adt/formatting.c
--- a/src/backend/utils/adt/formatting.c
+++ b/src/backend/utils/adt/formatting.c
@@ -2265,7 +2265,7 @@ seq_search(char *name, const char *const
 
        for (last = 0, a = array; *a != NULL; a++)
        {
-               /* comperate first chars */
+               /* compare first chars */
                if (*name != **a)
                        continue;
 
diff --git a/src/backend/utils/adt/rangetypes_selfuncs.c b/src/backend/utils/adt/rangetypes_selfuncs.c
--- a/src/backend/utils/adt/rangetypes_selfuncs.c
+++ b/src/backend/utils/adt/rangetypes_selfuncs.c
@@ -533,7 +533,7 @@ calc_hist_selectivity(TypeCacheEntry *ty
                        {
                                /*
                                 * Lower bound no longer matters. Just estimate the fraction
-                                * with an upper bound <= const uppert bound
+                                * with an upper bound <= const upper bound
                                 */
                                hist_selec =
                                        calc_hist_selectivity_scalar(typcache, &const_upper,
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -2687,7 +2687,7 @@ is_input_argument(int nth, const char *a
 }
 
 /*
- * Append used transformated types to specified buffer
+ * Append used transformed types to specified buffer
  */
 static void
 print_function_trftypes(StringInfo buf, HeapTuple proctup)
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -4329,7 +4329,7 @@ get_restriction_variable(PlannerInfo *ro
                return true;
        }
 
-       /* Ooops, clause has wrong structure (probably var op var) */
+       /* Oops, clause has wrong structure (probably var op var) */
        ReleaseVariableStats(*vardata);
        ReleaseVariableStats(rdata);
 
diff --git a/src/backend/utils/adt/tsrank.c b/src/backend/utils/adt/tsrank.c
--- a/src/backend/utils/adt/tsrank.c
+++ b/src/backend/utils/adt/tsrank.c
@@ -899,7 +899,7 @@ calc_rank_cd(const float4 *arrdata, TSVe
 
                /*
                 * if doc are big enough then ext.q may be equal to ext.p due to limit
-                * of posional information. In this case we approximate number of
+                * of positional information. In this case we approximate number of
                 * noise word as half cover's length
                 */
                nNoise = (ext.q - ext.p) - (ext.end - ext.begin);
@@ -908,7 +908,7 @@ calc_rank_cd(const float4 *arrdata, TSVe
                Wdoc += Cpos / ((double) (1 + nNoise));
 
                CurExtPos = ((double) (ext.q + ext.p)) / 2.0;
-               if (NExtent > 0 && CurExtPos > PrevExtPos               /* prevent devision by
+               if (NExtent > 0 && CurExtPos > PrevExtPos               /* prevent division by
                                                                                 * zero in a case of
                                multiple lexize */ )
                        SumDist += 1.0 / (CurExtPos - PrevExtPos);
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -2222,7 +2222,7 @@ varstr_abbrev_convert(Datum original, So
                        goto done;
                }
 
-               /* Just like strcoll(), strxfrm() expects a NUL-terminated string */
+               /* Just like strcoll(), strxfrm() expects a NULL-terminated string */
                memcpy(sss->buf1, authoritative_data, len);
                sss->buf1[len] = '\0';
                sss->last_len1 = len;
diff --git a/src/backend/utils/adt/windowfuncs.c b/src/backend/utils/adt/windowfuncs.c
--- a/src/backend/utils/adt/windowfuncs.c
+++ b/src/backend/utils/adt/windowfuncs.c
@@ -342,7 +342,7 @@ window_lag(PG_FUNCTION_ARGS)
 
 /*
  * lag_with_offset
- * returns the value of VE evelulated on a row that is OFFSET
+ * returns the value of VE evaluated on a row that is OFFSET
  * rows before the current row within a partition,
  * per spec.
  */
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -2332,7 +2332,7 @@ get_typavgwidth(Oid typid, int32 typmod)
        }
 
        /*
-        * Ooops, we have no idea ... wild guess time.
+        * Oops, we have no idea ... wild guess time.
         */
        return 32;
 }
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -621,7 +621,7 @@ RevalidateCachedQuery(CachedPlanSource *
                        return NIL;
                }
 
-               /* Ooops, the race case happened.  Release useless locks. */
+               /* Oops, the race case happened.  Release useless locks. */
                AcquirePlannerLocks(plansource->query_list, false);
        }
 
@@ -845,7 +845,7 @@ CheckCachedPlan(CachedPlanSource *planso
                        return true;
                }
 
-               /* Ooops, the race case happened.  Release useless locks. */
+               /* Oops, the race case happened.  Release useless locks. */
                AcquireExecutorLocks(plan->stmt_list, false);
        }
 
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1433,7 +1433,7 @@ RelationInitPhysicalAddr(Relation relati
                 * points to the current file since the older file will be gone (or
                 * truncated). The new file will still contain older rows so lookups
                 * in them will work correctly. This wouldn't work correctly if
-                * rewrites were allowed to change the schema in a noncompatible way,
+                * rewrites were allowed to change the schema in an incompatible way,
                 * but those are prevented both on catalog tables and on user tables
                 * declared as additional catalog tables.
                 */
diff --git a/src/backend/utils/error/elog.c b/src/backend/utils/error/elog.c
--- a/src/backend/utils/error/elog.c
+++ b/src/backend/utils/error/elog.c
@@ -318,7 +318,7 @@ errstart(int elevel, const char *filenam
         */
        if (ErrorContext == NULL)
        {
-               /* Ooops, hard crash time; very little we can do safely here */
+               /* Oops, hard crash time; very little we can do safely here */
                write_stderr("error occurred at %s:%d before error message 
processing is available\n",
                                         filename ? filename : "(unknown 
file)", lineno);
                exit(2);
@@ -331,7 +331,7 @@ errstart(int elevel, const char *filenam
        if (recursion_depth++ > 0 && elevel >= ERROR)
        {
                /*
-                * Ooops, error during error processing.  Clear ErrorContext as
+                * Oops, error during error processing.  Clear ErrorContext as
                 * discussed at top of file.  We will not return to the original
                 * error's reporter or handler, so we don't need it.
                 */
@@ -1302,7 +1302,7 @@ elog_start(const char *filename, int lin
        /* Make sure that memory context initialization has finished */
        if (ErrorContext == NULL)
        {
-               /* Ooops, hard crash time; very little we can do safely here */
+               /* Oops, hard crash time; very little we can do safely here */
                write_stderr("error occurred at %s:%d before error message 
processing is available\n",
                                         filename ? filename : "(unknown 
file)", lineno);
                exit(2);
diff --git a/src/backend/utils/fmgr/fmgr.c b/src/backend/utils/fmgr/fmgr.c
--- a/src/backend/utils/fmgr/fmgr.c
+++ b/src/backend/utils/fmgr/fmgr.c
@@ -877,7 +877,7 @@ struct fmgr_security_definer_cache
  * To execute a call, we temporarily replace the flinfo with the cached
  * and looked-up one, while keeping the outer fcinfo (which contains all
  * the actual arguments, etc.) intact.  This is not re-entrant, but then
- * the fcinfo itself can't be used re-entrantly anyway.
+ * the fcinfo itself can't be used reentrantly anyway.
  */
 static Datum
 fmgr_security_definer(PG_FUNCTION_ARGS)
diff --git a/src/backend/utils/fmgr/funcapi.c b/src/backend/utils/fmgr/funcapi.c
--- a/src/backend/utils/fmgr/funcapi.c
+++ b/src/backend/utils/fmgr/funcapi.c
@@ -879,7 +879,7 @@ get_func_arg_info(HeapTuple procTup,
 /*
  * get_func_trftypes
  *
- * Returns a number of transformated types used by function.
+ * Returns a number of transformed types used by function.
  */
 int
 get_func_trftypes(HeapTuple procTup,
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -72,7 +72,7 @@
  * when combined with HASH_DEBUG, these are displayed by hdestroy().
  *
  * Problems & fixes to e...@ausmelb.oz. WARNING: relies on pre-processor
- * concatenation property, in probably unnecessary code 'optimisation'.
+ * concatenation property, in probably unnecessary code 'optimization'.
  *
  * Modified ma...@postgres.berkeley.edu February 1990
  *             added multiple table interface
diff --git a/src/backend/utils/hash/hashfn.c b/src/backend/utils/hash/hashfn.c
--- a/src/backend/utils/hash/hashfn.c
+++ b/src/backend/utils/hash/hashfn.c
@@ -26,7 +26,7 @@
 
 
 /*
- * string_hash: hash function for keys that are NUL-terminated strings.
+ * string_hash: hash function for keys that are NULL-terminated strings.
  *
  * NOTE: this is the default hash function if none is specified.
  */
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -1108,7 +1108,7 @@ process_settings(Oid databaseid, Oid rol
 
        relsetting = heap_open(DbRoleSettingRelationId, AccessShareLock);
 
-       /* read all the settings under the same snapsot for efficiency */
+       /* read all the settings under the same snapshot for efficiency */
        snapshot = RegisterSnapshot(GetCatalogSnapshot(DbRoleSettingRelationId));
 
        /* Later settings are ignored if set earlier. */
diff --git a/src/backend/utils/misc/Makefile b/src/backend/utils/misc/Makefile
--- a/src/backend/utils/misc/Makefile
+++ b/src/backend/utils/misc/Makefile
@@ -19,7 +19,7 @@ OBJS = backend_random.o guc.o help_confi
        tzparser.o
 
 # This location might depend on the installation directories. Therefore
-# we can't subsitute it into pg_config.h.
+# we can't substitute it into pg_config.h.
 ifdef krb_srvtab
 override CPPFLAGS += -DPG_KRB_SRVTAB='"$(krb_srvtab)"'
 endif
diff --git a/src/backend/utils/misc/README b/src/backend/utils/misc/README
--- a/src/backend/utils/misc/README
+++ b/src/backend/utils/misc/README
@@ -114,7 +114,7 @@ If a show_hook is provided, it points to
 This hook allows variable-specific computation of the value displayed
 by SHOW (and other SQL features for showing GUC variable values).
 The return value can point to a static buffer, since show functions are
-not used re-entrantly.
+not used reentrantly.
 
 
 Saving/Restoring GUC Variable Values
diff --git a/src/backend/utils/mmgr/freepage.c b/src/backend/utils/mmgr/freepage.c
--- a/src/backend/utils/mmgr/freepage.c
+++ b/src/backend/utils/mmgr/freepage.c
@@ -318,7 +318,7 @@ sum_free_pages(FreePageManager *fpm)
 
 /*
  * Compute the size of the largest run of pages that the user could
- * succesfully get.
+ * successfully get.
  */
 static Size
 FreePageManagerLargestContiguous(FreePageManager *fpm)
@@ -360,7 +360,7 @@ FreePageManagerLargestContiguous(FreePag
 
 /*
  * Recompute the size of the largest run of pages that the user could
- * succesfully get, if it has been marked dirty.
+ * successfully get, if it has been marked dirty.
  */
 static void
 FreePageManagerUpdateLargest(FreePageManager *fpm)
@@ -1704,7 +1704,7 @@ FreePageManagerPutInternal(FreePageManag
                         * The act of allocating pages for use in constructing our btree
                         * should never cause any page to become more full, so the new
                         * split depth should be no greater than the old one, and perhaps
-                        * less if we fortutiously allocated a chunk that freed up a slot
+                        * less if we fortuitously allocated a chunk that freed up a slot
                         * on the page we need to update.
                         */
                        Assert(result.split_pages <= fpm->btree_recycle_count);
diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c
--- a/src/backend/utils/time/tqual.c
+++ b/src/backend/utils/time/tqual.c
@@ -1625,7 +1625,7 @@ HeapTupleHeaderIsOnlyLocked(HeapTupleHea
 }
 
 /*
- * check whether the transaciont id 'xid' is in the pre-sorted array 'xip'.
+ * check whether the transaction id 'xid' is in the pre-sorted array 'xip'.
  */
 static bool
 TransactionIdInArray(TransactionId xid, TransactionId *xip, Size num)
diff --git a/src/bin/pg_dump/parallel.c b/src/bin/pg_dump/parallel.c
--- a/src/bin/pg_dump/parallel.c
+++ b/src/bin/pg_dump/parallel.c
@@ -94,7 +94,7 @@ struct ParallelSlot
 
        /* These fields are valid if workerStatus == WRKR_WORKING: */
        ParallelCompletionPtr callback;         /* function to call on completion */
-       void       *callback_data;      /* passthru data for it */
+       void       *callback_data;      /* passthrough data for it */
 
        ArchiveHandle *AH;                      /* Archive data worker is using */
 
diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c
--- a/src/bin/pg_dump/pg_backup_custom.c
+++ b/src/bin/pg_dump/pg_backup_custom.c
@@ -198,7 +198,7 @@ InitArchiveFmt_Custom(ArchiveHandle *AH)
  *
  * Optional.
  *
- * Set up extrac format-related TOC data.
+ * Set up extract format-related TOC data.
 */
 static void
 _ArchiveEntry(ArchiveHandle *AH, TocEntry *te)
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -3500,7 +3500,7 @@ getPublicationTables(Archive *fout, Tabl
 
                resetPQExpBuffer(query);
 
-               /* Get the publication memebership for the table. */
+               /* Get the publication membership for the table. */
                appendPQExpBuffer(query,
                                                  "SELECT pr.tableoid, pr.oid, 
p.pubname "
                                                  "FROM 
pg_catalog.pg_publication_rel pr,"
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
--- a/src/bin/psql/common.c
+++ b/src/bin/psql/common.c
@@ -828,7 +828,7 @@ StoreQueryTuple(const PGresult *result)
                        char       *varname;
                        char       *value;
 
-                       /* concate prefix and column name */
+                       /* concatenate prefix and column name */
                        varname = psprintf("%s%s", pset.gset_prefix, colname);
 
                        if (!PQgetisnull(result, 0, i))
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
--- a/src/bin/psql/describe.c
+++ b/src/bin/psql/describe.c
@@ -2127,7 +2127,7 @@ describeOneTableDetails(const char *sche
                                printTableAddFooter(&cont, _("Check constraints:"));
                                for (i = 0; i < tuples; i++)
                                {
-                                       /* untranslated contraint name and def */
+                                       /* untranslated constraint name and def */
                                        printfPQExpBuffer(&buf, "    \"%s\" %s",
                                                                          PQgetvalue(result, i, 0),
                                                                          PQgetvalue(result, i, 1));
@@ -3197,7 +3197,7 @@ listTables(const char *tabtypes, const c
        if (verbose)
        {
                /*
-                * As of PostgreSQL 9.0, use pg_table_size() to show a more acurate
+                * As of PostgreSQL 9.0, use pg_table_size() to show a more accurate
                 * size of a table, including FSM, VM and TOAST tables.
                 */
                if (pset.sversion >= 90000)
@@ -5108,7 +5108,7 @@ describeSubscriptions(const char *patter
                                                  gettext_noop("Conninfo"));
        }
 
-       /* Only display subscritpions in current database. */
+       /* Only display subscriptions in current database. */
        appendPQExpBufferStr(&buf,
                                                 "FROM 
pg_catalog.pg_subscription\n"
                                                 "WHERE subdbid = (SELECT oid\n"
diff --git a/src/fe_utils/string_utils.c b/src/fe_utils/string_utils.c
--- a/src/fe_utils/string_utils.c
+++ b/src/fe_utils/string_utils.c
@@ -173,7 +173,7 @@ fmtQualifiedId(int remoteVersion, const 
  * returned by PQserverVersion()) as a string.  This exists mainly to
  * encapsulate knowledge about two-part vs. three-part version numbers.
  *
- * For re-entrancy, caller must supply the buffer the string is put in.
+ * For reentrancy, caller must supply the buffer the string is put in.
  * Recommended size of the buffer is 32 bytes.
  *
  * Returns address of 'buf', as a notational convenience.
diff --git a/src/include/access/visibilitymap.h b/src/include/access/visibilitymap.h
--- a/src/include/access/visibilitymap.h
+++ b/src/include/access/visibilitymap.h
@@ -26,7 +26,7 @@
 #define VISIBILITYMAP_ALL_VISIBLE      0x01
 #define VISIBILITYMAP_ALL_FROZEN       0x02
 #define VISIBILITYMAP_VALID_BITS       0x03            /* OR of all valid
-                                                                                                * visiblitymap flags bits */
+                                                                                                * visibilitymap flags bits */
 
 /* Macros for visibilitymap test */
 #define VM_ALL_VISIBLE(r, b, v) \
diff --git a/src/include/access/xact.h b/src/include/access/xact.h
--- a/src/include/access/xact.h
+++ b/src/include/access/xact.h
@@ -65,7 +65,7 @@ typedef enum
                                                                                 * apply */
 }      SyncCommitLevel;
 
-/* Define the default setting for synchonous_commit */
+/* Define the default setting for synchronous_commit */
 #define SYNCHRONOUS_COMMIT_ON  SYNCHRONOUS_COMMIT_REMOTE_FLUSH
 
 /* Synchronous commit level */
diff --git a/src/include/c.h b/src/include/c.h
--- a/src/include/c.h
+++ b/src/include/c.h
@@ -989,7 +989,7 @@ typedef NameData *Name;
 /* gettext domain name mangling */
 
 /*
- * To better support parallel installations of major PostgeSQL
+ * To better support parallel installations of major PostgreSQL
  * versions as well as parallel installations of major library soname
  * versions, we mangle the gettext domain name by appending those
  * version numbers.  The coding rule ought to be that wherever the
diff --git a/src/include/catalog/partition.h b/src/include/catalog/partition.h
--- a/src/include/catalog/partition.h
+++ b/src/include/catalog/partition.h
@@ -41,7 +41,7 @@ typedef struct PartitionDescData *Partit
 
 /*-----------------------
  * PartitionDispatch - information about one partitioned table in a partition
- * hiearchy required to route a tuple to one of its partitions
+ * hierarchy required to route a tuple to one of its partitions
  *
  *     reldesc         Relation descriptor of the table
  *     key                     Partition key information of the table
diff --git a/src/include/catalog/pg_subscription.h b/src/include/catalog/pg_subscription.h
--- a/src/include/catalog/pg_subscription.h
+++ b/src/include/catalog/pg_subscription.h
@@ -23,7 +23,7 @@
 #define SubscriptionRelation_Rowtype_Id        6101
 
 /*
- * Technicaly, the subscriptions live inside the database, so a shared catalog
+ * Technically, the subscriptions live inside the database, so a shared catalog
  * seems weird, but the replication launcher process needs to access all of
  * them to be able to start the workers, so we have to put them in a shared,
  * nailed catalog.
@@ -35,7 +35,7 @@ CATALOG(pg_subscription,6100) BKI_SHARED
 
        Oid                     subowner;               /* Owner of the subscription */
 
-       bool            subenabled;             /* True if the subsription is enabled
+       bool            subenabled;             /* True if the subscription is enabled
                                                                 * (the worker should be running) */
 
 #ifdef CATALOG_VARLEN                  /* variable-length fields start here */
@@ -65,7 +65,7 @@ typedef FormData_pg_subscription *Form_p
 typedef struct Subscription
 {
        Oid             oid;                    /* Oid of the subscription */
-       Oid             dbid;                   /* Oid of the database which dubscription is in */
+       Oid             dbid;                   /* Oid of the database which subscription is in */
        char   *name;                   /* Name of the subscription */
        Oid             owner;                  /* Oid of the subscription owner */
        bool    enabled;                /* Indicates if the subscription is enabled */
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -345,7 +345,7 @@ SH_GROW(SH_TYPE *tb, uint32 newsize)
         * we need. We neither want tb->members increased, nor do we need to do
         * deal with deleted elements, nor do we need to compare keys. So a
         * special-cased implementation is lot faster. As resizing can be time
-        * consuming and frequent, that's worthwile to optimize.
+        * consuming and frequent, that's worthwhile to optimize.
         *
         * To be able to simply move entries over, we have to start not at the
         * first bucket (i.e olddata[0]), but find the first bucket that's either
@@ -620,7 +620,7 @@ SH_DELETE(SH_TYPE *tb, SH_KEY_TYPE key)
 
                        /*
                        * Backward shift following elements till either an empty element
-                        * or an element at its optimal position is encounterered.
+                        * or an element at its optimal position is encountered.
                         *
                         * While that sounds expensive, the average chain length is short,
                         * and deletions would otherwise require toombstones.
diff --git a/src/include/replication/syncrep.h b/src/include/replication/syncrep.h
--- a/src/include/replication/syncrep.h
+++ b/src/include/replication/syncrep.h
@@ -50,7 +50,7 @@ typedef struct SyncRepConfigData
                                                                 * wait for */
        uint8           syncrep_method; /* method to choose sync standbys */
        int                     nmembers;               /* number of members in the following list */
-       /* member_names contains nmembers consecutive nul-terminated C strings */
+       /* member_names contains nmembers consecutive null-terminated C strings */
        char            member_names[FLEXIBLE_ARRAY_MEMBER];
 } SyncRepConfigData;
 
diff --git a/src/include/storage/s_lock.h b/src/include/storage/s_lock.h
--- a/src/include/storage/s_lock.h
+++ b/src/include/storage/s_lock.h
@@ -842,7 +842,7 @@ typedef LONG slock_t;
 #define SPIN_DELAY() spin_delay()
 
 /* If using Visual C++ on Win64, inline assembly is unavailable.
- * Use a _mm_pause instrinsic instead of rep nop.
+ * Use a _mm_pause intrinsic instead of rep nop.
  */
 #if defined(_WIN64)
 static __forceinline void
diff --git a/src/include/tsearch/dicts/spell.h b/src/include/tsearch/dicts/spell.h
--- a/src/include/tsearch/dicts/spell.h
+++ b/src/include/tsearch/dicts/spell.h
@@ -147,7 +147,7 @@ typedef struct
 } CMPDAffix;
 
 /*
- * Type of encoding affix flags in Hunspel dictionaries
+ * Type of encoding affix flags in Hunspell dictionaries
  */
 typedef enum
 {
diff --git a/src/include/utils/datetime.h b/src/include/utils/datetime.h
--- a/src/include/utils/datetime.h
+++ b/src/include/utils/datetime.h
@@ -209,7 +209,7 @@ struct tzEntry;
 /* keep this struct small; it gets used a lot */
 typedef struct
 {
-       char            token[TOKMAXLEN + 1];   /* always NUL-terminated */
+       char            token[TOKMAXLEN + 1];   /* always NULL-terminated */
        char            type;                   /* see field type codes above */
        int32           value;                  /* meaning depends on type */
 } datetkn;
@@ -227,7 +227,7 @@ typedef struct TimeZoneAbbrevTable
 typedef struct DynamicZoneAbbrev
 {
        pg_tz      *tz;                         /* NULL if not yet looked up */
-       char            zone[FLEXIBLE_ARRAY_MEMBER];    /* NUL-terminated zone name */
+       char            zone[FLEXIBLE_ARRAY_MEMBER];    /* NULL-terminated zone name */
 } DynamicZoneAbbrev;
 
 
diff --git a/src/include/utils/jsonapi.h b/src/include/utils/jsonapi.h
--- a/src/include/utils/jsonapi.h
+++ b/src/include/utils/jsonapi.h
@@ -45,7 +45,7 @@ typedef enum
  * error reporting routines.
  * token_terminator and prev_token_terminator point to the character
  * AFTER the end of the token, i.e. where there would be a nul byte
- * if we were using nul-terminated strings.
+ * if we were using null-terminated strings.
  */
 typedef struct JsonLexContext
 {
@@ -127,7 +127,7 @@ extern JsonLexContext *makeJsonLexContex
 /*
  * Utility function to check if a string is a valid JSON number.
  *
- * str agrument does not need to be nul-terminated.
+ * str argument does not need to be null-terminated.
  */
 extern bool IsValidJsonNumber(const char *str, int len);
 
diff --git a/src/interfaces/ecpg/ecpglib/execute.c b/src/interfaces/ecpg/ecpglib/execute.c
--- a/src/interfaces/ecpg/ecpglib/execute.c
+++ b/src/interfaces/ecpg/ecpglib/execute.c
@@ -2,7 +2,7 @@
 
 /*
  * The aim is to get a simpler interface to the database routines.
- * All the tidieous messing around with tuples is supposed to be hidden
+ * All the tedious messing around with tuples is supposed to be hidden
  * by this function.
  */
 /* Author: Linus Tolke
diff --git a/src/interfaces/ecpg/pgtypeslib/datetime.c b/src/interfaces/ecpg/pgtypeslib/datetime.c
--- a/src/interfaces/ecpg/pgtypeslib/datetime.c
+++ b/src/interfaces/ecpg/pgtypeslib/datetime.c
@@ -324,7 +324,7 @@ PGTYPESdate_fmt_asc(date dDate, const ch
  *
  * function works as follows:
  *      - first we analyze the parameters
- *      - if this is a special case with no delimiters, add delimters
+ *      - if this is a special case with no delimiters, add delimiters
  *      - find the tokens. First we look for numerical values. If we have found
  *        less than 3 tokens, we check for the months' names and thereafter for
  *        the abbreviations of the months' names.
diff --git a/src/interfaces/ecpg/pgtypeslib/dt.h b/src/interfaces/ecpg/pgtypeslib/dt.h
--- a/src/interfaces/ecpg/pgtypeslib/dt.h
+++ b/src/interfaces/ecpg/pgtypeslib/dt.h
@@ -213,7 +213,7 @@ typedef double fsec_t;
 /* keep this struct small; it gets used a lot */
 typedef struct
 {
-       char            token[TOKMAXLEN + 1];   /* always NUL-terminated */
+       char            token[TOKMAXLEN + 1];   /* always NULL-terminated */
        char            type;                   /* see field type codes above */
        int32           value;                  /* meaning depends on type */
 } datetkn;
diff --git a/src/interfaces/ecpg/pgtypeslib/numeric.c b/src/interfaces/ecpg/pgtypeslib/numeric.c
--- a/src/interfaces/ecpg/pgtypeslib/numeric.c
+++ b/src/interfaces/ecpg/pgtypeslib/numeric.c
@@ -1368,11 +1368,11 @@ PGTYPESnumeric_cmp(numeric *var1, numeri
 {
        /* use cmp_abs function to calculate the result */
 
-       /* both are positive: normal comparation with cmp_abs */
+       /* both are positive: normal comparison with cmp_abs */
        if (var1->sign == NUMERIC_POS && var2->sign == NUMERIC_POS)
                return cmp_abs(var1, var2);
 
-       /* both are negative: return the inverse of the normal comparation */
+       /* both are negative: return the inverse of the normal comparison */
        if (var1->sign == NUMERIC_NEG && var2->sign == NUMERIC_NEG)
        {
                /*
diff --git a/src/interfaces/ecpg/preproc/ecpg.header b/src/interfaces/ecpg/preproc/ecpg.header
--- a/src/interfaces/ecpg/preproc/ecpg.header
+++ b/src/interfaces/ecpg/preproc/ecpg.header
@@ -207,7 +207,7 @@ create_questionmarks(char *name, bool ar
 
        /* In case we have a struct, we have to print as many "?" as there are attributes in the struct
         * An array is only allowed together with an element argument
-        * This is essantially only used for inserts, but using a struct as input parameter is an error anywhere else
+        * This is essentially only used for inserts, but using a struct as input parameter is an error anywhere else
         * so we don't have to worry here. */
 
        if (p->type->type == ECPGt_struct || (array && p->type->type == ECPGt_array && p->type->u.element->type == ECPGt_struct))
diff --git a/src/interfaces/ecpg/preproc/ecpg.trailer b/src/interfaces/ecpg/preproc/ecpg.trailer
--- a/src/interfaces/ecpg/preproc/ecpg.trailer
+++ b/src/interfaces/ecpg/preproc/ecpg.trailer
@@ -355,7 +355,7 @@ ECPGExecuteImmediateStmt: EXECUTE IMMEDI
                          $$ = $3;
                        };
 /*
- * variable decalartion outside exec sql declare block
+ * variable declaration outside exec sql declare block
  */
 ECPGVarDeclaration: single_vt_declaration;
 
@@ -707,7 +707,7 @@ struct_union_type_with_symbol: s_struct_
                        free(forward_name);
                        forward_name = NULL;
 
-                       /* This is essantially a typedef but needs the keyword struct/union as well.
+                       /* This is essentially a typedef but needs the keyword struct/union as well.
                         * So we create the typedef for each struct definition with symbol */
                        for (ptr = types; ptr != NULL; ptr = ptr->next)
                        {
@@ -1275,7 +1275,7 @@ descriptor_item:  SQL_CARDINALITY                 { $$ 
                ;
 
 /*
- * set/reset the automatic transaction mode, this needs a differnet handling
+ * set/reset the automatic transaction mode, this needs a different handling
  * as the other set commands
  */
 ECPGSetAutocommit:     SET SQL_AUTOCOMMIT '=' on_off   { $$ = $4; }
@@ -1287,7 +1287,7 @@ on_off: ON                                { $$ = mm_strdup("on"); }
                ;
 
 /*
- * set the actual connection, this needs a differnet handling as the other
+ * set the actual connection, this needs a different handling as the other
  * set commands
  */
 ECPGSetConnection:     SET CONNECTION TO connection_object { $$ = $4; }
diff --git a/src/interfaces/ecpg/preproc/parse.pl b/src/interfaces/ecpg/preproc/parse.pl
--- a/src/interfaces/ecpg/preproc/parse.pl
+++ b/src/interfaces/ecpg/preproc/parse.pl
@@ -550,7 +550,7 @@ sub dump_fields
                        if ($len == 1)
                        {
 
-                               # Straight assignement
+                               # Straight assignment
                                $str = ' $$ = ' . $flds_new[0] . ';';
                                add_to_buffer('rules', $str);
                        }
diff --git a/src/interfaces/libpq/fe-auth.c b/src/interfaces/libpq/fe-auth.c
--- a/src/interfaces/libpq/fe-auth.c
+++ b/src/interfaces/libpq/fe-auth.c
@@ -803,7 +803,7 @@ pg_fe_getauthname(PQExpBuffer errorMessa
  * be sent in cleartext if it is encrypted on the client side.  This is
  * good because it ensures the cleartext password won't end up in logs,
  * pg_stat displays, etc.  We export the function so that clients won't
- * be dependent on low-level details like whether the enceyption is MD5
+ * be dependent on low-level details like whether the encryption is MD5
  * or something else.
  *
  * Arguments are the cleartext password, and the SQL name of the user it
diff --git a/src/interfaces/libpq/fe-connect.c b/src/interfaces/libpq/fe-connect.c
--- a/src/interfaces/libpq/fe-connect.c
+++ b/src/interfaces/libpq/fe-connect.c
@@ -2136,7 +2136,7 @@ keep_going:                                               /* We will come back to
                                }                               /* loop over addresses */
 
                                /*
-                                * Ooops, no more addresses.  An appropriate error message is
+                                * Oops, no more addresses.  An appropriate error message is
                                 * already set up, so just set the right status.
                                 */
                                goto error_return;
diff --git a/src/interfaces/libpq/fe-exec.c b/src/interfaces/libpq/fe-exec.c
--- a/src/interfaces/libpq/fe-exec.c
+++ b/src/interfaces/libpq/fe-exec.c
@@ -2334,7 +2334,7 @@ PQputCopyEnd(PGconn *conn, const char *e
        {
                if (errormsg)
                {
-                       /* Ooops, no way to do this in 2.0 */
+                       /* Oops, no way to do this in 2.0 */
                        printfPQExpBuffer(&conn->errorMessage,
                                                          libpq_gettext("function requires at least protocol version 3.0\n"));
                        return -1;
@@ -3231,7 +3231,7 @@ PQfreeNotify(PGnotify *notify)
  *
  * For safety the buffer at "to" must be at least 2*length + 1 bytes long.
  * A terminating NUL character is added to the output string, whether the
- * input is NUL-terminated or not.
+ * input is NULL-terminated or not.
  *
  * Returns the actual length of the output (not counting the terminating NUL).
  */
diff --git a/src/interfaces/libpq/libpq-int.h b/src/interfaces/libpq/libpq-int.h
--- a/src/interfaces/libpq/libpq-int.h
+++ b/src/interfaces/libpq/libpq-int.h
@@ -145,7 +145,7 @@ typedef struct pgMessageField
 {
        struct pgMessageField *next;    /* list link */
        char            code;                   /* field code */
-       char            contents[FLEXIBLE_ARRAY_MEMBER];                /* value, nul-terminated */
+       char            contents[FLEXIBLE_ARRAY_MEMBER];                /* value, null-terminated */
 } PGMessageField;
 
 /* Fields needed for notice handling */
@@ -309,7 +309,7 @@ typedef struct pg_conn_host
        char       *host;                       /* host name or address, or socket path */
        pg_conn_host_type type;         /* type of host */
        char       *port;                       /* port number for this host; if not NULL,
-                                                                * overrrides the PGConn's pgport */
+                                                                * overrides the PGConn's pgport */
        char       *password;           /* password for this host, read from the
                                                                 * password file.  only set if the PGconn's
                                                                 * pgpass field is NULL. */
@@ -666,7 +666,7 @@ extern void pq_reset_sigpipe(sigset_t *o
 #endif
 
 /*
- * The SSL implementatation provides these functions (fe-secure-openssl.c)
+ * The SSL implementation provides these functions (fe-secure-openssl.c)
  */
 extern void pgtls_init_library(bool do_ssl, int do_crypto);
 extern int     pgtls_init(PGconn *conn);
diff --git a/src/interfaces/libpq/win32.c b/src/interfaces/libpq/win32.c
--- a/src/interfaces/libpq/win32.c
+++ b/src/interfaces/libpq/win32.c
@@ -32,7 +32,7 @@
 
 #include "win32.h"
 
-/* Declared here to avoid pulling in all includes, which causes name collissions */
+/* Declared here to avoid pulling in all includes, which causes name collisions */
 #ifdef ENABLE_NLS
 extern char *libpq_gettext(const char *msgid) pg_attribute_format_arg(1);
 #else
diff --git a/src/pl/plperl/ppport.h b/src/pl/plperl/ppport.h
--- a/src/pl/plperl/ppport.h
+++ b/src/pl/plperl/ppport.h
@@ -79,7 +79,7 @@ to be installed on your system.
 If this option is given, a copy of each file will be saved with
 the given suffix that contains the suggested changes. This does
 not require any external programs. Note that this does not
-automagially add a dot between the original filename and the
+automagically add a dot between the original filename and the
 suffix. If you want the dot, you have to include it in the option
 argument.
 
@@ -4364,9 +4364,9 @@ DPPP_(my_vload_module)(U32 flags, SV *na
 
     OP * const modname = newSVOP(OP_CONST, 0, name);
     /* 5.005 has a somewhat hacky force_normal that doesn't croak on
-       SvREADONLY() if PL_compling is true. Current perls take care in
+       SvREADONLY() if PL_compiling is true. Current perls take care in
        ck_require() to correctly turn off SvREADONLY before calling
-       force_normal_flags(). This seems a better fix than fudging PL_compling
+       force_normal_flags(). This seems a better fix than fudging PL_compiling
      */
     SvREADONLY_off(((SVOP*)modname)->op_sv);
     modname->op_private |= OPpCONST_BARE;
@@ -6205,10 +6205,10 @@ DPPP_(my_grok_number)(pTHX_ const char *
     /* UVs are at least 32 bits, so the first 9 decimal digits cannot
        overflow.  */
     UV value = *s - '0';
-    /* This construction seems to be more optimiser friendly.
+    /* This construction seems to be more optimizer friendly.
        (without it gcc does the isDIGIT test and the *s - '0' separately)
        With it gcc on arm is managing 6 instructions (6 cycles) per digit.
-       In theory the optimiser could deduce how far to unroll the loop
+       In theory the optimizer could deduce how far to unroll the loop
        before checking for overflow.  */
     if (++s < send) {
       int digit = *s - '0';
@@ -6606,7 +6606,7 @@ DPPP_(my_grok_oct)(pTHX_ const char *sta
     bool overflowed = FALSE;
 
     for (; len-- && *s; s++) {
-         /* gcc 2.95 optimiser not smart enough to figure that this subtraction
+         /* gcc 2.95 optimizer not smart enough to figure that this subtraction
             out front allows slicker code.  */
         int digit = *s - '0';
         if (digit >= 0 && digit <= 7) {
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -5541,7 +5541,7 @@ exec_eval_simple_expr(PLpgSQL_execstate 
                        exec_check_rw_parameter(expr, expr->rwparam);
                if (expr->expr_simple_expr == NULL)
                {
-                       /* Ooops, release refcount and fail */
+                       /* Oops, release refcount and fail */
                        ReleaseCachedPlan(cplan, true);
                        return false;
                }
diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h
--- a/src/pl/plpgsql/src/plpgsql.h
+++ b/src/pl/plpgsql/src/plpgsql.h
@@ -357,7 +357,7 @@ typedef struct PLpgSQL_nsitem
         */
        int                     itemno;
        struct PLpgSQL_nsitem *prev;
-       char            name[FLEXIBLE_ARRAY_MEMBER];    /* nul-terminated string */
+       char            name[FLEXIBLE_ARRAY_MEMBER];    /* null-terminated string */
 } PLpgSQL_nsitem;
 
 /*
diff --git a/src/pl/plpython/plpy_elog.c b/src/pl/plpython/plpy_elog.c
--- a/src/pl/plpython/plpy_elog.c
+++ b/src/pl/plpython/plpy_elog.c
@@ -303,7 +303,7 @@ PLy_traceback(PyObject *e, PyObject *v, 
                        long            plain_lineno;
 
                        /*
-                        * The second frame points at the internal function, but to mimick
+                        * The second frame points at the internal function, but to mimic
                         * Python error reporting we want to say <module>.
                         */
                        if (*tb_depth == 1)
diff --git a/src/pl/plpython/plpy_plpymodule.c b/src/pl/plpython/plpy_plpymodule.c
--- a/src/pl/plpython/plpy_plpymodule.c
+++ b/src/pl/plpython/plpy_plpymodule.c
@@ -463,7 +463,7 @@ PLy_output(volatile int level, PyObject 
 
                        if (strcmp(keyword, "message") == 0)
                        {
-                               /* the message should not be overwriten */
+                               /* the message should not be overwritten */
                                if (PyTuple_Size(args) != 0)
                                {
                                        PLy_exception_set(PyExc_TypeError, "Argument 'message' given by name and position");
diff --git a/src/pl/plpython/plpy_procedure.c b/src/pl/plpython/plpy_procedure.c
--- a/src/pl/plpython/plpy_procedure.c
+++ b/src/pl/plpython/plpy_procedure.c
@@ -122,7 +122,7 @@ PLy_procedure_get(Oid fn_oid, Oid fn_rel
        }
        PG_CATCH();
        {
-               /* Do not leave an uninitialised entry in the cache */
+               /* Do not leave an uninitialized entry in the cache */
                if (use_cache)
                        hash_search(PLy_procedure_cache, &key, HASH_REMOVE, NULL);
                PG_RE_THROW();
diff --git a/src/pl/plpython/plpy_typeio.c b/src/pl/plpython/plpy_typeio.c
--- a/src/pl/plpython/plpy_typeio.c
+++ b/src/pl/plpython/plpy_typeio.c
@@ -833,7 +833,7 @@ PLyObject_ToComposite(PLyObToDatum *arg,
 
        /*
         * This will set up the dummy PLyTypeInfo's output conversion routines,
-        * since we left is_rowtype as 2. A future optimisation could be caching
+        * since we left is_rowtype as 2. A future optimization could be caching
         * that info instead of looking it up every time a tuple is returned from
         * the function.
         */
diff --git a/src/pl/plpython/plpy_typeio.h b/src/pl/plpython/plpy_typeio.h
--- a/src/pl/plpython/plpy_typeio.h
+++ b/src/pl/plpython/plpy_typeio.h
@@ -43,7 +43,7 @@ typedef union PLyTypeInput
 } PLyTypeInput;
 
 /*
- * Conversion from Python object to a Postgresql Datum.
+ * Conversion from Python object to a PostgreSQL Datum.
  *
  * The 'inarray' argument to the conversion function is true, if the
  * converted value was in an array (Python list). It is used to give a
@@ -78,7 +78,7 @@ typedef union PLyTypeOutput
        PLyObToTuple r;
 } PLyTypeOutput;
 
-/* all we need to move Postgresql data to Python objects,
+/* all we need to move PostgreSQL data to Python objects,
  * and vice versa
  */
 typedef struct PLyTypeInfo
diff --git a/src/test/isolation/specs/receipt-report.spec b/src/test/isolation/specs/receipt-report.spec
--- a/src/test/isolation/specs/receipt-report.spec
+++ b/src/test/isolation/specs/receipt-report.spec
@@ -7,7 +7,7 @@
 # be changed and a report of the closed day's receipts subsequently
 # run which will miss a receipt from the date which has been closed.
 #
-# There are only six permuations which must cause a serialization failure.
+# There are only six permutations which must cause a serialization failure.
 # Failure cases are where s1 overlaps both s2 and s3, but s2 commits before
 # s3 executes its first SELECT.
 #
diff --git a/src/test/isolation/specs/two-ids.spec b/src/test/isolation/specs/two-ids.spec
--- a/src/test/isolation/specs/two-ids.spec
+++ b/src/test/isolation/specs/two-ids.spec
@@ -2,7 +2,7 @@
 #
 # Small, simple test showing read-only anomalies.
 #
-# There are only four permuations which must cause a serialization failure.
+# There are only four permutations which must cause a serialization failure.
 # Required failure cases are where s2 overlaps both s1 and s3, but s1
 # commits before s3 executes its first SELECT.
 #
diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql
--- a/src/test/regress/sql/alter_table.sql
+++ b/src/test/regress/sql/alter_table.sql
@@ -255,7 +255,7 @@ INSERT INTO tmp3 values (5,50);
 -- Try (and fail) to add constraint due to invalid source columns
 ALTER TABLE tmp3 add constraint tmpconstr foreign key(c) references tmp2 match full;
 
--- Try (and fail) to add constraint due to invalide destination columns explicitly given
+-- Try (and fail) to add constraint due to invalid destination columns explicitly given
 ALTER TABLE tmp3 add constraint tmpconstr foreign key(a) references tmp2(b) match full;
 
 -- Try (and fail) to add constraint due to invalid data
@@ -1829,7 +1829,7 @@ CREATE UNLOGGED TABLE unlogged3(f1 SERIA
 ALTER TABLE unlogged3 SET LOGGED; -- skip self-referencing foreign key
 ALTER TABLE unlogged2 SET LOGGED; -- fails because a foreign key to an unlogged table exists
 ALTER TABLE unlogged1 SET LOGGED;
--- check relpersistence of an unlogged table after changing to permament
+-- check relpersistence of an unlogged table after changing to permanent
 SELECT relname, relkind, relpersistence FROM pg_class WHERE relname ~ '^unlogged1'
 UNION ALL
 SELECT 'toast table', t.relkind, t.relpersistence FROM pg_class r JOIN pg_class t ON t.oid = r.reltoastrelid WHERE r.relname ~ '^unlogged1'
@@ -1917,7 +1917,7 @@ ALTER TABLE partitioned ALTER COLUMN b T
 -- cannot drop NOT NULL on columns in the range partition key
 ALTER TABLE partitioned ALTER COLUMN a DROP NOT NULL;
 
--- partitioned table cannot partiticipate in regular inheritance
+-- partitioned table cannot participate in regular inheritance
 CREATE TABLE foo (
        a int,
        b int
diff --git a/src/test/regress/sql/create_table.sql b/src/test/regress/sql/create_table.sql
--- a/src/test/regress/sql/create_table.sql
+++ b/src/test/regress/sql/create_table.sql
@@ -418,7 +418,7 @@ SELECT attname, attnotnull FROM pg_attri
 -- prevent a function referenced in partition key from being dropped
 DROP FUNCTION plusone(int);
 
--- partitioned table cannot partiticipate in regular inheritance
+-- partitioned table cannot participate in regular inheritance
 CREATE TABLE partitioned2 (
        a int
 ) PARTITION BY LIST ((a+1));
diff --git a/src/test/regress/sql/errors.sql b/src/test/regress/sql/errors.sql
--- a/src/test/regress/sql/errors.sql
+++ b/src/test/regress/sql/errors.sql
@@ -2,7 +2,7 @@
 -- ERRORS
 --
 
--- bad in postquel, but ok in postsql
+-- bad in postquel, but ok in PostgreSQL
 select 1;
 
 
diff --git a/src/test/regress/sql/groupingsets.sql b/src/test/regress/sql/groupingsets.sql
--- a/src/test/regress/sql/groupingsets.sql
+++ b/src/test/regress/sql/groupingsets.sql
@@ -140,7 +140,7 @@ select *
   from (values (1),(2)) v(x),
        lateral (select a, b, sum(v.x) from gstest_data(v.x) group by rollup (a,b)) s;
 
--- min max optimisation should still work with GROUP BY ()
+-- min max optimization should still work with GROUP BY ()
 explain (costs off)
   select min(unique1) from tenk1 GROUP BY ();
 
diff --git a/src/test/regress/sql/indirect_toast.sql b/src/test/regress/sql/indirect_toast.sql
--- a/src/test/regress/sql/indirect_toast.sql
+++ b/src/test/regress/sql/indirect_toast.sql
@@ -11,7 +11,7 @@ SELECT descr, substring(make_tuple_indir
 -- modification without changing varlenas
 UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200);
 
--- modification without modifying asigned value
+-- modification without modifying assigned value
 UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
 
 -- modification modifying, but effectively not changing
@@ -20,7 +20,7 @@ UPDATE toasttest SET cnt = cnt +1, f1 = 
 UPDATE toasttest SET cnt = cnt +1, f1 = '-'||f1||'-' RETURNING substring(toasttest::text, 1, 200);
 
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
 VACUUM FREEZE toasttest;
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
 
@@ -42,7 +42,7 @@ CREATE TRIGGER toasttest_update_indirect
 -- modification without changing varlenas
 UPDATE toasttest SET cnt = cnt +1 RETURNING substring(toasttest::text, 1, 200);
 
--- modification without modifying asigned value
+-- modification without modifying assigned value
 UPDATE toasttest SET cnt = cnt +1, f1 = f1 RETURNING substring(toasttest::text, 1, 200);
 
 -- modification modifying, but effectively not changing
@@ -53,7 +53,7 @@ UPDATE toasttest SET cnt = cnt +1, f1 = 
 INSERT INTO toasttest(descr, f1, f2) VALUES('one-toasted,one-null, via indirect', repeat('1234567890',30000), NULL);
 
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
--- check we didn't screw with main/toast tuple visiblity
+-- check we didn't screw with main/toast tuple visibility
 VACUUM FREEZE toasttest;
 SELECT substring(toasttest::text, 1, 200) FROM toasttest;
 
diff --git a/src/test/regress/sql/init_privs.sql b/src/test/regress/sql/init_privs.sql
--- a/src/test/regress/sql/init_privs.sql
+++ b/src/test/regress/sql/init_privs.sql
@@ -1,4 +1,4 @@
--- Test iniital privileges
+-- Test initial privileges
 
 -- There should always be some initial privileges, set up by initdb
 SELECT count(*) > 0 FROM pg_init_privs;
diff --git a/src/test/regress/sql/insert_conflict.sql b/src/test/regress/sql/insert_conflict.sql
--- a/src/test/regress/sql/insert_conflict.sql
+++ b/src/test/regress/sql/insert_conflict.sql
@@ -138,7 +138,7 @@ insert into insertconflicttest values (1
 drop index comp_key_index;
 
 --
--- Partial index tests, no inference predicate specificied
+-- Partial index tests, no inference predicate specified
 --
 create unique index part_comp_key_index on insertconflicttest(key, fruit) where key < 5;
 create unique index expr_part_comp_key_index on insertconflicttest(key, lower(fruit)) where key < 5;
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -1456,7 +1456,7 @@ select * from
 --
 
 select t1.uunique1 from
-  tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestipn
+  tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t1" suggestion
 select t2.uunique1 from
   tenk1 t1 join tenk2 t2 on t1.two = t2.two; -- error, prefer "t2" suggestion
 select uunique1 from
diff --git a/src/test/regress/sql/matview.sql b/src/test/regress/sql/matview.sql
--- a/src/test/regress/sql/matview.sql
+++ b/src/test/regress/sql/matview.sql
@@ -92,7 +92,7 @@ SELECT * FROM mvtest_tvvm;
 -- test diemv when the mv does not exist
 DROP MATERIALIZED VIEW IF EXISTS no_such_mv;
 
--- make sure invalid comination of options is prohibited
+-- make sure invalid combination of options is prohibited
 REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tvmm WITH NO DATA;
 
 -- no tuple locks on materialized views
diff --git a/src/test/regress/sql/plpgsql.sql b/src/test/regress/sql/plpgsql.sql
--- a/src/test/regress/sql/plpgsql.sql
+++ b/src/test/regress/sql/plpgsql.sql
@@ -1350,7 +1350,7 @@ select * from WSlot order by slotname;
 
 --
 -- Install the central phone system and create the phone numbers.
--- They are weired on insert to the patchfields. Again the
+-- They are wired on insert to the patchfields. Again the
 -- triggers automatically tell the PSlots to update their
 -- backlink field.
 --
diff --git a/src/test/regress/sql/replica_identity.sql b/src/test/regress/sql/replica_identity.sql
--- a/src/test/regress/sql/replica_identity.sql
+++ b/src/test/regress/sql/replica_identity.sql
@@ -56,7 +56,7 @@ SELECT relreplident FROM pg_class WHERE 
 -- succeed, oid unique index
 ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_oid_idx;
 
--- succeed, nondeferrable unique constraint over nonullable cols
+-- succeed, nondeferrable unique constraint over nonnullable cols
 ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_unique_nondefer;
 
 -- succeed unique index over nonnullable cols
diff --git a/src/test/regress/sql/rolenames.sql b/src/test/regress/sql/rolenames.sql
--- a/src/test/regress/sql/rolenames.sql
+++ b/src/test/regress/sql/rolenames.sql
@@ -176,7 +176,7 @@ ALTER USER PUBLIC SET application_name t
 ALTER USER NONE SET application_name to 'BOMB'; -- error
 ALTER USER nonexistent SET application_name to 'BOMB'; -- error
 
--- CREAETE SCHEMA
+-- CREATE SCHEMA
 set client_min_messages to error;
 CREATE SCHEMA newschema1 AUTHORIZATION CURRENT_USER;
 CREATE SCHEMA newschema2 AUTHORIZATION "current_user";
diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql
--- a/src/test/regress/sql/rules.sql
+++ b/src/test/regress/sql/rules.sql
@@ -522,7 +522,7 @@ CREATE TABLE shoe_data (
        shoename   char(10),      -- primary key
        sh_avail   integer,       -- available # of pairs
        slcolor    char(10),      -- preferred shoelace color
-       slminlen   float,         -- miminum shoelace length
+       slminlen   float,         -- minimum shoelace length
        slmaxlen   float,         -- maximum shoelace length
        slunit     char(8)        -- length unit
 );
diff --git a/src/test/regress/sql/tsdicts.sql b/src/test/regress/sql/tsdicts.sql
--- a/src/test/regress/sql/tsdicts.sql
+++ b/src/test/regress/sql/tsdicts.sql
@@ -96,7 +96,7 @@ SELECT ts_lexize('hunspell_num', 'footba
 SELECT ts_lexize('hunspell_num', 'ballyklubber');
 SELECT ts_lexize('hunspell_num', 'footballyklubber');
 
--- Synonim dictionary
+-- Synonym dictionary
 CREATE TEXT SEARCH DICTIONARY synonym (
                                                Template=synonym,
                                                Synonyms=synonym_sample
diff --git a/src/test/ssl/ServerSetup.pm b/src/test/ssl/ServerSetup.pm
--- a/src/test/ssl/ServerSetup.pm
+++ b/src/test/ssl/ServerSetup.pm
@@ -7,7 +7,7 @@
 # - ssl/root+client_ca.crt as the CA root for validating client certs.
 # - reject non-SSL connections
 # - a database called trustdb that lets anyone in
-# - another database called certdb that uses certificate authentiction, ie.
+# - another database called certdb that uses certificate authentication, ie.
 #   the client must present a valid certificate signed by the client CA
 # - two users, called ssltestuser and anotheruser.
 #