diff --git src/backend/access/brin/brin_pageops.c src/backend/access/brin/brin_pageops.c
index 4e37ccb..ce4cbad 100644
--- src/backend/access/brin/brin_pageops.c
+++ src/backend/access/brin/brin_pageops.c
@@ -494,7 +494,7 @@ brin_evacuate_page(Relation idxRel, BlockNumber pagesPerRange,
  * index item of size itemsz.  If oldbuf is a valid buffer, it is also locked
  * (in an order determined to avoid deadlocks.)
  *
- * If there's no existing page with enough free space to accomodate the new
+ * If there's no existing page with enough free space to accommodate the new
  * item, the relation is extended.  If this happens, *extended is set to true.
  *
  * If we find that the old page is no longer a regular index page (because
diff --git src/backend/access/transam/twophase.c src/backend/access/transam/twophase.c
index d9a3fab..4743cac 100644
--- src/backend/access/transam/twophase.c
+++ src/backend/access/transam/twophase.c
@@ -269,7 +269,7 @@ AtAbort_Twophase(void)
 	 * can be finished later, so just unlock it.
 	 *
 	 * If we abort during prepare, after having written the WAL record, we
-	 * might not have transfered all locks and other state to the prepared
+	 * might not have transferred all locks and other state to the prepared
 	 * transaction yet.  Likewise, if we abort during commit or rollback,
 	 * after having written the WAL record, we might not have released
 	 * all the resources held by the transaction yet.  In those cases, the
diff --git src/backend/access/transam/xact.c src/backend/access/transam/xact.c
index 62f0045..2340105 100644
--- src/backend/access/transam/xact.c
+++ src/backend/access/transam/xact.c
@@ -2317,7 +2317,7 @@ PrepareTransaction(void)
 	/*
 	 * In normal commit-processing, this is all non-critical post-transaction
 	 * cleanup.  When the transaction is prepared, however, it's important that
-	 * the locks and other per-backend resources are transfered to the
+	 * the locks and other per-backend resources are transferred to the
 	 * prepared transaction's PGPROC entry.  Note that if an error is raised
 	 * here, it's too late to abort the transaction. XXX: This probably should
 	 * be in a critical section, to force a PANIC if any of this fails, but
diff --git src/backend/commands/event_trigger.c src/backend/commands/event_trigger.c
index 43aeac5..d786c7d 100644
--- src/backend/commands/event_trigger.c
+++ src/backend/commands/event_trigger.c
@@ -883,7 +883,7 @@ EventTriggerSQLDrop(Node *parsetree)
 	/*
 	 * Nothing to do if run list is empty.  Note this shouldn't happen,
 	 * because if there are no sql_drop events, then objects-to-drop wouldn't
-	 * have been collected in the first place and we would have quitted above.
+	 * have been collected in the first place and we would have quit above.
 	 */
 	if (runlist == NIL)
 		return;
diff --git src/backend/commands/vacuumlazy.c src/backend/commands/vacuumlazy.c
index c3d6e59..c94575c 100644
--- src/backend/commands/vacuumlazy.c
+++ src/backend/commands/vacuumlazy.c
@@ -952,7 +952,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
 				heap_execute_freeze_tuple(htup, &frozen[i]);
 			}
 
-			/* Now WAL-log freezing if neccessary */
+			/* Now WAL-log freezing if necessary */
 			if (RelationNeedsWAL(onerel))
 			{
 				XLogRecPtr	recptr;
diff --git src/backend/replication/basebackup.c src/backend/replication/basebackup.c
index 89e8cf0..4c1460c 100644
--- src/backend/replication/basebackup.c
+++ src/backend/replication/basebackup.c
@@ -85,7 +85,7 @@ static char *statrelpath = NULL;
 /* The actual number of bytes, transfer of which may cause sleep. */
 static uint64 throttling_sample;
 
-/* Amount of data already transfered but not yet throttled.  */
+/* Amount of data already transferred but not yet throttled.  */
 static int64 throttling_counter;
 
 /* The minimum time required to transfer throttling_sample bytes. */
@@ -172,7 +172,7 @@ perform_base_backup(basebackup_options *opt, DIR *tblspcdir)
 
 			/*
 			 * The minimum amount of time for throttling_sample bytes to be
-			 * transfered.
+			 * transferred.
 			 */
 			elapsed_min_unit = USECS_PER_SEC / THROTTLING_FREQUENCY;
 
diff --git src/backend/replication/logical/origin.c src/backend/replication/logical/origin.c
index 738e4b7..b4b98a5 100644
--- src/backend/replication/logical/origin.c
+++ src/backend/replication/logical/origin.c
@@ -1342,7 +1342,7 @@ pg_replication_origin_advance(PG_FUNCTION_ARGS)
 	/*
 	 * Can't sensibly pass a local commit to be flushed at checkpoint - this
 	 * xact hasn't committed yet. This is why this function should be used to
-	 * set up the intial replication state, but not for replay.
+	 * set up the initial replication state, but not for replay.
 	 */
 	replorigin_advance(node, remote_commit, InvalidXLogRecPtr,
 					   true /* go backward */, true /* wal log */);
diff --git src/backend/replication/logical/reorderbuffer.c src/backend/replication/logical/reorderbuffer.c
index 5b407aa..2d86323 100644
--- src/backend/replication/logical/reorderbuffer.c
+++ src/backend/replication/logical/reorderbuffer.c
@@ -143,7 +143,7 @@ typedef struct ReorderBufferDiskChange
  * without hitting disk in OLTP workloads, while starting to spool to disk in
  * other workloads reasonably fast.
  *
- * At some point in the future it probaly makes sense to have a more elaborate
+ * At some point in the future it probably makes sense to have a more elaborate
  * resource management here, but it's not entirely clear what that would look
  * like.
  */
@@ -1704,7 +1704,7 @@ ReorderBufferForget(ReorderBuffer *rb, TransactionId xid, XLogRecPtr lsn)
 	txn->final_lsn = lsn;
 
 	/*
-	 * Proccess cache invalidation messages if there are any. Even if we're
+	 * Process cache invalidation messages if there are any. Even if we're
 	 * not interested in the transaction's contents, it could have manipulated
 	 * the catalog and we need to update the caches according to that.
 	 */
diff --git src/backend/replication/logical/snapbuild.c src/backend/replication/logical/snapbuild.c
index 37476ca..35e1c06 100644
--- src/backend/replication/logical/snapbuild.c
+++ src/backend/replication/logical/snapbuild.c
@@ -770,7 +770,7 @@ SnapBuildDistributeNewCatalogSnapshot(SnapBuild *builder, XLogRecPtr lsn)
 	/*
 	 * Iterate through all toplevel transactions. This can include
 	 * subtransactions which we just don't yet know to be that, but that's
-	 * fine, they will just get an unneccesary snapshot queued.
+	 * fine, they will just get an unnecessary snapshot queued.
 	 */
 	dlist_foreach(txn_i, &builder->reorder->toplevel_by_lsn)
 	{
@@ -1212,7 +1212,7 @@ SnapBuildFindSnapshot(SnapBuild *builder, XLogRecPtr lsn, xl_running_xacts *runn
 	 *	  to CONSISTENT.
 	 *	  NB: We need to search running.xip when seeing a transaction's end to
 	 *	  make sure it's a toplevel transaction and it's been one of the
-	 *	  intially running ones.
+	 *	  initially running ones.
 	 *	  Interestingly, in contrast to HS, this allows us not to care about
 	 *	  subtransactions - and by extension suboverflowed xl_running_xacts -
 	 *	  at all.
@@ -1657,7 +1657,7 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn)
 	 * Make sure the snapshot had been stored safely to disk, that's normally
 	 * cheap.
 	 * Note that we do not need PANIC here, nobody will be able to use the
-	 * slot without fsyncing, and saving it won't suceed without an fsync()
+	 * slot without fsyncing, and saving it won't succeed without an fsync()
 	 * either...
 	 * ----
 	 */
@@ -1749,7 +1749,7 @@ SnapBuildRestore(SnapBuild *builder, XLogRecPtr lsn)
 
 	/*
 	 * We are only interested in consistent snapshots for now, comparing
-	 * whether one imcomplete snapshot is more "advanced" seems to be
+	 * whether one incomplete snapshot is more "advanced" seems to be
 	 * unnecessarily complex.
 	 */
 	if (ondisk.builder.state < SNAPBUILD_CONSISTENT)
diff --git src/backend/replication/slot.c src/backend/replication/slot.c
index 79c7791..e02571b 100644
--- src/backend/replication/slot.c
+++ src/backend/replication/slot.c
@@ -79,7 +79,7 @@ typedef struct ReplicationSlotOnDisk
 /* size of the part covered by the checksum */
 #define SnapBuildOnDiskChecksummedSize \
 	sizeof(ReplicationSlotOnDisk) - SnapBuildOnDiskNotChecksummedSize
-/* size of the slot data that is version dependant */
+/* size of the slot data that is version dependent */
 #define ReplicationSlotOnDiskV2Size \
 	sizeof(ReplicationSlotOnDisk) - ReplicationSlotOnDiskConstantSize
 
diff --git src/backend/storage/buffer/bufmgr.c src/backend/storage/buffer/bufmgr.c
index 2e6e0ca..861ec3e 100644
--- src/backend/storage/buffer/bufmgr.c
+++ src/backend/storage/buffer/bufmgr.c
@@ -2165,7 +2165,7 @@ CheckForBufferLeaks(void)
 		}
 	}
 
-	/* if neccessary search the hash */
+	/* if necessary search the hash */
 	if (PrivateRefCountOverflowed)
 	{
 		HASH_SEQ_STATUS hstat;
diff --git src/backend/storage/ipc/dsm.c src/backend/storage/ipc/dsm.c
index 29e46c2..b82ae05 100644
--- src/backend/storage/ipc/dsm.c
+++ src/backend/storage/ipc/dsm.c
@@ -225,7 +225,7 @@ dsm_cleanup_using_control_segment(dsm_handle old_control_handle)
 	/*
 	 * Try to attach the segment.  If this fails, it probably just means that
 	 * the operating system has been rebooted and the segment no longer
-	 * exists, or an unrelated proces has used the same shm ID.  So just fall
+	 * exists, or an unrelated process has used the same shm ID.  So just fall
 	 * out quietly.
 	 */
 	if (!dsm_impl_op(DSM_OP_ATTACH, old_control_handle, 0, &impl_private,
diff --git src/backend/storage/ipc/procarray.c src/backend/storage/ipc/procarray.c
index b4b4613..0b3ad72 100644
--- src/backend/storage/ipc/procarray.c
+++ src/backend/storage/ipc/procarray.c
@@ -2001,7 +2001,7 @@ GetOldestSafeDecodingTransactionId(void)
 	/*
 	 * If there's already a slot pegging the xmin horizon, we can start with
 	 * that value, it's guaranteed to be safe since it's computed by this
-	 * routine initally and has been enforced since.
+	 * routine initially and has been enforced since.
 	 */
 	if (TransactionIdIsValid(procArray->replication_slot_catalog_xmin) &&
 		TransactionIdPrecedes(procArray->replication_slot_catalog_xmin,
diff --git src/backend/storage/ipc/shm_mq.c src/backend/storage/ipc/shm_mq.c
index d42a8d1..daca634 100644
--- src/backend/storage/ipc/shm_mq.c
+++ src/backend/storage/ipc/shm_mq.c
@@ -1047,7 +1047,7 @@ shm_mq_inc_bytes_read(volatile shm_mq *mq, Size n)
 
 /*
  * Get the number of bytes written.  The sender need not use this to access
- * the count of bytes written, but the reciever must.
+ * the count of bytes written, but the receiver must.
  */
 static uint64
 shm_mq_get_bytes_written(volatile shm_mq *mq, bool *detached)
diff --git src/backend/utils/sort/logtape.c src/backend/utils/sort/logtape.c
index 252ba22..ef92b61 100644
--- src/backend/utils/sort/logtape.c
+++ src/backend/utils/sort/logtape.c
@@ -926,7 +926,7 @@ LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum, size_t size)
 /*
  * Seek to an arbitrary position in a logical tape.
  *
- * *Only* a frozen-for-read tape can be seeked.
+ * *Only* a frozen-for-read tape can be sought.
  *
  * Return value is TRUE if seek successful, FALSE if there isn't that much
  * data in the tape (in which case there's no state change).
diff --git src/bin/pg_dump/pg_dump.c src/bin/pg_dump/pg_dump.c
index dccb472..d39abf9 100644
--- src/bin/pg_dump/pg_dump.c
+++ src/bin/pg_dump/pg_dump.c
@@ -15484,7 +15484,7 @@ dumpRule(Archive *fout, DumpOptions *dopt, RuleInfo *rinfo)
  *    is able and expected to modify those tables after the extension has been
  *    loaded.  For these tables, we dump out only the data- the structure is
  *    expected to be handled at CREATE EXTENSION time, including any indexes or
- *    foriegn keys, which brings us to-
+ *    foreign keys, which brings us to-
  *
  * 3. Record FK dependencies between configuration tables.
  *
diff --git src/bin/pg_upgrade/pg_upgrade.c src/bin/pg_upgrade/pg_upgrade.c
index cc81fa0..4e6a9f9 100644
--- src/bin/pg_upgrade/pg_upgrade.c
+++ src/bin/pg_upgrade/pg_upgrade.c
@@ -198,7 +198,7 @@ setup(char *argv0, bool *live_check)
 		 * start, assume the server is running.  If the pid file is left over
 		 * from a server crash, this also allows any committed transactions
 		 * stored in the WAL to be replayed so they are not lost, because WAL
-		 * files are not transfered from old to new servers.
+		 * files are not transferred from old to new servers.
 		 */
 		if (start_postmaster(&old_cluster, false))
 			stop_postmaster(false);
diff --git src/bin/psql/print.c src/bin/psql/print.c
index e97b563..94c6984 100644
--- src/bin/psql/print.c
+++ src/bin/psql/print.c
@@ -749,7 +749,7 @@ print_aligned_text(const printTableContent *cont, FILE *fout)
 	{
 		/*
 		 * Optional optimized word wrap. Shrink columns with a high max/avg
-		 * ratio.  Slighly bias against wider columns. (Increases chance a
+		 * ratio.  Slightly bias against wider columns. (Increases chance a
 		 * narrow column will fit in its cell.)  If available columns is
 		 * positive...  and greater than the width of the unshrinkable column
 		 * headers
diff --git src/include/c.h src/include/c.h
index e63fd2f..92c5202 100644
--- src/include/c.h
+++ src/include/c.h
@@ -982,7 +982,7 @@ typedef NameData *Name;
- * To better support parallel installations of major PostgeSQL
+ * To better support parallel installations of major PostgreSQL
  * versions as well as parallel installations of major library soname
  * versions, we mangle the gettext domain name by appending those
- * version numbers.  The coding rule ought to be that whereever the
+ * version numbers.  The coding rule ought to be that wherever the
  * domain name is mentioned as a literal, it must be wrapped into
  * PG_TEXTDOMAIN().  The macros below do not work on non-literals; but
  * that is somewhat intentional because it avoids having to worry
diff --git src/include/utils/rel.h src/include/utils/rel.h
index fd40366..8a55a09 100644
--- src/include/utils/rel.h
+++ src/include/utils/rel.h
@@ -89,7 +89,7 @@ typedef struct RelationData
 	 * survived into, or zero if not changed in the current transaction (or we
 	 * have forgotten changing it). rd_newRelfilenodeSubid can be forgotten
 	 * when a relation has multiple new relfilenodes within a single
-	 * transaction, with one of them occuring in a subsequently aborted
+	 * transaction, with one of them occurring in a subsequently aborted
 	 * subtransaction, e.g. BEGIN; TRUNCATE t; SAVEPOINT save; TRUNCATE t;
 	 * ROLLBACK TO save; -- rd_newRelfilenode is now forgotten
 	 */
diff --git src/interfaces/libpq/fe-auth.c src/interfaces/libpq/fe-auth.c
index 08cc906..be64010 100644
--- src/interfaces/libpq/fe-auth.c
+++ src/interfaces/libpq/fe-auth.c
@@ -373,7 +373,7 @@ pg_SSPI_startup(PGconn *conn, int use_negotiate)
 	conn->sspictx = NULL;
 
 	/*
-	 * Retreive credentials handle
+	 * Retrieve credentials handle
 	 */
 	conn->sspicred = malloc(sizeof(CredHandle));
 	if (conn->sspicred == NULL)
