diff --git a/src/backend/replication/logical/applyparallelworker.c b/src/backend/replication/logical/applyparallelworker.c
index ef52333477..b6f81ae7c8 100644
--- a/src/backend/replication/logical/applyparallelworker.c
+++ b/src/backend/replication/logical/applyparallelworker.c
@@ -99,13 +99,13 @@ volatile ParallelApplyWorkerShared *MyParallelShared = NULL;
 volatile bool ParallelApplyMessagePending = false;
 
 /*
- * Cache a pointer to the parallel apply worker which is responsible for
- * applying the current streaming transactions. It is used to save the
- * cost of searching the hash table when applying the changes between
- * STREAM_START and STREAM_STOP.
+ * Cache the parallel apply worker information required for applying the
+ * current streaming transaction. It is used to save the cost of searching the
+ * hash table when applying the changes between STREAM_START and STREAM_STOP.
  */
 ParallelApplyWorkerInfo *stream_apply_worker = NULL;
 
+/* A list to maintain subtransactions, if any, of the current transaction. */
 List	   *subxactlist = NIL;
 
 static bool parallel_apply_can_start(TransactionId xid);
@@ -113,7 +113,8 @@ static bool parallel_apply_setup_dsm(ParallelApplyWorkerInfo *winfo);
 static ParallelApplyWorkerInfo *parallel_apply_setup_worker(void);
 
 /*
- * Check if starting a new parallel apply worker is allowed.
+ * Returns true if it is allowed to start a parallel apply worker, false
+ * otherwise.
  */
 static bool
 parallel_apply_can_start(TransactionId xid)
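
The reworded header above gives only the contract of parallel_apply_can_start(). As a review aid, here is a standalone, illustrative-only sketch of the kind of gatekeeping such a check typically performs; every identifier in it (is_leader_apply_worker, streaming_parallel_enabled, and the TransactionId stand-in) is an assumption for illustration, not taken from the patch:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t TransactionId;		/* stand-in for PostgreSQL's type */
#define InvalidTransactionId ((TransactionId) 0)

/* Stand-ins for state the leader apply worker would track. */
static bool is_leader_apply_worker = true;		/* only the leader launches */
static bool streaming_parallel_enabled = true;	/* subscription option */

/*
 * Returns true if it is allowed to start a parallel apply worker, false
 * otherwise (sketch only).
 */
static bool
can_start_parallel_apply_worker(TransactionId xid)
{
	/* A streamed transaction must carry a valid top-level xid. */
	if (xid == InvalidTransactionId)
		return false;

	/* Only the leader apply worker may hand transactions off. */
	if (!is_leader_apply_worker)
		return false;

	/* The subscription must have opted into parallel streaming. */
	if (!streaming_parallel_enabled)
		return false;

	return true;
}

int
main(void)
{
	return can_start_parallel_apply_worker(742) ? 0 : 1;
}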
diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index ccb8ba62d6..225dafa2aa 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -39,10 +39,17 @@
  * transactions that update the same set of rows/tables in opposite order to be
  * applied in parallel can lead to deadlocks.
  *
- * If there are enough parallel apply workers (reached half of the
- * max_parallel_apply_workers_per_subscription) in the pool, the new parallel apply
- * worker will be shut down at transaction end. Otherwise, it will stay in the
- * pool.
+ * We maintain a worker pool to avoid restarting workers for each streaming
+ * transaction. Each worker's information is kept in the
+ * ParallelApplyWorkersList: after successfully launching a new worker, its
+ * information is added to the list, and once the worker finishes applying
+ * its transaction, we mark it as available for reuse. Before starting a new
+ * worker to apply a streaming transaction, we check the list and reuse a
+ * worker if one is available. Note that we retain at most half of
+ * max_parallel_apply_workers_per_subscription workers in the pool; beyond
+ * that, a worker simply exits after applying its transaction. This pool
+ * threshold is somewhat arbitrary and a GUC could be provided for it in the
+ * future if required.
  *
  * The leader apply worker will create separate dynamic shared memory segment
  * when each parallel apply worker starts. The reason for this design is that
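
To make the pool policy described in the rewritten header comment concrete, below is a self-contained sketch of the reuse logic: prefer a free pooled worker, grow the pool up to half of the per-subscription maximum, and let any worker beyond that exit once its transaction is applied. The identifiers (pool_acquire_worker, POOL_LIMIT, WorkerInfo) are illustrative stand-ins, not the patch's actual symbols:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_PARALLEL_APPLY_WORKERS 8	/* stand-in for the subscription limit */
#define POOL_LIMIT (MAX_PARALLEL_APPLY_WORKERS / 2)

typedef struct WorkerInfo
{
	bool		in_use;
} WorkerInfo;

static WorkerInfo pool[POOL_LIMIT];
static int	pool_size = 0;

/* Reuse a free pooled worker if any; otherwise grow the pool if allowed. */
static WorkerInfo *
pool_acquire_worker(void)
{
	for (int i = 0; i < pool_size; i++)
	{
		if (!pool[i].in_use)
		{
			pool[i].in_use = true;	/* reuse, saving a worker restart */
			return &pool[i];
		}
	}

	if (pool_size < POOL_LIMIT)
	{
		pool[pool_size].in_use = true;	/* launch and keep in the pool */
		return &pool[pool_size++];
	}

	return NULL;	/* caller launches a transient worker that exits after use */
}

/* At transaction end, pooled workers become available again. */
static void
pool_release_worker(WorkerInfo *w)
{
	if (w != NULL)
		w->in_use = false;
}

int
main(void)
{
	WorkerInfo *w = pool_acquire_worker();

	printf("acquired %s worker\n", w ? "pooled" : "transient");
	pool_release_worker(w);
	return 0;
}

Capping the pool at half the per-subscription maximum is, as the comment itself notes, arbitrary: it trades idle workers held in reserve against the cost of relaunching one for each streamed transaction.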
