commit a8b461c0333db905ee4f6a9d2bfc657a37543893
Author: David G. Johnston <david.g.johnston@gmail.com>
Date:   Mon Nov 16 23:17:10 2020 +0000

    v25 doc suggestions

diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml
index 2a94f8f6b9..f446c013ea 100644
--- a/doc/src/sgml/libpq.sgml
+++ b/doc/src/sgml/libpq.sgml
@@ -4871,6 +4871,13 @@ int PQflush(PGconn *conn);
    corresponds to which query in the queue.
   </para>
 
+  <para>
+   Batch mode also generally consumes more memory on both the client and server,
+   though careful and aggressive management of the send/receive queue can mitigate
+   this.  This applies whether the connection is in blocking or non-blocking
+   mode.
+  </para>
+
   <sect2 id="libpq-batch-using">
    <title>Using Batch Mode</title>
 
@@ -4882,9 +4889,9 @@ int PQflush(PGconn *conn);
     In batch mode, only <link linkend="libpq-async">asynchronous operations</link>
     are permitted, and <literal>COPY</literal> is not recommended as it
     may trigger failure in batch processing.  Using any synchronous
-    command execution functions such as <function>PQfn</function>,
-    <function>PQexec</function> or one of its sibling functions are error
-    conditions.
+    command execution functions, such as <function>PQfn</function>, or
+    <function>PQexec</function> and its sibling functions, is an error
+    condition.
    </para>
 
    <note>
@@ -4903,10 +4910,6 @@ int PQflush(PGconn *conn);
        </para>
       </footnote>
     </para>
-    <para>
-     Batch mode consumes more memory when send/receive is not done as required,
-     even in non-blocking mode.
-    </para>
    </note>
 
    <sect3 id="libpq-batch-sending">
@@ -4914,13 +4917,13 @@ int PQflush(PGconn *conn);
 
     <para>
      After entering batch mode the application dispatches requests using
-     normal asynchronous <application>libpq</application> functions such as
+     normal asynchronous <application>libpq</application> functions, such as:
      <function>PQsendQueryParams</function>, <function>PQsendPrepare</function>,
      <function>PQsendQueryPrepared</function>, <function>PQsendDescribePortal</function>,
-     <function>PQsendDescribePrepared</function>.
+     and <function>PQsendDescribePrepared</function>.
      The asynchronous requests are followed by a
      <xref linkend="libpq-PQbatchSendQueue"/>
-     call to mark the end of the batch. The client needs not
+     call to mark the end of the batch. The client need not
      call <function>PQgetResult</function> immediately after
      dispatching each operation.
      <link linkend="libpq-batch-results">Result processing</link>
@@ -4929,8 +4932,8 @@ int PQflush(PGconn *conn);
 
     <para>
      The server executes statements, and returns results, in the order the
-     client sends them.  The server may begin executing the batch before all
-     commands in the batch are queued and the end of batch command is sent.
+     client sends them.  The server will begin executing the batch commands
+     immediately, not waiting for the end of batch command.
      If any statement encounters an error the server aborts the current
      transaction and skips processing the rest of the batch.
      Query processing resumes after the end of the failed batch.
@@ -4981,8 +4984,10 @@ int PQflush(PGconn *conn);
      and <literal>PGRES_BATCH_ABORTED</literal>.
      <literal>PGRES_BATCH_END</literal> is reported exactly once for each
      <function>PQbatchSendQueue</function> call at the corresponding point in
-     the result stream and at no other time.
-     <literal>PGRES_BATCH_ABORTED</literal> is emitted during error handling;
+     the result stream.
+     <literal>PGRES_BATCH_ABORTED</literal> is emitted in place of a normal
+     query result for the first error and for all subsequent results
+     except <literal>PGRES_BATCH_END</literal> and null;
      see <xref linkend="libpq-batch-errors"/>.
     </para>
 
@@ -5038,8 +5043,8 @@ int PQflush(PGconn *conn);
      transaction state at the end of the batch. If a batch contains <emphasis>
      multiple explicit transactions</emphasis>, all transactions that committed
      prior to the error remain committed, the currently in-progress transaction
-     is aborted and all subsequent operations in the current and all later
-     transactions in the same batch are skipped completely.
+     is aborted, and all subsequent operations, including those in later
+     transactions in the same batch, are skipped completely.
     </para>
 
     <note>
@@ -5071,10 +5076,10 @@ int PQflush(PGconn *conn);
      it should dispatch more work. When the socket is readable it should
      read results and process them, matching them up to the next entry in
      its expected results queue.  Based on available memory, results from
-     socket should be read frequently and there's no need to wait till the
+     socket should be read frequently: there's no need to wait until the
      batch end to read the results.  Batches should be scoped to logical
      units of work, usually (but not necessarily) one transaction per batch.
-     There's no need to exit batch mode and re-enter it between batches
+     There's no need to exit batch mode and re-enter it between batches,
      or to wait for one batch to finish before sending the next.
     </para>
 
@@ -5090,8 +5095,8 @@ int PQflush(PGconn *conn);
     <title>Ending Batch Mode</title>
 
     <para>
-     Once all dispatched commands have had their results processed and
-     the end batch result has been consumed the application may return
+     Once all dispatched commands have had their results processed, and
+     the end batch result has been consumed, the application may return
      to non-batched mode with <xref linkend="libpq-PQexitBatchMode"/>.
     </para>
    </sect3>
@@ -5258,10 +5263,11 @@ int PQbatchSendQueue(PGconn *conn);
    <title>When to Use Batching</title>
 
    <para>
-    Much like asynchronous query mode, there is no performance disadvantage to
-    using batching and pipelining. It increases client application complexity
+    Much like asynchronous query mode, there is no meaningful performance
+    overhead when using batching. It increases client application complexity,
     and extra caution is required to prevent client/server deadlocks, but
-    pipelining can sometimes offer considerable performance improvements.
+    batching can offer considerable performance improvements, in exchange for
+    increased memory usage from retaining state longer.
    </para>
 
    <para>
@@ -5279,7 +5285,7 @@ int PQbatchSendQueue(PGconn *conn);
     Use batches when your application does lots of small
     <literal>INSERT</literal>, <literal>UPDATE</literal> and
     <literal>DELETE</literal> operations that can't easily be transformed
-    into operations on sets or into a <literal>COPY</literal> operation.
+    into operations on sets, or into a <literal>COPY</literal> operation.
    </para>
 
    <para>
@@ -5310,7 +5316,7 @@ UPDATE mytable SET x = x + 1 WHERE id = 42;
 
    <note>
     <para>
-     The batch API was introduced in PostgreSQL 14.0, but clients using
+     The batch API was introduced in PostgreSQL 14, but clients using
      the PostgreSQL 14 version of <application>libpq</application> can use
      batches on server versions 7.4 and newer. Batching works on any server
      that supports the v3 extended query protocol.
