On Tue, May 07, 2019 at 06:25:12PM -0400, Tom Lane wrote:
> Stephen Frost <sfr...@snowman.net> writes:
> > I'm generally in favor of doing something like what Tom is suggesting
> > with VERBOSE, but I also feel like it should be the default for formats
> > like JSON.  If you're asking for the output in JSON, then we really
> > should include everything that a flag like VERBOSE would contain because
> > you're pretty clearly planning to copy/paste that output into something
> > else to read it anyway.
> 
> Meh --- I don't especially care for non-orthogonal behaviors like that.
> If you wanted JSON but *not* all of the additional info, how would you
> specify that?  (The implementation I had in mind would make VERBOSE OFF
> more or less a no-op, so that wouldn't get you there.)
> 
> >> I do feel that it's a good idea to keep ANALYZE separate. "Execute
> >> the query or not" is a mighty fundamental thing.  I've never liked
> >> that name for the option though --- maybe we could deprecate it
> >> in favor of EXECUTE?
> 
> > Let's not fool ourselves by saying we'd 'deprecate' it because that
> > implies, at least to me, that there's some intention of later on
> > removing it
> 
> True, the odds of ever actually removing it are small :-(.  I meant
> mostly changing all of our docs to use the other spelling, except
> for some footnote.  Maybe we could call ANALYZE a "legacy spelling"
> of EXECUTE.

I tried changing it to EXEC (EXPLAIN EXECUTE is already a thing), but
got a giant flock of reduce-reduce conflicts along with a few
shift-reduce conflicts.
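
My best guess at where the conflicts come from, sketched below (this just
restates the relevant productions; ExplainableStmt, ExecuteStmt, SelectStmt,
and opt_verbose are the existing gram.y rules, exec_keyword is from the
attached patch):

    ExplainStmt:
        EXPLAIN ExplainableStmt
        | EXPLAIN exec_keyword opt_verbose ExplainableStmt
        ;

    ExplainableStmt:
        SelectStmt
        | ExecuteStmt        /* the existing EXPLAIN EXECUTE form */
        /* ... */
        ;

    exec_keyword:
        EXECUTE              /* also the first token of ExecuteStmt */
        | ANALYZE
        | ANALYSE
        ;

With one token of lookahead, the parser can't tell whether an EXECUTE right
after EXPLAIN starts exec_keyword or an ExecuteStmt, which is where I assume
the conflicts come from; ANALYZE and ANALYSE don't have that problem because
no ExplainableStmt begins with them.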

How do I fix this?

Best,
David.
-- 
David Fetter <david(at)fetter(dot)org> http://fetter.org/
Phone: +1 415 235 3778

Remember to vote!
Consider donating to Postgres: http://www.postgresql.org/about/donate
From c8cdcf6ea0ee25b7159898a09210fc343b14e5e5 Mon Sep 17 00:00:00 2001
From: David Fetter <da...@fetter.org>
Date: Sun, 12 May 2019 21:48:09 -0700
Subject: [PATCH v1] WIP (broken): Changed EXPLAIN ANALYZE to EXPLAIN EXEC
To: hackers


- In passing, changed a few of EXPLAIN's defaults

diff --git a/doc/src/sgml/ref/explain.sgml b/doc/src/sgml/ref/explain.sgml
index 385d10411f..63a3faede8 100644
--- a/doc/src/sgml/ref/explain.sgml
+++ b/doc/src/sgml/ref/explain.sgml
@@ -32,11 +32,11 @@ PostgreSQL documentation
  <refsynopsisdiv>
 <synopsis>
 EXPLAIN [ ( <replaceable class="parameter">option</replaceable> [, ...] ) ] <replaceable class="parameter">statement</replaceable>
-EXPLAIN [ ANALYZE ] [ VERBOSE ] <replaceable class="parameter">statement</replaceable>
+EXPLAIN [ EXEC ] [ VERBOSE ] <replaceable class="parameter">statement</replaceable>
 
 <phrase>where <replaceable class="parameter">option</replaceable> can be one of:</phrase>
 
-    ANALYZE [ <replaceable class="parameter">boolean</replaceable> ]
+    EXEC [ <replaceable class="parameter">boolean</replaceable> ]
     VERBOSE [ <replaceable class="parameter">boolean</replaceable> ]
     COSTS [ <replaceable class="parameter">boolean</replaceable> ]
     SETTINGS [ <replaceable class="parameter">boolean</replaceable> ]
@@ -76,36 +76,37 @@ EXPLAIN [ ANALYZE ] [ VERBOSE ] <replaceable class="parameter">statement</replac
   </para>
 
   <para>
-   The <literal>ANALYZE</literal> option causes the statement to be actually
+   The <literal>EXEC</literal> option causes the statement to be actually
    executed, not only planned.  Then actual run time statistics are added to
    the display, including the total elapsed time expended within each plan
    node (in milliseconds) and the total number of rows it actually returned.
    This is useful for seeing whether the planner's estimates
-   are close to reality.
+   are close to reality. For historical reasons, <literal>ANALYZE</literal>
+   and <literal>ANALYSE</literal> are accepted in place of <literal>EXEC</literal>.
   </para>
 
   <important>
    <para>
     Keep in mind that the statement is actually executed when
-    the <literal>ANALYZE</literal> option is used.  Although
+    the <literal>EXEC</literal> option is used.  Although
     <command>EXPLAIN</command> will discard any output that a
     <command>SELECT</command> would return, other side effects of the
     statement will happen as usual.  If you wish to use
-    <command>EXPLAIN ANALYZE</command> on an
+    <command>EXPLAIN EXEC</command> on an
     <command>INSERT</command>, <command>UPDATE</command>,
     <command>DELETE</command>, <command>CREATE TABLE AS</command>,
     or <command>EXECUTE</command> statement
     without letting the command affect your data, use this approach:
 <programlisting>
 BEGIN;
-EXPLAIN ANALYZE ...;
+EXPLAIN EXEC ...;
 ROLLBACK;
 </programlisting>
    </para>
   </important>
 
   <para>
-   Only the <literal>ANALYZE</literal> and <literal>VERBOSE</literal> options
+   Only the <literal>EXEC</literal> and <literal>VERBOSE</literal> options
    can be specified, and only in that order, without surrounding the option
    list in parentheses.  Prior to <productname>PostgreSQL</productname> 9.0,
    the unparenthesized syntax was the only one supported.  It is expected that
@@ -118,7 +119,7 @@ ROLLBACK;
 
   <variablelist>
    <varlistentry>
-    <term><literal>ANALYZE</literal></term>
+    <term><literal>EXEC</literal></term>
     <listitem>
      <para>
       Carry out the command and show actual run times and other statistics.
@@ -159,7 +160,7 @@ ROLLBACK;
      <para>
       Include information on configuration parameters.  Specifically, include
       options affecting query planning with value different from the built-in
-      default value.  This parameter defaults to <literal>FALSE</literal>.
+      default value.  This parameter defaults to <literal>TRUE</literal>.
      </para>
     </listitem>
    </varlistentry>
@@ -186,8 +187,8 @@ ROLLBACK;
       The number of blocks shown for an
       upper-level node includes those used by all its child nodes.  In text
       format, only non-zero values are printed.  This parameter may only be
-      used when <literal>ANALYZE</literal> is also enabled.  It defaults to
-      <literal>FALSE</literal>.
+      used when <literal>EXEC</literal> is also enabled.  It defaults to
+      <literal>TRUE</literal>.
      </para>
     </listitem>
    </varlistentry>
@@ -203,7 +204,7 @@ ROLLBACK;
       not exact times, are needed.  Run time of the entire statement is
       always measured, even when node-level timing is turned off with this
       option.
-      This parameter may only be used when <literal>ANALYZE</literal> is also
+      This parameter may only be used when <literal>EXEC</literal> is also
       enabled.  It defaults to <literal>TRUE</literal>.
      </para>
     </listitem>
@@ -215,9 +216,9 @@ ROLLBACK;
      <para>
       Include summary information (e.g., totaled timing information) after the
       query plan.  Summary information is included by default when
-      <literal>ANALYZE</literal> is used but otherwise is not included by
+      <literal>EXEC</literal> is used but otherwise is not included by
       default, but can be enabled using this option.  Planning time in
-      <command>EXPLAIN EXECUTE</command> includes the time required to fetch
+      <command>EXPLAIN EXEC</command> includes the time required to fetch
       the plan from the cache and the time required for re-planning, if
       necessary.
      </para>
@@ -295,8 +296,8 @@ ROLLBACK;
   <para>
    In order to measure the run-time cost of each node in the execution
    plan, the current implementation of <command>EXPLAIN
-   ANALYZE</command> adds profiling overhead to query execution.
-   As a result, running <command>EXPLAIN ANALYZE</command>
+   EXEC</command> adds profiling overhead to query execution.
+   As a result, running <command>EXPLAIN EXEC</command>
    on a query can sometimes take significantly longer than executing
    the query normally. The amount of overhead depends on the nature of
    the query, as well as the platform being used.  The worst case occurs
@@ -423,7 +424,7 @@ PREPARE query(int, int) AS SELECT sum(bar) FROM test
     WHERE id &gt; $1 AND id &lt; $2
     GROUP BY foo;
 
-EXPLAIN ANALYZE EXECUTE query(100, 200);
+EXPLAIN EXEC EXECUTE query(100, 200);
 
                                                        QUERY PLAN                                                       
 ------------------------------------------------------------------------------------------------------------------------
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 3dc0e8a4fb..c1057ca729 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -10632,7 +10632,7 @@ opt_vacuum_relation_list:
 /*****************************************************************************
  *
  *		QUERY:
- *				EXPLAIN [ANALYZE] [VERBOSE] query
+ *				EXPLAIN [EXEC] [VERBOSE] query
  *				EXPLAIN ( options ) query
  *
  *****************************************************************************/
@@ -10645,11 +10645,11 @@ ExplainStmt:
 					n->options = NIL;
 					$$ = (Node *) n;
 				}
-		| EXPLAIN analyze_keyword opt_verbose ExplainableStmt
+		| EXPLAIN exec_keyword opt_verbose ExplainableStmt
 				{
 					ExplainStmt *n = makeNode(ExplainStmt);
 					n->query = $4;
-					n->options = list_make1(makeDefElem("analyze", NULL, @2));
+					n->options = list_make1(makeDefElem("exec", NULL, @2));
 					if ($3)
 						n->options = lappend(n->options,
 											 makeDefElem("verbose", NULL, @3));
@@ -10703,7 +10703,7 @@ explain_option_elem:
 
 explain_option_name:
 			NonReservedWord			{ $$ = $1; }
-			| analyze_keyword		{ $$ = "analyze"; }
+			| exec_keyword			{ $$ = "exec"; }
 		;
 
 explain_option_arg:
@@ -10712,6 +10712,12 @@ explain_option_arg:
 			| /* EMPTY */			{ $$ = NULL; }
 		;
 
+exec_keyword:
+			EXECUTE									{}
+			| ANALYZE /* Legacy */					{}
+			| ANALYSE /* British and legacy */		{}
+		;
+
 /*****************************************************************************
  *
  *		QUERY:
diff --git a/src/include/commands/explain.h b/src/include/commands/explain.h
index db48f29501..09cc806d7e 100644
--- a/src/include/commands/explain.h
+++ b/src/include/commands/explain.h
@@ -30,8 +30,8 @@ typedef struct ExplainState
 	StringInfo	str;			/* output buffer */
 	/* options */
 	bool		verbose;		/* be verbose */
-	bool		analyze;		/* print actual times */
 	bool		costs;			/* print estimated costs */
+	bool		exec;			/* actually execute the query and measure it */
 	bool		buffers;		/* print buffer usage */
 	bool		timing;			/* print detailed node timing */
 	bool		summary;		/* print total planning and execution timing */
diff --git a/src/include/executor/executor.h b/src/include/executor/executor.h
index eb4c8b5e79..72730fcfaf 100644
--- a/src/include/executor/executor.h
+++ b/src/include/executor/executor.h
@@ -52,7 +52,7 @@
  * mean that the plan can't queue any AFTER triggers; just that the caller
  * is responsible for there being a trigger context for them to be queued in.
  */
-#define EXEC_FLAG_EXPLAIN_ONLY	0x0001	/* EXPLAIN, no ANALYZE */
+#define EXEC_FLAG_EXPLAIN_ONLY	0x0001	/* EXPLAIN, no EXEC */
 #define EXEC_FLAG_REWIND		0x0002	/* need efficient rescan */
 #define EXEC_FLAG_BACKWARD		0x0004	/* need backward scan */
 #define EXEC_FLAG_MARK			0x0008	/* need mark/restore */
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index ff3328752e..51f9263e0b 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -2189,7 +2189,7 @@ typedef struct GatherMergeState
 } GatherMergeState;
 
 /* ----------------
- *	 Values displayed by EXPLAIN ANALYZE
+ *	 Values displayed by EXPLAIN EXEC
  * ----------------
  */
 typedef struct HashInstrumentation
diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c
index edc50f9368..1aeda40ea1 100644
--- a/contrib/auto_explain/auto_explain.c
+++ b/contrib/auto_explain/auto_explain.c
@@ -23,7 +23,7 @@ PG_MODULE_MAGIC;
 
 /* GUC variables */
 static int	auto_explain_log_min_duration = -1; /* msec or -1 */
-static bool auto_explain_log_analyze = false;
+static bool auto_explain_log_exec = false;
 static bool auto_explain_log_verbose = false;
 static bool auto_explain_log_buffers = false;
 static bool auto_explain_log_triggers = false;
@@ -102,10 +102,10 @@ _PG_init(void)
 							NULL,
 							NULL);
 
-	DefineCustomBoolVariable("auto_explain.log_analyze",
-							 "Use EXPLAIN ANALYZE for plan logging.",
+	DefineCustomBoolVariable("auto_explain.log_exec",
+							 "Use EXPLAIN EXEC for plan logging.",
 							 NULL,
-							 &auto_explain_log_analyze,
+							 &auto_explain_log_exec,
 							 false,
 							 PGC_SUSET,
 							 0,
@@ -148,7 +148,7 @@ _PG_init(void)
 
 	DefineCustomBoolVariable("auto_explain.log_triggers",
 							 "Include trigger statistics in plans.",
-							 "This has no effect unless log_analyze is also set.",
+							 "This has no effect unless log_exec is also set.",
 							 &auto_explain_log_triggers,
 							 false,
 							 PGC_SUSET,
@@ -258,8 +258,8 @@ explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
 
 	if (auto_explain_enabled() && current_query_sampled)
 	{
-		/* Enable per-node instrumentation iff log_analyze is required. */
-		if (auto_explain_log_analyze && (eflags & EXEC_FLAG_EXPLAIN_ONLY) == 0)
+		/* Enable per-node instrumentation iff log_exec is required. */
+		if (auto_explain_log_exec && (eflags & EXEC_FLAG_EXPLAIN_ONLY) == 0)
 		{
 			if (auto_explain_log_timing)
 				queryDesc->instrument_options |= INSTRUMENT_TIMER;
@@ -362,18 +362,18 @@ explain_ExecutorEnd(QueryDesc *queryDesc)
 		{
 			ExplainState *es = NewExplainState();
 
-			es->analyze = (queryDesc->instrument_options && auto_explain_log_analyze);
+			es->exec = (queryDesc->instrument_options && auto_explain_log_exec);
 			es->verbose = auto_explain_log_verbose;
-			es->buffers = (es->analyze && auto_explain_log_buffers);
-			es->timing = (es->analyze && auto_explain_log_timing);
-			es->summary = es->analyze;
+			es->buffers = (es->exec && auto_explain_log_buffers);
+			es->timing = (es->exec && auto_explain_log_timing);
+			es->summary = es->exec;
 			es->format = auto_explain_log_format;
 			es->settings = auto_explain_log_settings;
 
 			ExplainBeginOutput(es);
 			ExplainQueryText(es, queryDesc);
 			ExplainPrintPlan(es, queryDesc);
-			if (es->analyze && auto_explain_log_triggers)
+			if (es->exec && auto_explain_log_triggers)
 				ExplainPrintTriggers(es, queryDesc);
 			if (es->costs)
 				ExplainPrintJITSummary(es, queryDesc);
diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c
index 85534a3a76..58e88c9957 100644
--- a/contrib/file_fdw/file_fdw.c
+++ b/contrib/file_fdw/file_fdw.c
@@ -661,7 +661,7 @@ fileBeginForeignScan(ForeignScanState *node, int eflags)
 	FileFdwExecutionState *festate;
 
 	/*
-	 * Do nothing in EXPLAIN (no ANALYZE) case.  node->fdw_state stays NULL.
+	 * Do nothing in EXPLAIN (no EXEC) case.  node->fdw_state stays NULL.
 	 */
 	if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
 		return;
diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c
index 4cbfb12a66..e84d31cd7f 100644
--- a/contrib/postgres_fdw/postgres_fdw.c
+++ b/contrib/postgres_fdw/postgres_fdw.c
@@ -1413,7 +1413,7 @@ postgresBeginForeignScan(ForeignScanState *node, int eflags)
 	int			numParams;
 
 	/*
-	 * Do nothing in EXPLAIN (no ANALYZE) case.  node->fdw_state stays NULL.
+	 * Do nothing in EXPLAIN (no EXEC) case.  node->fdw_state stays NULL.
 	 */
 	if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
 		return;
@@ -1804,7 +1804,7 @@ postgresBeginForeignModify(ModifyTableState *mtstate,
 	RangeTblEntry *rte;
 
 	/*
-	 * Do nothing in EXPLAIN (no ANALYZE) case.  resultRelInfo->ri_FdwState
+	 * Do nothing in EXPLAIN (no EXEC) case.  resultRelInfo->ri_FdwState
 	 * stays NULL.
 	 */
 	if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
@@ -2345,7 +2345,7 @@ postgresBeginDirectModify(ForeignScanState *node, int eflags)
 	int			numParams;
 
 	/*
-	 * Do nothing in EXPLAIN (no ANALYZE) case.  node->fdw_state stays NULL.
+	 * Do nothing in EXPLAIN (no EXEC) case.  node->fdw_state stays NULL.
 	 */
 	if (eflags & EXEC_FLAG_EXPLAIN_ONLY)
 		return;
@@ -2477,7 +2477,7 @@ postgresIterateDirectModify(ForeignScanState *node)
 		if (dmstate->set_processed)
 			estate->es_processed += dmstate->num_tuples;
 
-		/* Increment the tuple count for EXPLAIN ANALYZE if necessary. */
+		/* Increment the tuple count for EXPLAIN EXEC if necessary. */
 		if (instr)
 			instr->tuplecount += dmstate->num_tuples;
 
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index a6c6de78f1..15dd95e3b7 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -147,6 +147,7 @@ ExplainQuery(ParseState *pstate, ExplainStmt *stmt, const char *queryString,
 	TupOutputState *tstate;
 	List	   *rewritten;
 	ListCell   *lc;
+	bool		buffers_set = false;
 	bool		timing_set = false;
 	bool		summary_set = false;
 
@@ -155,14 +156,17 @@ ExplainQuery(ParseState *pstate, ExplainStmt *stmt, const char *queryString,
 	{
 		DefElem    *opt = (DefElem *) lfirst(lc);
 
-		if (strcmp(opt->defname, "analyze") == 0)
-			es->analyze = defGetBoolean(opt);
+		if (strcmp(opt->defname, "exec") == 0)
+			es->exec = defGetBoolean(opt);
 		else if (strcmp(opt->defname, "verbose") == 0)
 			es->verbose = defGetBoolean(opt);
 		else if (strcmp(opt->defname, "costs") == 0)
 			es->costs = defGetBoolean(opt);
 		else if (strcmp(opt->defname, "buffers") == 0)
+		{
+			buffers_set = true;
 			es->buffers = defGetBoolean(opt);
+		}
 		else if (strcmp(opt->defname, "settings") == 0)
 			es->settings = defGetBoolean(opt);
 		else if (strcmp(opt->defname, "timing") == 0)
@@ -202,22 +206,21 @@ ExplainQuery(ParseState *pstate, ExplainStmt *stmt, const char *queryString,
 					 parser_errposition(pstate, opt->location)));
 	}
 
-	if (es->buffers && !es->analyze)
-		ereport(ERROR,
-				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-				 errmsg("EXPLAIN option BUFFERS requires ANALYZE")));
+	/* Turn buffers off automatically when exec isn't set */
+	if (!es->exec)
+		es->buffers = false;
 
 	/* if the timing was not set explicitly, set default value */
-	es->timing = (timing_set) ? es->timing : es->analyze;
+	es->timing = (timing_set) ? es->timing : es->exec;
 
-	/* check that timing is used with EXPLAIN ANALYZE */
-	if (es->timing && !es->analyze)
+	/* check that timing is used with EXPLAIN EXEC */
+	if (es->timing && !es->exec)
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-				 errmsg("EXPLAIN option TIMING requires ANALYZE")));
+				 errmsg("EXPLAIN option TIMING requires EXEC")));
 
 	/* if the summary was not set explicitly, set default value */
-	es->summary = (summary_set) ? es->summary : es->analyze;
+	es->summary = (summary_set) ? es->summary : es->exec;
 
 	/*
 	 * Parse analysis was done already, but we still have to run the rule
@@ -286,8 +289,11 @@ NewExplainState(void)
 {
 	ExplainState *es = (ExplainState *) palloc0(sizeof(ExplainState));
 
-	/* Set default options (most fields can be left as zeroes). */
+	/* Set default options (remaining fields can be left as zeroes). */
 	es->costs = true;
+	es->buffers = true;
+	es->settings = true;
+	es->timing = true;
 	/* Prepare output buffer. */
 	es->str = makeStringInfo();
 
@@ -415,7 +421,7 @@ ExplainOneUtility(Node *utilityStmt, IntoClause *into, ExplainState *es,
 		/*
 		 * Likewise for DECLARE CURSOR.
 		 *
-		 * Notice that if you say EXPLAIN ANALYZE DECLARE CURSOR then we'll
+		 * Notice that if you say EXPLAIN EXEC DECLARE CURSOR then we'll
 		 * actually run the query.  This is different from pre-8.3 behavior
 		 * but seems more useful than not running the query.  No cursor will
 		 * be created, however.
@@ -475,9 +481,9 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
 
 	Assert(plannedstmt->commandType != CMD_UTILITY);
 
-	if (es->analyze && es->timing)
+	if (es->exec && es->timing)
 		instrument_option |= INSTRUMENT_TIMER;
-	else if (es->analyze)
+	else if (es->exec)
 		instrument_option |= INSTRUMENT_ROWS;
 
 	if (es->buffers)
@@ -512,7 +518,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
 								dest, params, queryEnv, instrument_option);
 
 	/* Select execution options */
-	if (es->analyze)
+	if (es->exec)
 		eflags = 0;				/* default run-to-completion flags */
 	else
 		eflags = EXEC_FLAG_EXPLAIN_ONLY;
@@ -523,11 +529,11 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
 	ExecutorStart(queryDesc, eflags);
 
 	/* Execute the plan for statistics if asked for */
-	if (es->analyze)
+	if (es->exec)
 	{
 		ScanDirection dir;
 
-		/* EXPLAIN ANALYZE CREATE TABLE AS WITH NO DATA is weird */
+		/* EXPLAIN EXEC CREATE TABLE AS WITH NO DATA is weird */
 		if (into && into->skipData)
 			dir = NoMovementScanDirection;
 		else
@@ -556,7 +562,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
 	}
 
 	/* Print info about runtime of triggers */
-	if (es->analyze)
+	if (es->exec)
 		ExplainPrintTriggers(es, queryDesc);
 
 	/*
@@ -581,7 +587,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
 	PopActiveSnapshot();
 
 	/* We need a CCI just in case query expanded to multiple plans */
-	if (es->analyze)
+	if (es->exec)
 		CommandCounterIncrement();
 
 	totaltime += elapsed_time(&starttime);
@@ -592,7 +598,7 @@ ExplainOnePlan(PlannedStmt *plannedstmt, IntoClause *into, ExplainState *es,
 	 * user can set SUMMARY OFF to not have the timing information included in
 	 * the output).  By default, ANALYZE sets SUMMARY to true.
 	 */
-	if (es->summary && es->analyze)
+	if (es->summary && es->exec)
 		ExplainPropertyFloat("Execution Time", "ms", 1000.0 * totaltime, 3,
 							 es);
 
@@ -834,7 +840,7 @@ ExplainPrintJIT(ExplainState *es, int jit_flags,
 						 "Expressions", jit_flags & PGJIT_EXPR ? "true" : "false",
 						 "Deforming", jit_flags & PGJIT_DEFORM ? "true" : "false");
 
-		if (es->analyze && es->timing)
+		if (es->exec && es->timing)
 		{
 			appendStringInfoSpaces(es->str, es->indent * 2);
 			appendStringInfo(es->str,
@@ -860,7 +866,7 @@ ExplainPrintJIT(ExplainState *es, int jit_flags,
 		ExplainPropertyBool("Deforming", jit_flags & PGJIT_DEFORM, es);
 		ExplainCloseGroup("Options", "Options", true, es);
 
-		if (es->analyze && es->timing)
+		if (es->exec && es->timing)
 		{
 			ExplainOpenGroup("Timing", "Timing", true, es);
 
@@ -1505,7 +1511,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 	if (planstate->instrument)
 		InstrEndLoop(planstate->instrument);
 
-	if (es->analyze &&
+	if (es->exec &&
 		planstate->instrument && planstate->instrument->nloops > 0)
 	{
 		double		nloops = planstate->instrument->nloops;
@@ -1537,7 +1543,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			ExplainPropertyFloat("Actual Loops", NULL, nloops, 0, es);
 		}
 	}
-	else if (es->analyze)
+	else if (es->exec)
 	{
 		if (es->format == EXPLAIN_FORMAT_TEXT)
 			appendStringInfoString(es->str, " (never executed)");
@@ -1606,7 +1612,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			if (plan->qual)
 				show_instrumentation_count("Rows Removed by Filter", 1,
 										   planstate, es);
-			if (es->analyze)
+			if (es->exec)
 				ExplainPropertyFloat("Heap Fetches", NULL,
 									 planstate->instrument->ntuples2, 0, es);
 			break;
@@ -1624,7 +1630,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 			if (plan->qual)
 				show_instrumentation_count("Rows Removed by Filter", 1,
 										   planstate, es);
-			if (es->analyze)
+			if (es->exec)
 				show_tidbitmap_info((BitmapHeapScanState *) planstate, es);
 			break;
 		case T_SampleScan:
@@ -1658,7 +1664,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 				if (gather->initParam)
 					show_eval_params(gather->initParam, es);
 
-				if (es->analyze)
+				if (es->exec)
 				{
 					int			nworkers;
 
@@ -1704,7 +1710,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 				if (gm->initParam)
 					show_eval_params(gm->initParam, es);
 
-				if (es->analyze)
+				if (es->exec)
 				{
 					int			nworkers;
 
@@ -1869,7 +1875,7 @@ ExplainNode(PlanState *planstate, List *ancestors,
 		show_buffer_usage(es, &planstate->instrument->bufusage);
 
 	/* Show worker detail */
-	if (es->analyze && es->verbose && planstate->worker_instrument)
+	if (es->exec && es->verbose && planstate->worker_instrument)
 	{
 		WorkerInstrumentation *w = planstate->worker_instrument;
 		bool		opened_group = false;
@@ -2531,12 +2537,12 @@ show_tablesample(TableSampleClause *tsc, PlanState *planstate,
 }
 
 /*
- * If it's EXPLAIN ANALYZE, show tuplesort stats for a sort node
+ * If it's EXPLAIN EXEC, show tuplesort stats for a sort node
  */
 static void
 show_sort_info(SortState *sortstate, ExplainState *es)
 {
-	if (!es->analyze)
+	if (!es->exec)
 		return;
 
 	if (sortstate->sort_Done && sortstate->tuplesortstate != NULL)
@@ -2716,7 +2722,7 @@ show_hash_info(HashState *hashstate, ExplainState *es)
 }
 
 /*
- * If it's EXPLAIN ANALYZE, show exact/lossy pages for a BitmapHeapScan node
+ * If it's EXPLAIN EXEC, show exact/lossy pages for a BitmapHeapScan node
  */
 static void
 show_tidbitmap_info(BitmapHeapScanState *planstate, ExplainState *es)
@@ -2744,7 +2750,7 @@ show_tidbitmap_info(BitmapHeapScanState *planstate, ExplainState *es)
 }
 
 /*
- * If it's EXPLAIN ANALYZE, show instrumentation information for a plan node
+ * If it's EXPLAIN EXEC, show instrumentation information for a plan node
  *
  * "which" identifies which instrumentation counter to print
  */
@@ -2755,7 +2761,7 @@ show_instrumentation_count(const char *qlabel, int which,
 	double		nfiltered;
 	double		nloops;
 
-	if (!es->analyze || !planstate->instrument)
+	if (!es->exec || !planstate->instrument)
 		return;
 
 	if (which == 2)
@@ -3283,8 +3289,8 @@ show_modifytable_info(ModifyTableState *mtstate, List *ancestors,
 			show_instrumentation_count("Rows Removed by Conflict Filter", 1, &mtstate->ps, es);
 		}
 
-		/* EXPLAIN ANALYZE display of actual outcome for each tuple proposed */
-		if (es->analyze && mtstate->ps.instrument)
+		/* EXPLAIN EXEC display of actual outcome for each tuple proposed */
+		if (es->exec && mtstate->ps.instrument)
 		{
 			double		total;
 			double		insert_path;
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 2beb378145..8c4320df58 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -2365,7 +2365,7 @@ FindTriggerIncompatibleWithInheritance(TriggerDesc *trigdesc)
  *		trigdata: trigger descriptor.
  *		tgindx: trigger's index in finfo and instr arrays.
  *		finfo: array of cached trigger function call information.
- *		instr: optional array of EXPLAIN ANALYZE instrumentation state.
+ *		instr: optional array of EXPLAIN EXEC instrumentation state.
  *		per_tuple_context: memory context to execute the function in.
  *
  * Returns the tuple (or NULL) as returned by the function.
@@ -2406,7 +2406,7 @@ ExecCallTriggerFunc(TriggerData *trigdata,
 	Assert(finfo->fn_oid == trigdata->tg_trigger->tgfoid);
 
 	/*
-	 * If doing EXPLAIN ANALYZE, start charging time to this trigger.
+	 * If doing EXPLAIN EXEC, start charging time to this trigger.
 	 */
 	if (instr)
 		InstrStartNode(instr + tgindx);
@@ -2455,7 +2455,7 @@ ExecCallTriggerFunc(TriggerData *trigdata,
 						fcinfo->flinfo->fn_oid)));
 
 	/*
-	 * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
+	 * If doing EXPLAIN EXEC, stop charging time to this trigger, and count
 	 * one "tuple returned" (really the number of firings).
 	 */
 	if (instr)
@@ -4171,7 +4171,7 @@ afterTriggerDeleteHeadEventChunk(AfterTriggersQueryData *qs)
  *	rel: open relation for event.
  *	trigdesc: working copy of rel's trigger info.
  *	finfo: array of fmgr lookup cache entries (one per trigger in trigdesc).
- *	instr: array of EXPLAIN ANALYZE instrumentation nodes (one per trigger),
+ *	instr: array of EXPLAIN EXEC instrumentation nodes (one per trigger),
  *		or NULL if no instrumentation is wanted.
  *	per_tuple_context: memory context to call trigger function in.
  *	trig_tuple_slot1: scratch slot for tg_trigtuple (foreign tables only)
@@ -4216,7 +4216,7 @@ AfterTriggerExecute(EState *estate,
 		elog(ERROR, "could not find trigger %u", tgoid);
 
 	/*
-	 * If doing EXPLAIN ANALYZE, start charging time to this trigger. We want
+	 * If doing EXPLAIN EXEC, start charging time to this trigger. We want
 	 * to include time spent re-fetching tuples in the trigger cost.
 	 */
 	if (instr)
@@ -4359,7 +4359,7 @@ AfterTriggerExecute(EState *estate,
 		ExecClearTuple(LocTriggerData.tg_newslot);
 
 	/*
-	 * If doing EXPLAIN ANALYZE, stop charging time to this trigger, and count
+	 * If doing EXPLAIN EXEC, stop charging time to this trigger, and count
 	 * one "tuple returned" (really the number of firings).
 	 */
 	if (instr)
diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c
index ed7c0606bf..cbbc7e087b 100644
--- a/src/backend/executor/execMain.c
+++ b/src/backend/executor/execMain.c
@@ -390,7 +390,7 @@ standard_ExecutorRun(QueryDesc *queryDesc,
  *
  *		This routine must be called after the last ExecutorRun call.
  *		It performs cleanup such as firing AFTER triggers.  It is
- *		separate from ExecutorEnd because EXPLAIN ANALYZE needs to
+ *		separate from ExecutorEnd because EXPLAIN EXEC needs to
  *		include these actions in the total runtime.
  *
  *		We provide a function hook variable that lets loadable plugins
@@ -1365,7 +1365,7 @@ InitResultRelInfo(ResultRelInfo *resultRelInfo,
  * on other relations, which will be processed in the context of the outer
  * query.  For efficiency's sake, we want to have a ResultRelInfo for those
  * triggers too; that can avoid repeated re-opening of the relation.  (It
- * also provides a way for EXPLAIN ANALYZE to report the runtimes of such
+ * also provides a way for EXPLAIN EXEC to report the runtimes of such
  * triggers.)  So we make additional ResultRelInfo's as needed, and save them
  * in es_trig_target_relations.
  */
diff --git a/src/backend/executor/execParallel.c b/src/backend/executor/execParallel.c
index 3d4b01cb4d..adcccbbdcb 100644
--- a/src/backend/executor/execParallel.c
+++ b/src/backend/executor/execParallel.c
@@ -273,11 +273,11 @@ ExecParallelEstimate(PlanState *planstate, ExecParallelEstimateContext *e)
 									 e->pcxt);
 			break;
 		case T_HashState:
-			/* even when not parallel-aware, for EXPLAIN ANALYZE */
+			/* even when not parallel-aware, for EXPLAIN EXEC */
 			ExecHashEstimate((HashState *) planstate, e->pcxt);
 			break;
 		case T_SortState:
-			/* even when not parallel-aware, for EXPLAIN ANALYZE */
+			/* even when not parallel-aware, for EXPLAIN EXEC */
 			ExecSortEstimate((SortState *) planstate, e->pcxt);
 			break;
 
@@ -486,11 +486,11 @@ ExecParallelInitializeDSM(PlanState *planstate,
 										  d->pcxt);
 			break;
 		case T_HashState:
-			/* even when not parallel-aware, for EXPLAIN ANALYZE */
+			/* even when not parallel-aware, for EXPLAIN EXEC */
 			ExecHashInitializeDSM((HashState *) planstate, d->pcxt);
 			break;
 		case T_SortState:
-			/* even when not parallel-aware, for EXPLAIN ANALYZE */
+			/* even when not parallel-aware, for EXPLAIN EXEC */
 			ExecSortInitializeDSM((SortState *) planstate, d->pcxt);
 			break;
 
@@ -1294,11 +1294,11 @@ ExecParallelInitializeWorker(PlanState *planstate, ParallelWorkerContext *pwcxt)
 											 pwcxt);
 			break;
 		case T_HashState:
-			/* even when not parallel-aware, for EXPLAIN ANALYZE */
+			/* even when not parallel-aware, for EXPLAIN EXEC */
 			ExecHashInitializeWorker((HashState *) planstate, pwcxt);
 			break;
 		case T_SortState:
-			/* even when not parallel-aware, for EXPLAIN ANALYZE */
+			/* even when not parallel-aware, for EXPLAIN EXEC */
 			ExecSortInitializeWorker((SortState *) planstate, pwcxt);
 			break;
 
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 64eec91f8b..c6a2906db5 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -193,7 +193,7 @@ MultiExecPrivateHash(HashState *node)
 	if (hashtable->nbuckets != hashtable->nbuckets_optimal)
 		ExecHashIncreaseNumBuckets(hashtable);
 
-	/* Account for the buckets in spaceUsed (reported in EXPLAIN ANALYZE) */
+	/* Account for the buckets in spaceUsed (reported in EXPLAIN EXEC) */
 	hashtable->spaceUsed += hashtable->nbuckets * sizeof(HashJoinTuple);
 	if (hashtable->spaceUsed > hashtable->spacePeak)
 		hashtable->spacePeak = hashtable->spaceUsed;
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index 92d0507949..cd5e834ae9 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -3215,19 +3215,19 @@ GetCommandLogLevel(Node *parsetree)
 		case T_ExplainStmt:
 			{
 				ExplainStmt *stmt = (ExplainStmt *) parsetree;
-				bool		analyze = false;
+				bool		exec = false;
 				ListCell   *lc;
 
-				/* Look through an EXPLAIN ANALYZE to the contained stmt */
+				/* Look through an EXPLAIN EXEC to the contained stmt */
 				foreach(lc, stmt->options)
 				{
 					DefElem    *opt = (DefElem *) lfirst(lc);
 
-					if (strcmp(opt->defname, "analyze") == 0)
-						analyze = defGetBoolean(opt);
+					if (strcmp(opt->defname, "exec") == 0)
+						exec = defGetBoolean(opt);
 					/* don't "break", as explain.c will use the last value */
 				}
-				if (analyze)
+				if (exec)
 					return GetCommandLogLevel(stmt->query);
 
 				/* Plain EXPLAIN isn't so interesting */
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index e4c03de221..ac0673dafc 100644
--- a/src/bin/psql/tab-complete.c
+++ b/src/bin/psql/tab-complete.c
@@ -2848,11 +2848,11 @@ psql_completion(const char *text, int start, int end)
 
 /*
  * EXPLAIN [ ( option [, ...] ) ] statement
- * EXPLAIN [ ANALYZE ] [ VERBOSE ] statement
+ * EXPLAIN [ EXEC ] [ VERBOSE ] statement
  */
 	else if (Matches("EXPLAIN"))
 		COMPLETE_WITH("SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE",
-					  "ANALYZE", "VERBOSE");
+					  "EXEC", "VERBOSE");
 	else if (HeadMatches("EXPLAIN", "(*") &&
 			 !HeadMatches("EXPLAIN", "(*)"))
 	{
@@ -2862,19 +2862,23 @@ psql_completion(const char *text, int start, int end)
 		 * one word, so the above test is correct.
 		 */
 		if (ends_with(prev_wd, '(') || ends_with(prev_wd, ','))
-			COMPLETE_WITH("ANALYZE", "VERBOSE", "COSTS", "BUFFERS",
+			COMPLETE_WITH("EXEC", "VERBOSE", "COSTS", "BUFFERS",
 						  "TIMING", "SUMMARY", "FORMAT");
-		else if (TailMatches("ANALYZE|VERBOSE|COSTS|BUFFERS|TIMING|SUMMARY"))
+		else if (TailMatches("EXEC|VERBOSE|COSTS|BUFFERS|TIMING|SUMMARY"))
 			COMPLETE_WITH("ON", "OFF");
 		else if (TailMatches("FORMAT"))
 			COMPLETE_WITH("TEXT", "XML", "JSON", "YAML");
 	}
-	else if (Matches("EXPLAIN", "ANALYZE"))
+	else if (Matches("EXPLAIN", "EXEC") ||
+			 Matches("EXPLAIN", "ANALYZE") ||
+			 Matches("EXPLAIN", "ANALYSE"))
 		COMPLETE_WITH("SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE",
 					  "VERBOSE");
 	else if (Matches("EXPLAIN", "(*)") ||
 			 Matches("EXPLAIN", "VERBOSE") ||
-			 Matches("EXPLAIN", "ANALYZE", "VERBOSE"))
+			 Matches("EXPLAIN", "EXEC", "VERBOSE") ||
+			 Matches("EXPLAIN", "ANALYZE", "VERBOSE") ||
+			 Matches("EXPLAIN", "ANALYSE", "VERBOSE"))
 		COMPLETE_WITH("SELECT", "INSERT", "DELETE", "UPDATE", "DECLARE");
 
 /* FETCH && MOVE */
diff --git a/doc/src/sgml/auto-explain.sgml b/doc/src/sgml/auto-explain.sgml
index 3d619d4a3d..3025a57232 100644
--- a/doc/src/sgml/auto-explain.sgml
+++ b/doc/src/sgml/auto-explain.sgml
@@ -65,14 +65,14 @@ LOAD 'auto_explain';
 
    <varlistentry>
     <term>
-     <varname>auto_explain.log_analyze</varname> (<type>boolean</type>)
+     <varname>auto_explain.log_exec</varname> (<type>boolean</type>)
      <indexterm>
-      <primary><varname>auto_explain.log_analyze</varname> configuration parameter</primary>
+      <primary><varname>auto_explain.log_exec</varname> configuration parameter</primary>
      </indexterm>
     </term>
     <listitem>
      <para>
-      <varname>auto_explain.log_analyze</varname> causes <command>EXPLAIN ANALYZE</command>
+      <varname>auto_explain.log_exec</varname> causes <command>EXPLAIN EXEC</command>
       output, rather than just <command>EXPLAIN</command> output, to be printed
       when an execution plan is logged. This parameter is off by default.
       Only superusers can change this setting.
@@ -102,7 +102,7 @@ LOAD 'auto_explain';
       usage statistics are printed when an execution plan is logged; it's
       equivalent to the <literal>BUFFERS</literal> option of <command>EXPLAIN</command>.
       This parameter has no effect
-      unless <varname>auto_explain.log_analyze</varname> is enabled.
+      unless <varname>auto_explain.log_exec</varname> is enabled.
       This parameter is off by default.
       Only superusers can change this setting.
      </para>
@@ -126,7 +126,7 @@ LOAD 'auto_explain';
       parameter to off when only actual row counts, and not exact times, are
       needed.
       This parameter has no effect
-      unless <varname>auto_explain.log_analyze</varname> is enabled.
+      unless <varname>auto_explain.log_exec</varname> is enabled.
       This parameter is on by default.
       Only superusers can change this setting.
      </para>
@@ -145,7 +145,7 @@ LOAD 'auto_explain';
       <varname>auto_explain.log_triggers</varname> causes trigger
       execution statistics to be included when an execution plan is logged.
       This parameter has no effect
-      unless <varname>auto_explain.log_analyze</varname> is enabled.
+      unless <varname>auto_explain.log_exec</varname> is enabled.
       This parameter is off by default.
       Only superusers can change this setting.
      </para>
@@ -283,7 +283,7 @@ auto_explain.log_min_duration = '3s'
 <programlisting>
 postgres=# LOAD 'auto_explain';
 postgres=# SET auto_explain.log_min_duration = 0;
-postgres=# SET auto_explain.log_analyze = true;
+postgres=# SET auto_explain.log_exec = true;
 postgres=# SELECT count(*)
            FROM pg_class, pg_index
            WHERE oid = indrelid AND indisunique;
diff --git a/doc/src/sgml/bloom.sgml b/doc/src/sgml/bloom.sgml
index 6eeaddee09..5b4ede2db1 100644
--- a/doc/src/sgml/bloom.sgml
+++ b/doc/src/sgml/bloom.sgml
@@ -129,7 +129,7 @@ CREATE INDEX
   <para>
    A sequential scan over this large table takes a long time:
 <programlisting>
-=# EXPLAIN ANALYZE SELECT * FROM tbloom WHERE i2 = 898732 AND i5 = 123451;
+=# EXPLAIN EXEC SELECT * FROM tbloom WHERE i2 = 898732 AND i5 = 123451;
                                                  QUERY PLAN
 ------------------------------------------------------------------------------------------------------------
  Seq Scan on tbloom  (cost=0.00..213694.08 rows=1 width=24) (actual time=1445.438..1445.438 rows=0 loops=1)
@@ -145,7 +145,7 @@ CREATE INDEX
    So the planner will usually select an index scan if possible.
    With a btree index, we get results like this:
 <programlisting>
-=# EXPLAIN ANALYZE SELECT * FROM tbloom WHERE i2 = 898732 AND i5 = 123451;
+=# EXPLAIN EXEC SELECT * FROM tbloom WHERE i2 = 898732 AND i5 = 123451;
                                                            QUERY PLAN
 --------------------------------------------------------------------------------------------------------------------------------
  Index Only Scan using btreeidx on tbloom  (cost=0.56..298311.96 rows=1 width=24) (actual time=445.709..445.709 rows=0 loops=1)
@@ -160,7 +160,7 @@ CREATE INDEX
   <para>
    Bloom is better than btree in handling this type of search:
 <programlisting>
-=# EXPLAIN ANALYZE SELECT * FROM tbloom WHERE i2 = 898732 AND i5 = 123451;
+=# EXPLAIN EXEC SELECT * FROM tbloom WHERE i2 = 898732 AND i5 = 123451;
                                                         QUERY PLAN
 ---------------------------------------------------------------------------------------------------------------------------
  Bitmap Heap Scan on tbloom  (cost=178435.39..178439.41 rows=1 width=24) (actual time=76.698..76.698 rows=0 loops=1)
@@ -187,7 +187,7 @@ CREATE INDEX
    A better strategy for btree is to create a separate index on each column.
    Then the planner will choose something like this:
 <programlisting>
-=# EXPLAIN ANALYZE SELECT * FROM tbloom WHERE i2 = 898732 AND i5 = 123451;
+=# EXPLAIN EXEC SELECT * FROM tbloom WHERE i2 = 898732 AND i5 = 123451;
                                                           QUERY PLAN
 ------------------------------------------------------------------------------------------------------------------------------
  Bitmap Heap Scan on tbloom  (cost=9.29..13.30 rows=1 width=24) (actual time=0.148..0.148 rows=0 loops=1)
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index 84341a30e5..41e9a64182 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -6441,7 +6441,7 @@ log_line_prefix = '%m [%p] %q%u@%d/%a '
         <command>UPDATE</command>, <command>DELETE</command>, <command>TRUNCATE</command>,
         and <command>COPY FROM</command>.
         <command>PREPARE</command>, <command>EXECUTE</command>, and
-        <command>EXPLAIN ANALYZE</command> statements are also logged if their
+        <command>EXPLAIN EXEC</command> statements are also logged if their
         contained command is of an appropriate type.  For clients using
         extended query protocol, logging occurs when an Execute message
         is received, and values of the Bind parameters are included
diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index a0a7435a03..407f94dfbe 100644
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -4498,7 +4498,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
        performed here for parameter values which are known during the
        initialization phase of execution.  Partitions which are pruned
        during this stage will not show up in the query's
-       <command>EXPLAIN</command> or <command>EXPLAIN ANALYZE</command>.
+       <command>EXPLAIN</command> or <command>EXPLAIN EXEC</command>.
        It is possible to determine the number of partitions which were
        removed during this phase by observing the
        <quote>Subplans Removed</quote> property in the
@@ -4518,7 +4518,7 @@ EXPLAIN SELECT count(*) FROM measurement WHERE logdate &gt;= DATE '2008-01-01';
        execution parameters being used by partition pruning changes.
        Determining if partitions were pruned during this phase requires
        careful inspection of the <literal>loops</literal> property in
-       the <command>EXPLAIN ANALYZE</command> output.  Subplans
+       the <command>EXPLAIN EXEC</command> output.  Subplans
        corresponding to different partitions may have different values
        for it depending on how many times each of them was pruned during
        execution.  Some may be shown as <literal>(never executed)</literal>
diff --git a/doc/src/sgml/fdwhandler.sgml b/doc/src/sgml/fdwhandler.sgml
index 27b94fb611..0f1e80b3b9 100644
--- a/doc/src/sgml/fdwhandler.sgml
+++ b/doc/src/sgml/fdwhandler.sgml
@@ -937,7 +937,7 @@ IterateDirectModify(ForeignScanState *node);
      Whether the query has the clause or not, the query's reported row count
      must be incremented by the FDW itself.  When the query doesn't have the
      clause, the FDW must also increment the row count for the
-     <structname>ForeignScanState</structname> node in the <command>EXPLAIN ANALYZE</command>
+     <structname>ForeignScanState</structname> node in the <command>EXPLAIN EXEC</command>
      case.
     </para>
 
@@ -1123,7 +1123,7 @@ ExplainForeignScan(ForeignScanState *node,
      The flag fields in <literal>es</literal> can be used to determine what to
      print, and the state of the <structname>ForeignScanState</structname> node
      can be inspected to provide run-time statistics in the <command>EXPLAIN
-     ANALYZE</command> case.
+     EXEC</command> case.
     </para>
 
     <para>
@@ -1148,7 +1148,7 @@ ExplainForeignModify(ModifyTableState *mtstate,
      The flag fields in <literal>es</literal> can be used to determine what to
      print, and the state of the <structname>ModifyTableState</structname> node
      can be inspected to provide run-time statistics in the <command>EXPLAIN
-     ANALYZE</command> case.  The first four arguments are the same as for
+     EXEC</command> case.  The first four arguments are the same as for
      <function>BeginForeignModify</function>.
     </para>
 
@@ -1172,7 +1172,7 @@ ExplainDirectModify(ForeignScanState *node,
      The flag fields in <literal>es</literal> can be used to determine what to
      print, and the state of the <structname>ForeignScanState</structname> node
      can be inspected to provide run-time statistics in the <command>EXPLAIN
-     ANALYZE</command> case.
+     EXEC</command> case.
     </para>
 
     <para>
diff --git a/doc/src/sgml/indices.sgml b/doc/src/sgml/indices.sgml
index 95c0a1926c..decaaa6209 100644
--- a/doc/src/sgml/indices.sgml
+++ b/doc/src/sgml/indices.sgml
@@ -1498,7 +1498,7 @@ CREATE INDEX test1c_content_y_index ON test1c (content COLLATE "y");
      possibilities: Either the system is right and using the index is
      indeed not appropriate, or the cost estimates of the query plans
      are not reflecting reality.  So you should time your query with
-     and without indexes.  The <command>EXPLAIN ANALYZE</command>
+     and without indexes.  The <command>EXPLAIN EXEC</command>
      command can be useful here.
     </para>
    </listitem>
diff --git a/doc/src/sgml/jit.sgml b/doc/src/sgml/jit.sgml
index a21a07ef71..cb288d8b73 100644
--- a/doc/src/sgml/jit.sgml
+++ b/doc/src/sgml/jit.sgml
@@ -145,7 +145,7 @@
    <acronym>JIT</acronym> is used or not.  As an example, here is a query that
    is not using <acronym>JIT</acronym>:
 <screen>
-=# EXPLAIN ANALYZE SELECT SUM(relpages) FROM pg_class;
+=# EXPLAIN EXEC SELECT SUM(relpages) FROM pg_class;
                                                  QUERY PLAN
 -------------------------------------------------------------------------------------------------------------
  Aggregate  (cost=16.27..16.29 rows=1 width=8) (actual time=0.303..0.303 rows=1 loops=1)
@@ -161,7 +161,7 @@
 <screen>
 =# SET jit_above_cost = 10;
 SET
-=# EXPLAIN ANALYZE SELECT SUM(relpages) FROM pg_class;
+=# EXPLAIN EXEC SELECT SUM(relpages) FROM pg_class;
                                                  QUERY PLAN
 -------------------------------------------------------------------------------------------------------------
  Aggregate  (cost=16.27..16.29 rows=1 width=8) (actual time=6.049..6.049 rows=1 loops=1)
diff --git a/doc/src/sgml/parallel.sgml b/doc/src/sgml/parallel.sgml
index af5d48a5c7..b3e2285f05 100644
--- a/doc/src/sgml/parallel.sgml
+++ b/doc/src/sgml/parallel.sgml
@@ -442,7 +442,7 @@ EXPLAIN SELECT * FROM pgbench_accounts WHERE filler LIKE '%x%';
   </para>
 
   <para>
-    When executing a parallel plan, you can use <literal>EXPLAIN (ANALYZE,
+    When executing a parallel plan, you can use <literal>EXPLAIN (EXEC,
     VERBOSE)</literal> to display per-worker statistics for each plan node.
     This may be useful in determining whether the work is being evenly
     distributed between all plan nodes and more generally in understanding the
diff --git a/doc/src/sgml/perform.sgml b/doc/src/sgml/perform.sgml
index a84be85159..3a890c5a71 100644
--- a/doc/src/sgml/perform.sgml
+++ b/doc/src/sgml/perform.sgml
@@ -533,25 +533,25 @@ WHERE t1.unique1 &lt; 100 AND t1.unique2 = t2.unique2;
     which shows that the planner thinks that sorting <literal>onek</literal> by
     index-scanning is about 12% more expensive than sequential-scan-and-sort.
     Of course, the next question is whether it's right about that.
-    We can investigate that using <command>EXPLAIN ANALYZE</command>, as discussed
+    We can investigate that using <command>EXPLAIN EXEC</command>, as discussed
     below.
    </para>
 
   </sect2>
 
-  <sect2 id="using-explain-analyze">
-   <title><command>EXPLAIN ANALYZE</command></title>
+  <sect2 id="using-explain-exec">
+   <title><command>EXPLAIN EXEC</command></title>
 
    <para>
     It is possible to check the accuracy of the planner's estimates
-    by using <command>EXPLAIN</command>'s <literal>ANALYZE</literal> option.  With this
+    by using <command>EXPLAIN</command>'s <literal>EXEC</literal> option.  With this
     option, <command>EXPLAIN</command> actually executes the query, and then displays
     the true row counts and true run time accumulated within each plan node,
     along with the same estimates that a plain <command>EXPLAIN</command>
     shows.  For example, we might get a result like this:
 
 <screen>
-EXPLAIN ANALYZE SELECT *
+EXPLAIN EXEC SELECT *
 FROM tenk1 t1, tenk2 t2
 WHERE t1.unique1 &lt; 10 AND t1.unique2 = t2.unique2;
 
@@ -590,12 +590,12 @@ WHERE t1.unique1 &lt; 10 AND t1.unique2 = t2.unique2;
    </para>
 
    <para>
-    In some cases <command>EXPLAIN ANALYZE</command> shows additional execution
+    In some cases <command>EXPLAIN EXEC</command> shows additional execution
     statistics beyond the plan node execution times and row counts.
     For example, Sort and Hash nodes provide extra information:
 
 <screen>
-EXPLAIN ANALYZE SELECT *
+EXPLAIN EXEC SELECT *
 FROM tenk1 t1, tenk2 t2
 WHERE t1.unique1 &lt; 100 AND t1.unique2 = t2.unique2 ORDER BY t1.fivethous;
 
@@ -630,7 +630,7 @@ WHERE t1.unique1 &lt; 100 AND t1.unique2 = t2.unique2 ORDER BY t1.fivethous;
     filter condition:
 
 <screen>
-EXPLAIN ANALYZE SELECT * FROM tenk1 WHERE ten &lt; 7;
+EXPLAIN EXEC SELECT * FROM tenk1 WHERE ten &lt; 7;
 
                                                QUERY PLAN
 ---------------------------------------------------------------------------------------------------------
@@ -653,7 +653,7 @@ EXPLAIN ANALYZE SELECT * FROM tenk1 WHERE ten &lt; 7;
     specific point:
 
 <screen>
-EXPLAIN ANALYZE SELECT * FROM polygon_tbl WHERE f1 @&gt; polygon '(0.5,2.0)';
+EXPLAIN EXEC SELECT * FROM polygon_tbl WHERE f1 @&gt; polygon '(0.5,2.0)';
 
                                               QUERY PLAN
 ------------------------------------------------------------------------------------------------------
@@ -672,7 +672,7 @@ EXPLAIN ANALYZE SELECT * FROM polygon_tbl WHERE f1 @&gt; polygon '(0.5,2.0)';
 <screen>
 SET enable_seqscan TO off;
 
-EXPLAIN ANALYZE SELECT * FROM polygon_tbl WHERE f1 @&gt; polygon '(0.5,2.0)';
+EXPLAIN EXEC SELECT * FROM polygon_tbl WHERE f1 @&gt; polygon '(0.5,2.0)';
 
                                                         QUERY PLAN
 --------------------------------------------------------------------------------------------------------------------------
@@ -695,7 +695,7 @@ EXPLAIN ANALYZE SELECT * FROM polygon_tbl WHERE f1 @&gt; polygon '(0.5,2.0)';
     <literal>ANALYZE</literal> to get even more run time statistics:
 
 <screen>
-EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM tenk1 WHERE unique1 &lt; 100 AND unique2 &gt; 9000;
+EXPLAIN (EXEC, BUFFERS) SELECT * FROM tenk1 WHERE unique1 &lt; 100 AND unique2 &gt; 9000;
 
                                                            QUERY PLAN
 ---------------------------------------------------------------------------------------------------------------------------------
@@ -719,7 +719,7 @@ EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM tenk1 WHERE unique1 &lt; 100 AND unique
    </para>
 
    <para>
-    Keep in mind that because <command>EXPLAIN ANALYZE</command> actually
+    Keep in mind that because <command>EXPLAIN EXEC</command> actually
     runs the query, any side-effects will happen as usual, even though
     whatever results the query might output are discarded in favor of
     printing the <command>EXPLAIN</command> data.  If you want to analyze a
@@ -729,7 +729,7 @@ EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM tenk1 WHERE unique1 &lt; 100 AND unique
 <screen>
 BEGIN;
 
-EXPLAIN ANALYZE UPDATE tenk1 SET hundred = hundred + 1 WHERE unique1 &lt; 100;
+EXPLAIN EXEC UPDATE tenk1 SET hundred = hundred + 1 WHERE unique1 &lt; 100;
 
                                                            QUERY PLAN
 --------------------------------------------------------------------------------------------------------------------------------
@@ -795,13 +795,13 @@ EXPLAIN UPDATE parent SET f2 = f2 + 1 WHERE f1 = 101;
 
    <para>
     The <literal>Planning time</literal> shown by <command>EXPLAIN
-    ANALYZE</command> is the time it took to generate the query plan from the
+    EXEC</command> is the time it took to generate the query plan from the
     parsed query and optimize it. It does not include parsing or rewriting.
    </para>
 
    <para>
     The <literal>Execution time</literal> shown by <command>EXPLAIN
-    ANALYZE</command> includes executor start-up and shut-down time, as well
+    EXEC</command> includes executor start-up and shut-down time, as well
     as the time to run any triggers that are fired, but it does not include
     parsing, rewriting, or planning time.
     Time spent executing <literal>BEFORE</literal> triggers, if any, is included in
@@ -812,7 +812,7 @@ EXPLAIN UPDATE parent SET f2 = f2 + 1 WHERE f1 = 101;
     (either <literal>BEFORE</literal> or <literal>AFTER</literal>) is also shown separately.
     Note that deferred constraint triggers will not be executed
     until end of transaction and are thus not considered at all by
-    <command>EXPLAIN ANALYZE</command>.
+    <command>EXPLAIN EXEC</command>.
    </para>
 
   </sect2>
@@ -822,11 +822,11 @@ EXPLAIN UPDATE parent SET f2 = f2 + 1 WHERE f1 = 101;
 
    <para>
     There are two significant ways in which run times measured by
-    <command>EXPLAIN ANALYZE</command> can deviate from normal execution of
+    <command>EXPLAIN EXEC</command> can deviate from normal execution of
     the same query.  First, since no output rows are delivered to the client,
     network transmission costs and I/O conversion costs are not included.
     Second, the measurement overhead added by <command>EXPLAIN
-    ANALYZE</command> can be significant, especially on machines with slow
+    EXEC</command> can be significant, especially on machines with slow
     <function>gettimeofday()</function> operating-system calls. You can use the
     <xref linkend="pgtesttiming"/> tool to measure the overhead of timing
     on your system.
@@ -853,7 +853,7 @@ EXPLAIN UPDATE parent SET f2 = f2 + 1 WHERE f1 = 101;
     effect.  For example, in the <literal>LIMIT</literal> query we used before,
 
 <screen>
-EXPLAIN ANALYZE SELECT * FROM tenk1 WHERE unique1 &lt; 100 AND unique2 &gt; 9000 LIMIT 2;
+EXPLAIN EXEC SELECT * FROM tenk1 WHERE unique1 &lt; 100 AND unique2 &gt; 9000 LIMIT 2;
 
                                                           QUERY PLAN
 -------------------------------------------------------------------------------------------------------------------------------
@@ -883,7 +883,7 @@ EXPLAIN ANALYZE SELECT * FROM tenk1 WHERE unique1 &lt; 100 AND unique2 &gt; 9000
     of one child, with results like those mentioned for <literal>LIMIT</literal>.
     Also, if the outer (first) child contains rows with duplicate key values,
     the inner (second) child is backed up and rescanned for the portion of its
-    rows matching that key value.  <command>EXPLAIN ANALYZE</command> counts these
+    rows matching that key value.  <command>EXPLAIN EXEC</command> counts these
     repeated emissions of the same inner rows as if they were real additional
     rows.  When there are many outer duplicates, the reported actual row count
     for the inner child plan node can be significantly larger than the number
diff --git a/doc/src/sgml/planstats.sgml b/doc/src/sgml/planstats.sgml
index 4b1d3f4952..3a4405c850 100644
--- a/doc/src/sgml/planstats.sgml
+++ b/doc/src/sgml/planstats.sgml
@@ -489,7 +489,7 @@ SELECT relpages, reltuples FROM pg_class WHERE relname = 't';
     condition on the <structfield>a</structfield> column:
 
 <programlisting>
-EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1;
+EXPLAIN (EXEC, TIMING OFF) SELECT * FROM t WHERE a = 1;
                                  QUERY PLAN                                  
 -------------------------------------------------------------------------------
  Seq Scan on t  (cost=0.00..170.00 rows=100 width=8) (actual rows=100 loops=1)
@@ -506,7 +506,7 @@ EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1;
     condition on both columns, combining them with <literal>AND</literal>:
 
 <programlisting>
-EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 1;
+EXPLAIN (EXEC, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 1;
                                  QUERY PLAN                                  
 -----------------------------------------------------------------------------
  Seq Scan on t  (cost=0.00..195.00 rows=1 width=8) (actual rows=100 loops=1)
@@ -530,7 +530,7 @@ EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 1;
 <programlisting>
 CREATE STATISTICS stts (dependencies) ON a, b FROM t;
 ANALYZE t;
-EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 1;
+EXPLAIN (EXEC, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 1;
                                   QUERY PLAN                                   
 -------------------------------------------------------------------------------
  Seq Scan on t  (cost=0.00..195.00 rows=100 width=8) (actual rows=100 loops=1)
@@ -551,7 +551,7 @@ EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 1;
     estimated number of rows returned by the HashAggregate node) is very
     accurate:
 <programlisting>
-EXPLAIN (ANALYZE, TIMING OFF) SELECT COUNT(*) FROM t GROUP BY a;
+EXPLAIN (EXEC, TIMING OFF) SELECT COUNT(*) FROM t GROUP BY a;
                                        QUERY PLAN                                        
 -----------------------------------------------------------------------------------------
  HashAggregate  (cost=195.00..196.00 rows=100 width=12) (actual rows=100 loops=1)
@@ -562,7 +562,7 @@ EXPLAIN (ANALYZE, TIMING OFF) SELECT COUNT(*) FROM t GROUP BY a;
     groups in a query with two columns in <command>GROUP BY</command>, as
     in the following example, is off by an order of magnitude:
 <programlisting>
-EXPLAIN (ANALYZE, TIMING OFF) SELECT COUNT(*) FROM t GROUP BY a, b;
+EXPLAIN (EXEC, TIMING OFF) SELECT COUNT(*) FROM t GROUP BY a, b;
                                        QUERY PLAN                                        
 --------------------------------------------------------------------------------------------
  HashAggregate  (cost=220.00..230.00 rows=1000 width=16) (actual rows=100 loops=1)
@@ -575,7 +575,7 @@ EXPLAIN (ANALYZE, TIMING OFF) SELECT COUNT(*) FROM t GROUP BY a, b;
 DROP STATISTICS stts;
 CREATE STATISTICS stts (dependencies, ndistinct) ON a, b FROM t;
 ANALYZE t;
-EXPLAIN (ANALYZE, TIMING OFF) SELECT COUNT(*) FROM t GROUP BY a, b;
+EXPLAIN (EXEC, TIMING OFF) SELECT COUNT(*) FROM t GROUP BY a, b;
                                        QUERY PLAN                                        
 --------------------------------------------------------------------------------------------
  HashAggregate  (cost=220.00..221.00 rows=100 width=16) (actual rows=100 loops=1)
@@ -615,7 +615,7 @@ EXPLAIN (ANALYZE, TIMING OFF) SELECT COUNT(*) FROM t GROUP BY a, b;
 DROP STATISTICS stts;
 CREATE STATISTICS stts2 (mcv) ON a, b FROM t;
 ANALYZE t;
-EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 1;
+EXPLAIN (EXEC, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 1;
                                    QUERY PLAN
 -------------------------------------------------------------------------------
  Seq Scan on t  (cost=0.00..195.00 rows=100 width=8) (actual rows=100 loops=1)
@@ -672,7 +672,7 @@ SELECT m.* FROM pg_statistic_ext,
     to decide which combinations are compatible.
 
 <programlisting>
-EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 10;
+EXPLAIN (EXEC, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 10;
                                  QUERY PLAN
 ---------------------------------------------------------------------------
  Seq Scan on t  (cost=0.00..195.00 rows=1 width=8) (actual rows=0 loops=1)
@@ -685,7 +685,7 @@ EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a = 1 AND b = 10;
     example range query, presented earlier:
 
 <programlisting>
-EXPLAIN (ANALYZE, TIMING OFF) SELECT * FROM t WHERE a &lt;= 49 AND b &gt; 49;
+EXPLAIN (EXEC, TIMING OFF) SELECT * FROM t WHERE a &lt;= 49 AND b &gt; 49;
                                 QUERY PLAN
 ---------------------------------------------------------------------------
  Seq Scan on t  (cost=0.00..195.00 rows=1 width=8) (actual rows=0 loops=1)
diff --git a/doc/src/sgml/ref/create_statistics.sgml b/doc/src/sgml/ref/create_statistics.sgml
index ae1d8024a4..18ca19fa8f 100644
--- a/doc/src/sgml/ref/create_statistics.sgml
+++ b/doc/src/sgml/ref/create_statistics.sgml
@@ -147,14 +147,14 @@ INSERT INTO t1 SELECT i/100, i/500
 ANALYZE t1;
 
 -- the number of matching rows will be drastically underestimated:
-EXPLAIN ANALYZE SELECT * FROM t1 WHERE (a = 1) AND (b = 0);
+EXPLAIN EXEC SELECT * FROM t1 WHERE (a = 1) AND (b = 0);
 
 CREATE STATISTICS s1 (dependencies) ON a, b FROM t1;
 
 ANALYZE t1;
 
 -- now the row count estimate is more accurate:
-EXPLAIN ANALYZE SELECT * FROM t1 WHERE (a = 1) AND (b = 0);
+EXPLAIN EXEC SELECT * FROM t1 WHERE (a = 1) AND (b = 0);
 </programlisting>
 
    Without functional-dependency statistics, the planner would assume
@@ -183,10 +183,10 @@ CREATE STATISTICS s2 (mcv) ON (a, b) FROM t2;
 ANALYZE t2;
 
 -- valid combination (found in MCV)
-EXPLAIN ANALYZE SELECT * FROM t2 WHERE (a = 1) AND (b = 1);
+EXPLAIN EXEC SELECT * FROM t2 WHERE (a = 1) AND (b = 1);
 
 -- invalid combination (not found in MCV)
-EXPLAIN ANALYZE SELECT * FROM t2 WHERE (a = 1) AND (b = 2);
+EXPLAIN EXEC SELECT * FROM t2 WHERE (a = 1) AND (b = 2);
 </programlisting>
 
    The MCV list gives the planner more detailed information about the
diff --git a/doc/src/sgml/ref/pgtesttiming.sgml b/doc/src/sgml/ref/pgtesttiming.sgml
index 545a934cf8..4470435dd2 100644
--- a/doc/src/sgml/ref/pgtesttiming.sgml
+++ b/doc/src/sgml/ref/pgtesttiming.sgml
@@ -30,7 +30,7 @@
   <application>pg_test_timing</application> is a tool to measure the timing overhead
   on your system and confirm that the system time never moves backwards.
   Systems that are slow to collect timing data can give less accurate
-  <command>EXPLAIN ANALYZE</command> results.
+  <command>EXPLAIN EXEC</command> results.
  </para>
  </refsect1>
 
@@ -120,7 +120,7 @@ Histogram of timing durations:
 
   <para>
    When the query executor is running a statement using
-   <command>EXPLAIN ANALYZE</command>, individual operations are timed as well
+   <command>EXPLAIN EXEC</command>, individual operations are timed as well
    as showing a summary.  The overhead of your system can be checked by
    counting rows with the <application>psql</application> program:
 
@@ -128,13 +128,13 @@ Histogram of timing durations:
 CREATE TABLE t AS SELECT * FROM generate_series(1,100000);
 \timing
 SELECT COUNT(*) FROM t;
-EXPLAIN ANALYZE SELECT COUNT(*) FROM t;
+EXPLAIN EXEC SELECT COUNT(*) FROM t;
 </screen>
   </para>
 
   <para>
    The i7-860 system measured runs the count query in 9.8 ms while
-   the <command>EXPLAIN ANALYZE</command> version takes 16.6 ms, each
+   the <command>EXPLAIN EXEC</command> version takes 16.6 ms, each
    processing just over 100,000 rows.  That 6.8 ms difference means the timing
    overhead per row is 68 ns, about twice what pg_test_timing estimated it
    would be.  Even that relatively small amount of overhead is making the fully
@@ -169,12 +169,12 @@ Histogram of timing durations:
   </para>
 
   <para>
-   In this configuration, the sample <command>EXPLAIN ANALYZE</command> above
+   In this configuration, the sample <command>EXPLAIN EXEC</command> above
    takes 115.9 ms.  That's 1061 ns of timing overhead, again a small multiple
    of what's measured directly by this utility.  That much timing overhead
    means the actual query itself is only taking a tiny fraction of the
    accounted for time, most of it is being consumed in overhead instead.  In
-   this configuration, any <command>EXPLAIN ANALYZE</command> totals involving
+   this configuration, any <command>EXPLAIN EXEC</command> totals involving
    many timed operations would be inflated significantly by timing overhead.
   </para>
 
diff --git a/doc/src/sgml/ref/set_transaction.sgml b/doc/src/sgml/ref/set_transaction.sgml
index 43b1c6c892..cef790ecd7 100644
--- a/doc/src/sgml/ref/set_transaction.sgml
+++ b/doc/src/sgml/ref/set_transaction.sgml
@@ -133,7 +133,7 @@ SET SESSION CHARACTERISTICS AS TRANSACTION <replaceable class="parameter">transa
    <literal>CREATE</literal>, <literal>ALTER</literal>, and
    <literal>DROP</literal> commands; <literal>COMMENT</literal>,
    <literal>GRANT</literal>, <literal>REVOKE</literal>,
-   <literal>TRUNCATE</literal>; and <literal>EXPLAIN ANALYZE</literal>
+   <literal>TRUNCATE</literal>; and <literal>EXPLAIN EXEC</literal>
    and <literal>EXECUTE</literal> if the command they would execute is
    among those listed.  This is a high-level notion of read-only that
    does not prevent all writes to disk.
diff --git a/doc/src/sgml/rules.sgml b/doc/src/sgml/rules.sgml
index 4e20664ea1..499e6fcced 100644
--- a/doc/src/sgml/rules.sgml
+++ b/doc/src/sgml/rules.sgml
@@ -1030,7 +1030,7 @@ SELECT count(*) FROM words WHERE word = 'caterpiler';
 (1 row)
 </programlisting>
 
-    With <command>EXPLAIN ANALYZE</command>, we see:
+    With <command>EXPLAIN EXEC</command>, we see:
 
 <programlisting>
  Aggregate  (cost=21763.99..21764.00 rows=1 width=0) (actual time=188.180..188.181 rows=1 loops=1)
diff --git a/src/test/regress/expected/join_hash.out b/src/test/regress/expected/join_hash.out
index 9eee39bdd3..89000f9156 100644
--- a/src/test/regress/expected/join_hash.out
+++ b/src/test/regress/expected/join_hash.out
@@ -4,7 +4,7 @@
 begin;
 set local min_parallel_table_scan_size = 0;
 set local parallel_setup_cost = 0;
--- Extract bucket and batch counts from an explain analyze plan.  In
+-- Extract bucket and batch counts from an explain exec plan.  In
 -- general we can't make assertions about how many batches (or
 -- buckets) will be required because it can vary, but we can in some
 -- special cases and we can check for growth.
@@ -39,7 +39,7 @@ declare
   hash_node json;
 begin
   for whole_plan in
-    execute 'explain (analyze, format ''json'') ' || query
+    execute 'explain (exec, format ''json'') ' || query
   loop
     hash_node := find_hash(json_extract_path(whole_plan, '0', 'Plan'));
     original := hash_node->>'Original Hash Batches';
@@ -518,7 +518,7 @@ $$);
 
 rollback to settings;
 -- A couple of other hash join tests unrelated to work_mem management.
--- Check that EXPLAIN ANALYZE has data even if the leader doesn't participate
+-- Check that EXPLAIN EXEC has data even if the leader doesn't participate
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '4MB';
diff --git a/src/test/regress/expected/partition_prune.out b/src/test/regress/expected/partition_prune.out
index bd64bed8fc..ad5eeea7b4 100644
--- a/src/test/regress/expected/partition_prune.out
+++ b/src/test/regress/expected/partition_prune.out
@@ -1623,7 +1623,7 @@ create table ab_a3_b1 partition of ab_a3 for values in (1);
 create table ab_a3_b2 partition of ab_a3 for values in (2);
 create table ab_a3_b3 partition of ab_a3 for values in (3);
 -- Disallow index only scans as concurrent transactions may stop visibility
--- bits being set causing "Heap Fetches" to be unstable in the EXPLAIN ANALYZE
+-- bits being set causing "Heap Fetches" to be unstable in the EXPLAIN EXEC
 -- output.
 set enable_indexonlyscan = off;
 prepare ab_q1 (int, int, int) as
@@ -1655,7 +1655,7 @@ execute ab_q1 (1, 8, 3);
 ---+---
 (0 rows)
 
-explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2, 3);
+explain (exec, costs off, summary off, timing off) execute ab_q1 (2, 2, 3);
                        QUERY PLAN                        
 ---------------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -1668,7 +1668,7 @@ explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2, 3);
          Filter: ((a >= $1) AND (a <= $2) AND (b <= $3))
 (8 rows)
 
-explain (analyze, costs off, summary off, timing off) execute ab_q1 (1, 2, 3);
+explain (exec, costs off, summary off, timing off) execute ab_q1 (1, 2, 3);
                        QUERY PLAN                        
 ---------------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -1718,7 +1718,7 @@ execute ab_q1 (1, 8);
 ---
 (0 rows)
 
-explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2);
+explain (exec, costs off, summary off, timing off) execute ab_q1 (2, 2);
                       QUERY PLAN                       
 -------------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -1729,7 +1729,7 @@ explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2);
          Filter: ((a >= $1) AND (a <= $2) AND (b < 3))
 (6 rows)
 
-explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 4);
+explain (exec, costs off, summary off, timing off) execute ab_q1 (2, 4);
                       QUERY PLAN                       
 -------------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -1773,7 +1773,7 @@ execute ab_q2 (1, 8);
 ---
 (0 rows)
 
-explain (analyze, costs off, summary off, timing off) execute ab_q2 (2, 2);
+explain (exec, costs off, summary off, timing off) execute ab_q2 (2, 2);
                        QUERY PLAN                       
 --------------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -1816,7 +1816,7 @@ execute ab_q3 (1, 8);
 ---
 (0 rows)
 
-explain (analyze, costs off, summary off, timing off) execute ab_q3 (2, 2);
+explain (exec, costs off, summary off, timing off) execute ab_q3 (2, 2);
                        QUERY PLAN                       
 --------------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -1857,7 +1857,7 @@ begin;
 -- Test run-time pruning using stable functions
 create function list_part_fn(int) returns int as $$ begin return $1; end;$$ language plpgsql stable;
 -- Ensure pruning works using a stable function containing no Vars
-explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1);
+explain (exec, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1);
                       QUERY PLAN                      
 ------------------------------------------------------
  Append (actual rows=1 loops=1)
@@ -1867,7 +1867,7 @@ explain (analyze, costs off, summary off, timing off) select * from list_part wh
 (4 rows)
 
 -- Ensure pruning does not take place when the function has a Var parameter
-explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(a);
+explain (exec, costs off, summary off, timing off) select * from list_part where a = list_part_fn(a);
                       QUERY PLAN                      
 ------------------------------------------------------
  Append (actual rows=4 loops=1)
@@ -1882,7 +1882,7 @@ explain (analyze, costs off, summary off, timing off) select * from list_part wh
 (9 rows)
 
 -- Ensure pruning does not take place when the expression contains a Var.
-explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1) + a;
+explain (exec, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1) + a;
                       QUERY PLAN                      
 ------------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -1913,7 +1913,7 @@ declare
     ln text;
 begin
     for ln in
-        execute format('explain (analyze, costs off, summary off, timing off) %s',
+        execute format('explain (exec, costs off, summary off, timing off) %s',
             $1)
     loop
         if ln like '%Parallel%' then
@@ -2286,7 +2286,7 @@ reset parallel_tuple_cost;
 reset min_parallel_table_scan_size;
 reset max_parallel_workers_per_gather;
 -- Test run-time partition pruning with an initplan
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from ab where a = (select max(a) from lprt_a) and b = (select max(a)-1 from lprt_a);
                                QUERY PLAN                                
 -------------------------------------------------------------------------
@@ -2345,7 +2345,7 @@ select * from ab where a = (select max(a) from lprt_a) and b = (select max(a)-1
 (52 rows)
 
 -- Test run-time partition pruning with UNION ALL parents
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from (select * from ab where a = 1 union all select * from ab) ab where b = (select 1);
                                   QUERY PLAN                                   
 -------------------------------------------------------------------------------
@@ -2389,7 +2389,7 @@ select * from (select * from ab where a = 1 union all select * from ab) ab where
 (37 rows)
 
 -- A case containing a UNION ALL with a non-partitioned child.
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from (select * from ab where a = 1 union all (values(10,5)) union all select * from ab) ab where b = (select 1);
                                   QUERY PLAN                                   
 -------------------------------------------------------------------------------
@@ -2449,7 +2449,7 @@ union all
 	select tableoid::regclass,a,b from ab
 ) ab where a = $1 and b = (select -10);
 -- Ensure the xy_1 subplan is not pruned.
-explain (analyze, costs off, summary off, timing off) execute ab_q6(1);
+explain (exec, costs off, summary off, timing off) execute ab_q6(1);
                        QUERY PLAN                       
 --------------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -2491,7 +2491,7 @@ deallocate ab_q5;
 deallocate ab_q6;
 -- UPDATE on a partition subtree has been seen to have problems.
 insert into ab values (1,2);
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 update ab_a1 set b = 3 from ab where ab.a = 1 and ab.a = ab_a1.a;
                                      QUERY PLAN                                      
 -------------------------------------------------------------------------------------
@@ -2571,7 +2571,7 @@ table ab;
 -- Test UPDATE where source relation has run-time pruning enabled
 truncate ab;
 insert into ab values (1, 1), (1, 2), (1, 3), (2, 1);
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 update ab_a1 set b = 3 from ab_a2 where ab_a2.b = (select 1);
                               QUERY PLAN                              
 ----------------------------------------------------------------------
@@ -2643,7 +2643,7 @@ create index tprt6_idx on tprt_6 (col1);
 insert into tprt values (10), (20), (501), (502), (505), (1001), (4500);
 set enable_hashjoin = off;
 set enable_mergejoin = off;
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 join tprt on tbl1.col1 > tprt.col1;
                                 QUERY PLAN                                
 --------------------------------------------------------------------------
@@ -2664,7 +2664,7 @@ select * from tbl1 join tprt on tbl1.col1 > tprt.col1;
                Index Cond: (col1 < tbl1.col1)
 (15 rows)
 
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 join tprt on tbl1.col1 = tprt.col1;
                                 QUERY PLAN                                
 --------------------------------------------------------------------------
@@ -2709,7 +2709,7 @@ order by tbl1.col1, tprt.col1;
 
 -- Multiple partitions
 insert into tbl1 values (1001), (1010), (1011);
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 inner join tprt on tbl1.col1 > tprt.col1;
                                 QUERY PLAN                                
 --------------------------------------------------------------------------
@@ -2730,7 +2730,7 @@ select * from tbl1 inner join tprt on tbl1.col1 > tprt.col1;
                Index Cond: (col1 < tbl1.col1)
 (15 rows)
 
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 inner join tprt on tbl1.col1 = tprt.col1;
                                 QUERY PLAN                                
 --------------------------------------------------------------------------
@@ -2794,7 +2794,7 @@ order by tbl1.col1, tprt.col1;
 -- Last partition
 delete from tbl1;
 insert into tbl1 values (4400);
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 join tprt on tbl1.col1 < tprt.col1;
                                 QUERY PLAN                                
 --------------------------------------------------------------------------
@@ -2826,7 +2826,7 @@ order by tbl1.col1, tprt.col1;
 -- No matching partition
 delete from tbl1;
 insert into tbl1 values (10000);
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 join tprt on tbl1.col1 = tprt.col1;
                             QUERY PLAN                             
 -------------------------------------------------------------------
@@ -2893,7 +2893,7 @@ execute part_abc_q1 (1, 2, 3);
 (0 rows)
 
 -- Single partition should be scanned.
-explain (analyze, costs off, summary off, timing off) execute part_abc_q1 (1, 2, 3);
+explain (exec, costs off, summary off, timing off) execute part_abc_q1 (1, 2, 3);
                    QUERY PLAN                    
 -------------------------------------------------
  Seq Scan on part_abc_p1 (actual rows=0 loops=1)
@@ -2943,7 +2943,7 @@ execute q1 (1,2);
 ---+---
 (0 rows)
 
-explain (analyze, costs off, summary off, timing off)  execute q1 (1,1);
+explain (exec, costs off, summary off, timing off)  execute q1 (1,1);
                      QUERY PLAN                      
 -----------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -2952,7 +2952,7 @@ explain (analyze, costs off, summary off, timing off)  execute q1 (1,1);
          Filter: (b = ANY (ARRAY[$1, $2]))
 (4 rows)
 
-explain (analyze, costs off, summary off, timing off)  execute q1 (2,2);
+explain (exec, costs off, summary off, timing off)  execute q1 (2,2);
                      QUERY PLAN                      
 -----------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -2963,7 +2963,7 @@ explain (analyze, costs off, summary off, timing off)  execute q1 (2,2);
 
 -- Try with no matching partitions. One subplan should remain in this case,
 -- but it shouldn't be executed.
-explain (analyze, costs off, summary off, timing off)  execute q1 (0,0);
+explain (exec, costs off, summary off, timing off)  execute q1 (0,0);
                   QUERY PLAN                  
 ----------------------------------------------
  Append (actual rows=0 loops=1)
@@ -3001,7 +3001,7 @@ execute q1 (1,2,3,4);
 (0 rows)
 
 -- Both partitions allowed by IN clause, but one disallowed by <> clause
-explain (analyze, costs off, summary off, timing off)  execute q1 (1,2,2,0);
+explain (exec, costs off, summary off, timing off)  execute q1 (1,2,2,0);
                                QUERY PLAN                                
 -------------------------------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -3012,7 +3012,7 @@ explain (analyze, costs off, summary off, timing off)  execute q1 (1,2,2,0);
 
 -- Both partitions allowed by IN clause, then both excluded again by <> clauses.
 -- One subplan will remain in this case, but it should not be executed.
-explain (analyze, costs off, summary off, timing off)  execute q1 (1,2,2,1);
+explain (exec, costs off, summary off, timing off)  execute q1 (1,2,2,1);
                                QUERY PLAN                                
 -------------------------------------------------------------------------
  Append (actual rows=0 loops=1)
@@ -3022,7 +3022,7 @@ explain (analyze, costs off, summary off, timing off)  execute q1 (1,2,2,1);
 (4 rows)
 
 -- Ensure Params that evaluate to NULL properly prune away all partitions
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from listp where a = (select null::int);
                   QUERY PLAN                  
 ----------------------------------------------
@@ -3042,7 +3042,7 @@ insert into boolvalues values('t'),('f');
 create table boolp (a bool) partition by list (a);
 create table boolp_t partition of boolp for values in('t');
 create table boolp_f partition of boolp for values in('f');
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from boolp where a = (select value from boolvalues where value);
                        QUERY PLAN                       
 --------------------------------------------------------
@@ -3057,7 +3057,7 @@ select * from boolp where a = (select value from boolvalues where value);
          Filter: (a = $0)
 (9 rows)
 
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from boolp where a = (select value from boolvalues where not value);
                        QUERY PLAN                       
 --------------------------------------------------------
@@ -3128,7 +3128,7 @@ execute mt_q1(0);
  25
 (3 rows)
 
-explain (analyze, costs off, summary off, timing off) execute mt_q1(15);
+explain (exec, costs off, summary off, timing off) execute mt_q1(15);
                                   QUERY PLAN                                   
 -------------------------------------------------------------------------------
  Merge Append (actual rows=2 loops=1)
@@ -3149,7 +3149,7 @@ execute mt_q1(15);
  25
 (2 rows)
 
-explain (analyze, costs off, summary off, timing off) execute mt_q1(25);
+explain (exec, costs off, summary off, timing off) execute mt_q1(25);
                                   QUERY PLAN                                   
 -------------------------------------------------------------------------------
  Merge Append (actual rows=1 loops=1)
@@ -3167,7 +3167,7 @@ execute mt_q1(25);
 (1 row)
 
 -- Ensure MergeAppend behaves correctly when no subplans match
-explain (analyze, costs off, summary off, timing off) execute mt_q1(35);
+explain (exec, costs off, summary off, timing off) execute mt_q1(35);
                                QUERY PLAN                               
 ------------------------------------------------------------------------
  Merge Append (actual rows=0 loops=1)
@@ -3184,7 +3184,7 @@ execute mt_q1(35);
 
 deallocate mt_q1;
 -- ensure initplan params properly prune partitions
-explain (analyze, costs off, summary off, timing off) select * from ma_test where a >= (select min(b) from ma_test_p2) order by b;
+explain (exec, costs off, summary off, timing off) select * from ma_test where a >= (select min(b) from ma_test_p2) order by b;
                                                  QUERY PLAN                                                 
 ------------------------------------------------------------------------------------------------------------
  Merge Append (actual rows=20 loops=1)
@@ -3629,7 +3629,7 @@ create table listp (a int, b int) partition by list (a);
 create table listp1 partition of listp for values in(1);
 create table listp2 partition of listp for values in(2) partition by list(b);
 create table listp2_10 partition of listp2 for values in (10);
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from listp where a = (select 2) and b <> 10;
                  QUERY PLAN                 
 --------------------------------------------
diff --git a/src/test/regress/expected/select.out b/src/test/regress/expected/select.out
index c441049f41..16ac7bdf43 100644
--- a/src/test/regress/expected/select.out
+++ b/src/test/regress/expected/select.out
@@ -753,7 +753,7 @@ select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
 (1 row)
 
 -- actually run the query with an analyze to use the partial index
-explain (costs off, analyze on, timing off, summary off)
+explain (costs off, exec on, timing off, summary off)
 select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
                            QUERY PLAN                            
 -----------------------------------------------------------------
diff --git a/src/test/regress/expected/select_into.out b/src/test/regress/expected/select_into.out
index f373fae679..5c0c1d7b88 100644
--- a/src/test/regress/expected/select_into.out
+++ b/src/test/regress/expected/select_into.out
@@ -112,12 +112,12 @@ SELECT * FROM created_table;
  4567890123456789 | -4567890123456789
 (5 rows)
 
--- Try EXPLAIN ANALYZE SELECT INTO and EXPLAIN ANALYZE CREATE TABLE AS
+-- Try EXPLAIN EXEC SELECT INTO and EXPLAIN EXEC CREATE TABLE AS
 -- WITH NO DATA, but hide the outputs since they won't be stable.
 DO $$
 BEGIN
-	EXECUTE 'EXPLAIN ANALYZE SELECT * INTO TABLE easi FROM int8_tbl';
-	EXECUTE 'EXPLAIN ANALYZE CREATE TABLE easi2 AS SELECT * FROM int8_tbl WITH NO DATA';
+	EXECUTE 'EXPLAIN EXEC SELECT * INTO TABLE easi FROM int8_tbl';
+	EXECUTE 'EXPLAIN EXEC CREATE TABLE easi2 AS SELECT * FROM int8_tbl WITH NO DATA';
 END$$;
 DROP TABLE created_table;
 DROP TABLE easi, easi2;
diff --git a/src/test/regress/expected/select_parallel.out b/src/test/regress/expected/select_parallel.out
index 0eca76cb41..c4c8f476ff 100644
--- a/src/test/regress/expected/select_parallel.out
+++ b/src/test/regress/expected/select_parallel.out
@@ -505,7 +505,7 @@ select count(*) from bmscantest where a>1;
 -- test accumulation of stats for parallel nodes
 reset enable_seqscan;
 alter table tenk2 set (parallel_workers = 0);
-explain (analyze, timing off, summary off, costs off)
+explain (exec, timing off, summary off, costs off)
    select count(*) from tenk1, tenk2 where tenk1.hundred > 1
         and tenk2.thousand=0;
                                 QUERY PLAN                                
@@ -531,7 +531,7 @@ $$
 declare ln text;
 begin
     for ln in
-        explain (analyze, timing off, summary off, costs off)
+        explain (exec, timing off, summary off, costs off)
           select * from
           (select ten from tenk1 where ten < 100 order by ten) ss
           right join (values (1),(2),(3)) v(x) on true
@@ -1006,7 +1006,7 @@ explain (costs off)
 -- to increase the parallel query test coverage
 SAVEPOINT settings;
 SET LOCAL force_parallel_mode = 1;
-EXPLAIN (analyze, timing off, summary off, costs off) SELECT * FROM tenk1;
+EXPLAIN (exec, timing off, summary off, costs off) SELECT * FROM tenk1;
                          QUERY PLAN                          
 -------------------------------------------------------------
  Gather (actual rows=10000 loops=1)
diff --git a/src/test/regress/expected/stats_ext.out b/src/test/regress/expected/stats_ext.out
index 6dfca7a606..c30447fac2 100644
--- a/src/test/regress/expected/stats_ext.out
+++ b/src/test/regress/expected/stats_ext.out
@@ -12,7 +12,7 @@ declare
     first_row bool := true;
 begin
     for ln in
-        execute format('explain analyze %s', $1)
+        execute format('explain exec %s', $1)
     loop
         if first_row then
             first_row := false;
diff --git a/src/test/regress/expected/subselect.out b/src/test/regress/expected/subselect.out
index 2d1963d12a..c1e8bec7a0 100644
--- a/src/test/regress/expected/subselect.out
+++ b/src/test/regress/expected/subselect.out
@@ -1120,8 +1120,8 @@ drop function tattle(x int, y int);
 --
 -- Test that LIMIT can be pushed to SORT through a subquery that just projects
 -- columns.  We check for that having happened by looking to see if EXPLAIN
--- ANALYZE shows that a top-N sort was used.  We must suppress or filter away
--- all the non-invariant parts of the EXPLAIN ANALYZE output.
+-- EXEC shows that a top-N sort was used.  We must suppress or filter away
+-- all the non-invariant parts of the EXPLAIN EXEC output.
 --
 create table sq_limit (pk int primary key, c1 int, c2 int);
 insert into sq_limit values
@@ -1138,7 +1138,7 @@ $$
 declare ln text;
 begin
     for ln in
-        explain (analyze, summary off, timing off, costs off)
+        explain (exec, summary off, timing off, costs off)
         select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3
     loop
         ln := regexp_replace(ln, 'Memory: \S*',  'Memory: xxx');
diff --git a/src/test/regress/expected/tidscan.out b/src/test/regress/expected/tidscan.out
index 9b5eb04bfd..8e62c9b28b 100644
--- a/src/test/regress/expected/tidscan.out
+++ b/src/test/regress/expected/tidscan.out
@@ -189,7 +189,7 @@ FETCH NEXT FROM c;
 (1 row)
 
 -- perform update
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+EXPLAIN (EXEC, COSTS OFF, SUMMARY OFF, TIMING OFF)
 UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *;
                     QUERY PLAN                     
 ---------------------------------------------------
@@ -205,7 +205,7 @@ FETCH NEXT FROM c;
 (1 row)
 
 -- perform update
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+EXPLAIN (EXEC, COSTS OFF, SUMMARY OFF, TIMING OFF)
 UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *;
                     QUERY PLAN                     
 ---------------------------------------------------
@@ -229,7 +229,7 @@ FETCH NEXT FROM c;
 (0 rows)
 
 -- should error out
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+EXPLAIN (EXEC, COSTS OFF, SUMMARY OFF, TIMING OFF)
 UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *;
 ERROR:  cursor "c" is not positioned on a row
 ROLLBACK;
diff --git a/src/test/regress/sql/join_hash.sql b/src/test/regress/sql/join_hash.sql
index ae352e9b0b..b44d705d57 100644
--- a/src/test/regress/sql/join_hash.sql
+++ b/src/test/regress/sql/join_hash.sql
@@ -7,7 +7,7 @@ begin;
 set local min_parallel_table_scan_size = 0;
 set local parallel_setup_cost = 0;
 
--- Extract bucket and batch counts from an explain analyze plan.  In
+-- Extract bucket and batch counts from an explain exec plan.  In
 -- general we can't make assertions about how many batches (or
 -- buckets) will be required because it can vary, but we can in some
 -- special cases and we can check for growth.
@@ -42,7 +42,7 @@ declare
   hash_node json;
 begin
   for whole_plan in
-    execute 'explain (analyze, format ''json'') ' || query
+    execute 'explain (exec, format ''json'') ' || query
   loop
     hash_node := find_hash(json_extract_path(whole_plan, '0', 'Plan'));
     original := hash_node->>'Original Hash Batches';
@@ -280,7 +280,7 @@ rollback to settings;
 
 -- A couple of other hash join tests unrelated to work_mem management.
 
--- Check that EXPLAIN ANALYZE has data even if the leader doesn't participate
+-- Check that EXPLAIN EXEC has data even if the leader doesn't participate
 savepoint settings;
 set local max_parallel_workers_per_gather = 2;
 set local work_mem = '4MB';
diff --git a/src/test/regress/sql/partition_prune.sql b/src/test/regress/sql/partition_prune.sql
index 246c6274af..684aa3c464 100644
--- a/src/test/regress/sql/partition_prune.sql
+++ b/src/test/regress/sql/partition_prune.sql
@@ -327,7 +327,7 @@ create table ab_a3_b2 partition of ab_a3 for values in (2);
 create table ab_a3_b3 partition of ab_a3 for values in (3);
 
 -- Disallow index only scans as concurrent transactions may stop visibility
--- bits being set causing "Heap Fetches" to be unstable in the EXPLAIN ANALYZE
+-- bits being set causing "Heap Fetches" to be unstable in the EXPLAIN EXEC
 -- output.
 set enable_indexonlyscan = off;
 
@@ -342,8 +342,8 @@ execute ab_q1 (1, 8, 3);
 execute ab_q1 (1, 8, 3);
 execute ab_q1 (1, 8, 3);
 
-explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2, 3);
-explain (analyze, costs off, summary off, timing off) execute ab_q1 (1, 2, 3);
+explain (exec, costs off, summary off, timing off) execute ab_q1 (2, 2, 3);
+explain (exec, costs off, summary off, timing off) execute ab_q1 (1, 2, 3);
 
 deallocate ab_q1;
 
@@ -359,8 +359,8 @@ execute ab_q1 (1, 8);
 execute ab_q1 (1, 8);
 execute ab_q1 (1, 8);
 
-explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 2);
-explain (analyze, costs off, summary off, timing off) execute ab_q1 (2, 4);
+explain (exec, costs off, summary off, timing off) execute ab_q1 (2, 2);
+explain (exec, costs off, summary off, timing off) execute ab_q1 (2, 4);
 
 -- Ensure a mix of PARAM_EXTERN and PARAM_EXEC Params work together at
 -- different levels of partitioning.
@@ -373,7 +373,7 @@ execute ab_q2 (1, 8);
 execute ab_q2 (1, 8);
 execute ab_q2 (1, 8);
 
-explain (analyze, costs off, summary off, timing off) execute ab_q2 (2, 2);
+explain (exec, costs off, summary off, timing off) execute ab_q2 (2, 2);
 
 -- As above, but swap the PARAM_EXEC Param to the first partition level
 prepare ab_q3 (int, int) as
@@ -385,7 +385,7 @@ execute ab_q3 (1, 8);
 execute ab_q3 (1, 8);
 execute ab_q3 (1, 8);
 
-explain (analyze, costs off, summary off, timing off) execute ab_q3 (2, 2);
+explain (exec, costs off, summary off, timing off) execute ab_q3 (2, 2);
 
 -- Test a backwards Append scan
 create table list_part (a int) partition by list (a);
@@ -416,13 +416,13 @@ begin;
 create function list_part_fn(int) returns int as $$ begin return $1; end;$$ language plpgsql stable;
 
 -- Ensure pruning works using a stable function containing no Vars
-explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1);
+explain (exec, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1);
 
 -- Ensure pruning does not take place when the function has a Var parameter
-explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(a);
+explain (exec, costs off, summary off, timing off) select * from list_part where a = list_part_fn(a);
 
 -- Ensure pruning does not take place when the expression contains a Var.
-explain (analyze, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1) + a;
+explain (exec, costs off, summary off, timing off) select * from list_part where a = list_part_fn(1) + a;
 
 rollback;
 
@@ -440,7 +440,7 @@ declare
     ln text;
 begin
     for ln in
-        execute format('explain (analyze, costs off, summary off, timing off) %s',
+        execute format('explain (exec, costs off, summary off, timing off) %s',
             $1)
     loop
         if ln like '%Parallel%' then
@@ -537,15 +537,15 @@ reset min_parallel_table_scan_size;
 reset max_parallel_workers_per_gather;
 
 -- Test run-time partition pruning with an initplan
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from ab where a = (select max(a) from lprt_a) and b = (select max(a)-1 from lprt_a);
 
 -- Test run-time partition pruning with UNION ALL parents
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from (select * from ab where a = 1 union all select * from ab) ab where b = (select 1);
 
 -- A case containing a UNION ALL with a non-partitioned child.
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from (select * from ab where a = 1 union all (values(10,5)) union all select * from ab) ab where b = (select 1);
 
 -- Another UNION ALL test, but containing a mix of exec init and exec run-time pruning.
@@ -566,7 +566,7 @@ union all
 ) ab where a = $1 and b = (select -10);
 
 -- Ensure the xy_1 subplan is not pruned.
-explain (analyze, costs off, summary off, timing off) execute ab_q6(1);
+explain (exec, costs off, summary off, timing off) execute ab_q6(1);
 
 -- Ensure we see just the xy_1 row.
 execute ab_q6(100);
@@ -584,14 +584,14 @@ deallocate ab_q6;
 
 -- UPDATE on a partition subtree has been seen to have problems.
 insert into ab values (1,2);
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 update ab_a1 set b = 3 from ab where ab.a = 1 and ab.a = ab_a1.a;
 table ab;
 
 -- Test UPDATE where source relation has run-time pruning enabled
 truncate ab;
 insert into ab values (1, 1), (1, 2), (1, 3), (2, 1);
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 update ab_a1 set b = 3 from ab_a2 where ab_a2.b = (select 1);
 select tableoid::regclass, * from ab;
 
@@ -622,10 +622,10 @@ insert into tprt values (10), (20), (501), (502), (505), (1001), (4500);
 set enable_hashjoin = off;
 set enable_mergejoin = off;
 
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 join tprt on tbl1.col1 > tprt.col1;
 
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 join tprt on tbl1.col1 = tprt.col1;
 
 select tbl1.col1, tprt.col1 from tbl1
@@ -638,10 +638,10 @@ order by tbl1.col1, tprt.col1;
 
 -- Multiple partitions
 insert into tbl1 values (1001), (1010), (1011);
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 inner join tprt on tbl1.col1 > tprt.col1;
 
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 inner join tprt on tbl1.col1 = tprt.col1;
 
 select tbl1.col1, tprt.col1 from tbl1
@@ -655,7 +655,7 @@ order by tbl1.col1, tprt.col1;
 -- Last partition
 delete from tbl1;
 insert into tbl1 values (4400);
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 join tprt on tbl1.col1 < tprt.col1;
 
 select tbl1.col1, tprt.col1 from tbl1
@@ -665,7 +665,7 @@ order by tbl1.col1, tprt.col1;
 -- No matching partition
 delete from tbl1;
 insert into tbl1 values (10000);
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from tbl1 join tprt on tbl1.col1 = tprt.col1;
 
 select tbl1.col1, tprt.col1 from tbl1
@@ -696,7 +696,7 @@ execute part_abc_q1 (1, 2, 3);
 execute part_abc_q1 (1, 2, 3);
 
 -- Single partition should be scanned.
-explain (analyze, costs off, summary off, timing off) execute part_abc_q1 (1, 2, 3);
+explain (exec, costs off, summary off, timing off) execute part_abc_q1 (1, 2, 3);
 
 deallocate part_abc_q1;
 
@@ -722,13 +722,13 @@ execute q1 (1,2);
 execute q1 (1,2);
 execute q1 (1,2);
 
-explain (analyze, costs off, summary off, timing off)  execute q1 (1,1);
+explain (exec, costs off, summary off, timing off)  execute q1 (1,1);
 
-explain (analyze, costs off, summary off, timing off)  execute q1 (2,2);
+explain (exec, costs off, summary off, timing off)  execute q1 (2,2);
 
 -- Try with no matching partitions. One subplan should remain in this case,
 -- but it shouldn't be executed.
-explain (analyze, costs off, summary off, timing off)  execute q1 (0,0);
+explain (exec, costs off, summary off, timing off)  execute q1 (0,0);
 
 deallocate q1;
 
@@ -742,14 +742,14 @@ execute q1 (1,2,3,4);
 execute q1 (1,2,3,4);
 
 -- Both partitions allowed by IN clause, but one disallowed by <> clause
-explain (analyze, costs off, summary off, timing off)  execute q1 (1,2,2,0);
+explain (exec, costs off, summary off, timing off)  execute q1 (1,2,2,0);
 
 -- Both partitions allowed by IN clause, then both excluded again by <> clauses.
 -- One subplan will remain in this case, but it should not be executed.
-explain (analyze, costs off, summary off, timing off)  execute q1 (1,2,2,1);
+explain (exec, costs off, summary off, timing off)  execute q1 (1,2,2,1);
 
 -- Ensure Params that evaluate to NULL properly prune away all partitions
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from listp where a = (select null::int);
 
 drop table listp;
@@ -762,10 +762,10 @@ create table boolp (a bool) partition by list (a);
 create table boolp_t partition of boolp for values in('t');
 create table boolp_f partition of boolp for values in('f');
 
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from boolp where a = (select value from boolvalues where value);
 
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from boolp where a = (select value from boolvalues where not value);
 
 drop table boolp;
@@ -793,18 +793,18 @@ execute mt_q1(0);
 execute mt_q1(0);
 execute mt_q1(0);
 
-explain (analyze, costs off, summary off, timing off) execute mt_q1(15);
+explain (exec, costs off, summary off, timing off) execute mt_q1(15);
 execute mt_q1(15);
-explain (analyze, costs off, summary off, timing off) execute mt_q1(25);
+explain (exec, costs off, summary off, timing off) execute mt_q1(25);
 execute mt_q1(25);
 -- Ensure MergeAppend behaves correctly when no subplans match
-explain (analyze, costs off, summary off, timing off) execute mt_q1(35);
+explain (exec, costs off, summary off, timing off) execute mt_q1(35);
 execute mt_q1(35);
 
 deallocate mt_q1;
 
 -- ensure initplan params properly prune partitions
-explain (analyze, costs off, summary off, timing off) select * from ma_test where a >= (select min(b) from ma_test_p2) order by b;
+explain (exec, costs off, summary off, timing off) select * from ma_test where a >= (select min(b) from ma_test_p2) order by b;
 
 reset enable_seqscan;
 reset enable_sort;
@@ -987,7 +987,7 @@ create table listp1 partition of listp for values in(1);
 create table listp2 partition of listp for values in(2) partition by list(b);
 create table listp2_10 partition of listp2 for values in (10);
 
-explain (analyze, costs off, summary off, timing off)
+explain (exec, costs off, summary off, timing off)
 select * from listp where a = (select 2) and b <> 10;
 
 --
diff --git a/src/test/regress/sql/select.sql b/src/test/regress/sql/select.sql
index b5929b2eca..f573dfa921 100644
--- a/src/test/regress/sql/select.sql
+++ b/src/test/regress/sql/select.sql
@@ -196,7 +196,7 @@ explain (costs off)
 select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
 select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
 -- actually run the query with an analyze to use the partial index
-explain (costs off, analyze on, timing off, summary off)
+explain (costs off, exec on, timing off, summary off)
 select * from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
 explain (costs off)
 select unique2 from onek2 where unique2 = 11 and stringu1 = 'ATAAAA';
diff --git a/src/test/regress/sql/select_into.sql b/src/test/regress/sql/select_into.sql
index a708fef0ea..bd2f918294 100644
--- a/src/test/regress/sql/select_into.sql
+++ b/src/test/regress/sql/select_into.sql
@@ -85,12 +85,12 @@ SELECT make_table();
 
 SELECT * FROM created_table;
 
--- Try EXPLAIN ANALYZE SELECT INTO and EXPLAIN ANALYZE CREATE TABLE AS
+-- Try EXPLAIN EXEC SELECT INTO and EXPLAIN EXEC CREATE TABLE AS
 -- WITH NO DATA, but hide the outputs since they won't be stable.
 DO $$
 BEGIN
-	EXECUTE 'EXPLAIN ANALYZE SELECT * INTO TABLE easi FROM int8_tbl';
-	EXECUTE 'EXPLAIN ANALYZE CREATE TABLE easi2 AS SELECT * FROM int8_tbl WITH NO DATA';
+	EXECUTE 'EXPLAIN EXEC SELECT * INTO TABLE easi FROM int8_tbl';
+	EXECUTE 'EXPLAIN EXEC CREATE TABLE easi2 AS SELECT * FROM int8_tbl WITH NO DATA';
 END$$;
 
 DROP TABLE created_table;
diff --git a/src/test/regress/sql/select_parallel.sql b/src/test/regress/sql/select_parallel.sql
index 03c056b8b7..de8ef80ecf 100644
--- a/src/test/regress/sql/select_parallel.sql
+++ b/src/test/regress/sql/select_parallel.sql
@@ -197,7 +197,7 @@ select count(*) from bmscantest where a>1;
 -- test accumulation of stats for parallel nodes
 reset enable_seqscan;
 alter table tenk2 set (parallel_workers = 0);
-explain (analyze, timing off, summary off, costs off)
+explain (exec, timing off, summary off, costs off)
    select count(*) from tenk1, tenk2 where tenk1.hundred > 1
         and tenk2.thousand=0;
 alter table tenk2 reset (parallel_workers);
@@ -209,7 +209,7 @@ $$
 declare ln text;
 begin
     for ln in
-        explain (analyze, timing off, summary off, costs off)
+        explain (exec, timing off, summary off, costs off)
           select * from
           (select ten from tenk1 where ten < 100 order by ten) ss
           right join (values (1),(2),(3)) v(x) on true
@@ -376,7 +376,7 @@ explain (costs off)
 -- to increase the parallel query test coverage
 SAVEPOINT settings;
 SET LOCAL force_parallel_mode = 1;
-EXPLAIN (analyze, timing off, summary off, costs off) SELECT * FROM tenk1;
+EXPLAIN (exec, timing off, summary off, costs off) SELECT * FROM tenk1;
 ROLLBACK TO SAVEPOINT settings;
 
 -- provoke error in worker
diff --git a/src/test/regress/sql/stats_ext.sql b/src/test/regress/sql/stats_ext.sql
index c6a5776120..6492cd7c37 100644
--- a/src/test/regress/sql/stats_ext.sql
+++ b/src/test/regress/sql/stats_ext.sql
@@ -14,7 +14,7 @@ declare
     first_row bool := true;
 begin
     for ln in
-        execute format('explain analyze %s', $1)
+        execute format('explain exec %s', $1)
     loop
         if first_row then
             first_row := false;
diff --git a/src/test/regress/sql/subselect.sql b/src/test/regress/sql/subselect.sql
index 99ca69791e..b1d89228d2 100644
--- a/src/test/regress/sql/subselect.sql
+++ b/src/test/regress/sql/subselect.sql
@@ -598,8 +598,8 @@ drop function tattle(x int, y int);
 --
 -- Test that LIMIT can be pushed to SORT through a subquery that just projects
 -- columns.  We check for that having happened by looking to see if EXPLAIN
--- ANALYZE shows that a top-N sort was used.  We must suppress or filter away
--- all the non-invariant parts of the EXPLAIN ANALYZE output.
+-- EXEC shows that a top-N sort was used.  We must suppress or filter away
+-- all the non-invariant parts of the EXPLAIN EXEC output.
 --
 create table sq_limit (pk int primary key, c1 int, c2 int);
 insert into sq_limit values
@@ -617,7 +617,7 @@ $$
 declare ln text;
 begin
     for ln in
-        explain (analyze, summary off, timing off, costs off)
+        explain (exec, summary off, timing off, costs off)
         select * from (select pk,c2 from sq_limit order by c1,pk) as x limit 3
     loop
         ln := regexp_replace(ln, 'Memory: \S*',  'Memory: xxx');
diff --git a/src/test/regress/sql/tidscan.sql b/src/test/regress/sql/tidscan.sql
index ef05c09842..65e222681e 100644
--- a/src/test/regress/sql/tidscan.sql
+++ b/src/test/regress/sql/tidscan.sql
@@ -68,17 +68,17 @@ DECLARE c CURSOR FOR SELECT ctid, * FROM tidscan;
 FETCH NEXT FROM c; -- skip one row
 FETCH NEXT FROM c;
 -- perform update
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+EXPLAIN (EXEC, COSTS OFF, SUMMARY OFF, TIMING OFF)
 UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *;
 FETCH NEXT FROM c;
 -- perform update
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+EXPLAIN (EXEC, COSTS OFF, SUMMARY OFF, TIMING OFF)
 UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *;
 SELECT * FROM tidscan;
 -- position cursor past any rows
 FETCH NEXT FROM c;
 -- should error out
-EXPLAIN (ANALYZE, COSTS OFF, SUMMARY OFF, TIMING OFF)
+EXPLAIN (EXEC, COSTS OFF, SUMMARY OFF, TIMING OFF)
 UPDATE tidscan SET id = -id WHERE CURRENT OF c RETURNING *;
 ROLLBACK;
 

--------------2.21.0--

