On Thu, Oct 22, 2015 at 10:00 PM, Robert Haas <robertmh...@gmail.com> wrote:
>> ...and so I've committed it and back-patched to 9.4.
>
> Sigh.  This was buggy; I have no idea how it survived my earlier testing.
>
> I will go fix it.  Sorry.

Gah!  That, too, turned out to be buggy, although in a considerably
more subtle way.  I've pushed another fix with a detailed comment and
an explanatory commit message that hopefully squashes this problem for
good.  Combined with the fix at
http://www.postgresql.org/message-id/ca+tgmozzv3u9trsvcao+-otxbsz_u+a5q8x-_b+vzcehhtz...@mail.gmail.com
this seems to put an end to the occasional complaints about workers "dying
unexpectedly" when they really had done no such thing.

The test code I used to find these problems is attached.  I compiled
and installed the parallel_dummy extension, did pgbench -i -s 100, and
then ran this:

while psql -c "select parallel_count('pgbench_accounts', 4)"; do sleep 1; done

Without these fixes, this can hang or error out, but with these fixes,
it works fine.

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company
commit 84fa9b17b8427721af23e38094a6cbfec8c8bb00
Author: Robert Haas <rh...@postgresql.org>
Date:   Thu Oct 15 19:21:38 2015 -0400

    parallel_dummy

diff --git a/contrib/parallel_dummy/Makefile b/contrib/parallel_dummy/Makefile
new file mode 100644
index 0000000..de00f50
--- /dev/null
+++ b/contrib/parallel_dummy/Makefile
@@ -0,0 +1,19 @@
+MODULE_big = parallel_dummy
+OBJS = parallel_dummy.o $(WIN32RES)
+PGFILEDESC = "parallel_dummy - dummy use of parallel infrastructure"
+
+EXTENSION = parallel_dummy
+DATA = parallel_dummy--1.0.sql
+
+REGRESS = parallel_dummy
+
+ifdef USE_PGXS
+PG_CONFIG = pg_config
+PGXS := $(shell $(PG_CONFIG) --pgxs)
+include $(PGXS)
+else
+subdir = contrib/parallel_dummy
+top_builddir = ../..
+include $(top_builddir)/src/Makefile.global
+include $(top_srcdir)/contrib/contrib-global.mk
+endif
diff --git a/contrib/parallel_dummy/parallel_dummy--1.0.sql b/contrib/parallel_dummy/parallel_dummy--1.0.sql
new file mode 100644
index 0000000..d49bd0f
--- /dev/null
+++ b/contrib/parallel_dummy/parallel_dummy--1.0.sql
@@ -0,0 +1,7 @@
+-- complain if script is sourced in psql, rather than via CREATE EXTENSION
+\echo Use "CREATE EXTENSION parallel_dummy" to load this file. \quit
+
+CREATE FUNCTION parallel_count(rel pg_catalog.regclass,
+							  nworkers pg_catalog.int4)
+    RETURNS pg_catalog.int8 STRICT
+	AS 'MODULE_PATHNAME' LANGUAGE C;
diff --git a/contrib/parallel_dummy/parallel_dummy.c b/contrib/parallel_dummy/parallel_dummy.c
new file mode 100644
index 0000000..f63b63e
--- /dev/null
+++ b/contrib/parallel_dummy/parallel_dummy.c
@@ -0,0 +1,152 @@
+/*--------------------------------------------------------------------------
+ *
+ * parallel_dummy.c
+ *		Test harness code for parallel mode code.
+ *
+ * Copyright (C) 2013-2014, PostgreSQL Global Development Group
+ *
+ * IDENTIFICATION
+ *		contrib/parallel_dummy/parallel_dummy.c
+ *
+ * -------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "access/heapam.h"
+#include "access/parallel.h"
+#include "access/relscan.h"
+#include "access/xact.h"
+#include "fmgr.h"
+#include "miscadmin.h"
+#include "storage/spin.h"
+#include "utils/builtins.h"
+#include "utils/guc.h"
+#include "utils/snapmgr.h"
+
+PG_MODULE_MAGIC;
+
+PG_FUNCTION_INFO_V1(parallel_count);
+
+#define		TOC_SCAN_KEY			1
+#define		TOC_RESULT_KEY			2
+
+void		_PG_init(void);
+void		count_worker_main(dsm_segment *seg, shm_toc *toc);
+
+static void count_helper(HeapScanDesc scan, int64 *resultp);
+
+Datum
+parallel_count(PG_FUNCTION_ARGS)
+{
+	Oid			relid = PG_GETARG_OID(0);
+	int32		nworkers = PG_GETARG_INT32(1);
+	int64	   *resultp;
+	int64		result;
+	ParallelContext *pcxt;
+	ParallelHeapScanDesc pscan;
+	HeapScanDesc	scan;
+	Relation	rel;
+	Size		sz;
+
+	if (nworkers < 0)
+		ereport(ERROR,
+				(errmsg("number of parallel workers must be non-negative")));
+
+	rel = relation_open(relid, AccessShareLock);
+
+	EnterParallelMode();
+
+	pcxt = CreateParallelContextForExternalFunction("parallel_dummy",
+											 "count_worker_main",
+											 nworkers);
+	sz = heap_parallelscan_estimate(GetActiveSnapshot());
+	shm_toc_estimate_chunk(&pcxt->estimator, sz);
+	shm_toc_estimate_chunk(&pcxt->estimator, sizeof(int64));
+	shm_toc_estimate_keys(&pcxt->estimator, 2);
+	InitializeParallelDSM(pcxt);
+	pscan = shm_toc_allocate(pcxt->toc, sz);
+	heap_parallelscan_initialize(pscan, rel, GetActiveSnapshot());
+	shm_toc_insert(pcxt->toc, TOC_SCAN_KEY, pscan);
+	resultp = shm_toc_allocate(pcxt->toc, sizeof(int64));
+	shm_toc_insert(pcxt->toc, TOC_RESULT_KEY, resultp);
+
+	LaunchParallelWorkers(pcxt);
+
+	scan = heap_beginscan_parallel(rel, pscan);
+
+	/* here's where we do the "real work" ... */
+	count_helper(scan, resultp);
+
+	WaitForParallelWorkersToFinish(pcxt);
+
+	heap_rescan(scan, NULL);
+	*resultp = 0;
+
+	ReinitializeParallelDSM(pcxt);
+	LaunchParallelWorkers(pcxt);
+
+	/* the first pass's count was discarded when resultp was reset above */
+
+	/* and here's where we do the real work again */
+	count_helper(scan, resultp);
+
+	WaitForParallelWorkersToFinish(pcxt);
+
+	result = *resultp;
+
+	heap_endscan(scan);
+
+	DestroyParallelContext(pcxt);
+
+	relation_close(rel, AccessShareLock);
+
+	ExitParallelMode();
+
+	PG_RETURN_INT64(result);
+}
+
+void
+count_worker_main(dsm_segment *seg, shm_toc *toc)
+{
+	ParallelHeapScanDesc	pscan;
+	Relation	rel;
+	HeapScanDesc	scan;
+	int64	   *resultp;
+
+	pscan = shm_toc_lookup(toc, TOC_SCAN_KEY);
+	resultp = shm_toc_lookup(toc, TOC_RESULT_KEY);
+	Assert(pscan != NULL && resultp != NULL);
+
+	rel = relation_open(pscan->phs_relid, AccessShareLock);
+	scan = heap_beginscan_parallel(rel, pscan);
+	count_helper(scan, resultp);
+	heap_endscan(scan);
+	relation_close(rel, AccessShareLock);
+}
+
+static void
+count_helper(HeapScanDesc scan, int64 *resultp)
+{
+	int64		mytuples = 0;
+	BlockNumber	firstblock = InvalidBlockNumber;
+
+	for (;;)
+	{
+		HeapTuple	tup = heap_getnext(scan, ForwardScanDirection);
+
+		if (!HeapTupleIsValid(tup))
+			break;
+		if (firstblock == InvalidBlockNumber)
+			firstblock = scan->rs_cblock;
+
+		++mytuples;
+	}
+
+	SpinLockAcquire(&scan->rs_parallel->phs_mutex); /* dirty hack */
+	*resultp += mytuples;
+	SpinLockRelease(&scan->rs_parallel->phs_mutex);
+
+	elog(NOTICE, "PID %d counted " INT64_FORMAT " tuples starting at block %u",
+		MyProcPid, mytuples, firstblock);
+}
diff --git a/contrib/parallel_dummy/parallel_dummy.control b/contrib/parallel_dummy/parallel_dummy.control
new file mode 100644
index 0000000..90bae3f
--- /dev/null
+++ b/contrib/parallel_dummy/parallel_dummy.control
@@ -0,0 +1,4 @@
+comment = 'Dummy parallel code'
+default_version = '1.0'
+module_pathname = '$libdir/parallel_dummy'
+relocatable = true