Re: [HACKERS] PATCH: pageinspect / add page_checksum and bt_page_items(bytea)

2017-03-31 Thread Tomas Vondra

On 03/31/2017 06:01 PM, Ashutosh Sharma wrote:


It seems like the latest patch (v4) shared by Tomas upthread is an
empty patch. If I am not wrong, please share the correct patch.
Thanks.



OMG, this is the second time I managed to generate an empty patch. I 
really need to learn not to do that ..


Attached is v5 of the patch, hopefully correct this time ;-)
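
For anyone who wants to try it, the new variant takes a raw page image
instead of a relation name, per the usage comment in the patch (index name
and block number here are just examples):

    -- same output columns as bt_page_items(text, int4), but reads the supplied page image
    SELECT * FROM bt_page_items(get_raw_page('t1_pkey', 1));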

regards

--
Tomas Vondra  http://www.2ndQuadrant.com
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
diff --git a/contrib/pageinspect/btreefuncs.c b/contrib/pageinspect/btreefuncs.c
index 3d69aa9..bc2d27c 100644
--- a/contrib/pageinspect/btreefuncs.c
+++ b/contrib/pageinspect/btreefuncs.c
@@ -41,6 +41,7 @@
 
 PG_FUNCTION_INFO_V1(bt_metap);
 PG_FUNCTION_INFO_V1(bt_page_items);
+PG_FUNCTION_INFO_V1(bt_page_items_bytea);
 PG_FUNCTION_INFO_V1(bt_page_stats);
 
 #define IS_INDEX(r) ((r)->rd_rel->relkind == RELKIND_INDEX)
@@ -253,14 +254,62 @@ struct user_args
 	OffsetNumber offset;
 };
 
+/*
+ * bt_page_print_tuples
+ *	form a tuple describing index tuple at a given offset
+ */
+static Datum
+bt_page_print_tuples(FuncCallContext *fctx, Page page, OffsetNumber offset)
+{
+	char	   *values[6];
+	HeapTuple	tuple;
+	ItemId		id;
+	IndexTuple	itup;
+	int			j;
+	int			off;
+	int			dlen;
+	char	   *dump;
+	char	   *ptr;
+
+	id = PageGetItemId(page, offset);
+
+	if (!ItemIdIsValid(id))
+		elog(ERROR, "invalid ItemId");
+
+	itup = (IndexTuple) PageGetItem(page, id);
+
+	j = 0;
+	values[j++] = psprintf("%d", offset);
+	values[j++] = psprintf("(%u,%u)",
+		   ItemPointerGetBlockNumberNoCheck(&itup->t_tid),
+		ItemPointerGetOffsetNumberNoCheck(&itup->t_tid));
+	values[j++] = psprintf("%d", (int) IndexTupleSize(itup));
+	values[j++] = psprintf("%c", IndexTupleHasNulls(itup) ? 't' : 'f');
+	values[j++] = psprintf("%c", IndexTupleHasVarwidths(itup) ? 't' : 'f');
+
+	ptr = (char *) itup + IndexInfoFindDataOffset(itup->t_info);
+	dlen = IndexTupleSize(itup) - IndexInfoFindDataOffset(itup->t_info);
+	dump = palloc0(dlen * 3 + 1);
+	values[j] = dump;
+	for (off = 0; off < dlen; off++)
+	{
+		if (off > 0)
+			*dump++ = ' ';
+		sprintf(dump, "%02x", *(ptr + off) & 0xff);
+		dump += 2;
+	}
+
+	tuple = BuildTupleFromCStrings(fctx->attinmeta, values);
+
+	return HeapTupleGetDatum(tuple);
+}
+
 Datum
 bt_page_items(PG_FUNCTION_ARGS)
 {
 	text	   *relname = PG_GETARG_TEXT_PP(0);
 	uint32		blkno = PG_GETARG_UINT32(1);
 	Datum		result;
-	char	   *values[6];
-	HeapTuple	tuple;
 	FuncCallContext *fctx;
 	MemoryContext mctx;
 	struct user_args *uargs;
@@ -300,6 +349,11 @@ bt_page_items(PG_FUNCTION_ARGS)
 		if (blkno == 0)
 			elog(ERROR, "block 0 is a meta page");
 
+		if (P_ISMETA(opaque))
+			ereport(ERROR,
+				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+				 errmsg("block is a meta page")));
+
 		CHECK_RELATION_BLOCK_RANGE(rel, blkno);
 
 		buffer = ReadBuffer(rel, blkno);
@@ -345,47 +399,8 @@ bt_page_items(PG_FUNCTION_ARGS)
 
 	if (fctx->call_cntr < fctx->max_calls)
 	{
-		ItemId		id;
-		IndexTuple	itup;
-		int			j;
-		int			off;
-		int			dlen;
-		char	   *dump;
-		char	   *ptr;
-
-		id = PageGetItemId(uargs->page, uargs->offset);
-
-		if (!ItemIdIsValid(id))
-			elog(ERROR, "invalid ItemId");
-
-		itup = (IndexTuple) PageGetItem(uargs->page, id);
-
-		j = 0;
-		values[j++] = psprintf("%d", uargs->offset);
-		values[j++] = psprintf("(%u,%u)",
-			   ItemPointerGetBlockNumberNoCheck(&itup->t_tid),
-			ItemPointerGetOffsetNumberNoCheck(&itup->t_tid));
-		values[j++] = psprintf("%d", (int) IndexTupleSize(itup));
-		values[j++] = psprintf("%c", IndexTupleHasNulls(itup) ? 't' : 'f');
-		values[j++] = psprintf("%c", IndexTupleHasVarwidths(itup) ? 't' : 'f');
-
-		ptr = (char *) itup + IndexInfoFindDataOffset(itup->t_info);
-		dlen = IndexTupleSize(itup) - IndexInfoFindDataOffset(itup->t_info);
-		dump = palloc0(dlen * 3 + 1);
-		values[j] = dump;
-		for (off = 0; off < dlen; off++)
-		{
-			if (off > 0)
-*dump++ = ' ';
-			sprintf(dump, "%02x", *(ptr + off) & 0xff);
-			dump += 2;
-		}
-
-		tuple = BuildTupleFromCStrings(fctx->attinmeta, values);
-		result = HeapTupleGetDatum(tuple);
-
-		uargs->offset = uargs->offset + 1;
-
+		result = bt_page_print_tuples(fctx, uargs->page, uargs->offset);
+		uargs->offset++;
 		SRF_RETURN_NEXT(fctx, result);
 	}
 	else
@@ -396,6 +411,90 @@ bt_page_items(PG_FUNCTION_ARGS)
 	}
 }
 
+/*---
+ * bt_page_items_bytea()
+ *
+ * Get IndexTupleData set in a btree page
+ *
+ * Usage: SELECT * FROM bt_page_items(get_raw_page('t1_pkey', 1));
+ *---
+ */
+
+Datum
+bt_page_items_bytea(PG_FUNCTION_ARGS)
+{
+	bytea	   *raw_page = PG_GETARG_BYTEA_P(0);
+	Datum		result;
+	FuncCallContext *fctx;
+	struct user_args *uargs;
+	int			raw_page_size;
+
+	if (!superuser())
+		ereport(ERROR,
+				(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
+				 errmsg("must be superuser to use pageinspect functions")));
+
+	if (SRF_

[HACKERS] Some never executed code regarding the table sync worker

2017-03-31 Thread Masahiko Sawada
Hi all,

After the table sync worker is launched, it enters the ApplyWorkerMain
function and then calls LogicalRepSyncTableStart to synchronize the
target table. In LogicalRepSyncTableStart, finish_sync_worker is always
called, so the table sync worker process always exits once the table is
synced. On the other hand, some source code seems to assume that the
table sync worker keeps running even after LogicalRepSyncTableStart.

For example, in ApplyWorkerMain, LogicalRepSyncTableStart returns the
replication slot name that was used for synchronization, but this code
is never executed.

if (am_tablesync_worker())
{
   char *syncslotname;

   /* This is table synchroniation worker, call initial sync. */
   syncslotname = LogicalRepSyncTableStart(&origin_startpos);

   /* The slot name needs to be allocated in permanent memory context. */
   oldctx = MemoryContextSwitchTo(ApplyCacheContext);
   myslotname = pstrdup(syncslotname);
   MemoryContextSwitchTo(oldctx);

   pfree(syncslotname);
}

--
Also, since the table sync worker never calls process_syncing_tables,
process_syncing_tables_for_sync is never executed.

/*
 * Process state possible change(s) of tables that are being synchronized.
 */
void
process_syncing_tables(XLogRecPtr current_lsn)
{
    if (am_tablesync_worker())
        process_syncing_tables_for_sync(current_lsn);
    else
        process_syncing_tables_for_apply(current_lsn);
}

Is this code intended for a future enhancement? Since leaving unused
code around is not good, I think we can get rid of it. The attached
patch removes the unnecessary code related to the table sync worker.
Please give me feedback.

Regards,

--
Masahiko Sawada
NIPPON TELEGRAPH AND TELEPHONE CORPORATION
NTT Open Source Software Center


unused_code_of_table_sync_worker.patch
Description: Binary data

-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Somebody has not thought through subscription locking considerations

2017-03-31 Thread Tom Lane
Masahiko Sawada  writes:
> On Fri, Mar 31, 2017 at 9:53 AM, Petr Jelinek
>  wrote:
>> On 30/03/17 07:25, Tom Lane wrote:
>>> I await with interest an explanation of what "VACUUM FULL pg_class" is
>>> doing trying to acquire ShareRowExclusiveLock on pg_subscription_rel, not
>>> to mention why a DROP SEQUENCE is holding some fairly strong lock on that
>>> relation.

> VACUUM FULL of any table acquires ShareRowExclusiveLock on
> pg_subscription_rel because, when doDeletion removes the old heap,
> RemoveSubscriptionRel is called from heap_drop_with_catalog.

This seems entirely horrid: it *guarantees* deadlock possibilities.
And I wonder what happens when I VACUUM FULL pg_subscription_rel
itself.

At the very least, it would be a good idea to exclude the system
catalogs from logical replication, wouldn't it?

regards, tom lane


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Allow to specify #columns in heap/index_form_tuple

2017-03-31 Thread Robert Haas
On Fri, Mar 31, 2017 at 1:24 PM, Andres Freund  wrote:
> The covering indexes patch [1] really needs a version of
> heap_form_tuple/index_form_tuple that allows specifying the number of
> columns in the to-be-generated tuple.  Previously the faster expression
> evaluation stuff could also have benefited from the same for both
> forming and deforming tuples.
>
> It's obviously trivial to add, codewise.
>
> I think for index_form_tuple we can just add a parameter, but for
> heap_form_tuple/heap_deform_tuple that'd probably break a number of
> external users.  How about adding _ex variants that allow specifying
> that?

I think our usual practice is _extended rather than just _ex, but no
objections otherwise.

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] [PATCH] Transaction traceability - txid_status(bigint)

2017-03-31 Thread Robert Haas
On Fri, Mar 31, 2017 at 11:08 AM, David Steele  wrote:
> On 3/31/17 10:46 AM, Craig Ringer wrote:
>> Patches 1 and 2 were the key parts and thanks to Robert's helpful
>> review, advice and edits they're committed now.
>>
>> Committed, done. Yay.
>
> Excellent.  I have marked this a "Committed" by Robert.
>
> One down...

...71 to go?

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] REFERENCES privilege should not be symmetric (was Re: [GENERAL] Postgres Permissions Article)

2017-03-31 Thread Robert Haas
On Fri, Mar 31, 2017 at 11:29 AM, Tom Lane  wrote:
> Robert Haas  writes:
>> On Thu, Mar 30, 2017 at 4:45 PM, Tom Lane  wrote:
>>> In short, it seems like this statement in the docs is correctly describing
>>> our code's behavior, but said behavior is wrong and should be changed.
>>> I'd propose fixing it like that in HEAD; I'm not sure if the back branches
>>> should also be changed.
>
>> Sounds reasonable, but I don't see much advantage to changing it in
>> the back-branches.
>
> Well, it's a SQL-compliance bug, and we often back-patch bug fixes.

Personally, I'd be more inclined to view it as a definitional change.
Maybe we picked the wrong definition before, but it's well-established
behavior at this point.  The SQL standard also says that identifiers
are supposed to be folded to upper case, so I don't think the theory
that every deviation from the standard should be fixed and
back-patched holds a lot of water.

> The argument for not back-patching a bug fix usually boils down to
> fear of breaking existing applications, but it's hard to see how
> removal of a permission check could break a working application ---
> especially when the permission check is as hard to trigger as this one.
> How many table owners ever revoke their own REFERENCES permission?

Sure, but that argument cuts both ways.  If nobody ever does that, who
will be helped by back-patching this?

I certainly agree that back-patching this change is pretty low risk.
I just don't think it has any real benefits.

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


[HACKERS] Allow to specify #columns in heap/index_form_tuple

2017-03-31 Thread Andres Freund
Hi,

The covering indexes patch [1] really needs a version of
heap_form_tuple/index_form_tuple that allows specifying the number of
columns in the to-be-generated tuple.  Previously the faster expression
evaluation stuff could also have benefited from the same for both
forming and deforming tuples.

It's obviously trivial to add, codewise.

I think for index_form_tuple we can just add a parameter, but for
heap_form_tuple/heap_deform_tuple that'd probably break a number of
external users.  How about adding _ex variants that allow specifying
that?

Regards,

Andres

[1] http://archives.postgresql.org/message-id/56168952.4010101%40postgrespro.ru


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] delta relations in AFTER triggers

2017-03-31 Thread Kevin Grittner
On Thu, Mar 30, 2017 at 11:51 AM, Kevin Grittner  wrote:

> New version attached.  It needs some of these problem cases added to
> the testing, and a mention in the docs that only C and plpgsql
> triggers can use the feature so far.  I'll add those tomorrow.

Done and attached.

Now the question is, should it be pushed?  It's been through just
about every CF in the last three years with little modification, and
finally got a thorough enough review in this CF that I think it can
be considered.  Here are the numbers:

 85 files changed, 2266 insertions(+), 132 deletions(-)

Of that, 70 lines are the plpgsql implementation (which I should
probably push separately), about 200 lines are docs and 623 lines
are new regression tests.  Most of the rest only comes into play if
the feature is used.

This adds support for a SQL-standard sub-feature, although only in
triggers written in C and plpgsql.  (Other PLs will probably require
fewer lines than plpgsql.)  It also provides infrastructure needed
to get incremental maintenance of materialized views based on just
simple declarative DDL.  Tom has expressed hope that it could be
used to improve performance and memory usage for AFTER triggers, and
I believe it can, but that should be a follow-on patch.  It
might provide the basis of set-based statement-level enforcement of
referential integrity, with the regression tests providing a rough
proof of concept.
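
For anyone unfamiliar with the feature, usage ends up looking roughly like
this sketch (made-up names, assuming some table t; not taken from the
patch's regression tests):

    CREATE FUNCTION audit_insert() RETURNS trigger
    LANGUAGE plpgsql AS $$
    BEGIN
        -- the transition table is visible as an ordinary relation here
        RAISE NOTICE 'inserted % rows', (SELECT count(*) FROM new_rows);
        RETURN NULL;
    END $$;

    CREATE TRIGGER t_audit
        AFTER INSERT ON t
        REFERENCING NEW TABLE AS new_rows
        FOR EACH STATEMENT EXECUTE PROCEDURE audit_insert();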

My inclination is to push it late today, but be ready to revert if
there are any hard-to-fix surprise problems missed in review and
testing; but if the general preference is to hold it for version 11,
that's OK with me, too.

--
Kevin Grittner


transition-v14.diff.gz
Description: GNU Zip compressed data

-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Variable substitution in psql backtick expansion

2017-03-31 Thread Tom Lane
Corey Huinker  writes:
> On Thu, Mar 30, 2017 at 1:33 PM, Tom Lane  wrote:
>> single-quoted according to Unix shell conventions.  (So the
>> processing would be a bit different from what it is for the
>> same notation in SQL contexts.)

> +1

Here's a proposed patch for this.  I used the existing appendShellString()
logic, which already knows how to quote stuff safely on both Unix and
Windows.  A small problem is that appendShellString() rejects LF and CR
characters.  As written, it just printed an error and did exit(1), which
is more or less OK for existing callers but seemed like a pretty bad idea
for psql.  I refactored it to get the behavior proposed in the patch,
which is that we print an error and decline to substitute the variable's
value, leading to executing the backtick command with the :'variable'
text still in place.  This is more or less the same thing that happens
for encoding-check failures in the other variable-substitution cases,
so it didn't seem too unreasonable.
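
As a rough illustration of the intended behavior (variable and file names
are made up; inside the backticks, :'filename' is substituted single-quoted
for the shell):

    \set filename 'some file with spaces & other shell metacharacters.txt'
    \set nlines `wc -l < :'filename'`
    \echo :nlines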

Perhaps it would be preferable to prevent execution of the backtick
command and/or fail the calling metacommand, but that seems to require
some very invasive refactoring (or else magic global variables), which
I didn't have the appetite for.

regards, tom lane

diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml
index b51b11b..ad463e7 100644
*** a/doc/src/sgml/ref/psql-ref.sgml
--- b/doc/src/sgml/ref/psql-ref.sgml
*** testdb=>
*** 770,786 
  
  
  
- Within an argument, text that is enclosed in backquotes
- (`) is taken as a command line that is passed to the
- shell. The output of the command (with any trailing newline removed)
- replaces the backquoted text.
- 
- 
- 
  If an unquoted colon (:) followed by a
  psql variable name appears within an argument, it is
  replaced by the variable's value, as described in .
  
  
  
--- 770,801 
  
  
  
  If an unquoted colon (:) followed by a
  psql variable name appears within an argument, it is
  replaced by the variable's value, as described in .
+ The forms :'variable_name' and
+ :"variable_name" described there
+ work as well.
+ 
+ 
+ 
+ Within an argument, text that is enclosed in backquotes
+ (`) is taken as a command line that is passed to the
+ shell.  The output of the command (with any trailing newline removed)
+ replaces the backquoted text.  Within the text enclosed in backquotes,
+ no special quoting or other processing occurs, except that appearances
+ of :variable_name where
+ variable_name is a psql variable name
+ are replaced by the variable's value.  Also, appearances of
+ :'variable_name' are replaced by the
+ variable's value suitably quoted to become a single shell command
+ argument.  (The latter form is almost always preferable, unless you are
+ very sure of what is in the variable.)  Because carriage return and line
+ feed characters cannot be safely quoted on all platforms, the
+ :'variable_name' form prints an
+ error message and does not substitute the variable value when such
+ characters appear in the value.
  
  
  
diff --git a/src/bin/psql/common.c b/src/bin/psql/common.c
index b06ae97..a2f1259 100644
*** a/src/bin/psql/common.c
--- b/src/bin/psql/common.c
*** setQFout(const char *fname)
*** 116,134 
   * If the specified variable exists, return its value as a string (malloc'd
   * and expected to be freed by the caller); else return NULL.
   *
!  * If "escape" is true, return the value suitably quoted and escaped,
!  * as an identifier or string literal depending on "as_ident".
!  * (Failure in escaping should lead to returning NULL.)
   *
   * "passthrough" is the pointer previously given to psql_scan_set_passthrough.
   * In psql, passthrough points to a ConditionalStack, which we check to
   * determine whether variable expansion is allowed.
   */
  char *
! psql_get_variable(const char *varname, bool escape, bool as_ident,
    void *passthrough)
  {
! 	char	   *result;
  	const char *value;
  
  	/* In an inactive \if branch, suppress all variable substitutions */
--- 116,134 
   * If the specified variable exists, return its value as a string (malloc'd
   * and expected to be freed by the caller); else return NULL.
   *
!  * If "quote" isn't PQUOTE_PLAIN, then return the value suitably quoted and
!  * escaped for the specified quoting requirement.  (Failure in escaping
!  * should lead to printing an error and returning NULL.)
   *
   * "passthrough" is the pointer previously given to psql_scan_set_passthrough.
   * In psql, passthrough points to a ConditionalStack, which we check to
   * determine whether variable expansion is allowed.
   */
  char *
! psql_get_variable(const char *varname, PsqlScanQuoteType quote,
    void *passthrough)
  {
! 	char	   *result = NULL;
  	const char *valu

Re: [HACKERS] postgres_fdw bug in 9.6

2017-03-31 Thread Jeff Janes
On Thu, Mar 23, 2017 at 5:20 AM, Etsuro Fujita 
wrote:

> On 2017/03/21 18:40, Etsuro Fujita wrote:
>
>> Ok, I'll update the patch.  One thing I'd like to revise in addition to
>> that is (1) add to JoinPathExtraData a flag member to indicate whether
>> to give the FDW a chance to consider a remote join, which will be set to
>> true if the joinrel's fdwroutine is not NULL and the fdwroutine's
>> GetForeignJoinPaths is not NULL, and (2) if the flag is true, save info
>> to create an alternative local join path, such as hashclauses and
>> mergeclauses proposed in the patch, into JoinPathExtraData in
>> add_paths_to_joinrel.  This would avoid useless overhead in saving such
>> info into JoinPathExtraData when we don't give the FDW that chance.
>>
>
> Done.  Attached is a new version of the patch.
>

Is the fix for 9.6.3 going to be just a back port of this, or will it look
different?

Cheers,

Jeff


Re: [HACKERS] delta relations in AFTER triggers

2017-03-31 Thread Kevin Grittner
On Thu, Mar 23, 2017 at 7:14 PM, Thomas Munro
 wrote:

> my only other comment would be a bikeshed issue:
> Enr isn't a great name for a struct.

I know, but EphemeralNamedRelation starts to get kinda long,
especially when making the normal sorts of concatenations.  I
started making the change and balked when I saw things like
EphemeralNamedRelationMetadataData and a function named
EphemeralNamedRelationMetadataGetTupDesc() in place of
EnrmdGetTupDesc().  A 40-character function name makes for a lot of
line-wrapping to stay within pgindent limits.  Any suggestions?

--
Kevin Grittner


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] [bug fix] Savepoint-related statements terminates connection

2017-03-31 Thread Alvaro Herrera
Ashutosh Bapat wrote:
> Please add this to the next commitfest.

If this cannot be reproduced in 9.6, then it must be added to the
Open Items wiki page instead.

-- 
Álvaro Herrera    https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] PATCH: pageinspect / add page_checksum and bt_page_items(bytea)

2017-03-31 Thread Ashutosh Sharma
Hi,

On Wed, Mar 29, 2017 at 8:38 PM, Tomas Vondra
 wrote:
>
>
> On 03/24/2017 04:27 AM, Peter Eisentraut wrote:
>>
>> On 3/17/17 18:35, Tomas Vondra wrote:
>>>
>>> On 03/17/2017 05:23 PM, Peter Eisentraut wrote:

 I'm struggling to find a good way to share code between
 bt_page_items(text, int4) and bt_page_items(bytea).

 If we do it via the SQL route, as I had suggested, it makes the
 extension non-relocatable, and it will also create a bit of a mess
 during upgrades.

 If doing it in C, it will be a bit tricky to pass the SRF context
 around.  There is no "DirectFunctionCall within SRF context", AFAICT.
>>>
>>>
>>> Not sure what it has to do with DirectFunctionCall? You want to call the
>>> bytea variant from the existing one? Wouldn't it be easier to simply
>>> define a static function with the shared parts, and pass around the
>>> fctx/fcinfo? Not quite pretty, but should work.
>>
>>
>> Perhaps what was added in
>>
>> 
>> would actually work here.
>>
>
> I've tried to refactor the code using this, but the result was rather ugly
> because (a) it really is quite tricky to pass around the contexts and (b)
> the sanity checks are quite different for the two input types, so mixing
> those parts (essentially the SRF_IS_FIRSTCALL bits) does not make much sense
> IMHO.
>
> The attached patch is the best I came up with - it essentially shares just
> the tuple-forming part, which is exactly the same in both cases.
>
> It also adds the P_ISMETA(opaque) check to the original function, which
> seems like a useful defense against page written to a different place (which
> is essentially the issue I was originally investigating).
>

It seems like the latest patch (v4) shared by Tomas upthread is an
empty patch. If I am not wrong, please share the correct patch.
Thanks.

-- 
With Regards,
Ashutosh Sharma
EnterpriseDB:http://www.enterprisedb.com


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] postgres_fdw: support parameterized foreign joins

2017-03-31 Thread Arthur Zakirov

Hello,

On 31.03.2017 18:47, David Steele wrote:

Hi Arthur.

On 3/23/17 8:45 AM, Etsuro Fujita wrote:


Done.  Also, I added regression tests and revised code and comments a
bit.  (As for create_foreignscan_path(), I haven't done anything about
that yet.)  Please find attached a new version created on top of [1].


Are you planning to review this patch?

Thanks,


Yes, I am planning to review the patch, but there was a lack of time this 
week. I have marked myself as a reviewer in the commitfest app.


--
Arthur Zakirov
Postgres Professional: http://www.postgrespro.com
Russian Postgres Company


--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] postgres_fdw: support parameterized foreign joins

2017-03-31 Thread David Steele

Hi Arthur.

On 3/23/17 8:45 AM, Etsuro Fujita wrote:

On 2017/03/21 18:38, Etsuro Fujita wrote:

On 2017/03/16 22:23, Arthur Zakirov wrote:

Can you rebase the patch? It is not applied now.



Ok, will do.  Thanks for the report!


Done.  Also, I added regression tests and revised code and comments a
bit.  (As for create_foreignscan_path(), I haven't done anything about
that yet.)  Please find attached a new version created on top of [1].


Are you planning to review this patch?

Thanks,
--
-David
da...@pgmasters.net


--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] [patch] reorder tablespaces in basebackup tar stream for backup_label

2017-03-31 Thread Michael Banck
On Fri, Mar 31, 2017 at 02:11:44PM +0900, Kyotaro HORIGUCHI wrote:
> At Fri, 31 Mar 2017 13:37:38 +0900, Michael Paquier 
>  wrote in 
> 
> > In my first reviews of the patch, I completely forgot the fact that
> > BASE_BACKUP does send the start LSN of the backup in the first result
> > set, so the patch proposed is actually rather useless because the data
> > you are looking for is already at hand. If more data would be
> > interesting to have, like the start timestamp number, we could just
> > extend the first result set a bit as Fujii-san is coming at. Let's
> > drop this patch and move on.
> 
> +1 for dropping this.

I've marked it as "Rejected" now, as that is clearly the consensus.
 
> But I think we should edit the documentation a bit.
> 
> I don't fully understand those who want to handle it by a script,
> but the documentation seems to be suggesting that something like
> that is possible. So it might be better to add a description like
> that or just remove the example.

The documentation says that "For the purpose of testing replication
commands" one could use psql and "IDENTIFY_SYSTEM"; it doesn't exactly
suggest running BASE_BACKUP this way.

Which, by the way, appears to be working totally fine from psql, modulo
the problem that you won't get to the starting transaction location.
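
For example, something along these lines works for the simple commands
(connection parameters are illustrative; IDENTIFY_SYSTEM returns the
system identifier, timeline, current WAL position, and database name):

    $ psql "dbname=postgres replication=database"
    postgres=# IDENTIFY_SYSTEM;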

> "psql doesn't handle this protocol properly. The instances of the
>  usage of these protocols are found in the source code of
>  walreceiver and pg_basebackup."

Something along those lines makes sense I think, yeah.


Michael

-- 
Michael Banck
Projektleiter / Senior Berater
Tel.: +49 2166 9901-171
Fax:  +49 2166 9901-100
Email: michael.ba...@credativ.de

credativ GmbH, HRB Mönchengladbach 12080
USt-ID-Nummer: DE204566209
Trompeterallee 108, 41189 Mönchengladbach
Geschäftsführung: Dr. Michael Meskes, Jörg Folz, Sascha Heuer


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


[HACKERS] pg_partman 3.0.0 - real-world usage of native partitioning and a case for native default

2017-03-31 Thread Keith Fiske
I've gotten pg_partman working with native partitioning already so I can
hopefully have things ready to work when 10 is released. I've got a branch
on github with this version for anyone to test and I'll hopefully have this
released in the next few weeks after I finish some more testing myself. Any
feedback would be appreciated!

https://github.com/keithf4/pg_partman/tree/v3.0.0

Thankfully since native partitioning still uses inheritance internally for
the most part, pg_partman works pretty well without nearly as much change
as I thought I would need. The biggest deficiency I'm seeing has to do with
not having a "default" partition to put data that doesn't match any
children. The fact that it throws an error is a concern, but it's not where
I see the main problem. Where this really comes into play is when someone
wants to make an existing table into a partitioned table. There's really no
easy way to do this outside of making a completely brand new partition set
and copying/moving the data from the old to the new. Yes there are
technically ways to do this fairly seamlessly to the user of the
partitioned table, but the complexity of those methods seems absurd in the
face of just allowing a default partition.

A default would basically allow a method similar to what pg_partman does
prior to native. Before I would just make the old table the parent and then
the user could move data to the children as needed, eventually leaving the
parent empty. All data is still accessible during this period and new
writes go to the new children. The old table can't be made the parent with
native obviously, but being able to define a default partition would allow
defining the old table as the default and pretty much give the exact same,
easy migration path. Without defining it as a default, you really can't
attach the old table since it would require a constraint interval that
would likely interfere with other children.
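
To make the request concrete, the migration could look something like the
sketch below. The DEFAULT clause is hypothetical syntax (nothing like it
exists in the current partitioning code); the PARTITION BY / ATTACH
PARTITION parts are the PostgreSQL 10 syntax, and all names are made up:

    -- new partitioned parent replacing the old standalone table "events_old"
    CREATE TABLE events (id bigint, created_at timestamptz, payload text)
        PARTITION BY RANGE (created_at);

    CREATE TABLE events_2017_04 PARTITION OF events
        FOR VALUES FROM ('2017-04-01') TO ('2017-05-01');

    -- hypothetical: keep the old table readable as the catch-all partition
    -- while its rows are gradually moved into the proper children
    ALTER TABLE events ATTACH PARTITION events_old DEFAULT;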

I recall reading before that there are performance implications of having
the default. I think as long as those performance issues are clearly
documented and have no effect if there is no default, it shouldn't be a
concern that would hold this feature up. I believe the benefit of the
default partition to the migration process makes it more than worth it and
I'm hoping it's possible to get into 10 so users can more easily use this
new feature without having to wait for the next major version. Spoke with
several core members at PGConf this year and they asked me to send an email
to hackers making my case, so here it is!

--
Keith Fiske
Database Administrator
OmniTI Computer Consulting, Inc.
http://www.keithf4.com


Re: [HACKERS] REFERENCES privilege should not be symmetric (was Re: [GENERAL] Postgres Permissions Article)

2017-03-31 Thread Tom Lane
Robert Haas  writes:
> On Thu, Mar 30, 2017 at 4:45 PM, Tom Lane  wrote:
>> In short, it seems like this statement in the docs is correctly describing
>> our code's behavior, but said behavior is wrong and should be changed.
>> I'd propose fixing it like that in HEAD; I'm not sure if the back branches
>> should also be changed.

> Sounds reasonable, but I don't see much advantage to changing it in
> the back-branches.

Well, it's a SQL-compliance bug, and we often back-patch bug fixes.

The argument for not back-patching a bug fix usually boils down to
fear of breaking existing applications, but it's hard to see how
removal of a permission check could break a working application ---
especially when the permission check is as hard to trigger as this one.
How many table owners ever revoke their own REFERENCES permission?
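
For reference, the scenario under discussion is roughly this (names
illustrative): under the proposed fix, only REFERENCES on the referenced
table would be required, not on the referencing table the user already owns.

    GRANT REFERENCES ON pk_table TO fk_owner;
    -- then, as fk_owner (who owns fk_table):
    ALTER TABLE fk_table ADD FOREIGN KEY (pk_id) REFERENCES pk_table (id);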

regards, tom lane


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] multivariate statistics (v25)

2017-03-31 Thread David Rowley
On 31 March 2017 at 21:18, Kyotaro HORIGUCHI <
horiguchi.kyot...@lab.ntt.co.jp> wrote:

> > When adding these two parameters I had 2nd thoughts that the "tryextstats"
> > was required at all. We could just have this controlled by if the rel is a
> > base rel of kind RTE_RELATION. I ended up having to pass these parameters
> > further, down to clauselist_selectivity's singleton counterpart,
> > clause_selectivity(). This was due to clause_selectivity() calling
> > clauselist_selectivity() for some clause types. I'm not entirely sure if
> > this is actually required, but I can't see any reason for it to cause
> > problems.
>
> I understand that the reason for tryextstats is that the two are
> perfectly correlated, but clause_selectivity requires the
> RelOptInfo anyway. Some comment about that may be required in the
> function comment.


hmm, you could say one is functionally dependent on the other. I did
consider removing it, but it seemed weird to pass a NULL relation when we
don't want to attempt to use extended stats.


> Some random comments by just looking on the patch:
>
> ==
> The names of the functions "collect_ext_attnums" and
> "clause_is_ext_compatible" seem odd since "ext" doesn't seem to
> be a part of "extended statistics". Some other names look the
> same, too.
>

I agree. I've made some changes to the patch to change how the functional
dependency estimations are applied. I've removed most of the code from
clausesel.c and put it into dependencies.c. In doing so I've removed some
of the inefficiencies that were in the patch.  For example
clause_is_ext_compatible() was being called many times on the same clause
at different times. I've now nailed that down to just once per clause.


> Something like "collect_e(xt)stat_compatible_attnums" and
> "clause_is_e(xt)stat_compatible" seem better to me.
>
>
Changed to dependency_compatible_clause(), since this was searching for
equality clauses in the form Var = Const, or Const = Var. This seems
specific to the functional dependencies checking. A multivariate histogram
won't want the same.
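
As an illustration of the kind of clause list this targets (table and
column names are made up, and the exact CREATE STATISTICS syntax was still
under discussion in this patch series, so treat it as a sketch):

    CREATE STATISTICS zip_city_stats (dependencies) ON (zip, city) FROM zipcodes;

    -- both clauses are the simple "Var = Const" equalities that
    -- dependency_compatible_clause() accepts, so the functional
    -- dependency (zip -> city) can be applied to avoid underestimation
    SELECT * FROM zipcodes WHERE zip = '11000' AND city = 'Prague';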


> ==
> The following comment seems something wrong.
>
> + * When applying functional dependencies, we start with the strongest ones
> + * strongest dependencies. That is, we select the dependency that:
>
> ==
> dependency_is_fully_matched() is not found. Maybe some other
> patches are assumed?
>
> ==
> +   /* see if it actually has the right */
> +   ok = (NumRelids((Node *) expr) == 1) &&
> +   (is_pseudo_constant_clause(lsecond(expr->args)) ||
> +(varonleft = false,
> + is_pseudo_constant_clause(linitial(expr->args))));
> +
> +   /* unsupported structure (two variables or so) */
> +   if (!ok)
> +   return true;
>
> Ok is used only here. I don't think expressions with side
> effects are a good idea here.
>
>
I thought the same, but I happened to notice that Tomas must have taken it
from clauselist_selectivity().


> ==
> +   switch (get_oprrest(expr->opno))
> +   {
> +   case F_EQSEL:
> +
> +   /* equality conditions are compatible with all statistics */
> +   break;
> +
> +   default:
> +
> +   /* unknown estimator */
> +   return true;
> +   }
>
> This seems somewhat stupid..
>

I agree. Changed.

I've attached an updated patch.

-- 
 David Rowley   http://www.2ndQuadrant.com/
 PostgreSQL Development, 24x7 Support, Training & Services


mv_functional-deps_2017-04-01.patch
Description: Binary data

-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Variable substitution in psql backtick expansion

2017-03-31 Thread Tom Lane
"Daniel Verite"  writes:
> ISTM that expr is too painful to use to be seen as the
> idiomatic way of achieving comparison in psql.

I'm not proposing it as the best permanent solution, just saying
that having this in v10 is a lot better than having nothing in v10.
And we certainly aren't going to get any more-capable boolean
expression syntax into v10 at this point.

> Among its disadvantages, it won't work on windows, and its
> interface is hard to work with due to the necessary
> quoting of half its operators, and the mandatory spacing
> between arguments.
> Also the quoting rules and command line syntax
> depend on the underlying shell.

All true, but that's true of just about any use of backtick
or \! commands.  Shall we remove those features because they
are hard to use 100% portably?

> Isn't it going to be tricky to produce code that works
> across different families of shells, like bourne and csh?

Probably 95% of our users do not really care about that,
because they're only trying to produce scripts that will
work in their own environment.

> I think that users would rather have the option to just put
> an SQL expression behind \if. That implies a working connection
> to evaluate, which expr doesn't, but that's no
> different from the other backslash commands that read
> the database.

Well, it also implies being in a non-aborted transaction,
which seems like more of a problem to me for \if scripting.
Certainly there would be value in an option like that, but
it's not any more of a 100% solution than the `expr` one is.

Also, I didn't think I'd have to point it out, but expr(1)
is hardly the only command you can call from a backtick
substitution.  There are *lots* of potential use-cases
here ... but not so many if you can't plug any variable
material into the shell command.
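
For instance, something along these lines (exact quoting depends on the
shell, the variable substitution inside backticks is the patch discussed
above, and expr prints 1 or 0, which \if accepts):

    \set nrows 100
    \set threshold 10
    \if `expr :nrows \> :threshold`
        \echo big table
    \else
        \echo small table
    \endif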

regards, tom lane


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Variable substitution in psql backtick expansion

2017-03-31 Thread Tom Lane
Corey Huinker  writes:
> On Thu, Mar 30, 2017 at 1:33 PM, Tom Lane  wrote:
>> single-quoted according to Unix shell conventions.  (So the
>> processing would be a bit different from what it is for the
>> same notation in SQL contexts.)

> Any reason we wouldn't do :"VARIABLE" as well?

Well, what would that mean exactly?  The charter of :'FOO', I think,
is to get the string value through shell parsing unscathed.  I'm a
lot less clear on what :"FOO" ought to do.  If it has any use then
I'd imagine that that includes allowing $shell_variable references in
the string to become expanded.  But then you have a situation where some
shell special characters in the string are supposed to take effect but
others presumably still aren't.  Figuring out the exact semantics would
take some specific use-cases, and more time than I really have available
right now.

In short, that's something I thought was best left as a later
refinement, rather than doing a rush job we might regret.

> People might expect it given
> its use elsewhere, and it would make possible things like

> SELECT '$HOME/lamentable application name dir/bin/myprog' as myprog \gset
> `:"myprog" arg1 arg2`

You could get about 80% of the way there with `":myprog" arg1 arg2`,
since backtick processing doesn't have any rule that would prevent
:myprog from being expanded inside shell double quotes.

regards, tom lane


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] [PATCH] Transaction traceability - txid_status(bigint)

2017-03-31 Thread David Steele

On 3/31/17 10:46 AM, Craig Ringer wrote:


Patches 1 and 2 were the key parts and thanks to Robert's helpful
review, advice and edits they're committed now.

Committed, done. Yay.


Excellent.  I have marked this a "Committed" by Robert.

One down...

--
-David
da...@pgmasters.net


--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] PATCH: Batch/pipelining support for libpq

2017-03-31 Thread David Steele

Hi,

On 3/30/17 2:12 PM, Daniel Verite wrote:

Vaishnavi Prabakaran wrote:


Hmm, with batch mode, after sending a COPY command to the server (and the
server has started processing the query and gone into COPY state), the
client does not immediately read the result, but keeps sending other
queries to the server. By this time, the server has already encountered
the error scenario (receiving a different message during COPY state) and
sent error messages.


IOW, the test intentionally violates the protocol and then all goes wonky
because of that.
That's why I was wondering upthread what it's supposed to test.
I mean, regression tests are meant to warn against a desirable behavior
being unknowingly changed by new code into an undesirable behavior.
Here we have the undesirable behavior to start with.
What kind of regression could we fear from that?


The CF has been extended until April 7 but time is still growing short. 
Please respond with a new patch by 2017-04-04 00:00 AoE (UTC-12) or this 
submission will be marked "Returned with Feedback".


Thanks,
--
-David
da...@pgmasters.net


--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Other formats in pset like markdown, rst, mediawiki

2017-03-31 Thread Pierre Ducroquet

On Friday, March 31, 2017 7:17:08 AM CEST, Jan Michálek wrote:



2017-03-30 21:53 GMT+02:00 Pavel Stehule :


2017-03-29 20:11 GMT+02:00 Jan Michálek :


2017-03-27 19:41 GMT+02:00 Jan Michálek :


2017-03-23 17:26 GMT+01:00 Pierre Ducroquet :
The following review has been posted through the commitfest application:
make installcheck-world:  tested, passed
Implements feature:   tested, passed
Spec compliant:   tested, passed
Documentation:tested, passed

Hi

This is my first review (Magnus said in his presentation in 
PGDay Paris that volunteers should just come and help, so here I 
am), so please notify me for any mistake I do when using the 
review tools...


The feature seems to work as expected, but I don't claim to be 
a markdown and rst expert.

Some minor issues with the code itself :
- some indentation issues (documentation and code itself with 
mix between space based and tab based indentation) and a few 
trailing spaces in code


corrected
 
- typographic issues in the documentation :
  - "The html, asciidoc, latex, latex-longtable, troff-ms, and 
markdown and rst formats" ==> duplicated and


corrected 
  - "Sets the output format to one of unaligned, aligned, 
wrapped, html, asciidoc, latex (uses tabular), latex-longtable, 
rst, markdown, or troff-ms." ==> extra comma at the end of the 
list
- the comment " dont add line after last row, because line is 
added after every row" is misleading, it should warn that it's 
only for rst

- there is a block of commented out code left
- in the print_aligned_vertical function, there is a mix 
between "cont->opt->format == PRINT_RST" and "format == &pg_rst" 
and I don't see any obvious reason for that
corrected 
- the documentation doesn't mention (but ok, it's kind of 
obvious) that the linestyle option will not work with rst and 
markdown



In this patch the indentation issues are corrected (I hope; I had the
correct changes in vimrc). Please look at whether it is OK (I mean the
indentation) and at some minor errors. And it should work on current
master (probably).


Added \x option for markdown.
In markdown, multiline cells work (newline replaced by ).
Regression tests passed.

\pset format rst
\x
select 10
crashes with a segfault

Program received signal SIGSEGV, Segmentation fault.
0x7f77673a866c in vfprintf () from /lib64/libc.so.6
(gdb) bt
#0  0x7f77673a866c in vfprintf () from /lib64/libc.so.6
#1  0x7f77673b1574 in fprintf () from /lib64/libc.so.6
#2  0x00437bc5 in print_aligned_vertical 
(cont=0x7fffade43da0, fout=, 
is_pager=) at print.c:1755
#3  0x0043a70d in printTable 
(cont=cont@entry=0x7fffade43da0, fout=, 
fout@entry=0x7f77677255e0 <_IO_2_1_stdout_>, 
is_pager=, is_pager@entry=0 '\000', 
flog=flog@entry=0x0) at print.c:3466
#4  0x0043c37f in printQuery 
(result=result@entry=0x9c4b60, opt=opt@entry=0x7fffade43f00, 
fout=0x7f77677255e0 <_IO_2_1_stdout_>, 
is_pager=is_pager@entry=0 '\000', flog=0x0) at print.c:3551
#5  0x0040da6d in PrintQueryTuples (results=0x9c4b60) 
at common.c:808

#6  PrintQueryResults (results=0x9c4b60) at common.c:1140
#7  SendQuery (query=0x9c1700 "select 10;") at common.c:1317
#8  0x0041c3d4 in MainLoop (source=0x7f77677248a0 
<_IO_2_1_stdin_>) at mainloop.c:319
#9  0x00405d5d in main (argc=, 
argv=) at startup.c:396


Regards

On the source from Monday it works (the last commit on master I have is
from 27.3 14:30). Or maybe I didn't generate the diff well, or it's some
git issue.


I agree with Pavel, there is a segfault when you do this with your current
patch. The current patch does not pass make check-world.
How did you generate the diff? Basically, the simplest way to generate a
patch series is through git format-patch.
For instance, say you have a rstFormat branch freshly rebased upon
origin/master: just do git format-patch origin/master..rstFormat and you
will have one patch file per commit.

And don't forget to commit everything :)

I hope this helps. I don't have enough time to go through the patch and
find out what is causing the segfault right now.



--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


[HACKERS] Re: PATCH: pageinspect / add page_checksum and bt_page_items(bytea)

2017-03-31 Thread David Steele

On 3/29/17 11:08 AM, Tomas Vondra wrote:


The attached patch is the best I came up with - it essentially shares
just the tuple-forming part, which is exactly the same in both cases.


I have marked this submission "Needs Review".

--
-David
da...@pgmasters.net


--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] [PATCH] Transaction traceability - txid_status(bigint)

2017-03-31 Thread Craig Ringer
On 31 Mar. 2017 22:31, "David Steele"  wrote:

On 3/25/17 12:12 AM, Peter Eisentraut wrote:

> I'm wondering if this is a perl version/platform issue around
>>
>> $tx->pump until $stdout =~ /[[:digit:]]+[\r\n]$/;
>>
>> where we're not recognising the required output from psql when we get it.
>>
>> What's in src/test/recovery/tmp_check/log/regress_log_011* ?
>>
>> I couldn't use PostgresNode::psql or PostgresNode::safe_psql here
>> because the session must stay open.
>>
>
> The problem was that psql needs to be called with -X.  Fix committed.
>

It's not clear to me what remains to be done on this patch.  I feel, at the
least, that patches 3 and 4 need to be rebased and the status set back to
"Needs Review".

This thread has been idle for six days.  Please respond with a new patch by
2017-04-04 00:00 AoE (UTC-12) or this submission will be marked "Returned
with Feedback".


I consider the feature complete and committed.

Patch 3 is optional and per discussion buried upthread we generally agreed
that it'd be better to replace most user visible uses of the 'xid' data
type with a new epoch extended 'xid64' or similar.

Patch 4, txid_incinerate, has never been intended for commit. It's a
testing tool.

Patches 1 and 2 were the key parts and thanks to Robert's helpful review,
advice and edits they're committed now.

Committed, done. Yay.
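
For completeness, the committed interface boils down to something like
this (the literal xid is illustrative):

    -- txid_status(bigint) returns 'committed', 'aborted', 'in progress',
    -- or NULL if the transaction is too old for its status to be retained
    SELECT txid_status(txid_current());
    SELECT txid_status(123456);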


Re: [HACKERS] Adding support for Default partition in partitioning

2017-03-31 Thread David Steele

On 3/29/17 8:13 AM, Rahila Syed wrote:


Thanks for reporting. I have identified the problem and have a fix.
Currently working on allowing
adding a partition after default partition if the default partition does
not have any conflicting rows.
Will update the patch with both of these.


The CF has been extended until April 7, but time is still growing 
short.  Please respond with a new patch by 2017-04-04 00:00 AoE (UTC-12) 
or this submission will be marked "Returned with Feedback".


Thanks,
--
-David
da...@pgmasters.net


--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] GUC for cleanup indexes threshold.

2017-03-31 Thread Robert Haas
On Wed, Mar 29, 2017 at 2:23 AM, Masahiko Sawada  wrote:
> I was thinking that the status of this patch is still "Needs review"
> because I sent the latest version of the patch [1].

I think you're right.

I took a look at this today.  I think there is some problem with the
design of this patch.  I originally proposed a threshold based on the
percentage of not-all-visible pages on the theory that we'd just skip
looking at the indexes altogether in that case.  But that's not what
the patch does: it only avoids the index *cleanup*, not the index
*vacuum*.  And the comments in btvacuumcleanup say this:

/*
 * If btbulkdelete was called, we need not do anything, just return the
 * stats from the latest btbulkdelete call.  If it wasn't called, we must
 * still do a pass over the index, to recycle any newly-recyclable pages
 * and to obtain index statistics.
 *
 * Since we aren't going to actually delete any leaf items, there's no
 * need to go through all the vacuum-cycle-ID pushups.
 */

So, if I'm reading this correctly, the only time this patch saves
substantial work - at least in the case of a btree index - is in the
case where there are no dead tuples at all.  But if we only want to
avoid the work in that case, then a threshold based on the percentage
of all-visible pages seems like the wrong thing, because the other
stuff btvacuumcleanup() is doing doesn't have anything to do with the
number of all-visible pages.

I'm not quite sure what the right thing to do is here, but I'm
doubtful that this is it.

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] BRIN cost estimate

2017-03-31 Thread David Steele

On 3/26/17 7:44 AM, Emre Hasegeli wrote:

If we want to have a variable which stores the number of ranges, then
I think numRanges is better than numBlocks. I can't imagine many
people would disagree there.


I renamed it with other two.


At the very least please write a comment to explain this in the code.
Right now it looks broken. If I noticed this then one day in the
future someone else will. If you write a comment then person of the
future will likely read it, and then not raise any questions about the
otherwise questionable code.


I added a sentence about it.


I do strongly agree that the estimates need improved here. I've
personally had issues with bad brin estimates before, and I'd like to
see it improved. I think the patch is not quite complete without it
also considering stats on expression indexes. If you have time to go
do that I'd suggest you go ahead with that.


I copy-pasted expression index support from btcostestimate() as well,
and extended the regression test.

I think this function can use more polishing before committing, but I
don't know where to begin.  There are single functions for every
access method in here.  I don't like them being in there to begin
with.  selfuncs.c is quite long with all kinds of dependencies and
dependents.  I think it would be better to have the access method
selectivity estimation functions under src/access.  Maybe we should
start doing so by moving this to src/access/brin/brin_selfuncs.c.
What do you think?


I have marked this patch "Needs Review".

--
-David
da...@pgmasters.net


--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Variable substitution in psql backtick expansion

2017-03-31 Thread Pavel Stehule
2017-03-31 15:00 GMT+02:00 Daniel Verite :

> Tom Lane wrote:
>
> > Thoughts?
>
> ISTM that expr is too painful to use to be seen as the
> idiomatic way of achieving comparison in psql.
>
> Among its disadvantages, it won't work on windows, and its
> interface is hard to work with due to the necessary
> quoting of half its operators, and the mandatory spacing
> between arguments.
>
> Also the quoting rules and command line syntax
> depend on the underlying shell.
> Isn't it going to be tricky to produce code that works
> across different families of shells, like bourne and csh?
>
> I think that users would rather have the option to just put
> an SQL expression behind \if. That implies a working connection
> to evaluate, which expr doesn't, but that's no
> different from the other backslash commands that read
> the database.
>

Some basic expressions can be handled on the client side, like defvar,
serverver, ... but generic expressions should be evaluated on the server -
I am not sure that is what we want, when I already have PL/pgSQL.

In psql scripting I expect really simple expressions - but they should
work everywhere - relying on bash is not good enough.

Regards

Pavel



>
>
> Best regards,
> --
> Daniel Vérité
> PostgreSQL-powered mailer: http://www.manitou-mail.org
> Twitter: @DanielVerite
>
>
> --
> Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
> To make changes to your subscription:
> http://www.postgresql.org/mailpref/pgsql-hackers
>


Re: [HACKERS] [PATCH] Transaction traceability - txid_status(bigint)

2017-03-31 Thread David Steele

On 3/25/17 12:12 AM, Peter Eisentraut wrote:

I'm wondering if this is a perl version/platform issue around

$tx->pump until $stdout =~ /[[:digit:]]+[\r\n]$/;

where we're not recognising the required output from psql when we get it.

What's in src/test/recovery/tmp_check/log/regress_log_011* ?

I couldn't use PostgresNode::psql or PostgresNode::safe_psql here
because the session must stay open.


The problem was that psql needs to be called with -X.  Fix committed.


It's not clear to me what remains to be done on this patch.  I feel, at 
the least, that patches 3 and 4 need to be rebased and the status set 
back to "Needs Review".


This thread has been idle for six days.  Please respond with a new patch 
by 2017-04-04 00:00 AoE (UTC-12) or this submission will be marked 
"Returned with Feedback".


--
-David
da...@pgmasters.net


--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Patch: Write Amplification Reduction Method (WARM)

2017-03-31 Thread Pavan Deolasee
On Fri, Mar 31, 2017 at 6:47 PM, Robert Haas  wrote:
>
>
> 2. WARM is a non-optional feature which touches the on-disk format.
> There is nothing more dangerous than that.  If hash indexes have bugs,
> people can avoid those bugs by not using them; there are good reasons
> to suppose that hash indexes have very few existing users.  The
> expression evaluation changes, IMHO, are much more dangerous because
> everyone will be exposed to them, but they will not likely corrupt
> your data because they don't touch the on-disk format.  WARM is even a
> little more dangerous than that; everyone is exposed to those bugs,
> and in the worst case they could eat your data.
>
>
Having worked on it for some time now, I can say that WARM uses pretty much
the same infrastructure that HOT uses for cleanup/pruning tuples from the
heap. So the risk of having a bug which can eat your data from the heap is
very low. Sure, it might mess up indexes, return duplicate keys, or not
return a row when it should have. Not saying those are not bad bugs, but
probably much less severe than someone removing live rows from the heap.

And we can make it a table-level property, keep it off by default, turn it
off on system tables in this release, and change the defaults only when we
get more confidence, assuming people use it by explicitly turning it on.
Now maybe that's not the right approach, and keeping it off by default will
mean it receives much less testing than we would like. So we can keep it on
in the beta cycle and then take a call. I went to great lengths to make it
work on system tables because, during HOT development, Tom told me that it
had better work for everything or not at all. But with WARM it works for
system tables and there are no known bugs; still, if we don't want to risk
system tables, we might want to turn it off (just prior to release, maybe).

Thanks,
Pavan

-- 
 Pavan Deolasee   http://www.2ndQuadrant.com/
 PostgreSQL Development, 24x7 Support, Training & Services


Re: [HACKERS] GUC for cleanup indexes threshold.

2017-03-31 Thread David Steele

On 3/29/17 2:23 AM, Masahiko Sawada wrote:

On Wed, Mar 29, 2017 at 12:23 AM, David Steele  wrote:

On 3/23/17 1:54 AM, Masahiko Sawada wrote:


On Wed, Mar 15, 2017 at 7:51 AM, Peter Geoghegan  wrote:


On Tue, Mar 14, 2017 at 3:10 PM, Peter Geoghegan  wrote:


We already have BTPageOpaqueData.btpo, a union whose contained type
varies based on the page being dead. We could just do the same with
some other field in that struct, and then store epoch there. Clearly
nobody really cares about most data that remains on the page. Index
scans just need to be able to land on it to determine that it's dead,
and VACUUM needs to be able to determine whether or not there could
possibly be such an index scan at the time it considers recycling..



ISTM that we need all of the fields within BTPageOpaqueData even for
dead pages, actually. The left links and right links still need to be
sane, and the flag bits are needed. Plus, the field that stores an XID
already is clearly necessary. Even if they weren't needed, it would
probably still be a good idea to keep them around for forensic
purposes. However, the page header field pd_prune_xid is currently
unused for indexes, and is the same width as CheckPoint.nextXidEpoch
(the extra thing we might want to store -- the epoch).

Maybe you could store the epoch within that field when B-Tree VACUUM
deletes a page, and then compare that within _bt_page_recyclable(). It
would come before the existing XID comparison in that function. One
nice thing about this idea is that pd_prune_xid will be all-zero for
index pages from the current format, so there is no need to take
special care to make sure that databases that have undergone
pg_upgrade don't break.
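For illustration, a minimal sketch of what that check could look like, folding the saved epoch and btpo.xact into a single 64-bit comparison (the function name, the current_epoch argument, and the handling of RecentGlobalXmin's own epoch are assumptions for the example, not a patch):

    static bool
    bt_page_recyclable_with_epoch(Page page, uint32 current_epoch)
    {
        BTPageOpaque opaque;

        if (PageIsNew(page))
            return true;

        opaque = (BTPageOpaque) PageGetSpecialPointer(page);
        if (P_ISDELETED(opaque))
        {
            /* epoch VACUUM would have saved in pd_prune_xid at deletion */
            uint32      deletion_epoch = (uint32) ((PageHeader) page)->pd_prune_xid;
            uint64      full_deletion_xid;
            uint64      full_oldest_xmin;

            full_deletion_xid = ((uint64) deletion_epoch << 32) | opaque->btpo.xact;
            full_oldest_xmin = ((uint64) current_epoch << 32) | RecentGlobalXmin;

            /* safe to recycle once no index scan can still land on the page */
            return full_deletion_xid < full_oldest_xmin;
        }

        return false;
    }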



Thank you for the suggestion!
If we store the epoch within the union field, I think we will not be able
to use BTPageOpaqueData.btpo.xact at the same time. Since comparing
btpo.xact is still necessary to determine whether that page is recyclable,
we cannot store the epoch in the same union field. And if we store
it elsewhere in BTPageOpaqueData, it would break disk compatibility.



I have marked this patch "Waiting for Author".

This thread has been idle for five days.  Please respond with a new patch by
2017-03-30 00:00 AoE (UTC-12) or this submission will be marked "Returned
with Feedback".



I was thinking that the status of this patch is still "Needs review"
because I sent the latest version of the patch [1]. We are still discussing
how safely we can skip the cleanup vacuum on a btree index. That
patch has some restrictions, but it would be a good first step.


I've marked this patch as "Needs Review".  Feel free to do that on your 
own if you think I've made a mistake in classifying a patch.


My view is that the patch is stalled and something might be required on 
your part to get it moving again, perhaps trying another approach.


--
-David
da...@pgmasters.net


--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


[HACKERS] [PATCH] Remove unused argument in btree_xlog_split

2017-03-31 Thread Aleksander Alekseev
Hi,

It turned out that there is an unused argument `isroot` in the
`btree_xlog_split` function. The suggested patch fixes it.

This issue was discovered by Anastasia Lubennikova, coding is done by me.

-- 
Best regards,
Aleksander Alekseev
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index ac60db0d49..2db8f13cae 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -193,7 +193,7 @@ btree_xlog_insert(bool isleaf, bool ismeta, XLogReaderState *record)
 }
 
 static void
-btree_xlog_split(bool onleft, bool isroot, XLogReaderState *record)
+btree_xlog_split(bool onleft, XLogReaderState *record)
 {
 	XLogRecPtr	lsn = record->EndRecPtr;
 	xl_btree_split *xlrec = (xl_btree_split *) XLogRecGetData(record);
@@ -996,16 +996,12 @@ btree_redo(XLogReaderState *record)
 			btree_xlog_insert(false, true, record);
 			break;
 		case XLOG_BTREE_SPLIT_L:
-			btree_xlog_split(true, false, record);
-			break;
-		case XLOG_BTREE_SPLIT_R:
-			btree_xlog_split(false, false, record);
-			break;
 		case XLOG_BTREE_SPLIT_L_ROOT:
-			btree_xlog_split(true, true, record);
+			btree_xlog_split(true, record);
 			break;
+		case XLOG_BTREE_SPLIT_R:
 		case XLOG_BTREE_SPLIT_R_ROOT:
-			btree_xlog_split(false, true, record);
+			btree_xlog_split(false, record);
 			break;
 		case XLOG_BTREE_VACUUM:
 			btree_xlog_vacuum(record);


signature.asc
Description: PGP signature


Re: [HACKERS] Fix obsolete comment in GetSnapshotData

2017-03-31 Thread Robert Haas
On Wed, Mar 29, 2017 at 12:00 AM, Craig Ringer  wrote:
> There's an outdated reference to GetOldestXmin(true, true) in
> GetSnapshotData. It hasn't had that call signature for a long while
> now. Update the comment to reflect the current signature.
>
> diff --git a/src/backend/storage/ipc/procarray.c
> b/src/backend/storage/ipc/procarray.c
> index f32881b..4bf0243 100644
> --- a/src/backend/storage/ipc/procarray.c
> +++ b/src/backend/storage/ipc/procarray.c
> @@ -1556,7 +1556,8 @@ GetMaxSnapshotSubxidCount(void)
>   *  older than this are known not running any more.
>   *  RecentGlobalXmin: the global xmin (oldest TransactionXmin across all
>   *  running transactions, except those running LAZY VACUUM).  This is
> - *  the same computation done by GetOldestXmin(true, true).
> + *  the same computation done by
> + *  GetOldestXmin(NULL, 
> PROCARRAY_FLAGS_DEFAULT|PROCARRAY_FLAGS_VACUUM)
>   *  RecentGlobalDataXmin: the global xmin for non-catalog tables
>   *  >= RecentGlobalXmin
>   *

PROCARRAY_FLAGS_VACUUM is defined as a bitwise or with
PROCARRAY_FLAGS_DEFAULT.  So or-ing it back with that same value does
not seem quite right.
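To illustrate (the numeric values below are made up for the example; only the shape of the definitions, as described above, matters):

    #define PROCARRAY_VACUUM_FLAG            0x02   /* illustrative value */
    #define PROCARRAY_LOGICAL_DECODING_FLAG  0x10   /* illustrative value */

    #define PROCARRAY_FLAGS_DEFAULT   PROCARRAY_LOGICAL_DECODING_FLAG
    #define PROCARRAY_FLAGS_VACUUM    (PROCARRAY_FLAGS_DEFAULT | PROCARRAY_VACUUM_FLAG)

With that shape, (PROCARRAY_FLAGS_DEFAULT | PROCARRAY_FLAGS_VACUUM) is just
PROCARRAY_FLAGS_VACUUM again, so the comment could simply say
GetOldestXmin(NULL, PROCARRAY_FLAGS_VACUUM).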

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Other formats in pset like markdown, rst, mediawiki

2017-03-31 Thread Jan Michálek
2017-03-31 12:01 GMT+02:00 Pierre Ducroquet :

> On Friday, March 31, 2017 7:17:08 AM CEST, Jan Michálek wrote:
>
>>
>>
>> 2017-03-30 21:53 GMT+02:00 Pavel Stehule :
>>
>>
>> 2017-03-29 20:11 GMT+02:00 Jan Michálek :
>>
>>
>> 2017-03-27 19:41 GMT+02:00 Jan Michálek :
>>
>>
>> 2017-03-23 17:26 GMT+01:00 Pierre Ducroquet :
>> The following review has been posted through the commitfest application:
>> make installcheck-world:  tested, passed
>> Implements feature:   tested, passed
>> Spec compliant:   tested, passed
>> Documentation:tested, passed
>>
>> Hi
>>
>> This is my first review (Magnus said in his presentation in PGDay Paris
>> that volunteers should just come and help, so here I am), so please notify
>> me for any mistake I do when using the review tools...
>>
>> The feature seems to work as expected, but I don't claim to be a markdown
>> and rst expert.
>> Some minor issues with the code itself :
>> - some indentation issues (documentation and code itself with mix between
>> space based and tab based indentation) and a few trailing spaces in code
>>
>> corrected
>>  - typographic issues in the documentation :
>>   - "The html, asciidoc, latex, latex-longtable, troff-ms, and markdown
>> and rst formats" ==> duplicated and
>>
>> corrected   - "Sets the output format to one of unaligned, aligned,
>> wrapped, html, asciidoc, latex (uses tabular), latex-longtable, rst,
>> markdown, or troff-ms." ==> extra comma at the end of the list
>> - the comment " dont add line after last row, because line is added after
>> every row" is misleading, it should warn that it's only for rst
>> - there is a block of commented out code left
>> - in the print_aligned_vertical function, there is a mix between
>> "cont->opt->format == PRINT_RST" and "format == &pg_rst" and I don't see
>> any obvious reason for that
>> corrected - the documentation doesn't mention (but ok, it's kind of
>> obvious) that the linestyle option will not work with rst and markdown
>>
>>
>> In this patch the indentation issues are corrected (I hope; I had the correct
>> changes in vimrc). Please look at whether it is OK (I mean the indentation) and
>> at some minor errors. And it should work on current master (probably).
>>
>> Added \x option for markdown
>> In markdown, multiline cells work (newline replaced by )
>> regression tests passed
>>
>> \pset format rst
>> \x
>> select 10
>> crash on segfault
>>
>> Program received signal SIGSEGV, Segmentation fault.
>> 0x7f77673a866c in vfprintf () from /lib64/libc.so.6
>> (gdb) bt
>> #0  0x7f77673a866c in vfprintf () from /lib64/libc.so.6
>> #1  0x7f77673b1574 in fprintf () from /lib64/libc.so.6
>> #2  0x00437bc5 in print_aligned_vertical (cont=0x7fffade43da0,
>> fout=, is_pager=) at print.c:1755
>> #3  0x0043a70d in printTable (cont=cont@entry=0x7fffade43da0,
>> fout=, fout@entry=0x7f77677255e0 <_IO_2_1_stdout_>,
>> is_pager=, is_pager@entry=0 '\000', flog=flog@entry=0x0)
>> at print.c:3466
>> #4  0x0043c37f in printQuery (result=result@entry=0x9c4b60,
>> opt=opt@entry=0x7fffade43f00, fout=0x7f77677255e0 <_IO_2_1_stdout_>,
>> is_pager=is_pager@entry=0 '\000', flog=0x0) at print.c:3551
>> #5  0x0040da6d in PrintQueryTuples (results=0x9c4b60) at
>> common.c:808
>> #6  PrintQueryResults (results=0x9c4b60) at common.c:1140
>> #7  SendQuery (query=0x9c1700 "select 10;") at common.c:1317
>> #8  0x0041c3d4 in MainLoop (source=0x7f77677248a0
>> <_IO_2_1_stdin_>) at mainloop.c:319
>> #9  0x00405d5d in main (argc=, argv=> out>) at startup.c:396
>>
>> Regards
>>
>> On the source from Monday it works (the last commit on master I have is from 27.3
>> 14:30). Or maybe I didn't generate the diff well, or it is some git issue.
>>
>
> I agree with Pavel, there is a segfault when you do these with your
> current patch. The current patch does not pass make check-world.
> How did you generate the diff ? Basically, the simplest way to generate a
> patch serie is through git format-patch.
> For instance, say you have a rstFormat branch freshly rebased upon
> origin/master, just do git format-patch origin/master..rstFormat and you
> will have one patch file per commit.
> And don't forget to commit everything :)
>
> I hope this helps, I don't have enough time to go through the patch and
> find out what is causing the segfault right now.
>

There was an error in the code (I don't know why it worked on my computer). Pavel
found this error, and a corrected patch is attached.

Thanks

Jan

P.S.: There are a few things I should still change (the linestyle comment, a few
things in the documentation, table annotation etc.), so I didn't change the state
of the patch.



-- 
Jelen
Senior farmhand of the data barn
diff --git a/doc/src/sgml/ref/psql-ref.sgml b/doc/src/sgml/ref/psql-ref.sgml
index b51b11baa3..7e9dca5523 100644
*** a/doc/src/sgml/ref/psql-ref.sgml
--- b/doc/src/sgml/ref/psql-ref.sgml
***
*** 2455,2461  lo_import 152801
aligned, wrapped,
html, asciidoc,
l

Re: [HACKERS] Multiple false-positive warnings from Valgrind

2017-03-31 Thread Aleksander Alekseev
Hi Kyotaro,

> > And it seems to me that this is caused by the routines of OpenSSL.
> > When building without --with-openssl, using the fallback
> > implementations of SHA256 and RAND_bytes I see no warnings generated
> > by scram_build_verifier... I think it makes most sense to discard that
> > from the list of open items.
> 
> FWIW a document of the function says that,
> 
> https://www.openssl.org/docs/man1.0.1/crypto/RAND_bytes.html
> 
> > The contents of buf is mixed into the entropy pool before
> > retrieving the new pseudo-random bytes unless disabled at compile
> > time (see FAQ).
> 
> This isn't saying that RAND_bytes does the same thing but
> something similar can be happening there.

OK, it turned out that the warnings regarding uninitialized values disappear
after removing --with-openssl. That's a good thing.

What about all these memory leak reports [1]? If I see them, should I just
ignore them or, if the reports look like false positives, suggest a patch that
modifies a Valgrind suppression file? In other words, what is the current
consensus in the community regarding Valgrind and its reports?

[1] http://afiskon.ru/s/47/871f1e21ef_valgrind.txt.gz

-- 
Best regards,
Aleksander Alekseev


signature.asc
Description: PGP signature


Re: [HACKERS] logical replication launcher crash on buildfarm

2017-03-31 Thread Robert Haas
On Tue, Mar 28, 2017 at 7:39 PM, Petr Jelinek
 wrote:
> Sigh, forgot git add for the docs, so one more try...

+if (strncmp(worker->bgw_library_name, "postgres", 8) != 0)
+return NULL;

I think that's not right.  You don't want to match postgresshdkgjsdglkjs.
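Something along these lines (a minimal sketch of the exact-match test, not the committed fix) avoids that prefix match:

    /* strncmp(..., 8) also accepts "postgresshdkgjsdglkjs"; compare the whole string */
    if (strcmp(worker->bgw_library_name, "postgres") != 0)
        return NULL;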

Aside from that, these look good to me.

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] REFERENCES privilege should not be symmetric (was Re: [GENERAL] Postgres Permissions Article)

2017-03-31 Thread Robert Haas
On Thu, Mar 30, 2017 at 4:45 PM, Tom Lane  wrote:
> In short, it seems like this statement in the docs is correctly describing
> our code's behavior, but said behavior is wrong and should be changed.
> I'd propose fixing it like that in HEAD; I'm not sure if the back branches
> should also be changed.

Sounds reasonable, but I don't see much advantage to changing it in
the back-branches.

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] strange parallel query behavior after OOM crashes

2017-03-31 Thread Robert Haas
On Thu, Mar 30, 2017 at 4:35 PM, Kuntal Ghosh
 wrote:
> 2. the server restarts automatically, initialize
> BackgroundWorkerData->parallel_register_count and
> BackgroundWorkerData->parallel_terminate_count in the shared memory.
> After that, it calls ForgetBackgroundWorker and it increments
> parallel_terminate_count.

Hmm.  So this seems like the root of the problem.  Presumably those
things need to be reset AFTER forgetting any background workers from
before the crash.
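A minimal sketch of that ordering, for illustration only (the surrounding crash-restart code is assumed; names follow the thread): forget the stale registrations first, then zero the counters.

    /* Hypothetical crash-restart path, not actual source. */
    slist_mutable_iter  iter;

    /* Drop background-worker registrations left over from before the crash. */
    slist_foreach_modify(iter, &BackgroundWorkerList)
        ForgetBackgroundWorker(&iter);

    /*
     * Only now reinitialize the shared counters, so the increment done by
     * ForgetBackgroundWorker() cannot leave parallel_terminate_count ahead
     * of parallel_register_count.
     */
    BackgroundWorkerData->parallel_register_count = 0;
    BackgroundWorkerData->parallel_terminate_count = 0;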

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Parallel query execution with SPI

2017-03-31 Thread Konstantin Knizhnik



On 31.03.2017 13:48, Robert Haas wrote:

On Fri, Mar 31, 2017 at 3:33 AM, Konstantin Knizhnik
 wrote:

Is it possible to execute a query in parallel using SPI?
If so, how can it be enforced?
I tried to open a cursor with the CURSOR_OPT_PARALLEL_OK flag but it doesn't help:
the query is executed by a single backend, while the same query launched at
top level uses a parallel plan:

 fsstate->portal = SPI_cursor_open_with_args(NULL, fsstate->query,
fsstate->numParams, argtypes, values, nulls, true, CURSOR_OPT_PARALLEL_OK);
 ...
 SPI_cursor_fetch(fsstate->portal, true, 1);

Parallel execution isn't possible if you are using a cursor-type
interface, because a parallel query can't be suspended and resumed
like a non-parallel query.  If you use a function that executes the
query to completion in one go, like SPI_execute_plan, then it's cool.
See also commit 61c2e1a95f94bb904953a6281ce17a18ac38ee6d.


Thank you very much for the explanation.
When using SPI_execute, the query is indeed executed in parallel.
But it means that when I am executing some query using SPI, I need to
somehow predict the number of returned tuples.
If it is not too many, then it is better to use SPI_execute to allow
parallel execution of the query.
But if it is large enough, then SPI_execute without a limit can cause
memory overflow.
Certainly I can specify some reasonable limit and, if it is reached, then
use a cursor instead.

But that is neither convenient nor efficient.

I wonder if somebody can suggest a better solution?

--
Konstantin Knizhnik
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company



--
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Patch: Write Amplification Reduction Method (WARM)

2017-03-31 Thread Robert Haas
On Fri, Mar 31, 2017 at 7:53 AM, Simon Riggs  wrote:
> So Andres says defer this, but Robert says "more review", which is
> more than just deferral.
>
> We have some risky things in this release such as Hash Indexes,
> function changes. I perfectly understand that perception of risk is
> affected significantly by whether you wrote something or not. Andres
> and Robert did not write it and so they see problems.

While that's probably true, I don't think that's the only thing going on here:

1. Hash indexes were reviewed and reworked repeatedly until nobody
could find any more problems, including people like Jesper Pederson
who do not work for EDB and who did extensive testing.  Similarly with
the expression evaluation stuff, which got some review from Heikki and
even more from Tom.  Now, several people who do not work for
2ndQuadrant have recently started looking at WARM and many of those
reviews have found problems and regressions.  If we're to hold things
to the same standard, those things should be looked into and fixed
before there is any talk of committing anything.  My concern is that
there seems to be (even with the patches already committed) a desire
to minimize the importance of the problems that have been found --
which I think is probably because fixing them would take time, and we
don't have much time left in this release cycle.  We should regard the
time between feature freeze and release as a time to fix the things
that good review missed, not as a substitute for fixing things that
should have (or actually were) found during review prior to commit.

2. WARM is a non-optional feature which touches the on-disk format.
There is nothing more dangerous than that.  If hash indexes have bugs,
people can avoid those bugs by not using them; there are good reasons
to suppose that hash indexes have very few existing users.  The
expression evaluation changes, IMHO, are much more dangerous because
everyone will be exposed to them, but they will not likely corrupt
your data because they don't touch the on-disk format.  WARM is even a
little more dangerous than that; everyone is exposed to those bugs,
and in the worst case they could eat your data.

I agree that WARM could be a pretty great feature, but I think you're
underestimating the negative effects that could result from committing
it too soon.

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Variable substitution in psql backtick expansion

2017-03-31 Thread Daniel Verite
Tom Lane wrote:

> Thoughts?

ISTM that expr is too painful to use to be seen as the
idiomatic way of achieving comparison in psql.

Among its disadvantages, it won't work on Windows, its
interface is hard to work with due to the necessary
quoting of half its operators, and it requires mandatory spacing
between arguments.

Also the quoting rules and command line syntax
depend on the underlying shell.
Isn't it going to be tricky to produce code that works
across different families of shells, like bourne and csh?

I think that users would rather have the option to just put
an SQL expression behind \if. That implies a working connection
for evaluation, which expr doesn't need, but that's no
different from the other backslash commands that read
the database.


Best regards,
-- 
Daniel Vérité
PostgreSQL-powered mailer: http://www.manitou-mail.org
Twitter: @DanielVerite


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] postgres_fdw bug in 9.6

2017-03-31 Thread Etsuro Fujita

On 2017/03/30 20:16, Ashutosh Bapat wrote:

The patch applies cleanly, compiles. make check in regress as well as
postgres_fdw works fine. Here are few comments


Thanks for the review!


local-join should be local join.


OK, done.


The comments should explain why.
+/* Should be unparameterized */
+Assert(outer_path->param_info == NULL);
+Assert(inner_path->param_info == NULL);


Done.


+ a suitable local join path, which can be used as the alternative local
May be we should reword this as ... which can be used to create an alternative
local ... This rewording is required even in the existing docs.


Done.


+/* outer_path should not require rels from inner_path */
+Assert(!PATH_PARAM_BY_REL(outer_path, inner_path->parent));
Probably this should throw an error or return NULL in such case rather than
Asserting. This function is callable from any FDW, and that FDW may provide
such paths, may be because of an internal bug. Same case with
+/* Neither path should require rels from the other path */
+Assert(!PATH_PARAM_BY_REL(outer_path, inner_path->parent) ||
+   !PATH_PARAM_BY_REL(inner_path, outer_path->parent));


Good idea!  I think it's better to throw an error because that is a bug 
in the FDW; done that way.



While the comment below mentions ON true, the testcase you have added is for ON
false. Either the testcase should change or this comment. That raises another
question, what happens when somebody does FULL JOIN ON false?
+ * If special case: for "x FULL JOIN y ON true", there


FULL JOIN ON FALSE would be handled the same way as FULL JOIN ON TRUE, 
so I think we should rewrite that comment into something like this: If 
special case: for "x FULL JOIN y ON true" or "x FULL JOIN y ON false"...



Why JOIN_RIGHT is being treated differently from JOIN_LEFT? We should be able
to create a nested loop join for JOIN_RIGHT?
+case JOIN_RIGHT:
+case JOIN_FULL:


I don't think so, because nestloop joins aren't supported for 
JOIN_RIGHT.  See ExecInitNestLoop().
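Roughly (paraphrasing from memory, not verbatim source), the switch in ExecInitNestLoop() only accepts the join types a nestloop can produce, so a right join would fall into the error case:

    switch (node->join.jointype)
    {
        case JOIN_INNER:
        case JOIN_SEMI:
            break;
        case JOIN_LEFT:
        case JOIN_ANTI:
            /* these need a null-extended inner tuple slot */
            break;
        default:
            elog(ERROR, "unrecognized join type: %d",
                 (int) node->join.jointype);
    }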


Best regards,
Etsuro Fujita
*** a/contrib/postgres_fdw/expected/postgres_fdw.out
--- b/contrib/postgres_fdw/expected/postgres_fdw.out
***
*** 1629,1650  SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t
 Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
 Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
 Remote SQL: SELECT r1."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, r2."C 1", CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1" ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR UPDATE OF r1
!->  Merge Join
   Output: t1.c1, t1.c3, t1.*, t2.c1, t2.*
!  Merge Cond: (t1.c1 = t2.c1)
!  ->  Sort
 Output: t1.c1, t1.c3, t1.*
!Sort Key: t1.c1
!->  Foreign Scan on public.ft1 t1
!  Output: t1.c1, t1.c3, t1.*
!  Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR UPDATE
!  ->  Sort
 Output: t2.c1, t2.*
!Sort Key: t2.c1
!->  Foreign Scan on public.ft2 t2
!  Output: t2.c1, t2.*
!  Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1"
! (23 rows)
  
  SELECT t1.c1, t2.c1 FROM ft1 t1 JOIN ft2 t2 ON (t1.c1 = t2.c1) ORDER BY t1.c3, t1.c1 OFFSET 100 LIMIT 10 FOR UPDATE OF t1;
   c1  | c1  
--- 1629,1644 
 Output: t1.c1, t2.c1, t1.c3, t1.*, t2.*
 Relations: (public.ft1 t1) INNER JOIN (public.ft2 t2)
 Remote SQL: SELECT r1."C 1", r1.c3, CASE WHEN (r1.*)::text IS NOT NULL THEN ROW(r1."C 1", r1.c2, r1.c3, r1.c4, r1.c5, r1.c6, r1.c7, r1.c8) END, r2."C 1", CASE WHEN (r2.*)::text IS NOT NULL THEN ROW(r2."C 1", r2.c2, r2.c3, r2.c4, r2.c5, r2.c6, r2.c7, r2.c8) END FROM ("S 1"."T 1" r1 INNER JOIN "S 1"."T 1" r2 ON (((r1."C 1" = r2."C 1" ORDER BY r1.c3 ASC NULLS LAST, r1."C 1" ASC NULLS LAST FOR UPDATE OF r1
!->  Nested Loop
   Output: t1.c1, t1.c3, t1.*, t2.c1, t2.*
!  Join Filter: (t1.c1 = t2.c1)
!  ->  Foreign Scan on public.ft1 t1
 Output: t1.c1, t1.c3, t1.*
!Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" FOR UPDATE
!  ->  Foreign Scan on public.ft2 t2

Re: [HACKERS] [bug fix] Savepoint-related statements terminates connection

2017-03-31 Thread Ashutosh Bapat
Please add this to the next commitfest.

I think there's some misunderstanding between exec_simple_query() and
the way we manage the transaction block state machine.

In exec_simple_query()
 952  * We'll tell PortalRun it's a top-level command iff there's
exactly one
 953  * raw parsetree.  If more than one, it's effectively a
transaction block
 954  * and we want PreventTransactionChain to reject unsafe
commands. (Note:
 955  * we're assuming that query rewrite cannot add commands that are
 956  * significant to PreventTransactionChain.)
 957  */
 958 isTopLevel = (list_length(parsetree_list) == 1);

it assumes that a multi-statement command is a transaction block. But
for every statement in this multi-statement command, we toggle between
TBLOCK_STARTED and TBLOCK_DEFAULT, never entering TBLOCK_INPROGRESS as
expected for a transaction block. It looks like we have to fix this
transaction block state machine for multi-statement commands. One way
to fix it is to call finish_xact_command() in exec_simple_query() at
line 958 when it sees that it's a transaction block. I am not sure if
that's correct. We have to at least fix the comment above, or even stop
setting isTopLevel for multi-statement commands.

I don't think the fix in the patch is on the right track, since
RequireTransactionChain() is supposed to do exactly what the patch
intends to do.
3213 /*
3214  *  RequireTransactionChain
3215  *
3216  *  This routine is to be called by statements that must run inside
3217  *  a transaction block, because they have no effects that persist past
3218  *  transaction end (and so calling them outside a transaction block
3219  *  is presumably an error).  DECLARE CURSOR is an example.

Incidentally, we allow cursor operations in a multi-statement command:
psql -d postgres -c "select 1; declare curs cursor for select * from
pg_class; fetch from curs;"
   relname| relnamespace | reltype | reloftype | relowner | relam
| relfilenode | reltablespace | relpages | reltuples | relallvisible |
reltoastre
lid | relhasindex | relisshared | relpersistence | relkind | relnatts
| relchecks | relhasoids | relhaspkey | relhasrules | relhastriggers |
relhassubc
lass | relrowsecurity | relforcerowsecurity | relispopulated |
relreplident | relispartition | relfrozenxid | relminmxid |
relacl
| reloptions | relpartbound
--+--+-+---+--+---+-+---+--+---+---+---
+-+-++-+--+---+++-++---
-++-++--++--++-
++--
 pg_statistic |   11 |   11258 | 0 |   10 | 0
|2619 | 0 |   16 |   388 |16 |
 2
840 | t   | f   | p  | r   |   26
| 0 | f  | f  | f   | f  |
f
 | f  | f   | t  | n
 | f  |  547 |  1 |
{ashutosh=arwdDxt/ashutosh}
||
(1 row)

Then the question is why not allow savepoints as well? For that we
have to fix the transaction block state machine.

On Fri, Mar 31, 2017 at 12:40 PM, Tsunakawa, Takayuki
 wrote:
> Hello,
>
> I found a trivial bug that terminates the connection.  The attached patch 
> fixes this.
>
>
> PROBLEM
> 
>
> Savepoint-related statements in a multi-command query terminate the
> connection unexpectedly, as follows.
>
> $ psql -d postgres -c "SELECT 1; SAVEPOINT sp"
> FATAL:  DefineSavepoint: unexpected state STARTED
> server closed the connection unexpectedly
> This probably means the server terminated abnormally
> before or while processing the request.
> connection to server was lost
>
>
> CAUSE
> 
>
> 1. In exec_simple_query(), isTopLevel is set to false.
>
> isTopLevel = (list_length(parsetree_list) == 1);
>
> Then it is passed to PortalRun().
>
> (void) PortalRun(portal,
>  FETCH_ALL,
>  isTopLevel,
>  receiver,
>  receiver,
>  completionTag);
>
> 2. The isTopLevel flag is passed through ProcessUtility() to 
> RequireTransactionChain().
>
> 
> RequireTransactionChain(isTopLevel, "SAVEPOINT");
>
>
> 3. CheckTransactionChain() returns successfully here:
>
> /*
>  * inside a function call?
>  */
> if (!isTopLevel)
> return;
>
>
> 4. Fi

Re: [HACKERS] TPC-H Q20 from 1 hour to 19 hours!

2017-03-31 Thread Rafia Sabih
On Fri, Mar 31, 2017 at 5:13 PM, Amit Kapila  wrote:
> On Thu, Mar 30, 2017 at 8:24 AM, Robert Haas  wrote:
>> On Wed, Mar 29, 2017 at 8:00 PM, Tomas Vondra
>>  wrote:
>>> What is however strange is that changing max_parallel_workers_per_gather
>>> affects row estimates *above* the Gather node. That seems a bit, um,
>>> suspicious, no? See the parallel-estimates.log.
>>
>> Thanks for looking at this!  Comparing the parallel plan vs. the
>> non-parallel plan:
>>
>> part: parallel rows (after Gather) 20202, non-parallel rows 20202
>> partsupp: parallel rows 18, non-parallel rows 18
>> part-partsupp join: parallel rows 88988, non-parallel rows 355951
>> lineitem: parallel rows 59986112, non-parallel rows 59986112
>> lineitem after aggregation: parallel rows 5998611, non-parallel rows 5998611
>> final join: parallel rows 131, non-parallel rows 524
>>
>> I agree with you that that looks mighty suspicious.  Both the
>> part-partsupp join and the final join have exactly 4x as many
>> estimated rows in the non-parallel plan as in the parallel plan, and
>> it just so happens that the parallel divisor here will be 4.
>>
>> Hmm... it looks like the parallel_workers value from the Gather node
>> is being erroneously propagated up to the higher levels of the plan
>> tree.   Wow.   Somehow, Gather Merge managed to get the logic correct
>> here, but Gather is totally wrong.  Argh.   Attached is a draft patch,
>> which I haven't really tested beyond checking that it passes 'make
>> check'.
>>
>
> Your patch looks good to me.  I have verified some join cases as well
> where the behaviour is sane after patch.  I have also done testing
> with force_parallel_mode=regress (ran make check-world) and everything
> seems good.
>
> --

I tried checking the plan of Q20 with this patch, and got the following results.
With the patch:
->  Merge Join  (cost=3025719.98..3499235.22 rows=6 width=16) (actual
time=176440.801..245903.143 rows=118124 loops=1)
   Merge Cond: ((lineitem.l_partkey =
partsupp.ps_partkey) AND (lineitem.l_suppkey = partsupp.ps_suppkey))
   Join Filter:
((partsupp.ps_availqty)::numeric > ((0.5 * sum(lineitem.l_quantity
and without the patch:
->  Merge Join  (cost=3014830.12..3511637.54 rows=2 width=16) (actual
time=130994.237..208887.149 rows=118124 loops=1)
 Merge Cond: ((partsupp.ps_partkey =
lineitem.l_partkey) AND (partsupp.ps_suppkey = lineitem.l_suppkey))
 Join Filter:
((partsupp.ps_availqty)::numeric > ((0.5 * sum(lineitem.l_quantity

So, it looks like in the problematic area it is not improving much.
Please find attached the query plans of Q20 with and without the
patch. I haven't evaluated the performance of this query with the
patch.
-- 
Regards,
Rafia Sabih
EnterpriseDB: http://www.enterprisedb.com/
 with patch:

 QUERY PLAN 


 Limit  (cost=3500605.72..3500605.73 rows=1 width=52) (actual 
time=572114.740..572114.740 rows=1 loops=1)
   ->  Sort  (cost=3500605.72..3500605.73 rows=1 width=52) (actual 
time=572114.738..572114.738 rows=1 loops=1)
 Sort Key: supplier.s_name
 Sort Method: top-N heapsort  Memory: 25kB
 ->  Nested Loop Semi Join  (cost=3025720.40..3500605.71 rows=1 
width=52) (actual time=178675.638..572096.079 rows=3600 loops=1)
   Join Filter: (supplier.s_suppkey = lineitem.l_suppkey)
   Rows Removed by Join Filter: 723939540
   ->  Nested Loop  (cost=0.42..650.46 rows=8000 width=56) (actual 
time=5.210..128.099 rows=8090 loops=1)
 ->  Seq Scan on nation  (cost=0.00..0.41 rows=1 width=4) 
(actual time=0.016..0.039 rows=1 loops=1)
   Filter: (n_name = 'EGYPT'::bpchar)
   Rows Removed by Filter: 24
 ->  Index Scan using idx_supplier_nation_key on supplier  
(cost=0.42..570.04 rows=8000 width=64) (actual time=5.189..123.582 rows=8090 
loops=1)
   Index Cond: (s_nationkey = nation.n_nationkey)
   ->  Materialize  (cost=3025719.98..3499235.25 rows=6 width=16) 
(actual time=21.810..40.888 rows=89486 loops=8090)
 ->  Merge Join  (cost=3025719.98..3499235.22 rows=6 
width=16) (actual time=176440.801..245903.143 rows=118124 loops=1)
   Merge Cond: ((lineitem.l_partkey = 
partsupp.ps_partkey) AND (lineitem.l_suppkey = partsupp.ps_suppkey))
   Join Filter: ((partsupp.ps_availqty)::numeric > 
((0.5 * sum(lineitem.l_quantity
   Rows Removed by Join Filter: 242
   ->  GroupAggregate  (cost=2921830.80..3248925.16 
rows=9680570

Re: [HACKERS] Patch: Write Amplification Reduction Method (WARM)

2017-03-31 Thread Simon Riggs
On 30 March 2017 at 16:50, Robert Haas  wrote:
> On Thu, Mar 30, 2017 at 11:41 AM, Andres Freund  wrote:
>> On 2017-03-30 16:43:41 +0530, Pavan Deolasee wrote:
>>> Looks like OID conflict to me.. Please try rebased set.
>>
>> Pavan, Alvaro, everyone: I know you guys are working very hard on this,
>> but I think at this point it's too late to commit this for v10.  This is
>> patch that's affecting the on-disk format, in quite subtle
>> ways.  Committing this just at the end of the development cyle / shortly
>> before feature freeze, seems too dangerous to me.
>>
>> Let's commit this just at the beginning of the cycle, so we have time to
>> shake out the bugs.
>
> +1, although I think it should also have substantially more review first.

So Andres says defer this, but Robert says "more review", which is
more than just deferral.

We have some risky things in this release such as Hash Indexes,
function changes. I perfectly understand that perception of risk is
affected significantly by whether you wrote something or not. Andres
and Robert did not write it and so they see problems. I confess that
those two mentioned changes make me very scared and I'm wondering
whether we should disable them. Fear is normal.

A risk perspective is a good one to take. What I think we should do is
strip out the areas of complexity, like TOAST, to reduce the footprint
and minimize the risk. There is benefit in WARM, and PostgreSQL has
received public criticism around our performance in this area. This
is more important than just a nice few percentage points of performance.

The bottom line is that this is written by Pavan, the guy we've
trusted for a decade to write and support HOT. We all know he can and
will fix any problems that emerge because he has shown us many times
he can and does.

We also observe that people from the same company sometimes support
their colleagues when they should not. I see no reason to believe that
is influencing my comments here.

The question is not whether this is ready today, but whether it will be
trusted and safe to use by September. Given the RMT, I would say yes, it
can be.

So I say we should commit WARM in PG10, with some restrictions.

-- 
Simon Riggshttp://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Variable substitution in psql backtick expansion

2017-03-31 Thread Corey Huinker
On Thu, Mar 30, 2017 at 1:33 PM, Tom Lane  wrote:

> single-quoted according to Unix shell conventions.  (So the
> processing would be a bit different from what it is for the
> same notation in SQL contexts.)
>

+1
Having been bitten by format '%L' prepending an 'E' to any string that happens
to have a backslash in it, I'm in favor of this difference.

Any reason we wouldn't do :"VARIABLE" as well? People might expect it given
its use elsewhere, and it would make possible things like

SELECT '$HOME/lamentable application name dir/bin/myprog' as myprog \gset
`:"myprog" arg1 arg2`


both for expanding $HOME and keeping the lamentable dir path as one arg.


Re: [HACKERS] TPC-H Q20 from 1 hour to 19 hours!

2017-03-31 Thread Amit Kapila
On Thu, Mar 30, 2017 at 8:24 AM, Robert Haas  wrote:
> On Wed, Mar 29, 2017 at 8:00 PM, Tomas Vondra
>  wrote:
>> What is however strange is that changing max_parallel_workers_per_gather
>> affects row estimates *above* the Gather node. That seems a bit, um,
>> suspicious, no? See the parallel-estimates.log.
>
> Thanks for looking at this!  Comparing the parallel plan vs. the
> non-parallel plan:
>
> part: parallel rows (after Gather) 20202, non-parallel rows 20202
> partsupp: parallel rows 18, non-parallel rows 18
> part-partsupp join: parallel rows 88988, non-parallel rows 355951
> lineitem: parallel rows 59986112, non-parallel rows 59986112
> lineitem after aggregation: parallel rows 5998611, non-parallel rows 5998611
> final join: parallel rows 131, non-parallel rows 524
>
> I agree with you that that looks mighty suspicious.  Both the
> part-partsupp join and the final join have exactly 4x as many
> estimated rows in the non-parallel plan as in the parallel plan, and
> it just so happens that the parallel divisor here will be 4.
>
> Hmm... it looks like the parallel_workers value from the Gather node
> is being erroneously propagated up to the higher levels of the plan
> tree.   Wow.   Somehow, Gather Merge managed to get the logic correct
> here, but Gather is totally wrong.  Argh.   Attached is a draft patch,
> which I haven't really tested beyond checking that it passes 'make
> check'.
>

Your patch looks good to me.  I have verified some join cases as well
where the behaviour is sane after patch.  I have also done testing
with force_parallel_mode=regress (ran make check-world) and everything
seems good.

-- 
With Regards,
Amit Kapila.
EnterpriseDB: http://www.enterprisedb.com


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Parallel query execution with SPI

2017-03-31 Thread Rafia Sabih
On Fri, Mar 31, 2017 at 4:18 PM, Robert Haas  wrote:
> On Fri, Mar 31, 2017 at 3:33 AM, Konstantin Knizhnik
>  wrote:
>> Is it possible to execute a query in parallel using SPI?
>> If so, how can it be enforced?
>> I tried to open a cursor with the CURSOR_OPT_PARALLEL_OK flag but it doesn't help:
>> the query is executed by a single backend, while the same query launched at
>> top level uses a parallel plan:
>>
>> fsstate->portal = SPI_cursor_open_with_args(NULL, fsstate->query,
>> fsstate->numParams, argtypes, values, nulls, true, CURSOR_OPT_PARALLEL_OK);
>> ...
>> SPI_cursor_fetch(fsstate->portal, true, 1);
>
> Parallel execution isn't possible if you are using a cursor-type
> interface, because a parallel query can't be suspended and resumed
> like a non-parallel query.  If you use a function that executes the
> query to completion in one go, like SPI_execute_plan, then it's cool.
> See also commit 61c2e1a95f94bb904953a6281ce17a18ac38ee6d.
>
> --

Adding to that, for your case passing CURSOR_OPT_PARALLEL_OK is not
enough, because PortalRun for the cursor would have
portal->run_once set to false, which restricts parallelism in
ExecutePlan:
if (!execute_once || dest->mydest == DestIntoRel)
  use_parallel_mode = false;

You may check [1] for the discussion on this.

[1] 
https://www.postgresql.org/message-id/flat/CAFiTN-vxhvvi-rMJFOxkGzNaQpf%2BKS76%2Bsu7-sG_NQZGRPJkQg%40mail.gmail.com#cafitn-vxhvvi-rmjfoxkgznaqpf+ks76+su7-sg_nqzgrpj...@mail.gmail.com

-- 
Regards,
Rafia Sabih
EnterpriseDB: http://www.enterprisedb.com/


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Supporting huge pages on Windows

2017-03-31 Thread Amit Kapila
On Fri, Mar 31, 2017 at 8:05 AM, Tsunakawa, Takayuki
 wrote:
> From: Amit Kapila [mailto:amit.kapil...@gmail.com]
>> The latest patch looks good to me apart from one Debug message, so I have
>> marked it as Ready For Committer.
>
> Thank you so much!
>
>
>> + ereport(DEBUG1,
>> + (errmsg("disabling huge pages")));
>>
>> I think this should be similar to what we display in sysv_shmem.c as below:
>>
>> elog(DEBUG1, "mmap(%zu) with MAP_HUGETLB failed, huge pages disabled: %m",
>> allocsize);
>
> I understood that you suggested this to make the reason for disabling huge
> pages clear.  OK, done as follows.
>
> +   elog(DEBUG1, "CreateFileMapping(%llu) with SEC_LARGE_PAGES 
> failed "
> +"due to insufficient large pages, huge pages disabled",
> +size);
>

You should use %zu instead of %llu to print Size type of variable.
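I.e., something like this (just restating the suggested fix):

    elog(DEBUG1, "CreateFileMapping(%zu) with SEC_LARGE_PAGES failed "
         "due to insufficient large pages, huge pages disabled",
         size);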

-- 
With Regards,
Amit Kapila.
EnterpriseDB: http://www.enterprisedb.com


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] WIP: Covering + unique indexes.

2017-03-31 Thread Robert Haas
On Thu, Mar 30, 2017 at 5:22 PM, Anastasia Lubennikova
 wrote:
> Well,
> I don't know how we can estimate the quality of the review or testing.
> The patch was reviewed by many people.
> Here are those who marked themselves as reviewers on this and previous
> commitfests: Stephen Frost (sfrost), Andrew Dunstan (adunstan), Aleksander
> Alekseev (a.alekseev), Amit Kapila (amitkapila), Andrey Borodin (x4m), Peter
> Geoghegan (pgeoghegan), David Rowley (davidrowley).

Sure, but the amount of in-depth review seems to have been limited.
Just because somebody put their name down in the CommitFest
application doesn't mean that they did a detailed review of all the
code.

>> It seems highly surprising to me that CheckIndexCompatible() only gets
>> a one line change in this patch.  That seems unlikely to be correct.
>
> What makes you think so? CheckIndexCompatible() only cares about possible
> opclass changes.
> For covering indexes, opclasses are only applicable to indnkeyatts. And that
> is exactly what was changed in this patch.
> Do you think it needs some other changes?

Probably.  I mean, for an INCLUDE column, it wouldn't matter if a
collation or opclass change happened, but if the base data type had
changed, you'd still need to rebuild the index.  So presumably
CheckIndexCompatible() ought to be comparing some things, but not
everything, for INCLUDE columns.
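For illustration, a sketch of the per-column comparison being described (indnkeyatts/indnatts as in the covering-index patch; the helper name and argument layout are invented for the example): key columns must match on type, opclass and collation, INCLUDE columns only on type.

    static bool
    columns_still_compatible(Oid *oldTypes, Oid *newTypes,
                             Oid *oldClasses, Oid *newClasses,
                             Oid *oldColls, Oid *newColls,
                             int indnkeyatts, int indnatts)
    {
        int     i;

        for (i = 0; i < indnatts; i++)
        {
            /* a data type change always forces an index rebuild */
            if (oldTypes[i] != newTypes[i])
                return false;

            /* opclass/collation only matter for the key columns */
            if (i < indnkeyatts &&
                (oldClasses[i] != newClasses[i] || oldColls[i] != newColls[i]))
                return false;
        }

        return true;
    }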

>> Has anybody tested this patch with amcheck?  Does it break amcheck?
>
> Yes, it breaks amcheck. Amcheck should be patched in order to work with
> covering indexes.
> Peter and I discussed this before, and I even wrote a small patch.
> I'll attach it in a following message.

Great.

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Parallel query execution with SPI

2017-03-31 Thread Robert Haas
On Fri, Mar 31, 2017 at 3:33 AM, Konstantin Knizhnik
 wrote:
> Is it possible to execute a query in parallel using SPI?
> If so, how can it be enforced?
> I tried to open a cursor with the CURSOR_OPT_PARALLEL_OK flag but it doesn't help:
> the query is executed by a single backend, while the same query launched at
> top level uses a parallel plan:
>
> fsstate->portal = SPI_cursor_open_with_args(NULL, fsstate->query,
> fsstate->numParams, argtypes, values, nulls, true, CURSOR_OPT_PARALLEL_OK);
> ...
> SPI_cursor_fetch(fsstate->portal, true, 1);

Parallel execution isn't possible if you are using a cursor-type
interface, because a parallel query can't be suspended and resumed
like a non-parallel query.  If you use a function that executes the
query to completion in one go, like SPI_execute_plan, then it's cool.
See also commit 61c2e1a95f94bb904953a6281ce17a18ac38ee6d.
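For example, a minimal sketch of the non-cursor route, assuming a read-only query (plan reuse and parameter handling omitted):

    /* Plan with parallelism allowed, then run it to completion in one go. */
    SPIPlanPtr  plan = SPI_prepare_cursor(query, 0, NULL,
                                          CURSOR_OPT_PARALLEL_OK);

    if (plan == NULL)
        elog(ERROR, "SPI_prepare_cursor failed: %s",
             SPI_result_code_string(SPI_result));

    /* read_only = true, tcount = 0 (no row limit), so a parallel plan can run */
    if (SPI_execute_plan(plan, NULL, NULL, true, 0) < 0)
        elog(ERROR, "SPI_execute_plan failed");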

-- 
Robert Haas
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] LWLock optimization for multicore Power machines

2017-03-31 Thread Alexander Korotkov
On Sun, Mar 26, 2017 at 12:29 AM, Alexander Korotkov <
a.korot...@postgrespro.ru> wrote:

> On Sat, Mar 25, 2017 at 11:32 PM, Tom Lane  wrote:
>
>> Alexander Korotkov  writes:
>> > I moved PPC implementation of pg_atomic_fetch_mask_add_u32() into
>> > port/atomics/arch-ppc.h.  I also had to declare pg_atomic_uint32 there
>> to
>> > satisfy usage of this type as argument
>> > of pg_atomic_fetch_mask_add_u32_impl().
>>
>> Hm, you did something wrong there, because now I get a bunch of failures:
>>
>> ccache gcc -Wall -Wmissing-prototypes -Wpointer-arith
>> -Wdeclaration-after-statement -Wendif-labels -Wmissing-format-attribute
>> -Wformat-security -fno-strict-aliasing -fwrapv -g -O2
>> -I../../../../src/include-c -o brin.o brin.c
>> In file included from ../../../../src/include/port/atomics.h:123,
>>  from ../../../../src/include/utils/dsa.h:17,
>>  from ../../../../src/include/nodes/tidbitmap.h:26,
>>  from ../../../../src/include/access/genam.h:19,
>>  from ../../../../src/include/nodes/execnodes.h:17,
>>  from ../../../../src/include/access/brin.h:14,
>>  from brin.c:18:
>> ../../../../src/include/port/atomics/generic.h:154:3: error: #error "No
>> pg_atomic_test_and_set provided"
>> ../../../../src/include/port/atomics.h: In function
>> 'pg_atomic_init_flag':
>> ../../../../src/include/port/atomics.h:178: warning: implicit
>> declaration of function 'pg_atomic_init_flag_impl'
>> ../../../../src/include/port/atomics.h: In function
>> 'pg_atomic_test_set_flag':
>> ../../../../src/include/port/atomics.h:193: warning: implicit
>> declaration of function 'pg_atomic_test_set_flag_impl'
>> ../../../../src/include/port/atomics.h: In function
>> 'pg_atomic_unlocked_test_flag':
>> ../../../../src/include/port/atomics.h:208: warning: implicit
>> declaration of function 'pg_atomic_unlocked_test_flag_impl'
>> ... and so on.
>>
>> I'm not entirely sure what the intended structure of these header files
>> is.  Maybe Andres can comment.
>>
>
> It seems that on this platform the definition of atomics should be provided
> by fallback.h.  But it isn't, because I already defined
> PG_HAVE_ATOMIC_U32_SUPPORT
> in arch-ppc.h.  I think in this case we shouldn't provide a ppc-specific
> implementation of pg_atomic_fetch_mask_add_u32().  However, I don't know
> how to do this, given that arch-ppc.h is included before the compiler-specific
> headers.  Thus, in arch-ppc.h we don't yet know whether we will find an
> implementation of atomics for this platform.  One possible solution is to
> provide an assembly implementation for all atomics in arch-ppc.h.
>

BTW, an implementation of all atomics in arch-ppc.h would be too invasive and
shouldn't be considered for v10.
However, I made the following workaround: declare pg_atomic_uint32 and
pg_atomic_fetch_mask_add_u32_impl() only when we know that generic-gcc.h
would declare gcc-based atomics.
Could you please check it on Apple PPC?
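For reference, the shape of the guard is roughly this (the configure macros are the ones generic-gcc.h checks; treat this as a sketch of the workaround, not the patch itself):

    /* arch-ppc.h: only provide the PPC-specific helper when gcc-style
     * 32-bit atomics are known to be available, so that fallback.h still
     * applies on other compilers. */
    #if defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS)

    #define PG_HAVE_ATOMIC_U32_SUPPORT
    typedef struct pg_atomic_uint32
    {
        volatile uint32 value;
    } pg_atomic_uint32;

    #define PG_HAVE_ATOMIC_FETCH_MASK_ADD_U32
    /* ... assembly implementation of pg_atomic_fetch_mask_add_u32_impl() ... */

    #endif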

--
Alexander Korotkov
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company


lwlock-power-5.patch
Description: Binary data

-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] ANALYZE command progress checker

2017-03-31 Thread Masahiko Sawada
On Thu, Mar 30, 2017 at 6:19 PM, vinayak
 wrote:
>
> On 2017/03/30 17:39, Masahiko Sawada wrote:
>>
>> On Wed, Mar 29, 2017 at 5:38 PM, vinayak
>>  wrote:
>>>
>>> On 2017/03/25 4:30, Robert Haas wrote:

 On Fri, Mar 24, 2017 at 3:41 AM, vinayak
  wrote:
>
> I have updated the patch.

 You can't change the definition of AcquireSampleRowsFunc without
 updating the documentation in fdwhandler.sgml, but I think I don't
 immediately understand why that's a thing we want to do at all if
 neither file_fdw nor postgres_fdw are going to use the additional
 argument.  It seems to be entirely pointless because the new value
 being passed down is always zero and even if the callee were to update
 it, do_analyze_rel() would just ignore the change on return.  Am I
 missing something here?  Also, the whitespace-only changes to fdwapi.h
 related to AcquireSampleRowsFunc going to get undone by pgindent, so
 even if there's some reason to change that you should leave the lines
 that don't need changing untouched.
>>
>> I reviewed v7 patch.
>
> Thank you for reviewing the patch.
>>
>>
>> When I ran the ANALYZE command on a table having 5 million rows and 3
>> child tables, the progress report I got showed a strange result. The
>> following result was output after all target rows had been sampled from the
>> child tables (i.e., after acquire_inherited_sample_rows finished). I think
>> the progress information should report 100% complete at this point.
>>
>> #= select * from pg_stat_progress_analyze ;
>>pid  | datid | datname  | relid |  phase   |
>> num_target_sample_rows | num_rows_sampled
>>
>> ---+---+--+---+--++--
>>   81719 | 13215 | postgres | 16440 | collecting inherited sample rows |
>> 300 |  180
>> (1 row)
>>
>> I guess the cause of this is that you always count the number of
>> sampled rows in acquire_sample_rows starting from 0 for each child
>> table; num_rows_sampled is reset whenever collecting sample rows
>> starts.
>> Also, even if the table has child tables, the total number of sample rows
>> is not changed. I think that the main difference between analyzing a
>> normal table and a partitioned table is where we fetch the sample rows
>> from. So I'm not sure that adding a "computing inherited statistics" phase
>> is desirable. If you want to report the progress of collecting sample rows
>> on child tables, I think you might want to add information
>> showing which child table is being analyzed.
>
> Yes. I think showing child table information would be good to user/DBA.
>
>> --
>> pg_stat_progress_analyze.num_target_sample_rows is set while ignoring
>> the number of rows the target table actually has. So if the table has
>> fewer rows than the target number of rows, num_rows_sampled never reaches
>> num_target_sample_rows.
>>
>> --
>> @@ -1180,6 +1213,9 @@ acquire_sample_rows(Relation onerel, int elevel,
>>  }
>>
>>  samplerows += 1;
>> +
>> +   /* Report number of rows sampled so far */
>> +
>> pgstat_progress_update_param(PROGRESS_ANALYZE_NUM_ROWS_SAMPLED,
>> numrows);
>>  }
>>  }
>>
>> I think that it's wrong to use the numrows variable to report the progress
>> of the number of rows sampled. As the following comment mentions, we
>> first save rows into rows[] and increment numrows while numrows <
>> targrows. After that we may replace a stored sample row with a newly
>> sampled row. That is, after collecting "numrows" rows, analyze can still
>> take a long time to fetch and replace sample rows. For computing the
>> progress of collecting sample rows, IMO reporting the number of blocks we
>> have scanned so far is better than the number of sample rows. Thoughts?
>>
>>>  /*
>>>   * The first targrows sample rows are simply copied into the
>>>   * reservoir. Then we start replacing tuples in the sample
>>>   * until we reach the end of the relation.  This algorithm is
>>>   * from Jeff Vitter's paper (see full citation below). It
>>>   * works by repeatedly computing the number of tuples to skip
>>>   * before selecting a tuple, which replaces a randomly chosen
>>>   * element of the reservoir (current set of tuples).  At all
>>>   * times the reservoir is a true random sample of the tuples
>>>   * we've passed over so far, so when we fall off the end of
>>>   * the relation we're done.
>>>   */
>
> I think we can report either the number of blocks scanned so far or the
> number of sample rows.
> But I agree with you that reporting the number of blocks scanned so far
> would be better than reporting the number of sample rows.
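For illustration, the reporting call inside the block-reading loop of acquire_sample_rows() could then look roughly like this (the PROGRESS_ANALYZE_* column index and the counter variable are hypothetical, following the patch's naming style):

    /* once per heap block fetched for sampling */
    blocks_scanned++;
    pgstat_progress_update_param(PROGRESS_ANALYZE_NUM_BLKS_SCANNED,
                                 blocks_scanned);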
>
>>> I understood that we could not change the definition of
>>> AcquireSampleRowsFunc without updating the documentation in
>>> fdwhandler.sgml.
>

Re: [HACKERS] show "aggressive" or not in autovacuum logs

2017-03-31 Thread Masahiko Sawada
On Wed, Mar 29, 2017 at 12:46 PM, Kyotaro HORIGUCHI
 wrote:
> Hello, it would be too late but I'd like to propose this because
> this cannot be back-patched.
>
>
> In autovacuum logs, "%u skipped frozen" shows the number of pages
> skipped by ALL_FROZEN only in an aggressive vacuum.
>
> So users cannot tell whether '0 skipped-frozen' means a
> non-aggressive vacuum or no frozen pages in an aggressive vacuum.
>
> I think it would be nice to have an indication of whether the scan was
> "aggressive" or not in the log output.

Good idea. I also was thinking about this.

> Like this,
>
>> LOG:  automatic aggressive vacuum of table 
>> "template1.pg_catalog.pg_statistic": index scans: 0
>
> "0 skipped frozen" is uesless in non-aggressive vacuum but
> removing it would be too-much.  Inserting "aggressive" reduces
> machine-readability so it might be better in another place. The
> attached patch does the following.
>
>>  LOG:  automatic vacuum of table "postgres.public.pgbench_branches": mode: 
>> normal, index scans: 0
>>  LOG:  automatic vacuum of table "postgres.public.pgbench_branches": mode: 
>> aggressive, index scans: 0
>

Should we add this even to the manual vacuum verbose message?

Regards,

--
Masahiko Sawada
NIPPON TELEGRAPH AND TELEPHONE CORPORATION
NTT Open Source Software Center


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] Foreign tables don't enforce the partition constraint

2017-03-31 Thread Ashutosh Bapat
On Fri, Mar 31, 2017 at 1:36 PM, Amit Langote
 wrote:
> We don't enforce the constraints defined on foreign tables in ExecInsert()
> and ExecUpdate().  (COPY FROM does not support foreign tables at all.)
> Since partition constraints are enforced using ExecConstraints() which is
> not called for foreign tables, they will not be checked if one inserts
> directly into foreign partitions.  So:
>
> create table p (a int) partition by list (a);
> create table p1t (like p);
> create table p2t (like p);
> create foreign table p1 partition of p for values in (1)
>   server loopback options (table_name 'p1t');
> create foreign table p2 partition of p for values in (2)
>   server loopback options (table_name 'p2t');
> insert into p1 values (2);  -- ungood
> insert into p2 values (1);  -- ungood
>
> While we have the ability to mark check constraints as being NOT VALID so
> that planner can ignore them, partition constraints are assumed to
> *always* hold, giving possibly surprising results.
>
> explain (costs off) select * from p where a = 1;
> QUERY PLAN
> --
>  Append
>->  Foreign Scan on p1
> (2 rows)
>
> select * from p where a = 1;
>  a
> ---
> (0 rows)
>
> explain (costs off) select * from p where a = 2;
> QUERY PLAN
> --
>  Append
>->  Foreign Scan on p2
> (2 rows)
>
> select * from p where a = 2;
>  a
> ---
> (0 rows)
>
> Should we do something about this (treat as an open item)?

Per https://www.postgresql.org/docs/devel/static/sql-createforeigntable.html,
constraints on the foreign table should represent a constraint that is
being enforced by the remote server. Similarly, a partition constraint
should also be enforced at the foreign server. Probably we should
update the documentation of CREATE FOREIGN TABLE to mention this. We have
updated ALTER TABLE ATTACH PARTITION documentation with a note on
foreign tables.
-- 
Best Wishes,
Ashutosh Bapat
EnterpriseDB Corporation
The Postgres Database Company


-- 
Sent via pgsql-hackers mailing list (pgsql-hackers@postgresql.org)
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers


Re: [HACKERS] multivariate statistics (v25)

2017-03-31 Thread David Rowley
On 31 March 2017 at 21:18, Kyotaro HORIGUCHI <
horiguchi.kyot...@lab.ntt.co.jp> wrote:

> Hello,
>
> At Fri, 31 Mar 2017 03:03:06 +1300, David Rowley <
> david.row...@2ndquadrant.com> wrote in  T5JLce5ynCi1vvezXxX=w...@mail.gmail.com>
>
> FWIW, I tried this. It applied cleanly, but make ends with
> the following error.
>
> $ make -s
> Writing postgres.bki
> Writing schemapg.h
> Writing postgres.description
> Writing postgres.shdescription
> Writing fmgroids.h
> Writing fmgrprotos.h
> Writing fmgrtab.c
> make[3]: *** No rule to make target `dependencies.o', needed by
> `objfiles.txt'.  Stop.
> make[2]: *** [statistics-recursive] Error 2
> make[1]: *** [all-backend-recurse] Error 2
> make: *** [all-src-recurse] Error 2


Apologies. I was caught out by patching back onto master, then committing,
and git diff'ing the last commit, where I'd of course forgotten to git add
those files.

I'm just in the middle of fixing up some other stuff. Hopefully I'll post a
working patch soon.

-- 
 David Rowley   http://www.2ndQuadrant.com/
 PostgreSQL Development, 24x7 Support, Training & Services


Re: [HACKERS] Something broken around FDW connection close

2017-03-31 Thread David Rowley
On 31 March 2017 at 16:32, Etsuro Fujita 
wrote:

> On 2017/03/31 8:28, David Rowley wrote:
>
>> create table t (a int, b int);
>> insert into t select x/100,x/100 from generate_series(1,10) x;
>> create extension if not exists postgres_fdw;
>> create server test_server foreign data wrapper postgres_fdw options
>> (host 'localhost', port '5432', dbname 'postgres');
>> create foreign table ft_t (a int,b int) server test_server;
>> select 'create user mapping for current_user server test_server
>> options(user ''' || current_user || ''');';
>> \gexec
>> select count(*) from pg_stat_Activity; -- > 6
>> analyze ft_t;
>> ERROR:  could not connect to server "test_server"
>> DETAIL:  FATAL:  sorry, too many clients already
>> CONTEXT:  Remote SQL command: DECLARE c1 CURSOR FOR SELECT a, b FROM
>> public.ft_t
>> Remote SQL command: SELECT a, b FROM public.ft_t
>> Remote SQL command: SELECT a, b FROM public.ft_t
>> Remote SQL command: SELECT a, b FROM public.ft_t
>> (lots of these)
>>
>> select count(*) from pg_stat_Activity; --> 105
>>
>> I've not had a moment to check into what's going on.
>>
>
> IIUC, I think the cause is that the foreign table ft_t is still considered
> foreign on the foreign server (which is actually the same server), so
> postgres_fdw recursively repeats the loopback access to ft_t.
> (The same thing would happen for something like: select * from ft_t.)
> If that analysis is right, ISTM that it's the user's fault.
>

Oh of course... I see exactly what I did wrong :-( sorry for the noise.
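
For the archives, based on Fujita-san's analysis above: the foreign table was
created without a table_name option, so it pointed back at itself through the
loopback server. A corrected definition would point at the plain table instead,
roughly like this (a sketch reusing the names from the recipe):

drop foreign table ft_t;
create foreign table ft_t (a int, b int) server test_server
  options (table_name 't');
analyze ft_t;  -- now needs only a single loopback connection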



-- 
 David Rowley   http://www.2ndQuadrant.com/
 PostgreSQL Development, 24x7 Support, Training & Services


Re: [HACKERS] UPDATE of partition key

2017-03-31 Thread Amit Langote
Hi Amit,

Thanks for the updated patches.

On 2017/03/28 19:12, Amit Khandekar wrote:
> On 27 March 2017 at 13:05, Amit Khandekar  wrote:
>>> Also, there are a few places in the documentation mentioning that such 
>>> updates cause error,
>>> which will need to be updated.  Perhaps also add some explanatory notes
>>> about the mechanism (delete+insert), trigger behavior, caveats, etc.
>>> There were some points discussed upthread that could be mentioned in the
>>> documentation.
>> Yeah, I agree. Documentation needs some important changes. I am still
>> working on them.
> 
> Attached patch v5 has above required doc changes added.
> 
> In the section 5.11 "Partitioning" => subsection 5 "Caveats", I have
> removed the caveat of not being able to update partition key. And it
> is now replaced by the caveat where an update/delete operations can
> silently miss a row when there is a concurrent UPDATE of partition-key
> happening.

Hmm, how about just removing the "partition-changing updates are
disallowed" caveat from the list on the 5.11 Partitioning page and explaining
the concurrency-related caveats on the UPDATE reference page?

> UPDATE row movement behaviour is described in :
> Part VI "Reference => SQL Commands => UPDATE
> 
>> On 4 March 2017 at 12:49, Robert Haas  wrote:
>>> How about running the BR update triggers for the old partition and the
>>> AR update triggers for the new partition?  It seems weird to run BR
>>> update triggers but not AR update triggers.  Another option would be
>>> to run BR and AR delete triggers and then BR and AR insert triggers,
>>> emphasizing the choice to treat this update as a delete + insert, but
>>> (as Amit Kh. pointed out to me when we were in a room together this
>>> week) that precludes using the BEFORE trigger to modify the row.
>>
>> I checked the trigger behaviour in case of UPSERT. Here, when there is
>> conflict found, ExecOnConflictUpdate() is called, and then the
>> function returns immediately, which means AR INSERT trigger will not
>> fire. And ExecOnConflictUpdate() calls ExecUpdate(), which means BR
>> and AR UPDATE triggers will be fired. So in short, when an INSERT
>> becomes an UPDATE, BR INSERT triggers do fire, but then the BR UPDATE
>> and AR UPDATE also get fired. On the same lines, it makes sense in
>> case of UPDATE=>DELETE+INSERT operation to fire a BR UPDATE trigger on
>> the original table, and then the BR and AR DELETE/INSERT triggers on
>> the respective tables.
>>
>> So the common policy can be :
>> Fire the BR trigger. It can be an INSERT/UPDATE/DELETE trigger depending
>> upon what the statement is.
>> If there is a change in the operation, according to what the operation
>> is converted to (UPDATE->DELETE+INSERT or INSERT->UPDATE), all the
>> respective triggers would be fired.
> 
> The current patch already has the behaviour as per above policy. So I
> have included the description of this trigger related behaviour in the
> "Overview of Trigger Behavior" section of the docs. This has been
> derived from the way it is written for trigger behaviour for UPSERT in
> the preceding section.

I tested how various row-level triggers behave and it all seems to work as
you have described in your various messages, which the latest patch also
documents.
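
To make the firing order concrete, here is the kind of test I used (a sketch;
it requires the patch, and the table, function and trigger names are made up).
For a row moving from pt1 to pt2, the expectation per the policy above is a
BEFORE UPDATE trigger on the source partition, then the DELETE triggers on the
source and the INSERT triggers on the destination, and no AFTER UPDATE trigger:

create table pt (a int) partition by list (a);
create table pt1 partition of pt for values in (1);
create table pt2 partition of pt for values in (2);

create function trace_trig() returns trigger language plpgsql as $$
begin
  raise notice '% % on %', tg_when, tg_op, tg_table_name;
  if tg_op = 'DELETE' then
    return old;
  end if;
  return new;
end;
$$;

create trigger pt1_bef before update or delete on pt1
  for each row execute procedure trace_trig();
create trigger pt1_aft after update or delete on pt1
  for each row execute procedure trace_trig();
create trigger pt2_bef before insert on pt2
  for each row execute procedure trace_trig();
create trigger pt2_aft after insert on pt2
  for each row execute procedure trace_trig();

insert into pt values (1);
update pt set a = 2 where a = 1;
-- expected notices (roughly, per the policy described above):
-- BEFORE UPDATE on pt1, BEFORE DELETE on pt1, BEFORE INSERT on pt2,
-- then the AFTER DELETE/INSERT triggers; no AFTER UPDATE trigger fires.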

Some comments on the patch itself:

-  An UPDATE that causes a row to move from one partition to
-  another fails, because the new value of the row fails to satisfy the
-  implicit partition constraint of the original partition.  This might
-  change in future releases.
+  An UPDATE causes a row to move from one partition to another
+  if the new value of the row fails to satisfy the implicit partition


As mentioned above, we can simply remove this item from the list of
caveats on ddl.sgml.  The new text can be moved to the Notes portion of
the UPDATE reference page.

+If an UPDATE on a partitioned table causes a row to
+move to another partition, it is possible that all row-level
+BEFORE UPDATE triggers and all row-level
+BEFORE DELETE/INSERT
+triggers are applied on the respective partitions in a way that is apparent
+from the final state of the updated row.

How about dropping "it is possible that" from this sentence?

+UPDATE is done by doing a DELETE on

How about: s/is done/is performed/g

+triggers are not applied because the UPDATE is converted
+to a DELETE and UPDATE.

I think you meant DELETE and INSERT.

+if (resultRelInfo->ri_PartitionCheck &&
+!ExecPartitionCheck(resultRelInfo, slot, estate))
+{

How about a one-line comment on what this block of code does?

- * Check the constraints of the tuple.  Note that we pass the same
+ * Check the constraints of the tuple. Note that we pass the same

I think that this hunk is not necessary.  (I've heard that two spaces
after a sentence-ending period is not a problem [1].)

+ * We have already run partition constraints above, so skip them

Re: [HACKERS] Partitioned tables and relfilenode

2017-03-31 Thread Kyotaro HORIGUCHI
At Fri, 31 Mar 2017 16:17:05 +0900, Amit Langote 
 wrote in 
<2dec1acb-6e2f-5aa5-0e26-fcc172ce9...@lab.ntt.co.jp>
> Horiguchi-san,
> 
> On 2017/03/31 15:50, Kyotaro HORIGUCHI wrote:
> > At Thu, 30 Mar 2017 20:58:35 +0900, Amit Langote wrote:
> >> Updated patch attached.
> > 
> > Thank you.
> > 
> > - Applies cleanly on master (f90d23d)
> > - Compiled without error
> > - Code seems fine.
> - Documentation seems fine to me.
> > - Passes regression test.
> > - Leaving the ALTER-on-toast.* problem is fine for me.
> 
> Thanks for the review.
> 
> The regression tests contain tests that expect failures for several
> reloptions only on partitioned tables. Are they still required even though
> this is now handled in the same framework as other kinds of reloptions?
> 
> Hmm, maybe we don't really need those tests, so removed.

I have no more comment on this. Thank you.

regards,

-- 
Kyotaro Horiguchi
NTT Open Source Software Center





Re: [HACKERS] multivariate statistics (v25)

2017-03-31 Thread Kyotaro HORIGUCHI
Hello,

At Fri, 31 Mar 2017 03:03:06 +1300, David Rowley  
wrote in 
> On 25 March 2017 at 07:35, Alvaro Herrera  wrote:
> 
> > As I said in another thread, I pushed parts 0002,0003,0004.  Tomas said
> > he would try to rebase patches 0001,0005,0006 on top of what was
> > committed.  My intention is to give that one a look as soon as it is
> > available.  So we will have n-distinct and functional dependencies in
> > PG10.  It sounds unlikely that we will get MCVs and histograms in, since
> > they're each a lot of code.
> >
> 
> I've been working on the MV functional dependencies part of the patch to
> polish it up a bit. Tomas has been busy with a few other duties.
> 
> I've made some changes around how clauselist_selectivity() determines if it
> should try to apply any extended stats. The solution I came up with was to
> add two parameters to this function, one for the RelOptInfo in question,
> and one a bool to control if we should try to apply any extended stats.
> For clauselist_selectivity() usage involving join rels we just pass the rel
> as NULL, that way we can skip all the extended stats stuff with very low
> overhead. When we actually have a base relation to pass along we can do so,
> along with a true tryextstats value to have the function attempt to use any
> extended stats to assist with the selectivity estimation.
> 
> When adding these two parameters I had 2nd thoughts that the "tryextstats"
> was required at all. We could just have this controlled by if the rel is a
> base rel of kind RTE_RELATION. I ended up having to pass these parameters
> further, down to clauselist_selectivity's singleton couterpart,
> clause_selectivity(). This was due to clause_selectivity() calling
> clauselist_selectivity() for some clause types. I'm not entirely sure if
> this is actually required, but I can't see any reason for it to cause
> problems.

I understand that the reason for tryextstats is that the two are
perfectly correlated, but clause_selectivity requires the
RelOptInfo anyway. Some comment about that may be required in the
function comment.

> I've also attempted to simplify some of the logic within
> clauselist_selectivity and some other parts of clausesel.c to remove some
> unneeded code and make it a bit more efficient. For example, we no longer
> count the attributes in the clause list before calling a similar function
> to retrieve the actual attnums. This is now done as a single step.
> 
> I've not yet quite gotten as far as I'd like with this. I'd quite like to
> see clauselist_ext_split() gone, and instead we could build up a bitmapset
> of clause list indexes to ignore when applying the selectivity of clauses
> that couldn't use any extended stats. I'm planning on having a bit more of
> a look at this tomorrow.
> 
> The attached patch should apply to master as
> of f90d23d0c51895e0d7db7910538e85d3d38691f0.

FWIW, I tried this. It applied cleanly, but make ends with
the following error.

$ make -s
Writing postgres.bki
Writing schemapg.h
Writing postgres.description
Writing postgres.shdescription
Writing fmgroids.h
Writing fmgrprotos.h
Writing fmgrtab.c
make[3]: *** No rule to make target `dependencies.o', needed by `objfiles.txt'.  Stop.
make[2]: *** [statistics-recursive] Error 2
make[1]: *** [all-backend-recurse] Error 2
make: *** [all-src-recurse] Error 2


Some random comments by just looking on the patch:

==
The names of the functions "collect_ext_attnums" and
"clause_is_ext_compatible" seem odd, since "ext" alone doesn't read
as part of "extended statistics". Some other names have the same
problem, too.

Something like "collect_e(xt)stat_compatible_attnums" and
"clause_is_e(xt)stat_compatible" seem better to me.

==
The following comment seems something wrong.

+ * When applying functional dependencies, we start with the strongest ones
+ * strongest dependencies. That is, we select the dependency that:

==
dependency_is_fully_matched() is not found. Maybe it assumes some other
patches are applied?

==
+   /* see if it actually has the right */
+   ok = (NumRelids((Node *) expr) == 1) &&
+   (is_pseudo_constant_clause(lsecond(expr->args)) ||
+(varonleft = false,
+ is_pseudo_constant_clause(linitial(expr->args;
+
+   /* unsupported structure (two variables or so) */
+   if (!ok)
+   return true;

ok is used only here. I don't think expressions with side
effects (the assignment to varonleft inside the condition) are a good idea here.

==
+   switch (get_oprrest(expr->opno))
+   {
+   case F_EQSEL:
+
+   /* equality conditions are compatible with all 
statistics */
+   break;
+
+   default:
+
+   /* unknown estimator */
+   return true;
+   }

This seems somewhat stupid..

regards,

-- 
Kyotaro Horiguchi

Re: [HACKERS] REINDEX CONCURRENTLY 2.0

2017-03-31 Thread Andreas Karlsson

Thanks for the feedback. I will look at it when I get the time.

On 03/31/2017 08:27 AM, Michael Paquier wrote:

- Do a per-index rebuild and not a per-relation rebuild for concurrent
indexing. Doing a per-relation reindex has the disadvantage that many
objects need to be created at the same time, and in the case of
REINDEX CONCURRENTLY the time of the operation is not what matters, it is
how intrusive the operation is. Relations with many indexes would also
result in many object locks taken at each step.


I am personally worried about the amount of time spent waiting for
long-running transactions if you reindex per index rather than per relation.
While you wait on long-running transactions for one index, nothing
prevents a new long transaction from starting, which we will then have to
wait for while reindexing the next index. If your database has many
long-running transactions, more time will be spent waiting than
working.


Is the number of locks really a big deal compared to the other costs
involved here? REINDEX does a lot of expensive things like starting
transactions, taking snapshots, scanning large tables, building a new
index, etc. The trade-off I see is between temporary disk usage and time
spent waiting for transactions, and doing the REINDEX per relation
allows for flexibility since people can still explicitly reindex per
index if they want to.
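
To spell out that flexibility: under the patch (as I understand the proposed
syntax) both forms would be available, and a manual per-index workaround exists
today as well. A sketch, with made-up object names; the manual recipe only
covers plain, non-constraint indexes:

-- proposed syntax (requires the patch under discussion):
reindex index concurrently some_index;   -- per index
reindex table concurrently some_table;   -- per relation

-- rough manual per-index equivalent available today:
create index concurrently some_index_new on some_table (some_col);
drop index concurrently some_index;
alter index some_index_new rename to some_index;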


Andreas




[HACKERS] Foreign tables don't enforce the partition constraint

2017-03-31 Thread Amit Langote
We don't enforce the constraints defined on foreign tables in ExecInsert()
and ExecUpdate().  (COPY FROM does not support foreign tables at all.)
Since partition constraints are enforced using ExecConstraints() which is
not called for foreign tables, they will not be checked if one inserts
directly into foreign partitions.  So:

create table p (a int) partition by list (a);
create table p1t (like p);
create table p2t (like p);
create foreign table p1 partition of p for values in (1)
  server loopback options (table_name 'p1t');
create foreign table p2 partition of p for values in (2)
  server loopback options (table_name 'p2t');
insert into p1 values (2);  -- ungood
insert into p2 values (1);  -- ungood

While we have the ability to mark check constraints as being NOT VALID so
that planner can ignore them, partition constraints are assumed to
*always* hold, giving possibly surprising results.

explain (costs off) select * from p where a = 1;
QUERY PLAN
--
 Append
   ->  Foreign Scan on p1
(2 rows)

select * from p where a = 1;
 a
---
(0 rows)

explain (costs off) select * from p where a = 2;
QUERY PLAN
--
 Append
   ->  Foreign Scan on p2
(2 rows)

select * from p where a = 2;
 a
---
(0 rows)

Should we do something about this (treat as an open item)?

Thanks,
Amit






Re: [HACKERS] Allow interrupts on waiting standby

2017-03-31 Thread Tsunakawa, Takayuki
From: Michael Paquier [mailto:michael.paqu...@gmail.com]
> Oops, sorry for that, I quite messed up with this patch. The WaitLatch() call
> should still have WL_POSTMASTER_DEATH so that it can leave earlier, but yes,
> I agree with your analysis regarding HandleStartupProcInterrupts(), as this is
> part of the redo work.

Thank you, but did you remove WL_LATCH_SET from WaitLatch() intentionally?  I 
understood you added it for the startup process to respond quickly to events other 
than postmaster death.  Why don't we restore WL_LATCH_SET?  I won't object 
to not adding the flag if there's a reason.

I'll mark this as ready for committer when I see WL_LATCH_SET added (optional) 
and you have reported that you did the following test cases:

* Startup process vanishes immediately after postmaster dies, while it is 
waiting for a recovery conflict to be resolved.

* Startup process vanishes immediately after "pg_ctl stop -m fast", while it is 
waiting for a recovery conflict to be resolved.

* Startup process resumes WAL application when max_standby_{archive | 
streaming}_delay is changed from the default -1 to a short period, e.g. 10s, 
and "pg_ctl reload" is performed, while it is waiting for a recovery conflict 
to be resolved.


> > Did Simon's committed patch solve the problem as expected?
> 
> Does not seem so, I'll let Simon comment on this matter...

Agreed.  I guess his patch for earlier releases should work if 
CHECK_FOR_INTERRUPTS() is replaced with HandleStartupProcInterrupts().

Regards
Takayuki Tsunakawa




[HACKERS] Parallel query execution with SPI

2017-03-31 Thread Konstantin Knizhnik

Hi hackers,

Is it possible to execute a query in parallel using SPI?
If so, how can it be enforced?
I tried to open a cursor with the CURSOR_OPT_PARALLEL_OK flag but it doesn't
help: the query is executed by a single backend, while the same query
launched at the top level uses a parallel plan:


fsstate->portal = SPI_cursor_open_with_args(NULL, fsstate->query, 
fsstate->numParams, argtypes, values, nulls, true, CURSOR_OPT_PARALLEL_OK);

...
SPI_cursor_fetch(fsstate->portal, true, 1);

Thanks in advance,

--
Konstantin Knizhnik
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company





Re: [HACKERS] Partitioned tables and relfilenode

2017-03-31 Thread Amit Langote
Horiguchi-san,

On 2017/03/31 15:50, Kyotaro HORIGUCHI wrote:
> At Thu, 30 Mar 2017 20:58:35 +0900, Amit Langote wrote:
>> Updated patch attached.
> 
> Thank you.
> 
> - Applies cleanly on master (f90d23d)
> - Compiled without error
> - Code seems fine.
> - Documentation seems fine to me.
> - Passes regression test.
> - Leaving the ALTER-on-toast.* problem is fine for me.

Thanks for the review.

> The regression tests contain tests that expect failures for several
> reloptions only on partitioned tables. Are they still required even though
> this is now handled in the same framework as other kinds of reloptions?

Hmm, maybe we don't really need those tests, so removed.
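
For context, the behaviour those tests exercised is that storage parameters
are rejected for partitioned tables. With the patch, something like the
following should fail (a sketch; the exact error wording may differ):

create table parted (a int) partition by list (a) with (fillfactor = 40);
-- ERROR:  unrecognized parameter "fillfactor"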

Thanks,
Amit
From bc80db31f218d299ad4ccfa5140088382ac947db Mon Sep 17 00:00:00 2001
From: amit 
Date: Tue, 24 Jan 2017 14:22:34 +0900
Subject: [PATCH] Do not allocate storage for partitioned tables.

Currently, a partitioned table itself never stores any data; rows inserted
into it are routed to its leaf partitions.  So it is empty at all times,
which means it is wasteful to allocate a relfilenode and related storage
objects for it.

Also, because there is no storage, it doesn't make sense to allow
specifying storage parameters such as fillfactor, etc.

Patch by: Amit Langote & Maksim Milyutin
Reviewed by: Kyotaro Horiguchi
---
 doc/src/sgml/ref/create_table.sgml |  6 ++
 src/backend/access/common/reloptions.c | 33 -
 src/backend/catalog/heap.c | 20 
 src/include/access/reloptions.h|  3 ++-
 4 files changed, 44 insertions(+), 18 deletions(-)

diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index 283d53e203..5f13f240f5 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -1040,6 +1040,12 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
 will use the table's parameter value.

 
+   
+Note that specifying the following parameters for partitioned tables is
+not supported.  You must specify them for individual leaf partitions if
+necessary.
+   
+

 

diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 72e12532ab..f3f5ade38f 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -1024,21 +1024,28 @@ parseRelOptions(Datum options, bool validate, relopt_kind kind,
 		if (relOpts[i]->kinds & kind)
 			numoptions++;
 
-	if (numoptions == 0)
+	/*
+	 * Even if we found no options of kind, it's better to raise the
+	 * "unrecognized parameter %s" error here, instead of at the caller.
+	 */
+	if (numoptions == 0 && !validate)
 	{
 		*numrelopts = 0;
 		return NULL;
 	}
 
-	reloptions = palloc(numoptions * sizeof(relopt_value));
-
-	for (i = 0, j = 0; relOpts[i]; i++)
+	if (numoptions > 0)
 	{
-		if (relOpts[i]->kinds & kind)
+		reloptions = palloc(numoptions * sizeof(relopt_value));
+
+		for (i = 0, j = 0; relOpts[i]; i++)
 		{
-			reloptions[j].gen = relOpts[i];
-			reloptions[j].isset = false;
-			j++;
+			if (relOpts[i]->kinds & kind)
+			{
+reloptions[j].gen = relOpts[i];
+reloptions[j].isset = false;
+j++;
+			}
 		}
 	}
 
@@ -1418,8 +1425,16 @@ heap_reloptions(char relkind, Datum reloptions, bool validate)
 			return (bytea *) rdopts;
 		case RELKIND_RELATION:
 		case RELKIND_MATVIEW:
-		case RELKIND_PARTITIONED_TABLE:
 			return default_reloptions(reloptions, validate, RELOPT_KIND_HEAP);
+
+		/*
+		 * There aren't any partitioned table options yet really, so the
+		 * following simply serves to reject whatever's specified as being
+		 * "unrecognized storage parameter" if validate is true.
+		 */
+		case RELKIND_PARTITIONED_TABLE:
+			return default_reloptions(reloptions, validate,
+	  RELOPT_KIND_PARTITIONED);
 		default:
 			/* other relkinds are not supported */
 			return NULL;
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index eee5e2f6ca..ff2f2e8efb 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -293,6 +293,7 @@ heap_create(const char *relname,
 		case RELKIND_VIEW:
 		case RELKIND_COMPOSITE_TYPE:
 		case RELKIND_FOREIGN_TABLE:
+		case RELKIND_PARTITIONED_TABLE:
 			create_storage = false;
 
 			/*
@@ -1347,14 +1348,13 @@ heap_create_with_catalog(const char *relname,
 	if (oncommit != ONCOMMIT_NOOP)
 		register_on_commit_action(relid, oncommit);
 
-	if (relpersistence == RELPERSISTENCE_UNLOGGED)
-	{
-		Assert(relkind == RELKIND_RELATION || relkind == RELKIND_MATVIEW ||
-			   relkind == RELKIND_TOASTVALUE ||
-			   relkind == RELKIND_PARTITIONED_TABLE);
-
+	/*
+	 * We do not want to create any storage objects for a partitioned
+	 * table, including the init fork.
+	 */
+	if (relpersistence == RELPERSISTENCE_UNLOGGED &&
+		relkind != RELKIND_PARTITIONED_TABLE)
 		heap_create_init_fork(new_rel_desc);
-	}
 
 	/*
 	 * ok, the relation has been cataloged, so close our relations and return
@@ -1378,6 +1378,9 @@ heap_create_with_catalog(

[HACKERS] [bug fix] Savepoint-related statements terminates connection

2017-03-31 Thread Tsunakawa, Takayuki
Hello,

I found a trivial bug that terminates the connection.  The attached patch fixes 
this.


PROBLEM


Savepoint-related statements in a multi-command query terminate the connection 
unexpectedly, as follows.

$ psql -d postgres -c "SELECT 1; SAVEPOINT sp"
FATAL:  DefineSavepoint: unexpected state STARTED
server closed the connection unexpectedly
This probably means the server terminated abnormally
before or while processing the request.
connection to server was lost


CAUSE


1. In exec_simple_query(), isTopLevel is computed as false, because the
multi-command query string contains more than one statement.

isTopLevel = (list_length(parsetree_list) == 1);

Then it is passed to PortalRun().

(void) PortalRun(portal,
 FETCH_ALL,
 isTopLevel,
 receiver,
 receiver,
 completionTag);

2. The isTopLevel flag is passed through ProcessUtility() to 
RequireTransactionChain().


RequireTransactionChain(isTopLevel, "SAVEPOINT");


3. CheckTransactionChain() returns successfully here:

/*
 * inside a function call?
 */
if (!isTopLevel)
return;


4. Finally, DefineSavepoint() is reached unexpectedly and reports an invalid
transaction block state.

/* These cases are invalid. */
case TBLOCK_DEFAULT:
case TBLOCK_STARTED:
...
elog(FATAL, "DefineSavepoint: unexpected state %s",
 BlockStateAsString(s->blockState));



SOLUTION


The manual page says "Savepoints can only be established when inside a 
transaction block."  So just check for it.

https://www.postgresql.org/docs/devel/static/sql-savepoint.html
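
For comparison, the same statements work when an explicit transaction block is
opened first, because the backend is then in a valid state for DefineSavepoint();
the added check only rejects the out-of-transaction-block case (sketch):

$ psql -d postgres -c "BEGIN; SELECT 1; SAVEPOINT sp; RELEASE SAVEPOINT sp; COMMIT"
COMMIT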


RESULT AFTER FIX


$ psql -d postgres -c "SELECT 1; SAVEPOINT sp"
ERROR:  SAVEPOINT can only be used in transaction blocks


Regards
Takayuki Tsunakawa



savept-in-multi-cmd.patch
Description: savept-in-multi-cmd.patch



Re: [HACKERS] Implementation of SASLprep for SCRAM-SHA-256

2017-03-31 Thread Michael Paquier
On Wed, Mar 8, 2017 at 10:39 PM, Robert Haas  wrote:
> On Tue, Mar 7, 2017 at 10:01 PM, Michael Paquier
>  wrote:
>> This way, we can be sure that two UTF-8 strings are considered as
>> equivalent in a SASL exchange; in our case we care about the password
>> string (we should care about the username as well). Without SASLprep,
>> our implementation of SCRAM may fail with other third-party tools if
>> passwords are not made only of ASCII characters. And that sucks.
>
> Agreed.  I am not sure this quite rises to the level of a stop-ship
> issue; we could document the restriction.

I am not sure about that; what we have now on HEAD is still useful and
better than MD5.

> However, that's pretty ugly; it would be a whole lot better to get this fixed.

Agreed.

> I kinda hope Heikki is going to step up to the plate here, because I
> think he understands most of it already.

The second person who knows a good deal about NFKC is Ishii-san.

Attached is a rebased patch.
-- 
Michael


0001-Implement-SASLprep-aka-NFKC-for-SCRAM-authentication.patch.gz
Description: GNU Zip compressed data



Re: [HACKERS] Patch: Write Amplification Reduction Method (WARM)

2017-03-31 Thread Dilip Kumar
On Thu, Mar 30, 2017 at 5:27 PM, Amit Kapila  wrote:
> I am not sure if we can consider it as completely synthetic because we
> might see some similar cases for json datatypes.  Can we once try to
> see the impact when the same test runs from multiple clients?  For
> your information, I am also trying to setup some tests along with one
> of my colleague and we will report the results once the tests are
> complete.
We have done some testing and below is the test details and results.

Test:
I have derived this test from above test given by pavan[1] except
below difference.

- I have reduced the fill factor to 40 to ensure that there is room in the
page to store multiple WARM chains.
- WARM updated all the tuples.
- Executed a large select to force a lot of tuple rechecks within a single query.
- Smaller tuple size (the aid field is around ~100 bytes) just to ensure
tuples have sufficient space on a page to get WARM updated.

Results:
---
* I can see more than 15% regression in this case. This regression
is repeatable.
* If I increase the fill factor to 90, the regression is reduced to 7%;
maybe fewer tuples are getting WARM updated and others are not
because there is no space left on the page after a few WARM updates.

Test Setup:

Machine Information:

Intel(R) Xeon(R) CPU E5-2695 v3 @ 2.30GHz
RAM: 64GB

Config Change:
  synchronous_commit=off

——Setup.sql—

DROP TABLE IF EXISTS pgbench_accounts;
CREATE TABLE pgbench_accounts (
aid text,
bid bigint,
abalance bigint,
filler1 text DEFAULT md5(random()::text),
filler2 text DEFAULT md5(random()::text),
filler3 text DEFAULT md5(random()::text),
filler4 text DEFAULT md5(random()::text),
filler5 text DEFAULT md5(random()::text),
filler6 text DEFAULT md5(random()::text),
filler7 text DEFAULT md5(random()::text),
filler8 text DEFAULT md5(random()::text),
filler9 text DEFAULT md5(random()::text),
filler10 text DEFAULT md5(random()::text),
filler11 text DEFAULT md5(random()::text),
filler12 text DEFAULT md5(random()::text)
) WITH (fillfactor=40);

\set scale 10
\set end 0
\set start (:end + 1)
\set end (:start + (:scale * 100))

INSERT INTO pgbench_accounts SELECT generate_series(:start, :end
)::text || repeat('a', 100), (random()::bigint) % :scale, 0;

CREATE UNIQUE INDEX pgb_a_aid ON pgbench_accounts(aid);
CREATE INDEX pgb_a_filler1 ON pgbench_accounts(filler1);
CREATE INDEX pgb_a_filler2 ON pgbench_accounts(filler2);
CREATE INDEX pgb_a_filler3 ON pgbench_accounts(filler3);
CREATE INDEX pgb_a_filler4 ON pgbench_accounts(filler4);

UPDATE pgbench_accounts SET filler1 = 'X';--WARM update all the tuples

—Test.sql—
set enable_seqscan=off;
set enable_bitmapscan=off;
explain analyze select * FROM pgbench_accounts WHERE aid < '400' ||
repeat('a', 100) ORDER BY aid

—Script.sh—
./psql -d postgres -f setup.sql
./pgbench -c1 -j1 -T300 -M prepared -f test.sql postgres

Patch:
tps = 3554.345313 (including connections establishing)
tps = 3554.880776 (excluding connections establishing)

Head:
tps = 4208.876829 (including connections establishing)
tps = 4209.440321 (excluding connections establishing)

*** After changing fill factor to 90 —

Patch:
tps = 3794.414770 (including connections establishing)
tps = 3794.919592 (excluding connections establishing)

Head:
tps = 4206.445608 (including connections establishing)
tps = 4207.033559 (excluding connections establishing)

[1]
https://www.postgresql.org/message-id/CABOikdMduu9wOhfvNzqVuNW4YdBgbgwv-A%3DHNFCL7R5Tmbx7JA%40mail.gmail.com


I have done some profiling of the patch and noticed that the time spent
in the heap_check_warm_chain function has increased.

Top 10 functions in perf results (with patch):
+8.98% 1.04%  postgres  postgres[.] varstr_cmp
+7.24% 0.00%  postgres  [unknown]   [.] 
+6.34% 0.36%  postgres  libc-2.17.so[.] clock_gettime
+6.34% 0.00%  postgres  [unknown]   [.] 0x0003
+6.18% 6.15%  postgres  [vdso]  [.] __vdso_clock_gettime
+5.72% 0.02%  postgres  [kernel.kallsyms]   [k] system_call_fastpath
+4.08% 4.06%  postgres  libc-2.17.so[.] __memcpy_ssse3_back
+4.08% 4.06%  postgres  libc-2.17.so[.] get_next_seq
+3.92% 0.00%  postgres  [unknown]   [.] 0x6161616161616161
+3.07% 3.05%  postgres  postgres[.] heap_check_warm_chain


Thanks to Amit for helping in discussing the test ideas.

-- 
Regards,
Dilip Kumar
EnterpriseDB: http://www.enterprisedb.com




Re: [HACKERS] Typo in libpq

2017-03-31 Thread Magnus Hagander
On Thu, Mar 30, 2017 at 3:48 PM, Daniel Gustafsson  wrote:

> There seems to be a typo in libpq as per attached, “..we will loose error
> messages” should probably be “..we will lose error messages”.
>

Applied, thanks.

-- 
 Magnus Hagander
 Me: http://www.hagander.net/
 Work: http://www.redpill-linpro.com/


Re: [HACKERS] gitlab post-mortem: pg_basebackup waiting for checkpoint

2017-03-31 Thread Magnus Hagander
On Wed, Mar 29, 2017 at 1:05 PM, Michael Banck 
wrote:

> Hi,
>
> Am Montag, den 27.02.2017, 16:20 +0100 schrieb Magnus Hagander:
> > On Sun, Feb 26, 2017 at 9:59 PM, Tom Lane  wrote:
> > Is there an argument for back-patching this?
> >
> >
> > Seems you were typing that at the same time as we did.
> >
> >
> > I'm considering it, but not swayed in either direction. Should I take
> > your comment as a vote that we should back-patch it?
>
> I've checked back into this thread, and there seems to be a +1 from Tom
> and a +(0.5-1) from Simon for backpatching, and no obvious -1s. Did you
> decide against it in the end, or is this still an open item?


No, I plan to work on it, so it's still an open item. I've been backlogged
with other things, but I will try to get to it soon.

(This also includes considering Jeff's note)

-- 
 Magnus Hagander
 Me: http://www.hagander.net/
 Work: http://www.redpill-linpro.com/

