On Sun, Apr 21, 2019 at 01:03:50AM -0400, Gunther wrote:
> On 4/20/2019 21:14, Tomas Vondra wrote:
> >Maybe. But before wasting any more time on the memory leak investigation,
> >I suggest you first try the patch moving the BufFile allocations to a
> >separate context. That'll either confirm or disprove the theory.
>
> OK, fair enough. So, first patch 0001-* applied, recompiled and
>
> 2019-04-21 04:08:04.364 UTC [11304] LOG:  server process (PID 11313) was
> terminated by signal 11: Segmentation fault
...
> turns out the MemoryContext is NULL:
>
> (gdb) p context
> $1 = (MemoryContext) 0x0
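For anyone following along, I assume v1 only created fileCtx on one code path,
so another path effectively did the following.  MemoryContextSwitchTo() just
assigns CurrentMemoryContext and hands back the old value, so switching into a
never-created (NULL) fileCtx doesn't fail on the spot; it's the next palloc,
which reads CurrentMemoryContext, that dereferences the NULL pointer and
segfaults.  Sketch of the failure mode only, not real code:

    /* illustration only: what happens if fileCtx was never created */
    MemoryContext oldctx = MemoryContextSwitchTo(hashtable->fileCtx);  /* fileCtx == NULL, no error yet */

    hashtable->innerBatchFile = (BufFile **)
        palloc0(nbatch * sizeof(BufFile *));   /* palloc0 dereferences CurrentMemoryContext -> SIGSEGV */

    MemoryContextSwitchTo(oldctx);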
I updated Tomas' patch to unconditionally set the context.  (Note that the
oldctx vs. oldcxt naming is fairly subtle, but I think it's deliberate?)
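In outline, the patch below amounts to this pattern (fragments only, same
identifiers as in the diff; the real hunks also cover ExecHashIncreaseNumBatches,
the skew-bucket path, and the outer-batch case in nodeHashjoin.c):

    /* ExecHashTableCreate(): always create the context, next to batchCxt */
    hashtable->fileCtx = AllocSetContextCreate(hashtable->hashCxt,
                                               "hash batch files",
                                               ALLOCSET_DEFAULT_SIZES);

    /* ...and at each site that spills to a batch file: switch, call, switch back */
    oldctx = MemoryContextSwitchTo(hashtable->fileCtx);
    ExecHashJoinSaveTuple(tuple, hashvalue,
                          &hashtable->innerBatchFile[batchno]);
    MemoryContextSwitchTo(oldctx);

That way, whatever the BufFile machinery allocates is attributed to its own
"hash batch files" line in the MemoryContextStats() output the patch also
dumps, rather than to whichever context happens to be current at the call site.

Justin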
>From ff76679b27c9decc93f7a5527367d98d3e99ddb8 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryz...@telsasoft.com>
Date: Sun, 21 Apr 2019 02:54:10 -0500
Subject: [PATCH v2] move BufFile stuff into separate context

Author: Tomas Vondra
---
 src/backend/executor/nodeHash.c     | 42 +++++++++++++++++++++++++++++++++----
 src/backend/executor/nodeHashjoin.c |  3 +++
 src/include/executor/hashjoin.h     |  1 +
 3 files changed, 42 insertions(+), 4 deletions(-)

diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index 64eec91..4361aac 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -499,6 +499,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
 	hashtable->skewTuples = 0;
 	hashtable->innerBatchFile = NULL;
 	hashtable->outerBatchFile = NULL;
+	hashtable->fileCtx = NULL;
 	hashtable->spaceUsed = 0;
 	hashtable->spacePeak = 0;
 	hashtable->spaceAllowed = space_allowed;
@@ -527,6 +528,9 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
 	hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
 												"HashBatchContext",
 												ALLOCSET_DEFAULT_SIZES);
+	hashtable->fileCtx = AllocSetContextCreate(hashtable->hashCxt,
+											   "hash batch files",
+											   ALLOCSET_DEFAULT_SIZES);
 
 	/* Allocate data that will live for the life of the hashjoin */
 
@@ -562,10 +566,8 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
 
 	if (nbatch > 1 && hashtable->parallel_state == NULL)
 	{
-		/*
-		 * allocate and initialize the file arrays in hashCxt (not needed for
-		 * parallel case which uses shared tuplestores instead of raw files)
-		 */
+		MemoryContext oldctx;
+		oldctx = MemoryContextSwitchTo(hashtable->fileCtx);
 		hashtable->innerBatchFile = (BufFile **)
 			palloc0(nbatch * sizeof(BufFile *));
 		hashtable->outerBatchFile = (BufFile **)
@@ -573,6 +575,7 @@ ExecHashTableCreate(HashState *state, List *hashOperators, List *hashCollations,
 		/* The files will not be opened until needed... */
 		/* ... but make sure we have temp tablespaces established for them */
 		PrepareTempTablespaces();
+		MemoryContextSwitchTo(oldctx);
 	}
 
 	MemoryContextSwitchTo(oldcxt);
@@ -903,6 +906,11 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 	nbatch = oldnbatch * 2;
 	Assert(nbatch > 1);
 
+	elog(WARNING, "ExecHashIncreaseNumBatches: increasing number of batches from %d to %d", oldnbatch, nbatch);
+
+	elog(LOG, "ExecHashIncreaseNumBatches ======= context stats start =======");
+	MemoryContextStats(TopMemoryContext);
+
 #ifdef HJDEBUG
 	printf("Hashjoin %p: increasing nbatch to %d because space = %zu\n",
 		   hashtable, nbatch, hashtable->spaceUsed);
@@ -910,6 +918,7 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 
 	oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);
+	MemoryContextSwitchTo(hashtable->fileCtx);
 
 	if (hashtable->innerBatchFile == NULL)
 	{
 		/* we had no file arrays before */
@@ -1002,12 +1011,19 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 		}
 		else
 		{
+			MemoryContext oldctx;
+
 			/* dump it out */
 			Assert(batchno > curbatch);
+
+			oldctx = MemoryContextSwitchTo(hashtable->fileCtx);
+
 			ExecHashJoinSaveTuple(HJTUPLE_MINTUPLE(hashTuple),
 								  hashTuple->hashvalue,
 								  &hashtable->innerBatchFile[batchno]);
+			MemoryContextSwitchTo(oldctx);
+
 			hashtable->spaceUsed -= hashTupleSize;
 			nfreed++;
 		}
@@ -1045,6 +1061,9 @@ ExecHashIncreaseNumBatches(HashJoinTable hashtable)
 			   hashtable);
 #endif
 	}
+
+	elog(LOG, "ExecHashIncreaseNumBatches ======= context stats end =======");
+	MemoryContextStats(TopMemoryContext);
 }
 
 /*
@@ -1660,13 +1679,20 @@ ExecHashTableInsert(HashJoinTable hashtable,
 	}
 	else
 	{
+		MemoryContext oldctx;
+
 		/*
 		 * put the tuple into a temp file for later batches
 		 */
 		Assert(batchno > hashtable->curbatch);
+
+		oldctx = MemoryContextSwitchTo(hashtable->fileCtx);
+
 		ExecHashJoinSaveTuple(tuple,
 							  hashvalue,
 							  &hashtable->innerBatchFile[batchno]);
+
+		MemoryContextSwitchTo(oldctx);
 	}
 
 	if (shouldFree)
@@ -2508,10 +2534,18 @@ ExecHashRemoveNextSkewBucket(HashJoinTable hashtable)
 	}
 	else
 	{
+		MemoryContext oldctx;
+
 		/* Put the tuple into a temp file for later batches */
 		Assert(batchno > hashtable->curbatch);
+
+		oldctx = MemoryContextSwitchTo(hashtable->fileCtx);
+
 		ExecHashJoinSaveTuple(tuple, hashvalue,
 							  &hashtable->innerBatchFile[batchno]);
+
+		MemoryContextSwitchTo(oldctx);
+
 		pfree(hashTuple);
 		hashtable->spaceUsed -= tupleSize;
 		hashtable->spaceUsedSkew -= tupleSize;
diff --git a/src/backend/executor/nodeHashjoin.c b/src/backend/executor/nodeHashjoin.c
index aa43296..6dc93d9 100644
--- a/src/backend/executor/nodeHashjoin.c
+++ b/src/backend/executor/nodeHashjoin.c
@@ -393,6 +393,7 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
 					bool		shouldFree;
 					MinimalTuple mintuple = ExecFetchSlotMinimalTuple(outerTupleSlot,
 																	  &shouldFree);
+					MemoryContext oldctx;
 
 					/*
 					 * Need to postpone this outer tuple to a later batch.
@@ -400,12 +401,14 @@ ExecHashJoinImpl(PlanState *pstate, bool parallel)
 					 */
 					Assert(parallel_state == NULL);
 					Assert(batchno > hashtable->curbatch);
+					oldctx = MemoryContextSwitchTo(hashtable->fileCtx);
 					ExecHashJoinSaveTuple(mintuple, hashvalue,
 										  &hashtable->outerBatchFile[batchno]);
 
 					if (shouldFree)
 						heap_free_minimal_tuple(mintuple);
 
+					MemoryContextSwitchTo(oldctx);
 					/* Loop around, staying in HJ_NEED_NEW_OUTER state */
 					continue;
 				}
diff --git a/src/include/executor/hashjoin.h b/src/include/executor/hashjoin.h
index 2c94b92..6bfa5dc 100644
--- a/src/include/executor/hashjoin.h
+++ b/src/include/executor/hashjoin.h
@@ -328,6 +328,7 @@ typedef struct HashJoinTableData
 	 */
 	BufFile   **innerBatchFile; /* buffered virtual temp file per batch */
 	BufFile   **outerBatchFile; /* buffered virtual temp file per batch */
+	MemoryContext fileCtx;
 
 	/*
 	 * Info about the datatype-specific hash functions for the datatypes being
-- 
2.7.4