I think one of the most serious issues with BRIN indexes is that they are
not updated automatically as the table fills.  This patch attempts to
improve on that.  At brininsert() time, we check whether we're inserting
the first item on the first page of a range.  If we are, we request that
autovacuum run a summarization pass on that table.  This behavior depends
on a new BRIN reloption called "autosummarize", which defaults to off.
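
To illustrate, here is a condensed sketch of the check added to brininsert()
(not the exact hunk; variable names as in the patch below, with lastPageRange
being the start of the preceding range).  It only fires when the index has the
reloption enabled, e.g. WITH (autosummarize = on):

    if (autosummarize &&
        heapBlk == heapBlk0 &&          /* first block of its page range */
        ItemPointerGetOffsetNumber(heaptid) == FirstOffsetNumber)
    {
        /* if the previous range is still unsummarized, ask autovacuum for it */
        if (!brinGetTupleForHeapBlock(revmap, lastPageRange, &buf, &off, NULL,
                                      BUFFER_LOCK_SHARE, NULL))
            AutoVacuumRequestWork(AVW_BRINSummarizeRange,
                                  RelationGetRelid(idxRel));
    }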

The request mechanism works as follows: autovacuum maintains a DSA that
backends can fill with "work items".  Currently, a work item can only
request a BRIN summarization of a specific index; in the future we could
use this framework to request other kinds of work that don't fit the
"dead tuples / recently inserted tuples" logic autovacuum currently uses
to decide when to vacuum or analyze a table.
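
Condensed, the round trip implemented here looks roughly like this (both
ends are in the attached patch):

    /*
     * Backend side, in brininsert(): queue a request.  It is silently
     * dropped if the launcher hasn't created the DSA yet or the work-item
     * array is full.
     */
    AutoVacuumRequestWork(AVW_BRINSummarizeRange, RelationGetRelid(idxRel));

    /*
     * Autovacuum worker side, after the regular vacuum/analyze pass: claim
     * each pending item for the current database and execute it.
     */
    switch (workitem->avw_type)
    {
        case AVW_BRINSummarizeRange:
            DirectFunctionCall1(brin_summarize_new_values,
                                ObjectIdGetDatum(workitem->avw_relation));
            break;
    }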

However, it seems I have not quite gotten the hang of DSA just yet,
because after a couple of iterations, crashes occur.  I think the cause
is either a resource owner releasing the DSA at an unwelcome time, or a
mistake in my handling of DSA relative pointers.

This patch was initially written by Simon Riggs, who envisioned that
brininsert itself would invoke the summarization.  However, that doesn't
work, because summarization requires a ShareUpdateExclusive lock, which
brininsert doesn't hold.  So I modified things to use the DSA-based
request instead.  (He also set things up so that brininsert would only
summarize the just-filled range, but I didn't preserve that idea in the
autovacuum-based implementation; some changed lines there can probably
be removed.)

-- 
Álvaro Herrera                PostgreSQL Expert, https://www.2ndQuadrant.com/
diff --git a/doc/src/sgml/brin.sgml b/doc/src/sgml/brin.sgml
index 6448b18..480895b 100644
--- a/doc/src/sgml/brin.sgml
+++ b/doc/src/sgml/brin.sgml
@@ -74,9 +74,13 @@
    tuple; those tuples remain unsummarized until a summarization run is
    invoked later, creating initial summaries.
    This process can be invoked manually using the
-   <function>brin_summarize_new_values(regclass)</function> function,
-   or automatically when <command>VACUUM</command> processes the table.
+   <function>brin_summarize_new_values(regclass)</function> function;
+   automatically when <command>VACUUM</command> processes the table;
+   or by automatic summarization executed by autovacuum, as insertions
+   occur.  (This last trigger is disabled by default and is enabled with
+   the parameter <literal>autosummarize</literal>.)
   </para>
+
  </sect2>
 </sect1>
 
diff --git a/doc/src/sgml/ref/create_index.sgml b/doc/src/sgml/ref/create_index.sgml
index fcb7a60..80d9c39 100644
--- a/doc/src/sgml/ref/create_index.sgml
+++ b/doc/src/sgml/ref/create_index.sgml
@@ -382,7 +382,7 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] <replaceable class=
    </variablelist>
 
    <para>
-    <acronym>BRIN</> indexes accept a different parameter:
+    <acronym>BRIN</> indexes accept different parameters:
    </para>
 
    <variablelist>
@@ -396,6 +396,16 @@ CREATE [ UNIQUE ] INDEX [ CONCURRENTLY ] [ [ IF NOT EXISTS ] <replaceable class=
     </para>
     </listitem>
    </varlistentry>
+
+   <varlistentry>
+    <term><literal>autosummarize</></term>
+    <listitem>
+    <para>
+     Defines whether a summarization run is invoked for the previous page
+     range whenever an insertion is detected on the next one.
+    </para>
+    </listitem>
+   </varlistentry>
    </variablelist>
   </refsect2>
 
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c
index b22563b..01586ff 100644
--- a/src/backend/access/brin/brin.c
+++ b/src/backend/access/brin/brin.c
@@ -26,6 +26,7 @@
 #include "catalog/pg_am.h"
 #include "miscadmin.h"
 #include "pgstat.h"
+#include "postmaster/autovacuum.h"
 #include "storage/bufmgr.h"
 #include "storage/freespace.h"
 #include "utils/builtins.h"
@@ -60,10 +61,12 @@ typedef struct BrinOpaque
        BrinDesc   *bo_bdesc;
 } BrinOpaque;
 
+#define BRIN_ALL_BLOCKRANGES   InvalidBlockNumber
+
 static BrinBuildState *initialize_brin_buildstate(Relation idxRel,
                                                  BrinRevmap *revmap, BlockNumber pagesPerRange);
 static void terminate_brin_buildstate(BrinBuildState *state);
-static void brinsummarize(Relation index, Relation heapRel,
+static void brinsummarize(Relation index, Relation heapRel, BlockNumber pageRange,
                          double *numSummarized, double *numExisting);
 static void form_and_insert_tuple(BrinBuildState *state);
 static void union_tuples(BrinDesc *bdesc, BrinMemTuple *a,
@@ -126,8 +129,11 @@ brinhandler(PG_FUNCTION_ARGS)
  * with those of the new tuple.  If the tuple values are not consistent with
  * the summary tuple, we need to update the index tuple.
  *
+ * If autosummarization is enabled, check if we need to summarize the previous
+ * page range.
+ *
  * If the range is not currently summarized (i.e. the revmap returns NULL for
- * it), there's nothing to do.
+ * it), there's nothing to do for this tuple.
  */
 bool
 brininsert(Relation idxRel, Datum *values, bool *nulls,
@@ -141,6 +147,7 @@ brininsert(Relation idxRel, Datum *values, bool *nulls,
        Buffer          buf = InvalidBuffer;
        MemoryContext tupcxt = NULL;
        MemoryContext oldcxt = CurrentMemoryContext;
+       bool            autosummarize = BrinGetAutoSummarize(idxRel);
 
        revmap = brinRevmapInitialize(idxRel, &pagesPerRange, NULL);
 
@@ -148,18 +155,41 @@ brininsert(Relation idxRel, Datum *values, bool *nulls,
        {
                bool            need_insert = false;
                OffsetNumber off;
-               BrinTuple  *brtup;
+               BrinTuple  *brtup = NULL;
                BrinMemTuple *dtup;
                BlockNumber heapBlk;
+               BlockNumber heapBlk0;
                int                     keyno;
 
                CHECK_FOR_INTERRUPTS();
 
                heapBlk = ItemPointerGetBlockNumber(heaptid);
                /* normalize the block number to be the first block in the range */
-               heapBlk = (heapBlk / pagesPerRange) * pagesPerRange;
-               brtup = brinGetTupleForHeapBlock(revmap, heapBlk, &buf, &off, NULL,
-                                                                                BUFFER_LOCK_SHARE, NULL);
+               heapBlk0 = (heapBlk / pagesPerRange) * pagesPerRange;
+
+               /*
+                * If auto-summarization is enabled and we just inserted the first
+                * tuple into the first block of a new page range, request a
+                * summarization run.
+                */
+               if (autosummarize &&
+                       heapBlk == heapBlk0 &&
+                       ItemPointerGetOffsetNumber(heaptid) == FirstOffsetNumber)
+               {
+                       BlockNumber lastPageRange = heapBlk0;
+
+                       if (heapBlk0 >= pagesPerRange)
+                               lastPageRange -= pagesPerRange;
+                       brtup = brinGetTupleForHeapBlock(revmap, lastPageRange, &buf, &off, NULL,
+                                                                                        BUFFER_LOCK_SHARE, NULL);
+                       if (!brtup)
+                               AutoVacuumRequestWork(AVW_BRINSummarizeRange,
+                                                                         RelationGetRelid(idxRel));
+               }
+
+               if (!brtup)
+                       brtup = brinGetTupleForHeapBlock(revmap, heapBlk0, &buf, &off,
+                                                                                        NULL, BUFFER_LOCK_SHARE, NULL);
 
                /* if range is unsummarized, there's nothing to do */
                if (!brtup)
@@ -747,7 +777,7 @@ brinvacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 
        brin_vacuum_scan(info->index, info->strategy);
 
-       brinsummarize(info->index, heapRel,
+       brinsummarize(info->index, heapRel, BRIN_ALL_BLOCKRANGES,
                                  &stats->num_index_tuples, &stats->num_index_tuples);
 
        heap_close(heapRel, AccessShareLock);
@@ -765,7 +795,8 @@ brinoptions(Datum reloptions, bool validate)
        BrinOptions *rdopts;
        int                     numoptions;
        static const relopt_parse_elt tab[] = {
-               {"pages_per_range", RELOPT_TYPE_INT, offsetof(BrinOptions, pagesPerRange)}
+               {"pages_per_range", RELOPT_TYPE_INT, offsetof(BrinOptions, pagesPerRange)},
+               {"autosummarize", RELOPT_TYPE_BOOL, offsetof(BrinOptions, autosummarize)}
        };
 
        options = parseRelOptions(reloptions, validate, RELOPT_KIND_BRIN,
@@ -837,7 +868,7 @@ brin_summarize_new_values(PG_FUNCTION_ARGS)
                                                RelationGetRelationName(indexRel))));
 
        /* OK, do it */
-       brinsummarize(indexRel, heapRel, &numSummarized, NULL);
+       brinsummarize(indexRel, heapRel, BRIN_ALL_BLOCKRANGES, &numSummarized, NULL);
 
        relation_close(indexRel, ShareUpdateExclusiveLock);
        relation_close(heapRel, ShareUpdateExclusiveLock);
@@ -1063,17 +1094,17 @@ summarize_range(IndexInfo *indexInfo, BrinBuildState *state, Relation heapRel,
 }
 
 /*
- * Scan a complete BRIN index, and summarize each page range that's not already
- * summarized.  The index and heap must have been locked by caller in at
- * least ShareUpdateExclusiveLock mode.
+ * Scan a portion of a BRIN index, and summarize each page range that's not
+ * already summarized.  The index and heap must have been locked by caller in
+ * at least ShareUpdateExclusiveLock mode.
  *
  * For each new index tuple inserted, *numSummarized (if not NULL) is
  * incremented; for each existing tuple, *numExisting (if not NULL) is
  * incremented.
  */
 static void
-brinsummarize(Relation index, Relation heapRel, double *numSummarized,
-                         double *numExisting)
+brinsummarize(Relation index, Relation heapRel, BlockNumber pageRange,
+                         double *numSummarized, double *numExisting)
 {
        BrinRevmap *revmap;
        BrinBuildState *state = NULL;
@@ -1082,6 +1113,8 @@ brinsummarize(Relation index, Relation heapRel, double *numSummarized,
        BlockNumber heapBlk;
        BlockNumber pagesPerRange;
        Buffer          buf;
+       BlockNumber startBlk;
+       BlockNumber endBlk;
 
        revmap = brinRevmapInitialize(index, &pagesPerRange, NULL);
 
@@ -1090,7 +1123,20 @@ brinsummarize(Relation index, Relation heapRel, double *numSummarized,
         */
        buf = InvalidBuffer;
        heapNumBlocks = RelationGetNumberOfBlocks(heapRel);
-       for (heapBlk = 0; heapBlk < heapNumBlocks; heapBlk += pagesPerRange)
+       if (pageRange == BRIN_ALL_BLOCKRANGES ||
+               pageRange > heapNumBlocks)
+       {
+               startBlk = 0;
+               endBlk = heapNumBlocks;
+       }
+       else
+       {
+               startBlk = pageRange;
+               endBlk = startBlk + pagesPerRange;
+               if (endBlk > heapNumBlocks)
+                       endBlk = heapNumBlocks - 1;
+       }
+       for (heapBlk = startBlk; heapBlk < endBlk; heapBlk += pagesPerRange)
        {
                BrinTuple  *tup;
                OffsetNumber off;
diff --git a/src/backend/access/brin/brin_revmap.c b/src/backend/access/brin/brin_revmap.c
index 0de6999..5d45b48 100644
--- a/src/backend/access/brin/brin_revmap.c
+++ b/src/backend/access/brin/brin_revmap.c
@@ -205,7 +205,11 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk,
        /* normalize the heap block number to be the first page in the range */
        heapBlk = (heapBlk / revmap->rm_pagesPerRange) * revmap->rm_pagesPerRange;
 
-       /* Compute the revmap page number we need */
+       /*
+        * Compute the revmap page number we need.  If Invalid is returned (i.e.,
+        * the revmap page hasn't been created yet), the requested page range is
+        * not summarized.
+        */
        mapBlk = revmap_get_blkno(revmap, heapBlk);
        if (mapBlk == InvalidBlockNumber)
        {
diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c
index 42b4ea4..66b493c 100644
--- a/src/backend/access/common/reloptions.c
+++ b/src/backend/access/common/reloptions.c
@@ -58,6 +58,15 @@ static relopt_bool boolRelOpts[] =
 {
        {
                {
+                       "autosummarize",
+                       "Enables automatic summarization on this BRIN index",
+                       RELOPT_KIND_BRIN,
+                       AccessExclusiveLock
+               },
+               false
+       },
+       {
+               {
                        "autovacuum_enabled",
                        "Enables autovacuum in this relation",
                        RELOPT_KIND_HEAP | RELOPT_KIND_TOAST,
diff --git a/src/backend/postmaster/autovacuum.c b/src/backend/postmaster/autovacuum.c
index e8de9a3..c9853a9 100644
--- a/src/backend/postmaster/autovacuum.c
+++ b/src/backend/postmaster/autovacuum.c
@@ -92,7 +92,9 @@
 #include "storage/procsignal.h"
 #include "storage/sinvaladt.h"
 #include "tcop/tcopprot.h"
+#include "utils/dsa.h"
 #include "utils/fmgroids.h"
+#include "utils/fmgrprotos.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
 #include "utils/ps_status.h"
@@ -252,9 +254,10 @@ typedef enum
  * av_runningWorkers the WorkerInfo non-free queue
  * av_startingWorker pointer to WorkerInfo currently being started (cleared by
 *                                     the worker itself as soon as it's up and running)
+ * av_dsa_handle       handle for allocatable shared memory
  *
  * This struct is protected by AutovacuumLock, except for av_signal and parts
- * of the worker list (see above).
+ * of the worker list (see above).  av_dsa_handle is readable unlocked.
  *-------------
  */
 typedef struct
@@ -264,6 +267,8 @@ typedef struct
        dlist_head      av_freeWorkers;
        dlist_head      av_runningWorkers;
        WorkerInfo      av_startingWorker;
+       dsa_handle      av_dsa_handle;
+       dsa_pointer     av_workitems;
 } AutoVacuumShmemStruct;
 
 static AutoVacuumShmemStruct *AutoVacuumShmem;
@@ -278,6 +283,30 @@ static MemoryContext DatabaseListCxt = NULL;
 /* Pointer to my own WorkerInfo, valid on each worker */
 static WorkerInfo MyWorkerInfo = NULL;
 
+/*
+ * Autovacuum workitem array, stored in AutoVacuumShmem->av_workitems.  This
+ * list is mostly protected by AutovacuumLock, except that an autovacuum
+ * worker may "claim" an item (by marking it active), and then no other process
+ * is allowed to touch it.
+ */
+typedef struct AutoVacuumWorkItem
+{
+       AutoVacuumWorkItemType avw_type;
+       Oid                     avw_database;
+       Oid                     avw_relation;
+       bool            avw_active;
+       dsa_pointer     avw_next;
+} AutoVacuumWorkItem;
+
+#define NUM_WORKITEMS  256
+typedef struct
+{
+       dsa_pointer             avs_usedItems;
+       dsa_pointer             avs_freeItems;
+} AutovacWorkItems;
+
+static dsa_area        *AutoVacuumDSA = NULL;
+
 /* PID of launcher, valid only in worker while shutting down */
 int                    AutovacuumLauncherPid = 0;
 
@@ -316,6 +345,7 @@ static AutoVacOpts *extract_autovac_opts(HeapTuple tup,
 static PgStat_StatTabEntry *get_pgstat_tabentry_relid(Oid relid, bool isshared,
                                                  PgStat_StatDBEntry *shared,
                                                  PgStat_StatDBEntry *dbentry);
+static void perform_work_item(AutoVacuumWorkItem *workitem);
 static void autovac_report_activity(autovac_table *tab);
 static void av_sighup_handler(SIGNAL_ARGS);
 static void avl_sigusr2_handler(SIGNAL_ARGS);
@@ -574,6 +604,22 @@ AutoVacLauncherMain(int argc, char *argv[])
         */
        rebuild_database_list(InvalidOid);
 
+       /*
+        * Set up our DSA so that backends can install work-item requests.  It may
+        * already exist as created by a previous launcher.
+        */
+       if (!AutoVacuumShmem->av_dsa_handle)
+       {
+               LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE);
+               AutoVacuumDSA = dsa_create(LWTRANCHE_AUTOVACUUM);
+               AutoVacuumShmem->av_dsa_handle = dsa_get_handle(AutoVacuumDSA);
+               /* delay array allocation until first request */
+               AutoVacuumShmem->av_workitems = InvalidDsaPointer;
+               LWLockRelease(AutovacuumLock);
+       }
+       else
+               AutoVacuumDSA = dsa_attach(AutoVacuumShmem->av_dsa_handle);
+
        /* loop until shutdown request */
        while (!got_SIGTERM)
        {
@@ -1617,6 +1663,13 @@ AutoVacWorkerMain(int argc, char *argv[])
        {
                char            dbname[NAMEDATALEN];
 
+               if (AutoVacuumShmem->av_dsa_handle)
+               {
+                       /* First use of DSA in this worker, so attach to it */
+                       Assert(!AutoVacuumDSA);
+                       AutoVacuumDSA = dsa_attach(AutoVacuumShmem->av_dsa_handle);
+               }
+
                /*
                 * Report autovac startup to the stats collector.  We deliberately do
                 * this before InitPostgres, so that the last_autovac_time will get
@@ -2467,6 +2520,69 @@ deleted:
        }
 
        /*
+        * Perform additional work items, as requested by backends.
+        */
+       if (AutoVacuumShmem->av_workitems)
+       {
+               dsa_pointer             nextitem;
+               AutovacWorkItems *workitems;
+
+               LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE);
+
+               /*
+                * Scan the list of pending items, and process the inactive ones in our
+                * database.
+                */
+               workitems = (AutovacWorkItems *)
+                       dsa_get_address(AutoVacuumDSA, AutoVacuumShmem->av_workitems);
+               nextitem = workitems->avs_usedItems;
+
+               while (nextitem != InvalidDsaPointer)
+               {
+                       AutoVacuumWorkItem      *workitem;
+
+                       workitem = (AutoVacuumWorkItem *)
+                               dsa_get_address(AutoVacuumDSA, nextitem);
+
+                       if (workitem->avw_database == MyDatabaseId && !workitem->avw_active)
+                       {
+                               /* claim this one, and release lock while we process it */
+                               workitem->avw_active = true;
+
+                               LWLockRelease(AutovacuumLock);
+                               perform_work_item(workitem);
+
+                               /*
+                                * Check for config changes before acquiring lock for further
+                                * jobs.
+                                */
+                               CHECK_FOR_INTERRUPTS();
+                               if (got_SIGHUP)
+                               {
+                                       got_SIGHUP = false;
+                                       ProcessConfigFile(PGC_SIGHUP);
+                               }
+
+                               LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE);
+
+                               /*
+                                * Remove the job we just completed from the used list and put
+                                * the array item back on the free list.
+                                */
+                               workitems->avs_usedItems = workitem->avw_next;
+                               workitem->avw_next = workitems->avs_freeItems;
+                               workitems->avs_freeItems = nextitem;
+                       }
+
+                       /* prepare for next iteration */
+                       nextitem = workitems->avs_usedItems;
+               }
+
+               /* all done */
+               LWLockRelease(AutovacuumLock);
+       }
+
+       /*
         * We leak table_toast_map here (among other things), but since we're
         * going away soon, it's not a problem.
         */
@@ -2498,6 +2614,101 @@ deleted:
        CommitTransactionCommand();
 }
 
+static void
+perform_work_item(AutoVacuumWorkItem *workitem)
+{
+       char       *cur_datname = NULL;
+       char       *cur_nspname = NULL;
+       char       *cur_relname = NULL;
+
+       elog(LOG, "performing work on relation %u", workitem->avw_relation);
+
+       /*
+        * Note we do not store table info in MyWorkerInfo, since this is not
+        * vacuuming proper.
+        */
+
+       /*
+        * Save the relation name for a possible error message, to avoid a
+        * catalog lookup in case of an error.  If any of these return NULL,
+        * then the relation has been dropped since last we checked; skip it.
+        * Note: they must live in a long-lived memory context because we call
+        * vacuum and analyze in different transactions.
+        */
+
+       cur_relname = get_rel_name(workitem->avw_relation);
+       cur_nspname = get_namespace_name(get_rel_namespace(workitem->avw_relation));
+       cur_datname = get_database_name(MyDatabaseId);
+       if (!cur_relname || !cur_nspname || !cur_datname)
+               goto deleted2;
+
+       /*
+        * We will abort the current work item if something errors out, and
+        * continue with the next one; in particular, this happens if we are
+        * interrupted with SIGINT.  XXX but the work item was already deleted
+        * from the work list.  Maybe instead of this we should set a "being
+        * processed" flag in the work item, move it to the back of the list,
+        * and only delete if we're successful.
+        */
+       PG_TRY();
+       {
+               /* have at it */
+               MemoryContextSwitchTo(TopTransactionContext);
+
+               switch (workitem->avw_type)
+               {
+                       case AVW_BRINSummarizeRange:
+                               DirectFunctionCall1(brin_summarize_new_values,
+                                                                       ObjectIdGetDatum(workitem->avw_relation));
+                               break;
+                       default:
+                               elog(WARNING, "unrecognized work item found: type %d",
+                                        workitem->avw_type);
+                               break;
+               }
+
+               /*
+                * Clear a possible query-cancel signal, to avoid a late reaction
+                * to an automatically-sent signal because of vacuuming the
+                * current table (we're done with it, so it would make no sense to
+                * cancel at this point.)
+                */
+               QueryCancelPending = false;
+       }
+       PG_CATCH();
+       {
+               /*
+                * Abort the transaction, start a new one, and proceed with the
+                * next table in our list.
+                */
+               HOLD_INTERRUPTS();
+               errcontext("processing work entry for relation \"%s.%s.%s\"",
+                                  cur_datname, cur_nspname, cur_relname);
+               EmitErrorReport();
+
+               /* this resets the PGXACT flags too */
+               AbortOutOfAnyTransaction();
+               FlushErrorState();
+               MemoryContextResetAndDeleteChildren(PortalContext);
+
+               /* restart our transaction for the following operations */
+               StartTransactionCommand();
+               RESUME_INTERRUPTS();
+       }
+       PG_END_TRY();
+
+       /* We intentionally do not set did_vacuum here */
+
+       /* be tidy */
+deleted2:
+       if (cur_datname)
+               pfree(cur_datname);
+       if (cur_nspname)
+               pfree(cur_nspname);
+       if (cur_relname)
+               pfree(cur_relname);
+}
+
 /*
  * extract_autovac_opts
  *
@@ -2959,6 +3170,119 @@ AutoVacuumingActive(void)
 }
 
 /*
+ * Request one work item to the next autovacuum run processing our database.
+ */
+void
+AutoVacuumRequestWork(AutoVacuumWorkItemType type, Oid relationId)
+{
+       AutovacWorkItems *workitems;
+       dsa_pointer             wi_ptr;
+       AutoVacuumWorkItem *workitem;
+
+       elog(LOG, "requesting work on relation %u", relationId);
+
+       LWLockAcquire(AutovacuumLock, LW_EXCLUSIVE);
+
+       /*
+        * It may be useful to deduplicate the list upon insertion.  For the only
+        * currently existing caller, this is not necessary.
+        */
+
+       /* First use in this process?  Initialize DSA */
+       if (!AutoVacuumDSA)
+       {
+               if (!AutoVacuumShmem->av_dsa_handle)
+               {
+                       /* autovacuum launcher not started; nothing can be done */
+                       LWLockRelease(AutovacuumLock);
+                       return;
+               }
+               AutoVacuumDSA = dsa_attach(AutoVacuumShmem->av_dsa_handle);
+
+               if (!AutoVacuumDSA)
+               {
+                       /* cannot attach?  disregard request */
+                       LWLockRelease(AutovacuumLock);
+                       return;
+               }
+       }
+
+       /* First use overall?  Allocate work items array */
+       if (AutoVacuumShmem->av_workitems == InvalidDsaPointer)
+       {
+               int             i;
+               AutovacWorkItems *workitems;
+
+               AutoVacuumShmem->av_workitems =
+                       dsa_allocate_extended(AutoVacuumDSA,
+                                                                 sizeof(AutovacWorkItems) +
+                                                                 NUM_WORKITEMS * sizeof(AutoVacuumWorkItem),
+                                                                 DSA_ALLOC_NO_OOM);
+               /* if out of memory, silently disregard the request */
+               if (AutoVacuumShmem->av_workitems == InvalidDsaPointer)
+               {
+                       dsa_detach(AutoVacuumDSA);
+                       AutoVacuumDSA = NULL;
+                       LWLockRelease(AutovacuumLock);
+                       return;
+               }
+
+               /* Initialize each array entry as a member of the free list */
+               workitems = dsa_get_address(AutoVacuumDSA, AutoVacuumShmem->av_workitems);
+
+               workitems->avs_usedItems = InvalidDsaPointer;
+               workitems->avs_freeItems = InvalidDsaPointer;
+               for (i = 0; i < NUM_WORKITEMS; i++)
+               {
+                       /* XXX surely there is a simpler way to do this */
+                       wi_ptr = AutoVacuumShmem->av_workitems + sizeof(AutovacWorkItems) +
+                               sizeof(AutoVacuumWorkItem) * i;
+                       workitem = (AutoVacuumWorkItem *) dsa_get_address(AutoVacuumDSA, wi_ptr);
+
+                       workitem->avw_type = 0;
+                       workitem->avw_database = InvalidOid;
+                       workitem->avw_relation = InvalidOid;
+                       workitem->avw_active = false;
+
+                       /* put this item in the free list */
+                       workitem->avw_next = workitems->avs_freeItems;
+                       workitems->avs_freeItems = wi_ptr;
+               }
+       }
+
+       workitems = (AutovacWorkItems *)
+               dsa_get_address(AutoVacuumDSA, AutoVacuumShmem->av_workitems);
+
+       /* If array is full, disregard the request */
+       if (workitems->avs_freeItems == InvalidDsaPointer)
+       {
+               LWLockRelease(AutovacuumLock);
+               dsa_detach(AutoVacuumDSA);
+               AutoVacuumDSA = NULL;
+               return;
+       }
+
+       /* remove workitem struct from free list ... */
+       wi_ptr = workitems->avs_freeItems;
+       workitem = dsa_get_address(AutoVacuumDSA, wi_ptr);
+       workitems->avs_freeItems = workitem->avw_next;
+
+       /* ... initialize it ... */
+       workitem->avw_type = type;
+       workitem->avw_database = MyDatabaseId;
+       workitem->avw_relation = relationId;
+       workitem->avw_active = false;
+       workitem->avw_next = workitems->avs_usedItems;
+
+       /* ... and put it on autovacuum's to-do list */
+       workitems->avs_usedItems = wi_ptr;
+
+       LWLockRelease(AutovacuumLock);
+       dsa_detach(AutoVacuumDSA);
+       AutoVacuumDSA = NULL;
+}
+
+/*
  * autovac_init
  *             This is called at postmaster initialization.
  *
diff --git a/src/backend/storage/ipc/dsm.c b/src/backend/storage/ipc/dsm.c
index 54378bc..b8c96db 100644
--- a/src/backend/storage/ipc/dsm.c
+++ b/src/backend/storage/ipc/dsm.c
@@ -1095,7 +1095,8 @@ dsm_create_descriptor(void)
 {
        dsm_segment *seg;
 
-       ResourceOwnerEnlargeDSMs(CurrentResourceOwner);
+       if (CurrentResourceOwner)
+               ResourceOwnerEnlargeDSMs(CurrentResourceOwner);
 
        seg = MemoryContextAlloc(TopMemoryContext, sizeof(dsm_segment));
        dlist_push_head(&dsm_segment_list, &seg->node);
@@ -1106,8 +1107,11 @@ dsm_create_descriptor(void)
        seg->mapped_address = NULL;
        seg->mapped_size = 0;
 
-       seg->resowner = CurrentResourceOwner;
-       ResourceOwnerRememberDSM(CurrentResourceOwner, seg);
+       if (CurrentResourceOwner)
+       {
+               seg->resowner = CurrentResourceOwner;
+               ResourceOwnerRememberDSM(CurrentResourceOwner, seg);
+       }
 
        slist_init(&seg->on_detach);
 
diff --git a/src/backend/utils/mmgr/dsa.c b/src/backend/utils/mmgr/dsa.c
index 49e68b4..6d5d12a 100644
--- a/src/backend/utils/mmgr/dsa.c
+++ b/src/backend/utils/mmgr/dsa.c
@@ -498,7 +498,7 @@ dsa_get_handle(dsa_area *area)
 
 /*
  * Attach to an area given a handle generated (possibly in another process) by
- * dsa_get_area_handle.  The area must have been created with dsa_create (not
+ * dsa_get_handle.  The area must have been created with dsa_create (not
  * dsa_create_in_place).
  */
 dsa_area *
diff --git a/src/include/access/brin.h b/src/include/access/brin.h
index 896824a..3f4c29b 100644
--- a/src/include/access/brin.h
+++ b/src/include/access/brin.h
@@ -22,6 +22,7 @@ typedef struct BrinOptions
 {
        int32           vl_len_;                /* varlena header (do not touch directly!) */
        BlockNumber pagesPerRange;
+       bool            autosummarize;
 } BrinOptions;
 
 #define BRIN_DEFAULT_PAGES_PER_RANGE   128
@@ -29,5 +30,9 @@ typedef struct BrinOptions
        ((relation)->rd_options ? \
         ((BrinOptions *) (relation)->rd_options)->pagesPerRange : \
          BRIN_DEFAULT_PAGES_PER_RANGE)
+#define BrinGetAutoSummarize(relation) \
+       ((relation)->rd_options ? \
+        ((BrinOptions *) (relation)->rd_options)->autosummarize : \
+         false)
 
 #endif   /* BRIN_H */
diff --git a/src/include/postmaster/autovacuum.h b/src/include/postmaster/autovacuum.h
index 99d7f09..a871508 100644
--- a/src/include/postmaster/autovacuum.h
+++ b/src/include/postmaster/autovacuum.h
@@ -14,6 +14,15 @@
 #ifndef AUTOVACUUM_H
 #define AUTOVACUUM_H
 
+/*
+ * Other processes can request specific work from autovacuum, identified by
+ * AutoVacuumWorkItem elements.
+ */
+typedef enum
+{
+       AVW_BRINSummarizeRange
+} AutoVacuumWorkItemType;
+
 
 /* GUC variables */
 extern bool autovacuum_start_daemon;
@@ -60,6 +69,9 @@ extern void AutovacuumWorkerIAm(void);
 extern void AutovacuumLauncherIAm(void);
 #endif
 
+extern void AutoVacuumRequestWork(AutoVacuumWorkItemType type,
+                                         Oid relationId);
+
 /* shared memory stuff */
 extern Size AutoVacuumShmemSize(void);
 extern void AutoVacuumShmemInit(void);
diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h
index 8bd93c3..df27aca 100644
--- a/src/include/storage/lwlock.h
+++ b/src/include/storage/lwlock.h
@@ -211,6 +211,7 @@ typedef enum BuiltinTrancheIds
        LWTRANCHE_BUFFER_MAPPING,
        LWTRANCHE_LOCK_MANAGER,
        LWTRANCHE_PREDICATE_LOCK_MANAGER,
+       LWTRANCHE_AUTOVACUUM,
        LWTRANCHE_PARALLEL_QUERY_DSA,
        LWTRANCHE_FIRST_USER_DEFINED
 }      BuiltinTrancheIds;
-- 