Attached please find a patch implementing an "ALTER INDEX ... WHERE ..." clause.
It now handles all three possible situations:
1. Making an index partial (adding a WHERE condition to an ordinary index)
2. Extending the range of a partial index (making the predicate less restrictive)
3. Arbitrarily changing the predicate of a partial index

In case 2, only the newly matching records are added to the index.
In the other two cases the index is completely rebuilt.
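
A sketch of the intended usage (index, column, and date values here are made up for illustration; the grammar added by the patch is ALTER INDEX qualified_name WHERE a_expr):

    -- 1. make an ordinary index partial: the index is rebuilt
    alter index idx where ts < '20/01/2016';
    -- 2. relax the predicate: only the newly matching rows are inserted
    alter index idx where ts < '21/01/2016';
    -- 3. arbitrary change of the predicate: the index is rebuilt again
    alter index idx where ts > '01/01/2016' and ts < '10/01/2016';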

This patch includes a src/bin/insbench utility for testing insert performance; it can easily be excluded from the patch to reduce its size. It is also best to apply this patch together with the "index-only scans with partial indexes" patch:

http://www.postgresql.org/message-id/560c7213.3010...@2ndquadrant.com

Only then will the regression test produce the expected output.


On 27.01.2016 23:15, Robert Haas wrote:
> On Wed, Jan 20, 2016 at 4:28 AM, Konstantin Knizhnik
> <k.knizh...@postgrespro.ru> wrote:
>> Please notice that such an ALTER statement, changing the condition of a
>> partial index, is not supported now.
>> But I do not see any fundamental problems with supporting such a construction.
>> We should just include in the index all records which match the new condition
>> and do not match the old one:
>>
>>     ts < '21/01/2016' and not (ts < '20/01/2016')
> You'd also need to remove any rows from the index that match the old
> condition but not the new one.  In your example, that's impossible,
> but in general, it's definitely possible.
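
Right, and that is exactly how the attached patch chooses between the incremental path and a rebuild: it first probes for rows matching the old predicate but not the new one, and rebuilds the whole index if any exist; otherwise it inserts only the delta. Schematically (rel, old_pred, and new_pred stand in for the deparsed relation name and predicates):

    -- a single row here forces a complete rebuild
    select * from rel where old_pred and not (new_pred) limit 1;
    -- if none exists, only the newly matching rows are indexed
    select * from rel where new_pred and not (old_pred);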


--
Konstantin Knizhnik
Postgres Professional: http://www.postgrespro.com
The Russian Postgres Company

diff --git a/src/backend/commands/indexcmds.c b/src/backend/commands/indexcmds.c
index b450bcf..b6ffb19 100644
--- a/src/backend/commands/indexcmds.c
+++ b/src/backend/commands/indexcmds.c
@@ -32,6 +32,7 @@
 #include "commands/tablespace.h"
 #include "mb/pg_wchar.h"
 #include "miscadmin.h"
+#include "funcapi.h"
 #include "nodes/nodeFuncs.h"
 #include "optimizer/clauses.h"
 #include "optimizer/planner.h"
@@ -50,6 +51,9 @@
 #include "utils/snapmgr.h"
 #include "utils/syscache.h"
 #include "utils/tqual.h"
+#include "utils/ruleutils.h"
+#include "executor/executor.h"
+#include "executor/spi.h"
 
 
 /* non-export function prototypes */
@@ -275,6 +279,160 @@ CheckIndexCompatible(Oid oldId,
 	return ret;
 }
 
+static void 
+UpdateIndex(Oid indexRelationId, Node* whereClause)
+{
+	Datum		values[Natts_pg_index];
+	bool		isnull[Natts_pg_index];
+	HeapTuple   oldTuple;
+	HeapTuple   newTuple;
+	Relation	pg_index;
+
+	pg_index = heap_open(IndexRelationId, RowExclusiveLock);
+	oldTuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(indexRelationId));
+	if (!HeapTupleIsValid(oldTuple))
+		elog(ERROR, "cache lookup failed for index %u", indexRelationId);
+
+	heap_deform_tuple(oldTuple, RelationGetDescr(pg_index), values, isnull);
+	values[Anum_pg_index_indpred - 1] = CStringGetTextDatum(nodeToString(whereClause));
+	isnull[Anum_pg_index_indpred - 1] = false;
+	newTuple = heap_form_tuple(RelationGetDescr(pg_index), values, isnull);
+	simple_heap_update(pg_index, &oldTuple->t_self, newTuple);
+	CatalogUpdateIndexes(pg_index, newTuple);
+	heap_freetuple(newTuple);
+	heap_freetuple(oldTuple);
+	heap_close(pg_index, NoLock);
+}
+
+void
+AlterIndex(Oid indexRelationId, IndexStmt *stmt)
+{
+	char* select;
+	Oid heapRelationId;
+	IndexUniqueCheck checkUnique;
+	Datum		values[INDEX_MAX_KEYS];
+	bool		isnull[INDEX_MAX_KEYS];
+	Relation heapRelation;
+	Relation indexRelation;
+    SPIPlanPtr plan;
+    Portal portal;
+	HeapTuple tuple;
+	TupleTableSlot *slot;
+	ItemPointer tupleid;
+	IndexInfo  *indexInfo;
+	EState *estate;
+	Oid	namespaceId;
+	List*       deparseCtx;
+	char*       oldIndexPredicate;
+	char*       newIndexPredicate;
+	char*       relationName;
+
+	Assert(stmt->whereClause);
+	CheckPredicate((Expr *) stmt->whereClause);
+
+	/* Open and lock the parent heap relation */
+	heapRelationId = IndexGetRelation(indexRelationId, false);
+	heapRelation = heap_open(heapRelationId, AccessShareLock);
+
+	/* Open the target index relation */
+	/*	indexRelation = index_open(indexRelationId, RowExclusiveLock); */
+	indexRelation = index_open(indexRelationId, ShareUpdateExclusiveLock);
+	/* indexRelation = index_open(indexRelationId, AccessShareLock); */
+	namespaceId = RelationGetNamespace(indexRelation);
+
+	indexInfo = BuildIndexInfo(indexRelation);
+	Assert(!indexInfo->ii_ExclusionOps);
+ 
+	/*
+	 * Generate the constraint and default execution states
+	 */
+	estate = CreateExecutorState();
+
+	checkUnique = indexRelation->rd_index->indisunique ? UNIQUE_CHECK_YES : UNIQUE_CHECK_NO;
+
+	slot = MakeSingleTupleTableSlot(RelationGetDescr(heapRelation));
+	
+	deparseCtx = deparse_context_for(RelationGetRelationName(heapRelation), heapRelationId);
+	relationName = quote_qualified_identifier(get_namespace_name(namespaceId),
+											  get_rel_name(heapRelationId));
+	newIndexPredicate = deparse_expression(stmt->whereClause, deparseCtx, false, false);
+	oldIndexPredicate = indexInfo->ii_Predicate 
+		? deparse_expression((Node*)make_ands_explicit(indexInfo->ii_Predicate), deparseCtx, false, false)
+		: "true";
+
+    SPI_connect();
+
+	select = psprintf("select * from %s where %s and not (%s) limit 1",
+					  relationName, oldIndexPredicate, newIndexPredicate);
+	if (SPI_execute(select, true, 1) != SPI_OK_SELECT) 
+	{
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_CURSOR_STATE),
+				 errmsg("Failed to execute statement %s", select)));		
+	}	
+	if (SPI_processed) { 
+		/* There is no way in Postgres to remove individual entries from an index, so we have to completely rebuild it in this case */
+		bool relpersistence = indexRelation->rd_rel->relpersistence;
+		indexRelation->rd_indpred = make_ands_implicit((Expr *) stmt->whereClause);
+		index_close(indexRelation, NoLock);
+		indexRelation = NULL;
+		UpdateIndex(indexRelationId, stmt->whereClause);
+		reindex_index(indexRelationId, false, relpersistence, 0);		
+	} else { 
+		select = psprintf("select * from %s where %s and not (%s)",
+						  relationName, newIndexPredicate, oldIndexPredicate);
+		plan = SPI_prepare(select, 0, NULL); 
+		if (plan == NULL) {
+			ereport(ERROR,
+					(errcode(ERRCODE_INVALID_CURSOR_STATE),
+					 errmsg("Failed to prepare statement %s", select)));
+		} 
+		portal = SPI_cursor_open(NULL, plan, NULL, NULL, true);
+		if (portal == NULL) { 
+			ereport(ERROR,
+					(errcode(ERRCODE_INVALID_CURSOR_STATE),
+					 errmsg("Failed to open cursor for %s", select)));
+		}	
+		while (true)
+		{
+			SPI_cursor_fetch(portal, true, 1);
+			if (!SPI_processed) { 
+				break;
+			}										
+			tuple = SPI_tuptable->vals[0];
+			tupleid = &tuple->t_data->t_ctid;
+			ExecStoreTuple(tuple, slot, InvalidBuffer, false);
+			
+			FormIndexDatum(indexInfo,
+						   slot,
+						   estate,
+						   values,
+						   isnull);
+			index_insert(indexRelation, /* index relation */
+						 values,	/* array of index Datums */
+						 isnull,	/* null flags */
+						 tupleid,		/* tid of heap tuple */
+						 heapRelation,	/* heap relation */
+						 checkUnique);	/* type of uniqueness check to do */
+			
+			SPI_freetuple(tuple);
+			SPI_freetuptable(SPI_tuptable);
+		}
+		SPI_cursor_close(portal);
+
+		UpdateIndex(indexRelationId, stmt->whereClause);
+	}
+    SPI_finish();
+
+	ExecDropSingleTupleTableSlot(slot);
+	FreeExecutorState(estate);
+
+	heap_close(heapRelation, NoLock);
+	if (indexRelation) {
+		index_close(indexRelation, NoLock);
+	}
+}
+
 /*
  * DefineIndex
  *		Creates a new index.
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index ba04b72..93f76ce 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -3123,6 +3123,7 @@ _copyIndexStmt(const IndexStmt *from)
 	COPY_SCALAR_FIELD(transformed);
 	COPY_SCALAR_FIELD(concurrent);
 	COPY_SCALAR_FIELD(if_not_exists);
+	COPY_SCALAR_FIELD(is_alter);
 
 	return newnode;
 }
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 356fcaf..13b575e 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -1243,6 +1243,7 @@ _equalIndexStmt(const IndexStmt *a, const IndexStmt *b)
 	COMPARE_SCALAR_FIELD(transformed);
 	COMPARE_SCALAR_FIELD(concurrent);
 	COMPARE_SCALAR_FIELD(if_not_exists);
+	COMPARE_SCALAR_FIELD(is_alter);
 
 	return true;
 }
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index 63fae82..ebd050a 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -2184,6 +2184,7 @@ _outIndexStmt(StringInfo str, const IndexStmt *node)
 	WRITE_BOOL_FIELD(transformed);
 	WRITE_BOOL_FIELD(concurrent);
 	WRITE_BOOL_FIELD(if_not_exists);
+	WRITE_BOOL_FIELD(is_alter);
 }
 
 static void
diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y
index 7916df8..50c7f20 100644
--- a/src/backend/parser/gram.y
+++ b/src/backend/parser/gram.y
@@ -1801,6 +1801,15 @@ AlterTableStmt:
 					n->nowait = $13;
 					$$ = (Node *)n;
 				}
+        |   ALTER INDEX qualified_name WHERE a_expr
+		        {
+					IndexStmt* n = makeNode(IndexStmt);
+					n->relation = $3;
+					n->whereClause = $5;
+					n->is_alter = true;
+					$$ = (Node *)n;					
+				}
+			
 		|	ALTER INDEX qualified_name alter_table_cmds
 				{
 					AlterTableStmt *n = makeNode(AlterTableStmt);
diff --git a/src/backend/parser/parse_utilcmd.c b/src/backend/parser/parse_utilcmd.c
index 344a40c..401e42d 100644
--- a/src/backend/parser/parse_utilcmd.c
+++ b/src/backend/parser/parse_utilcmd.c
@@ -2016,6 +2016,10 @@ transformIndexStmt(Oid relid, IndexStmt *stmt, const char *queryString)
 	 * to its fields without qualification.  Caller is responsible for locking
 	 * relation, but we still need to open it.
 	 */
+	if (stmt->is_alter) 
+	{
+		relid = IndexGetRelation(relid, false);
+	}
 	rel = relation_open(relid, NoLock);
 	rte = addRangeTableEntryForRelation(pstate, rel, NULL, false, true);
 
diff --git a/src/backend/tcop/utility.c b/src/backend/tcop/utility.c
index e81bbc6..f59c224 100644
--- a/src/backend/tcop/utility.c
+++ b/src/backend/tcop/utility.c
@@ -1244,7 +1244,7 @@ ProcessUtilitySlow(Node *parsetree,
 					 * eventually be needed here, so the lockmode calculation
 					 * needs to match what DefineIndex() does.
 					 */
-					lockmode = stmt->concurrent ? ShareUpdateExclusiveLock
+					lockmode = stmt->is_alter || stmt->concurrent ? ShareUpdateExclusiveLock
 						: ShareLock;
 					relid =
 						RangeVarGetRelidExtended(stmt->relation, lockmode,
@@ -1257,22 +1257,29 @@ ProcessUtilitySlow(Node *parsetree,
 
 					/* ... and do it */
 					EventTriggerAlterTableStart(parsetree);
-					address =
-						DefineIndex(relid,		/* OID of heap relation */
-									stmt,
-									InvalidOid, /* no predefined OID */
-									false,		/* is_alter_table */
-									true,		/* check_rights */
-									false,		/* skip_build */
-									false);		/* quiet */
-
-					/*
-					 * Add the CREATE INDEX node itself to stash right away;
-					 * if there were any commands stashed in the ALTER TABLE
-					 * code, we need them to appear after this one.
-					 */
-					EventTriggerCollectSimpleCommand(address, secondaryObject,
-													 parsetree);
+					if (stmt->is_alter)
+					{
+						AlterIndex(relid, stmt);
+					}
+					else
+					{
+						address =
+							DefineIndex(relid,		/* OID of heap relation */
+										stmt,
+										InvalidOid, /* no predefined OID */
+										false,		/* is_alter_table */
+										true,		/* check_rights */
+										false,		/* skip_build */
+										false);		/* quiet */
+						
+						/*
+						 * Add the CREATE INDEX node itself to stash right away;
+						 * if there were any commands stashed in the ALTER TABLE
+						 * code, we need them to appear after this one.
+						 */
+						EventTriggerCollectSimpleCommand(address, secondaryObject,
+														 parsetree);
+					}
 					commandCollected = true;
 					EventTriggerAlterTableEnd();
 				}
diff --git a/src/bin/insbench/insbench.cpp b/src/bin/insbench/insbench.cpp
new file mode 100644
index 0000000..76beb8d
--- /dev/null
+++ b/src/bin/insbench/insbench.cpp
@@ -0,0 +1,323 @@
+#include <time.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <inttypes.h>
+#include <sys/time.h>
+#include <pthread.h>
+#include <unistd.h>
+
+#include <string>
+#include <vector>
+
+#include <pqxx/connection>
+#include <pqxx/transaction>
+#include <pqxx/nontransaction>
+#include <pqxx/pipeline>
+#include <pqxx/tablewriter>
+#include <pqxx/version>
+
+using namespace std;
+using namespace pqxx;
+
+typedef void* (*thread_proc_t)(void*);
+
+struct thread
+{
+    pthread_t t;
+
+    void start(thread_proc_t proc) { 
+        pthread_create(&t, NULL, proc, this);
+    }
+
+    void wait() { 
+        pthread_join(t, NULL);
+    }
+};
+
+struct config
+{
+    int indexUpdateInterval;
+    int nInserters;
+    int nIndexes;
+    int nIterations;
+	int transactionSize;
+	int initialSize;
+	bool useSystemTime;
+	bool noPK;
+	bool useCopy;
+    string connection;
+
+    config() {
+		initialSize = 1000000;
+		indexUpdateInterval = 0;
+        nInserters = 1;
+		nIndexes = 8;
+        nIterations = 10000;
+		transactionSize = 100;
+		useSystemTime = false;
+		noPK = false;
+		useCopy = false;
+    }
+};
+
+config cfg;
+bool running;
+int nIndexUpdates;
+time_t maxIndexUpdateTime;
+time_t totalIndexUpdateTime;
+time_t currTimestamp;
+
+#define USEC 1000000
+
+static time_t getCurrentTime()
+{
+    struct timeval tv;
+    gettimeofday(&tv, NULL);
+    return (time_t)tv.tv_sec*USEC + tv.tv_usec;
+}
+
+
+void exec(transaction_base& txn, char const* sql, ...)
+{
+    va_list args;
+    va_start(args, sql);
+    char buf[1024];
+    vsnprintf(buf, sizeof(buf), sql, args);
+    va_end(args);
+    txn.exec(buf);
+}
+
+void* inserter(void* arg)
+{
+    connection con(cfg.connection);
+	if (cfg.useSystemTime) 
+	{
+#if PQXX_VERSION_MAJOR >= 4
+		con.prepare("insert", "insert into t values ($1,$2,$3,$4,$5,$6,$7,$8,$9)");
+#else
+		con.prepare("insert", "insert into t values ($1,$2,$3,$4,$5,$6,$7,$8,$9)")("bigint")("bigint")("bigint")("bigint")("bigint")("bigint")("bigint")("bigint")("bigint");
+#endif
+	} else {
+		con.prepare("insert", "insert into t (select generate_series($1::integer,$2::integer),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000))");
+	}
+	time_t curr = currTimestamp;
+
+    for (int i = 0; i < cfg.nIterations; i++)
+    { 
+		work txn(con);
+		if (cfg.useSystemTime) 
+		{
+			if (cfg.useCopy)
+			{				
+				tablewriter writer(txn,"t");
+				vector<int64_t> row(9);
+				for (int j = 0; j < cfg.transactionSize; j++) 
+				{ 
+					row[0] = getCurrentTime();
+					for (int c = 1; c <= 8; c++) {
+						row[c] = random();
+					}
+					writer << row;
+				}
+				writer.complete();
+			} else {
+				for (int j = 0; j < cfg.transactionSize; j++) 
+				{ 
+					txn.prepared("insert")(getCurrentTime())(random())(random())(random())(random())(random())(random())(random())(random()).exec();
+				}
+			}
+	    } else { 
+		    txn.prepared("insert")(curr)(curr+cfg.transactionSize-1).exec();
+			curr += cfg.transactionSize;
+			currTimestamp = curr;
+	    }
+		txn.commit();
+	}
+	return NULL;
+}
+
+void* indexUpdater(void* arg)
+{
+    connection con(cfg.connection);
+	while (running) {
+		sleep(cfg.indexUpdateInterval);
+		printf("Alter indexes\n");
+		time_t now = getCurrentTime();
+		time_t limit = cfg.useSystemTime ? now : currTimestamp;
+		{
+			work txn(con);
+			for (int i = 0; i < cfg.nIndexes; i++) { 
+				exec(txn, "alter index idx%d where pk<%lu", i, limit);
+			}
+			txn.commit();
+		}
+		printf("End alter indexes\n");
+		nIndexUpdates += 1;
+		time_t elapsed = getCurrentTime() - now;
+		totalIndexUpdateTime += elapsed;
+		if (elapsed > maxIndexUpdateTime) { 
+			maxIndexUpdateTime = elapsed;
+		}
+	}
+    return NULL;
+}
+      
+void initializeDatabase()
+{
+    connection con(cfg.connection);
+	work txn(con);
+	time_t now = getCurrentTime();
+	exec(txn, "drop table if exists t");
+	exec(txn, "create table t (pk bigint, k1 bigint, k2 bigint, k3 bigint, k4 bigint, k5 bigint, k6 bigint, k7 bigint, k8 bigint)");
+
+	if (cfg.initialSize)
+	{
+		if (cfg.useSystemTime) 
+		{
+#if PQXX_VERSION_MAJOR >= 4
+			con.prepare("insert", "insert into t values ($1,$2,$3,$4,$5,$6,$7,$8,$9)");
+#else
+			con.prepare("insert", "insert into t values ($1,$2,$3,$4,$5,$6,$7,$8,$9)")("bigint")("bigint")("bigint")("bigint")("bigint")("bigint")("bigint")("bigint")("bigint");
+#endif
+		} else {
+			con.prepare("insert", "insert into t (select generate_series($1::integer,$2::integer),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000),ceil(random()*1000000000))");
+		}
+		if (cfg.useSystemTime) 
+		{
+			if (cfg.useCopy) { 
+				tablewriter writer(txn,"t");
+				vector<int64_t> row(9);
+				for (int i = 0; i < cfg.initialSize; i++) 
+				{ 
+					row[0] = getCurrentTime();
+					for (int c = 1; c <= 8; c++) {
+						row[c] = random();
+					}
+					writer << row;
+				}
+				writer.complete();
+			} else { 
+				for (int i = 0; i < cfg.initialSize; i++) 
+				{ 
+					txn.prepared("insert")(getCurrentTime())(random())(random())(random())(random())(random())(random())(random())(random()).exec();
+				}
+	        }
+	    } else { 
+		    txn.prepared("insert")(cfg.initialSize)(cfg.initialSize-1).exec();
+			currTimestamp = cfg.initialSize;
+	    }
+	}
+	if (!cfg.noPK) { 
+		exec(txn, "create index pk on t(pk)");
+	}
+	for (int i = 0; i < cfg.nIndexes; i++) { 
+		if (cfg.indexUpdateInterval == 0)  { 
+			exec(txn, "create index idx%d on t(k%d)", i, i+1);
+		} else if (cfg.useSystemTime) { 
+			exec(txn, "create index idx%d on t(k%d) where pk<%ld", i, i+1, now);
+		} else { 
+			exec(txn, "create index idx%d on t(k%d) where pk<%ld", i, i+1, currTimestamp);
+		}
+	}
+	txn.commit();
+	{
+		nontransaction txn(con);
+		txn.exec("vacuum analyze");
+		sleep(2);
+	}
+	printf("Database initialized\n");
+}
+			
+	
+int main (int argc, char* argv[])
+{
+    if (argc == 1){
+        printf("Use -h to show usage options\n");
+        return 1;
+    }
+
+    for (int i = 1; i < argc; i++) { 
+        if (argv[i][0] == '-') { 
+            switch (argv[i][1]) { 
+            case 't':
+                cfg.transactionSize = atoi(argv[++i]);
+                continue;
+            case 'w':
+                cfg.nInserters = atoi(argv[++i]);
+                continue;                
+            case 'u':
+                cfg.indexUpdateInterval = atoi(argv[++i]);
+                continue;
+            case 'n':
+                cfg.nIterations = atoi(argv[++i]);
+                continue;
+            case 'x':
+                cfg.nIndexes = atoi(argv[++i]);
+                continue;
+            case 'i':
+                cfg.initialSize = atoi(argv[++i]);
+                continue;
+            case 'c':
+                cfg.connection = string(argv[++i]);
+                continue;
+			  case 'q':
+				cfg.useSystemTime = true;
+				continue;
+			  case 'p':
+				cfg.noPK = true;
+				continue;
+			  case 'C':
+				cfg.useCopy = true;
+				continue;
+            }
+        }
+        printf("Options:\n"
+               "\t-t N\ttransaction size (100)\n"
+               "\t-w N\tnumber of inserters (1)\n"
+               "\t-u N\tindex update interval (0)\n"
+               "\t-n N\tnumber of iterations (10000)\n"
+               "\t-x N\tnumber of indexes (8)\n"
+               "\t-i N\tinitial table size (1000000)\n"
+               "\t-q\tuse system time and libpq\n"
+               "\t-p\tno primary key\n"
+               "\t-C\tuse COPY command\n"
+               "\t-c STR\tdatabase connection string\n");
+        return 1;
+    }
+
+	initializeDatabase();
+
+    time_t start = getCurrentTime();
+    running = true;
+
+    vector<thread> inserters(cfg.nInserters);
+	thread bgw;
+    for (int i = 0; i < cfg.nInserters; i++) { 
+        inserters[i].start(inserter);
+    }
+	if (cfg.indexUpdateInterval != 0) {
+		bgw.start(indexUpdater);
+	}
+    for (int i = 0; i < cfg.nInserters; i++) { 
+        inserters[i].wait();
+    }    
+    time_t elapsed = getCurrentTime() - start;
+
+    running = false;
+	if (cfg.indexUpdateInterval != 0) bgw.wait();
+ 
+
+    printf(
+        "{\"tps\":%f, \"index_updates\":%d, \"max_update_time\":%ld, \"avg_update_time\":%f,"
+        " \"inserters\":%d, \"indexes\":%d, \"transaction_size\":%d, \"iterations\":%d}\n",
+        (double)cfg.nInserters*cfg.transactionSize*cfg.nIterations*USEC/elapsed,
+        nIndexUpdates,
+		maxIndexUpdateTime,
+		nIndexUpdates ? (double)totalIndexUpdateTime/nIndexUpdates : 0.0,
+		cfg.nInserters, 
+		cfg.nIndexes, 
+		cfg.transactionSize,
+		cfg.nIterations);
+    return 0;
+}
diff --git a/src/bin/insbench/makefile b/src/bin/insbench/makefile
new file mode 100644
index 0000000..e5a153b
--- /dev/null
+++ b/src/bin/insbench/makefile
@@ -0,0 +1,10 @@
+CXX=g++
+CXXFLAGS=-g -Wall -O2 -pthread 
+
+all: insbench
+
+insbench: insbench.cpp
+	$(CXX) $(CXXFLAGS) -o insbench insbench.cpp -lpqxx
+
+clean:
+	rm -f insbench
diff --git a/src/bin/insbench/run.sh b/src/bin/insbench/run.sh
new file mode 100755
index 0000000..86ba823
--- /dev/null
+++ b/src/bin/insbench/run.sh
@@ -0,0 +1,8 @@
+echo Insert with 1 index
+./insbench -c "dbname=postgres host=localhost port=5432 sslmode=disable" -q -C -x 0
+echo Insert with 9 indexes
+./insbench -c "dbname=postgres host=localhost port=5432 sslmode=disable" -q -C -x 8
+echo Insert with 9 concurrently updated partial indexes
+./insbench -c "dbname=postgres host=localhost port=5432 sslmode=disable" -q -C -x 8 -u 1
+echo Insert with 9 frozen partial indexes
+./insbench -c "dbname=postgres host=localhost port=5432 sslmode=disable" -q -C -x 8 -u 100
diff --git a/src/include/commands/defrem.h b/src/include/commands/defrem.h
index adae296..9d0fe4b 100644
--- a/src/include/commands/defrem.h
+++ b/src/include/commands/defrem.h
@@ -42,6 +42,7 @@ extern bool CheckIndexCompatible(Oid oldId,
 					 List *attributeList,
 					 List *exclusionOpNames);
 extern Oid	GetDefaultOpClass(Oid type_id, Oid am_id);
+extern void AlterIndex(Oid relationId, IndexStmt *stmt);
 
 /* commands/functioncmds.c */
 extern ObjectAddress CreateFunction(CreateFunctionStmt *stmt, const char *queryString);
diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h
index 9142e94..1f96956 100644
--- a/src/include/nodes/parsenodes.h
+++ b/src/include/nodes/parsenodes.h
@@ -2435,6 +2435,7 @@ typedef struct IndexStmt
 	bool		transformed;	/* true when transformIndexStmt is finished */
 	bool		concurrent;		/* should this be a concurrent index build? */
 	bool		if_not_exists;	/* just do nothing if index already exists? */
+	bool        is_alter;       /* is alter index statement */
 } IndexStmt;
 
 /* ----------------------
diff --git a/src/test/regress/expected/aggregates.out b/src/test/regress/expected/aggregates.out
index de826b5..88f6a02 100644
--- a/src/test/regress/expected/aggregates.out
+++ b/src/test/regress/expected/aggregates.out
@@ -780,7 +780,6 @@ explain (costs off)
                  ->  Index Only Scan Backward using minmaxtest2i on minmaxtest2
                        Index Cond: (f1 IS NOT NULL)
                  ->  Index Only Scan using minmaxtest3i on minmaxtest3
-                       Index Cond: (f1 IS NOT NULL)
    InitPlan 2 (returns $1)
      ->  Limit
            ->  Merge Append
@@ -792,8 +791,7 @@ explain (costs off)
                  ->  Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest2_1
                        Index Cond: (f1 IS NOT NULL)
                  ->  Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest3_1
-                       Index Cond: (f1 IS NOT NULL)
-(25 rows)
+(23 rows)
 
 select min(f1), max(f1) from minmaxtest;
  min | max 
@@ -819,7 +817,6 @@ explain (costs off)
                  ->  Index Only Scan Backward using minmaxtest2i on minmaxtest2
                        Index Cond: (f1 IS NOT NULL)
                  ->  Index Only Scan using minmaxtest3i on minmaxtest3
-                       Index Cond: (f1 IS NOT NULL)
    InitPlan 2 (returns $1)
      ->  Limit
            ->  Merge Append
@@ -831,9 +828,8 @@ explain (costs off)
                  ->  Index Only Scan using minmaxtest2i on minmaxtest2 minmaxtest2_1
                        Index Cond: (f1 IS NOT NULL)
                  ->  Index Only Scan Backward using minmaxtest3i on minmaxtest3 minmaxtest3_1
-                       Index Cond: (f1 IS NOT NULL)
    ->  Result
-(27 rows)
+(25 rows)
 
 select distinct min(f1), max(f1) from minmaxtest;
  min | max 
diff --git a/src/test/regress/expected/alter_index_with.out b/src/test/regress/expected/alter_index_with.out
new file mode 100644
index 0000000..0e81d60
--- /dev/null
+++ b/src/test/regress/expected/alter_index_with.out
@@ -0,0 +1,132 @@
+create table tmptab (pk integer primary key, sk integer);
+-- insert enough records to make the PostgreSQL optimizer use indexes
+insert into tmptab values (generate_series(1, 10000), generate_series(1, 10000));
+vacuum analyze;
+-- create normal index
+create index idx on tmptab(sk);
+-- just normal index search
+select * from tmptab where sk = 100;
+ pk  | sk  
+-----+-----
+ 100 | 100
+(1 row)
+
+-- make index partial
+alter index idx where pk < 1000;
+-- select using exact partial index range
+select * from tmptab where sk = 100 and pk < 1000;
+ pk  | sk  
+-----+-----
+ 100 | 100
+(1 row)
+
+explain select * from tmptab where sk = 100 and pk < 1000;
+                            QUERY PLAN                            
+------------------------------------------------------------------
+ Index Scan using idx on tmptab  (cost=0.28..8.29 rows=1 width=8)
+   Index Cond: (sk = 100)
+(2 rows)
+
+-- select using subset of partial index range 
+select * from tmptab where sk = 100 and pk < 200;
+ pk  | sk  
+-----+-----
+ 100 | 100
+(1 row)
+
+explain select * from tmptab where sk = 100 and pk < 200;
+                            QUERY PLAN                            
+------------------------------------------------------------------
+ Index Scan using idx on tmptab  (cost=0.28..8.29 rows=1 width=8)
+   Index Cond: (sk = 100)
+   Filter: (pk < 200)
+(3 rows)
+
+-- select outside partial index range 
+select * from tmptab where sk = 100 and pk > 1000;
+ pk | sk 
+----+----
+(0 rows)
+
+explain select * from tmptab where sk = 100 and pk > 1000;
+                       QUERY PLAN                       
+--------------------------------------------------------
+ Seq Scan on tmptab  (cost=0.00..195.00 rows=1 width=8)
+   Filter: ((pk > 1000) AND (sk = 100))
+(2 rows)
+
+-- select without partial index range
+select * from tmptab where sk = 100;
+ pk  | sk  
+-----+-----
+ 100 | 100
+(1 row)
+
+explain select * from tmptab where sk = 100;
+                       QUERY PLAN                       
+--------------------------------------------------------
+ Seq Scan on tmptab  (cost=0.00..170.00 rows=1 width=8)
+   Filter: (sk = 100)
+(2 rows)
+
+-- extend partial index range 
+alter index idx where pk < 10000;
+-- select using exact partial index range
+select * from tmptab where sk = 1000 and pk < 10000;
+  pk  |  sk  
+------+------
+ 1000 | 1000
+(1 row)
+
+explain select * from tmptab where sk = 1000 and pk < 10000;
+                            QUERY PLAN                            
+------------------------------------------------------------------
+ Index Scan using idx on tmptab  (cost=0.28..8.30 rows=1 width=8)
+   Index Cond: (sk = 1000)
+(2 rows)
+
+-- calculating aggregate within exact partial index range
+select count(*) from tmptab where sk < 1000 and pk < 10000;
+ count 
+-------
+   999
+(1 row)
+
+explain select count(*) from tmptab where sk < 1000 and pk < 10000;
+                                   QUERY PLAN                                    
+---------------------------------------------------------------------------------
+ Aggregate  (cost=40.28..40.29 rows=1 width=0)
+   ->  Index Only Scan using idx on tmptab  (cost=0.28..37.78 rows=1000 width=0)
+         Index Cond: (sk < 1000)
+(3 rows)
+
+-- reducing the partial index predicate
+alter index idx where pk < 9000;
+-- select using new exact partial index range and key value belonging to old range
+select * from tmptab where sk = 9000 and pk < 9000;
+ pk | sk 
+----+----
+(0 rows)
+
+explain select * from tmptab where sk = 9000 and pk < 9000;
+                            QUERY PLAN                            
+------------------------------------------------------------------
+ Index Scan using idx on tmptab  (cost=0.29..8.30 rows=1 width=8)
+   Index Cond: (sk = 9000)
+(2 rows)
+
+-- select using exact partial index range
+select * from tmptab where sk = 900 and pk < 9000;
+ pk  | sk  
+-----+-----
+ 900 | 900
+(1 row)
+
+explain select * from tmptab where sk = 900 and pk < 9000;
+                            QUERY PLAN                            
+------------------------------------------------------------------
+ Index Scan using idx on tmptab  (cost=0.29..8.30 rows=1 width=8)
+   Index Cond: (sk = 900)
+(2 rows)
+
+drop table tmptab;
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index b1bc7c7..7788e26 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -103,7 +103,7 @@ test: select_views portals_p2 foreign_key cluster dependency guc bitmapops combo
 # NB: temp.sql does a reconnect which transiently uses 2 connections,
 # so keep this parallel group to at most 19 tests
 # ----------
-test: plancache limit plpgsql copy2 temp domain rangefuncs prepare without_oid conversion truncate alter_table sequence polymorphism rowtypes returning largeobject with xml
+test: plancache limit plpgsql copy2 temp domain rangefuncs prepare without_oid conversion truncate alter_table alter_index_with sequence polymorphism rowtypes returning largeobject with xml
 
 # event triggers cannot run concurrently with any test that runs DDL
 test: event_trigger
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index ade9ef1..9ac950d 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -152,6 +152,7 @@ test: without_oid
 test: conversion
 test: truncate
 test: alter_table
+test: alter_index_with
 test: sequence
 test: polymorphism
 test: rowtypes
diff --git a/src/test/regress/sql/alter_index_with.sql b/src/test/regress/sql/alter_index_with.sql
new file mode 100644
index 0000000..2e089dc
--- /dev/null
+++ b/src/test/regress/sql/alter_index_with.sql
@@ -0,0 +1,55 @@
+create table tmptab (pk integer primary key, sk integer);
+
+-- insert enough records to make the PostgreSQL optimizer use indexes
+insert into tmptab values (generate_series(1, 10000), generate_series(1, 10000));
+
+vacuum analyze;
+
+-- create normal index
+create index idx on tmptab(sk);
+
+-- just normal index search
+select * from tmptab where sk = 100;
+
+-- make index partial
+alter index idx where pk < 1000;
+
+-- select using exact partial index range
+select * from tmptab where sk = 100 and pk < 1000;
+explain select * from tmptab where sk = 100 and pk < 1000;
+
+-- select using subset of partial index range 
+select * from tmptab where sk = 100 and pk < 200;
+explain select * from tmptab where sk = 100 and pk < 200;
+
+-- select outside partial index range 
+select * from tmptab where sk = 100 and pk > 1000;
+explain select * from tmptab where sk = 100 and pk > 1000;
+
+-- select without partial index range
+select * from tmptab where sk = 100;
+explain select * from tmptab where sk = 100;
+
+-- extend partial index range 
+alter index idx where pk < 10000;
+
+-- select using exact partial index range
+select * from tmptab where sk = 1000 and pk < 10000;
+explain select * from tmptab where sk = 1000 and pk < 10000;
+
+-- calculating aggregate within exact partial index range
+select count(*) from tmptab where sk < 1000 and pk < 10000;
+explain select count(*) from tmptab where sk < 1000 and pk < 10000;
+
+-- reducing the partial index predicate
+alter index idx where pk < 9000;
+
+-- select using new exact partial index range and key value belonging to old range
+select * from tmptab where sk = 9000 and pk < 9000;
+explain select * from tmptab where sk = 9000 and pk < 9000;
+
+-- select using exact partial index range
+select * from tmptab where sk = 900 and pk < 9000;
+explain select * from tmptab where sk = 900 and pk < 9000;
+
+drop table tmptab;