VERSION 2, with all changes made as requested to date.

As discussed on -hackers, it's possible to avoid writing any WAL at all
for COPY in these circumstances:

http://archives.postgresql.org/pgsql-hackers/2006-10/msg01172.php

and again recently.

BEGIN;
  CREATE TABLE foo..
  COPY foo...
COMMIT;

BEGIN;
  TRUNCATE foo..
  COPY foo...
COMMIT;

The enclosed patch implements this, as discussed. There is no user
interface to enable/disable, just as with CTAS and CREATE INDEX; no
docs, just code comments.

This plays nicely with the --single-transaction option in psql to allow
fast restores/upgrades.

YMMV but disk bound COPY will benefit greatly from this patch, some
tests showing 100% gain. COPY is still *very* CPU intensive, so some
tests have shown negligible benefit, fyi, but that isn't the typical
case.

While testing this, I realised something: small COPY commands get no
benefit at all, but larger ones do. When we do a small normal COPY the
data stays in cache, but the WAL is written to disk and fsynced. When we
do a small fast COPY, no WAL is written, but the data is written to disk
and fsynced. With COPY, WAL and data are roughly same size, hence no I/O
benefit. With larger COPY statements, benefit is very substantial.

Applies cleanly to CVS HEAD, passes make check.

I enclose a test case that shows whether the test has succeeded by
reading the WAL Insert pointer before/after each COPY. This has been
written in such a way that we could, if we wanted to, include a new
regression test for this. There is a function that returns a fixed
value indicating whether the test passed, rather than simply showing the
WAL insert pointer, which would obviously vary between runs. The tests enclosed
here *also* include the WAL insert pointer so you can manually/visibly
see that the enclosed patch writes no WAL at appropriate times.

        psql -f copy_nowal_prep.sql postgres
        psql -f copy_nowal_test.sql postgres

Do we want an additional test case along these lines?

Agreed doc changes for Performance Tips forthcoming.

-- 
  Simon Riggs             
  EnterpriseDB   http://www.enterprisedb.com

Index: src/backend/access/heap/heapam.c
===================================================================
RCS file: /projects/cvsroot/pgsql/src/backend/access/heap/heapam.c,v
retrieving revision 1.223
diff -c -r1.223 heapam.c
*** src/backend/access/heap/heapam.c	5 Jan 2007 22:19:22 -0000	1.223
--- src/backend/access/heap/heapam.c	9 Jan 2007 19:41:43 -0000
***************
*** 28,33 ****
--- 28,34 ----
   *		heap_update		- replace a tuple in a relation with another tuple
   *		heap_markpos	- mark scan position
   *		heap_restrpos	- restore position to marked location
+  *		heap_sync		- sync heap, for when no WAL has been written
   *
   * NOTES
   *	  This file contains the heap_ routines which implement
***************
*** 50,55 ****
--- 51,57 ----
  #include "miscadmin.h"
  #include "pgstat.h"
  #include "storage/procarray.h"
+ #include "storage/smgr.h"
  #include "utils/inval.h"
  #include "utils/lsyscache.h"
  #include "utils/relcache.h"
***************
*** 1358,1364 ****
   * that all new tuples go into new pages not containing any tuples from other
   * transactions, that the relation gets fsync'd before commit, and that the
   * transaction emits at least one WAL record to ensure RecordTransactionCommit
!  * will decide to WAL-log the commit.
   *
   * use_fsm is passed directly to RelationGetBufferForTuple, which see for
   * more info.
--- 1360,1366 ----
   * that all new tuples go into new pages not containing any tuples from other
   * transactions, that the relation gets fsync'd before commit, and that the
   * transaction emits at least one WAL record to ensure RecordTransactionCommit
!  * will decide to WAL-log the commit. (see heap_sync() comments also)
   *
   * use_fsm is passed directly to RelationGetBufferForTuple, which see for
   * more info.
***************
*** 1418,1424 ****
  	 */
  	if (HeapTupleHasExternal(tup) ||
  		(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
! 		heaptup = toast_insert_or_update(relation, tup, NULL);
  	else
  		heaptup = tup;
  
--- 1420,1426 ----
  	 */
  	if (HeapTupleHasExternal(tup) ||
  		(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
! 		heaptup = toast_insert_or_update(relation, tup, NULL, use_wal);
  	else
  		heaptup = tup;
  
***************
*** 1536,1541 ****
--- 1538,1555 ----
  }
  
  /*
+  *	fast_heap_insert - insert a tuple with options to improve speed
+  *
+  * Currently, this routine allows specifying additional options for speed
+  * in certain cases, such as WAL-avoiding COPY command
+  */
+ Oid
+ fast_heap_insert(Relation relation, HeapTuple tup, bool use_wal)
+ {
+ 	return heap_insert(relation, tup, GetCurrentCommandId(), use_wal, use_wal);
+ }
+ 
+ /*
   *	heap_delete - delete a tuple
   *
   * NB: do not call this directly unless you are prepared to deal with
***************
*** 2086,2096 ****
  		 *
  		 * Note: below this point, heaptup is the data we actually intend to
  		 * store into the relation; newtup is the caller's original untoasted
! 		 * data.
  		 */
  		if (need_toast)
  		{
! 			heaptup = toast_insert_or_update(relation, newtup, &oldtup);
  			newtupsize = MAXALIGN(heaptup->t_len);
  		}
  		else
--- 2100,2110 ----
  		 *
  		 * Note: below this point, heaptup is the data we actually intend to
  		 * store into the relation; newtup is the caller's original untoasted
! 		 * data. (We always use WAL for toast table updates.)
  		 */
  		if (need_toast)
  		{
! 			heaptup = toast_insert_or_update(relation, newtup, &oldtup, true);
  			newtupsize = MAXALIGN(heaptup->t_len);
  		}
  		else
***************
*** 3966,3968 ****
--- 3980,4003 ----
  	else
  		appendStringInfo(buf, "UNKNOWN");
  }
+ 
+ /* ----------------
+  *		heap_sync - sync a heap, for use when no WAL has been written
+  *
+  * ----------------
+  */
+ void
+ heap_sync(Relation rel)
+ {
+ 	if (!rel->rd_istemp)
+ 	{
+ 		/*
+ 		 * If we skipped using WAL, and it's not a temp relation,
+ 		 * we must force the relation down to disk before it's
+ 		 * safe to commit the transaction.  This requires forcing
+ 		 * out any dirty buffers and then doing a forced fsync.
+ 		 */
+ 		FlushRelationBuffers(rel);
+ 		smgrimmedsync(rel->rd_smgr);
+ 	}
+ }
Index: src/backend/access/heap/tuptoaster.c
===================================================================
RCS file: /projects/cvsroot/pgsql/src/backend/access/heap/tuptoaster.c,v
retrieving revision 1.68
diff -c -r1.68 tuptoaster.c
*** src/backend/access/heap/tuptoaster.c	5 Jan 2007 22:19:22 -0000	1.68
--- src/backend/access/heap/tuptoaster.c	9 Jan 2007 19:41:44 -0000
***************
*** 42,48 ****
  #undef TOAST_DEBUG
  
  static void toast_delete_datum(Relation rel, Datum value);
! static Datum toast_save_datum(Relation rel, Datum value);
  static varattrib *toast_fetch_datum(varattrib *attr);
  static varattrib *toast_fetch_datum_slice(varattrib *attr,
  						int32 sliceoffset, int32 length);
--- 42,48 ----
  #undef TOAST_DEBUG
  
  static void toast_delete_datum(Relation rel, Datum value);
! static Datum toast_save_datum(Relation rel, Datum value, bool use_wal);
  static varattrib *toast_fetch_datum(varattrib *attr);
  static varattrib *toast_fetch_datum_slice(varattrib *attr,
  						int32 sliceoffset, int32 length);
***************
*** 342,348 ****
   * ----------
   */
  HeapTuple
! toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup)
  {
  	HeapTuple	result_tuple;
  	TupleDesc	tupleDesc;
--- 342,348 ----
   * ----------
   */
  HeapTuple
! toast_insert_or_update(Relation rel, HeapTuple newtup, HeapTuple oldtup, bool use_wal)
  {
  	HeapTuple	result_tuple;
  	TupleDesc	tupleDesc;
***************
*** 612,618 ****
  		i = biggest_attno;
  		old_value = toast_values[i];
  		toast_action[i] = 'p';
! 		toast_values[i] = toast_save_datum(rel, toast_values[i]);
  		if (toast_free[i])
  			pfree(DatumGetPointer(old_value));
  
--- 612,618 ----
  		i = biggest_attno;
  		old_value = toast_values[i];
  		toast_action[i] = 'p';
! 		toast_values[i] = toast_save_datum(rel, toast_values[i], use_wal);
  		if (toast_free[i])
  			pfree(DatumGetPointer(old_value));
  
***************
*** 724,730 ****
  		i = biggest_attno;
  		old_value = toast_values[i];
  		toast_action[i] = 'p';
! 		toast_values[i] = toast_save_datum(rel, toast_values[i]);
  		if (toast_free[i])
  			pfree(DatumGetPointer(old_value));
  
--- 724,730 ----
  		i = biggest_attno;
  		old_value = toast_values[i];
  		toast_action[i] = 'p';
! 		toast_values[i] = toast_save_datum(rel, toast_values[i], use_wal);
  		if (toast_free[i])
  			pfree(DatumGetPointer(old_value));
  
***************
*** 972,978 ****
   * ----------
   */
  static Datum
! toast_save_datum(Relation rel, Datum value)
  {
  	Relation	toastrel;
  	Relation	toastidx;
--- 972,978 ----
   * ----------
   */
  static Datum
! toast_save_datum(Relation rel, Datum value, bool use_wal)
  {
  	Relation	toastrel;
  	Relation	toastidx;
***************
*** 1057,1063 ****
  		if (!HeapTupleIsValid(toasttup))
  			elog(ERROR, "failed to build TOAST tuple");
  
! 		simple_heap_insert(toastrel, toasttup);
  
  		/*
  		 * Create the index entry.	We cheat a little here by not using
--- 1057,1063 ----
  		if (!HeapTupleIsValid(toasttup))
  			elog(ERROR, "failed to build TOAST tuple");
  
! 		fast_heap_insert(toastrel, toasttup, use_wal);
  
  		/*
  		 * Create the index entry.	We cheat a little here by not using
Index: src/backend/catalog/index.c
===================================================================
RCS file: /projects/cvsroot/pgsql/src/backend/catalog/index.c,v
retrieving revision 1.276
diff -c -r1.276 index.c
*** src/backend/catalog/index.c	9 Jan 2007 02:14:11 -0000	1.276
--- src/backend/catalog/index.c	9 Jan 2007 19:41:45 -0000
***************
*** 1245,1250 ****
--- 1245,1253 ----
  
  	heap_close(pg_class, RowExclusiveLock);
  
+ 	/* Remember we did this in current transaction, to allow later optimisations */
+ 	relation->rd_newRelfilenodeSubid = GetCurrentSubTransactionId();
+ 
  	/* Make sure the relfilenode change is visible */
  	CommandCounterIncrement();
  }
Index: src/backend/commands/copy.c
===================================================================
RCS file: /projects/cvsroot/pgsql/src/backend/commands/copy.c,v
retrieving revision 1.274
diff -c -r1.274 copy.c
*** src/backend/commands/copy.c	5 Jan 2007 22:19:25 -0000	1.274
--- src/backend/commands/copy.c	9 Jan 2007 19:41:46 -0000
***************
*** 1652,1657 ****
--- 1652,1658 ----
  	ExprContext *econtext;		/* used for ExecEvalExpr for default atts */
  	MemoryContext oldcontext = CurrentMemoryContext;
  	ErrorContextCallback errcontext;
+ 	bool		use_wal = true; /* By default, we use WAL to log db changes */
  
  	Assert(cstate->rel);
  
***************
*** 1843,1848 ****
--- 1844,1871 ----
  	nfields = file_has_oids ? (attr_count + 1) : attr_count;
  	field_strings = (char **) palloc(nfields * sizeof(char *));
  
+ 	/*
+ 	 * Check for performance optimization by avoiding WAL writes
+ 	 *
+ 	 * If archive logging is not enabled *and* either
+ 	 * - table is created in same transaction as this COPY
+ 	 * - table data is now being written to new relfilenode
+ 	 * then we can safely avoid writing WAL. Why? 
+ 	 * The data files for the table plus toast table/index, plus any indexes
+ 	 * will all be dropped at the end of the transaction if it fails, so we
+ 	 * do not need to worry about inconsistent states.
+ 	 * As mentioned in comments in utils/rel.h, the in-same-transaction test is
+ 	 * not completely reliable, since rd_createSubid can be reset to zero in
+ 	 * certain cases before the end of the creating transaction. 
+ 	 * We are doing this for performance only, so we only need to know: 
+ 	 * if rd_createSubid != InvalidSubTransactionId then it is *always* just 
+ 	 * created. If we have PITR enabled, then we *must* use_wal
+ 	 */
+ 	if ((cstate->rel->rd_createSubid		 != InvalidSubTransactionId ||
+ 	     cstate->rel->rd_newRelfilenodeSubid != InvalidSubTransactionId)
+ 		&& !XLogArchivingActive())
+ 		use_wal = false;
+ 
  	/* Initialize state variables */
  	cstate->fe_eof = false;
  	cstate->eol_type = EOL_UNKNOWN;
***************
*** 2076,2082 ****
  				ExecConstraints(resultRelInfo, slot, estate);
  
  			/* OK, store the tuple and create index entries for it */
! 			simple_heap_insert(cstate->rel, tuple);
  
  			if (resultRelInfo->ri_NumIndices > 0)
  				ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
--- 2099,2105 ----
  				ExecConstraints(resultRelInfo, slot, estate);
  
  			/* OK, store the tuple and create index entries for it */
! 			fast_heap_insert(cstate->rel, tuple, use_wal);
  
  			if (resultRelInfo->ri_NumIndices > 0)
  				ExecInsertIndexTuples(slot, &(tuple->t_self), estate, false);
***************
*** 2093,2098 ****
--- 2116,2147 ----
  		}
  	}
  
+ 	/* 
+ 	 * If we skipped writing WAL for heaps, then we need to sync
+ 	 */
+ 	if (!use_wal)
+ 	{
+ 		/* main heap */
+ 		heap_sync(cstate->rel);
+ 
+ 		/* main heap indexes, if any */
+ 		/* we always use WAL for index inserts, so no need to sync */
+ 
+ 		/* toast heap, if any */
+ 		if (OidIsValid(cstate->rel->rd_rel->reltoastrelid))
+ 		{
+ 			 Relation		toastrel;
+ 
+ 			 toastrel = heap_open(cstate->rel->rd_rel->reltoastrelid,
+ 								  AccessShareLock);
+ 			 heap_sync(toastrel);
+ 			 heap_close(toastrel, AccessShareLock);
+ 		}
+ 
+ 		/* toast index, if toast heap */
+ 		/* we always use WAL for index inserts, so no need to sync */
+ 	}
+ 
  	/* Done, clean up */
  	error_context_stack = errcontext.previous;
  
Index: src/backend/executor/execMain.c
===================================================================
RCS file: /projects/cvsroot/pgsql/src/backend/executor/execMain.c,v
retrieving revision 1.283
diff -c -r1.283 execMain.c
*** src/backend/executor/execMain.c	5 Jan 2007 22:19:27 -0000	1.283
--- src/backend/executor/execMain.c	9 Jan 2007 19:41:47 -0000
***************
*** 2516,2526 ****
  		 */
  		if (!estate->es_into_relation_use_wal &&
  			!estate->es_into_relation_descriptor->rd_istemp)
! 		{
! 			FlushRelationBuffers(estate->es_into_relation_descriptor);
! 			/* FlushRelationBuffers will have opened rd_smgr */
! 			smgrimmedsync(estate->es_into_relation_descriptor->rd_smgr);
! 		}
  
  		/* close rel, but keep lock until commit */
  		heap_close(estate->es_into_relation_descriptor, NoLock);
--- 2516,2522 ----
  		 */
  		if (!estate->es_into_relation_use_wal &&
  			!estate->es_into_relation_descriptor->rd_istemp)
! 			heap_sync(estate->es_into_relation_descriptor);
  
  		/* close rel, but keep lock until commit */
  		heap_close(estate->es_into_relation_descriptor, NoLock);
Index: src/backend/utils/cache/relcache.c
===================================================================
RCS file: /projects/cvsroot/pgsql/src/backend/utils/cache/relcache.c,v
retrieving revision 1.254
diff -c -r1.254 relcache.c
*** src/backend/utils/cache/relcache.c	9 Jan 2007 02:14:15 -0000	1.254
--- src/backend/utils/cache/relcache.c	9 Jan 2007 19:41:49 -0000
***************
*** 836,841 ****
--- 836,842 ----
  	relation->rd_refcnt = 0;
  	relation->rd_isnailed = false;
  	relation->rd_createSubid = InvalidSubTransactionId;
+ 	relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
  	relation->rd_istemp = isTempNamespace(relation->rd_rel->relnamespace);
  
  	/*
***************
*** 1358,1363 ****
--- 1359,1365 ----
  	 */
  	relation->rd_isnailed = true;
  	relation->rd_createSubid = InvalidSubTransactionId;
+ 	relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
  	relation->rd_istemp = false;
  
  	/*
***************
*** 1769,1774 ****
--- 1771,1777 ----
  		Oid			save_relid = RelationGetRelid(relation);
  		int			old_refcnt = relation->rd_refcnt;
  		SubTransactionId old_createSubid = relation->rd_createSubid;
+ 		SubTransactionId old_newRelfilenodeSubid = relation->rd_newRelfilenodeSubid;
  		TupleDesc	old_att = relation->rd_att;
  		RuleLock   *old_rules = relation->rd_rules;
  		MemoryContext old_rulescxt = relation->rd_rulescxt;
***************
*** 1787,1792 ****
--- 1790,1797 ----
  		}
  		relation->rd_refcnt = old_refcnt;
  		relation->rd_createSubid = old_createSubid;
+ 		relation->rd_newRelfilenodeSubid = old_newRelfilenodeSubid;
+ 
  		if (equalTupleDescs(old_att, relation->rd_att))
  		{
  			/* needn't flush typcache here */
***************
*** 1827,1833 ****
  {
  	bool		rebuild;
  
! 	if (relation->rd_createSubid != InvalidSubTransactionId)
  	{
  		/*
  		 * New relcache entries are always rebuilt, not flushed; else we'd
--- 1832,1839 ----
  {
  	bool		rebuild;
  
! 	if (relation->rd_createSubid != InvalidSubTransactionId ||
! 		relation->rd_newRelfilenodeSubid != InvalidSubTransactionId)
  	{
  		/*
  		 * New relcache entries are always rebuilt, not flushed; else we'd
***************
*** 1909,1914 ****
--- 1915,1923 ----
   *	 so we do not touch new-in-transaction relations; they cannot be targets
   *	 of cross-backend SI updates (and our own updates now go through a
   *	 separate linked list that isn't limited by the SI message buffer size).
+  *	 We don't do anything special for newRelfilenode-in-transaction relations, 
+  *	 though since we have a lock on the relation nobody else should be 
+  *	 generating cache invalidation messages for it anyhow.
   *
   *	 We do this in two phases: the first pass deletes deletable items, and
   *	 the second one rebuilds the rebuildable items.  This is essential for
***************
*** 2085,2090 ****
--- 2094,2100 ----
  				continue;
  			}
  		}
+ 		relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
  
  		/*
  		 * Flush any temporary index list.
***************
*** 2146,2151 ****
--- 2156,2168 ----
  				continue;
  			}
  		}
+ 		if (relation->rd_newRelfilenodeSubid == mySubid)
+ 		{
+ 			if (isCommit)
+ 				relation->rd_newRelfilenodeSubid = parentSubid;
+ 			else
+ 				relation->rd_newRelfilenodeSubid = InvalidSubTransactionId;
+ 		}
  
  		/*
  		 * Flush any temporary index list.
***************
*** 2235,2240 ****
--- 2252,2258 ----
  
  	/* it's being created in this transaction */
  	rel->rd_createSubid = GetCurrentSubTransactionId();
+ 	rel->rd_newRelfilenodeSubid = InvalidSubTransactionId;
  
  	/* must flag that we have rels created in this transaction */
  	need_eoxact_work = true;
***************
*** 3392,3397 ****
--- 3410,3416 ----
  		rel->rd_indexlist = NIL;
  		rel->rd_oidindex = InvalidOid;
  		rel->rd_createSubid = InvalidSubTransactionId;
+ 		rel->rd_newRelfilenodeSubid = InvalidSubTransactionId;
  		rel->rd_amcache = NULL;
  		MemSet(&rel->pgstat_info, 0, sizeof(rel->pgstat_info));
  
Index: src/include/access/heapam.h
===================================================================
RCS file: /projects/cvsroot/pgsql/src/include/access/heapam.h,v
retrieving revision 1.118
diff -c -r1.118 heapam.h
*** src/include/access/heapam.h	5 Jan 2007 22:19:51 -0000	1.118
--- src/include/access/heapam.h	9 Jan 2007 19:41:49 -0000
***************
*** 178,183 ****
--- 178,186 ----
  extern void simple_heap_update(Relation relation, ItemPointer otid,
  				   HeapTuple tup);
  
+ extern Oid fast_heap_insert(Relation relation, HeapTuple tup, bool use_wal);
+ 		
+ 
  extern void heap_markpos(HeapScanDesc scan);
  extern void heap_restrpos(HeapScanDesc scan);
  
***************
*** 236,239 ****
--- 239,244 ----
  extern HeapTuple heap_addheader(int natts, bool withoid,
  			   Size structlen, void *structure);
  
+ extern void heap_sync(Relation relation);
+ 
  #endif   /* HEAPAM_H */
Index: src/include/access/tuptoaster.h
===================================================================
RCS file: /projects/cvsroot/pgsql/src/include/access/tuptoaster.h,v
retrieving revision 1.29
diff -c -r1.29 tuptoaster.h
*** src/include/access/tuptoaster.h	5 Jan 2007 22:19:51 -0000	1.29
--- src/include/access/tuptoaster.h	9 Jan 2007 19:41:49 -0000
***************
*** 69,75 ****
   * ----------
   */
  extern HeapTuple toast_insert_or_update(Relation rel,
! 					   HeapTuple newtup, HeapTuple oldtup);
  
  /* ----------
   * toast_delete -
--- 69,75 ----
   * ----------
   */
  extern HeapTuple toast_insert_or_update(Relation rel,
! 					   HeapTuple newtup, HeapTuple oldtup, bool use_wal);
  
  /* ----------
   * toast_delete -
Index: src/include/utils/rel.h
===================================================================
RCS file: /projects/cvsroot/pgsql/src/include/utils/rel.h,v
retrieving revision 1.95
diff -c -r1.95 rel.h
*** src/include/utils/rel.h	9 Jan 2007 02:14:16 -0000	1.95
--- src/include/utils/rel.h	9 Jan 2007 19:41:49 -0000
***************
*** 137,142 ****
--- 137,143 ----
  	char		rd_indexvalid;	/* state of rd_indexlist: 0 = not valid, 1 =
  								 * valid, 2 = temporarily forced */
  	SubTransactionId rd_createSubid;	/* rel was created in current xact */
+ 	SubTransactionId rd_newRelfilenodeSubid;	/* rel had new relfilenode in current xact */
  
  	/*
  	 * rd_createSubid is the ID of the highest subtransaction the rel has
-- copy_nowal_prep.sql: build the binary COPY data files under /tmp that the
-- WAL-avoidance tests read back, then create the xlogpos tracking table.

drop table if exists fastcopy1;
-- wide rows: integer plus a 1500-character text column
create table fastcopy1 as select generate_series(1,100000)::integer, repeat('a', 1500)::text;

copy fastcopy1
to '/tmp/fastcopy1' binary;

drop table fastcopy1;

drop table if exists fastcopy2;
-- narrow rows: a single integer column
create table fastcopy2 as select generate_series(1,100000)::integer;

copy fastcopy2
to '/tmp/fastcopy2' binary;

drop table fastcopy2;

-- table to test whether particular commands write WAL or not
-- relies on the fact that temp table writes do not write WAL

drop table if exists xlogpos;
create temp table xlogpos (id integer primary key, pos text);
-- snapshot the current WAL insert pointer as the comparison baseline
insert into xlogpos values (1, pg_current_xlog_insert_location());

-- wasWALwritten(expectation): report whether the caller's expectation about
-- WAL activity was met.  WAL is considered "written" when the current WAL
-- insert pointer no longer matches the position saved in xlogpos (id = 1).
-- Returns text ('true'/'false') so test output is stable across runs instead
-- of exposing the WAL position itself, which would vary between tests.
create or replace function wasWALwritten (expectation boolean) returns text
as $$
declare
	WALresult boolean;
begin

-- WAL was written iff the insert pointer has moved away from the saved value
-- (position(...) = 1 means the current pointer still equals the stored pos)
select (position(pg_current_xlog_insert_location() in pos) <> 1)
  into WALresult
  from xlogpos where id = 1;

-- the test passes when reality matches the caller's expectation;
-- coalesce keeps the original behaviour of returning false when the
-- xlogpos row is missing (WALresult stays NULL in that case)
return coalesce(WALresult = expectation, false);

end;
$$ language plpgsql;

-- self-check of the harness: xlogpos is a temp table, so updating it writes
-- no WAL and the insert pointer should not move between UPDATE and check
update xlogpos set pos = pg_current_xlog_insert_location() where id = 1;
-- expect 'false': we claim WAL was written (true), but none should have been
select wasWALwritten(true);

update xlogpos set pos = pg_current_xlog_insert_location() where id = 1;
-- expect 'true': we claim no WAL was written (false), which should be correct
select wasWALwritten(false);
-- copy_nowal_test.sql: verify COPY skips WAL exactly when the target table
-- was created or truncated earlier in the same transaction.

drop table if exists xlogpos;
create temp table xlogpos (id integer primary key, pos text);
insert into xlogpos values (1, pg_current_xlog_insert_location());

drop table if exists fastcopy1;
create table fastcopy1
(col1 integer
,col2 text);

-- case 1: COPY outside the creating transaction -- WAL must be written
checkpoint;
select pg_current_xlog_insert_location();
update xlogpos set pos = pg_current_xlog_insert_location() where id = 1;
\echo slow COPY
copy fastcopy1
from '/tmp/fastcopy1' binary;
select wasWALwritten(true);
select pg_current_xlog_insert_location();

\echo slow vacuum
vacuum fastcopy1;
drop table if exists fastcopy1;

-- case 2: table created in the same transaction as the COPY -- WAL avoided
checkpoint;
begin;
create table fastcopy1
(col1 integer
,col2 text);
select pg_current_xlog_insert_location();
update xlogpos set pos = pg_current_xlog_insert_location() where id = 1;
\echo fast COPY
copy fastcopy1
from '/tmp/fastcopy1' binary;
select wasWALwritten(false) as waswalavoided;
select pg_current_xlog_insert_location();
commit;

-- case 3: table truncated (new relfilenode) in same transaction -- WAL avoided
checkpoint;
begin;
truncate fastcopy1;
select pg_current_xlog_insert_location();
update xlogpos set pos = pg_current_xlog_insert_location() where id = 1;
\echo fast COPY
copy fastcopy1
from '/tmp/fastcopy1' binary;
select wasWALwritten(false) as waswalavoided;
select pg_current_xlog_insert_location();
commit;
select count(*) from fastcopy1;

-- case 4: COPY after a savepoint, then rolled back to it -- the TRUNCATE
-- commits but the COPY is undone, so the final count should be zero
checkpoint;
begin;
truncate fastcopy1;
savepoint sp;
select pg_current_xlog_insert_location();
update xlogpos set pos = pg_current_xlog_insert_location() where id = 1;
\echo fast COPY
copy fastcopy1
from '/tmp/fastcopy1' binary;
select wasWALwritten(false) as waswalavoided;
select pg_current_xlog_insert_location();
rollback to sp;
commit;
select count(*) from fastcopy1;

-- same four cases repeated for the narrow (single integer column) table
drop table if exists fastcopy2;
create table fastcopy2
(col1 integer);

-- case 1: COPY outside the creating transaction -- WAL must be written
checkpoint;
select pg_current_xlog_insert_location();
update xlogpos set pos = pg_current_xlog_insert_location() where id = 1;
\echo slow COPY
copy fastcopy2
from '/tmp/fastcopy2' binary;
select wasWALwritten(true);
select pg_current_xlog_insert_location();

\echo slow vacuum
vacuum fastcopy2;
drop table if exists fastcopy2;

-- case 2: table created in the same transaction as the COPY -- WAL avoided
checkpoint;
begin;
create table fastcopy2
(col1 integer);
select pg_current_xlog_insert_location();
update xlogpos set pos = pg_current_xlog_insert_location() where id = 1;
\echo fast COPY
copy fastcopy2
from '/tmp/fastcopy2' binary;
select wasWALwritten(false) as waswalavoided;
select pg_current_xlog_insert_location();
commit;

-- case 3: table truncated in the same transaction -- WAL avoided
checkpoint;
begin;
truncate fastcopy2;
select pg_current_xlog_insert_location();
update xlogpos set pos = pg_current_xlog_insert_location() where id = 1;
\echo fast COPY
copy fastcopy2
from '/tmp/fastcopy2' binary;
select wasWALwritten(false) as waswalavoided;
select pg_current_xlog_insert_location();
commit;
select count(*) from fastcopy2;

-- case 4: COPY rolled back to a savepoint -- TRUNCATE commits, COPY undone
checkpoint;
begin;
truncate fastcopy2;
savepoint sp;
select pg_current_xlog_insert_location();
update xlogpos set pos = pg_current_xlog_insert_location() where id = 1;
\echo fast COPY
copy fastcopy2
from '/tmp/fastcopy2' binary;
select wasWALwritten(false) as waswalavoided;
select pg_current_xlog_insert_location();
rollback to sp;
commit;
select count(*) from fastcopy2;
---------------------------(end of broadcast)---------------------------
TIP 3: Have you checked our extensive FAQ?

               http://www.postgresql.org/docs/faq

Reply via email to