Jamie Fox wrote:
> > > Here's what I have found that got broken during pg_migrate: In two side
> > > by side databases (an 8.3.7 copy and 8.4.0 migrated with pg_migrator) the
> > > pg_largeobject table has the same number of rows. However, in the 8.4
> > > database any select for an loid in pg_largeobject returns zero rows. If I
> > > select all loids to a file, and compare to select all loids from 8.3.7
> > > they're the same. When I select != an loid it seems to exclude the one and
> > > return the rest, but all other comparisons <, > or = return zero rows. Or
> > > I'm completely batty. Dereferencing via lo_open of blob_data (an oid) in
> > > other tables fails in the 8.4 database with 'large object xxxxid does not
> > > exist'.
> >
> > Oh, so maybe it's pg_largeobject's index that's borked ... Did you try
> > reindexing it?
> >
> > How are we transferring pg_largeobject, and are we transferring its
> > index too?
>
>
> Hi -
> REINDEX INDEX pg_largeobject_loid_pn_index;
>
> This seems to have fixed the problem, lo_open of lob data is working again -
> now to see how vacuumlo likes it.
I have applied the attached patch to pg_migrator to properly migrate the
pg_largeobject index. I have added large object comment migration as a
TODO item.
This eliminates the last known bug in pg_migrator.
--
Bruce Momjian <[email protected]> http://momjian.us
EnterpriseDB http://enterprisedb.com
+ If your life is a hard drive, Christ can be your backup. +
? tools
? log
? src/pg_migrator
Index: TODO
===================================================================
RCS file: /cvsroot/pg-migrator/pg_migrator/TODO,v
retrieving revision 1.15
diff -c -r1.15 TODO
*** TODO 2 Jun 2009 15:15:38 -0000 1.15
--- TODO 20 Jul 2009 21:55:58 -0000
***************
*** 1,3 ****
--- 1,4 ----
o support migration from Postgres 8.2 to 8.4?
o create pg_dump custom format for rebuilds so it can be done in parallel
o remove copy_dir code, or use it
+ o handle large object comments
Index: src/pg_migrator.h
===================================================================
RCS file: /cvsroot/pg-migrator/pg_migrator/src/pg_migrator.h,v
retrieving revision 1.64
diff -c -r1.64 pg_migrator.h
*** src/pg_migrator.h 20 Jul 2009 18:57:12 -0000 1.64
--- src/pg_migrator.h 20 Jul 2009 21:55:58 -0000
***************
*** 202,207 ****
--- 202,208 ----
char *pg_version_str; /* string PG_VERSION of cluster */
Oid pg_database_oid; /* OID of pg_database relation */
Oid pg_largeobject_oid; /* OID of pg_largeobject relation */
+ Oid pg_largeobject_index_oid; /* OID of pg_largeobject index */
char *libpath; /* pathname for cluster's pkglibdir */
} ClusterInfo;
Index: src/relfilenode.c
===================================================================
RCS file: /cvsroot/pg-migrator/pg_migrator/src/relfilenode.c,v
retrieving revision 1.26
diff -c -r1.26 relfilenode.c
*** src/relfilenode.c 2 Jul 2009 23:30:27 -0000 1.26
--- src/relfilenode.c 20 Jul 2009 21:55:58 -0000
***************
*** 164,184 ****
{
DbInfo *new_db = &newdb_arr->dbs[dbnum];
DbInfo *old_db = dbarr_lookup_db(olddb_arr, new_db->db_name);
FileNameMap *mappings;
int n_maps;
pageCnvCtx *pageConverter = NULL;
- assert(old_db);
-
n_maps = 0;
mappings = gen_db_file_maps(ctx, old_db, new_db, &n_maps, old_pgdata,
new_pgdata);
if (n_maps)
{
- char old_file[MAXPGPATH];
- char new_file[MAXPGPATH];
-
print_maps(ctx, mappings, n_maps, new_db->db_name);
#ifdef PAGE_CONVERSION
--- 164,181 ----
{
DbInfo *new_db = &newdb_arr->dbs[dbnum];
DbInfo *old_db = dbarr_lookup_db(olddb_arr, new_db->db_name);
+ char old_file[MAXPGPATH];
+ char new_file[MAXPGPATH];
FileNameMap *mappings;
int n_maps;
pageCnvCtx *pageConverter = NULL;
n_maps = 0;
mappings = gen_db_file_maps(ctx, old_db, new_db, &n_maps, old_pgdata,
new_pgdata);
if (n_maps)
{
print_maps(ctx, mappings, n_maps, new_db->db_name);
#ifdef PAGE_CONVERSION
***************
*** 187,206 ****
transfer_single_new_db(ctx, pageConverter, mappings, n_maps);
pg_free(mappings);
-
- /*
- * The pg_largeobject system table is treated as a user table.
- * Since we already know its OID we simply link it
- */
- snprintf(old_file, sizeof(old_file), "%s/base/%u/%u", old_pgdata,
- old_db->db_oid, ctx->old.pg_largeobject_oid);
- snprintf(new_file, sizeof(new_file), "%s/base/%u/%u", new_pgdata,
- new_db->db_oid, ctx->new.pg_largeobject_oid);
-
- unlink(new_file);
- transfer_relfile(ctx, pageConverter, old_file, new_file, "pg_catalog",
- "pg_largeobject", "pg_catalog", "pg_largeobject");
}
}
return msg;
--- 184,214 ----
transfer_single_new_db(ctx, pageConverter, mappings, n_maps);
pg_free(mappings);
}
+
+ /*
+ * The pg_largeobject system table is treated as a user table.
+ * Since we already know its OID we simply link it
+ */
+ snprintf(old_file, sizeof(old_file), "%s/base/%u/%u", old_pgdata,
+ old_db->db_oid, ctx->old.pg_largeobject_oid);
+ snprintf(new_file, sizeof(new_file), "%s/base/%u/%u", new_pgdata,
+ new_db->db_oid, ctx->new.pg_largeobject_oid);
+
+ unlink(new_file);
+ transfer_relfile(ctx, pageConverter, old_file, new_file, "pg_catalog",
+ "pg_largeobject", "pg_catalog", "pg_largeobject");
+
+ /* do the pg_largeobject index too */
+ snprintf(old_file, sizeof(old_file), "%s/base/%u/%u", old_pgdata,
+ old_db->db_oid, ctx->old.pg_largeobject_index_oid);
+ snprintf(new_file, sizeof(new_file), "%s/base/%u/%u", new_pgdata,
+ new_db->db_oid, ctx->new.pg_largeobject_index_oid);
+
+ unlink(new_file);
+ transfer_relfile(ctx, pageConverter, old_file, new_file, "pg_catalog",
+ "pg_largeobject_loid_pn_index", "pg_catalog",
+ "pg_largeobject_loid_pn_index");
}
return msg;
***************
*** 218,224 ****
{
PGconn *conn = connectToServer(ctx, "template1", whichCluster);
PGresult *res;
! int relfile_fnum;
res = executeQueryOrDie(ctx, conn,
"SELECT c.relname, c.relfilenode "
--- 226,232 ----
{
PGconn *conn = connectToServer(ctx, "template1", whichCluster);
PGresult *res;
! int i_relfile;
res = executeQueryOrDie(ctx, conn,
"SELECT c.relname, c.relfilenode "
***************
*** 227,245 ****
"WHERE c.relnamespace = n.oid AND "
" n.nspname = 'pg_catalog' AND "
" c.relname IN "
! " ('pg_database', 'pg_largeobject') "
"ORDER BY c.relname");
! relfile_fnum = PQfnumber(res, "relfilenode");
if (whichCluster == CLUSTER_OLD)
{
! ctx->old.pg_database_oid = atol(PQgetvalue(res, 0, relfile_fnum));
! ctx->old.pg_largeobject_oid = atol(PQgetvalue(res, 1, relfile_fnum));
}
else
{
! ctx->new.pg_database_oid = atol(PQgetvalue(res, 0, relfile_fnum));
! ctx->new.pg_largeobject_oid = atol(PQgetvalue(res, 1, relfile_fnum));
}
PQclear(res);
--- 235,256 ----
"WHERE c.relnamespace = n.oid AND "
" n.nspname = 'pg_catalog' AND "
" c.relname IN "
! " ('pg_database', 'pg_largeobject', "
! " 'pg_largeobject_loid_pn_index') "
"ORDER BY c.relname");
! i_relfile = PQfnumber(res, "relfilenode");
if (whichCluster == CLUSTER_OLD)
{
! ctx->old.pg_database_oid = atol(PQgetvalue(res, 0, i_relfile));
! ctx->old.pg_largeobject_oid = atol(PQgetvalue(res, 1, i_relfile));
! ctx->old.pg_largeobject_index_oid = atol(PQgetvalue(res, 2, i_relfile));
}
else
{
! ctx->new.pg_database_oid = atol(PQgetvalue(res, 0, i_relfile));
! ctx->new.pg_largeobject_oid = atol(PQgetvalue(res, 1, i_relfile));
! ctx->new.pg_largeobject_index_oid = atol(PQgetvalue(res, 2, i_relfile));
}
PQclear(res);
--
Sent via pgsql-hackers mailing list ([email protected])
To make changes to your subscription:
http://www.postgresql.org/mailpref/pgsql-hackers