Hi,
On 2021-06-10 16:42:01 +0300, Anastasia Lubennikova wrote:
> Cool. Thank you for working on that!
> Could you please share a WIP patch for the $subj? I'd be happy to help with
> it.
I've attached the current WIP state, which hasn't evolved much since
this message... I put the test in
src/backend/access/heap/t/001_emergency_vacuum.pl
but I'm not sure that's the best place; I don't think
src/test/recovery is a great fit either.
Regards,
Andres
use strict;
use warnings;
use PostgresNode;
use TestLib;
use Test::More tests => 4;
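# Plan note: command_like() below reports three tests (exit code,
# empty stderr, stdout match), plus the single ok() check.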
# Initialize primary node
my $node_primary = get_new_node('primary');
$node_primary->init(allows_streaming => 1);
$node_primary->append_conf('postgresql.conf', qq/
max_prepared_transactions = 10
autovacuum_naptime = 1s
# So it's easier to verify the order of operations
autovacuum_max_workers = 1
autovacuum_vacuum_cost_delay = 0
log_autovacuum_min_duration = 0
/);
$node_primary->start;
#
# Create tables for a few different test scenarios
#
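# The large/small pairs differ in row count; the *_trunc variants get
# their tail deleted below so vacuum can truncate them, while the
# others get every other row deleted.  autovacuum_disabled is there,
# presumably, to show that anti-wraparound vacuuming still processes a
# table with autovacuum_enabled=false.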
$node_primary->safe_psql('postgres', qq/
CREATE TABLE large(id serial primary key, data text, filler text default repeat(random()::text, 10));
INSERT INTO large(data) SELECT generate_series(1,30000);
CREATE TABLE large_trunc(id serial primary key, data text, filler text default repeat(random()::text, 10));
INSERT INTO large_trunc(data) SELECT generate_series(1,30000);
CREATE TABLE small(id serial primary key, data text, filler text default repeat(random()::text, 10));
INSERT INTO small(data) SELECT generate_series(1,15000);
CREATE TABLE small_trunc(id serial primary key, data text, filler text default repeat(random()::text, 10));
INSERT INTO small_trunc(data) SELECT generate_series(1,15000);
CREATE TABLE autovacuum_disabled(id serial primary key, data text) WITH (autovacuum_enabled=false);
INSERT INTO autovacuum_disabled(data) SELECT generate_series(1,1000);
/);
# To prevent autovacuum from handling the tables immediately after
# restart, acquire locks in a 2PC transaction. That allows us to test
# interactions with running commands.
$node_primary->safe_psql('postgres', qq(
BEGIN;
LOCK TABLE large IN SHARE UPDATE EXCLUSIVE MODE;
LOCK TABLE large_trunc IN SHARE UPDATE EXCLUSIVE MODE;
LOCK TABLE small IN SHARE UPDATE EXCLUSIVE MODE;
LOCK TABLE small_trunc IN SHARE UPDATE EXCLUSIVE MODE;
LOCK TABLE autovacuum_disabled IN SHARE UPDATE EXCLUSIVE MODE;
PREPARE TRANSACTION 'prevent_vacuum';
));
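# Optionally, the prepared transaction could be logged here to make
# failures easier to diagnose; diag() does not affect the test plan:
# diag($node_primary->safe_psql('postgres',
#     qq/SELECT gid, prepared FROM pg_prepared_xacts/));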
# Delete rows to ensure that vacuum has work to do.
$node_primary->safe_psql('postgres', qq/
DELETE FROM large WHERE id % 2 = 0;
DELETE FROM large_trunc WHERE id > 10000;
DELETE FROM small WHERE id % 2 = 0;
DELETE FROM small_trunc WHERE id > 1000;
DELETE FROM autovacuum_disabled WHERE id % 2 = 0;
/);
$node_primary->stop;
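# Extra logging to make the WIP test easier to debug.  Note that the
# wal_debug GUC only exists in builds compiled with WAL_DEBUG defined.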
$node_primary->append_conf('postgresql.conf', qq/
wal_debug=0
log_min_messages=debug2
/);
# Need to reset to a clog page boundary, otherwise we'll get errors
# about the pg_xact (clog) segment file not existing. With default
# compilation settings
# CLOG_XACTS_PER_PAGE is 32768. The value below is 32768 *
# (2000000000/32768 + 1), with 2000000000 being the max value for
# autovacuum_freeze_max_age.
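# A sketch of the arithmetic, should the value ever need recomputing
# (assuming CLOG_XACTS_PER_PAGE keeps its default of 32768):
#   32768 * (int(2_000_000_000 / 32768) + 1) = 32768 * 61036 = 2000027648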
command_like([ 'pg_resetwal', '-x2000027648', $node_primary->data_dir ],
qr/Write-ahead log reset/, 'pg_resetwal -x to 2000027648');
#
# Now test autovacuum behaviour. Because of the 2PC transaction
# acquiring locks we can perform setup without autovacuum racing
# ahead.
#
$node_primary->start;
diag($node_primary->safe_psql('postgres',
qq/SELECT datname, datfrozenxid, age(datfrozenxid) FROM pg_database ORDER BY datname/));
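# Committing the prepared transaction releases its locks, letting the
# single autovacuum worker start processing the tables.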
$node_primary->safe_psql('postgres', qq/
COMMIT PREPARED 'prevent_vacuum';
/);
ok($node_primary->poll_query_until('postgres', qq/
SELECT NOT EXISTS (
SELECT *
FROM pg_database
WHERE age(datfrozenxid) > current_setting('autovacuum_freeze_max_age')::int)
/),
"xid horizon increased");
diag($node_primary->safe_psql('postgres',
qq/SELECT datname, datfrozenxid, age(datfrozenxid) FROM pg_database ORDER BY datname/));
diag($node_primary->safe_psql('postgres', qq/
SELECT oid::regclass, relfrozenxid
FROM pg_class
WHERE oid = ANY(ARRAY['large'::regclass, 'large_trunc', 'autovacuum_disabled'])
/));
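# A possible follow-up check (sketch only; enabling it would require
# bumping the plan to 5 tests): wait for the autovacuum_enabled=false
# table's relfrozenxid to advance too, since anti-wraparound vacuums
# ignore that reloption.
# ok($node_primary->poll_query_until('postgres', qq/
#     SELECT age(relfrozenxid) < current_setting('autovacuum_freeze_max_age')::int
#     FROM pg_class WHERE oid = 'autovacuum_disabled'::regclass
# /), "autovacuum_disabled table was frozen");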
$node_primary->stop;