[GENERAL] Server memory and efficiency

2007-03-25 Thread amrit angsusingh

# - Connection Settings -

#listen_addresses = 'localhost' # what IP address(es) to listen on;

listen_addresses = '*' # comma-separated list of addresses;

# defaults to 'localhost', '*' = all

#port = 5432

max_connections = 200

# note: increasing max_connections costs ~400 bytes of shared memory per

# connection slot, plus lock space (see max_locks_per_transaction). You

# might also need to raise shared_buffers to support more connections.

#superuser_reserved_connections = 2

superuser_reserved_connections = 4

#unix_socket_directory = ''

#unix_socket_group = ''

#unix_socket_permissions = 0777 # octal

#bonjour_name = '' # defaults to the computer name

# - Security & Authentication -

#authentication_timeout = 60 # 1-600, in seconds

#ssl = off

#password_encryption = on

#db_user_namespace = off

# Kerberos

#krb_server_keyfile = ''

#krb_srvname = 'postgres'

#krb_server_hostname = '' # empty string matches any keytab entry

#krb_caseins_users = off

# - TCP Keepalives -

# see 'man 7 tcp' for details

#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;

# 0 selects the system default

#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;

# 0 selects the system default

#tcp_keepalives_count = 0 # TCP_KEEPCNT;

# 0 selects the system default



#---

# RESOURCE USAGE (except WAL)

#---

# - Memory -

#shared_buffers = 2000 # min 16 or max_connections*2, 8KB each

#shared_buffers = 31744

#shared_buffers = 8192

shared_buffers = 16384

#temp_buffers = 1000 # min 100, 8KB each

#max_prepared_transactions = 5 # can be 0 or more

# note: increasing max_prepared_transactions costs ~600 bytes of shared memory

# per transaction slot, plus lock space (see max_locks_per_transaction).

#work_mem = 1024 # min 64, size in KB

work_mem = 4096

#maintenance_work_mem = 16384 # min 1024, size in KB

maintenance_work_mem = 131078

#max_stack_depth = 2048 # min 100, size in KB

# - Free Space Map -

#max_fsm_pages = 20000 # min max_fsm_relations*16, 6 bytes each

#max_fsm_relations = 1000 # min 100, ~70 bytes each

# - Kernel Resource Usage -

#max_files_per_process = 1000 # min 25

#preload_libraries = ''

# - Cost-Based Vacuum Delay -

#vacuum_cost_delay = 0 # 0-1000 milliseconds

#vacuum_cost_page_hit = 1 # 0-10000 credits

#vacuum_cost_page_miss = 10 # 0-10000 credits

#vacuum_cost_page_dirty = 20 # 0-10000 credits

#vacuum_cost_limit = 200 # 0-10000 credits

# - Background writer -

#bgwriter_delay = 200 # 10-10000 milliseconds between rounds

#bgwriter_lru_percent = 1.0 # 0-100% of LRU buffers scanned/round

#bgwriter_lru_maxpages = 5 # 0-1000 buffers max written/round

#bgwriter_all_percent = 0.333 # 0-100% of all buffers scanned/round

#bgwriter_all_maxpages = 5 # 0-1000 buffers max written/round



#---

# WRITE AHEAD LOG

#---

# - Settings -

#fsync = on # turns forced synchronization on or off

#wal_sync_method = fsync # the default is the first option

# supported by the operating system:

# open_datasync

# fdatasync

# fsync

# fsync_writethrough

# open_sync

#full_page_writes = on # recover from partial page writes

#wal_buffers = 8 # min 4, 8KB each

wal_buffers = 32

#commit_delay = 0 # range 0-100000, in microseconds

commit_delay = 20

#commit_siblings = 5 # range 1-1000

# - Checkpoints -

#checkpoint_segments = 3 # in logfile segments, min 1, 16MB each

checkpoint_segments = 8

#checkpoint_timeout = 300 # range 30-3600, in seconds

#checkpoint_warning = 30 # in seconds, 0 is off

# - Archiving -

#archive_command = '' # command to use to archive a logfile

# segment



#---

# QUERY TUNING

#---

# - Planner Method Configuration -

#enable_bitmapscan = on

#enable_hashagg = on

#enable_hashjoin = on

#enable_indexscan = on

#enable_mergejoin = on

#enable_nestloop = on

#enable_seqscan = on

#enable_sort = on

#enable_tidscan = on

# - Planner Cost Constants -

#effective_cache_size = 1000 # typically 8KB each

effective_cache_size = 307800

#effective_cache_size = 16

#random_page_cost = 4 # units are one sequential page fetch

# cost

#cpu_tuple_cost = 0.01 # (same)

#cpu_index_tuple_cost = 0.001 # (same)

#cpu_operator_cost = 0.0025 # (same)

# - Genetic Query Optimizer -

#geqo = on

#geqo_threshold = 12

#geqo_effort = 5 # range 1-10

#geqo_pool_size = 0 # selects default based on effort

#geqo_generations = 0 # selects default based on effort

#geqo_selection_bias = 2.0 # range 1.5-2.0

# - Other Planner Options -

#default_statistics_target = 10 # range 1-1000

#constraint_exclusion = off

#from_collapse_limit = 8

#join_collapse_limit = 8 # 1 disables collapsing of explicit JOINs

[GENERAL] Howto optimize pg 8.1.4 and FC6 x64 ??

2007-03-23 Thread amrit angsusingh

I try to change my database server from the older one, i.e. 2 CPU Xeon 2.4 32
bit 4Gb SDram Hdd SCSI RAID 5 and FC 3 ix86 with 7.4.7 PG, to the newer one
with 2 CPU Xeon 3.0 64 Bit 4Gb DDRram SCSI Raid5 and FC6 X64 PG 8.1.4, and try
to use rather the same parameters from the previous postgresql.conf :-
the older server config--
shared_buffers = 31744
#sort_mem = 1024# min 64, size in KB
sort_mem = 8192
#vacuum_mem = 8064  # min 1024, size in KB
vacuum_mem = 32768

# - Free Space Map -

#max_fsm_pages = 20000  # min max_fsm_relations*16, 6 bytes each
#max_fsm_relations = 1000   # min 100, ~50 bytes each

# - Kernel Resource Usage -

#max_files_per_process = 1000   # min 25
#preload_libraries = ''


#---
# WRITE AHEAD LOG
#---

# - Settings -

#fsync = true   # turns forced synchronization on or off

#wal_sync_method = fsync# the default varies across platforms:
   # fsync, fdatasync, open_sync, or open_datasync
#wal_buffers = 8# min 4, 8KB each

# - Checkpoints -

#checkpoint_segments = 3# in logfile segments, min 1, 16MB each
checkpoint_segments = 8
#checkpoint_timeout = 300   # range 30-3600, in seconds
#checkpoint_warning = 30# 0 is off, in seconds
#commit_delay = 0   # range 0-100000, in microseconds
commit_delay = 20
#commit_siblings = 5# range 1-1000

#effective_cache_size = 153600
effective_cache_size = 307800


I use pgbench to test the speed of my older database server and the result
is

bash-3.00$ pgbench  test -t 20 -c 30 -s 50

starting vacuum...end.

transaction type: TPC-B (sort of)

scaling factor: 50

number of clients: 30

number of transactions per client: 20

number of transactions actually processed: 600/600

tps = 337.196481 (including connections establishing)

tps = 375.478735 (excluding connections establishing)

But my newer database server configuration is somewhat like this;-


max_connections = 200

#shared_buffers = 2000 # min 16 or max_connections*2, 8KB each

shared_buffers = 31744

#temp_buffers = 1000 # min 100, 8KB each

#max_prepared_transactions = 5 # can be 0 or more

# note: increasing max_prepared_transactions costs ~600 bytes of shared memory

# per transaction slot, plus lock space (see max_locks_per_transaction).

#work_mem = 1024 # min 64, size in KB

work_mem = 8192

#maintenance_work_mem = 16384 # min 1024, size in KB

maintenance_work_mem = 131078

#max_stack_depth = 2048 # min 100, size in KB



#commit_delay = 0 # range 0-100000, in microseconds

commit_delay = 20

#commit_siblings = 5 # range 1-1000

# - Checkpoints -

#checkpoint_segments = 3 # in logfile segments, min 1, 16MB each

checkpoint_segments = 8

#checkpoint_timeout = 300 # range 30-3600, in seconds

#checkpoint_warning = 30 # in seconds, 0 is off

#effective_cache_size = 1000 # typically 8KB each

effective_cache_size = 307800

#autovacuum = off # enable autovacuum subprocess?

#autovacuum_naptime = 60 # time between autovacuum runs, in secs

#autovacuum_vacuum_threshold = 1000 # min # of tuple updates before

# vacuum

#autovacuum_analyze_threshold = 500 # min # of tuple updates before

# analyze

#autovacuum_vacuum_scale_factor = 0.4 # fraction of rel size before

# vacuum

#autovacuum_analyze_scale_factor = 0.2 # fraction of rel size before

# analyze

#autovacuum_vacuum_cost_delay = -1 # default vacuum cost delay for

# autovac, -1 means use

# vacuum_cost_delay

#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for

# autovac, -1 means use

# vacuum_cost_limit
and the result of pgbench from my new server is only

-- pgbench  test -t 20 -c 30 -s 50

tps = 197 (including connections establishing)

tps = 212



1. How should I adjust my new configuration to improve the performance ?

2. And should I set autovacuum = on, and if it is on, what are the other proper
parameters to be set?


Thank a lot for your help.


--
Amrit Angsusingh
Thailand


[GENERAL] DBmirror

2006-12-21 Thread amrit
I would like to know whether I can set up DBmirror replication on Fedora Core
5 x64, where postgresql 8.1 was installed by the rpm process, not by compiling the
source. All of the components, including server, contrib, perl, test, doc, devel
and lib, were also installed from rpm; what is the way to set it up?
Do I need to install the Pg-perl source after the standard rpm process?
Anybody use postgresql with rpm and DBmirror?
Amrit
Thailand

---(end of broadcast)---
TIP 3: Have you checked our extensive FAQ?

   http://www.postgresql.org/docs/faq


[GENERAL] Cannot connect Postgresql 8.1 64 bit

2006-04-08 Thread amrit
I try to set  FC3 X64 and postgresql 64 8.1 in my new server but I cannot
connect it with pgadminIII. My pg_hba.conf is like below:


# local is for Unix domain socket connections only
local   all all   ident sameuser
# IPv4 local connections:
hostall all 127.0.0.1/32  trust sameuser
hostall all 192.168.0.0/255.255.0.0 trust   sameuser
# IPv6 local connections:
hostall all ::1/128   ident sameuser

No firewall was set, and I cannot set it via webmin because of module
configuration.
What should I set in postgresql.conf to be able to connect to my postgres
server?
Thanks.
Amrit
Thailand

---(end of broadcast)---
TIP 9: In versions below 8.0, the planner will ignore your desire to
   choose an index scan if your joining column's datatypes do not
   match


Re: [GENERAL] Could DBmirror be done with the mechine using RPM installation instaed of souce code [tar.gz] ?

2006-02-25 Thread Amrit Angsusing
I tried to do it by rpm installation, but it seemed to be missing some files which were described by the doc, e.g. pending.so and its path, and
perl /./DBmirror.pl
could not run.
Could you suggest a URL describing how to install DBmirror on an rpm machine?
Thanks
Amrit
2006/2/25, chris smith [EMAIL PROTECTED]:
> I use the two machines with FC3 and postgresql 8.1 RPM installation and would like to do DBmirror or other postgresql database replication from
> master to slave server. Could I do this replication using RPM, or must I use the tar.gz version? Could anybody advise me about replication?

There should be a 'postgresql-contrib' rpm for FC3 (use your favourite
search engine). If you can't find one - create an rpm (it's pretty easy). That's getting way outside of this mailing list's scope though.


[GENERAL] Could DBmirror be done with the mechine using RPM installation instaed of souce code [tar.gz] ?

2006-02-24 Thread Amrit Angsusing
I use the two machines with FC3 and postgresql 8.1 RPM installation and would like to do DBmirror or other postgresql database replication from master to slave server. Could I do this replication using RPM, or must I use the
tar.gz version? Could anybody advise me about replication?
Amrit