Skip to content

postgresql building lock tree from text log

postgresql building lock tree from text log published on Комментариев к записи postgresql building lock tree from text log нет

1) собрать все логи с бд по локам

[postgres@p38rmisdb01 postgres]$ grep -i 'lock:' postgresql-2018-04-06.log

2) build a WITH query, enclosing every log row in single quotes (') and terminating the statement with a semicolon
--- it is best to run this on a different server

-- Wrap the raw "lock" log lines (one quoted string per line, collected
-- with grep on step 1) into a single-column relation b(txt).
with b as (
select unnest(array[
 
 
'2018-06-05 00:03:33 KRAT [22266]: [17-1] db=lsd,appname=[unknown],user=app_group_master,client=192.168.5.111 DETAIL:  Process holding the lock: 24957. Wait queue: 22266.',
'2018-06-05 09:00:36 KRAT [5922]: [6-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 17343. Wait queue: 5922.',
'2018-06-05 09:01:10 KRAT [5922]: [12-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 13309. Wait queue: 5922.',
'2018-06-05 09:06:56 KRAT [9196]: [6-1] db=lsd,appname=192.168.4.72,user=n2o,client=192.168.5.111 DETAIL:  Process holding the lock: 10136. Wait queue: .',
'2018-06-05 09:09:55 KRAT [28905]: [4-1] db=lsd,appname=192.168.4.22,user=lsd,client=192.168.5.111 DETAIL:  Process holding the lock: 24188. Wait queue: 28905.',
'2018-06-05 09:10:08 KRAT [28905]: [11-1] db=lsd,appname=192.168.4.22,user=lsd,client=192.168.5.111 DETAIL:  Process holding the lock: 24188. Wait queue: 28905.',
'2018-06-05 09:11:07 KRAT [28905]: [15-1] db=lsd,appname=192.168.4.22,user=lsd,client=192.168.5.111 DETAIL:  Process holding the lock: 24188. Wait queue: 28905.',
'2018-06-05 10:00:06 KRAT [342]: [5-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 23155. Wait queue: 342.',
'2018-06-05 10:00:41 KRAT [342]: [11-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 9430. Wait queue: 342.',
'2018-06-05 10:01:21 KRAT [342]: [17-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 1941. Wait queue: 342.',
'2018-06-05 10:09:23 KRAT [27511]: [411-1] db=lsd,appname=192.168.4.72,user=n2o,client=192.168.5.111 DETAIL:  Process holding the lock: 18429. Wait queue: .',
'2018-06-05 10:09:24 KRAT [18429]: [6-1] db=lsd,appname=192.168.4.72,user=n2o,client=192.168.5.111 DETAIL:  Process holding the lock: 30049. Wait queue: .',
'2018-06-05 10:09:24 KRAT [30049]: [4-1] db=lsd,appname=192.168.4.72,user=n2o,client=192.168.5.111 DETAIL:  Process holding the lock: 18429. Wait queue: 30049.',
'2018-06-05 11:00:43 KRAT [29866]: [6-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Processes holding the lock: 2667, 27024. Wait queue: 29866.',
'2018-06-05 11:00:56 KRAT [29866]: [12-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 29718. Wait queue: 29866.',
'2018-06-05 11:14:49 KRAT [13709]: [902-1] db=lsd,appname=192.168.4.72,user=n2o,client=192.168.5.111 DETAIL:  Process holding the lock: 4172. Wait queue: .',
'2018-06-05 11:14:49 KRAT [4172]: [4-1] db=lsd,appname=192.168.4.72,user=n2o,client=192.168.5.111 DETAIL:  Process holding the lock: 13719. Wait queue: .',
'2018-06-05 12:00:07 KRAT [24431]: [5-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 26119. Wait queue: 24431.',
'2018-06-05 12:00:41 KRAT [24431]: [11-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 23751. Wait queue: 24431.',
'2018-06-05 12:01:02 KRAT [24431]: [17-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 11215. Wait queue: 24431.',
'2018-06-05 13:00:43 KRAT [19265]: [6-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 7261. Wait queue: 19265.',
'2018-06-05 13:00:46 KRAT [19265]: [12-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 22841. Wait queue: 19265.',
'2018-06-05 14:00:02 KRAT [13762]: [4-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 32362. Wait queue: 13762.',
'2018-06-05 14:00:06 KRAT [13762]: [10-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 17264. Wait queue: 13762.',
'2018-06-05 14:00:34 KRAT [13762]: [16-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 28253. Wait queue: 13762.',
'2018-06-05 14:00:37 KRAT [13762]: [22-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 14070. Wait queue: 13762.',
'2018-06-05 14:12:13 KRAT [30079]: [702-1] db=lsd,appname=192.168.4.72,user=n2o,client=192.168.5.111 DETAIL:  Process holding the lock: 4418. Wait queue: .',
'2018-06-05 15:00:02 KRAT [8188]: [4-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 17264. Wait queue: 8188.',
'2018-06-05 15:00:07 KRAT [8188]: [10-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 1981. Wait queue: 8188.',
'2018-06-05 15:00:40 KRAT [8188]: [16-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Processes holding the lock: 13534, 27378. Wait queue: 8188.',
'2018-06-05 15:01:16 KRAT [8188]: [22-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 16850. Wait queue: 8188.',
'2018-06-05 16:00:02 KRAT [2756]: [4-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Processes holding the lock: 1981, 25953. Wait queue: 2756.',
'2018-06-05 16:00:07 KRAT [2756]: [10-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 23839. Wait queue: 2756.',
'2018-06-05 16:00:34 KRAT [2756]: [16-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 8448. Wait queue: 2756.',
'2018-06-05 16:01:07 KRAT [2756]: [22-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 6456. Wait queue: 2756.',
'2018-06-05 17:00:02 KRAT [29439]: [4-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 23839. Wait queue: 29439.',
'2018-06-05 17:00:29 KRAT [29439]: [11-1] db=lsd,appname=psql,user=postgres,client=[local] DETAIL:  Process holding the lock: 3378. Wait queue: 29439.'
 
              ])
as txt),
analyze_lock as (
-- Parse each log line: the first 19 characters are the timestamp; the
-- holder PID list and the waiting PID are cut out of the DETAIL text.
select substring(txt, 1,19)::timestamp without time zone as ttime,
split_part(split_part(txt, 'holding the lock:', 2), '. Wait', 1) as holding_lock,
split_part(split_part(txt, 'Wait queue: ', 2), '.', 1) as wait_queue,
txt from b
---where txt like '%3103%'
--- here you can restrict by user or by process
),
analyze_lock_normalize as ( -- normalized view of the locks (comma-separated PID lists are split into separate rows)
SELECT ttime,
       unnest(string_to_array(regexp_replace(regexp_replace(cast((holding_lock) AS CHARACTER VARYING), '^\(',''), '\)$', ''), ','))::int AS holding_lock,
       unnest(string_to_array(regexp_replace(regexp_replace(cast((wait_queue) AS CHARACTER VARYING), '^\(',''), '\)$', ''), ','))::int AS wait_queue,
             txt
from analyze_lock
--WHERE ttime::time between '14:45:00'::time and  '15:15:00'::time
--- here you can restrict by time
)
,
--- plain parent/child (adjacency list) table: roots are holder PIDs that
--- never appear in any wait queue themselves
pp as (
select distinct holding_lock::int as id, null::int as parent_id, null::timestamp without time zone as ttime from analyze_lock_normalize t where not exists (select from analyze_lock_normalize t2 where t2.wait_queue = t.holding_lock )
union all
select distinct wait_queue::int as h_id, holding_lock::int as parrent_id, ttime from analyze_lock_normalize t --where not exists (select from analyze_lock t2 where t2.wait_queue like '%'||t.holding_lock||'%' )
),
 
ltr as (
                 -- Recursive walk over pp: builds an ltree path for every lock
                 -- chain, starting from the root holders (parent_id IS NULL);
                 -- main_lock remembers the chain's root PID on every row.
                 WITH RECURSIVE ltr_p AS (
                         SELECT id,
                            parent_id,
                            ttime,
                                                        id::text::ltree as ltree_lock,
                                                        id::text::ltree as main_lock,
                            1 as llevel
                           FROM pp
                          WHERE parent_id IS NULL
                        UNION ALL
                         SELECT n.id,
                            n.parent_id,
                            n.ttime,
                                                        r.ltree_lock || (n.id::text::ltree),
                                                        r.main_lock,
                            r.llevel + 1
                           FROM pp n
                             JOIN ltr_p r ON n.parent_id = r.id
                        where r.llevel < 10 -- limit recursion depth into the lock chains (the goal is to find who originates the locks, not everyone they block)
                        )
                 SELECT *, min(ttime) over (partition by main_lock) as main_lock_time, max(ttime) over (partition by main_lock) as main_lock_time_max
                   FROM ltr_p
                  ORDER BY ltree_lock
)
 
-- Final report: one row per lock chain, rendered as "root --> child --> ...",
-- with the first/last time the chain's root was seen holding a lock.
select replace(ltree_lock::text,'.',' --> ') as ltree_lock, min(ttime)/*-INTERVAL '5h'*/ as time_lock, main_lock, main_lock_time/*-INTERVAL '5h'*/, main_lock_time_max/*-INTERVAL '5h'*/ from ltr group by 1, 3, 4, 5 order by 4, 3, min(ttime) nulls first, 1
--- you can change  " -INTERVAL '5h'" to any suitable value

Result is:

10040	
10040	06.04.2018 4:38	06.04.2018 4:38
10040 --> 10018	06.04.2018 4:38	10040	06.04.2018 4:38	06.04.2018 4:38
10040 --> 10100	06.04.2018 4:38	10040	06.04.2018 4:38	06.04.2018 4:38
10040 --> 27005	06.04.2018 4:38	10040	06.04.2018 4:38	06.04.2018 4:38
20930	
20930	06.04.2018 5:07	06.04.2018 5:07
20930 --> 21075	06.04.2018 5:07	20930	06.04.2018 5:07	06.04.2018 5:07
8548	
8548	06.04.2018 6:41	06.04.2018 6:41
8548 --> 30636	06.04.2018 6:41	8548	06.04.2018 6:41	06.04.2018 6:41
27429	
27429	06.04.2018 7:01	06.04.2018 7:01
27429 --> 3837	06.04.2018 7:01	27429	06.04.2018 7:01	06.04.2018 7:01
30857	
30857	06.04.2018 7:02	06.04.2018 7:02
30857 --> 3837	06.04.2018 7:02	30857	06.04.2018 7:02	06.04.2018 7:02
10219	
10219	06.04.2018 7:49	06.04.2018 7:57
10219 --> 31116	06.04.2018 7:49	10219	06.04.2018 7:49	06.04.2018 7:57
10219 --> 31116 --> 9138	06.04.2018 7:53	10219	06.04.2018 7:49	06.04.2018 7:57
10219 --> 31116 --> 31119	06.04.2018 7:55	10219	06.04.2018 7:49	06.04.2018 7:57
10219 --> 31116 --> 31119 --> 3410	06.04.2018 7:56	10219	06.04.2018 7:49	06.04.2018 7:57
10219 --> 31116 --> 9138 --> 31118	06.04.2018 7:57	10219	06.04.2018 7:49	06.04.2018 7:57
30944	
30944	06.04.2018 7:56	06.04.2018 7:56
30944 --> 31114	06.04.2018 7:56	30944	06.04.2018 7:56	06.04.2018 7:56
31110	
31110	06.04.2018 7:56	06.04.2018 7:56
31110 --> 12204	06.04.2018 7:56	31110	06.04.2018 7:56	06.04.2018 7:56
31110 --> 12204 --> 3411	06.04.2018 7:56	31110	06.04.2018 7:56	06.04.2018 7:56
15286	
15286	06.04.2018 8:03	06.04.2018 8:03
15286 --> 13436	06.04.2018 8:03	15286	06.04.2018 8:03	06.04.2018 8:03
18662	
18662	06.04.2018 8:04	06.04.2018 8:04
18662 --> 13436	06.04.2018 8:04	18662	06.04.2018 8:04	06.04.2018 8:04
22066	
22066	06.04.2018 8:04	06.04.2018 8:04
22066 --> 13436	06.04.2018 8:04	22066	06.04.2018 8:04	06.04.2018 8:04
28885	
28885	06.04.2018 9:02	06.04.2018 9:02
28885 --> 22676	06.04.2018 9:02	28885	06.04.2018 9:02	06.04.2018 9:02

as you can see the PID 10219 is the cause of cascade locks

4) also you can check what that PID do before locking

[postgres@p38rmisdb01 postgres]$ grep -i ‘\[10219\]’ postgresql-2018-04-06.log

ps: thx to Amir for material !

PostgreSQL explain plan operations

PostgreSQL explain plan operations published on Комментариев к записи PostgreSQL explain plan operations нет

to be continued

> – what does “Bitmap Heap Scan” phase do?

A plain indexscan fetches one tuple-pointer at a time from the index,
and immediately visits that tuple in the table. A bitmap scan fetches
all the tuple-pointers from the index in one go, sorts them using an
in-memory “bitmap” data structure, and then visits the table tuples in
physical tuple-location order. The bitmap scan improves locality of
reference to the table at the cost of more bookkeeping overhead to
manage the “bitmap” data structure — and at the cost that the data
is no longer retrieved in index order, which doesn’t matter for your
query but would matter if you said ORDER BY.

> – what is “Recheck condition” and why is it needed?

If the bitmap gets too large we convert it to “lossy” style, in which we
only remember which pages contain matching tuples instead of remembering
each tuple individually. When that happens, the table-visiting phase
has to examine each tuple on the page and recheck the scan condition to
see which tuples to return.

> – why are proposed “width” fields in the plan different between the two
> plans?

Updated statistics about average column widths, presumably.

> (actually, a nice explanation what exactly are those widths would also
> be nice :) )

Sum of the average widths of the columns being fetched from the table.

> – I thought “Bitmap Index Scan” was only used when there are two or more
> applicable indexes in the plan, so I don’t understand why is it used
> now?

True, we can combine multiple bitmaps via AND/OR operations to merge
results from multiple indexes before visiting the table … but it’s
still potentially worthwhile even for one index. A rule of thumb is
that plain indexscan wins for fetching a small number of tuples, bitmap
scan wins for a somewhat larger number of tuples, and seqscan wins if
you’re fetching a large percentage of the whole table.

regards, tom lane

https://www.postgresql.org/message-id/12553.1135634231@sss.pgh.pa.us

Postgresql how-to find top query for last 15 seconds

Postgresql how-to find top query for last 15 seconds published on Комментариев к записи Postgresql how-to find top query for last 15 seconds нет
-- Snapshot pg_stat_statements into a work table, then wait 15 seconds
-- so a follow-up query can diff the counters against this baseline.
DROP TABLE IF EXISTS pg_stat_statements_temp;

CREATE TABLE pg_stat_statements_temp AS
SELECT
    d.datname,
    u.usename,
    s.queryid,
    round(s.total_time::numeric, 2) AS total_time,
    s.calls,
    round(s.mean_time::numeric, 2) AS mean,
    now() AS cur_date
FROM pg_stat_statements AS s
JOIN pg_database AS d ON d.oid = s.dbid
JOIN pg_user AS u ON u.usesysid = s.userid;

-- let the counters accumulate for the measurement window
SELECT pg_sleep(15);



 with stat as (SELECT  d.datname,u.usename, s.queryid,s.query,
round(s.total_time::numeric, 2) AS total_time,
s.calls,
round(s.mean_time::numeric, 2) AS mean
FROM    pg_stat_statements s
    join pg_database d on s.dbid=d.oid
    join pg_user u on s.userid=u.usesysid  )
    select  s.datname,s.usename,s.queryid,regexp_replace(s.query, E'[\n\r]+', ' ', 'g' )::varchar(80) AS short_query,
    s.total_time-t.total_time as time,
    s.calls-t.calls as calls,
    s.mean -t.mean as mean, 
    round( 
      (100 * (s.total_time-t.total_time) /sum(s.total_time-t.total_time) OVER ()
            )::numeric, 2)     as cpu,
    now()-t.cur_date as diff_time, 
CASE WHEN s.calls-t.calls >0  THEN 
     (s.calls-t.calls )/(extract ( second from now()-t.cur_date) + extract ( minutes from now()-t.cur_date)*60 + extract ( minutes from now()-t.cur_date)*60*60 ) 
    else '0' 
    end as tps
     from stat s
    	join pg_stat_statements_temp t on s.datname=t.datname and s.usename=t.usename and s.queryid=t.queryid
    	order by cpu desc limit 20;

postgresql top 10 tables by io operations

postgresql top 10 tables by io operations published on Комментариев к записи postgresql top 10 tables by io operations нет

helps to keep focus on “hot” tables
top 10 tables by sum of IO operations:

-- Top 10 user tables by share of row-level write operations
-- (inserts + non-HOT updates + deletes); helps spot "hot" tables.
-- Tables with fewer than ~100 such operations are filtered out as noise.
SELECT
    st.schemaname || '.' || st.relname,
    round(
        100 * sum(coalesce(st.n_tup_ins, 0) + coalesce(st.n_tup_upd, 0)
                  - coalesce(st.n_tup_hot_upd, 0) + coalesce(st.n_tup_del, 0))
            / sum(coalesce(st.n_tup_ins, 0) + coalesce(st.n_tup_upd, 0)
                  - coalesce(st.n_tup_hot_upd, 0) + coalesce(st.n_tup_del, 0)) OVER ()
        , 2) AS pct_io_ops
FROM pg_stat_user_tables AS st
JOIN pg_class AS c ON c.oid = st.relid
LEFT JOIN pg_tablespace AS t ON t.oid = c.reltablespace
WHERE coalesce(n_tup_ins, 0) + coalesce(n_tup_upd, 0)
      - coalesce(n_tup_hot_upd, 0) + coalesce(n_tup_del, 0) > 100
GROUP BY st.schemaname || '.' || st.relname,
         st.n_tup_ins, st.n_tup_upd, st.n_tup_hot_upd, st.n_tup_del
ORDER BY pct_io_ops DESC
LIMIT 10;

top 10 tables by index scans:

   -- Top 10 tables by share of all index scans on user indexes.
   SELECT
       pg_stat_all_tables.schemaname || '.' || relid::regclass AS table,
       round(
           100 * sum(pg_stat_user_indexes.idx_scan)
               / sum(pg_stat_user_indexes.idx_scan) OVER ()
           , 2) AS pct_indx_scans
   FROM pg_stat_user_indexes
   JOIN pg_index USING (indexrelid)
   JOIN pg_stat_all_tables USING (relid)
   GROUP BY pg_stat_all_tables.schemaname || '.' || relid::regclass,
            pg_stat_user_indexes.idx_scan
   ORDER BY pct_indx_scans DESC
   LIMIT 10;

postgresql repmgr how-to

postgresql repmgr how-to published on 1 комментарий к записи postgresql repmgr how-to

For better result you should install database by this document
how-to install postgres database

i have 4 node config, 2 nodes in one datacenter and 2 in other

config will look like this:

DR1     DR2
DB01--->DB11
|       |
 \_DB02  \_DB12

create config file:

node1 /u01/postgres/9.5/main/conf/repmgr.conf

node=1
cluster=db_cluster
node_name=pg1
conninfo='host=pg01 user=repmgr dbname=postgres password=q1'
pg_bindir=/usr/pgsql-9.5/bin/
master_response_timeout=5
reconnect_attempts=2
reconnect_interval=2
failover=manual
promote_command='/usr/pgsql-9.5/bin/repmgr standby promote -f /u01/postgres/9.5/main/conf/repmgr.conf'
follow_command='/usr/pgsql-9.5/bin/repmgr standby follow -f /u01/postgres/9.5/main/conf/repmgr.conf'
use_replication_slots=1

node2 /u01/postgres/9.5/main/conf/repmgr.conf

node=2
cluster=db_cluster
node_name=pg2
conninfo='host=pg02 user=repmgr dbname=postgres password=q1'
pg_bindir=/usr/pgsql-9.5/bin/
master_response_timeout=5
reconnect_attempts=2
reconnect_interval=2
failover=manual
promote_command='/usr/pgsql-9.5/bin/repmgr standby promote -f /u01/postgres/9.5/main/conf/repmgr.conf'
follow_command='/usr/pgsql-9.5/bin/repmgr standby follow -f /u01/postgres/9.5/main/conf/repmgr.conf'
use_replication_slots=1

node3 /u01/postgres/9.5/main/conf/repmgr.conf

node=3
cluster=db_cluster
node_name=pg3
conninfo='host=pg03 user=repmgr dbname=postgres password=q1'
pg_bindir=/usr/pgsql-9.5/bin/
master_response_timeout=5
reconnect_attempts=2
reconnect_interval=2
failover=manual
promote_command='/usr/pgsql-9.5/bin/repmgr standby promote -f /u01/postgres/9.5/main/conf/repmgr.conf'
follow_command='/usr/pgsql-9.5/bin/repmgr standby follow -f /u01/postgres/9.5/main/conf/repmgr.conf'
use_replication_slots=1

node4 /u01/postgres/9.5/main/conf/repmgr.conf

node=4
cluster=db_cluster
node_name=pg4
conninfo='host=pg04 user=repmgr dbname=postgres password=q1'
pg_bindir=/usr/pgsql-9.5/bin/
master_response_timeout=5
reconnect_attempts=2
reconnect_interval=2
failover=manual
promote_command='/usr/pgsql-9.5/bin/repmgr standby promote -f /u01/postgres/9.5/main/conf/repmgr.conf'
follow_command='/usr/pgsql-9.5/bin/repmgr standby follow -f /u01/postgres/9.5/main/conf/repmgr.conf'
use_replication_slots=1
upstream_node=3

also we need to setup passwordless auth of repmgr user
create a .pgpass file in the postgres user's home directory with 0600 permissions:

All nodes:

cat ~/.pgpass
*:*:*:repmgr:q1
chmod 600 ~/.pgpass

create repmgr user

createuser -s repmgr

also we need to allow repmgr to connect to database with replication role
add this lines to your pg_hba.conf file on all nodes:

cat /u01/postgres/9.5/main/conf/pg_hba.conf | tail -3

local   replication,postgres        repmgr                             trust
host    replication,postgres        repmgr      127.0.0.1/32           trust
host    replication,postgres        repmgr      10.10.10.0/24        md5

Here I’m allowed to connect from all network, but if you want, you can limit this by certain machines.

Next step is configure repmgr

Create Master:

[postgres@pg1 ~]$  /usr/pgsql-9.5/bin/repmgr -f /u01/postgres/9.5/main/conf/repmgr.conf master register
[2016-12-05 19:06:36] [NOTICE] master node correctly registered for cluster db_cluster with id 1 (conninfo: host=pg1 user=repmgr dbname=postgres password=q1)

create standbys
on host pg2 and pg3:

/usr/pgsql-9.5/bin/repmgr  -c -h pg1 -d postgres -U repmgr -D $PGDATA -f /u01/postgres/9.5/main/conf/repmgr.conf standby clone

start standby:

 pg_ctl start -o "-c config_file=/u01/postgres/9.5/main/conf/postgresql.conf"

and register standby:

/usr/pgsql-9.5/bin/repmgr -f /u01/postgres/9.5/main/conf/repmgr.conf standby register

create the last standby (which is going to be a cascade standby); we create this standby from the PG3 database

/usr/pgsql-9.5/bin/repmgr  -c -h pg3 -d postgres -U repmgr -D $PGDATA -f /u01/postgres/9.5/main/conf/repmgr.conf standby clone

start and register database:

pg_ctl start -o "-c config_file=/u01/postgres/9.5/main/conf/postgresql.conf"
/usr/pgsql-9.5/bin/repmgr -f /u01/postgres/9.5/main/conf/repmgr.conf standby register

final result is

postgres=# select * from repmgr_db_cluster.repl_nodes ;
 id |  type   | upstream_node_id |  cluster   | name |                            conninfo                            |   slot_name   | priority | active
----+---------+------------------+------------+------+----------------------------------------------------------------+---------------+----------+--------
  1 | master  |                  | db_cluster | pg1  | host=pg1 user=repmgr dbname=postgres password=xxxxxxxxxxxxxxxx | repmgr_slot_1 |      100 | t
  2 | standby |                1 | db_cluster | pg2  | host=pg2 user=repmgr dbname=postgres password=xxxxxxxxxxxxxxxx | repmgr_slot_2 |      100 | t
  3 | standby |                1 | db_cluster | pg3  | host=pg3 user=repmgr dbname=postgres password=xxxxxxxxxxxxxxxx | repmgr_slot_3 |      100 | t
  4 | standby |                3 | db_cluster | pg4  | host=pg4 user=repmgr dbname=postgres password=xxxxxxxxxxxxxxxx | repmgr_slot_4 |      100 | t

postgresql install how-to

postgresql install how-to published on 2 комментария к записи postgresql install how-to

I will split material of how-to install pgpool on several logical parts
here is the first one:

start install postgres with packages:

yum install  postgresql95-devel postgresql95 postgresql95-test postgresql95-libs postgresql95-server postgresql95-contrib pgbouncer.x86_64 repmgr95* pgpool-II-95 pgpool-II-95-extensions

for 9.6

yum install  postgresql96-devel postgresql96 postgresql96-test postgresql96-libs postgresql96-server postgresql96-contrib pgbouncer.x86_64 repmgr96*

for 10

yum install  postgresql10-devel postgresql10 postgresql10-test postgresql10-libs postgresql10-server postgresql10-contrib pgbouncer.x86_64 

create directories and change home directory of postgres user:

mkdir /var/log/postgres
chown postgres: /var/log/postgres


usermod -m -d /u01 postgres
chown postgres: /u01 -R
su - postgres
cp /etc/skel/.bash* $HOME
mkdir -p /u01/postgres/main/pgdata/
mkdir -p /u01/postgres/main/pgdatats/
mkdir -p /u01/postgres/main/recovery
mkdir -p /u01/postgres/main/conf
mkdir /u01/postgres/main/recovery/backup
mkdir /u01/postgres/main/recovery/walarchive
mkdir /u01/postgres/main/recovery/dmp

configure bash_profile:

echo "export PGDATA=/u01/postgres/main/pgdata ">> ~/.bash_profile
echo "export PGTBS=/u01/postgres/main/pgdatats ">> ~/.bash_profile
echo "export PGCFG=/u01/postgres/main/conf ">> ~/.bash_profile

echo "export HISTTIMEFORMAT='%Y-%m-%d %H:%M:%S ' ">> ~/.bash_profile
echo "export HISTFILESIZE=9000 ">> ~/.bash_profile
echo "export PROMPT_COMMAND='history -a' ">> ~/.bash_profile
echo "export PS1='\[\e]0;\u@\h: \w\a\][\u@\h \W]$ ' ">> ~/.bash_profile

create dummy database:

/usr/pgsql-9.5/bin/initdb  --locale=ru_RU.utf8 --data-checksums

configure init scripts ( or systemd daemon ) to work with our external config directory:
RH6:

/etc/init.d/postgresql-9.5
PGDATA=/u01/postgres/main/pgdata
PGOPTS="-c config_file=/u01/postgres/main/conf/postgresql.conf"

RH7:

vim /usr/lib/systemd/system/postgresql-9.5.service
# Note: changing PGDATA will typically require adjusting SELinux
# Note: do not use a PGDATA pathname containing spaces, or you will
Environment=PGDATA=/u01/postgres/main/pgdata
Environment=PGCONF=/u01/postgres/main/conf
ExecStartPre=/usr/pgsql-10/bin/postgresql-10-check-db-dir ${PGDATA}
ExecStart=/usr/pgsql-10/bin/postmaster -D ${PGDATA} -c config_file=${PGCONF}/postgresql.conf

don’t forget about:

systemctl daemon-reload

Create starter database:

/usr/pgsql-9.5/bin/initdb --lc-collate=ru_RU.UTF8 --locale=en_US.UTF8 --encoding=UTF8
/usr/pgsql-9.5/bin/pg_ctl stop

Move config file /u01/postgres/main/conf/postgresql.conf to $PGCFG directory and modify it :

data_directory='/u01/postgres/main/pgdata'
hba_file='/u01/postgres/main/conf/pg_hba.conf'
ident_file='/u01/postgres/main/conf/pg_ident.conf'

enable database startup:

chkconfig postgresql-9.5 on
cat postgresql.conf | egrep -v "(^#|^\s*$|^\s* *#)"

[code]
data_directory=’/u01/postgres/main/pgdata’
hba_file=’/u01/postgres/main/conf/pg_hba.conf’
ident_file=’/u01/postgres/main/conf/pg_ident.conf’
listen_addresses = ‘*’ # what IP address(es) to listen on;
max_connections = 500 # (change requires restart)
shared_buffers = 8GB # min 128kB
work_mem = 64MB # min 64kB
maintenance_work_mem = 2GB # min 1MB
dynamic_shared_memory_type = posix # the default is the first option
wal_level = hot_standby # minimal, archive, hot_standby, or logical
max_wal_size = 3GB
min_wal_size = 1GB
checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 – 1.0
archive_mode = on # enables archiving; off, on, or always
archive_command = ‘cp -i %p /u01/postgres/main/recovery/walarchive/%f < /dev/null’ # command to use to archive a logfile segment
max_wal_senders = 4 # max number of walsender processes
max_replication_slots = 4 # max number of replication slots
hot_standby = on # "on" allows queries during recovery
hot_standby_feedback = on # send info from standby to prevent
effective_cache_size = 24GB
log_destination = ‘stderr’ # Valid values are combinations of
logging_collector = on # Enable capturing of stderr and csvlog
log_directory = ‘/var/log/postgres’ # directory where log files are written,
log_filename = ‘postgresql-%Y-%m-%d.log’ # log file name pattern,
log_truncate_on_rotation = on # If on, an existing log file with the
log_rotation_age = 1d # Automatic rotation of logfiles will
log_rotation_size = 0 # Automatic rotation of logfiles will
log_min_duration_statement = 300 # -1 is disabled, 0 logs all statements
log_checkpoints = on
log_connections = on
log_disconnections = on
log_line_prefix = ‘%t [%p]: [%l-1] db=%d,appname=%q,user=%u,client=%h’ # special values:
log_statement = ‘ddl’ # none, ddl, mod, all
log_temp_files = 0 # log temporary files equal or larger
log_timezone = ‘Europe/Moscow’
cluster_name = ‘p00nrd’ # added to process titles if nonempty
track_io_timing = on
autovacuum_vacuum_scale_factor = 0.02 # fraction of table size before vacuum
autovacuum_analyze_scale_factor = 0.01 # fraction of table size before analyze
autovacuum_vacuum_cost_delay = 30ms # default vacuum cost delay for
datestyle = ‘iso, mdy’
timezone = ‘Europe/Moscow’
lc_messages = ‘en_US.UTF8’ # locale for system error message
lc_monetary = ‘en_US.UTF8’ # locale for monetary formatting
lc_numeric = ‘en_US.UTF8’ # locale for number formatting
lc_time = ‘en_US.UTF8’ # locale for time formatting
default_text_search_config = ‘pg_catalog.english’
shared_preload_libraries = ‘pg_stat_statements’ # (change requires restart)
pg_stat_statements.max=10000
pg_stat_statements.track=top
pg_stat_statements.track_utility=off
[/code]

[свернуть]

Upload csv files to postgresql database

Upload csv files to postgresql database published on Комментариев к записи Upload csv files to postgresql database нет

here are my drafts on how to automate uploading a lot of csv files to the database (use text fields because there is no need to match all the column types in this case)

 # Generate CREATE TABLE + COPY statements for every *.csv in the current
 # directory, typing every column as text (no need to match exact types).
 # BUGFIX: the original emitted "create table $i (" with the raw file name
 # (including the ".csv" extension), producing an invalid table identifier,
 # while the copy line stripped it with awk. The extension is now stripped
 # once and used consistently; globbing replaces the fragile $(ls ...).
 for f in *.csv; do t="${f%.csv}"; echo "create table $t ("; echo "$(head -1 "$f" | sed 's/,/ text,\n/g') text );"; echo "copy $t from '/u01/import/$f' DELIMITER ',' CSV HEADER;"; done

postgresql query to find duplicate indexes

postgresql query to find duplicate indexes published on Комментариев к записи postgresql query to find duplicate indexes нет
     
-- Find groups of duplicate indexes: same table and identical key definition
-- (columns, operator classes, expressions, predicate), considering only
-- indexes larger than 100 MB; reports up to four duplicates per group.
SELECT sub.table, pg_size_pretty(SUM(pg_relation_size(idx))::BIGINT) AS SIZE,
     (array_agg(idx))[1] AS idx1, (array_agg(idx))[2] AS idx2,
     (array_agg(idx))[3] AS idx3, (array_agg(idx))[4] AS idx4 
            FROM (  SELECT indrelid::regclass as table,indexrelid::regclass AS idx, (indrelid::text ||E'\n'|| indclass::text ||E'\n'|| indkey::text ||E'\n'|| 
                    COALESCE(indexprs::text,'')||E'\n' || COALESCE(indpred::text,'')) AS KEY 
                    FROM pg_index
                     where  pg_relation_size(indexrelid::regclass)>100*1024*1024 ) sub 
GROUP BY sub.table, KEY HAVING COUNT(*)>1 
ORDER BY SUM(pg_relation_size(idx)) DESC;

sample output

       table       |  size  |            idx1             |                idx2                | idx3 | idx4
-------------------+--------+-----------------------------+------------------------------------+------+------
 mdm_record_column | 797 MB | mdm_record_column_column_fk | mdm_record_column_record_fk        |      |
 fin_bill_generate | 300 MB | fin_bill_generate_fk        | fin_bill_generate_spec_item_id_idx |      |

it is also useful to look at usage statistics for these indexes

-- Usage statistics and definitions for every index of a given table;
-- idx_scan = 0 suggests the index is a candidate for removal.
select
    relid::regclass as table,
    indexrelid::regclass as index,
    pg_size_pretty(pg_relation_size(indexrelid::regclass)) as index_size,
    idx_tup_read,
    idx_tup_fetch,
    idx_scan,
    pg_get_indexdef(pg_index.indexrelid) as indexdef
from pg_stat_user_indexes
join pg_index using (indexrelid)
where (relid::regclass)::text = 'mdm_record_column'
order by idx_scan desc;

       table       |              index              | index_size | idx_tup_read | idx_tup_fetch | idx_scan |                                         indexdef
-------------------+---------------------------------+------------+--------------+---------------+----------+-------------------------------------------------------------------------------------------
 mdm_record_column | mdm_record_column_record_fk     | 399 MB     |       758024 |        758024 |     2992 | CREATE INDEX mdm_record_column_record_fk ON mdm_record_column USING btree (column_id)
 mdm_record_column | mdm_record_column_record_id_idx | 399 MB     |         1922 |          1922 |      442 | CREATE INDEX mdm_record_column_record_id_idx ON mdm_record_column USING btree (record_id)
 mdm_record_column | mdm_record_column_pk            | 399 MB     |            0 |             0 |        0 | CREATE UNIQUE INDEX mdm_record_column_pk ON mdm_record_column USING btree (id)
 mdm_record_column | mdm_record_column_column_fk     | 399 MB     |            0 |             0 |        0 | CREATE INDEX mdm_record_column_column_fk ON mdm_record_column USING btree (column_id)

postgresql cat effective non default parameters from config file

postgresql cat effective non default parameters from config file published on Комментариев к записи postgresql cat effective non default parameters from config file нет
 cat postgresql.conf |egrep -v "^[[:blank:]]*#" | grep -v '^$'
data_directory='/u01/postgres/9.5/main/pgdata'
hba_file='/u01/postgres/9.5/main/conf/pg_hba.conf'
ident_file='/u01/postgres/9.5/main/conf/pg_ident.conf'
listen_addresses = '*'		# what IP address(es) to listen on;
max_connections = 300			# (change requires restart)
shared_buffers = 4096MB			# min 128kB
work_mem = 256MB				# min 64kB
maintenance_work_mem = 512MB		# min 1MB
dynamic_shared_memory_type = posix	# the default is the first option
checkpoint_timeout = 15min		# range 30s-1h
max_wal_size = 10GB
min_wal_size = 1GB
checkpoint_completion_target = 0.9	# checkpoint target duration, 0.0 - 1.0
archive_mode = on		# enables archiving; off, on, or always
archive_command = 'cp -i %p /u01/postgres/9.5/main/recovery/walarchive/%f </dev/null'		# command to use to archive a logfile segment
max_wal_senders = 3		# max number of walsender processes
max_replication_slots = 2	# max number of replication slots
hot_standby = on			# "on" allows queries during recovery
effective_cache_size = 8GB
log_destination = 'stderr'		# Valid values are combinations of
logging_collector = on			# Enable capturing of stderr and csvlog
log_directory = 'pg_log'		# directory where log files are written,
log_filename = 'postgresql-%Y-%m-%d.log'	# log file name pattern,
log_truncate_on_rotation = on		# If on, an existing log file with the
log_rotation_age = 1d			# Automatic rotation of logfiles will
log_rotation_size = 0			# Automatic rotation of logfiles will
log_min_duration_statement = 300	# -1 is disabled, 0 logs all statements
log_checkpoints = on
log_connections = on
log_disconnections = on
log_line_prefix = '%t [%p]: [%l-1] db=%d,appname=%a,user=%u,client=%h '			# special values:
log_lock_waits = on			# log lock waits >= deadlock_timeout
log_temp_files = 0			# log temporary files equal or larger
log_timezone = 'Europe/Moscow'
autovacuum_max_workers = 5		# max number of autovacuum subprocesses
autovacuum_vacuum_scale_factor = 0.01	# fraction of table size before vacuum
autovacuum_analyze_scale_factor = 0.05	# fraction of table size before analyze
datestyle = 'iso, mdy'
timezone = 'Europe/Moscow'
lc_messages = 'en_US.utf8'			# locale for system error message
lc_monetary = 'en_US.utf8'			# locale for monetary formatting
lc_numeric = 'en_US.utf8'			# locale for number formatting
lc_time = 'en_US.utf8'				# locale for time formatting
default_text_search_config = 'pg_catalog.english'

Primary Sidebar