Quickest way to get info about statistics-gathering settings. Oracle:
select * from sys.optstat_hist_control$
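sys.optstat_hist_control$ is an internal table; a supported alternative for checking statistics preferences is dbms_stats.get_prefs. A minimal sketch (the preference names below are just examples):

select dbms_stats.get_prefs('STALE_PERCENT')    as stale_percent,
       dbms_stats.get_prefs('ESTIMATE_PERCENT') as estimate_percent,
       dbms_stats.get_prefs('METHOD_OPT')       as method_opt
  from dual;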
-- PostgreSQL: per-column statistics (most common values and their frequencies)
select attname,
       inherited,
       n_distinct,
       array_to_string(most_common_vals, E';') as most_common_vals,
       most_common_freqs
  from pg_stats
 where tablename = 'table_name';
to be continued
> — what does «Bitmap Heap Scan» phase do?
A plain indexscan fetches one tuple-pointer at a time from the index,
and immediately visits that tuple in the table. A bitmap scan fetches
all the tuple-pointers from the index in one go, sorts them using an
in-memory «bitmap» data structure, and then visits the table tuples in
physical tuple-location order. The bitmap scan improves locality of
reference to the table at the cost of more bookkeeping overhead to
manage the «bitmap» data structure — and at the cost that the data
is no longer retrieved in index order, which doesn’t matter for your
query but would matter if you said ORDER BY.
> — what is «Recheck condition» and why is it needed?
If the bitmap gets too large we convert it to «lossy» style, in which we
only remember which pages contain matching tuples instead of remembering
each tuple individually. When that happens, the table-visiting phase
has to examine each tuple on the page and recheck the scan condition to
see which tuples to return.
> — why are proposed «width» fields in the plan different between the two
> plans?
Updated statistics about average column widths, presumably.
> (actually, a nice explanation what exactly are those widths would also
> be nice :) )
Sum of the average widths of the columns being fetched from the table.
> — I thought «Bitmap Index Scan» was only used when there are two or more
> applicable indexes in the plan, so I don’t understand why is it used
> now?
True, we can combine multiple bitmaps via AND/OR operations to merge
results from multiple indexes before visiting the table … but it’s
still potentially worthwhile even for one index. A rule of thumb is
that plain indexscan wins for fetching a small number of tuples, bitmap
scan wins for a somewhat larger number of tuples, and seqscan wins if
you’re fetching a large percentage of the whole table.
regards, tom lane
https://www.postgresql.org/message-id/12553.1135634231@sss.pgh.pa.us
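To see both the «Bitmap Heap Scan» and the «Recheck Cond» lines in practice, a minimal sketch (table and index names are made up; whether the planner actually picks a bitmap scan depends on data volume and statistics):

create table demo_t as
  select g as id, (random() * 1000)::int as val
    from generate_series(1, 100000) g;
create index demo_t_val_idx on demo_t (val);
analyze demo_t;

explain select * from demo_t where val between 10 and 200;
-- expected plan shape (costs omitted):
--   Bitmap Heap Scan on demo_t
--     Recheck Cond: ((val >= 10) AND (val <= 200))
--     ->  Bitmap Index Scan on demo_t_val_idx
--           Index Cond: ((val >= 10) AND (val <= 200))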
-- top 50 statements from AWR (roughly the last week), filtered by average elapsed seconds per execution (&1), ordered by execs*avg_sec
with stats as (
  select /*+ materialize */ *
    from ( select parsing_schema_name, t.start#, t.end#, t.sql_id, t.plan_hash_value, t.execs, t.avg_sec, s.sql_text
             from ( select s.parsing_schema_name,
                           min(begin_interval_time) start#,
                           max(begin_interval_time) end#,
                           sql_id,
                           plan_hash_value,
                           sum(nvl(executions_delta,0)) execs,
                           round((sum(elapsed_time_delta)/(sum(executions_delta)))/1000000) avg_sec
                      from DBA_HIST_SQLSTAT S, DBA_HIST_SNAPSHOT SS
                     where 1=1
                       and ss.begin_interval_time > trunc(sysdate-4,'iw')
                       and ss.snap_id = S.snap_id
                       and ss.instance_number = S.instance_number
                       and executions_delta > 0
                       and (elapsed_time_delta/decode(nvl(executions_delta,0),0,1,executions_delta))/1000000 > &1
                     group by sql_id, plan_hash_value, parsing_schema_name
                  ) t
             join dba_hist_sqltext s on s.sql_id = t.sql_id
            order by execs*avg_sec desc
         ) tt
   where rownum <= 50
)
select *
  from stats s
 where parsing_schema_name not like 'SYS%'
   and s.sql_text not like '%/* DS_SVC */%'
   and s.sql_text not like '%_job_proc%'
   and s.sql_text not like '%SQL Analyze%'
   and lower(s.sql_text) not like '%dbms_feature%'
   and s.sql_text not like '%SYS_DBA_SEGS%'
   and lower(s.sql_text) not like '%v$sys%'
   and lower(s.sql_text) not like '%dba_audit_trail%'
   and lower(s.sql_text) not like '%no_index(mytab)%';
-- PostgreSQL: per-statement share of total time over a ~15-second window:
-- snapshot pg_stat_statements into a temp table, wait, then diff against the live view
DROP TABLE IF EXISTS pg_stat_statements_temp;

create table pg_stat_statements_temp as
SELECT d.datname, u.usename, s.queryid,
       round(s.total_time::numeric, 2) AS total_time,
       s.calls,
       round(s.mean_time::numeric, 2) AS mean,
       now() as cur_date
  FROM pg_stat_statements s
  join pg_database d on s.dbid = d.oid
  join pg_user u on s.userid = u.usesysid;

select pg_sleep(15);

with stat as (
  SELECT d.datname, u.usename, s.queryid, s.query,
         round(s.total_time::numeric, 2) AS total_time,
         s.calls,
         round(s.mean_time::numeric, 2) AS mean
    FROM pg_stat_statements s
    join pg_database d on s.dbid = d.oid
    join pg_user u on s.userid = u.usesysid
)
select s.datname, s.usename, s.queryid,
       regexp_replace(s.query, E'[\n\r]+', ' ', 'g')::varchar(80) AS short_query,
       s.total_time - t.total_time as time,
       s.calls - t.calls as calls,
       s.mean - t.mean as mean,
       round((100 * (s.total_time - t.total_time) / sum(s.total_time - t.total_time) OVER ())::numeric, 2) as cpu,
       now() - t.cur_date as diff_time,
       CASE WHEN s.calls - t.calls > 0
            THEN (s.calls - t.calls) / (extract(second from now() - t.cur_date)
                                        + extract(minutes from now() - t.cur_date) * 60
                                        + extract(hours from now() - t.cur_date) * 60 * 60)
            else 0
       end as tps
  from stat s
  join pg_stat_statements_temp t
    on s.datname = t.datname and s.usename = t.usename and s.queryid = t.queryid
 order by cpu desc
 limit 20;
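Note that these queries assume the pg_stat_statements extension is already active; a minimal setup sketch (the reset call is optional and just starts measuring from a clean slate):

-- postgresql.conf: shared_preload_libraries = 'pg_stat_statements'  (needs a restart)
create extension if not exists pg_stat_statements;
select pg_stat_statements_reset();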
Starting with Oracle 12c it is possible to get detailed information about the statistics-gathering job.
You may find it in EM, or use the dbms_stats.report_* functions to get the info.
Details at https://docs.oracle.com/database/121/TGSQL/tgsql_astat.htm#TGSQL95102
I use it this way:
variable mystatsreport clob
begin
  :mystatsreport := dbms_stats.report_stats_operations(
                      since        => SYSTIMESTAMP - 3,
                      until        => SYSTIMESTAMP,
                      detail_level => 'ALL',
                      format       => 'TEXT');
end;
/
print mystatsreport
variable mystatsreport clob
begin
  :mystatsreport := dbms_stats.report_single_stats_operation(
                      opid         => 11898,
                      detail_level => 'TYPICAL',
                      format       => 'TEXT');
end;
/
print mystatsreport
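The opid value passed to report_single_stats_operation can be looked up in the operations history; a sketch, assuming the dba_optstat_operations view (the exact column list varies a bit between versions):

select id, operation, target, start_time, end_time, status
  from dba_optstat_operations
 where start_time > systimestamp - 3
 order by start_time desc;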
helps to keep focus on «hot» tables
top 10 tables by share of I/O operations (inserts, updates, deletes):
SELECT st.schemaname||'.'||st.relname,
       round(100 * sum(coalesce(st.n_tup_ins,0) + coalesce(st.n_tup_upd,0) - coalesce(st.n_tup_hot_upd,0) + coalesce(st.n_tup_del,0))::numeric
                 / sum(coalesce(st.n_tup_ins,0) + coalesce(st.n_tup_upd,0) - coalesce(st.n_tup_hot_upd,0) + coalesce(st.n_tup_del,0)) over (), 2) as pct_io_ops
  from pg_stat_user_tables st
  JOIN pg_class c ON c.oid = st.relid
  LEFT JOIN pg_tablespace t ON t.oid = c.reltablespace
 WHERE coalesce(n_tup_ins,0) + coalesce(n_tup_upd,0) - coalesce(n_tup_hot_upd,0) + coalesce(n_tup_del,0) > 100
 group by st.schemaname||'.'||st.relname, st.n_tup_ins, st.n_tup_upd, st.n_tup_hot_upd, st.n_tup_del
 order by pct_io_ops desc
 limit 10;
top 10 tables by index scans:
SELECT pg_stat_all_tables.schemaname||'.'||relid::regclass AS table,
       round(100 * sum(pg_stat_user_indexes.idx_scan)::numeric
                 / sum(pg_stat_user_indexes.idx_scan) over (), 2) pct_indx_scans
  FROM pg_stat_user_indexes
  JOIN pg_index USING (indexrelid)
  join pg_stat_all_tables using (relid)
 group by pg_stat_all_tables.schemaname||'.'||relid::regclass, pg_stat_user_indexes.idx_scan
 order by 2 desc
 limit 10;
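Both rankings use cumulative counters, i.e. activity since the statistics were last reset; if a fresh window is needed, the counters for the current database can be cleared (a sketch; note that autovacuum scheduling also relies on these counters):

select pg_stat_reset();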
I plan to make a script with the time range as input values.
19:22:41 (1)system@xxxxx> select * from table (dbms_workload_repository.ash_global_report_html( 4281370344,1,timestamp'2016-05-05 13:50:00',timestamp'2016-05-05 14:05:00') );
open in browser:
add for Mac OS X:
host open %SQLPATH%\tmp\output_&_connect_identifier..html
add for Linux:
host firefox %SQLPATH%\tmp\output_&_connect_identifier..html --linux
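For reference, a sketch of the wrapper script those host commands assume; the spool file name is hypothetical and the SET options are my assumption:

set pagesize 0
set linesize 32767
set trimspool on
set heading off
set feedback off
spool output_ash.html
select * from table (dbms_workload_repository.ash_global_report_html( 4281370344, 1, timestamp'2016-05-05 13:50:00', timestamp'2016-05-05 14:05:00') );
spool off
host open output_ash.html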
-- statements from AWR with average elapsed time per execution over 5 seconds, ordered by executions
select t.start#, t.end#, t.sql_id, t.plan_hash_value, t.execs, t.avg_sec, s.sql_text
  from ( select min(begin_interval_time) start#,
                max(begin_interval_time) end#,
                sql_id,
                plan_hash_value,
                sum(nvl(executions_delta,0)) execs,
                round((sum(elapsed_time_delta)/(sum(executions_delta)))/1000000) avg_sec
           from DBA_HIST_SQLSTAT S, DBA_HIST_SNAPSHOT SS
          where 1=1
            and ss.snap_id = S.snap_id
            and ss.instance_number = S.instance_number
            and executions_delta > 0
            and (elapsed_time_delta/decode(nvl(executions_delta,0),0,1,executions_delta))/1000000 > 5
          group by sql_id, plan_hash_value
       ) t
  join dba_hist_sqltext s on s.sql_id = t.sql_id
 order by execs desc;
save to my blog ( original )
To calculate TPS for a given sql_id we can use sql_exec_id, ASH, and Excel: sql_exec_id increments with every execution of the statement on an instance, so MAX(sql_exec_id) - MIN(sql_exec_id) within a time bucket approximates the number of executions in that bucket.
SELECT TO_CHAR(sample_time, 'HH24:MI'),
       inst_id,
       MAX(sql_exec_id) - MIN(sql_exec_id) EXECUTIONS_PER_MINUTE
  FROM gv$active_session_history
 WHERE sql_id = 'b6asbgkghps5h'
   and sample_time > sysdate - 5/24
 group by TO_CHAR(sample_time, 'HH24:MI'), inst_id
 order by 1 asc;
or, to split the results into 10-minute frames:
select date#, "'1'" as first_node, "'2'" as second_node
  from ( SELECT TRUNC(sample_time, 'MI') - MOD(TO_CHAR(sample_time, 'MI'), 10) / (24 * 60) as date#,
                instance_number,
                MAX(sql_exec_id) - MIN(sql_exec_id) EXECUTIONS_PER_10_MINUTE
           FROM gv$active_session_history
          WHERE sql_id = '77qx41mkwcm92'
          group by TRUNC(sample_time, 'MI') - MOD(TO_CHAR(sample_time, 'MI'), 10) / (24 * 60), instance_number
          order by 1 asc
       )
 pivot ( sum(EXECUTIONS_PER_10_MINUTE) for instance_number in ('1', '2') )
 order by date# asc;
one more way to view it:
select *
  from ( select s.instance_number, s.sql_id,
                sum(nvl(s.executions_delta,0)) execs,
                TO_CHAR(ss.begin_interval_time, 'DD.MM.YYYY HH24') date#
                -- sum((buffer_gets_delta/decode(nvl(buffer_gets_delta,0),0,1,executions_delta))) avg_lio
           from DBA_HIST_SQLSTAT S, DBA_HIST_SNAPSHOT SS, dba_hist_sqltext st
          where ss.snap_id = S.snap_id
            and ss.instance_number = S.instance_number
            and executions_delta > 0
            and elapsed_time_delta > 0
            and st.sql_id = s.sql_id
            and s.sql_id = '8xjwqbfwwppuf'
            -- and st.sql_text not like '/* SQL Analyze%'
            -- and s.sql_id in ( select p.sql_id from dba_hist_sql_plan p where p.object_name = 'OPN_HIS' )
            and ss.begin_interval_time > sysdate - 14
          group by TO_CHAR(ss.begin_interval_time, 'DD.MM.YYYY HH24'), s.sql_id, s.instance_number
       )
 pivot ( sum(execs) for instance_number in (1, 2) )
 order by 1;
and the result:
or even this way:
select *
  from ( select s.sql_id,
                sum(nvl(s.executions_delta,0)) execs,
                TO_CHAR(ss.begin_interval_time, 'DD.MM.YYYY HH24') date#
                -- sum((buffer_gets_delta/decode(nvl(buffer_gets_delta,0),0,1,executions_delta))) avg_lio
           from DBA_HIST_SQLSTAT S, DBA_HIST_SNAPSHOT SS, dba_hist_sqltext st
          where ss.snap_id = S.snap_id
            and ss.instance_number = S.instance_number
            and executions_delta > 0
            and elapsed_time_delta > 0
            and st.sql_id = s.sql_id
            -- and st.sql_text not like '/* SQL Analyze%'
            -- and s.sql_id in ( select p.sql_id from dba_hist_sql_plan p where p.object_name = 'OPN_HIS' )
            and ss.begin_interval_time > sysdate - 7
          group by TO_CHAR(ss.begin_interval_time, 'DD.MM.YYYY HH24'), s.sql_id
       )
 pivot ( sum(execs) for sql_id in ('8xjwqbfwwppuf', '14crnjtpxh9aa') )
 order by 1;