create a big table for the tests:
create table bigtab (
    id       number(12,6),
    v1       varchar2(10),
    padding  varchar2(50)
)
nologging          -- just to save a bit of time
/

insert /*+ append ordered full(s1) use_nl(s2) */ into bigtab
select
    3000 + trunc((rownum-1)/500,6),
    to_char(rownum),
    rpad('x',50,'x')
from
    all_objects s1,   -- you'll need the privilege
    all_objects s2
where
    rownum <= 2000000
/
commit;
check the size:
21:07:01 (1)[PDBORCL]c##bushmelev_aa@orcl> @seg_space bigtab
old   2: where segment_name=upper ('&1')
new   2: where segment_name=upper ('bigtab')

OWNER           | SEGMENT_NAME                             | MB_SIZE
--------------- | ---------------------------------------- | -----------
C##BUSHMELEV_AA | BIGTAB                                   | 152.0

Elapsed: 00:00:07.42
23:28:41 (1)[PDBORCL]c##bushmelev_aa@orcl> l
  1  select owner,segment_name,sum(bytes)/1024/1024 as MB_SIZE from dba_extents
  2  where segment_name=upper ('&1')
  3* group by segment_name,owner
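if you don't have the @seg_space script handy, roughly the same number comes straight from dba_segments; a minimal sketch, with the owner and table names as above:

select owner, segment_name, round(bytes/1024/1024,1) as mb_size
from   dba_segments
where  owner = 'C##BUSHMELEV_AA'
and    segment_name = 'BIGTAB';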
ps: descriptions of x$kcbwds and x$kcbwbpd can be found at oracle x$ tables
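as sys the two fixed tables can also simply be described in SQL*Plus (the column lists differ between versions):

desc x$kcbwds
desc x$kcbwbpd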
useful links:
http://enkitec.tv/2012/05/19/oracle-full-table-scans-direct-path-reads-object-level-checkpoints-ora-8103s/
let's check:
select ksppinm as hidden_parameter, ksppstvl as value
from   x$ksppi join x$ksppcv using (indx)
where  ksppinm like '\_small_table_threshold%' escape '\'
order by ksppinm;

HIDDEN_PARAMETER          | VALUE
------------------------- | ----------
_small_table_threshold    | 2203

00:25:34 (1)[PDBORCL]sys@orcl> select 2203*8192/1024/1024 from dual;

2203*8192/1024/1024
-------------------
         17.2109375
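the same arithmetic can be done without hard-coding the 8K block size; a small sketch (run as sys) joining the hidden parameter to db_block_size:

select to_number(c.ksppstvl) * to_number(p.value) / 1024 / 1024 as threshold_mb
from   x$ksppi i, x$ksppcv c, v$parameter p
where  i.indx = c.indx
and    i.ksppinm = '_small_table_threshold'
and    p.name = 'db_block_size';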
so the threshold works out to about 17 MB, and a full table scan of anything bigger than that should go via direct path reads (DPR); our 152 MB table is well above it
now let's full scan bigtab (there are no indexes on the table, so I use count(*) to minimize the network influence on the results) and check the SGA info and session stats:
00:13:23 (1)[PDBORCL]c##bushmelev_aa@orcl> select count (*) from bigtab;

  COUNT(*)
----------
   2000000
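to confirm that the scan really went via direct path reads you can compare the session's own statistics before and after the count(*); a minimal sketch, assuming the usual statistic names (they vary a bit between versions):

select sn.name, ms.value
from   v$mystat ms, v$statname sn
where  ms.statistic# = sn.statistic#
and    sn.name in ('physical reads', 'physical reads direct',
                   'table scans (direct read)', 'table scans (long tables)');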
using:
Script to Identify Objects and Amount of Blocks in the Buffer Pools — Default, Keep, Recycle, nK Cache (Doc ID 373472.1)
get info about the segment in the SGA:
select BP_ID,
       decode(pd.bp_id,1,'KEEP',2,'RECYCLE',3,'DEFAULT',
              4,'2K SUBCACHE',5,'4K SUBCACHE',6,'8K SUBCACHE',
              7,'16K SUBCACHE',8,'32K SUBCACHE','UNKNOWN') subcache,
       bh.object_name,
       round (sum(bh.blocks) *
              (select to_number(value) from v$parameter where name='db_block_size')
              / (1024*1024), 2) used_mb
from   x$kcbwds ds,
       x$kcbwbpd pd,
       (select /*+ use_hash(x) */ set_ds, o.name object_name, count(*) BLOCKS
        from   obj$ o, x$bh x
        where  o.dataobj# = x.obj
        and    x.state != 0
        and    o.owner# != 0
        and    upper(o.name) = upper('&mytable')
        group by set_ds, o.name) bh
where  ds.set_id >= pd.bp_lo_sid
and    ds.set_id <= pd.bp_hi_sid
and    pd.bp_size != 0
and    ds.addr = bh.set_ds
group by pd.BP_ID,
         decode(pd.bp_id,1,'KEEP',2,'RECYCLE',3,'DEFAULT',
                4,'2K SUBCACHE',5,'4K SUBCACHE',6,'8K SUBCACHE',
                7,'16K SUBCACHE',8,'32K SUBCACHE','UNKNOWN'),
         bh.object_name;

00:50:04 (1)[PDBORCL]sys@orcl> @db_seg_cache2
Enter value for mytable: bigtab

     BP_ID | SUBCACHE   | OBJECT_NAME     | USED_MB
---------- | ---------- | --------------- | ----------
         3 | DEFAULT    | BIGTAB          | .01
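a rougher version of the same check can be done against the documented v$bh view instead of x$bh; a sketch, with the 8K block size assumed:

select o.owner, o.object_name, count(*) as blocks,
       round(count(*) * 8192 / 1024 / 1024, 2) as approx_mb
from   v$bh b, dba_objects o
where  b.objd = o.data_object_id
and    o.object_name = 'BIGTAB'
and    b.status != 'free'
group by o.owner, o.object_name;

v$bh does not tell you which buffer pool the buffers sit in, which is exactly what the x$kcbwds / x$kcbwbpd join above adds.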
now put the full table (152 MB) into the keep pool:
alter system set db_keep_cache_size=200m;

alter table bigtab storage (buffer_pool keep);
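the dictionary confirms which pool the table is now assigned to (a quick check, owner as above); note that the attribute only affects blocks read from now on, blocks already sitting in the default pool stay there until they age out:

select owner, table_name, buffer_pool
from   dba_tables
where  owner = 'C##BUSHMELEV_AA'
and    table_name = 'BIGTAB';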
now the full scan works from the cache:
now check the buffer pools:
select BP_ID,
       decode(pd.bp_id,1,'KEEP',2,'RECYCLE',3,'DEFAULT',
              4,'2K SUBCACHE',5,'4K SUBCACHE',6,'8K SUBCACHE',
              7,'16K SUBCACHE',8,'32K SUBCACHE','UNKNOWN') subcache,
       bh.object_name,
       round (sum(bh.blocks) *
              (select to_number(value) from v$parameter where name='db_block_size')
              / (1024*1024), 2) used_mb
from   x$kcbwds ds,
       x$kcbwbpd pd,
       (select /*+ use_hash(x) */ set_ds, o.name object_name, count(*) BLOCKS
        from   obj$ o, x$bh x
        where  o.dataobj# = x.obj
        and    x.state != 0
        and    o.owner# != 0
        and    upper(o.name) = upper('&mytable')
        group by set_ds, o.name) bh
where  ds.set_id >= pd.bp_lo_sid
and    ds.set_id <= pd.bp_hi_sid
and    pd.bp_size != 0
and    ds.addr = bh.set_ds
group by pd.BP_ID,
         decode(pd.bp_id,1,'KEEP',2,'RECYCLE',3,'DEFAULT',
                4,'2K SUBCACHE',5,'4K SUBCACHE',6,'8K SUBCACHE',
                7,'16K SUBCACHE',8,'32K SUBCACHE','UNKNOWN'),
         bh.object_name;

     BP_ID | SUBCACHE   | OBJECT_NAME     | USED_MB
---------- | ---------- | --------------- | ----------
         3 | DEFAULT    | BIGTAB          | .01
         1 | KEEP       | BIGTAB          | 148.64
change the keep pool to 140 MB and repeat the test:
alter system set db_keep_cache_size=120m;

00:39:38 (1)[PDBORCL]sys@orcl> /
Enter value for mytable: bigtab

     BP_ID | SUBCACHE   | OBJECT_NAME     | USED_MB
---------- | ---------- | --------------- | ----------
         3 | DEFAULT    | BIGTAB          | .01
         1 | KEEP       | BIGTAB          | 133.27
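keep in mind the buffer cache is resized in granules, so the pool you actually end up with may differ from the value you asked for (and a shrink may still be in progress); a sketch to see how the resize landed:

select component,
       round(current_size/1024/1024) as current_mb,
       round(granule_size/1024/1024) as granule_mb
from   v$sga_dynamic_components
where  component = 'KEEP buffer cache';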
even when 87% of the table's blocks are in current (xcur) mode in the keep pool, the session still does DPR
01:01:19 (1)[PDBORCL]sys@orcl> @db_seg_cache c##bushmelev_aa bigtab

AREA         | STATUS                         | DISTINCT_BLOCK_COUNT          | BLOCK_COUNT
------------ | ------------------------------ | ----------------------------- | -------------
BUFFER CACHE | free                           | 3                             | 3
BUFFER CACHE | xcur                           | 17060                         | 17060
BUFFER CACHE | summary                        | 17063                         | 17063
DATABASE     | db blocks                      | 19456                         |
SGA          | BUFFER CACHE of MAX SGA SIZE   | 754,974,720 of 1,426,063,360  | (Resizeable)

Elapsed: 00:00:00.11
01:01:20 (1)[PDBORCL]sys@orcl> l
  1  with OBJLIST as
  2    (select DATA_OBJECT_ID
  3     from dba_objects
  4     where (owner, object_name) in ((upper(nvl('&&1', 'user')), upper('&&2')))
  5     and DATA_OBJECT_ID is not null)
  6  select 'BUFFER CACHE' as AREA,
  7         nvl(status,'summary') as STATUS,
  8         to_char(count(distinct(file# || '#' || block#))) as DISTINCT_BLOCK_COUNT,
  9         to_char(count(*)) as BLOCK_COUNT
 10  from V$BH, OBJLIST
 11  where objd = OBJLIST.DATA_OBJECT_ID
 12  group by rollup(status)
 13  union all
 14  select 'DATABASE',
 15         'db blocks',
 16         to_char(blocks),
 17         '' as BH_COUNT
 18  from dba_segments where (owner, segment_name) in ((upper(nvl('&&1', 'user')), upper('&&2')))
 19  union all
 20  select 'SGA',
 21         'BUFFER CACHE of MAX SGA SIZE',
 22         trim(to_char(s1.bytes, '999,999,999,999,999')) ||
 23         ' of '||
 24         trim(to_char(s2.bytes, '999,999,999,999,999')),
 25         '(' || decode(s1.resizeable, 'Yes', 'Resizeable', 'Fixed') || ')'
 26* from v$sgainfo s1, v$sgainfo s2 where s1.name = 'Buffer Cache Size' and s2.name = 'Maximum SGA Size'
and the FTS still goes via DPR: