Oracle SQL

Yearly Archives: 2017

Top N biggest tables (with lobs, indexes and nested table)

Posted on December 16, 2017 by Sayan Malakshinov Posted in oracle

Script for SQL*Plus: https://github.com/xtender/xt_scripts/blob/master/tops/top_seg_by_size.sql

with 
  seg as (
     select 
       owner,segment_name
      ,segment_type
      ,tablespace_name
      ,sum(blocks) blocks
      ,sum(bytes)  bytes
     from dba_segments s
     where  segment_type not in (
       'TYPE2 UNDO'
      ,'ROLLBACK'
      ,'SYSTEM STATISTICS'
     )
     and segment_name not like 'BIN$%' --not in recyclebin
     and owner like '&owner_mask' -- you can specify schema here
     group by owner,segment_name,segment_type,tablespace_name
  )
 ,segs as (
     select 
       owner,segment_name
      ,case when segment_name like 'DR$%$%' then 'CTX INDEX' else segment_type end segment_type
      ,tablespace_name
      ,case 
         when segment_name like 'DR$%$%' 
           then (select table_owner||'.'||table_name from dba_indexes i where i.owner=s.owner and i.index_name = substr(segment_name,4,length(segment_name)-5))
         when segment_type in ('TABLE','TABLE PARTITION','TABLE SUBPARTITION')
            then owner||'.'||segment_name
         when segment_type in ('INDEX','INDEX PARTITION','INDEX SUBPARTITION')
            then (select i.table_owner||'.'||i.table_name from dba_indexes i where i.owner=s.owner and i.index_name=s.segment_name)
         when segment_type in ('LOBSEGMENT','LOB PARTITION','LOB SUBPARTITION')
            then (select l.owner||'.'||l.TABLE_NAME from dba_lobs l where l.segment_name = s.segment_name and l.owner = s.owner)
         when segment_type = 'LOBINDEX'
            then (select l.owner||'.'||l.TABLE_NAME from dba_lobs l where l.index_name = s.segment_name and l.owner = s.owner)
         when segment_type = 'NESTED TABLE'
            then (select nt.owner||'.'||nt.parent_table_name from dba_nested_tables nt where nt.owner=s.owner and nt.table_name=s.segment_name)
         when segment_type = 'CLUSTER'
            then (select min(owner||'.'||table_name) from dba_tables t where t.owner=s.owner and t.cluster_name=s.segment_name and rownum=1)
       end table_name
      ,blocks
      ,bytes
     from seg s
  )
 ,so as (
     select
       segs.owner
      ,substr(segs.table_name,instr(segs.table_name,'.')+1) TABLE_NAME
      ,sum(segs.bytes)  total_bytes
      ,sum(segs.blocks) total_blocks
      ,sum(case when segs.segment_type in ('TABLE','TABLE PARTITION','TABLE SUBPARTITION','NESTED TABLE','CLUSTER') then segs.bytes end) tab_size
      ,sum(case when segs.segment_type in ('INDEX','INDEX PARTITION','INDEX SUBPARTITION','CTX INDEX') then segs.bytes end) ind_size
      ,sum(case when segs.segment_type in ('CTX INDEX') then segs.bytes end) ctx_size
      ,sum(case when segs.segment_type in ('LOBSEGMENT','LOBINDEX','LOB PARTITION','LOB SUBPARTITION') then segs.bytes end) lob_size
     from segs
     group by owner,table_name
  )
 ,tops as (
     select 
           dense_rank()over (order by total_bytes desc) rnk
          ,so.*
     from so
  )
select *
from tops
where rnk<=50 -- top 50
/
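
A typical SQL*Plus run looks like this (a sketch; the inline script above prompts for the &owner_mask substitution variable, which takes any LIKE pattern):

SQL> @top_seg_by_size.sql
Enter value for owner_mask: SCOTT%

Each returned row is one table with its indexes, LOBs and nested tables rolled up: total_bytes/total_blocks plus the tab_size/ind_size/ctx_size/lob_size breakdown, ranked by total_bytes.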

“Collection iterator pickler fetch”: pipelined vs simple table functions

Posted on December 13, 2017 by Sayan Malakshinov Posted in oracle, PL/SQL, PL/SQL optimization, query optimizing, SQL, troubleshooting

Alex R recently discovered an interesting thing: in SQL, pipelined functions work much faster than simple non-pipelined table functions. So if you already have a simple non-pipelined table function and want to get its results in SQL (select * from table(fff)), it’s much better to create another pipelined function that gets its results and returns them through PIPE ROW().

A bit more detail:

Assume we need to return the collection “result” from a PL/SQL function into the SQL query “select * from table(function_F(…))”.
If we create two similar functions, a pipelined f_pipe and a simple non-pipelined f_non_pipe,

create or replace function f_pipe(n int) return tt_id_value pipelined 
as
  result tt_id_value;
begin
  ...
  for i in 1..n loop
    pipe row (result(i));
  end loop;
end f_pipe;
/
create or replace function f_non_pipe(n int) return tt_id_value 
as
  result tt_id_value;
begin
  ...
  return result;
end f_non_pipe;
/
Full function definitions
create or replace type to_id_value as object (id int, value int)
/
create or replace type tt_id_value as table of to_id_value
/
create or replace function f_pipe(n int) return tt_id_value pipelined 
as
  result tt_id_value;
  
  procedure gen is
  begin
     result:=tt_id_value();
     result.extend(n);
     for i in 1..n loop
        result(i):=to_id_value(i, 1);
     end loop;
  end;    
begin
  gen();
  for i in 1..n loop
    pipe row (result(i));
  end loop;
end f_pipe;
/
create or replace function f_non_pipe(n int) return tt_id_value 
as
  result tt_id_value;
  
  procedure gen is
  begin
     result:=tt_id_value();
     result.extend(n);
     for i in 1..n loop
        result(i):=to_id_value(i, 1);
     end loop;
  end;    
begin
  gen();
  return result;
end f_non_pipe;
/
create or replace function f_pipe_for_nonpipe(n int) return tt_id_value pipelined 
as
  result tt_id_value;
begin
  result:=f_non_pipe(n);
  for i in 1..result.count loop
    pipe row (result(i));
  end loop;
end;
/
create or replace function f_udf_pipe(n int) return tt_id_value pipelined 
as
  result tt_id_value;
  
  procedure gen is
  begin
     result:=tt_id_value();
     result.extend(n);
     for i in 1..n loop
        result(i):=to_id_value(i, 1);
     end loop;
  end;    
begin
  gen();
  for i in 1..n loop
    pipe row (result(i));
  end loop;
end;
/
create or replace function f_udf_non_pipe(n int) return tt_id_value 
as
  result tt_id_value;
  
  procedure gen is
  begin
     result:=tt_id_value();
     result.extend(n);
     for i in 1..n loop
        result(i):=to_id_value(i, 1);
     end loop;
  end;    
begin
  gen();
  return result;
end;
/

Test queries

set echo on feed only timing on;
--alter session set optimizer_adaptive_plans=false;
--alter session set "_optimizer_use_feedback"=false;

select sum(id * value) s from table(f_pipe(&1));
select sum(id * value) s from table(f_non_pipe(&1));
select sum(id * value) s from table(f_pipe_for_nonpipe(&1));
select sum(id * value) s from table(f_udf_pipe(&1));
select sum(id * value) s from table(f_udf_non_pipe(&1));
with function f_inline_non_pipe(n int) return tt_id_value 
as
  result tt_id_value;
begin
     result:=tt_id_value();
     result.extend(n);
     for i in 1..n loop
        result(i):=to_id_value(i, 1);
     end loop;
     return result;
end;
select sum(id * value) s from table(f_inline_non_pipe(&1));
/
set timing off echo off feed on;


we’ll find that the function with a simple “return result” is at least twice as slow as the pipelined function (elapsed times in seconds):

Function             1 000 000 elements   100 000 elements
F_PIPE                      2.46                0.20
F_NON_PIPE                  4.39                0.44
F_PIPE_FOR_NONPIPE          2.61                0.26
F_UDF_PIPE                  2.06                0.20
F_UDF_NON_PIPE              4.46                0.44

I was really surprised that even the “COLLECTION ITERATOR PICKLER FETCH” with F_PIPE_FOR_NONPIPE, which gets the result of F_NON_PIPE and returns it through PIPE ROW(), runs almost twice as fast as F_NON_PIPE itself, so I decided to analyze it using stapflame by Frits Hoogland.

I added “dbms_lock.sleep(1)” into both of these functions after the collection generation, to compare only the difference between “pipe row” in a loop and “return result”:

Modified functions

create or replace function f_pipe(n int) return tt_id_value pipelined 
as
  result tt_id_value;
  
  procedure gen is
  begin
     result:=tt_id_value();
     result.extend(n);
     for i in 1..n loop
        result(i):=to_id_value(i, 1);
     end loop;
  end;    
begin
  gen();
  dbms_lock.sleep(1);
  for i in 1..n loop
    pipe row (result(i));
  end loop;
end f_pipe;
/
create or replace function f_non_pipe(n int) return tt_id_value 
as
  result tt_id_value;
  
  procedure gen is
  begin
     result:=tt_id_value();
     result.extend(n);
     for i in 1..n loop
        result(i):=to_id_value(i, 1);
     end loop;
  end;    
begin
  gen();
  dbms_lock.sleep(1);
  return result;
end f_non_pipe;
/


And stapflame showed that almost all overhead was consumed by the function “kgmpoa_Assign_Out_Arguments”:

I don’t know exactly what this function does, but we can see that Oracle assigns the collection a bit later.
From the other functions in this stack (pmucpkl, kopp2isize, kopp2colsize, kopp2atsize (attribute?), kopuadt) I suspect it is some kind of preprocessing of the return arguments.
What do you think about it?

Full stapflame output:
stapflame_nonpipe
stapflame_pipe

oracle pipelined functions pl/sql pl/sql functions pl/sql optimization

SQL*Plus tips #8: How to read the output of dbms_output without “serveroutput on”

Posted on December 10, 2017 by Sayan Malakshinov Posted in oracle, SQL*Plus, SQL*Plus tips

When “serveroutput” is enabled, SQL*Plus executes “BEGIN DBMS_OUTPUT.GET_LINES(:LINES, :NUMLINES); END;” after each command.
That’s why I don’t like having it always enabled: it adds extra calls and round-trips, and it is inconvenient when I want to get the plan of the last executed query:

SQL> set serverout on;
SQL> select * from dual;

D
-
X

SQL> select * from table(dbms_xplan.display_cursor('','','allstats last'));

PLAN_TABLE_OUTPUT
--------------------------------------------------------------------------
SQL_ID  9babjv8yq8ru3, child number 0

BEGIN DBMS_OUTPUT.GET_LINES(:LINES, :NUMLINES); END;

NOTE: cannot fetch plan for SQL_ID: 9babjv8yq8ru3, CHILD_NUMBER: 0
      Please verify value of SQL_ID and CHILD_NUMBER;
      It could also be that the plan is no longer in cursor cache (check v$sql_plan)

So usually I switch “serveroutput” on only when needed, but sometimes I can forget to enable it. In such cases I use a very simple script that reads the output using dbms_output.get_lines and prints it through a refcursor:
https://github.com/xtender/xt_scripts/blob/master/output_print.sql
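
The idea, as a minimal sketch (the actual script at the link may differ; this version assumes the buffered lines fit into varchar2(4000)):

var out refcursor
declare
   lines dbms_output.chararr;
   n     integer := 32767;           -- max lines to read
   res   sys.odcivarchar2list := sys.odcivarchar2list();
begin
   dbms_output.get_lines(lines, n);  -- n comes back as the actual number of lines
   res.extend(n);
   for i in 1..n loop
      res(i) := lines(i);
   end loop;
   open :out for select column_value as output from table(res);
end;
/
print out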

When you “set serveroutput on”, SQL*Plus also executes “dbms_output.enable”, and when you “set serverout off”, it executes “dbms_output.disable”. That’s why my glogin.sql contains “call dbms_output.enable(1e6);”, and you need to execute it after each “set serverout off” if you want to use this script.

oracle SQL*Plus

Bug with integer literals in PL/SQL

Posted on December 9, 2017 by Sayan Malakshinov Posted in curious, oracle, PL/SQL, PL/SQL optimization, undocumented

This interesting question was posted on our Russian forum yesterday:

We have a huge PL/SQL package, and this simple function returns a wrong result when it’s located at the end of the package body:

create or replace package body PKGXXX as
  ...
  function ffff return number is
  nRes number;
  begin        
    nRes :=  268435456;
    return nRes;
  end;
end;
/

But it works fine in any of the following cases:
* replace 268435456 with power(2, 28), or
* replace 268435456 with a small literal like 268, or
* move this function to the beginning of the package body

One of the interesting findings was that the returned value was equal to one of the literals in another function.
We can reproduce this bug even with an anonymous PL/SQL block. The following test case uses 32768 integer literals, from 10000001 to 10032768, and then prints 5 other integers:

declare n number;
begin
  n:=10000001; -- this part
  n:=10000002; -- creates
  n:=10000003; -- 32768
   ...         -- integer
  n:=10032768; -- literals
    dbms_output.put_line('100000='||100000); -- it should print: 100000=100000
    dbms_output.put_line('32766 ='||32766);
    dbms_output.put_line('32767 ='||32767);    
    dbms_output.put_line('32768 ='||32768);
    dbms_output.put_line('32769 ='||32769);
end;
Test code
declare
   c clob:='declare n number;begin'||chr(10);
   f varchar2(100):='n:=%s;'||chr(10);
   v varchar2(32767);
   n number:=32768;
begin
   for i in 1..n loop
      v:=v||utl_lms.format_message(f,to_char(1e7+i));
      if length(v)>30000 then
         c:=c||v;
         v:='';
      end if;
   end loop;
   v:=v||q'[
    dbms_output.put_line('100000='||100000);
    dbms_output.put_line('32766 ='||32766);
    dbms_output.put_line('32767 ='||32767);    
    dbms_output.put_line('32768 ='||32768);
    dbms_output.put_line('32769 ='||32769);
   end;
   ]';
   c:=c||v;
   execute immediate c;
end;
/

It produces the following output:

100000=10000001
32766 =32766
32767 =32767
32768 =10000002
32769 =10000003

This test case demonstrates the wrong results well:
* instead of 100000 we get 10000001, which is the value from the first line after “begin”, i.e. the 1st integer literal in the code;
* for 32766 and 32767 Oracle returns the correct values;
* instead of 32768 (=32767+1) it returns 10000002, which is the integer from the 2nd line, i.e. the 2nd integer literal in the code;
* instead of 32769 (=32767+2) it returns 10000003, which is the integer from the 3rd line, i.e. the 3rd integer literal in the code.
After several tests I can draw a conclusion:

  • It doesn’t matter which plsql_optimize_level or plsql_code_type you set, or whether debug was enabled: the behaviour is the same.
  • It seems to be a kind of PL/SQL optimization: during parsing, Oracle leaves an integer literal in place if its value is in the range -32768..32767 (a 16-bit signed int); if its value is out of this range, Oracle adds the value to an array of integer constants and replaces the literal with the index of its element in this array. But because the index value overflows when the count of such integer literals exceeds 32767, instead of the Nth element of this array Oracle returns the Mth element, where M = mod(N, 32767).

So we can describe this behaviour using the first test case:

declare n number;
begin
  n:=10000001; -- this part
  n:=10000002; -- creates
  n:=10000003; -- 32768
   ...         -- integer
  n:=10032768; -- literals
    dbms_output.put_line('100000='||100000); -- it should print 100000, i.e. the 32768th element of the array, but prints 10000001,
                                             -- where 10000001 is the 1st element of the array (1==mod(32768,32767))
    dbms_output.put_line('32766 ='||32766);  -- these 2 lines print correct values,
    dbms_output.put_line('32767 ='||32767);  -- because their values are in the range -32768..32767
    dbms_output.put_line('32768 ='||32768);  -- this line contains the 32769th element and prints the 2nd element of the array (2==mod(32769,32767))
    dbms_output.put_line('32769 ='||32769);  -- this line contains the 32770th element and prints the 3rd element of the array (3==mod(32770,32767))
end;

The following query can help you find objects that potentially have this problem:

select
  s.owner,s.name,s.type
 ,sum(regexp_count(text,'(\W|^)3\d{4,}([^.0-9]|$)')) nums_count -- this regexp counts integer literals >= 30000
from dba_source s 
where 
    owner='&owner'
and type in ('FUNCTION','PROCEDURE','PACKAGE','PACKAGE BODY')
group by s.owner,s.name,s.type
having sum(regexp_count(text,'(\W|^)3\d{4,}([^.0-9]|$)'))>32767 -- keep only objects that have more than 32767 such integer literals

Workaround:
You may have noticed that I wrote about INTEGER literals only, so the easiest workaround is to make them FLOAT – just add “.” to the end of each literal:

declare n number;
begin
  n:=10000001.;
  n:=10000002.;
  n:=10000003.;
   ...
  n:=10032768.;
    dbms_output.put_line('100000='||100000.);
    dbms_output.put_line('32766 ='||32766.);
    dbms_output.put_line('32767 ='||32767.);    
    dbms_output.put_line('32768 ='||32768.);
    dbms_output.put_line('32769 ='||32769.);
end;
Fixed test cases

declare
   c clob:='declare n number;begin'||chr(10);
   f varchar2(100):='n:=%s.;'||chr(10); -- I've added here "."
   v varchar2(32767);
   n number:=32768;
begin
   for i in 1..n loop
      v:=v||utl_lms.format_message(f,to_char(1e7+i));
      if length(v)>30000 then
         c:=c||v;
         v:='';
      end if;
   end loop;
   v:=v||q'[
    dbms_output.put_line('100000='||100000.); -- .
    dbms_output.put_line('32766 ='||32766.);
    dbms_output.put_line('32767 ='||32767.);    
    dbms_output.put_line('32768 ='||32768.);
    dbms_output.put_line('32769 ='||32769.);
   end;
   ]';
   c:=c||v;
   execute immediate c;
end;
/

bug oracle oracle undocumented behaviour pl/sql pl/sql optimization

Oracle issues after upgrade to 12.2

Posted on November 24, 2017 by Sayan Malakshinov Posted in 12.2, bug, oracle

Sometimes it’s really hard even to create a reproducible test case to send to Oracle Support, especially in the case of intermittent errors.
In such cases, I think it would be really great to have access to similar service requests or bugs of other Oracle clients.
So while my poll about knowledge sharing is still active, I want to share a couple of bugs we have faced after upgrading to 12.2 (and one bug from Eric van Roon). I’m going to remove the bugs from this list when they become “public” or “fixed”.
If you want to add your own findings to this list, you can post them in the comments. To make this easier, you can provide just the symptoms, a short description and a link to your own post with the details – I’ll add it as a link.
Continue reading→

12.2.0.1 bind variable bug deterministic functions oracle troubleshooting undocumented oracle

Triggers and Redo: changes on 12.2

Posted on November 19, 2017 by Sayan Malakshinov Posted in 12c, oracle, trace

In one of my previous posts I showed how even an empty trigger increases redo generation, but re-running the test from that post, I found that this behaviour has changed a bit in 12.2:
In my old test case the values of column A were equal to the values of B, and on previous Oracle versions, including 12.1.0.2, we can see that even though “update … set B=A” doesn’t really change “B”, even an empty trigger greatly increases redo generation.
But on 12.2.0.1, in the case of equal values the trigger doesn’t increase redo, so we can see a small optimization here; in the case of different values, though, the trigger still increases redo generation greatly.

same_dumpredo.sql
set feed on;
drop table xt_curr1 purge;
drop table xt_curr2 purge;
-- simple table:
create table xt_curr1 as select '2' a, '2' b from dual connect by level<=1000;
-- same table but with empty trigger:
create table xt_curr2 as select '2' a, '2' b from dual connect by level<=1000;
create or replace trigger tr_xt_curr2 before update on xt_curr2 for each row
begin
  null;
end;
/
-- objectID and SCN:
col obj1 new_val obj1;
col obj2 new_val obj2;
col scn  new_val scn;
select 
  (select o.OBJECT_ID from user_objects o where o.object_name='XT_CURR1') obj1
 ,(select o.OBJECT_ID from user_objects o where o.object_name='XT_CURR2') obj2
 ,d.CURRENT_SCN scn
from v$database d
/
-- logfile1:
alter system switch logfile;
col member new_val logfile;
SELECT member
FROM v$logfile
WHERE 
     is_recovery_dest_file='NO'
 and group#=(SELECT group# FROM v$log WHERE status = 'CURRENT')
 and rownum=1;
-- update1:
set autot trace stat;
update xt_curr1 set b=a;
set autot off;
commit;
-- dump logfile1:
alter session set tracefile_identifier='log1_same';
ALTER SYSTEM DUMP LOGFILE '&logfile' SCN MIN &scn OBJNO &obj1;

-- logfile2:
alter system switch logfile;
col member new_val logfile;
SELECT member
FROM v$logfile
WHERE 
     is_recovery_dest_file='NO'
 and group#=(SELECT group# FROM v$log WHERE status = 'CURRENT')
 and rownum=1;
-- update2:
set autot trace stat;
update xt_curr2 set b=a;
set autot off;
commit;
-- dump logfile2:
alter session set tracefile_identifier='log2_same';
ALTER SYSTEM DUMP LOGFILE '&logfile' OBJNO &obj2;
alter session set tracefile_identifier='off';
disc;


diff_dumpredo.sql

set feed on;
drop table xt_curr1 purge;
drop table xt_curr2 purge;
-- simple table:
create table xt_curr1 as select '1' a, '2' b from dual connect by level<=1000;
-- same table but with empty trigger:
create table xt_curr2 as select '1' a, '2' b from dual connect by level<=1000;
create or replace trigger tr_xt_curr2 before update on xt_curr2 for each row
begin
  null;
end;
/
-- objectID and SCN:
col obj1 new_val obj1;
col obj2 new_val obj2;
col scn  new_val scn;
select 
  (select o.OBJECT_ID from user_objects o where o.object_name='XT_CURR1') obj1
 ,(select o.OBJECT_ID from user_objects o where o.object_name='XT_CURR2') obj2
 ,d.CURRENT_SCN scn
from v$database d
/
-- logfile1:
alter system switch logfile;
col member new_val logfile;
SELECT member
FROM v$logfile
WHERE 
     is_recovery_dest_file='NO'
 and group#=(SELECT group# FROM v$log WHERE status = 'CURRENT')
 and rownum=1;
-- update1:
set autot trace stat;
update xt_curr1 set b=a;
set autot off;
commit;
-- dump logfile1:
alter session set tracefile_identifier='log1_diff';
ALTER SYSTEM DUMP LOGFILE '&logfile' SCN MIN &scn OBJNO &obj1;

-- logfile2:
alter system switch logfile;
col member new_val logfile;
SELECT member
FROM v$logfile
WHERE 
     is_recovery_dest_file='NO'
 and group#=(SELECT group# FROM v$log WHERE status = 'CURRENT')
 and rownum=1;
-- update2:
set autot trace stat;
update xt_curr2 set b=a;
set autot off;
commit;
-- dump logfile2:
alter session set tracefile_identifier='log2_diff';
ALTER SYSTEM DUMP LOGFILE '&logfile' OBJNO &obj2;
alter session set tracefile_identifier='off';
disc;


Equal values: [screenshots: redo statistics on 12.1.0.2 and 12.2.0.1]

Different values: [screenshots: redo statistics on 12.1.0.2 and 12.2.0.1]

We can easily see that the trigger disables the batched “Array update”:
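
If you compare the change vectors in the two dumps, my reading of the difference (based on the usual KDO opcode meanings – please verify against your own dump):

-- without the trigger: batched change vectors, e.g. OP:11.19 (Array Update)
-- with the trigger:    one change vector per row, e.g. OP:11.5 (Update Row Piece)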

redo triggers

Adaptive serial direct path read decision ignores object statistics since 12.1

Posted on November 19, 2017 by Sayan Malakshinov Posted in 12c, adaptive serial direct path reads, oracle

On versions 11.2.0.2 – 11.2.0.4 Oracle uses object statistics to make the direct path read decision (of course, only if “_direct_read_decision_statistics_driven” hasn’t been changed to “false”), and we can force serial direct reads at the statement level using SQL profiles with the hints INDEX_STATS/TABLE_STATS, but since at least 12.1.0.2 this decision ignores the statistics.

Btw, thanks to Jure Bratina, we now know that we need to repeat the TABLE_STATS hint at least twice to make it work 🙂 And from the following test case we know that it takes the parameters from the second one 🙂

Compare trace files of the following test cases:

table_stats2.sql

drop table t1;

create table t1 as select * from dual;
----------------------------
pro ######################################;
exec dbms_stats.gather_table_stats('','T1');
exec dbms_stats.set_table_stats(user,'T1',numblks => 33333333);

col value new_val oldval noprint;
select value from v$statname n, v$mystat s
where n.statistic#=s.statistic# and n.name ='physical reads direct';

----------------------------
alter session set tracefile_identifier='table_stats2';
--alter session set events '10053 trace name context forever, level 1';
alter session set events 'trace[nsmtio] disk highest';
select/*+ table_stats(t1, scale, blocks=66666666 rows=2222222222) 
          table_stats(t1, scale, blocks=99999999 rows=4444444444)
       */ 
       * 
from t1;
select value-&oldval directreads, value from v$statname n, v$mystat s
where n.statistic#=s.statistic# and n.name ='physical reads direct';


You can see that our hint successfully changed the number of blocks and forced direct path reads on 11.2.0.4:
Oracle 11.2.0.4:

NSMTIO: qertbFetch:DirectRead:[OBJECT_SIZE>VLOT]
NSMTIO: Additional Info: VLOT=797445
Object# = 78376, Object_Size = 66666666 blocks
SqlId = 7naawntkc57yx, plan_hash_value = 3617692013, Partition# = 0


But on 12.1.0.2 and 12.2.0.1 we can see two “NSMTIO: kcbism” lines with different types (3 and 2) and different numbers of blocks, and the direct path read decision was based on the second one – from the segment header:

Oracle 12.1.0.2:

NSMTIO: kcbism: islarge 1 next 0 nblks 66666666 type 3, bpid 65535, kcbisdbfc 0 kcbnhl 4096 kcbstt 983 keep_nb 0 kcbnbh 59010 kcbnwp 1
NSMTIO: kcbism: islarge 0 next 0 nblks 4 type 2, bpid 3, kcbisdbfc 0 kcbnhl 4096 kcbstt 983 keep_nb 0 kcbnbh 59010 kcbnwp 1
NSMTIO: qertbFetch:NoDirectRead:[- STT < OBJECT_SIZE < MTT]:Obect's size: 4 (blocks), Threshold: MTT(4917 blocks),
_object_statistics: enabled, Sage: enabled, 
Direct Read for serial qry: enabled(::::::), Ascending SCN table scan: FALSE 
flashback_table_scan: FALSE, Row Versions Query: FALSE
SqlId: 7naawntkc57yx, plan_hash_value: 3617692013, Object#: 302342, Parition#: 0 DW_scan: disabled


Oracle 12.2.0.1:

NSMTIO: kcbism: islarge 1 next 0 nblks 66666666 type 3, bpid 65535, kcbisdbfc 0 kcbnhl 4096 kcbstt 1214 keep_nb 0 kcbnbh 45026 kcbnwp 1
NSMTIO: kcbism: islarge 0 next 0 nblks 4 type 2, bpid 3, kcbisdbfc 0 kcbnhl 4096 kcbstt 1214 keep_nb 0 kcbnbh 45026 kcbnwp 1
NSMTIO: qertbFetch:NoDirectRead:[- STT < OBJECT_SIZE < MTT]:Obect's size: 4 (blocks), Threshold: MTT(6072 blocks),
_object_statistics: enabled, Sage: enabled, 
Direct Read for serial qry: enabled(:::::::), Ascending SCN table scan: FALSE 
flashback_table_scan: FALSE, Row Versions Query: FALSE
SqlId: 7naawntkc57yx, plan_hash_value: 3617692013, Object#: 174411, Parition#: 0 DW_scan: disabled


And a similar example, but for an IFFS (index fast full scan).
NB: I set the number of index blocks to 33333000 using dbms_stats and hinted the query with 7777700.

index_stats2.sql

drop table t2 purge;
ALTER SESSION SET optimizer_dynamic_sampling = 0;
ALTER SESSION SET "_optimizer_use_feedback" = FALSE;
ALTER SESSION SET optimizer_adaptive_features = FALSE;
ALTER SESSION SET optimizer_adaptive_plans=FALSE;

create table t2(x) as select level from dual connect by level<=1000;
create index t2_ix on t2(1,x,rpad(x,100));

begin
   dbms_stats.gather_table_stats('','T2',cascade => true);
   dbms_stats.set_table_stats(user,'T2'   ,numblks => 33333333);
   dbms_stats.set_index_stats(user,'T2_IX',numlblks => 33333000);
end;
/

col value new_val oldval noprint;
select value from v$statname n, v$mystat s
where n.statistic#=s.statistic# and n.name ='physical reads direct';

alter session set tracefile_identifier='index_stats2';
alter session set events 'trace[nsmtio] disk highest';
select/*+ index_stats(t2, t2_ix, scale, blocks=7777700)
          index_ffs(t2 t2_ix)
          dynamic_sampling(0)
       */ 
       count(*) cnt2
from t2;
select value-&oldval directreads, value from v$statname n, v$mystat s
where n.statistic#=s.statistic# and n.name ='physical reads direct';

disc;


You can see that on 11.2.0.4 Oracle takes the number of blocks from the hint (7777700):
Oracle 11.2.0.4 - index_stats:

NSMTIO: qerixFetchFastFullScan:DirectRead[OBJECT_SIZE > VLOT]:
NSMTIO: AdditionalInfo: Object_size: 7777700 (blocks), vlot=797445
SqlId=by2zv0k566hj5, plan_hash_value=3419274230,Object#=78375


From the first line we can see that kcbism takes the hinted number of blocks, but later kcbivlo overwrites it with the number from the segment header:

Oracle 12.1.0.2 - index_stats:

NSMTIO: kcbism: islarge 1 next 0 nblks 7777700 type 3, bpid 65535, kcbisdbfc 0 kcbnhl 4096 kcbstt 983 keep_nb 0 kcbnbh 59010 kcbnwp 1
NSMTIO: kcbivlo: nblks 22 vlot 500 pnb 49175 kcbisdbfc 0 is_large 0
NSMTIO: qerixFetchFastFullScan:[MTT < OBJECT_SIZE < VLOT]:NSMTIO: AdditionalInfo: Object_size: 22 (blocks), vlot=245875
SqlId=by2zv0k566hj5, plan_hash_value=3419274230,Object#=302347


Oracle 12.2.0.1 - index_stats:

NSMTIO: kcbism: islarge 1 next 0 nblks 7777700 type 3, bpid 65535, kcbisdbfc 0 kcbnhl 4096 kcbstt 1214 keep_nb 0 kcbnbh 45026 kcbnwp 1
NSMTIO: kcbivlo: nblks 22 vlot 500 pnb 60729 kcbisdbfc 0 is_large 0
NSMTIO: qerixFetchFastFullScan:[MTT < OBJECT_SIZE < VLOT]:NSMTIO: AdditionalInfo: Object_size: 22 (blocks), vlot=303645
SqlId=by2zv0k566hj5, plan_hash_value=3419274230,Object#=174409


So we have two options to force direct reads:
1. execute alter session set “_serial_direct_read”=’ALWAYS’;
2. or force a parallel plan to get parallel direct path reads (we can do it even with dop=1).
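
For example (a sketch using the test table t1 from above):

-- option 1:
alter session set "_serial_direct_read"=always;

-- option 2: even dop=1 is enough for a parallel plan with direct path reads:
select/*+ parallel(t1 1) */ count(*) from t1;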

12c direct path reads

Easy(lazy) way to check which programs have properly configured FetchSize

Posted on November 16, 2017 by Sayan Malakshinov Posted in oracle, SQL, statistics
select 
   s.module
  ,ceil(max(s.rows_processed/s.fetches)) rows_per_fetch
from v$sql s
where 
    s.rows_processed>100
and s.executions    >1
and s.fetches       >1
and s.module is not null
and s.command_type  = 3    -- SELECTs only
and s.program_id    = 0    -- do not account recursive queries from stored procs
and s.parsing_schema_id!=0 -- <> SYS
group by s.module
order by rows_per_fetch desc nulls last
/
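
For example, SQL*Plus itself fetches according to arraysize (default 15), so a module stuck near 15 rows per fetch on large result sets could simply need:

set arraysize 500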
arraysize fetchsize troubleshooting

Revisiting buffer cache Very Large Object Threshold

Posted on November 15, 2017 by Roger MacNicol Posted in adaptive serial direct path reads, oracle, SmartScan

If you turn on NSMTIO tracing you will see references to VLOT:

qertbFetch:[MTT < OBJECT_SIZE < VLOT]: Checking cost to read from caches (local/remote) and checking storage reduction factors (OLTP/EHCC Comp)

I had said you could ignore VLOT and Frits Hoogland pointed out that tracing showed it had some impact, so let me clarify:

VLOT is the absolute upper bound at which cached reads can even be considered.

This defaults to 500% of the number of buffers in the cache i.e.

_very_large_object_threshold = 500

While this number is not used in any calculations, it is used in two places as a cutoff for whether those calculations are even considered:

1) Can we consider using Automatic Big Table Caching (a.k.a. DWSCAN) for this object?

2) Should we do a cost analysis for Buffer Cache scan vs Direct Read scan on tables larger than the MTT? 

The logic for tables above the calculated medium table threshold (MTT) that are NOT part of searched DMLs and NOT on Exadata with the statistics-based storage reduction factor (_statistics_based_srf_enabled) enabled is:

  • If _serial_direct_read == ALWAYS, use Direct Read
  • If _serial_direct_read == NEVER, use Buffer Cache
  • If _serial_direct_read == AUTO and #blocks in table < VLOT, use cost model
  • Else use Direct Read “qertbFetch:DirectRead:[OBJECT_SIZE>VLOT]”

In practice 5X buffer cache is so large the cost based decision will come to the same conclusion anyway – the default VLOT simply saves time spent doing the analysis.
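
To put a number on that cutoff for your instance, you can compare an object’s block count against 5x the buffer count (a rough sketch; v$buffer_pool shows the number of buffers per pool):

select 5 * sum(buffers) as vlot_blocks  -- default _very_large_object_threshold = 500 (%)
from v$buffer_pool;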

For example, I got a quick count of the number of blocks in the non-partitioned TPC-H Scale 1 LINEITEM table

select segment_name,sum(blocks),sum(bytes) from user_extents where segment_name='LINEITEM' group by segment_name;

and created my buffer cache to be exactly the same size. With this setup, setting _very_large_object_threshold=100 bypassed the cost model and went straight to DR scan, while setting it to 200 forced the use of the cost model. 

The moral of this is that the default value of VLOT rarely changes the decisions made. Only if you reduce VLOT to a much smaller multiplier of the cache size will you start to see a few more of your larger buffer cache scans move to direct read, because they are no longer eligible for the cost analysis. If you wish to stop some of the largest buffer cache scans from happening, you would need to set _very_large_object_threshold to less than 200.

direct path reads oracle Roger MacNicol SmartScan troubleshooting

Random thoughts on block sizes

Posted on November 9, 2017 by Roger MacNicol Posted in oracle, SmartScan

I heard “Oracle only tests on 8k and doesn’t really test 16k”

I heard someone assert that one reason you should only use 8k block sizes is that, and I quote, “Oracle only tests on 8k and doesn’t really test 16k”. I tried googling that rumour and tracked it back to AskTom. Also as disks and memory get bigger and CPUs get faster it is natural to ask if 8k is now “too small”.

So here are my thoughts:

1. A quick scan of the data layer regression tests showed a very large number running on 16k blocks

2. Oracle typically runs its DW stress tests on 16k blocks

So, clearly, the assertion is untrue but I did spot some areas where 32k testing could be improved. I also added a note to AskTom clarifying testing on 16k.

Does this mean I should look at using 16k blocks for my DW?

Whoa, not so fast. Just because table scans typically run slightly faster on 16k blocks and compression algorithms typically get slightly better compression on 16k blocks does not mean your application will see improvements:

1. Multiple block sizes mean added work for the DBA

2. Databases do a lot more than just scan tables – e.g. row based writes and reads could perform worse

3. Most apps have low hanging fruit that will give you far better ROI on your time than worrying about block sizes (if you don’t believe me, attend one of Jonathan Lewis’s excellent index talks).

Most applications run fine on 8k because it is a good trade off between different access paths and, in general, 8k is still the right choice for most applications. 

What about ‘N’ row pieces

In general Oracle’s block layout tries to ensure row pieces are split on column boundaries but in the case of very wide columns we will split in the middle of a column if too much space would be wasted by aligning with column boundaries. When a column is split in the middle it creates what is known as an ‘N’ row piece.

Rows are split by default at 255-column boundaries, assuming the row piece fits in the block. If you have a table with very wide rows or some very wide inline columns, smaller block sizes will result both in rows being split more often and in columns being split in the middle. At a minimum, the number of block gets to retrieve a single row will likely increase. This is one case where a 16k block size may be worth investigating.
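
One cheap way to see whether split rows are already costing you extra block gets is the continued-row statistic (a sketch; interpret it relative to total fetches by rowid):

select name, value
from v$sysstat
where name in ('table fetch by rowid', 'table fetch continued row');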

The curious case of 70X compression

We had a case of a customer legitimately pointing out that ZLIB would give much better than 70X compression on his very repetitive data, but COMPRESS FOR QUERY HIGH was stuck at 70X. And, as it turns out, this is a factor of block size.

Hybrid Columnar Compression (HCC) takes up to 8 MB of raw data and compresses it down to one CU (Compression Unit), subject to row limits depending on the compression type. For example COMPRESS FOR ARCHIVE LOW (which is also ZLIB) is limited to 8,000 rows in a CU. But there is another limit, which you may deduce from block dumps: HCC allows only one head-piece row per block, because HCC rowids are the RDBA of the head piece plus the row offset into the CU. So if you have incredibly compressible data where 8,000 rows compress down to less than 16k, moving to a larger block size can be bad news because you will end up with wasted space at the end of each block.

Summary

There are typically better things to spend time on than worrying about block size but if you wish to run your DW on 16k blocks they are thoroughly tested just like 8k blocks are.

HCC oracle Roger MacNicol SmartScan