1) Table function, pipelined, instead of row-by-row inserts.
@@@Replacing Row-Based Inserts with Pipelined Function-Based Loads
Preview:
In the case below I build a large source table by copying dba_source a few
times (about 1.2 million rows, which each load method doubles into roughly
2.3 million target rows), plus an empty target table.
I then load the target table with five different methods and compare them,
to show the advantage of a pipelined (and parallel-enabled) table function
over row-by-row inserts.
@@@Example: Make sure you have enough space in your default tablespace.
ALTER TABLESPACE USERS ADD
DATAFILE '/u01/app/oracle/oradata/ocm/users02.dbf' size 200M;
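Before adding the datafile, you can check how much free space USERS already has (a quick check run as a DBA; adjust the tablespace name if yours differs):
-- current free space in the USERS tablespace, in MB
SELECT tablespace_name, ROUND(SUM(bytes)/1024/1024) AS mb_free
  FROM dba_free_space
 WHERE tablespace_name = 'USERS'
 GROUP BY tablespace_name;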
@@@Example: Create source_table and target_table.
Both tables have the same structure.
HR@ocm> CREATE TABLE source_table AS SELECT * FROM dba_source;
Table created.
HR@ocm> desc source_table;
 Name                    Null?    Type
 ----------------------- -------- ----------------
 OWNER                            VARCHAR2(30)
 NAME                             VARCHAR2(30)
 TYPE                             VARCHAR2(12)
 LINE                             NUMBER
 TEXT                             VARCHAR2(4000)
HR@ocm> CREATE TABLE target_table AS SELECT * FROM source_table WHERE 1!=1;
Table created.
HR@ocm> DESC target_table;
 Name                    Null?    Type
 ----------------------- -------- ----------------
 OWNER                            VARCHAR2(30)
 NAME                             VARCHAR2(30)
 TYPE                             VARCHAR2(12)
 LINE                             NUMBER
 TEXT                             VARCHAR2(4000)
HR@ocm> INSERT INTO source_table SELECT * FROM source_table;
HR@ocm> INSERT INTO source_table SELECT * FROM source_table;
HR@ocm> COMMIT;
HR@ocm> SELECT count(*) FROM source_table;
COUNT(*)
----------
1180796
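Each pipelined function and load procedure below emits two rows per source row (an 'Even Lines' and an 'Odd Lines' copy), so a complete load should put exactly twice this count into the target. A quick sanity figure:
-- expected final target row count = 2 x source rows
SELECT COUNT(*) * 2 AS expected_target_rows FROM source_table;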
@@@Prepare bigger redo logfiles.
SYS@ocm> SELECT GROUP#, MEMBERS, BYTES, STATUS, ARCHIVED FROM v$log;
    GROUP#    MEMBERS      BYTES STATUS           ARC
---------- ---------- ---------- ---------------- ---
         1          1   52428800 INACTIVE         YES
         2          1   52428800 CURRENT          NO
         3          1   52428800 INACTIVE         YES
SYS@ocm> ALTER DATABASE DROP logfile group 1;
SYS@ocm> ALTER DATABASE ADD logfile group 1
'/u01/app/oracle/oradata/ocm/redo01.log' SIZE 200M REUSE;
SYS@ocm> ALTER DATABASE DROP logfile group 3;
SYS@ocm> ALTER DATABASE ADD logfile group 3
'/u01/app/oracle/oradata/ocm/redo03.log' SIZE 200M REUSE;
SYS@ocm> SELECT GROUP#, MEMBERS, BYTES, STATUS, ARCHIVED FROM v$log;
    GROUP#    MEMBERS      BYTES STATUS           ARC
---------- ---------- ---------- ---------------- ---
         1          1  209715200 UNUSED           YES
         2          1   52428800 CURRENT          NO
         3          1  209715200 UNUSED           YES
SYS@ocm> ALTER SYSTEM switch logfile;
SYS@ocm> ALTER SYSTEM checkpoint;
SYS@ocm> ALTER DATABASE DROP logfile group 2;
SYS@ocm> ALTER DATABASE ADD logfile group 2
'/u01/app/oracle/oradata/ocm/redo02.log' SIZE 200M REUSE;
SYS@ocm> SELECT GROUP#, MEMBERS, BYTES, STATUS, ARCHIVED FROM v$log;
    GROUP#    MEMBERS      BYTES STATUS           ARC
---------- ---------- ---------- ---------------- ---
         1          1  209715200 CURRENT          NO
         2          1  209715200 UNUSED           YES
         3          1  209715200 UNUSED           YES
@@@Reduce redo generation by setting the target table to NOLOGGING.
HR@ocm> ALTER TABLE target_table NOLOGGING;
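Keep in mind that NOLOGGING only suppresses redo for direct-path operations (CTAS, INSERT /*+ APPEND */, direct-path loads); the conventional inserts used below are still fully logged. To confirm the attribute took effect (a quick check in the owning schema):
-- LOGGING should now show NO for the target table
SELECT table_name, logging FROM user_tables WHERE table_name = 'TARGET_TABLE';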
@@@Write the package specification pipe_pkg:
HR@ocm> !cat tmp.sql
DROP TYPE tab_object_t
/
CREATE TYPE tab_object_t IS OBJECT
( owner VARCHAR2(30)
, name VARCHAR2(30)
, type VARCHAR2(12)
, line NUMBER
, text VARCHAR2(4000) )
/
CREATE OR REPLACE PACKAGE pipe_pkg
IS
c_default_limit CONSTANT PLS_INTEGER := 100;
/* Create a record type using SUBTYPE ... IS ... */
SUBTYPE source_table_rec_type IS source_table%ROWTYPE;
TYPE tab_ntt IS TABLE OF tab_object_t; --output type
TYPE cur_type IS REF CURSOR --input type
RETURN source_table%ROWTYPE;
FUNCTION pipe_stocks_pl ( cur_data_in IN pipe_pkg.cur_type )
RETURN tab_ntt
PIPELINED;
FUNCTION pipe_stocks_pl_array
( cur_data_in IN pipe_pkg.cur_type
, limit_size_in IN PLS_INTEGER DEFAULT pipe_pkg.c_default_limit )
RETURN tab_ntt
PIPELINED;
FUNCTION pipe_stocks_pl_parallel
( cur_data_in IN pipe_pkg.cur_type
, limit_size_in IN PLS_INTEGER DEFAULT pipe_pkg.c_default_limit )
RETURN tab_ntt
PIPELINED
PARALLEL_ENABLE (PARTITION cur_data_in BY ANY );
PROCEDURE load_stocks_nopl_forall
( limit_size_in IN PLS_INTEGER DEFAULT pipe_pkg.c_default_limit );
PROCEDURE load_stocks_nopl_legacy;
PROCEDURE load_stocks_pl;
PROCEDURE load_stocks_pl_array;
PROCEDURE load_stocks_pl_parallel;
END;
/
HR@ocm> @tmp.sql
Type dropped.
Type created.
Package created.
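If the type or the specification does not compile cleanly when you adapt this, SQL*Plus can list the reasons:
SHOW ERRORS PACKAGE pipe_pkg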
@@@Write the package body:
HR@ocm> !cat tmpx.sql
CREATE OR REPLACE PACKAGE BODY pipe_pkg
IS
/*
Index:
part#1 dir function:
1.1 pipe_stocks_pl
1.2 pipe_stocks_pl_array
1.3 pipe_stocks_pl_parallel
part#2 dir procedure:
2.1 load_stocks_nopl_legacy
2.2 load_stocks_pl
2.3 load_stocks_pl_array
2.4 load_stocks_pl_parallel
2.5 load_stocks_nopl_forall
*/
/*
part#1 dir function:
1.1 pipe_stocks_pl
1.2 pipe_stocks_pl_array
1.3 pipe_stocks_pl_parallel
*/
-----------------------------------------------------------------------
--1.1) pipe_stocks_pl
FUNCTION pipe_stocks_pl
( cur_data_in IN pipe_pkg.cur_type )
RETURN tab_ntt
PIPELINED
IS
/* PIPE ROW must emit a SQL object type (collected via the nested table type), not a PL/SQL record type. */
target_table_rec tab_object_t := tab_object_t(null,null,null,null,null);
source_table_rec pipe_pkg.source_table_rec_type;
BEGIN
LOOP
FETCH cur_data_in INTO source_table_rec;
EXIT WHEN cur_data_in%NOTFOUND;
--Arbitrarily complex transformation logic could go here.
target_table_rec.owner := 'Even Lines '||source_table_rec.owner;
target_table_rec.name := substr(source_table_rec.name,1,20);
target_table_rec.type := substr(source_table_rec.type,1,20);
target_table_rec.line := source_table_rec.line*20;
target_table_rec.text := substr(source_table_rec.text,1,20);
PIPE ROW (target_table_rec);
--Pipe a second row, doubling the rows coming from the source table.
target_table_rec.owner := 'Odd Lines '||source_table_rec.owner;
target_table_rec.name := substr(source_table_rec.name,1,20);
target_table_rec.type := substr(source_table_rec.type,1,20);
target_table_rec.line := source_table_rec.line*20;
target_table_rec.text := substr(source_table_rec.text,1,20);
PIPE ROW (target_table_rec);
END LOOP;
CLOSE cur_data_in;
RETURN;
END pipe_stocks_pl;
--1.2) pipe_stocks_pl_array
FUNCTION pipe_stocks_pl_array
( cur_data_in IN pipe_pkg.cur_type
, limit_size_in IN PLS_INTEGER DEFAULT pipe_pkg.c_default_limit)
RETURN tab_ntt
PIPELINED
IS
TYPE source_table_aat IS TABLE OF source_table%ROWTYPE
INDEX BY PLS_INTEGER;
source_table_aa source_table_aat;
target_table_rec tab_object_t := tab_object_t(null,null,null,null,null);
BEGIN
LOOP
FETCH cur_data_in BULK COLLECT INTO source_table_aa LIMIT limit_size_in;
EXIT WHEN source_table_aa.COUNT = 0; --test COUNT, not %NOTFOUND, so the last partial batch is processed
FOR i IN 1 .. source_table_aa.COUNT
LOOP
--Arbitrarily complex transformation logic could go here.
target_table_rec.owner := 'Even Lines '||source_table_aa(i).owner;
target_table_rec.name := substr(source_table_aa(i).name,1,20);
target_table_rec.type := substr(source_table_aa(i).type,1,20);
target_table_rec.line := source_table_aa(i).line*20;
target_table_rec.text := substr(source_table_aa(i).text,1,20);
PIPE ROW (target_table_rec);
--Pipe a second row, doubling the rows coming from the source table.
target_table_rec.owner := 'Odd Lines '||source_table_aa(i).owner;
target_table_rec.name := substr(source_table_aa(i).name,1,20);
target_table_rec.type := substr(source_table_aa(i).type,1,20);
target_table_rec.line := source_table_aa(i).line*20;
target_table_rec.text := substr(source_table_aa(i).text,1,20);
PIPE ROW (target_table_rec);
END LOOP;
END LOOP;
CLOSE cur_data_in;
RETURN;
END pipe_stocks_pl_array;
--1.3) pipe_stocks_pl_parallel
FUNCTION pipe_stocks_pl_parallel
( cur_data_in IN pipe_pkg.cur_type
, limit_size_in IN PLS_INTEGER DEFAULT pipe_pkg.c_default_limit )
RETURN tab_ntt
PIPELINED
PARALLEL_ENABLE (PARTITION cur_data_in BY ANY)
IS
TYPE source_table_aat IS TABLE OF source_table%ROWTYPE
INDEX BY PLS_INTEGER;
source_table_aa source_table_aat;
target_table_rec tab_object_t := tab_object_t(null,null,null,null,null);
BEGIN
LOOP
FETCH cur_data_in BULK COLLECT INTO source_table_aa LIMIT limit_size_in;
EXIT WHEN source_table_aa.COUNT = 0; --test COUNT, not %NOTFOUND, so the last partial batch is processed
FOR i IN 1 .. source_table_aa.COUNT
LOOP
--Arbitrarily complex transformation logic could go here.
target_table_rec.owner := 'Even Lines '||source_table_aa(i).owner;
target_table_rec.name := substr(source_table_aa(i).name,1,20);
target_table_rec.type := substr(source_table_aa(i).type,1,20);
target_table_rec.line := source_table_aa(i).line*20;
target_table_rec.text := substr(source_table_aa(i).text,1,20);
PIPE ROW (target_table_rec);
--Pipe a second row, doubling the rows coming from the source table.
target_table_rec.owner := 'Odd Lines '||source_table_aa(i).owner;
target_table_rec.name := substr(source_table_aa(i).name,1,20);
target_table_rec.type := substr(source_table_aa(i).type,1,20);
target_table_rec.line := source_table_aa(i).line*20;
target_table_rec.text := substr(source_table_aa(i).text,1,20);
PIPE ROW (target_table_rec);
END LOOP;
END LOOP;
CLOSE cur_data_in;
RETURN;
END pipe_stocks_pl_parallel;
/*
part#2 dir procedure:
2.1 load_stocks_nopl_legacy
2.2 load_stocks_pl
2.3 load_stocks_pl_array
2.4 load_stocks_pl_parallel
2.5 load_stocks_nopl_forall
*/
------------------------------------------------------------------------
--2.1) load_stocks_nopl_legacy
PROCEDURE load_stocks_nopl_legacy
IS
CURSOR cur IS
SELECT owner, name, type, line, text
FROM source_table;
source_table_rec pipe_pkg.source_table_rec_type;
target_table_rec pipe_pkg.source_table_rec_type;
BEGIN
DBMS_OUTPUT.put_line('This is load_stocks_nopl_legacy.');
OPEN cur;
LOOP
FETCH cur INTO source_table_rec;
EXIT WHEN cur%NOTFOUND;
--Arbitrarily complex transformation logic could go here.
target_table_rec.owner := 'Even Lines '||source_table_rec.owner;
target_table_rec.name := substr(source_table_rec.name,1,20);
target_table_rec.type := substr(source_table_rec.type,1,20 );
target_table_rec.line := source_table_rec.line*20;
target_table_rec.text := substr(source_table_rec.text,1,20);
INSERT INTO target_table VALUES target_table_rec;
--Insert a second row, doubling the rows coming from the source table.
target_table_rec.owner := 'Odd Lines '||source_table_rec.owner;
target_table_rec.name := substr(source_table_rec.name,1,20);
target_table_rec.type := substr(source_table_rec.type,1,20 );
target_table_rec.line := source_table_rec.line*20;
target_table_rec.text := substr(source_table_rec.text,1,20);
INSERT INTO target_table VALUES target_table_rec;
IF mod(cur%ROWCOUNT, 100000)
BETWEEN 1 AND 500 THEN
COMMIT;
END IF;
END LOOP;
DBMS_OUTPUT.put_line('target table has ' || cur%ROWCOUNT*2 || ' rows inserted.');
CLOSE cur;
END load_stocks_nopl_legacy;
--2.2) load_stocks_pl
PROCEDURE load_stocks_pl
IS
BEGIN
DBMS_OUTPUT.put_line('This is load_stocks_pl.');
INSERT INTO target_table
SELECT * FROM TABLE( pipe_pkg.pipe_stocks_pl(
CURSOR(SELECT * FROM source_table)));
IF mod(SQL%ROWCOUNT, 100000)
BETWEEN 1 AND 500 THEN
COMMIT;
END IF;
DBMS_OUTPUT.put_line
('target table has '||SQL%ROWCOUNT || ' rows inserted.');
END load_stocks_pl;
--2.3) load_stocks_pl_array
PROCEDURE load_stocks_pl_array
IS
BEGIN
DBMS_OUTPUT.put_line('This is load_stocks_pl_array.');
INSERT INTO target_table
SELECT * FROM TABLE( pipe_pkg.pipe_stocks_pl_array(
CURSOR(SELECT * FROM source_table)));
IF mod(SQL%ROWCOUNT, 100000)
BETWEEN 1 AND 500 THEN
COMMIT;
END IF;
DBMS_OUTPUT.put_line
('target table has '||SQL%ROWCOUNT || ' rows inserted.');
END load_stocks_pl_array;
--2.4) load_stocks_pl_parallel
PROCEDURE load_stocks_pl_parallel
IS
BEGIN
DBMS_OUTPUT.put_line('This is load_stocks_pl_parallel.');
EXECUTE IMMEDIATE 'ALTER SESSION ENABLE PARALLEL DML';
INSERT /*+ PARALLEL(t,4) */ INTO target_table t
SELECT * FROM TABLE( pipe_pkg.pipe_stocks_pl_parallel(
CURSOR(SELECT /*+ PARALLEL(s,4) */ * FROM source_table s )));
IF mod(SQL%ROWCOUNT, 100000)
BETWEEN 1 AND 500 THEN
COMMIT;
END IF;
DBMS_OUTPUT.put_line
('target table has '||SQL%ROWCOUNT || ' rows inserted.');
END load_stocks_pl_parallel;
--2.5) load_stocks_nopl_forall
PROCEDURE load_stocks_nopl_forall
( limit_size_in IN PLS_INTEGER DEFAULT pipe_pkg.c_default_limit )
IS
CURSOR cur IS
SELECT * FROM source_table;
TYPE source_table_aat IS TABLE OF source_table%ROWTYPE
INDEX BY PLS_INTEGER;
source_table_aa source_table_aat;
target_table_aa source_table_aat;
lv_indx PLS_INTEGER;
lv_rowcounter PLS_INTEGER := 0;
BEGIN
DBMS_OUTPUT.put_line('This is load_stocks_nopl_forall.');
OPEN cur;
LOOP
FETCH cur BULK COLLECT INTO source_table_aa LIMIT limit_size_in;
EXIT WHEN source_table_aa.COUNT = 0; --test COUNT, not %NOTFOUND, so the last partial batch is processed
target_table_aa.DELETE;
FOR i IN 1 .. source_table_aa.COUNT --may be less than limit_size_in on the last batch
LOOP
--Arbitrarily complex transformation logic could go here.
lv_indx := target_table_aa.COUNT + 1;
target_table_aa(lv_indx).owner := 'Even Lines '||source_table_aa(i).owner;
target_table_aa(lv_indx).name := substr(source_table_aa(i).name,1,10);
target_table_aa(lv_indx).type := substr(source_table_aa(i).type,1,10);
target_table_aa(lv_indx).line := source_table_aa(i).line*20;
target_table_aa(lv_indx).text := substr(source_table_aa(i).text,1,20);
--Build a second row, doubling the rows coming from the source table.
lv_indx := target_table_aa.COUNT + 1;
target_table_aa(lv_indx).owner := 'Odd Lines '||source_table_aa(i).owner;
target_table_aa(lv_indx).name := substr(source_table_aa(i).name,1,15);
target_table_aa(lv_indx).type := substr(source_table_aa(i).type,1,10);
target_table_aa(lv_indx).line := source_table_aa(i).line*20;
target_table_aa(lv_indx).text := substr(source_table_aa(i).text,1,30);
END LOOP;
FORALL i IN INDICES OF target_table_aa --1 .. 2*limit_size_in
INSERT INTO target_table
VALUES target_table_aa(i);
lv_rowcounter := lv_rowcounter + SQL%ROWCOUNT;
IF mod(lv_rowcounter, 100000)
BETWEEN 50000-limit_size_in-1 AND 50000+limit_size_in+1 THEN
COMMIT;
END IF;
END LOOP;
DBMS_OUTPUT.put_line('target table has ' || lv_rowcounter || ' rows inserted.' );
CLOSE cur;
END load_stocks_nopl_forall;
END pipe_pkg;
/
HR@ocm> @tmpx.sql
Package body created.
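Before timing full loads, a quick smoke test confirms the pipelined function can be queried straight from SQL (a small sketch; the ROWNUM filter just limits the sample):
-- each source row should come back twice: one 'Even Lines' and one 'Odd Lines' row
SELECT owner, name, line
  FROM TABLE( pipe_pkg.pipe_stocks_pl(
         CURSOR( SELECT * FROM source_table WHERE ROWNUM <= 5 ) ) );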
@@@Testing:
1.. pipe_pkg.load_stocks_nopl_legacy
HR@ocm> exec pipe_pkg.load_stocks_nopl_legacy;
PL/SQL procedure successfully completed.
Elapsed: 00:01:40.47
@@@alert log info: 3 logfile switches, sequence 61 ==> 64
...
Thread 1 advanced to log sequence 61
Current log# 1 seq# 61 mem# 0: /u01/app/oracle/oradata/ocm/redo01.log
Thu Jan 10 17:39:04 2013
Thread 1 advanced to log sequence 62
Current log# 2 seq# 62 mem# 0: /u01/app/oracle/oradata/ocm/redo02.log
Thu Jan 10 17:39:32 2013
Thread 1 cannot allocate new log, sequence 63
Private strand flush not complete
Current log# 2 seq# 62 mem# 0: /u01/app/oracle/oradata/ocm/redo02.log
Thread 1 advanced to log sequence 63
Current log# 3 seq# 63 mem# 0: /u01/app/oracle/oradata/ocm/redo03.log
Thu Jan 10 17:40:04 2013
Thread 1 advanced to log sequence 64
Current log# 1 seq# 64 mem# 0: /u01/app/oracle/oradata/ocm/redo01.log
...
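The transcript does not show it, but if you repeat these tests yourself, empty the target between runs so every method starts from the same state:
-- reset the target before the next timed run
TRUNCATE TABLE target_table;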
2.. pipe_pkg.load_stocks_nopl_forall
HR@ocm> exec pipe_pkg.load_stocks_nopl_forall;
PL/SQL procedure successfully completed.
Elapsed: 00:00:17.29
@@@alert log info: 1 logfile switch, sequence 64 ==> 65
...
Thread 1 advanced to log sequence 64
Current log# 1 seq# 64 mem# 0: /u01/app/oracle/oradata/ocm/redo01.log
Thu Jan 10 17:44:02 2013
Thread 1 cannot allocate new log, sequence 65
Checkpoint not complete
Current log# 1 seq# 64 mem# 0: /u01/app/oracle/oradata/ocm/redo01.log
Thread 1 advanced to log sequence 65
Current log# 2 seq# 65 mem# 0: /u01/app/oracle/oradata/ocm/redo02.log
...
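Counting log switches in the alert log is a coarse measure. A finer comparison is to read the session's own 'redo size' statistic before and after each run (a suggested check from the loading session):
-- redo generated by the current session so far, in bytes
SELECT sn.name, ms.value
  FROM v$mystat ms
  JOIN v$statname sn ON sn.statistic# = ms.statistic#
 WHERE sn.name = 'redo size';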
3.. pipe_pkg.load_stocks_pl
HR@ocm> exec pipe_pkg.load_stocks_pl;
This is load_stocks_pl.
target table has 2361592 rows inserted.
Elapsed: 00:00:26.26
@@@alert log info: 1 logfile switch, sequence 67 ==> 68
Thu Jan 10 18:19:11 2013
Thread 1 cannot allocate new log, sequence 68
Private strand flush not complete
Current log# 1 seq# 67 mem# 0: /u01/app/oracle/oradata/ocm/redo01.log
Thread 1 advanced to log sequence 68
Current log# 2 seq# 68 mem# 0: /u01/app/oracle/oradata/ocm/redo02.log
4.. pipe_pkg.load_stocks_pl_array
HR@ocm> exec pipe_pkg.load_stocks_pl_array;
This is load_stocks_pl_array.
target table has 2361592 rows inserted.
Elapsed: 00:00:16.95
@@@alert log info: 1 logfile switch, sequence 68 ==> 69
...
Thu Jan 10 18:26:32 2013
Thread 1 advanced to log sequence 69
Current log# 3 seq# 69 mem# 0: /u01/app/oracle/oradata/ocm/redo03.log
...
5.. pipe_pkg.load_stocks_pl_parallel
<1> Run the procedure without the parallel hints.
HR@ocm> exec pipe_pkg.load_stocks_pl_parallel;
This is load_stocks_pl_parallel.
target table has 2361592 rows inserted.
PL/SQL procedure successfully completed.
Elapsed: 00:00:16.66
@@@alert log info: 1 logfile switch, sequence 69 ==> 70
...
Thu Jan 10 18:31:58 2013
Thread 1 advanced to log sequence 70
Current log# 1 seq# 70 mem# 0: /u01/app/oracle/oradata/ocm/redo01.log
Thu Jan 10 18:31:58 2013
...
<2> Run the same table function with the parallel hints.
HR@ocm> exec pipe_pkg.load_stocks_pl_parallel;
This is load_stocks_pl_parallel.
target table has 2361592 rows inserted.
PL/SQL procedure successfully completed.
Elapsed: 00:00:09.22
@@@alert log info: no logfile switch at all -- excellent!
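To verify that the final run really executed in parallel (and did not just happen to finish quickly), the loading session can check v$pq_sesstat (values depend on your parallel settings):
-- 'Queries Parallelized' / 'DML Parallelized' should be non-zero after the parallel run
SELECT statistic, last_query, session_total FROM v$pq_sesstat;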