#Find locked objects:
select p.spid,
c.object_name,
b.session_id,
b.oracle_username,
b.os_user_name
from v$process p, v$session a, v$locked_object b, all_objects c
where p.addr = a.paddr
and a.process = b.process
and c.object_id = b.object_id;
RAC:
Degree-of-parallelism parameters, e.g.
parallel_max_servers=256
parallel_min_servers=16
parallel_threads_per_cpu=2
parallel_execution_message_size=16384
Parameters for cross-node parallel execution (a session-level example follows)
instance_groups=crm,erp,oltp
parallel_instance_group=crm
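#A minimal illustration, assuming the instance groups above are configured: restrict parallel slaves for the current session to the crm group
alter session set parallel_instance_group = 'crm';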
#On Linux, use top to get the process id, then find the SQL that process is running (first 1000 characters of the SQL)
select /*+ ordered */
sql_text
from v$sqltext a
where (a.hash_value, a.address) in
(select decode(sql_hash_value, 0, prev_hash_value, sql_hash_value),
decode(sql_hash_value, 0, prev_sql_addr, sql_address)
from v$session b, v$process c
where b.paddr = c.addr
and c.spid = '&pid')
order by piece asc;
select s.username, s.sid, s.program, s.machine, s.osuser
from v$session s, v$process p
where s.paddr = p.addr
and p.spid = '&pid';
#Find the SQL associated with a specific wait event
select sql_text
from v$sqltext a, v$session b, v$session_wait c
where a.ADDRESS = b.SQL_ADDRESS
and b.sid = c.sid
and c.event = 'db file scattered read'
order by piece asc;
select sql_text
from v$sqltext a, v$session b, v$session_wait c
where a.ADDRESS = b.SQL_ADDRESS
and b.sid = c.sid
and c.event = 'read by other session'
order by piece asc;
#Find the spid for a given sid, then kill the process at the OS level (see the kill command after the query)
select p.spid from v$session s,v$process p where s.PADDR=p.ADDR and s.sid=100;
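#Then, at the OS prompt, kill the returned server process (a sketch; <spid> is the value from the query above and a dedicated server process is assumed):
kill -9 <spid>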
#Find the SQL currently running for a given sid (full SQL text)
select sql_text
from v$sqltext_with_newlines st, v$session se
where st.address = se.sql_address
and st.hash_value = se.sql_hash_value
and se.sid = 458
order by piece;
#Use wait events to find which objects the waits are concentrated on
1)
Select count(*), event
from v$session_wait
where event not in ('smon timer', 'pmon timer', 'rdbms ipc message',
'SQL*Net message from client')
group by event
order by 1 desc;
2)
Drill down further to find the cause of the buffer busy waits event
Select count(*), p1, p2, p3
from v$session_wait
where event = 'buffer busy waits'
group by p1, p2, p3;
For the buffer busy waits event:
P1 = file#
P2 = block#
P3 = id (this id indicates the reason for the wait)
Grouping by p1, p2, p3 shows which objects the buffer busy waits are concentrated on.
Metalink describes the buffer busy waits event as follows:
"If P3 shows that the 'buffer busy wait' is waiting for a block read to complete then the blocking session is likely to be waiting on an IO wait (eg: 'db file sequential read' or 'db file scattered read') for the same file# and block#."
The specific object can then be found with the following SQL.
3)
Select owner, segment_name, segment_type
from dba_extents
where file_id = P1
and P2 between block_id and block_id + blocks;
##########Operations on partitioned tables#############
1. Query the data in one partition (MAIL_BILL_200703) of partitioned table ODS_MAIL_BILL
select * from ODS_MAIL_BILL partition(MAIL_BILL_200703);
2. Truncate all data in one partition (MAIL_BILL_200703) of table ODS_MAIL_BILL
alter table ODS_MAIL_BILL truncate partition MAIL_BILL_200703;
3. Delete all data in one partition (MAIL_BILL_200703) of table ODS_MAIL_BILL
delete ODS_MAIL_BILL partition (MAIL_BILL_200703);
#Query a single partition of a partitioned table
select count(*) from RDT_P_MAIL_TRACK partition (P20080220);
#Query partition information: partition name, partition key high value, partition position, tablespace name
select partition_name, high_value, partition_position, tablespace_name
from user_tab_partitions
where table_name = 'T_PUB_JNL_SAV'
order by partition_position desc;
#Query partition information: partitioned tables, partitioning method, default tablespace
select * from user_part_tables;
#Query partition information: partition key column name and position, object type (table or index)
select * from user_part_key_columns where name='T_PUB_JNL_SAV';
#Truncate one partition of a partitioned table
alter table RDT_P_MAIL_TRACK truncate partition P20080220;
#Drop one partition of a partitioned table
alter table RDT_P_MAIL_TRACK drop partition P20080220;
#Add a partition to a partitioned table
alter table BILL_MONTHFEE_ZERO add Partition p_200409 values less than (200409) tablespace ts_ziken;
#Split one partition into two partitions
alter table bill_monthfee_zero split Partition p_200409 at (200409) into (Partition p_200409_1 tablespace ts_ziken,
Partition p_200409_2 tablespace ts_ziken_idx);
#Merge partitions
ALTER TABLE bill_monthfee_zero MERGE PARTITIONS p_200408, p_200409 INTO PARTITION p_all;
#Rename a partition
alter table bill_monthfee_zero rename Partition p_200408 to p_fee_200408;
#Move a partition to another tablespace
alter table bill_monthfee_zero move Partition p_200409 tablespace ts_ziken_01 nologging;
#EXPORT a specific partition:
exp sales/sales_password tables=RDT_P_MAIL_TRACK:P20080220 file=sales1999_q1.dmp
#IMPORT a specific partition:
imp sales/sales_password FILE =sales1999_q1.dmp tables=(RDT_P_MAIL_TRACK:P20080220) ignore=y
#Create a tablespace
CREATE TABLESPACE DATA_HIST
DATAFILE 'E:\DATA\ORC1\data_hist01.dbf' SIZE 2000M [blocksize 32k]
EXTENT MANAGEMENT LOCAL
uniform size 1M ------typically one of three uniform sizes: 128k (tables under 1G) / 1m (tables from 1G to 20G) / 8m or 16m (tables over 20G)
SEGMENT SPACE MANAGEMENT AUTO;
#Add a datafile to a tablespace
alter tablespace data add datafile 'E:\DATA\ORC1\DATA03.dbf' size 2000m;
#Relocate a datafile (with the database in mount state)
alter database rename file '/opt/oracle/test.dbf' to '/opt/oracle/oradata/conner/test.dbf';
###Whether or not the database is in archivelog mode, a datafile can be relocated by taking its tablespace offline:
1.alter tablespace xxx offline;
2.!cp /opt/oracle/test.dbf /opt/oracle/oradata/conner/test.dbf
3.alter tablespace xxx rename datafile '/opt/oracle/test.dbf' to '/opt/oracle/oradata/conner/test.dbf';
4.alter tablespace xxx online;
##Drop a cluster table
DROP CLUSTER emp_dept INCLUDING TABLES CASCADE CONSTRAINTS;
#Resize a datafile
alter database datafile '/opt/oracle/oradata/shtd/shtd01.dbf' resize 2000m;
#Recovering a lost datafile that has no backup but whose redo is available
#If the lost datafile is not a system datafile, the control file still records it, and all redo generated since the datafile was created is available, then no backup is needed: recreate the datafile from the control file and recover it.
Commands:
SQL>startup mount
SQL>Alter database create datafile 'db file name' as 'db file name' size … reuse;
SQL>recover datafile n;
or
SQL>recover datafile 'db file name';
#Lost datafile with no backup
#In this case, with no backup at all, the data in that file may simply be lost (unless a tool such as DUL is used to extract data directly from the datafile). If the datafile is not a system datafile, it can be forcibly dropped and the database opened. For example, in mount state:
Commands:--------------------------------------------------------------------------------
--Command for ARCHIVELOG mode
SQL>Alter database datafile 'file name' offline;
--Command for NOARCHIVELOG mode
SQL>Alter database datafile 'file name' offline drop;
---------------------------------------------------------------------------------
#Finally, just open the database.
#Of course, with a backup no data is lost; the datafile can simply be restored from the backup, e.g.
Commands:--------------------------------------------------------------------------------
rman> restore datafile n;
rman> recover datafile n;
---------------------------------------------------------------------------------
#Change the size of a rollback segment in 8i
alter rollback segment rbs0 offline;
drop rollback segment rbs0;
create public rollback segment rbs0 tablespace rbs storage (initial 150m);
alter rollback segment rbs0 online;
#Check which privileges a user has
select * from dba_role_privs where grantee='USERNAME';
select * from dba_sys_privs where grantee='USERNAME';
select * from dba_tab_privs where grantee='USERNAME';
#Create user test with password password and default tablespace ddd
create user test identified by password default tablespace ddd quota unlimited on ddd;
#Grant the connect role to user test
grant connect to test;
#Revoke the connect role from user test
revoke connect from test;
#Drop user test together with all of its tables, indexes, views and other objects
drop user test cascade;
#Check tablespace usage (including temporary tablespaces)
set lin 160
set pagesize 9999
col tablespace_name for a20
col ext_manag for a10
col size_mb for a20
col used_mb for a20
col used_per for a10
SELECT d.status status,
d.tablespace_name tablespace_name,
d.contents type,
d.extent_management ext_manag,
TO_CHAR(NVL(a.bytes / 1024 / 1024, 0), '99,999,990.900') size_mb,
TO_CHAR(NVL(a.bytes - NVL(f.bytes, 0), 0) / 1024 / 1024,'99,999,990.900') used_mb,
TO_CHAR(NVL((a.bytes - NVL(f.bytes, 0)) / a.bytes * 100, 0),'990.00') used_per
FROM sys.dba_tablespaces d,
(select tablespace_name, sum(bytes) bytes
from dba_data_files
group by tablespace_name) a,
(select tablespace_name, sum(bytes) bytes
from dba_free_space
group by tablespace_name) f
WHERE d.tablespace_name = a.tablespace_name(+)
AND d.tablespace_name = f.tablespace_name(+)
AND NOT
(d.extent_management like 'LOCAL' AND d.contents like 'TEMPORARY')
UNION ALL
SELECT d.status status,
d.tablespace_name tablespace_name,
d.contents type,
d.extent_management ext_manag,
TO_CHAR(NVL(a.bytes / 1024 / 1024, 0), '99,999,990.900') size_mb,
TO_CHAR(NVL(t.bytes, 0) / 1024 / 1024, '99,999,990.900') used_mb,
TO_CHAR(NVL(t.bytes / a.bytes * 100, 0), '990.00') used_per
FROM sys.dba_tablespaces d,
(select tablespace_name, sum(bytes) bytes
from dba_temp_files
group by tablespace_name) a,
(select tablespace_name, sum(bytes_cached) bytes
from v$temp_extent_pool
group by tablespace_name) t
WHERE d.tablespace_name = a.tablespace_name(+)
AND d.tablespace_name = t.tablespace_name(+)
AND d.extent_management like 'LOCAL'
AND d.contents like 'TEMPORARY';
#Total size of each tablespace
SELECT tablespace_name, SUM(BYTES) / 1024 / 1024 sizemb
FROM dba_data_files
GROUP BY tablespace_name;
#Tablespace usage percentage:
SELECT a.tablespace_name,
to_char(NVL((a.BYTES - NVL(f.BYTES, 0)), 0) * 100 / a.bytes, '90.99') || '%' used_rate
FROM (SELECT tablespace_name, SUM(BYTES) BYTES
FROM dba_data_files
GROUP BY tablespace_name) a,
(SELECT tablespace_name, SUM(BYTES) BYTES
FROM dba_free_space
GROUP BY tablespace_name) f
WHERE a.tablespace_name = f.tablespace_name(+)
union all
select t.tablespace_name,
to_char(t.user_bytes * 100 / t.bytes, '90.99') || '%' used_rate
from (select tablespace_name, sum(user_bytes) user_bytes, sum(bytes) bytes
from dba_temp_files
group by tablespace_name) t;
#Size of every table owned by user USERNAME
SELECT owner, segment_name, segment_type, BYTES / 1024 / 1024 sizemb
FROM dba_segments
WHERE owner = 'USERNAME'
AND segment_type = 'TABLE';
#Top SQL
SELECT username, sql_text, sofar, totalwork
FROM v$sql, v$session_longops
WHERE sql_address=address
AND sql_hash_value=hash_value
ORDER BY address, hash_value, child_number;
select *
from (select sql_text,
disk_reads / greatest(executions, 1) D,
disk_reads,
buffer_gets,
module,
executions
from v$sql
order by D desc)
where rownum <= 10;
select *
from (select sql_text,
buffer_gets / greatest(executions, 1) B,
disk_reads,
buffer_gets,
module,
executions
from v$sql
order by B desc)
where rownum <= 10;
#SQL Trace
select sid,serial#,username from v$session;
#Suppose the session's sid is 7 and its serial# is 284
exec dbms_system.set_sql_trace_in_session(7,284,true)
#Tracing has started...
#Wait a while, then check whether this sid, serial#, username still exists
#Then stop tracing
exec dbms_system.set_sql_trace_in_session(7,284,false)
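#The trace file is written under user_dump_dest; a quick way to find that directory (the exact file name varies by instance name and spid):
select value from v$parameter where name = 'user_dump_dest';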
#Format the trace file
tkprof sungnie_ora_15937.trc tracefile
#View the formatted file
vi tracefile.prf
###LogMiner on 9i, shown as run in PL/SQL Developer
begin
DBMS_LOGMNR_D.BUILD(OPTIONS => DBMS_LOGMNR_D.STORE_IN_REDO_LOGS);
end;
begin
dbms_logmnr.add_logfile(logfilename => '/opt/oracle/oradata/shzz/archive/1_68.dbf',
options => dbms_logmnr.new);
end;
begin
dbms_logmnr.add_logfile(logfilename => '/opt/oracle/oradata/shzz/archive/1_69.dbf',
options => dbms_logmnr.addfile);
end;
............
begin
dbms_logmnr.start_logmnr(options => dbms_logmnr.dict_from_redo_logs +
dbms_logmnr.committed_data_only);
end;
select * from v$logmnr_contents;
begin
dbms_logmnr.end_logmnr;
end;
###LogMiner on 8i, shown as run in PL/SQL Developer
#For example, to set UTL_FILE_DIR to use /opt/oracle/coohoo as the directory where the dictionary file is placed,
#enter the following in the init.ora file:
UTL_FILE_DIR = /opt/oracle/coohoo
begin
dbms_logmnr_d.build('diction.ora', '/opt/oracle/coohoo');
end;
begin
dbms_logmnr.add_logfile(logfilename => '/opt/oracle/admin/shzz/arch/arch_1_365.arc',
options => dbms_logmnr.new);
end;
begin
dbms_logmnr.add_logfile(logfilename => '/opt/oracle/admin/shzz/arch/arch_1_366.arc',
options => dbms_logmnr.addfile);
end;
............
begin
dbms_logmnr.start_logmnr(starttime => to_date('2006-4-5 17:55:02',
'yyyy-mm-dd hh24:mi:ss'),
endtime => to_date('2006-4-5 17:55:05',
'yyyy-mm-dd hh24:mi:ss'),
dictfilename => '/opt/oracle/coohoo/diction.ora');
end;
select * from v$logmnr_contents;
####Find, in /tmp and all its subdirectories, the names of files whose contents contain the string test####
#!/usr/bin/ksh
# only regular files, so cat does not fail on directories
for FILE in `find /tmp -type f`
do
  if test "`cat "$FILE" | grep -c test`" -gt "0"
  then
    echo "$FILE"
  fi
done
##Operations on online redo logs
alter database drop logfile member '/home5/yzstb/yz_redolog5b';
alter database add logfile member '/oracle/yz/u01/oradata/yz_redolog4b' to group 4;
alter database add logfile group 6 '/oracle/yz/u01/oradata/yz_redolog6a' size 100m;
alter database drop logfile group 1;
#Shrink a datafile
ALTER DATABASE DATAFILE '/opt/oracle/oradata/shtd/shtd01.dbf' RESIZE 1000M;
#Create a job
variable jobno number;
begin
dbms_job.submit(job=>:jobno,
what=> 'p_atm_init;',
next_date=>to_date('08-06-2011 10:59:00','dd-mm-yyyy hh24:mi:ss'),
interval=>'trunc(sysdate)+1+1/24',
instance=>2);
commit;
end;
/
print jobno
#If the code in what contains quotes, remember to double the single quotes, e.g.:
variable jobno number;
begin
dbms_job.submit(job=>:jobno,
what=> 'PKG_ATM_CHECK.p_check_qukuan(to_char(sysdate-7,''yyyymmdd''),to_char(sysdate-1,''yyyymmdd''));',
next_date=>to_date('08-06-2011 20:00:00','dd-mm-yyyy hh24:mi:ss'),
interval=>'trunc(sysdate)+1+8/24',
instance=>2);
commit;
end;
/
print jobno
#Parameters of the submit procedure (a sketch using no_parse and force follows this list):
job: the job number; an OUT parameter, so an existing bind variable must be supplied when calling submit.
what: the code the job executes, usually a stored procedure.
next_date: the first time the job runs; defaults to the current time if not specified.
interval: the interval between runs; the default is null, meaning the code in what runs only once, at next_date.
no_parse: defaults to false; when false, the code in what is parsed when the job is submitted; when true, it is parsed the first time the job runs.
instance: specifies which database instance runs the job.
force: when true, instance may be any positive integer; when false, the instance specified by instance must be running.
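#A minimal sketch showing no_parse and force together (the procedure p_demo is a placeholder):
variable jobno number;
begin
  dbms_job.submit(job       => :jobno,
                  what      => 'p_demo;',
                  next_date => sysdate,
                  interval  => 'sysdate + 1/24',
                  no_parse  => true,
                  instance  => 1,
                  force     => true);
  commit;
end;
/
print jobno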
#Stop a job (mark it broken)
exec dbms_job.broken(1,true);
commit;
#Restart a job (clear the broken flag)
exec dbms_job.broken(1,false);
commit;
#Remove a job
exec dbms_job.remove(:job);
commit;
#Run a job immediately
exec dbms_job.run(:jobno);
commit;
#Modify a job
execute dbms_job.change(186,null,null,'sysdate+3');
execute dbms_job.change(186,'scott.test(update);',null,null);
##Or, written this way, to run at a fixed time every day (here 9:30):
begin
sys.dbms_job.change(job => 25,
what => 'P_UpdateXbdGazInfo;',
next_date => to_date('2009-06-23 09:30:00',
'yyyy-mm-dd hh24:mi:ss'),
interval => 'trunc(sysdate+1, ''hh24'') + 1/48');
commit;
end;
/
#In RAC, make job 184 run on node 2
exec dbms_job.instance(184,2);
commit;
####Data dictionary (a sample query follows the column list below)
DBA_JOBS
===========================================
Column Type Description
JOB NUMBER Unique identifier of the job
LOG_USER VARCHAR2(30) User who submitted the job
PRIV_USER VARCHAR2(30) User whose privileges the job runs with
SCHEMA_USER VARCHAR2(30) Schema used to parse the job
LAST_DATE DATE Last time the job ran successfully
LAST_SEC VARCHAR2(8) Hour, minute and second of last_date, in HH24:MI:SS format
THIS_DATE DATE Start time of the currently running job; null if the job is not running
THIS_SEC VARCHAR2(8) Hour, minute and second of this_date, in HH24:MI:SS format
NEXT_DATE DATE Next scheduled run time of the job
NEXT_SEC VARCHAR2(8) Hour, minute and second of next_date, in HH24:MI:SS format
TOTAL_TIME NUMBER Total elapsed run time of the job, in seconds
BROKEN VARCHAR2(1) Flag; Y means the job is broken and will not run again
INTERVAL VARCHAR2(200) Expression used to compute the next run time
FAILURES NUMBER Number of consecutive failed runs
WHAT VARCHAR2(2000) PL/SQL block the job executes
CURRENT_SESSION_LABEL RAW MLSLABEL Trusted Oracle session label of the job
CLEARANCE_HI RAW MLSLABEL Highest Trusted Oracle clearance of the job
CLEARANCE_LO RAW MLSLABEL Lowest Trusted Oracle clearance of the job
NLS_ENV VARCHAR2(2000) NLS session settings the job runs with
MISC_ENV RAW(32) Other session parameters the job runs with
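#A quick look at scheduled jobs using the columns above (a simple example query):
select job, what, next_date, interval, broken, failures from dba_jobs;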
Description INTERVAL value (each can be sanity-checked as shown below)
Every day at midnight 'TRUNC(SYSDATE + 1)'
Every day at 8:30 AM 'TRUNC(SYSDATE + 1) + (8*60+30)/(24*60)'
Every Tuesday at noon 'NEXT_DAY(TRUNC(SYSDATE), ''TUESDAY'') + 12/24'
First day of every month at midnight 'TRUNC(LAST_DAY(SYSDATE) + 1)'
Last day of every quarter at 11 PM 'TRUNC(ADD_MONTHS(SYSDATE + 2/24, 3), ''Q'') - 1/24'
Every Saturday and Sunday at 6:10 AM 'TRUNC(LEAST(NEXT_DAY(SYSDATE, ''SATURDAY''), NEXT_DAY(SYSDATE, ''SUNDAY''))) + (6*60+10)/(24*60)'
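#Each expression can be evaluated directly against dual, e.g. the 8:30 AM entry:
select TRUNC(SYSDATE + 1) + (8*60+30)/(24*60) next_run from dual;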
############Testing disk read/write speed on Linux & Unix:
#Write performance: create a 4G file and use time to measure how long it takes
/usr/bin/time dd if=/dev/zero of=./testfile bs=16384 count=250000
##Create a 2G file
/usr/bin/time dd if=/dev/zero of=./testfile bs=16384 count=128000
#Read performance: read back the 4G file just created and measure the time
/usr/bin/time dd if=./testfile of=/dev/null bs=16384
####Script to recreate the control file:
####Note: 'yz', the character set, and the logfile and datafile entries must match the actual database
create controlfile reuse database 'yz' resetlogs noarchivelog
maxlogfiles 64
maxlogmembers 2
maxdatafiles 254
maxinstances 32
maxloghistory 113
logfile
group 1 '/oracle/yz/u01/oradata/yz_redolog1a' size 20972032,
group 2 '/oracle/yz/u01/oradata/yz_redolog2a' size 20972032,
group 3 '/oracle/yz/u01/oradata/yz_redolog3a' size 20972032,
group 4 '/oracle/yz/u01/oradata/yz_redolog4a' size 20972032,
group 5 '/oracle/yz/u01/oradata/yz_redolog5a' size 20972032
datafile
'/oracle/yz/u01/oradata/yz_system',
'/oracle/yz/u01/oradata/yz_tool',
'/oracle/yz/u01/oradata/yz_rbs1',
'/oracle/yz/u01/oradata/yz_temp1',
'/oracle/yz/u01/oradata/yz_basic',
'/oracle/yz/u01/oradata/yz_basic_inx',
'/oracle/yz/u01/oradata/yz_flow1',
'/oracle/yz/u01/oradata/yz_flow1_inx',
'/oracle/yz/u01/oradata/yz_flow2',
'/oracle/yz/u01/oradata/yz_flow2_inx',
'/oracle/yz/u01/oradata/yz_rbs2',
'/oracle/yz/u01/oradata/yz_rbs3',
'/oracle/yz/u01/oradata/yz_flow2_inx01',
'/oracle/yz/u01/oradata/yz_flow2_02',
'/oracle/yz/u01/oradata/yz_flow2_03',
'/oracle/yz/u01/oradata/yz_flow2_inx02'
CHARACTER SET ZHS16CGB231280; --------->this line is sometimes not needed
###Handy uses of to_char
#What day of the year is today?
SELECT TO_CHAR(SYSDATE,'DDD') From Dual;
#What week of the year is today?
SELECT TO_CHAR(SYSDATE,'IW') From Dual;
#What day of the week is today?
select to_char(sysdate,'day') from dual;
#How many days are in this month?
SELECT to_char(last_day(SYSDATE),'dd') days FROM dual;
#How many days are in this year?
select add_months(trunc(sysdate,'year'), 12) - trunc(sysdate,'year') from dual;
#What is the date of next Monday?
SELECT Next_day(SYSDATE,'monday') FROM dual;
###The date language can be set beforehand, e.g.
ALTER SESSION SET NLS_DATE_LANGUAGE='AMERICAN';
##Quickly kill a database session
SQL> select sid,serial#,username from v$session where sid=154;
SID SERIAL# USERNAME
---------- ---------- ------------------------------
154 56090 SCOTT
Kill the session in the database again, this time with the immediate option:
SQL> alter system kill session '154,56090' immediate;
System altered.
##An efficient way to delete duplicate rows
##Table demo was built by repeatedly copying dba_objects; it has about 880,000 rows, of which 27,323 are distinct, and it has no index
#Method 1: delete from demo a where a.rowid <> (select max(rowid) from demo b where
b.object_id=a.object_id);
Elapsed time: several hours or more
#Method 2: delete from demo where rowid in
(select rid from
(select rowid rid,row_number() over(partition by object_id,object_name order by rowid) rn
from demo)
where rn <> 1 );
Elapsed time: about 30 seconds (object_id, object_name here means the duplicate key spans two columns)
#Method 3: create table demo2 as
select object_id,owner... from
(select demo.*,row_number() over(partition by object_id order by rowid) rn from demo)
where rn = 1;
truncate table demo; insert into demo select * from demo2; drop table demo2;
#Total elapsed time: about 10 seconds; suited to large data volumes and generates less undo;
#######Two common ways to adjust the SCN:
1. Via immediate trace name (with the database open)
alter session set events 'IMMEDIATE trace name ADJUST_SCN level x';
2. Via event 10015 (when the database cannot be opened, in mount state)
alter session set events '10015 trace name adjust_scn level x';
Note: level 1 advances the SCN by 1 billion (1024*1024*1024). Level 1 is usually enough, but the level can be adjusted as needed, e.g. as shown below.
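#For example, to advance the SCN by roughly 2 billion with the open-database variant (a sketch; use with care):
alter session set events 'IMMEDIATE trace name ADJUST_SCN level 2';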
#######
##Operations on a standby DB###
recover standby database;
alter database mount standby database;
alter database recover managed standby database disconnect;
alter database recover managed standby database cancel;
alter database activate physical standby database skip standby logfile;
####Operations on a RAC DB
#A 9i RAC is already set up. The archive log path on both nodes is currently /arch1/racdb, and there is also a shared OCFS filesystem mounted at /arch2. I want the two nodes' archive logs to go to /arch1/racdb and /arch2/racdb respectively. But when I ran alter system set log_archive_dest_1="LOCATION=/arch1/racdb" on instance1, the parameter changed on both instances.
#How can the two instances use different archive destinations? (Both nodes already have /arch1 and /arch2 mounted; the per-node setting is shown below.)
alter system set log_archive_dest_1="LOCATION=/arch1/racdb" sid='rac1';
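#And, assuming the second instance is named rac2, point it at the shared path:
alter system set log_archive_dest_1="LOCATION=/arch2/racdb" sid='rac2';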
##############Damaged online redo log#####
alter database clear logfile group n;
alter database clear unarchived logfile group n;
#########Analyze all tables and indexes under users mes1 and mes4 at the same time
begin
dbms_stats.gather_schema_stats(ownname=> 'MES1' , cascade=> TRUE);
dbms_stats.gather_schema_stats(ownname=> 'MES4' , cascade=> TRUE);
end;
/
#########Analyze a specific table including its indexes, with a 20% sample and parallel degree 4
exec dbms_stats.gather_table_stats(ownname=>'ABUSER',tabname=>'PUB_JNL_SAV',estimate_percent=>20,degree=>4,cascade=>true);
exec dbms_stats.gather_table_stats(ownname=>'ABUSER',tabname=>'TCS_INDPTY_AGMT_RELA_H',estimate_percent=>20,degree=>4,cascade=>true);
#########Restore archivelogs from an RMAN backup set
run{
allocate channel t1 type 'SBT_TAPE';
restore archivelog from logseq=xxx until logseq=xxx thread 1;
release channel t1;
}
###############When an undo tablespace cannot be taken offline, use the following SQL to find the sessions using it
SELECT d.sid, d.serial#
FROM v$rollstat a, v$rollname b, v$transaction c, v$session d
WHERE a.USN = b.usn
AND a.STATUS <> 'ONLINE'
AND c.XIDUSN = a.USN
AND c.ADDR = d.TADDR;
#############Measure the buffer cache hit ratio:
select 1 - ((physical.value - direct.value - lobs.value) / logical.value) "Buffer Cache Hit Ratio"
from V$SYSSTAT physical,
V$SYSSTAT direct,
V$SYSSTAT lobs,
V$SYSSTAT logical
where physical.name = 'physical reads'
and direct.name = 'physical reads direct'
and lobs.name = 'physical reads direct (lob)'
And logical.name = 'session logical reads';
############Check the block size of a Linux filesystem; the default is usually 4096
dumpe2fs -h /dev/sda5 |grep -F "Block size"
################Steps to switch over a standby DB (repeatable any number of times)
check 2 db:
select max(sequence#) from v$log_history;
PRIMARY:
select switchover_status from v$database;
if display:SESSIONS ACTIVE
alter database commit to switchover to physical standby with session shutdown;
if display:TO STANDBY
alter database commit to switchover to physical standby;
shutdown immediate;
startup nomount;
alter database mount standby database;
select database_role from v$database;
display: PHYSICAL STANDBY
alter database recover managed standby database disconnect from session;
STANDBY:
select switchover_status from v$database;
display: SESSIONS ACTIVE
alter database commit to switchover to primary with session shutdown;
shutdown immediate;
startup
select database_role from v$database;
if display: PRIMARY then successful!!
Now change ip and listener.ora and tnsnames.ora and so on...
################Steps to switch over a standby DB (repeatable any number of times)
################Table analysis
begin
dbms_stats.gather_schema_stats(ownname => 'AIRPORT',
options => 'GATHER',
estimate_percent => 20,
method_opt => 'for table for all indexes',
degree => 4,
cascade => true);
end;
/
begin
DBMS_STATS.GATHER_TABLE_STATS(ownname=>'COOH',
tabname=>'TEST_HISTO',
estimate_percent=>30,
method_opt=>'for columns object_id size 18',
cascade=>true,
degree=>4);
end;
/
begin
DBMS_STATS.GATHER_TABLE_STATS(ownname => 'AIRPORT',
tabname => 'tmailflow',
estimate_percent => 20,
method_opt => 'for all indexes',
cascade => true,
degree => 4);
end;
/
begin
dbms_stats.delete_table_stats(ownname => 'AIRPORT', tabname => 'tmailflow');
end;
/
begin
dbms_stats.delete_index_stats(ownname => 'AIRPORT',
indname => 'I_TMAILFLOW_SERIAL_ID');
end;
/
begin
DBMS_STATS.GATHER_INDEX_STATS(ownname => 'AIRPORT',
indname => 'PK_TMAILFLOW',
estimate_percent => 30,
degree => 4);
end;
/
begin
DBMS_STATS.SET_INDEX_STATS(ownname => 'AIRPORT',
indname => 'PK_TMAILFLOW',
clstfct => 164597);
end;
/
begin
dbms_stats.delete_column_stats(ownname => 'AIRPORT',
TABNAME => 'tmailflow',
COLNAME => 'mailid');
end;
/
#############################Notes###########################################
To help generate sensible histograms, Oracle uses the method_opt parameter of dbms_stats;
method_opt also has important newer options: skewonly, repeat and auto (see the sketch after these notes);
By default DBMS_STATS does not analyze indexes because cascade defaults to FALSE; set it to TRUE explicitly;
It cannot collect CHAINED ROWS or CLUSTER TABLE information; those two still require the ANALYZE statement;
Using estimate_percent => dbms_stats.auto_sample_size may raise ORA-01555;
switching to estimate_percent => 5 avoids that error.
#############################Notes############################################
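#A minimal sketch of the skewonly option mentioned above (reusing the AIRPORT.tmailflow example from this section):
exec dbms_stats.gather_table_stats(ownname=>'AIRPORT',tabname=>'tmailflow',method_opt=>'for all columns size skewonly',cascade=>true);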
#How to create a db link:
-- Create a database link using a SID
create database link TWOLAN
connect to JK_BFJ identified by POST
using '(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=10.194.128.75)(PORT=1521)))(CONNECT_DATA=(SID=ora7)(SERVER=DEDICATED)))';
-- Create a database link using a SERVICE_NAME
create database link TIAOMA
connect to TMHUSER identified by TMHTDXT
using '(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=TCP)(HOST=10.194.128.28)(PORT=1521)))(CONNECT_DATA=(SERVICE_NAME=orc1)))';
#Parallel execution statements
create table c_district_backup parallel (degree 4) as select * from c_district;
insert /*+ parallel(xxxx,4) */ into xxxx select /*+ parallel(a) */ * from xxx a; --this statement contains two hints, but only the hint before "into" takes effect
update /*+ PARALLEL (costs,4) */ costs set ......;
insert /*+ APPEND PARALLEL (COST_SUMMARY,4) */
into COST_SUMMARY
(Prod_Id, Time_Id, Cost)
select /*+ PARALLEL (COSTS, 6) */
Prod_Id, Time_Id, SUM(Unit_Cost) Cost
from COSTS
group by Prod_Id, Time_Id;
#Paginate a query result, for example to display rows 2 through 3:
SELECT *
FROM (SELECT A.*, ROWNUM RN FROM (SELECT * FROM test) A WHERE ROWNUM <= 3)
WHERE RN >= 2;
#How to get the current SCN
--oracle 10g:
select current_scn from v$database;
--oracle 9i
select dbms_flashback.get_system_change_number from dual;
--before oracle 9i
select max(ktuxescnw*power(2,32)+ktuxescnb) from x$ktuxe;