Thursday, January 31, 2013

Golden Gate pre-replication check

-- Do a complete pre-check of a schema for Golden Gate replication.
-- This script spools the output to a file named schemaCheckOracle.<schema_name>.out
-- Oracle Support document ID 1296168.1
-- Example of running the script:
-- sqlplus / @schemaCheckOracle.sql


set null "NULL VALUE" set feedback off set heading off set linesize 132 set pagesize 9999 set echo off set verify off col table_name for a30 col column_name for a30 col data_type for a15 col object_type for a20 col constraint_type_desc for a30 --spool schemaCheckOracle.&&schema_name.out ACCEPT schema_name char prompt 'Enter the Schema Name > ' variable b0 varchar2(50) exec :b0 := upper('&schema_name'); spool schemaCheckOracle.&&schema_name..out SET Heading off SELECT '------ System Info for: '||:b0 FROM dual; set heading on select to_char(sysdate, 'MM-DD-YYYY HH24:MI:SS') "DateTime: " from dual / select banner from v$version / SET Heading off SELECT '------ Database Level Supplemental Log Check - 9i and 10g: ' FROM dual; SET Heading on SELECT SUPPLEMENTAL_LOG_DATA_MIN MIN, SUPPLEMENTAL_LOG_DATA_PK PK, SUPPLEMENTAL_LOG_DATA_UI UI FROM V$DATABASE / select name, log_mode "LogMode", supplemental_log_data_min "SupLog: Min", supplemental_log_data_pk "PK", supplemental_log_data_ui "UI", force_logging "Forced", supplemental_log_data_fk "FK", supplemental_log_data_all "All", to_char(created, 'MM-DD-YYYY HH24:MI:SS') "Created" from v$database / select platform_name from v$database / set heading off SELECT '------ Objects stored in Tablespaces with Compression are not supported in the current release of OGG ' FROM dual; SET Heading on select TABLESPACE_NAME, DEF_TAB_COMPRESSION from DBA_TABLESPACES where DEF_TAB_COMPRESSION <> 'DISABLED'; set heading off SELECT '------ Distinct Object Types and their Count in the Schema: '||:b0 FROM dual; SET Heading on SELECT object_type, count(*) total FROM all_objects WHERE OWNER = :b0 GROUP BY object_type / SET Heading off SELECT '------ Distinct Column Data Types and their Count in the Schema: '||:b0 FROM dual; SET Heading on SELECT data_type, count(*) total FROM all_tab_columns WHERE OWNER = :b0 GROUP BY data_type / SET Heading off SELECT '------ Tables With No Primary Key or Unique Index in the Schema: '||:b0 FROM dual; SET Heading on SELECT distinct(table_name) FROM all_tables WHERE owner = :b0 MINUS (SELECT obj1.name FROM SYS.user$ user1, SYS.user$ user2, SYS.cdef$ cdef, SYS.con$ con1, SYS.con$ con2, SYS.obj$ obj1, SYS.obj$ obj2 WHERE user1.name = :b0 AND cdef.type# = 2 AND con2.owner# = user2.user#(+) AND cdef.robj# = obj2.obj#(+) AND cdef.rcon# = con2.con#(+) AND obj1.owner# = user1.user# AND cdef.con# = con1.con# AND cdef.obj# = obj1.obj# UNION SELECT idx.table_name FROM all_indexes idx WHERE idx.owner = :b0 AND idx.uniqueness = 'UNIQUE') / SET Heading off SELECT '------ Tables with no PK or UI and rowsize > 1M in the Schema: '||:b0 FROM dual; SET Heading on SELECT distinct(table_name) FROM all_tab_columns WHERE owner = :b0 group by table_name HAVING sum(data_length) > 1000000 MINUS (SELECT obj1.name FROM SYS.user$ user1, SYS.user$ user2, SYS.cdef$ cdef, SYS.con$ con1, SYS.con$ con2, SYS.obj$ obj1, SYS.obj$ obj2 WHERE user1.name = :b0 AND cdef.type# = 2 AND con2.owner# = user2.user#(+) AND cdef.robj# = obj2.obj#(+) AND cdef.rcon# = con2.con#(+) AND obj1.owner# = user1.user# AND cdef.con# = con1.con# AND cdef.obj# = obj1.obj# UNION SELECT idx.table_name FROM all_indexes idx WHERE idx.owner = :b0 AND idx.uniqueness = 'UNIQUE') / set heading off SELECT '------ Tables with NOLOGGING setting ' FROM dual; SELECT '------ This may cause problems with missing data down stream. 
Schema: ' ||:b0 FROM dual; set heading on select owner, table_name, ' ' "PARTITION_NAME", ' ' "PARTITIONING_TYPE", logging from DBA_TABLES where logging <> 'YES' and OWNER = :b0 union select owner, table_name, ' ', partitioning_type, DEF_LOGGING "LOGGING" from DBA_part_tables where DEF_LOGGING != 'YES' and owner = :b0 union select table_owner, table_name, PARTITION_NAME, ' ', logging from DBA_TAB_PARTITIONS where logging <> 'YES' and table_owner = :b0 union select table_owner, table_name, PARTITION_NAME, ' ', logging from DBA_TAB_SUBPARTITIONS where logging <> 'YES' and table_owner <> :b0 ; set heading off SELECT '------ Tables with Deferred constraints.Deferred constraints may cause TRANDATA to chose an incorrect Key ' FROM dual; SELECT '------ Tables with Deferred constraints should be added using KEYCOLS in the trandata statement. Schema: ' ||:b0 FROM dual; set heading on SELECT c.TABLE_NAME, c.CONSTRAINT_NAME, c.CONSTRAINT_TYPE, c.DEFERRABLE, c.DEFERRED, c.VALIDATED, c.STATUS, i.INDEX_TYPE, c.INDEX_NAME, c.INDEX_OWNER FROM dba_constraints c, dba_indexes i WHERE i.TABLE_NAME = c.TABLE_NAME AND i.OWNER = c.OWNER AND c.owner = :b0 AND c.DEFERRED = 'DEFERRED'; set heading off SELECT '------ Tables Defined with Rowsize > 2M in the Schema: '||:b0 FROM dual; SET Heading on SELECT table_name, sum(data_length) row_length_over_2M FROM all_tab_columns WHERE OWNER = :b0 GROUP BY table_name HAVING sum(data_length) > 2000000 / SET Heading off SELECT '------ Tables that will Fail Add Trandata (Only an issue for Oracle versions below Oracle 10G) in the Schema: '||:b0 FROM dual; SET Heading on SELECT distinct(table_name) FROM dba_tab_columns WHERE owner = :b0 AND column_id > 32 AND table_name in (SELECT distinct(table_name) FROM all_tables WHERE owner = :b0 MINUS (SELECT obj1.name FROM SYS.user$ user1, SYS.user$ user2, SYS.cdef$ cdef, SYS.con$ con1, SYS.con$ con2, SYS.obj$ obj1, SYS.obj$ obj2 WHERE user1.name = :b0 AND cdef.type# = 2 AND con2.owner# = user2.user#(+) AND cdef.robj# = obj2.obj#(+) AND cdef.rcon# = con2.con#(+) AND obj1.owner# = user1.user# AND cdef.con# = con1.con# AND cdef.obj# = obj1.obj# UNION SELECT idx.table_name FROM all_indexes idx WHERE idx.owner = :b0 AND idx.uniqueness = 'UNIQUE')) / SET Heading off SELECT '------ Tables With CLOB, BLOB, LONG, NCLOB or LONG RAW Columns in the Schema: '||:b0 FROM dual; SET Heading on SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE FROM all_tab_columns WHERE OWNER = :b0 AND data_type in ('CLOB', 'BLOB', 'LONG', 'LONG RAW', 'NCLOB') / SET Heading off SELECT '------ Tables With Columns of UNSUPPORTED Datatypes in the Schema: '||:b0 FROM dual; SET Heading on SELECT TABLE_NAME, COLUMN_NAME, DATA_TYPE FROM all_tab_columns WHERE OWNER = :b0 AND (data_type in ('ORDDICOM', 'BFILE', 'TIMEZONE_REGION', 'BINARY_INTEGER', 'PLS_INTEGER', 'UROWID', 'URITYPE', 'MLSLABEL', 'TIMEZONE_ABBR', 'ANYDATA', 'ANYDATASET', 'ANYTYPE') or data_type like 'INTERVAL%') / SET Heading off SELECT '----- Tables with Compressed data is not supported - in the Schema: '||:b0 FROM dual; SET Heading on SELECT TABLE_NAME, COMPRESSION FROM all_all_tables WHERE OWNER = :b0 AND (COMPRESSION = 'ENABLED'); SELECT TABLE_NAME, COMPRESSION FROM ALL_TAB_PARTITIONS WHERE TABLE_OWNER = :b0 AND (COMPRESSION = 'ENABLED'); SET Heading off SELECT '----- Cluster (DML and DDL supported for 9i or later) or Object Tables (DML supported for 10G or later, no DDL) - in the Schema: '||:b0 FROM dual; SET Heading on SELECT TABLE_NAME, CLUSTER_NAME, TABLE_TYPE FROM all_all_tables WHERE OWNER = :b0 AND (cluster_name 
is NOT NULL or TABLE_TYPE is NOT NULL) / SET Heading off SELECT '------ IOT (Fully support for Oracle 10GR2 (with or without overflows) using GGS 10.4 and higher) - in the Schema: '||:b0 FROM dual; SET Heading on SELECT TABLE_NAME, IOT_TYPE, TABLE_TYPE FROM all_all_tables WHERE OWNER = :b0 AND (IOT_TYPE is not null or TABLE_TYPE is NOT NULL) / SET Heading off SELECT '------ Tables with Domain or Context Indexes - in the Schema: '||:b0 FROM dual; SET Heading on SELECT TABLE_NAME, index_name, index_type FROM dba_indexes WHERE OWNER = :b0 and index_type = 'DOMAIN' / SET Heading off SELECT '------ Types of Constraints on the Tables in the Schema: '||:b0 FROM dual; SET Heading on SELECT DECODE(constraint_type,'P','PRIMARY KEY','U','UNIQUE', 'C', 'CHECK', 'R', 'REFERENTIAL') constraint_type_desc, count(*) total FROM all_constraints WHERE OWNER = :b0 GROUP BY constraint_type / SET Heading off SELECT '------ Cascading Deletes on the Tables in the Schema: '||:b0 FROM dual; SET Heading on SELECT table_name, constraint_name FROM all_constraints WHERE OWNER = :b0 and constraint_type = 'R' and delete_rule = 'CASCADE' / SET Heading off SELECT '------ Tables Defined with Triggers in the Schema: '||:b0 FROM dual; SET Heading on SELECT table_name, COUNT(*) trigger_count FROM all_triggers WHERE OWNER = :b0 GROUP BY table_name / SET Heading off SELECT '------ Performance issues - Reverse Key Indexes Defined in the Schema: '||:b0 FROM dual; col Owner format a10 col TABLE_OWNER format a10 col INDEX_TYPE format a12 SET Heading on select OWNER, INDEX_NAME, INDEX_TYPE, TABLE_OWNER, TABLE_NAME, TABLE_TYPE, UNIQUENESS, CLUSTERING_FACTOR, NUM_ROWS, LAST_ANALYZED, BUFFER_POOL from dba_indexes where index_type = 'NORMAL/REV' And OWNER = :b0 / SET Heading off SELECT '------ Sequence numbers: '||:b0 FROM dual; COLUMN SEQUENCE_OWNER FORMAT a15 COLUMN SEQUENCE_NAME FORMAT a30 COLUMN INCR FORMAT 999 COLUMN CYCLE FORMAT A5 COLUMN ORDER FORMAT A5 SET Heading on SELECT SEQUENCE_OWNER, SEQUENCE_NAME, MIN_VALUE, MAX_VALUE, INCREMENT_BY INCR, CYCLE_FLAG CYCLE, ORDER_FLAG "ORDER", CACHE_SIZE, LAST_NUMBER FROM DBA_SEQUENCES WHERE SEQUENCE_OWNER=UPPER(:b0) / set linesize 132 col "Avg Log Size" format 999,999,999 select sum (BLOCKS) * max(BLOCK_SIZE)/ count(*)"Avg Log Size" From gV$ARCHIVED_LOG; Prompt Table: Frequency of Log Switches by hour and day SELECT SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),1,5) DAY, TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'00',1,0)),'99') "00", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'01',1,0)),'99') "01", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'02',1,0)),'99') "02", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'03',1,0)),'99') "03", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'04',1,0)),'99') "04", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'05',1,0)),'99') "05", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'06',1,0)),'99') "06", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'07',1,0)),'99') "07", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'08',1,0)),'99') "08", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'09',1,0)),'99') "09", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'10',1,0)),'99') "10", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY 
HH24:MI:SS'),10,2),'11',1,0)),'99') "11", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'12',1,0)),'99') "12", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'13',1,0)),'99') "13", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'14',1,0)),'99') "14", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'15',1,0)),'99') "15", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'16',1,0)),'99') "16", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'17',1,0)),'99') "17", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'18',1,0)),'99') "18", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'19',1,0)),'99') "19", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'20',1,0)),'99') "20", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'21',1,0)),'99') "21", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'22',1,0)),'99') "22", TO_CHAR(SUM(DECODE(SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),10,2),'23',1,0)),'99') "23" FROM V$LOG_HISTORY GROUP BY SUBSTR(TO_CHAR(FIRST_TIME, 'MM-DD-YY HH24:MI:SS'),1,5) order by 1; SET Heading off SELECT '------ Summary of log volume processed by day for last 7 days: ' FROM dual; SET Heading on select to_char(first_time, 'mm/dd') ArchiveDate, sum(BLOCKS*BLOCK_SIZE/1024/1024) LOGMB from v$archived_log where first_time > sysdate - 7 group by to_char(first_time, 'mm/dd') order by to_char(first_time, 'mm/dd'); / SET Heading off SELECT '------ Summary of log volume processed per hour for last 7 days: ' FROM dual; SET Heading on select to_char(first_time, 'MM-DD-YYYY') ArchiveDate, to_char(first_time, 'HH24') ArchiveHour, sum(BLOCKS*BLOCK_SIZE/1024/1024) LogMB from v$archived_log where first_time > sysdate - 7 group by to_char(first_time, 'MM-DD-YYYY'), to_char(first_time, 'HH24') order by to_char(first_time, 'MM-DD-YYYY'), to_char(first_time, 'HH24'); / set heading off select '* This output may be found in file: schemaCheckOracle.&&schema_name..out' from dual / spool off undefine b0 -- exit
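Once the spool file has been reviewed, the tables flagged without a primary key or unique index are the ones that usually need key columns forced when trandata is added. A minimal GGSCI sketch of that follow-up; the login, schema, table, and column names here are hypothetical placeholders:

GGSCI> DBLOGIN USERID ggadmin, PASSWORD ggadmin
GGSCI> ADD TRANDATA scott.emp, COLS (empno), NOKEY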

-- Get row counts for all tables in a schema (optionally restricted by any WHERE condition).

select owner, table_name,
       TO_NUMBER(extractvalue(
           XMLTYPE(dbms_xmlgen.getxml('select count(*) c from ' || owner || '.' || table_name)),
           '/ROWSET/ROW/C')) ROW_COUNT
from dba_tables
where owner = 'DV2SCH2'
ORDER BY 1, 2;
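The same dbms_xmlgen trick also works with a predicate pushed into the generated statement. A sketch counting only recent rows; the CREATED column and the LIKE filter are made-up examples, so adjust (or drop) them to match your tables:

select owner, table_name,
       to_number(extractvalue(
           xmltype(dbms_xmlgen.getxml(
               'select count(*) c from ' || owner || '.' || table_name ||
               ' where created > sysdate - 30')),
           '/ROWSET/ROW/C')) row_count
from dba_tables
where owner = 'DV2SCH2' and table_name like 'AUD%'
order by 1, 2;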

Saturday, September 10, 2011

ADDING / RESIZING LOGS in DATAGUARD Environment

THINGS TO NOTE BEFORE STARTING:
I did this on 10.2.0.4
Primary 2 node RAC
Standby 2 node RAC

The group number must be between 1 and the value of the MAXLOGFILES clause. Do not skip log file group numbers (that is, do not number groups 10, 20, 30, and so on), or you will use additional space in the standby database control file.
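To see how many redo log group slots the control file currently allows, and how many are in use, the control file record counts can be queried directly; RECORDS_TOTAL here reflects the MAXLOGFILES setting:

SELECT type, records_total, records_used
FROM v$controlfile_record_section
WHERE type = 'REDO LOG';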

Although the STANDBY LOGs are only used when the database is running in the standby role, Oracle recommends that you create a standby redo log on the primary database so that the primary database can switch over quickly to the standby role without the need for additional DBA intervention.

The size of the current standby redo log files must exactly match the size of the current primary database online redo log files.

You will need one more STANDBY LOGFILE group than the number of LOGFILE groups on the primary database (for each thread).
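As a worked example of that rule, this sketch derives the recommended standby log group count straight from V$LOG on the primary (one extra group per thread):

SELECT SUM(groups_per_thread + 1) AS recommended_standby_groups
FROM (SELECT thread#, COUNT(*) AS groups_per_thread
      FROM v$log
      GROUP BY thread#);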

STEP 1: Check existing LOG files and STANDBY LOG files
do this on both PRIMARY and STANDBY dbs
SELECT GROUP#,THREAD#,SEQUENCE#,BYTES/1024/1024 "SIZE MB", ARCHIVED,STATUS FROM V$LOG order by group#;
SELECT GROUP#,THREAD#,SEQUENCE#,BYTES/1024/1024 "SIZE MB", ARCHIVED,STATUS FROM V$STANDBY_LOG order by group#;

STEP 2: Cancel Recovery
do this on standby db.
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;

STEP 3: Set STANDBY_FILE_MANAGEMENT to manual
do this on both PRIMARY and STANDBY dbs
ALTER SYSTEM SET STANDBY_FILE_MANAGEMENT='MANUAL' scope=both sid='*';

STEP 4: DELETE and ADD LOG and STANDBY LOG files
This is the usual process.
do this on both PRIMARY and STANDBY dbs
alter system switch logfile; -- this on primary to make the GROUP ready for deletion
ALTER DATABASE CLEAR LOGFILE GROUP 6; -- this on standby to make the GROUP ready for deletion
ALTER DATABASE DROP LOGFILE GROUP 6;
ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 1 ('+DISKG1','+DISKG2') size 150M;
ALTER DATABASE DROP STANDBY LOGFILE GROUP 07;
ALTER DATABASE ADD STANDBY LOGFILE THREAD 1 GROUP 10 ('+DISKG1','+DISKG2') size 150M;
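A drop fails while a group is still CURRENT or ACTIVE, so before each DROP in this step it is worth confirming the status (group 6 as in the example above):

SELECT group#, thread#, status FROM V$LOG WHERE group# = 6;
-- proceed only when STATUS is INACTIVE (or UNUSED after a CLEAR on the standby)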

STEP 5: WINDUP and VERIFY
SELECT GROUP#,THREAD#,SEQUENCE#,BYTES/1024/1024 "SIZE MB", ARCHIVED,STATUS FROM V$LOG order by group#; -- on both PRIMARY and STANDBY dbs
SELECT GROUP#,THREAD#,SEQUENCE#,BYTES/1024/1024 "SIZE MB", ARCHIVED,STATUS FROM V$STANDBY_LOG order by group#; -- on both PRIMARY and STANDBY dbs
ALTER SYSTEM SET STANDBY_FILE_MANAGEMENT='AUTO' scope=both sid='*'; -- on both PRIMARY and STANDBY dbs
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT FROM SESSION; -- on STANDBY
alter system switch logfile; -- on PRIMARY and verify standby is recovering the new logs

STEP 6: Make sure Standby is not too far behind PRIMARY
do this on PRIMARY. This forces a log switch at least every 1800 seconds.
We needed this because during off-peak hours a log switch was happening only once in 10+ hours.
alter system set ARCHIVE_LAG_TARGET=1800 scope=both sid='*';
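To confirm the standby keeps pace after this change, compare the highest received and applied log sequences per thread (run on the standby):

SELECT thread#,
       MAX(sequence#) AS last_received,
       MAX(CASE WHEN applied = 'YES' THEN sequence# END) AS last_applied
FROM v$archived_log
GROUP BY thread#;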

Sunday, March 27, 2011

Script to Generate AWR reports for all RAC nodes

-- This generates AWR reports from all the RAC nodes for the input start and end timings.
-- Works for 10gR2
--
-- How to execute
-- Save this script in AWR_gen_script.sql
-- sql> @AWR_gen_script.sql 20100915_1400 20100915_1500 html
-- Param 1 = start time YYYYMMDD_HH24MI format
-- Param 2 = end time YYYYMMDD_HH24MI format
-- Param 3 = html or text
--
-- All the AWR reports (one from each node) will be generated in the directory given by the output_dir variable. Defaults to c:\
-- This can be run from a client or the server. Just change output_dir to suit the OS: Windows c:\ or Unix $HOME/
-- This creates a temporary sql file AWR_run_script.sql in the current directory


SET FEED OFF
SET VERIFY OFF
SET HEADING OFF
SET TERMOUT OFF
SET ECHO OFF
SET LINESIZE 32767
SET PAGES 0
SET WRAP OFF
SET SCAN ON
SET TRIM ON
SET TRIMS ON
SET TAB OFF
SET SERVEROUTPUT ON
SET PAUSE OFF
SET TIMING OFF

spool AWR_run_script.sql

DECLARE

output_dir VARCHAR2(40) := 'c:\';
-- output must end with a slash

awr_format VARCHAR2(4);
begin_time VARCHAR2(40) ;
end_time VARCHAR2(40) ;
snap_begin_time VARCHAR2(40);
snap_end_time VARCHAR2(40);
snap_begin_snap number;
snap_end_snap number;
v_dbid number;
v_dbname VARCHAR(20);
v_extension varchar(10);

BEGIN

begin_time := '&1';
end_time := '&2';
awr_format := '&3';

select dbid, name into v_dbid, v_dbname from v$database;

if awr_format = 'text' then
v_extension := 'txt';
else
v_extension := awr_format;
end if;

for x in ( select instance_number, instance_name from gv$instance order by 1 ) loop

SELECT to_char(max(end_interval_time),'YYYYMMDD_HH24MI'), max(snap_id) INTO snap_begin_time, snap_begin_snap
FROM DBA_HIST_SNAPSHOT
where instance_number= x.instance_number
and to_char(trunc(end_interval_time, 'MI'),'YYYYMMDD_HH24MI') <= begin_time;

SELECT to_char(min(end_interval_time),'YYYYMMDD_HH24MI'), min(snap_id) INTO snap_end_time, snap_end_snap
FROM DBA_HIST_SNAPSHOT
where instance_number= x.instance_number
and to_char(trunc(end_interval_time, 'MI'),'YYYYMMDD_HH24MI') >= end_time;

DBMS_OUTPUT.PUT_LINE(chr(10));
DBMS_OUTPUT.PUT_LINE('spool ' || output_dir || 'AWR_' || x.instance_name ||'_'|| snap_begin_time ||'_to_' || snap_end_time ||'.' || v_extension );
DBMS_OUTPUT.PUT_LINE('SELECT output FROM TABLE (dbms_workload_repository.awr_report_' || awr_format || '(' || v_dbid||','||x.instance_number||','|| snap_begin_snap ||','|| snap_end_snap||'));');
DBMS_OUTPUT.PUT_LINE('spool off');

end loop;
DBMS_OUTPUT.PUT_LINE('exit ');

end;
/

spool off;
@AWR_run_script.sql
exit
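If either snapshot lookup comes back empty, the requested window probably falls outside what AWR has retained. A quick standalone helper (separate from the script above) to see what DBA_HIST_SNAPSHOT actually holds per instance for the last day:

SELECT instance_number, snap_id,
       TO_CHAR(end_interval_time, 'YYYYMMDD_HH24MI') AS snap_end
FROM dba_hist_snapshot
WHERE end_interval_time > SYSDATE - 1
ORDER BY instance_number, snap_id;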

Script to Generate ADDM reports for all RAC nodes

-- This generates ADDM reports from all the nodes for the input start and end timings
-- Works for 10gR2
-- How to execute.
-- Save this script in ADDM_gen_script.sql
-- sql> @ADDM_gen_script.sql 20100915_1400 20100915_1500
-- Param 1 = start time YYYYMMDD_HH24MI format
-- Param 2 = end time YYYYMMDD_HH24MI format
--
-- All the ADDM reports (one from each node) will be created in the directory given by the output_dir variable. Defaults to c:\
-- This can be run from a client or the server. Just change output_dir to suit the OS: Windows c:\ or Unix $HOME/
-- This creates a temporary sql file ADDM_run_script.sql in the current directory

SET FEED OFF
SET VERIFY OFF
SET HEADING OFF
SET TERMOUT OFF
SET ECHO OFF
SET LINESIZE 32767
SET PAGES 0
SET WRAP OFF
SET SCAN ON
SET TRIM ON
SET TRIMS ON
SET TAB OFF
SET SERVEROUTPUT ON
SET PAUSE OFF
SET TIMING OFF

spool ADDM_run_script.sql
PROMPT SET FEED OFF
PROMPT SET VERIFY OFF
PROMPT SET HEADING OFF
PROMPT SET TERMOUT ON
PROMPT SET ECHO OFF
PROMPT SET LINESIZE 32767
PROMPT SET PAGES 0
PROMPT SET WRAP ON
PROMPT SET SCAN ON
PROMPT SET TRIM ON
PROMPT SET TRIMS ON
PROMPT SET TAB ON
PROMPT SET SERVEROUTPUT ON
PROMPT SET PAUSE ON
PROMPT SET TIMING OFF

DECLARE

-- output must end with a slash. CHANGE THIS IF NEEDED.
output_dir VARCHAR2(40) := 'c:\' ;

begin_time VARCHAR2(40);
end_time VARCHAR2(40);
snap_begin_time VARCHAR2(40);
snap_end_time VARCHAR2(40);
snap_begin_snap number;
snap_end_snap number;
v_dbid number;
v_dbname varchar(20);
v_instance number;

tid number; -- Task ID
tname varchar2(100); -- Task Name
tdesc varchar2(500); -- Task Description

BEGIN

begin_time := '&1';
end_time := '&2';
-- DBMS_OUTPUT.PUT_LINE('-- Begin Time = ' || begin_time);
-- DBMS_OUTPUT.PUT_LINE('-- End Time = ' || end_time);
select dbid,name into v_dbid, v_dbname from v$database;

for x in ( select instance_number, instance_name from gv$instance order by 1 ) loop

-- Get the snap id for the input begin_time
SELECT to_char(max(end_interval_time),'YYYYMMDD_HH24MI'), max(snap_id) INTO snap_begin_time, snap_begin_snap
FROM DBA_HIST_SNAPSHOT
where instance_number= x.instance_number
and to_char(trunc(end_interval_time, 'MI'),'YYYYMMDD_HH24MI') <= begin_time;

-- Get the snap id for the input end_time
SELECT to_char(min(end_interval_time),'YYYYMMDD_HH24MI'), min(snap_id) INTO snap_end_time, snap_end_snap
FROM DBA_HIST_SNAPSHOT
where instance_number= x.instance_number
and to_char(trunc(end_interval_time, 'MI'),'YYYYMMDD_HH24MI') >= end_time;

tname := 'ADDM:' || x.instance_name || '_' || snap_begin_snap || '_' || snap_end_snap;
tdesc := 'ADDM manual run: snapshots [' || snap_begin_snap || ',' || snap_end_snap || '], ' || x.instance_name;

DBMS_OUTPUT.PUT_LINE('DECLARE');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'tid NUMBER; -- Task ID');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'tname VARCHAR2(30); -- Task Name');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'tdesc VARCHAR2(256); -- Task Description');

DBMS_OUTPUT.PUT_LINE('BEGIN');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'tname := ''' || tname || ''' ;');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'tdesc := ''' || tdesc || ''';');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'DBMS_ADVISOR.CREATE_TASK( ''ADDM'', tid, tname, tdesc );');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'DBMS_ADVISOR.SET_TASK_PARAMETER( tname, ''START_SNAPSHOT'',' || snap_begin_snap || ' );' );
DBMS_OUTPUT.PUT_LINE(chr(9) || 'DBMS_ADVISOR.SET_TASK_PARAMETER( tname, ''END_SNAPSHOT'' ,' || snap_end_snap || ' );');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'dbms_advisor.set_task_parameter( tname, ''INSTANCE'',' || x.instance_number || ');');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'dbms_advisor.set_task_parameter( tname, ''DB_ID'',' || v_dbid || ');');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'DBMS_ADVISOR.EXECUTE_TASK( tname );');

DBMS_OUTPUT.PUT_LINE('END;');
DBMS_OUTPUT.PUT_LINE('/');

DBMS_OUTPUT.PUT_LINE('set long 1000000 pagesize 0 longchunksize 1000');
DBMS_OUTPUT.PUT_LINE('column get_clob format a80');
DBMS_OUTPUT.PUT_LINE('spool ' || output_dir || 'ADDM_' || x.instance_name ||'_'|| snap_begin_time ||'_to_' || snap_end_time ||'.txt' );
DBMS_OUTPUT.PUT_LINE('select dbms_advisor.get_task_report(''' || tname || ''', ''TEXT'', ''TYPICAL'') from sys.dual;');
DBMS_OUTPUT.PUT_LINE('spool off;');
DBMS_OUTPUT.PUT_LINE('exec DBMS_ADVISOR.DELETE_TASK('''|| tname ||''');');
DBMS_OUTPUT.PUT_LINE(chr(10));

end loop;

end;
/
spool off
@ADDM_run_script.sql
exit
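The generated script deletes each advisor task after reporting, but if a run is interrupted part-way, leftover tasks can be listed (and then removed with DBMS_ADVISOR.DELETE_TASK) like this:

SELECT task_id, task_name, status, created
FROM dba_advisor_tasks
WHERE advisor_name = 'ADDM'
ORDER BY created DESC;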

Tuesday, January 18, 2011

RMAN Cloning (Refreshing) ASM/RAC database to ASM/RAC database

Assumptions:

Both dbs are on the same server
10.2.0.4
Source DB: SOURCE
Target DB: TARGET
Source Database can be in NOARCHIVELOG mode.
Target DB already exists (okay if it doesn't; just NOMOUNT the db)

============================================================================
STEP 1: COLD BACKUP SOURCE DATABASE.

srvctl stop database -d SOURCE

export ORACLE_SID=SOURCE1
export ORACLE_HOME=/u01/app/oracle/10gR2

rman msglog SOURCE_cold_backup.log << EOF
connect target /
startup mount;
CONFIGURE CONTROLFILE AUTOBACKUP ON;
run {
allocate channel d1 type disk format '/u01/dumps/rman_SOURCE/%d_S_%s_P_%p_T_%t';
allocate channel d2 type disk format '/u01/dumps/rman_SOURCE/%d_S_%s_P_%p_T_%t';
allocate channel d3 type disk format '/u01/dumps/rman_SOURCE/%d_S_%s_P_%p_T_%t';
allocate channel d4 type disk format '/u01/dumps/rman_SOURCE/%d_S_%s_P_%p_T_%t';
allocate channel d5 type disk format '/u01/dumps/rman_SOURCE/%d_S_%s_P_%p_T_%t';
backup database tag=COLD_BACKUP;
}
EOF

srvctl start database -d SOURCE

============================================================================
STEP 2: TARGET DATABASE CLUSTER_DATABASE=FALSE

export ORACLE_SID=TARGET1
alter system set cluster_database=false scope=spfile sid='*';
srvctl stop database -d TARGET
startup nomount;

-- Delete datafiles in ASM diskgroup.
ASMCMD [+diskg/TARGET/datafile] > rm *
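For reference, a sketch of reaching that ASMCMD prompt; the ASM SID, home, and diskgroup path are environment-specific placeholders:

export ORACLE_SID=+ASM1
export ORACLE_HOME=/u01/app/oracle/10gR2
asmcmd
ASMCMD> cd +diskg/TARGET/datafile
ASMCMD> rm *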

============================================================================
STEP 3: RMAN DUPLICATE SOURCE TO TARGET

startup nomount;

ORACLE_SID=TARGET1; export ORACLE_SID
rman msglog TARGET_clone.log << EOF
connect target sys/password@SOURCE1
connect auxiliary /
run
{
allocate channel ch1 type disk;
allocate channel ch2 type disk;
allocate auxiliary channel d1 device type disk;
allocate auxiliary channel d2 device type disk;
duplicate target database to TARGET;
}
EOF


============================================================================
STEP 4: TARGET DATABASE CLUSTER_DATABASE=TRUE

export ORACLE_SID=TARGET1
alter system set cluster_database=TRUE scope=spfile sid='*';
alter system set cluster_database_instances=2 scope=spfile sid='*';
shutdown immediate;
exit
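To wrap up, the instances would normally be brought back under clusterware control; a minimal sketch, assuming TARGET is registered with srvctl the same way SOURCE is:

srvctl start database -d TARGET
srvctl status database -d TARGET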

Tuesday, June 30, 2009

PL/SQL script for finding invalid Oracle directories

-- Script for finding invalid directories.
-- Creates a test file in each directory and reports any that fail.

set serveroutput on

declare
  f utl_file.file_type;
begin
  for x in ( select * from dba_directories where owner = 'SYS' ) loop
    begin
      -- try to create, then remove, a scratch file in each directory
      f := utl_file.fopen(x.directory_name, 'madhu_test.txt', 'W');
      if utl_file.is_open(f) then
        utl_file.fclose(f);
        utl_file.fremove(x.directory_name, 'madhu_test.txt');
      end if;
    exception
      when others then
        dbms_output.put_line(x.directory_name || ' -- ' || x.directory_path || ' is invalid');
    end;
  end loop;
end;
/
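To preview what the loop will test before running it, the same filter can be applied directly:

SELECT directory_name, directory_path
FROM dba_directories
WHERE owner = 'SYS'
ORDER BY directory_name;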