Saturday, September 10, 2011

ADDING / RESIZING LOGS in DATAGUARD Environment

THINGS TO NOTE BEFORE YOU START:
I did this on 10.2.0.4
Primary: 2-node RAC
Standby: 2-node RAC

The group number must be between 1 and the value of the MAXLOGFILES clause. Do not skip log file group numbers (that is, do not number groups 10, 20, 30, and so on), or you will use additional space in the standby database control file.

Although the STANDBY LOGs are only used when the database is running in the standby role, Oracle recommends that you create a standby redo log on the primary database so that the primary database can switch over quickly to the standby role without the need for additional DBA intervention.

The size of the current standby redo log files must exactly match the size of the current primary database online redo log files.

You will need one more STANDBY LOGFILE group than the number of LOGFILE groups on the primary database, for each thread.
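A quick way to work out the count per thread (run on the PRIMARY; this is just the +1 rule above applied to V$LOG, so treat it as a convenience check):
SELECT THREAD#, COUNT(*) "ONLINE GROUPS", COUNT(*)+1 "STANDBY GROUPS NEEDED" FROM V$LOG GROUP BY THREAD# ORDER BY THREAD#;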

STEP 1: Check existing LOG files and STANDBY LOG files
do this on both PRIMARY and STANDBY dbs
SELECT GROUP#,THREAD#,SEQUENCE#,BYTES/1024/1024 "SIZE MB", ARCHIVED,STATUS FROM V$LOG order by group#;
SELECT GROUP#,THREAD#,SEQUENCE#,BYTES/1024/1024 "SIZE MB", ARCHIVED,STATUS FROM V$STANDBY_LOG order by group#;

STEP 2: Cancel Recovery
do this on standby db.
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;

STEP 3: Set STANDBY_FILE_MANAGEMENT to manual
do this on both PRIMARY and STANDBY dbs
ALTER SYSTEM SET STANDBY_FILE_MANAGEMENT='MANUAL' scope=both sid='*';
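To confirm the change took effect on every instance before touching the log files (simple check against GV$PARAMETER; expect MANUAL on all instances):
SELECT INST_ID, VALUE FROM GV$PARAMETER WHERE NAME='standby_file_management';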

STEP 4: DELETE and ADD LOG and STANDBY LOG files
This is the usual process
do this on both PRIMARY and STANDBY dbs
alter system switch logfile; -- on PRIMARY, switch until the group you want to drop is INACTIVE and ready for deletion
ALTER DATABASE CLEAR LOGFILE GROUP 6; -- on STANDBY, to make the group ready for deletion
ALTER DATABASE DROP LOGFILE GROUP 6;
ALTER DATABASE ADD LOGFILE THREAD 1 GROUP 1 ('+DISKG1','+DISKG2') size 150M;
ALTER DATABASE DROP STANDBY LOGFILE GROUP 07;
ALTER DATABASE ADD STANDBY LOGFILE THREAD 1 GROUP 10 ('+DISKG1','+DISKG2') size 150M;
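On a 2-node RAC the same has to be repeated for thread 2. A sketch with assumed group numbers (pick numbers that are actually free in your configuration):
ALTER DATABASE CLEAR LOGFILE GROUP 8; -- on standby, to make the group ready for deletion
ALTER DATABASE DROP LOGFILE GROUP 8;
ALTER DATABASE ADD LOGFILE THREAD 2 GROUP 3 ('+DISKG1','+DISKG2') size 150M;
ALTER DATABASE DROP STANDBY LOGFILE GROUP 9;
ALTER DATABASE ADD STANDBY LOGFILE THREAD 2 GROUP 11 ('+DISKG1','+DISKG2') size 150M;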

STEP 5: WINDUP and VERIFY
SELECT GROUP#,THREAD#,SEQUENCE#,BYTES/1024/1024 "SIZE MB", ARCHIVED,STATUS FROM V$LOG order by group#; -- on both PRIMARY and STANDBY dbs
SELECT GROUP#,THREAD#,SEQUENCE#,BYTES/1024/1024 "SIZE MB", ARCHIVED,STATUS FROM V$STANDBY_LOG order by group#; -- on both PRIMARY and STANDBY dbs
ALTER SYSTEM SET STANDBY_FILE_MANAGEMENT='AUTO' scope=both sid='*'; -- on both PRIMARY and STANDBY dbs
ALTER DATABASE RECOVER MANAGED STANDBY DATABASE DISCONNECT FROM SESSION; -- on STANDBY
alter system switch logfile; -- on PRIMARY and verify standby is recovering the new logs
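One way to confirm the standby really is applying the new logs (run on the STANDBY; standard Data Guard view):
SELECT PROCESS, STATUS, THREAD#, SEQUENCE# FROM V$MANAGED_STANDBY WHERE PROCESS LIKE 'MRP%'; -- MRP0 should show APPLYING_LOG and the sequence should advance after each switch on the primary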

STEP 6: Make sure Standby is not too far behind PRIMARY
do this on PRIMARY. This forces a log switch at least every 1800 seconds.
We needed this because during off-peak hours a log switch was happening only once every 10+ hours.
alter system set ARCHIVE_LAG_TARGET=1800 scope=both sid='*';
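A rough gauge of how far behind the standby is (run on the STANDBY; compares the last sequence received with the last sequence applied, per thread):
SELECT THREAD#, MAX(SEQUENCE#) "LAST RECEIVED", MAX(CASE WHEN APPLIED='YES' THEN SEQUENCE# END) "LAST APPLIED" FROM V$ARCHIVED_LOG GROUP BY THREAD#;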

Sunday, March 27, 2011

Script to Generate AWR reports for all RAC nodes

-- This generates AWR reports from all the RAC nodes for the input start and end timings.
-- Works for 10gR2
--
-- How to execute
-- Save this script in AWR_gen_script.sql
-- sql> @AWR_gen_script.sql 20100915_1400 20100915_1500 html
-- Param 1 = start time YYYYMMDD_HH24MI format
-- Param 2 = end time YYYYMMDD_HH24MI format
-- Param 3 = html or text
--
-- All the AWR reports (one from each node) will be written to the directory given by the output_dir variable. Defaults to c:\
-- This can be run from a client or the server. Just change output_dir to match the OS: Windows c:\ or Unix $HOME/
-- This creates a temporary SQL file AWR_run_script.sql in the current directory


SET FEED OFF
SET VERIFY OFF
SET HEADING OFF
SET TERMOUT OFF
SET ECHO OFF
SET LINESIZE 32767
SET PAGES 0
SET WRAP OFF
SET SCAN ON
SET TRIM ON
SET TRIMS ON
SET TAB OFF
SET SERVEROUTPUT ON
SET PAUSE OFF
SET TIMING OFF

spool AWR_run_script.sql

DECLARE

output_dir VARCHAR2(40) := 'c:\';
-- output must end with a slash

awr_format VARCHAR2(4);
begin_time VARCHAR2(40) ;
end_time VARCHAR2(40) ;
snap_begin_time VARCHAR2(40);
snap_end_time VARCHAR2(40);
snap_begin_snap number;
snap_end_snap number;
v_dbid number;
v_dbname VARCHAR(20);
v_extention varchar(10);

BEGIN

begin_time := '&1';
end_time := '&2';
awr_format := '&3';

select dbid, name into v_dbid, v_dbname from v$database;

if awr_format = 'text' then
v_extention := 'txt';
else
v_extention := awr_format;
end if;

for x in ( select instance_number, instance_name from gv$instance order by 1 ) loop

SELECT to_char(max(end_interval_time),'YYYYMMDD_HH24MI'), max(snap_id) INTO snap_begin_time, snap_begin_snap
FROM DBA_HIST_SNAPSHOT
where instance_number= x.instance_number
and to_char(trunc(end_interval_time, 'MI'),'YYYYMMDD_HH24MI') <= begin_time;

SELECT to_char(min(end_interval_time),'YYYYMMDD_HH24MI'), min(snap_id) INTO snap_end_time, snap_end_snap
FROM DBA_HIST_SNAPSHOT
where instance_number= x.instance_number
and to_char(trunc(end_interval_time, 'MI'),'YYYYMMDD_HH24MI') >= end_time;

DBMS_OUTPUT.PUT_LINE(chr(10));
DBMS_OUTPUT.PUT_LINE('spool ' || output_dir || 'AWR_' || x.instance_name ||'_'|| snap_begin_time ||'_to_' || snap_end_time ||'.' || v_extention );
DBMS_OUTPUT.PUT_LINE('SELECT output FROM TABLE (dbms_workload_repository.awr_report_' || awr_format || '(' || v_dbid||','||x.instance_number||','|| snap_begin_snap ||','|| snap_end_snap||'));');
DBMS_OUTPUT.PUT_LINE('spool off');

end loop;
DBMS_OUTPUT.PUT_LINE('exit ');

end;
/

spool off;
@AWR_run_script.sql;
exit
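From an OS prompt the generator can be run non-interactively, for example (the connect string below is only an illustration; use a DBA account that can read the DBA_HIST views):
sqlplus -s system/manager@MYDB @AWR_gen_script.sql 20100915_1400 20100915_1500 html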

Script to Generate ADDM reports for all RAC nodes

-- This generates ADDM reports from all the nodes for the input start and end timings
-- Works for 10gR2
-- How to execute.
-- Save this script in ADDM_gen_script.sql
-- sql> @ADDM_gen_script.sql 20100915_1400 20100915_1500
-- Param 1 = start time YYYYMMDD_HH24MI format
-- Param 2 = end time YYYYMMDD_HH24MI format
--
-- All the ADDM reports (one from each node) will be written to the directory given by the output_dir variable. Defaults to c:\
-- This can be run from a client or the server. Just change output_dir to match the OS: Windows c:\ or Unix $HOME/
-- This creates a temporary SQL file ADDM_run_script.sql in the current directory

SET FEED OFF
SET VERIFY OFF
SET HEADING OFF
SET TERMOUT OFF
SET ECHO OFF
SET LINESIZE 32767
SET PAGES 0
SET WRAP OFF
SET SCAN ON
SET TRIM ON
SET TRIMS ON
SET TAB OFF
SET SERVEROUTPUT ON
SET PAUSE OFF
SET TIMING OFF

spool ADDM_run_script.sql
PROMPT SET FEED OFF
PROMPT SET VERIFY OFF
PROMPT SET HEADING OFF
PROMPT SET TERMOUT ON
PROMPT SET ECHO OFF
PROMPT SET LINESIZE 32767
PROMPT SET PAGES 0
PROMPT SET WRAP ON
PROMPT SET SCAN ON
PROMPT SET TRIM ON
PROMPT SET TRIMS ON
PROMPT SET TAB ON
PROMPT SET SERVEROUTPUT ON
PROMPT SET PAUSE ON
PROMPT SET TIMING OFF

DECLARE

-- output must end with a slash. CHANGE THIS IF NEEDED.
output_dir VARCHAR2(40) := 'c:\' ;

begin_time VARCHAR2(40);
end_time VARCHAR2(40);
snap_begin_time VARCHAR2(40);
snap_end_time VARCHAR2(40);
snap_begin_snap number;
snap_end_snap number;
v_dbid number;
v_dbname varchar(20);
v_instance number;

tid number; -- Task ID
tname varchar2(100); -- Task Name
tdesc varchar2(500); -- Task Description

BEGIN

begin_time := '&1';
end_time := '&2';
-- DBMS_OUTPUT.PUT_LINE('-- Begin Time = ' || begin_time);
-- DBMS_OUTPUT.PUT_LINE('-- End Time = ' || end_time);
select dbid,name into v_dbid, v_dbname from v$database;

for x in ( select instance_number, instance_name from gv$instance order by 1 ) loop

-- Get the snap id for the input begin_time
SELECT to_char(max(end_interval_time),'YYYYMMDD_HH24MI'), max(snap_id) INTO snap_begin_time, snap_begin_snap
FROM DBA_HIST_SNAPSHOT
where instance_number= x.instance_number
and to_char(trunc(end_interval_time, 'MI'),'YYYYMMDD_HH24MI') <= begin_time;

-- Get the snap id for the input end_time
SELECT to_char(min(end_interval_time),'YYYYMMDD_HH24MI'), min(snap_id) INTO snap_end_time, snap_end_snap
FROM DBA_HIST_SNAPSHOT
where instance_number= x.instance_number
and to_char(trunc(end_interval_time, 'MI'),'YYYYMMDD_HH24MI') >= end_time;

tname := 'ADDM:' || x.instance_name || '_' || snap_begin_snap || '_' || snap_end_snap;
tdesc := 'ADDM manual run: snapshots [' || snap_begin_snap || ',' || snap_end_snap || '], ' || x.instance_name;

DBMS_OUTPUT.PUT_LINE('DECLARE');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'tid NUMBER; -- Task ID');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'tname VARCHAR2(30); -- Task Name');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'tdesc VARCHAR2(256); -- Task Description');

DBMS_OUTPUT.PUT_LINE('BEGIN');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'tname := ''' || tname || ''' ;');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'tdesc := ''' || tdesc || ''';');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'DBMS_ADVISOR.CREATE_TASK( ''ADDM'', tid, tname, tdesc );');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'DBMS_ADVISOR.SET_TASK_PARAMETER( tname, ''START_SNAPSHOT'',' || snap_begin_snap || ' );' );
DBMS_OUTPUT.PUT_LINE(chr(9) || 'DBMS_ADVISOR.SET_TASK_PARAMETER( tname, ''END_SNAPSHOT'' ,' || snap_end_snap || ' );');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'dbms_advisor.set_task_parameter( tname, ''INSTANCE'',' || x.instance_number || ');');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'dbms_advisor.set_task_parameter( tname, ''DB_ID'',' || v_dbid || ');');
DBMS_OUTPUT.PUT_LINE(chr(9) || 'DBMS_ADVISOR.EXECUTE_TASK( tname );');

DBMS_OUTPUT.PUT_LINE('END;');
DBMS_OUTPUT.PUT_LINE('/');

DBMS_OUTPUT.PUT_LINE('set long 1000000 pagesize 0 longchunksize 1000');
DBMS_OUTPUT.PUT_LINE('column get_clob format a80');
DBMS_OUTPUT.PUT_LINE('spool ' || output_dir || 'ADDM_' || x.instance_name ||'_'|| snap_begin_time ||'_to_' || snap_end_time ||'.txt' );
DBMS_OUTPUT.PUT_LINE('select dbms_advisor.get_task_report(''' || tname || ''', ''TEXT'', ''TYPICAL'') from sys.dual;');
DBMS_OUTPUT.PUT_LINE('spool off;');
DBMS_OUTPUT.PUT_LINE('exec DBMS_ADVISOR.DELETE_TASK('''|| tname ||''');');
DBMS_OUTPUT.PUT_LINE(chr(10));

end loop;

end;
/
spool off
@ADDM_run_script.sql
exit

Tuesday, January 18, 2011

RMAN Cloning (Refreshing) ASM/RAC database to ASM/RAC database

Assumptions:

Both dbs are on the same server
10.2.0.4
Source DB: SOURCE
Target DB: TARGET
Source Database can be in NOARCHIVELOG mode.
Target DB already exists (it is okay if it doesn't; just start the instance in NOMOUNT, as sketched below)
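If the TARGET database really does not exist yet, a bare-minimum parameter file plus a NOMOUNT startup is enough for the duplicate to run. A sketch (the pfile name and contents are assumptions; add control_files, memory and convert parameters as your setup requires):
export ORACLE_SID=TARGET1
echo "db_name=TARGET" > /u01/app/oracle/10gR2/dbs/initTARGET1.ora
sqlplus / as sysdba
startup nomount pfile='/u01/app/oracle/10gR2/dbs/initTARGET1.ora';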

============================================================================
STEP 1: COLD BACKUP SOURCE DATABASE.

srvctl stop database -d SOURCE

export ORACLE_SID=SOURCE1
export ORACLE_HOME=/u01/app/oracle/10gR2

rman msglog SOURCE_cold_backup.log << EOF
connect target /
startup mount;
CONFIGURE CONTROLFILE AUTOBACKUP ON;
run {
allocate channel d1 type disk format '/u01/dumps/rman_SOURCE/%d_S_%s_P_%p_T_%t';
allocate channel d2 type disk format '/u01/dumps/rman_SOURCE/%d_S_%s_P_%p_T_%t';
allocate channel d3 type disk format '/u01/dumps/rman_SOURCE/%d_S_%s_P_%p_T_%t';
allocate channel d4 type disk format '/u01/dumps/rman_SOURCE/%d_S_%s_P_%p_T_%t';
allocate channel d5 type disk format '/u01/dumps/rman_SOURCE/%d_S_%s_P_%p_T_%t';
backup database tag=COLD_BACKUP;
}
EOF

srvctl start database -d SOURCE
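Before moving on it is worth confirming the backup pieces are really there (standard RMAN command, connected to SOURCE as target):
rman target /
LIST BACKUP TAG 'COLD_BACKUP';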

============================================================================
STEP 2: TARGET DATABASE CLUSTER_DATABASE=FALSE

export ORACLE_SID=TARGET1
alter system set cluster_database=false scope=spfile sid='*';
srvctl stop database -d TARGET
startup nomount;

-- Delete datafiles in ASM diskgroup.
ASMCMD [+diskg/TARGET/datafile] > rm *

============================================================================
STEP 3: RMAN DUPLICATE TO TARGET DATABASE

startup nomount;

ORACLE_SID=TARGET1; export ORACLE_SID
rman msglog TARGET_clone.log << EOF
connect target sys/password@SOURCE1
connect auxiliary /
run
{
allocate channel ch1 type disk;
allocate channel ch2 type disk;
allocate auxiliary channel d1 device type disk;
allocate auxiliary channel d2 device type disk;
duplicate target database to TARGET;
}
EOF
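A quick sanity check on the freshly duplicated database (run as SYSDBA against TARGET; DUPLICATE opens it with RESETLOGS, so OPEN_MODE should be READ WRITE):
sqlplus / as sysdba
select name, open_mode from v$database;
select count(*) from dba_data_files;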


============================================================================
STEP 4: TARGET DATABASE CLUSTER_DATABASE=TRUE

export ORACLE_SID=TARGET1
alter system set cluster_database=TRUE scope=spfile sid='*';
alter system set cluster_database_instances=2 scope=spfile sid='*';
shutdown immediate;
exit
10000 SCOPE=SPFILE sid='*';