Monday, June 18, 2012

STEPS - SCHEMA REFRESH


@ Source

========
connect to Putty
enter the hostname
select load

enter userid and pwd

set the environment for the database
$ uname
AIX
$ cat /etc/oratab
export ORACLE_SID=DBNAME
export ORACLE_HOME=/usr/app/oracle/product/10.2.0/
export PATH=$PATH:$ORACLE_HOME/bin

$ uname
SunOS
$ cat /var/opt/oracle/oratab
export ORACLE_SID=DBNAME
export ORACLE_HOME=/usr/app/oracle/product/10.2.0/
export PATH=$PATH:$ORACLE_HOME/bin

To Check Database

---------------------
-- Connect with SYSDBA privilege and confirm which database/instance you are on
sqlplus "/as sysdba"
set pages 200 lines 170
col HOST_NAME for a15
-- NOTE(review): gv$instance x v$database is a comma (cross) join; on RAC this
-- yields one row per instance, which appears to be the intent of using gv$ here
select instance_name,name db_name,host_name,version db_version from gv$instance,v$database;


-- Pre-refresh baseline for the schema: existence, total size (GB),
-- and object count per type (compare against target after import)
select username from dba_users where username=upper('&username');
select sum(bytes)/1024/1024/1024 from dba_segments where owner=upper('&owner');
select object_type,count(*) from dba_objects where owner=upper('&username') group by object_type;


Table Space Report
---------------------
-- Tablespace space report: per tablespace, shows allocated size (including
-- autoextend headroom), current size, used, available, and % used.
col tablespace_name format a15
col TABLESPACE_NAME heading "Tablespace|Name"
col Allocated_size heading "Allocated|Size(MB)" form 99999999.99
col Current_size heading "Current|Size(MB)" form 99999999.99
col Used_size heading "Used|Size(MB)" form 99999999.99
col Available_size heading "Available|Size(MB)" form 99999999.99
col Pct_used heading "%Used(vs)|(Allocated)" form 99999999.99
select a.tablespace_name
        ,a.alloc_size/1024/1024 Allocated_size
        ,a.cur_size/1024/1024 Current_Size
        -- 65536 bytes added per datafile: presumably file-header overhead -- TODO confirm
        ,(u.used+a.file_count*65536)/1024/1024 Used_size
        ,(a.alloc_size-(u.used+a.file_count*65536))/1024/1024 Available_size
        ,((u.used+a.file_count*65536)*100)/a.alloc_size Pct_used
from     dba_tablespaces t
        -- u: used bytes per tablespace; (+) outer join keeps empty tablespaces at 0
        ,(select t1.tablespace_name
                ,nvl(sum(s.bytes),0) used
          from  dba_segments s
               ,dba_tablespaces t1
          where t1.tablespace_name=s.tablespace_name(+)
          group by t1.tablespace_name) u
        -- a: allocated counts autoextend limit via greatest(bytes, maxbytes)
        ,(select d.tablespace_name
                ,sum(greatest(d.bytes,nvl(d.maxbytes,0))) alloc_size
                ,sum(d.bytes) cur_size
                ,count(*) file_count
          from dba_data_files d
          group by d.tablespace_name) a
-- NOTE(review): inner join to dba_data_files omits TEMP (tempfile) tablespaces
where t.tablespace_name=u.tablespace_name
  and t.tablespace_name=a.tablespace_name
order by t.tablespace_name;

Datafiles of a particular TableSpace
-------------------------------------
set pages 200 lines 175
col tablespace_name for a27
col file_name for a70
-- Datafiles of one tablespace: current size plus autoextend flag and limit
select tablespace_name
      ,file_name
      ,bytes/1024/1024 Size_MB
      ,autoextensible
      ,maxbytes/1024/1024 MAX_MB
from   dba_data_files
where  tablespace_name='TABLESPACENAME'
order by tablespace_name,file_name;



ASM Space Report
--------------------
-- ASM disk-group space report. DISTINCT collapses the per-instance duplicate
-- rows that gv$asm_diskgroup returns on RAC, leaving one row per disk group.
SET LINESIZE 150
SET PAGESIZE 9999
SET VERIFY off
COLUMN group_name             FORMAT a20           HEAD 'Disk Group|Name'
COLUMN sector_size            FORMAT 99,999        HEAD 'Sector|Size'
COLUMN block_size             FORMAT 99,999        HEAD 'Block|Size'
COLUMN allocation_unit_size   FORMAT 999,999,999   HEAD 'Allocation|Unit Size'
COLUMN state                  FORMAT a11           HEAD 'State'
COLUMN type                   FORMAT a6            HEAD 'Type'
COLUMN total_mb               FORMAT 999,999,999   HEAD 'Total Size (MB)'
COLUMN used_mb                FORMAT 999,999,999   HEAD 'Used Size (MB)'
COLUMN pct_used               FORMAT 999.99        HEAD 'Pct. Used'

SELECT
    distinct name                                     group_name
  , sector_size                              sector_size
  , block_size                               block_size
  , allocation_unit_size                     allocation_unit_size
  , state                                    state
  , type                                     type
  , total_mb                                 total_mb
  , (total_mb - free_mb)                     used_mb
    -- NULLIF guards ORA-01476 (divide by zero) when a group is dismounted
    -- and reports total_mb = 0; pct_used shows NULL for such groups instead
  , ROUND((1- (free_mb / NULLIF(total_mb, 0)))*100, 2)  pct_used
FROM
    gv$asm_diskgroup
ORDER BY
    name;
-- BUG FIX: removed the stray "/" that followed the ";" — in SQL*Plus that
-- re-executed the buffered query a second time.

OTHER ASM QUERIES
--------------------------
-- Disk-group totals in GB with a timestamp (DISTINCT dedups RAC instances)
select distinct name, total_mb/1024 , (total_mb-free_mb)/1024 , free_mb/1024, SYSDATE from GV$ASM_DISKGROUP;

-- Raw per-group figures in MB
select GROUP_NUMBER,NAME,TOTAL_MB,FREE_MB from gv$asm_diskgroup order by NAME;
-- Discover which ASM-related dictionary views exist on this version
select table_name from dict where table_name like '%ASM%';
-- Per-disk detail: local instance, then all RAC instances
select GROUP_NUMBER,DISK_NUMBER,TOTAL_MB,FREE_MB,NAME,MOUNT_STATUS from v$asm_disk;
select INST_ID,GROUP_NUMBER,DISK_NUMBER,TOTAL_MB,FREE_MB,NAME,MOUNT_STATUS from gv$asm_disk;


col file_name for a70
set linesize 150
set pagesize 100
-- List each datafile (path + size in MB) of the tablespace being checked
select file_name,bytes/1024/1024 from dba_data_files where tablespace_name='TABLESPACENAME' order by file_name;
FILE_NAME                                                    BYTES/1024/1024
------------------------------------------------------------ ---------------
/FILE/LOCATION/

Check for the space availability for taking backup:
>! df -h /FILE/LOCATION/

cd /source/backup/location/
mkdir EXPORT_REF_NO
cd EXPORT_REF_NO
$pwd
/source/backup/location/EXPORT_REF_NO

NOTE: If the schema size is small, it is better to use conventional EXP/IMP.

A. For schema backup (EXPDP/IMPDP)
create or replace directory <EXPDPDIR> as '/directory/location/';
Directory created.

GRANT read,write ON DIRECTORY <EXPDPDIR> to <user>;
Grant succeeded.

nohup expdp <user>/`pwd` logfile=schemaname.log directory=EXPDPDIR dumpfile=schemaname_`date +\%b\%d`_%U.dmp schemas=schemaname exclude=statistics PARALLEL=4 &
For schema backup (EXP/IMP)

B. For schema backup (EXP/IMP)
nohup exp <user>/`pwd` file=source_schemaname_exp.dmp log=source_schemaname_exp.log owner=schemaname statistics=NONE resumable=y resumable_name=continue resumable_timeout=18000 &

Check the status of EXPORT:

$jobs or $ps -ef| grep expdp or $ps -ef| grep exp or ps -ef|grep <processid>
[1]+  Running
[1]+  Done
$ls -lrt
source_schemaname_exp
$cat source_schemaname_exp.log

@ Target
========
connect to Putty
enter the hostname
select load

enter userid and pwd

set the environment for the database


To Check Database

---------------------
-- On the TARGET: connect as SYSDBA and verify you are on the intended database
sqlplus "/as sysdba"
set pages 200 lines 170
col HOST_NAME for a15
-- comma (cross) join of gv$instance with v$database: one row per RAC instance
select instance_name,name db_name,host_name,version db_version from gv$instance,v$database;


-- Target-side checks for the schema: existence, size (GB), object counts
select username from dba_users where username=upper('&username');
select sum(bytes)/1024/1024/1024 from dba_segments where owner=upper('&owner');
select object_type,count(*) from dba_objects where owner=upper('&username') group by object_type;


Check for the space availability for taking backup:
df -h
cd /target/backup/location/
mkdir EXPORT_REF_NO
cd EXPORT_REF_NO
$pwd
/target/backup/location/EXPORT_REF_NO

NOTE: If the schema size is small, it is better to use conventional EXP/IMP.

A. For schema backup (EXPDP/IMPDP)
create or replace directory <EXPDPDIR> as '/directory/location/';
GRANT read,write ON DIRECTORY <EXPDPDIR> to <user>;

nohup expdp <user>/`pwd` logfile=schemaname.log directory=EXPDPDIR dumpfile=schemaname_`date +\%b\%d`_%U.dmp schemas=schemaname exclude=statistics PARALLEL=4 &

B. For schema backup (EXP/IMP)
nohup exp <user>/`pwd` file=target_schemaname_exp.dmp log=target_schemaname_exp.log owner=schemaname statistics=NONE resumable=y resumable_name=continue resumable_timeout=18000 &

Check the status of EXPORT:
$jobs or $ps -ef| grep exp
$ls -lrt
target_schemaname_exp.log
$cat target_schemaname_exp.log

@ Source
========
scp source_schemaname_exp.dmp oracle@target_hostname:/target/backup/location/EXPORT_REF_NO/

@ Target
========
cd /target/backup/location/EXPORT_REF_NO/
ls -lrt
source_schemaname_exp.dmp

sqlplus "/as sysdba"
select name from v$database;

DROP SCHEMA OBJECTS
-------------------
-- Generate DROP statements from the USER_* views.
-- NOTE(review): this variant only works when connected AS the schema owner,
-- since user_tables/user_objects show the current user's objects.
set head off
set pagesize 0
set linesize 100
-- Tables first, with CASCADE CONSTRAINTS so FK dependencies do not block drops
select 'drop table '||Table_name||' cascade constraints;' from user_tables;

set pagesize 200
-- Everything else; DB links excluded (they need DROP DATABASE LINK as owner)
select 'drop '||object_type||' '||object_name||';' from user_objects where object_type not in ('DATABASE LINK');

or

-- Alternative: spool a drop script from DBA_OBJECTS (works from any DBA login).
-- DECODE appends CASCADE CONSTRAINTS for tables, a plain ";" for everything else.
spool target_schemaname_drop_obj.sql
select 'drop '||object_type||' '||owner||'.'||object_name||DECODE(OBJECT_TYPE,'TABLE',' CASCADE CONSTRAINTS;',';')
from dba_objects
where owner in ('SCHEMANAME') and OBJECT_TYPE not in ('DATABASE LINK') order by object_type,object_name;
spool off;

OTHERS
------
-- Generate DROP statements for every object in a schema.
-- PACKAGE BODY is mapped to PACKAGE (dropping the spec drops the body too);
-- tables get CASCADE CONSTRAINTS so FK dependencies do not block the drop.
select 'DROP '||decode(object_type, 'PACKAGE BODY', 'PACKAGE',object_type)|| ' '||owner||'.'||object_name|| decode(object_type,'TABLE',' CASCADE CONSTRAINTS;',';')
from dba_objects
where owner=upper('&Schema')
  -- and object_type not in ('TABLE')   -- optional filter: uncomment to keep tables
;
-- BUG FIX: the original placed the terminating ";" inside the commented-out
-- line, so SQL*Plus never saw the end of the statement and kept prompting.

exit

ls -lrt
target_schemaname_drop_obj.sql
vi target_schemaname_drop_obj.sql
edit and save using :wq

sqlplus "/as sysdba"
select name from v$database;

@target_schemaname_drop_obj.sql

schema object count:
select object_type,count(*) from dba_objects where owner=upper('&username') group by object_type;
exit

pwd
/target/backup/location/EXPORT_REF_NO/

ls -lrt
source_schemaname_exp.dmp

Restore schema backup:
---------------------
A. For schema import (EXPDP/IMPDP)
nohup impdp system/`pwd` logfile=source_schemaname_exp_import.log directory=EXPDPDIR remap_schema=sourceschemaname:targetschemaname dumpfile=source_schemaname_exp.dmp exclude=statistics PARALLEL=4 &

B. For schema import (EXP/IMP)
nohup imp system/`pwd` file=source_schemaname_exp.dmp log=source_schemaname_exp_import.log fromuser=schemaname touser=schemaname ignore=y statistics=NONE commit=y resumable=y resumable_name=continue resumable_timeout=18000 &

Check the status of IMPORT:
$jobs or $ps -ef| grep imp
$ls -lrt
$cat source_schemaname_exp_import.log

sqlplus "/as sysdba"
select name from v$database;
schema object count:
select object_type,count(*) from dba_objects where owner=upper('&username') group by object_type;

NOTE: THE TARGET SCHEMA OBJECT COUNT SHOULD BE SAME AS THE SOURCE SCHEMA OBJECT COUNT CAPTURED

DBLINKS DETAILS
---------------
-- Post-refresh check: list all database links (owner, name, remote user,
-- TNS host descriptor, creation time). Verify links still point where expected.
set pages 100 lines 150
col owner for a15
col username for a15
col host for a35
col created for a20
select owner, db_link, username, host, to_char(created,'dd-mon-yyy hh24:mi:ss') CREATED from dba_db_links order by owner, db_link;
OWNER           DB_LINK                             USERNAME        HOST                                CREATED
--------------- ----------------------------------- --------------- ----------------------------------- --------------------
exit

Gather schema stats after schema refresh
----------------------------------------
-- Re-gather optimizer statistics for the refreshed schema (stats were excluded
-- from the export). CASCADE=>TRUE also gathers index stats; ESTIMATE_PERCENT=>30
-- samples 30% of rows; degree=>4 gathers in parallel.
set timing on
exec dbms_stats.gather_schema_stats(ownname=>'schemaname',CASCADE=>TRUE,ESTIMATE_PERCENT=>30,degree =>4);

NOTE: ONCE THE TASK IS COMPLETED ON THE PRODUCTION SERVER, EXIT IMMEDIATELY.



"Hope for the best, prepare for the worst"
-Dharmendra

No comments:

Post a Comment