Friday, September 28, 2012

Installing Oracle RAC 11gR2 Grid Infrastructure




Environment:
Database Version        : 11.2.0.3

NODE 1 :
Hostname                : orarac01.made.co.id
IP Address eth0         : 192.168.115.10 (public address)
IP Address eth1         : 192.168.1.10 (private address)

NODE 2 :
Hostname                : orarac02.made.co.id
IP Address eth0         : 192.168.115.20 (public address)
IP Address eth1         : 192.168.1.20 (private address)

Add groups on node 1 and node 2
[root@orarac01 ~]# groupadd -g 501 oinstall
[root@orarac01 ~]# groupadd -g 502 dba
[root@orarac01 ~]# groupadd -g 504 asmadmin
[root@orarac01 ~]# groupadd -g 506 asmdba
[root@orarac01 ~]# groupadd -g 507 asmoper


Add users on node 1 and node 2
[root@orarac01 ~]# useradd -u 501 -g oinstall -G dba,asmadmin,asmdba,asmoper grid
[root@orarac01 ~]# useradd -u 502 -g oinstall -G dba,asmdba oracle
[root@orarac01 ~]# passwd oracle
Changing password for user oracle.
New UNIX password:oracle
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:oracle
passwd: all authentication tokens updated successfully.
[root@orarac01 ~]# passwd grid
Changing password for user grid.
New UNIX password:oracle
BAD PASSWORD: it is based on a dictionary word
Retype new UNIX password:oracle
passwd: all authentication tokens updated successfully.
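
Verify that the UIDs, GIDs, and group memberships are identical on both nodes:
[root@orarac01 ~]# id grid
[root@orarac01 ~]# id oracle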

Install required packages for Oracle Database on node 1 and node 2
rpm -Uvh binutils-2.*
rpm -Uvh compat-libstdc++-33*
rpm -Uvh elfutils-libelf-0.*
rpm -Uvh elfutils-libelf-devel-*
rpm -Uvh gcc-4.*
rpm -Uvh gcc-c++-4.*
rpm -Uvh glibc-2.*
rpm -Uvh glibc-common-2.*
rpm -Uvh glibc-devel-2.*
rpm -Uvh glibc-headers-2.*
rpm -Uvh ksh-2*
rpm -Uvh libaio-0.*
rpm -Uvh libaio-devel-0.*
rpm -Uvh libgcc-4.*
rpm -Uvh libstdc++-4.*
rpm -Uvh libstdc++-devel-4.*
rpm -Uvh make-3.*
rpm -Uvh sysstat-7.*
rpm -Uvh unixODBC-2.*
rpm -Uvh unixODBC-devel-2.*

Verify the required packages on node 1 and node 2
rpm -q --qf '%{NAME}-%{VERSION}-%{RELEASE} (%{ARCH})\n' binutils \
compat-libstdc++-33 \
elfutils-libelf \
elfutils-libelf-devel \
gcc \
gcc-c++ \
glibc \
glibc-common \
glibc-devel \
glibc-headers \
ksh \
libaio \
libaio-devel \
libgcc \
libstdc++ \
libstdc++-devel \
make \
sysstat \
unixODBC \
unixODBC-devel

Install the ASMLib packages on node 1 and node 2
rpm -ivh oracleasm-support-2.1.7-1.el5.i386.rpm
rpm -ivh oracleasmlib-2.0.4-1.el5.i386.rpm
rpm -ivh oracleasm-2.6.18-194.el5-2.0.5-1.el5.i686.rpm

Install cvuqdisk from the grid installation media on node 1 and node 2
cd /source/grid/rpm
rpm -Uvh cvuqdisk*
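
The cvuqdisk package reads the CVUQDISK_GRP environment variable to determine the group that owns the utility, and defaults to oinstall when it is unset; if your inventory group differs, export it before running the rpm command:
CVUQDISK_GRP=oinstall; export CVUQDISK_GRP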

Add entries to /etc/hosts on node 1 and node 2
# Public
192.168.115.10          orarac01.made.co.id             orarac01
192.168.115.20          orarac02.made.co.id             orarac02
# Private
192.168.1.10            orarac01-priv.made.co.id        orarac01-priv
192.168.1.20            orarac02-priv.made.co.id        orarac02-priv
# Virtual
192.168.115.101          orarac01-vip.made.co.id         orarac01-vip
192.168.115.102          orarac02-vip.made.co.id         orarac02-vip
# SCAN
192.168.115.103          orarac-scan.made.co.id          orarac-scan
192.168.115.104          orarac-scan.made.co.id          orarac-scan
192.168.115.105          orarac-scan.made.co.id          orarac-scan
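
Oracle recommends resolving the SCAN through DNS, round-robin across all three addresses; listing it in /etc/hosts as above effectively leaves only the first entry usable. If DNS were configured instead, resolution could be checked with:
[root@orarac01 ~]# nslookup orarac-scan.made.co.id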

Test connectivity from node 1 and node 2
[root@orarac01 ~]# ping orarac02
[root@orarac01 ~]# ping orarac02-priv

Add kernel parameters to /etc/sysctl.conf on node 1 and node 2
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
fs.file-max = 6553600
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
fs.aio-max-nr = 1048576
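
Apply the new settings to the running kernel without a reboot:
[root@orarac01 ~]# /sbin/sysctl -p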
 
 
Add parameters to /etc/security/limits.conf on node 1 and node 2
grid hard nofile 65536
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
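
Oracle's installation guide sets the same four limits for each installation owner, so the grid user would normally get the remaining three entries as well (an addition to the list above):
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
A fresh login session can confirm the hard file-descriptor limit:
[grid@orarac01 ~]$ ulimit -Hn
65536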
 
Create the Oracle Inventory directory on node 1 and node 2
[root@orarac01 ~]# mkdir -p /u01/app/oraInventory
[root@orarac01 ~]# chown -R grid:oinstall /u01/app/oraInventory
[root@orarac01 ~]# chmod -R 775 /u01/app/oraInventory
 
Create the Grid home on node 1 and node 2
[root@orarac01 ~]# mkdir -p /u01/app/grid/11.2.0/grid
[root@orarac01 ~]# chown -R grid:oinstall /u01/app/grid/11.2.0/grid
[root@orarac01 ~]# chmod -R 775 /u01/app/grid/11.2.0/grid
 
Create the Oracle base on node 1 and node 2
[root@orarac01 ~]# mkdir -p /u01/app/oracle
[root@orarac01 ~]# chown -R oracle:oinstall /u01/app/oracle
[root@orarac01 ~]# chmod -R 775 /u01/app/oracle
 
View the disk devices
[root@orarac01 dev]# ls sd*
sda  sda1  sda2  sda3  sdb  sdc  sdd  sde  sdf  sdg

Create partitions on the shared disks
[root@orarac01 dev]# fdisk /dev/sdb
Device contains neither a valid DOS partition table, nor Sun, SGI or OSF disklabel
Building a new DOS disklabel. Changes will remain in memory only,
until you decide to write them. After that, of course, the previous
content won't be recoverable.


The number of cylinders for this disk is set to 1044.
There is nothing wrong with that, but this is larger than 1024,
and could in certain setups cause problems with:
1) software that runs at boot time (e.g., old versions of LILO)
2) booting and partitioning software from other OSs
   (e.g., DOS FDISK, OS/2 FDISK)
Warning: invalid flag 0x0000 of partition table 4 will be corrected by w(rite)

Command (m for help): n
Command action
   e   extended
   p   primary partition (1-4)
p
Partition number (1-4): 1
First cylinder (1-1044, default 1):
Using default value 1
Last cylinder or +size or +sizeM or +sizeK (1-1044, default 1044):
Using default value 1044

Command (m for help): w
The partition table has been altered!

Calling ioctl() to re-read partition table.
Syncing disks.

Repeat the same partitioning steps for the remaining disks:
[root@orarac01 dev]# fdisk /dev/sdc
[root@orarac01 dev]# fdisk /dev/sdd
[root@orarac01 dev]# fdisk /dev/sde
[root@orarac01 dev]# fdisk /dev/sdf
[root@orarac01 dev]# fdisk /dev/sdg
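
On node 2, run partprobe (or reboot) so the kernel re-reads the new partition tables. The interactive fdisk answers above can also be scripted; a minimal sketch, assuming the same single full-size primary partition on each disk:
[root@orarac01 dev]# for d in sdc sdd sde sdf sdg; do echo -e "n\np\n1\n\n\nw" | fdisk /dev/$d; done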

[root@orarac01 dev]# ls sd*
sda   sda2  sdb   sdc   sdd   sde   sdf   sdg
sda1  sda3  sdb1  sdc1  sdd1  sde1  sdf1  sdg1

Configure ASMLib on node 1 and node 2
[root@orarac01 dev]# oracleasm configure -i
Configuring the Oracle ASM library driver.

This will configure the on-boot properties of the Oracle ASM library
driver.  The following questions will determine whether the driver is
loaded on boot and what permissions it will have.  The current values
will be shown in brackets ('[]').  Hitting <ENTER> without typing an
answer will keep that current value.  Ctrl-C will abort.

Default user to own the driver interface []: grid
Default group to own the driver interface []: asmadmin
Start Oracle ASM library driver on boot (y/n) [n]: y
Scan for Oracle ASM disks on boot (y/n) [y]:
Writing Oracle ASM library driver configuration: done

[root@orarac01 dev]# /usr/sbin/oracleasm init
Creating /dev/oracleasm mount point: /dev/oracleasm
Loading module "oracleasm": oracleasm
Mounting ASMlib driver filesystem: /dev/oracleasm

Create ASM disks on node 1 only (the labels are written to the shared storage)
[root@orarac01 dev]# /usr/sbin/oracleasm createdisk DISK1 /dev/sdb1
Writing disk header: done
Instantiating disk: done
[root@orarac01 dev]# /usr/sbin/oracleasm createdisk DISK2 /dev/sdc1
Writing disk header: done
Instantiating disk: done
[root@orarac01 dev]# /usr/sbin/oracleasm createdisk DISK3 /dev/sdd1
Writing disk header: done
Instantiating disk: done
[root@orarac01 dev]# /usr/sbin/oracleasm createdisk DISK4 /dev/sde1
Writing disk header: done
Instantiating disk: done
[root@orarac01 dev]# /usr/sbin/oracleasm createdisk DISK5 /dev/sdf1
Writing disk header: done
Instantiating disk: done
[root@orarac01 dev]# /usr/sbin/oracleasm createdisk DISK6 /dev/sdg1
Writing disk header: done
Instantiating disk: done

Scan ASM disks on node 1 and node 2
[root@orarac01 dev]# /usr/sbin/oracleasm scandisks
Reloading disk partitions: done
Cleaning any stale ASM disks...
Scanning system for ASM disks...

List ASM disks on node 1 and node 2
[root@orarac01 dev]# /usr/sbin/oracleasm listdisks
DISK1
DISK2
DISK3
DISK4
DISK5
DISK6
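
To confirm which device backs a given label, query the disk by name; the -p flag prints the matching device path:
[root@orarac01 dev]# /usr/sbin/oracleasm querydisk -p DISK1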

Configure SSH user equivalence for the grid user

NODE 1
Log in as the grid user
[grid@orarac01 ~]$ mkdir .ssh
[grid@orarac01 ~]$ chmod 700 .ssh/
[grid@orarac01 ~]$ /usr/bin/ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/grid/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/grid/.ssh/id_rsa.
Your public key has been saved in /home/grid/.ssh/id_rsa.pub.
The key fingerprint is:
5b:66:c6:7a:79:c9:d1:f2:8b:9e:58:73:24:9b:69:c7 grid@orarac01.made.co.id

[grid@orarac01 ~]$ /usr/bin/ssh-keygen -t dsa
Generating public/private dsa key pair.
Enter file in which to save the key (/home/grid/.ssh/id_dsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/grid/.ssh/id_dsa.
Your public key has been saved in /home/grid/.ssh/id_dsa.pub.
The key fingerprint is:
1f:e4:59:bf:fe:f9:b3:c4:8e:d3:57:1a:ea:68:31:aa grid@orarac01.made.co.id

NODE 2
Log in as the grid user
[grid@orarac02 ~]$ mkdir .ssh
[grid@orarac02 ~]$ chmod 700 .ssh/
[grid@orarac02 ~]$ /usr/bin/ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/home/grid/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/grid/.ssh/id_rsa.
Your public key has been saved in /home/grid/.ssh/id_rsa.pub.
The key fingerprint is:
40:c1:0b:cb:5b:fe:e8:af:df:25:81:08:fd:c1:55:af grid@orarac02.made.co.id

[grid@orarac02 ~]$ /usr/bin/ssh-keygen -t dsa
Generating public/private dsa key pair.
Enter file in which to save the key (/home/grid/.ssh/id_dsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /home/grid/.ssh/id_dsa.
Your public key has been saved in /home/grid/.ssh/id_dsa.pub.
The key fingerprint is:
10:b9:cd:6b:b3:a2:02:92:04:28:dd:e9:e1:9a:63:ec grid@orarac02.made.co.id


On NODE 1

[grid@orarac01 .ssh]$ cat id_rsa.pub >>authorized_keys
[grid@orarac01 .ssh]$ cat id_dsa.pub >>authorized_keys
[grid@orarac01 .ssh]$ scp authorized_keys grid@orarac02:/home/grid/.ssh/
The authenticity of host 'orarac02 (192.168.115.20)' can't be established.
RSA key fingerprint is 4a:f3:9f:b4:0f:1c:0a:b3:c8:79:f1:ec:f7:95:56:73.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'orarac02,192.168.115.20' (RSA) to the list of known hosts.
grid@orarac02's password:
authorized_keys                               100% 1020     1.0KB/s   00:00
[grid@orarac01 .ssh]$ ssh orarac02
Enter passphrase for key '/home/grid/.ssh/id_rsa':
Last login: Tue Aug 21 00:43:17 2012 from 192.168.115.1
[grid@orarac02 ~]$ cd .ssh/
[grid@orarac02 .ssh]$ ls
authorized_keys  id_dsa  id_dsa.pub  id_rsa  id_rsa.pub
[grid@orarac02 .ssh]$ cat id_rsa.pub  >> authorized_keys
[grid@orarac02 .ssh]$ cat id_dsa.pub  >> authorized_keys
[grid@orarac02 .ssh]$ scp authorized_keys grid@orarac01:/home/grid/.ssh/
The authenticity of host 'orarac01 (192.168.115.10)' can't be established.
RSA key fingerprint is 4a:f3:9f:b4:0f:1c:0a:b3:c8:79:f1:ec:f7:95:56:73.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'orarac01,192.168.115.10' (RSA) to the list of known hosts.
grid@orarac01's password:
authorized_keys                               100% 2040     2.0KB/s   00:00
[grid@orarac02 .ssh]$ exec /usr/bin/ssh-agent $SHELL
[grid@orarac02 .ssh]$ exit
exit
Connection to orarac02 closed.
[grid@orarac01 .ssh]$ exec /usr/bin/ssh-agent $SHELL
[grid@orarac01 .ssh]$ /usr/bin/ssh-add
Enter passphrase for /home/grid/.ssh/id_rsa:
Identity added: /home/grid/.ssh/id_rsa (/home/grid/.ssh/id_rsa)
Identity added: /home/grid/.ssh/id_dsa (/home/grid/.ssh/id_dsa)


On NODE 2
[grid@orarac02 ~]$ exec /usr/bin/ssh-agent $SHELL
[grid@orarac02 ~]$ /usr/bin/ssh-add
Enter passphrase for /home/grid/.ssh/id_rsa:
Identity added: /home/grid/.ssh/id_rsa (/home/grid/.ssh/id_rsa)
Identity added: /home/grid/.ssh/id_dsa (/home/grid/.ssh/id_dsa)

Test ssh
[grid@orarac02 ~]$ ssh orarac02 date
Tue Aug 21 00:54:01 WIT 2012
[grid@orarac02 ~]$ ssh orarac01 date
Tue Aug 21 00:54:11 WIT 2012
[grid@orarac01 .ssh]$ ssh orarac01 date
Tue Aug 21 00:53:43 WIT 2012
[grid@orarac01 .ssh]$ ssh orarac02 date
Tue Aug 21 00:53:52 WIT 2012
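
The installer also checks the private interconnect names, so it is worth exercising every combination from each node (answer the one-time host-key prompts on the first pass so later runs are prompt-free):
[grid@orarac01 ~]$ for h in orarac01 orarac02 orarac01-priv orarac02-priv; do ssh $h date; done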

Disable NTP on node 1 and node 2 (the Cluster Time Synchronization Service will manage cluster time instead)

[root@orarac01 ~]# service ntpd stop
Shutting down ntpd:                                        [  OK  ]
[root@orarac01 ~]# chkconfig ntpd off
[root@orarac01 ~]# mv /etc/ntp.conf /etc/ntp.conf.org
[root@orarac01 ~]# rm /var/run/ntpd.pid
If you keep NTP running instead, Oracle requires the slewing (-x) option; set it and restart the service:
[root@orarac01 ~]# vi /etc/sysconfig/ntpd
OPTIONS="-x -u ntp:ntp -p /var/run/ntpd.pid"
[root@orarac01 ~]# service ntpd restart
Shutting down ntpd:                                        [FAILED]
(the shutdown step reports FAILED here only because ntpd was already stopped above)
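
After Grid Infrastructure is installed, CTSS should report that it is synchronizing the cluster in active mode:
[grid@orarac01 ~]$ crsctl check ctss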

Check prerequisites with the Cluster Verification Utility
[grid@orarac01 grid]$ sh runcluvfy.sh stage -pre crsinst -n orarac01,orarac02 -verbose >> /home/grid/runcluvfy.log
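
Because the output is redirected, scan the log for any checks that did not pass before continuing:
[grid@orarac01 grid]$ grep -i failed /home/grid/runcluvfy.log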

[root@orarac01 ~]# su - grid
[grid@orarac01 ~]$ vi grid.env
# Environment for grid
GRID_HOME=/u01/app/grid/11.2.0/grid; export GRID_HOME
ORACLE_HOME=/u01/app/grid/11.2.0/grid; export ORACLE_HOME
PATH=/u01/app/grid/11.2.0/grid/bin:$PATH; export PATH

Install Grid Infrastructure
[grid@orarac01 grid]$ ./runInstaller


NODE 1

[root@orarac01 ~]# sh /u01/app/oraInventory/orainstRoot.sh
[root@orarac01 ~]# sh /u01/app/grid/11.2.0/grid/root.sh
Configure Oracle Grid Infrastructure for a Cluster ... succeeded

NODE 2
[root@orarac02 ~]# sh /u01/app/oraInventory/orainstRoot.sh
[root@orarac02 ~]# sh /u01/app/grid/11.2.0/grid/root.sh
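
With root.sh complete on both nodes, verify the clusterware stack from either node:
[grid@orarac01 ~]$ /u01/app/grid/11.2.0/grid/bin/crsctl check cluster -all
[grid@orarac01 ~]$ /u01/app/grid/11.2.0/grid/bin/crsctl stat res -t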

Create ASM disk groups
[grid@orarac01 ~]$ . ./grid.env
[grid@orarac01 ~]$ asmca
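
As an alternative to the asmca GUI, a disk group can also be created from SQL*Plus against the local ASM instance; a minimal sketch, assuming instance +ASM1 on node 1 and a DATA group built on two of the labels created earlier:
[grid@orarac01 ~]$ ORACLE_SID=+ASM1; export ORACLE_SID
[grid@orarac01 ~]$ sqlplus / as sysasm <<EOF
CREATE DISKGROUP DATA EXTERNAL REDUNDANCY
  DISK 'ORCL:DISK1', 'ORCL:DISK2';
EOF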

Install Database Software

[oracle@orarac01 database]$ ./runInstaller

Create the Database
[oracle@orarac01 ~]$ vi db.env

# Environment for the database
ORACLE_BASE=/u01/app/oracle; export ORACLE_BASE
ORACLE_HOME=/u01/app/oracle/product/11.2.0/dbhome_1; export ORACLE_HOME
PATH=/usr/sbin:$PATH; export PATH
PATH=$ORACLE_HOME/bin:$PATH; export PATH

[oracle@orarac02 ~]$ . ./db.env
[oracle@orarac02 ~]$ dbca

Reconfigure Grid Infrastructure (for example, after changing the node virtual IPs):
Step 1: As root, run "$GRID_HOME/crs/install/rootcrs.pl -verbose -deconfig -force" on all nodes except the last one.
Step 2: As root, run "$GRID_HOME/crs/install/rootcrs.pl -verbose -deconfig -force -lastnode" on the last node. This command also zeroes out the OCR and voting disks.
Step 3: As root, run $GRID_HOME/root.sh on the first node.
Step 4: As root, run $GRID_HOME/root.sh on all other nodes except the last one.
Step 5: As root, run $GRID_HOME/root.sh on the last node.

Start and stop the database
srvctl start database -d TEST
srvctl stop database -d TEST
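
srvctl can also report the configuration and current state of the database across both nodes:
srvctl status database -d TEST
srvctl config database -d TEST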
