
Thursday, January 17, 2013

Steps to create a container for Oracle DB installation

  • Create a VNIC named public1

dladm create-vnic -l net0 public1
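To confirm that the VNIC exists before configuring the zone, you can list it from the global zone:

# list the new VNIC and the link it sits on
dladm show-vnic public1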
  • Create a zone.template file as under:
create -b
set zonepath=/oraclePool/zones/test
set limitpriv=default,proc_priocntl
set max-shm-memory=50G
add net
 set physical=public1
end
  • Generate sysconfig.xml file for initial zone configuration
There are two ways to generate the sysconfig.xml file
  • Generate it from scratch by issuing the following command
sysconfig create-profile -o sysconfig.xml
  • Take an existing sysconfig.xml file and update the hostname (search for system/identity) and the IP address (search for network/install); see the sed sketch after the sample file below
A typical sysconfig.xml file is as under:
<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
<service_bundle type="profile" name="sysconfig">
  <service version="1" type="service" name="system/config-user">
    <instance enabled="true" name="default">
      <property_group type="application" name="root_account">
        <propval type="astring" name="login" value="root"/>
        <propval type="astring" name="password" value="$5$Px97tHB0$X3mcS7MybV7fkYTB7Z4NEDDwDJDBPEvIA41w.UmbRm."/>
        <propval type="astring" name="type" value="normal"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/timezone">
    <instance enabled="true" name="default">
      <property_group type="application" name="timezone">
        <propval type="astring" name="localtime" value="US/Eastern"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/environment">
    <instance enabled="true" name="init">
      <property_group type="application" name="environment">
        <propval type="astring" name="LANG" value="en_US.UTF-8"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/identity">
    <instance enabled="true" name="node">
      <property_group type="application" name="config">
        <propval type="astring" name="nodename" value="dat-zone1"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/keymap">
    <instance enabled="true" name="default">
      <property_group type="system" name="keymap">
        <propval type="astring" name="layout" value="US-English"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/console-login">
  <instance enabled="true" name="default">
      <property_group type="application" name="ttymon">
        <propval type="astring" name="terminal_type" value="vt100"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="network/physical">
    <instance enabled="true" name="default">
      <property_group type="application" name="netcfg">
        <propval type="astring" name="active_ncp" value="DefaultFixed"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="network/install">
    <instance enabled="true" name="default">
      <property_group type="application" name="install_ipv4_interface">
        <propval type="astring" name="address_type" value="static"/>
        <propval type="net_address_v4" name="static_address" value="10.129.195.211/24"/>
        <propval type="astring" name="name" value="public1/v4"/>
        <propval type="net_address_v4" name="default_route" value="10.129.195.1"/>
      </property_group>
      <property_group type="application" name="install_ipv6_interface">
        <propval type="astring" name="stateful" value="yes"/>
        <propval type="astring" name="stateless" value="yes"/>
        <propval type="astring" name="address_type" value="addrconf"/>
        <propval type="astring" name="name" value="public1/v6"/>
      </property_group>
    </instance>
  </service>
  <service version="1" type="service" name="system/name-service/switch">
    <property_group type="application" name="config">
      <propval type="astring" name="default" value="files"/>
      <propval type="astring" name="host" value="files dns"/>
      <propval type="astring" name="printer" value="user files"/>
    </property_group>
    <instance enabled="true" name="default"/>
  </service>
 <service version="1" type="service" name="system/name-service/cache">
    <instance enabled="true" name="default"/>
  </service>
  <service version="1" type="service" name="network/dns/client">
    <property_group type="application" name="config">
      <property type="net_address" name="nameserver">
        <net_address_list>
          <value_node value="130.35.249.41"/>
          <value_node value="130.35.249.52"/>
          <value_node value="144.20.190.70"/>
        </net_address_list>
      </property>
      <property type="astring" name="search">
        <astring_list>
          <value_node value="us.oracle.com oraclecorp.com oracle.com"/>
        </astring_list>
      </property>
    </property_group>
    <instance enabled="true" name="default"/>
  </service>
</service_bundle>
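If you prefer to reuse an existing sysconfig.xml rather than generate one, the two values that usually need to change are the nodename and the static address. A minimal sed sketch, assuming the sample values above and hypothetical new ones (new-zone1 and 10.129.195.212/24):

# write an updated profile; dat-zone1/10.129.195.211 are the old values from the sample file,
# new-zone1/10.129.195.212 are hypothetical replacements
sed -e 's/value="dat-zone1"/value="new-zone1"/' \
    -e 's|10.129.195.211/24|10.129.195.212/24|' \
    sysconfig.xml > sysconfig-new.xml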
  • Execute the following script to configure, install and boot the zone
zoneName=test
zoneTemplate=zone.template
VNIC=public1
echo "zone template : ${zoneTemplate}"

echo "Configuring the zone : ${zoneName}"
zonecfg -z ${zoneName} -f ${zoneTemplate}
echo "Verify that zone ${zoneName} has been configured"
zoneadm list -icv | grep ${zoneName}


echo " Install the zone : ${zoneName}"
zoneadm -z ${zoneName} install -c /tmp/sysconfig.xml
echo "Verify that zone ${zoneName} has been installed"
zoneadm list -icv | grep ${zoneName}


echo "Wait for a minute before booting the zone"
sleep 60
echo "Boot the zone : ${zoneName} "
zoneadm -z ${zoneName} boot
echo "Verify that zone ${zoneName} is running"
zoneadm list -icv | grep ${zoneName}

  • Verify that the SMF services are up and running in the zone:
Issue zlogin test svcs (or log in with zlogin test and run svcs)
If the SMF services are not up yet, log in to the console, wait a couple of minutes, and check again
zlogin -C test
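A quick check from the global zone, once the zone is running, is to ask for only the unhealthy services; no output means everything came up cleanly:

# svcs -x lists only services that are degraded, offline or in maintenance
zlogin test svcs -x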
  • Verify that the zone is up and running

ping <zone-hostname> from the global zone
ping <global-hostname> from the local zone
Verify that the network is configured correctly in the local zone
zlogin test
root@dat-zone1:~# dladm show-vnic
LINK                OVER         SPEED  MACADDRESS        MACADDRTYPE       VID
public1             ?            1000   2:8:20:af:91:c8   random            0
root@dat-zone1:~# ipadm show-addr
ADDROBJ           TYPE     STATE        ADDR
lo0/v4            static   ok           127.0.0.1/8
public1/v4        static   ok           10.129.195.211/24
lo0/v6            static   ok           ::1/128
public1/v6        addrconf ok           fe80::8:20ff:feaf:91c8/10
root@dat-zone1:~#


Add the ASM disk and the directory where the Oracle bits are available to the zone, then reboot the zone.
Create a file named addtoZone.template

add device
    set match=/dev/rdsk/c2t8d3s2
    set allow-partition=true
    set allow-raw-io=true
end
add fs
    set dir=/installer
    set special=/installer
    set type=lofs
end

Reconfigure and reboot the zone


zonecfg -z test -f addtoZone.template
zoneadm -z test reboot
Prepare for Oracle installation. Install Oracle and ASM.

Log on to the test container and execute the following script:
asmDisk=/dev/rdsk/c2t8d3s2
# create the dba and oinstall groups and the oracle user
groupadd -g 201 dba
groupadd -g 200 oinstall
useradd -g oinstall -G dba -u 200 oracle
mkdir /export/home/oracle
chown -R oracle:oinstall /export/home/oracle
# create the Oracle base directory with the right ownership and permissions
mkdir -p /u01/app/oracle
chown -R oracle:oinstall /u01
chmod -R 775 /u01
echo "Enter password for Oracle"
passwd oracle
# install the packages required by the Oracle installer
pkg install SUNWarc SUNWbtool SUNWhea SUNWlibm SUNWlibms SUNWpool SUNWsprot SUNWtoo SUNWuiu8 SUNWfont-xorg-core SUNWfont-xorg-iso8859-1 SUNWmfrun SUNWxorg-client-programs SUNWxorg-clientlibs SUNWxwfsw pkg://solaris/SUNWxwplt
# give the oracle user access to the ASM disk
chown oracle:oinstall ${asmDisk}
chmod 660 ${asmDisk}
Invoke the grid installer and the DB installer to install ASM and the Oracle database.
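A minimal sketch of kicking off the installers as the oracle user, assuming the media were unpacked under the /installer lofs mount into grid and database subdirectories (those paths are hypothetical) and that DISPLAY points at a reachable X server:

su - oracle
export DISPLAY=<workstation>:0.0
# install Grid Infrastructure (which provides ASM) first; the paths under /installer are assumptions
cd /installer/grid && ./runInstaller
# then install the database software
cd /installer/database && ./runInstaller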

Monday, August 13, 2012

Installing Oracle database in Solaris Container: Issue resolution

I was trying to install Oracle Clusterware 11.2.0.2 in a Solaris container. I got the following error:

Adding daemon to inittab
ACFS-9459: ADVM/ACFS is not supported on this OS version: 'Solaris Container: Solaris 10 update 10'
ACFS-9201: Not Supported
ACFS-9459: ADVM/ACFS is not supported on this OS version: 'Solaris Container: Solaris 10 update 10'
CRS-2672: Attempting to start 'ora.mdnsd' on 'dszone1'
CRS-2676: Start of 'ora.mdnsd' on 'dszone1' succeeded
CRS-2672: Attempting to start 'ora.gpnpd' on 'dszone1'
CRS-2676: Start of 'ora.gpnpd' on 'dszone1' succeeded
CRS-2672: Attempting to start 'ora.cssdmonitor' on 'dszone1'
CRS-2672: Attempting to start 'ora.gipcd' on 'dszone1'
CRS-2676: Start of 'ora.cssdmonitor' on 'dszone1' succeeded
CRS-2676: Start of 'ora.gipcd' on 'dszone1' succeeded
CRS-2672: Attempting to start 'ora.cssd' on 'dszone1'
CRS-2672: Attempting to start 'ora.diskmon' on 'dszone1'
CRS-2676: Start of 'ora.diskmon' on 'dszone1' succeeded
CRS-2674: Start of 'ora.cssd' on 'dszone1' failed
CRS-2679: Attempting to clean 'ora.cssd' on 'dszone1'
CRS-2681: Clean of 'ora.cssd' on 'dszone1' succeeded
CRS-2673: Attempting to stop 'ora.diskmon' on 'dszone1'
CRS-2677: Stop of 'ora.diskmon' on 'dszone1' succeeded
CRS-2673: Attempting to stop 'ora.gipcd' on 'dszone1'
CRS-2677: Stop of 'ora.gipcd' on 'dszone1' succeeded
CRS-2673: Attempting to stop 'ora.cssdmonitor' on 'dszone1'
CRS-2677: Stop of 'ora.cssdmonitor' on 'dszone1' succeeded
CRS-2673: Attempting to stop 'ora.gpnpd' on 'dszone1'
CRS-2677: Stop of 'ora.gpnpd' on 'dszone1' succeeded
CRS-2673: Attempting to stop 'ora.mdnsd' on 'dszone1'
CRS-2677: Stop of 'ora.mdnsd' on 'dszone1' succeeded
CRS-5804: Communication error with agent process
CRS-4000: Command Start failed, or completed with errors.
The exlusive mode cluster start failed, see Clusterware alert log for more information
Initial cluster configuration failed.  See /u01/app/11.2.0/grid/cfgtoollogs/crsconfig


The alert log under /u01/app/11.2.0/grid/log/<nodename> has the following error:



[cssd(8886)]CRS-1656:The CSS daemon is terminating due to a fatal error; Details at (:CSSSC00011:) in /u01/app/11.2.0/grid/
log/dszone1/cssd/ocssd.log

And the ocssd.log has the following error:

2012-08-07 06:06:57.599: [    CSSD][1]clssscSetPrivEnv: Setting priority to 4
2012-08-07 06:06:57.607: [    CSSD][1]clssscSetPrivEnv: unable to set priority to 4
2012-08-07 06:06:57.607: [    CSSD][1]SLOS: cat=-2, opn=scls_set_priority_realtime, dep=1, loc=setsched
unable to escalate to real time
2012-08-07 06:06:57.607: [    CSSD][1](:CSSSC00011:)clssscExit: A fatal error occurred during initialization

Restarting the container with the following privileges fixed the issue:

set limitpriv="default,proc_priocntl,proc_clock_highres,sys_time"
Refer to metalink note ID 1340694.1 for detailed information.
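As a sketch, the privilege change can be applied from the global zone and made effective with a reboot (assuming the container is named dszone1, as in the logs above):

# add the missing privileges to the zone configuration
zonecfg -z dszone1 'set limitpriv=default,proc_priocntl,proc_clock_highres,sys_time'
# restart the zone so the new privilege set takes effect
zoneadm -z dszone1 reboot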

Tuesday, June 19, 2012

Backup Solaris Zones

Find out the zonepath of the zone you need to back up


root@isve-111-116:~# zonecfg -z rac4Zone info zonepath
zonepath: /zonepools/zones/rac4Zone

Find out the dataset of the zonepath

root@isve-111-116:~# zfs list -H -o name /zonepools/zones/rac4Zone
zonepools/zones/rac4Zone

Create a snapshot of the zonepath

root@isve-111-116:~# zfs snapshot -r zonepools/zones/rac4Zone@backup1

Verify that the snapshot was successfully created


root@isve-111-116:~# zfs list -t snap -r zonepools/zones/rac4Zone
NAME                                                      USED  AVAIL  REFER  MOUNTPOINT
zonepools/zones/rac4Zone@backup1                             0      -    34K  -
zonepools/zones/rac4Zone/rpool@backup1                       0      -    31K  -
zonepools/zones/rac4Zone/rpool/ROOT@backup1                  0      -    31K  -
zonepools/zones/rac4Zone/rpool/ROOT/solaris@install      6.97M      -   291M  -
zonepools/zones/rac4Zone/rpool/ROOT/solaris@backup1       387K      -  5.36G  -
zonepools/zones/rac4Zone/rpool/ROOT/solaris/var@install  27.4M      -  66.2M  -
zonepools/zones/rac4Zone/rpool/ROOT/solaris/var@backup1      0      -  82.5M  -
zonepools/zones/rac4Zone/rpool/dump@backup1                  0      -    16K  -
zonepools/zones/rac4Zone/rpool/export@backup1                0      -    32K  -
zonepools/zones/rac4Zone/rpool/export/home@backup1       6.24M      -  1.83G  -
zonepools/zones/rac4Zone/rpool/swap@backup1                  0      -    16K  -


Archive the zonepath

zfs send -rc zonepools/zones/rac4Zone@backup1 > /cont/rac4

Verify that the archive of the zonepath was created
root@isve-111-116:/cont# file /cont/rac4
/cont/rac4:     ZFS snapshot stream
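Restoring from the archive is essentially the reverse: feed the stream back to zfs receive and then attach the zone. A rough sketch only, assuming a hypothetical target dataset name (flags and layout may need adjusting for your pool):

# recreate the datasets from the archived stream; the target dataset name is hypothetical
zfs receive -F zonepools/zones/rac4Zone_restored < /cont/rac4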

Sunday, November 13, 2011

Oracle RAC installation in Solaris 11 Container

Here are the steps to create a container on Solaris 11 to install Oracle RAC:


  • If the servers do not have enough physical NICs
    • Virtualize the NICs
  • Create VNICs for the public and private interfaces
    • dladm create-vnic -l igb0 vnic5
    • dladm create-vnic -l igb2 priv5
  • Create an exclusive-IP container with these interfaces
    • set ip-type=exclusive
    • add net set physical=vnic5 end
    • add net set physical=priv5 end
  • Set up the private and public interfaces
    • ipadm show-if
    • ipadm create-ip priv5
    • ipadm create-addr -T static -a 199.6.6.6/24 priv5/v4addr
    • ipadm show-addr
  • No need to update any /etc files with the hostname if the ipadm command is used
  • Configure shared memory
    • set max-shm-memory=50G
  • Add ASM device/s to the zone
    • add device
      • set match=<device name>
      • set allow-partition=true
      • set allow-raw-io=true
    • end
  • Provide the right privileges to the container
    • set limitpriv=default,proc_priocntl
  • Install RAC as you would install it in the global zone



Suppose you need to create a container that uses igb0 as the public interface and igb2 as the private interface for an Oracle RAC installation, uses a pool with one CPU in it, and mounts two file systems via lofs. This blog summarizes the steps to create such a container.
  • Create the VNICs for the public and private interfaces
From the global zone, create a VNIC over the public interface igb0 and another over the private interface igb2
  • # dladm create-vnic -l igb0 vnic5
  • # dladm create-vnic -l igb2 priv5

  • Create a pool with one CPU in it:
poolcfg -c 'create pset rac5_set (uint pset.min=1; uint pset.max=1)'
poolcfg -c 'create pool rac5Pool'
poolcfg -c 'associate pool rac5Pool (pset rac5_set)'
poolcfg -c 'transfer to pset rac5_set (cpu 4)'
pooladm -c
pooladm

Check out http://ritukamboj.blogspot.com/search/label/ResourcePool for more info.
  • Create a zone:
Create racZone5 directory under /zonepools/zones and issue the following command:
  • zonecfg -z racZone5 -f zonetemplate.cfg
where zonetemplate.cfg is as under

create
set zonepath=/zonepools/zones/racZone5
set autoboot=false
set limitpriv=default,proc_priocntl
set ip-type=exclusive
add net
set physical=vnic5
end
add net
set physical=priv5
end
set max-shm-memory=50G
add fs
set dir=/u05
set special=/u05
set type=lofs
end
add fs
set dir=/installer
set special=/installer
set type=lofs
end
set pool=rac5Pool
Run
  • zoneadm -z racZone5 install
  • zoneadm -z racZone5 boot
  • zlogin -C racZone5 (for initial configuration)


  • Verify that you can see the VNICS in the container
root@etchst8-zone22:~# dladm show-link
LINK                CLASS     MTU    STATE    OVER
vnic5               vnic      1500   up       ?
priv5               vnic      1500   up       ?
  • Verify that the public interface NIC is plumbed up (through initial configuration)
root@etchst8-zone23:~# ipadm show-if
IFNAME     CLASS    STATE    ACTIVE OVER
lo0        loopback ok       yes    --
vnic6      ip       ok       yes    --
  • Plumb the private interface

  • ipadm show-if
  •  ipadm create-ip priv6
  • ipadm create-addr -T static -a 199.6.6.6/24 priv6/v4addr
root@etchst8-zone23:~# ipadm show-addr
ADDROBJ           TYPE     STATE        ADDR
lo0/v4            static   ok           127.0.0.1/8
vnic6/v4          static   ok           10.6.138.55/24
priv6/v4addr      static   duplicate    199.6.6.6/24
lo0/v6            static   ok           ::1/128
vnic6/v6          addrconf ok           fe80::8:20ff:fe31:49c4/10
Create the Oracle user and the required directories for the Oracle installation
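A minimal sketch of that preparation inside the container, using the same group/user IDs and directory layout as the Oracle DB post above (adjust to your own standards):

# create the oinstall and dba groups and the oracle user
groupadd -g 200 oinstall
groupadd -g 201 dba
useradd -g oinstall -G dba -u 200 -d /export/home/oracle oracle
mkdir -p /export/home/oracle /u01/app/oracle
chown -R oracle:oinstall /export/home/oracle /u01
chmod -R 775 /u01
passwd oracle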

Tuesday, March 8, 2011

Solaris Containers

  • How to create Zones
  • Create a root directory for zones with rwx permission for user.
    • mkdir /zones1
    • chmod 700 /zones1
  • Create a zone configuration file as under:
    • create
    • set zonepath=/zones1
    • set autoboot=true
    • Add network (exclusive IP or default shared IP):
      • Setting exclusive IP
    If you are setting up exclusive IP: (do not set address attribute for exclusive IP)
      set ip-type=exclusive
      add net
      set physical=e1000g2
      end

      Additional info about exclusive IP setup:
      http://download.oracle.com/docs/cd/E19082-01/819-6990/gicom/index.html

        • Setting default shared IP
      If you are setting up the default shared IP:
      add net
      set physical=e1000g2
      set address=<ip address of container>
      end

      • Add raw devices
      To add raw devices to the container's configuration

      add device
      set match=/dev/rdsk/c6t20030003BACCC902*
      end
      add device
      set match=/dev/rdsk/c6t20030003BACCC8FA*
      end

      On Solaris 11:
      zonecfg:rac4Zone> add device
      zonecfg:rac4Zone:device> set match=/dev/rdsk/c1t0d0s0
      zonecfg:rac4Zone:device> set allow-partition=true
      zonecfg:rac4Zone:device> set allow-raw-io=true
      zonecfg:rac4Zone:device> end


      • Add FS
      To add fs refer to
      • http://docs.oracle.com/cd/E23824_01/html/821-1460/z.admin.task-11.html

      To mount a global directory as lofs to container

      zonecfg -z zones1
      add fs
      set dir=/oracle11gR2
      set special=/oracle11gR2
      set type=lofs
      set options=nodevices
      end

      • Set shared memory :
        global# zonecfg -z myzone
        zonecfg:myzone> set max-shm-memory=50G
        • Add dedicated CPU 
          Adding dedicated-cpu is a simpler alternative to creating a pool and attaching it to the container. The add dedicated-cpu command creates a temporary pool and assigns it to the container.

          add dedicated-cpu
          set ncpus=32
          end

          • Remove dedicated CPU

          To remove the dedicated-cpu, issue the following command
          remove dedicated-cpu

          • Add resource pool
            • set pool=rac1pool
          • Remove pool
            • On Solaris 11
              •  clear pool
            • On Solaris 10 Update 3 and below
              • set pool=""
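          As a sketch, binding an already-configured pool to a zone (rac1pool and myzone are the example names used in this post) takes one zonecfg property and a reboot for the binding to take effect:

          # point the zone at the resource pool and restart it
          zonecfg -z myzone 'set pool=rac1pool'
          zoneadm -z myzone reboot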

          • Steps to create and configure a container

          1. Invoke zonecfg command to configure the zone
            1. zonecfg -z zones1 -f test.cfg
          2.  Install the configured zone
            1. zoneadm -z zones1 install
          3.  Use the list subcommand of zoneadm to list the installed zones and verify their status. The status of the zone is "Installed"
            1. zoneadm list -iv
          4. Boot the zone
            1. zoneadm -z zones1 boot
          5.  Verify that the status of the zone is "running"
            1. zoneadm list -v
          6.  Log in to the zone console to perform the internal zone configuration
            1. zlogin -C zones1
          7.  Optionally create an /etc/sysidcfg file to perform the initial zone configuration
            1. Go to /etc/ directory of the local zone
              1. cd /export/home/zones/root/etc
            2. Create sysidcfg file in this directory
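              A minimal sysidcfg sketch for a shared-IP zone (all values are placeholders to adapt; root_password expects a pre-encrypted hash, and an exclusive-IP zone would carry its address details in the network_interface block instead):

              system_locale=C
              terminal=vt100
              network_interface=NONE {hostname=zones1}
              security_policy=NONE
              name_service=NONE
              nfs4_domain=dynamic
              timezone=US/Eastern
              root_password=<encrypted password hash>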

          • Steps to delete a container
            • zoneadm -z myzone halt
            • zoneadm -z myzone uninstall
            • zonecfg -z myzone delete -F

          • List a container and info about it

          • List all zones from global zone
            • zoneadm list -v
          • Login to local zone from global zone:
            • zlogin <zonename>
          • In the zone, issue zonename to print its name
          • Get info about the zone
            • zonecfg -z myzone info
          • To verify that /dev/rdsk/c6 is accessible in the local container
            • ls /my_zone/root/dev/rdsk/.....

          • Initial zone configuration

          • Hostname and IP address:
            • The hostname can be resolved through DNS or NIS
              • Verify if it is resolved through DNS
                • nslookup isve-111-212
              • Verify if it is resolved through NIS
                • ypcat hosts | grep isve-111-212
              • If the hostname is resolved through DNS, /etc/nsswitch.conf should have the following
                • ipnodes:        files dns
              • else it should be as under:
                • ipnodes:        files nis
          • netmask: /etc/inet/netmasks
          • Router IP: /etc/defaultrouter
          • Domainname: run the domainname command
          • Name service: check /etc/nsswitch.conf to confirm it is NIS
          • Name server: output of the ypwhich command
          • IP address of the name server: ping -s <nameserver>
          • DNS name server IP address: from /etc/resolv.conf

          • Modify existing zone

          • Change net settings (removing the address or physical attribute of the net resource will delete both network parameters)
            • zonecfg -z zone1 info
            • remove net address=<value> {or remove net physical=<value>}
            • add net
            •    set physical
            •     set address
            • end
          • Some changes need the zone to be rebooted to take effect
            • zoneadm -z zone1 reboot


          If you want to restart initial zone configuration
          • zlogin -S zonename /usr/sbin/sys-unconfig
          • or log in to the zone (zlogin zonename), issue sys-unconfig, and reboot
          • On Solaris 11, you can unconfigure using the following command
            • /usr/sbin/sysconfig unconfigure
          How to specify the netmask in containers: for a shared-IP zone, include the prefix length in the address (for example, set address=10.1.1.5/24) or add an entry to /etc/inet/netmasks in the global zone.
          Why is the zone boot command failing as under:

           zoneadm -z zones2 boot
          zoneadm: zone 'zones2': These file-systems are mounted on subdirectories of /zones2/root:
          zoneadm: zone 'zones2':   /zones2/root/var/sadm/install/.door
          zoneadm: zone 'zones2': call to zoneadmd failed
          You do not have the right zones patch installed. If you do not want to install the updated patches, wait about 10 minutes and reissue the command; it should then work.


          If you see the following, log in to the console to view the SCI tool:


          root@unknown:~# sysconfig configure
          This program will re-configure your system.
          Do you want to continue (y/[n])? y
          Interactive configuration requested.
          System Configuration Interactive (SCI) tool will be launched on console.
          Since you are currently not logged on console,
          you may not be able to navigate SCI tool.
          Would you like to proceed with re-configuration (y/[n])? y