Flying zones sol11


Flying Zones on Solaris 11 (sol11) running Oracle Solaris Cluster 4.1 (OSC 4.1): a failover zone that moves between the cluster nodes together with its resource group

configure the environment

root@clnode01:~# cldevice status

=== Cluster DID Devices ===

Device Instance             Node                Status
---------------             ----                ------
/dev/did/rdsk/d1            clnode01            Ok
                            clnode02            Ok

/dev/did/rdsk/d2            clnode01            Unmonitored

/dev/did/rdsk/d4            clnode02            Unmonitored

root@clnode01:~#

root@clnode01:~# clrg create ha-group01
root@clnode01:~# clrt register SUNW.HAStoragePlus
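
a quick check that the type got registered (optional, not part of the original session):

root@clnode01:~# clrt list
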
root@clnode02:~# zpool import
  pool: ha-data
    id: 3768387658467332306
 state: ONLINE
action: The pool can be imported using its name or numeric identifier.
config:

        ha-data                                  ONLINE
          c0t600144F0DE794B00000053A403AA0001d0  ONLINE
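
the pool stays exported here; HAStoragePlus will import it on whichever node the resource group comes online on. if it is still imported on one of the nodes from earlier testing, export it first (not part of the original session):

root@clnode02:~# zpool export ha-data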

root@clnode01:~# clrs create -g ha-group01 -t SUNW.HAStoragePlus -p Zpools=ha-data ha-data-rs
root@clnode01:~# clrg online -M -n clnode01 ha-group01

root@clnode01:~# cluster status -t rg,rs

=== Cluster Resource Groups ===

Group Name       Node Name       Suspended      Status
----------       ---------       ---------      ------
ha-group01       clnode01        No             Online
                 clnode02        No             Offline


=== Cluster Resources ===

Resource Name       Node Name      State        Status Message
-------------       ---------      -----        --------------
ha-data-rs          clnode01       Online       Online
                    clnode02       Offline      Offline
root@clnode01:~# clrg switch -n clnode02 ha-group01
root@clnode01:~# cluster status -t rg,rs

=== Cluster Resource Groups ===

Group Name       Node Name       Suspended      Status
----------       ---------       ---------      ------
ha-group01       clnode01        No             Offline
                 clnode02        No             Online


=== Cluster Resources ===

Resource Name       Node Name      State        Status Message
-------------       ---------      -----        --------------
ha-data-rs          clnode01       Offline      Offline
                    clnode02       Online       Online

root@clnode01:~#
root@clnode01:~# clrg remaster +
root@clnode01:~# df -h | tail -1
ha-data                 20G    31K        20G     1%    /ha-data
root@clnode01:~#
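
not captured above: the zonepath will be /ha-zones/clzone01, so a dataset mounted at /ha-zones has to exist in the failover pool before the zone install, presumably something like:

root@clnode01:~# zfs create -o mountpoint=/ha-zones ha-data/ha-zones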

the zone

first the failover address for the zone: its hostname has to be resolvable on both cluster nodes, so it goes into /etc/hosts on each of them

root@clnode01:~# more /etc/hosts
#
# Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
# Use is subject to license terms.
#
# Internet host table
#
::1             localhost
127.0.0.1       localhost loghost
192.168.56.200 clnode01 clnode01.local
192.168.56.201 clnode02 clnode02.local
####
192.168.56.210  clzone01        clzone01.local
####
192.168.56.100 vsol01 vsol01.local
root@clnode01:~#
root@clnode01:~# clreslogicalhostname create -g ha-group01 -h clzone01 clzone01-lh-rs
root@clnode01:~#
root@clnode01:~# clrs status

=== Cluster Resources ===

Resource Name       Node Name      State        Status Message
-------------       ---------      -----        --------------
clzone01-lh-rs      clnode01       Online       Online - LogicalHostname online.
                    clnode02       Offline      Offline

ha-data-rs          clnode01       Online       Online
                    clnode02       Offline      Offline

root@clnode01:~# ping clzone01
clzone01 is alive
root@clnode01:~#
root@clnode01:~# zonecfg -z clzone01
Use 'create' to begin configuring a new zone.
zonecfg:clzone01> create -b
zonecfg:clzone01> set zonepath=/ha-zones/clzone01
zonecfg:clzone01> set autoboot=false
zonecfg:clzone01> set ip-type=shared
zonecfg:clzone01> add attr
zonecfg:clzone01:attr> set name=osc-ha-zone
zonecfg:clzone01:attr> set type=boolean
zonecfg:clzone01:attr> set value=true
zonecfg:clzone01:attr> end
zonecfg:clzone01> verify
zonecfg:clzone01> commit
zonecfg:clzone01> exit
root@clnode01:~#
root@clnode01:~#
root@clnode01:~#
root@clnode01:~# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              solaris  shared
   - clzone01         configured /ha-zones/clzone01             solaris  shared
root@clnode01:~# zoneadm -z clzone01 install
The following ZFS file system(s) have been created:
    ha-data/ha-zones/clzone01
Progress being logged to /var/log/zones/zoneadm.20140623T122141Z.clzone01.install
       Image: Preparing at /ha-zones/clzone01/root.

 AI Manifest: /tmp/manifest.xml.Qqaqbg
  SC Profile: /usr/share/auto_install/sc_profiles/enable_sci.xml
    Zonename: clzone01
Installation: Starting ...

              Creating IPS image
Startup linked: 1/1 done
              Installing packages from:
                  solaris
                      origin:  http://192.168.56.101/
                  ha-cluster
                      origin:  http://192.168.56.101:8080/
DOWNLOAD                                PKGS         FILES    XFER (MB)   SPEED
Completed                            187/187   34366/34366  231.6/231.6  485k/s

PHASE                                          ITEMS
Installing new actions                   48351/48351
Updating package state database                 Done
Updating image state                            Done
Creating fast lookup database                   Done
Installation: Succeeded

        Note: Man pages can be obtained by installing pkg:/system/manual

 done.

        Done: Installation completed in 783.353 seconds.


  Next Steps: Boot the zone, then log into the zone console (zlogin -C)

              to complete the configuration process.

Log saved in non-global zone as /ha-zones/clzone01/root/var/log/zones/zoneadm.20140623T122141Z.clzone01.install
root@clnode01:~#
root@clnode01:~# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              solaris  shared
   - clzone01         installed  /ha-zones/clzone01             solaris  shared
root@clnode01:~# zoneadm -z clzone01 boot
root@clnode01:~# zlogin -C clzone01
[snip]
root@clnode01:~# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              solaris  shared
   1 clzone01         running    /ha-zones/clzone01             solaris  shared
root@clnode01:~# zoneadm -z clzone01 shutdown
root@clnode01:~#
root@clnode01:~# zoneadm -z clzone01 detach -F
root@clnode01:~#
root@clnode01:~# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              solaris  shared
   - clzone01         configured /ha-zones/clzone01             solaris  shared
root@clnode01:~#

the zone's boot environment is linked to the libbe UUID of the global zone's active BE, so for the attach on the second node both nodes should present the same UUID on their active BE. check the UUIDs and align clnode02 if needed:
root@clnode01:~# beadm list -H
non-sc;0f5f7660-63ad-6a84-ccd0-8bbea3d9ec7c;;;66463744;static;1400556054
non-sc-backup-1;51da94ac-2d1b-c9d1-c202-e3a125480b3f;;;68608;static;1403282602
s11.1.19.0.6.0;5f5a858a-e2e7-69ef-897c-8bb96173bd55;NR;/;7785575936;static;1403532512
solaris;65176f0c-2c86-642d-ec65-932ecb4f755a;;;3269120;static;1399981144
root@clnode01:~#
root@clnode01:~# df -b /
Filesystem              avail
rpool/ROOT/s11.1.19.0.6.0 12539479
root@clnode01:~# zfs get org.opensolaris.libbe:uuid rpool/ROOT/s11.1.19.0.6.0
NAME                       PROPERTY                    VALUE                                 SOURCE
rpool/ROOT/s11.1.19.0.6.0  org.opensolaris.libbe:uuid  5f5a858a-e2e7-69ef-897c-8bb96173bd55  local
root@clnode02:~# zfs get org.opensolaris.libbe:uuid rpool/ROOT/s11.1.19.0.6.0
NAME                       PROPERTY                    VALUE                                 SOURCE
rpool/ROOT/s11.1.19.0.6.0  org.opensolaris.libbe:uuid  003c22b8-be3a-eb9f-be05-8385af25e5b6  local
root@clnode02:~#
root@clnode02:~# zfs set org.opensolaris.libbe:uuid=5f5a858a-e2e7-69ef-897c-8bb96173bd55 rpool/ROOT/s11.1.19.0.6.0
root@clnode02:~# zfs get org.opensolaris.libbe:uuid rpool/ROOT/s11.1.19.0.6.0
NAME                       PROPERTY                    VALUE                                 SOURCE
rpool/ROOT/s11.1.19.0.6.0  org.opensolaris.libbe:uuid  5f5a858a-e2e7-69ef-897c-8bb96173bd55  local
root@clnode02:~# beadm list -H
non-sc;13d2053a-c6b1-ce44-85cb-daf36b2dbfa0;;;154624;static;1400556067
s11.1.19.0.6.0;5f5a858a-e2e7-69ef-897c-8bb96173bd55;NR;/;6764256256;static;1400550024
s11.1.19.0.6.0-backup-1;d640f3d4-a023-6bb3-fecf-81029189443c;;;68608;static;1403282566
solaris;65176f0c-2c86-642d-ec65-932ecb4f755a;;;3840512;static;1399981144
root@clnode02:~# 
root@clnode01:~# clrg switch -n clnode02 ha-group01
root@clnode01:~# clrg status

=== Cluster Resource Groups ===

Group Name       Node Name       Suspended      Status
----------       ---------       ---------      ------
ha-group01       clnode01        No             Offline
                 clnode02        No             Online
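
the zone configuration also has to exist on clnode02 before the forced attach. one way (not captured in the session, the file name is only an example) is to export it on clnode01, copy the file over and replay it on clnode02:

root@clnode01:~# zonecfg -z clzone01 export -f /var/tmp/clzone01.cfg
root@clnode02:~# zonecfg -z clzone01 -f /var/tmp/clzone01.cfg
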
root@clnode02:~# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              solaris  shared
   - clzone01         configured /ha-zones/clzone01             solaris  shared
root@clnode02:~# zoneadm -z clzone01 attach -F
root@clnode02:~# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              solaris  shared
   - clzone01         installed  /ha-zones/clzone01             solaris  shared
root@clnode02:~#
root@clnode02:~# zoneadm -z clzone01 boot
root@clnode02:~# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              solaris  shared
   1 clzone01         running    /ha-zones/clzone01             solaris  shared
root@clnode02:~#

Zone under Cluster Control
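
in between, the zone was taken down and detached again on clnode02 and the group was switched back to clnode01, so the agent can take over from a detached zone; roughly (not captured in the session):

root@clnode02:~# zoneadm -z clzone01 shutdown
root@clnode02:~# zoneadm -z clzone01 detach -F
root@clnode01:~# clrg switch -n clnode01 ha-group01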

root@clnode01:~# pkg list -a ha-cluster/data-service/ha-zones
NAME (PUBLISHER)                                  VERSION                    IFO
ha-cluster/data-service/ha-zones (ha-cluster)     4.1-6.1                    i--
root@clnode01:~# cd /opt/SUNWsczone/sczbt/util
root@clnode01:/opt/SUNWsczone/sczbt/util# ls
sczbt_config    sczbt_register
root@clnode01:/opt/SUNWsczone/sczbt/util#
root@clnode01:/opt/SUNWsczone/sczbt/util#
root@clnode01:/opt/SUNWsczone/sczbt/util#
root@clnode01:/opt/SUNWsczone/sczbt/util# cp -p sczbt_config sczbt_config_clzone01-res
root@clnode01:/opt/SUNWsczone/sczbt/util# grep -v "^#" sczbt_config > sczbt_config_clzone01-res
root@clnode01:/opt/SUNWsczone/sczbt/util# vi sczbt_config_clzone01-res
root@clnode01:/opt/SUNWsczone/sczbt/util# cat  sczbt_config_clzone01-res
RS=clnode01-zone-rs
RG=ha-group01
PARAMETERDIR=/ha-zones/clzone01/cl-params
SC_NETWORK=true
SC_LH=clzone01-lh-rs
FAILOVER=true
HAS_RS=ha-data-rs
Zonename="clzone01"
Zonebrand="solaris"
Zonebootopt=""
Milestone="svc:/milestone/multi-user-server"
LXrunlevel="3"
SLrunlevel="3"
Mounts=""
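
the parameter directory referenced above has to exist on the failover filesystem before registering the resource (not shown in the session):

root@clnode01:/opt/SUNWsczone/sczbt/util# mkdir -p /ha-zones/clzone01/cl-params
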
root@clnode01:/opt/SUNWsczone/sczbt/util# zoneadm -z clzone01 attach -F
root@clnode01:/opt/SUNWsczone/sczbt/util# ./sczbt_register -f ./sczbt_config_clzone01-res
sourcing ./sczbt_config_clzone01-res
Registration of resource clnode01-zone-rs succeeded.
Validation of resource clnode01-zone-rs succeeded.
root@clnode01:/opt/SUNWsczone/sczbt/util#
root@clnode01:/# clrs list -g ha-group01
clnode01-zone-rs
clzone01-lh-rs
ha-data-rs
root@clnode01:/#
root@clnode01:/# clrs enable clnode01-zone-rs
root@clnode01:/#
root@clnode01:/#
root@clnode01:/# clrs status

=== Cluster Resources ===

Resource Name         Node Name     State       Status Message
-------------         ---------     -----       --------------
clnode01-zone-rs      clnode01      Online      Online
                      clnode02      Offline     Offline

clzone01-lh-rs        clnode01      Online      Online - LogicalHostname online.
                      clnode02      Offline     Offline - LogicalHostname offline.

ha-data-rs            clnode01      Online      Online
                      clnode02      Offline     Offline

root@clnode01:/#
root@clnode01:/#
root@clnode01:/# zoneadm list -cv
  ID NAME             STATUS     PATH                           BRAND    IP
   0 global           running    /                              solaris  shared
   2 clzone01         running    /ha-zones/clzone01             solaris  shared
root@clnode01:/#
root@clnode01:/# clrg switch -n clnode02 ha-group01 
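
after the switch the agent halts and detaches the zone on clnode01, then attaches and boots it on clnode02. a quick check from there (not part of the original session):

root@clnode02:~# clrs status -g ha-group01
root@clnode02:~# zoneadm list -cv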

SPARC LDOM setting

don't forget to allow additional MAC addresses on the virtual network devices of a guest LDom; you need one "auto" entry for each zone MAC address (anet) that will run on top of that vnet:

root@primary # ldm set-vnet alt-mac-addrs=auto,auto,auto,auto,auto,auto,auto,auto vnet0 ldom1
root@primary # ldm set-vnet alt-mac-addrs=auto,auto,auto,auto,auto,auto,auto,auto vnet1 ldom1
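
the extra MAC slots can be checked afterwards with something like:

root@primary # ldm list -o network ldom1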

exclusive example

root@ldom # zonecfg -z zone1
Use 'create' to begin configuring a new zone.
zonecfg:zone1> create -b
zonecfg:zone1> set zonepath=/zones/zone1
zonecfg:zone1> set autoboot=false
zonecfg:zone1> set ip-type=exclusive
zonecfg:zone1> add attr
zonecfg:zone1:attr> set name=osc-ha-zone
zonecfg:zone1:attr> set type=boolean
zonecfg:zone1:attr> set value=true
zonecfg:zone1:attr> end
zonecfg:zone1> add anet
zonecfg:zone1:anet> set linkname=znet0
zonecfg:zone1:anet> set lower-link=net0
zonecfg:zone1:anet> set mac-address=auto
zonecfg:zone1:anet> end
zonecfg:zone1> add anet
zonecfg:zone1:anet> set linkname=znet1
zonecfg:zone1:anet> set lower-link=net1
zonecfg:zone1:anet> set mac-address=auto
zonecfg:zone1:anet> end
zonecfg:zone1> add dataset
zonecfg:zone1:dataset> set name=zpool1/zone1_pool
zonecfg:zone1:dataset> end
zonecfg:zone1> verify
zonecfg:zone1> commit
zonecfg:zone1> exit
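
for an exclusive-IP zone the sczbt config differs from the shared-IP one above mainly in the network part: the zone brings up its address itself on its anet, so (as far as I recall) SC_NETWORK is set to false and no SC_LH is tied in; the rest follows the clzone01 example (zone and resource names are only placeholders):

SC_NETWORK=false
SC_LH=
FAILOVER=true
Zonename="zone1"
Zonebrand="solaris"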