Wednesday, May 13, 2009

Xclock as a failover resource in SUN Cluster

This is the answer I gave someone on sunmanagers on how to set up xclock as a resource in SUN Cluster .... and apparently it worked for him :-)

On either of the two nodes:

First you need to create the resource type.
The command to do this should look something like this:

scdscreate -k -V SUN -T XCLOCK

Then you should create the resource package.
The command to do this should look something like this:

scdsconfig -s "/path_to_application_start_script" \
-t "/path_to_application_stop_script" \
-m "/path_to_application_monitor_script"

It's a good idea to keep the rtconfig file (mv rtconfig SUNXCLOCK/).
Now in SUNXCLOCK/pkg you should have a package for your resource.
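
In case it helps, here is a minimal sketch of what the three scripts passed to scdsconfig could look like; the display name, the xclock path and the ps/grep matching are just examples and need adjusting for your environment:

#!/bin/sh
# start script (sketch): launch xclock on an X display of your choice
DISPLAY=some-host:0.0
export DISPLAY
/usr/openwin/bin/xclock &
exit 0

#!/bin/sh
# stop script (sketch): kill any running xclock
pid=`/usr/bin/ps -e | /usr/bin/grep xclock | /usr/bin/sed -e 's/^ *//' -e 's/ .*//'`
if [ "${pid}" != "" ]
then
        /usr/bin/kill ${pid}
fi
exit 0

#!/bin/sh
# monitor script (sketch): exit 0 if xclock is running, non-zero otherwise
/usr/bin/ps -e | /usr/bin/grep xclock > /dev/null 2>&1
exit $?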

Install the obtained package on both nodes !!

pkgadd -d name_of_the_package

Register the resource in the cluster (to be done on only one node)

scrgadm -a -t SUN.XCLOCK

Create the resource group and then the resource itself in the cluster (to be done on only one node):

scrgadm -a -g resource-group-name

scrgadm -a -j resource-name -g resource-group-name -t SUN.XCLOCK

Activate the resource

scswitch -e -j resource-name
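
To check that failover really works you can switch the group between the nodes by hand; a quick sketch, assuming the same group name as above (scstat -g shows where the group is currently online):

scstat -g
scswitch -z -g resource-group-name -h name-of-the-other-node
scstat -g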

Good luck !!

SSH on Solaris 6

# Install openssl, openssh, zlib, libgcc, prngd
mkdir /var/empty
chown root:sys /var/empty
chmod 755 /var/empty
groupadd sshd
useradd -g sshd -c 'sshd privsep' -d /var/empty -s /bin/false sshd
cat /var/log/* > /etc/prngd-seed
/usr/local/sbin/prngd /var/run/egd-pool
ssh-keygen -t rsa1 -f /usr/local/etc/ssh_host_key -N ""
ssh-keygen -t dsa -f /usr/local/etc/ssh_host_dsa_key -N ""
ssh-keygen -t rsa -f /usr/local/etc/ssh_host_rsa_key -N ""
ln -s /etc/init.d/sshd /etc/rc2.d/S98sshd
ln -s /etc/init.d/prngd /etc/rc2.d/S97prngd
chmod ug+x /etc/init.d/sshd
chown root /etc/init.d/sshd
chgrp sys /etc/init.d/sshd
/etc/init.d/sshd start

# /etc/init.d/sshd contains:
#!/bin/sh

pid=`/usr/bin/ps -e | /usr/bin/grep sshd | /usr/bin/sed -e 's/^ *//' -e 's/ .*//'`
case $1 in
'start')
        /usr/local/sbin/sshd
        ;;
'stop')
        if [ "${pid}" != "" ]
        then
                /usr/bin/kill ${pid}
        fi
        ;;
*)
        echo "usage: /etc/init.d/sshd {start|stop}"
        ;;
esac

############################################

# /etc/init.d/prngd contains:
#!/bin/sh

pid=`/usr/bin/ps -e | /usr/bin/grep prngd | /usr/bin/sed -e 's/^ *//' -e 's/ .*//'`
case $1 in
'start')
        /usr/local/sbin/prngd /var/spool/prngd/pool
        ;;
'stop')
        if [ "${pid}" != "" ]
        then
                /usr/bin/kill ${pid}
        fi
        ;;
*)
        echo "usage: /etc/init.d/prngd {start|stop}"
        ;;
esac
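
Once prngd and sshd are both running, a quick sanity check (assuming sshd listens on the default port 22):

ps -ef | grep sshd
/usr/local/bin/ssh -v localhost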

VxVM 3.5 upgrade (SUN Cluster)

init 0
boot -sx

/etc/init.d/volmgt start
volcheck
/cdrom/volume_manager/scripts/upgrade_start -check
/cdrom/volume_manager/scripts/upgrade_start
init 0
boot -sx

pkgrm VRTSvmsa VRTSvmdoc VRTSvmdev VRTSvmman VRTSvxvm VRTSvxfs
init 0
boot -sx

/etc/init.d/volmgt start
volcheck
cd /cdrom/volume_manager/pkgs
pkgadd -d . VRTSvlic
pkgadd -d . VRTSvxvm
../scripts/upgrade_finish
init 0
boot -rx

volcheck
cd /cdrom/volume_manager/pkgs
pkgadd -d . VRTSvmman
pkgadd -d . VRTSvxfs
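
After the last reboot it's worth sanity-checking the upgrade; for example:

pkginfo -l VRTSvxvm | grep VERSION
vxdctl mode
vxprint -ht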

Veritas cluster stuff

# Status of resources / groups in the cluster
hastatus -sum |less
# To be able to bring the resource online again after it has been marked as faulted on both nodes:
hares -clear java_app -sys napoli
hares -clear java_app -sys roma
# Bring it online on napoli:
hagrp -online sg_app -sys napoli
# Freeze it (i.e. it is still monitored but no failover happens anymore if it is stopped manually)
hagrp -freeze sg_app
# Check the status of the resources
hastatus -sum |less
# Move a resource group from one machine to another
hagrp -switch resource-group-name -to server_name

# Create a resource group
hagrp -add oracle-sg
hagrp -modify oracle-sg SystemList chip 0 dale 1
hagrp -modify oracle-sg AutoStartList dale
hagrp -modify oracle-sg Parallel 0
# Create resources in the group
hares -add ora-lsnr IP oracle-sg
hares -modify ora-lsnr Critical 1
hares -modify ora-lsnr ArpDelay 1
hares -modify ora-lsnr IfconfigTwice 0
hares -modify ora-lsnr Device
hares -modify ora-lsnr Address 10.233.184.8
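
The hagrp/hares changes above only work while the cluster configuration is writable, and the resource still has to be enabled and brought online; a sketch of the surrounding steps (node chip is just the one from the SystemList above):

haconf -makerw
# ... run the hagrp / hares commands above ...
hares -modify ora-lsnr Enabled 1
haconf -dump -makero
hares -online ora-lsnr -sys chip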

# Create disk heartbeat (on each node, for that node's disk)
On chip:
these 2 commands on one node only !!!
gabdiskconf -i /dev/dsk/c4t0d0s2 -s 512 -S AABB
gabdiskconf -i /dev/dsk/c4t0d0s2 -s 1024 -S BBAA
these 2 on each of the nodes !!
/sbin/gabdiskhb -a /dev/dsk/c4t0d0s2 -p a -s 512
/sbin/gabdiskhb -a /dev/dsk/c4t0d0s2 -p h -s 1024

root@chip # cat /etc/gabtab
/sbin/gabconfig -c -n2
/sbin/gabdiskhb -a /dev/dsk/c4t0d0s2 -p a -s 512
/sbin/gabdiskhb -a /dev/dsk/c4t0d0s2 -p h -s 1024

On dale:
/sbin/gabdiskhb -a /dev/dsk/c3t0d0s2 -p a -s 512
/sbin/gabdiskhb -a /dev/dsk/c3t0d0s2 -p h -s 1024
gabdiskhb -l (to check that everything is working)

root@dale # cat /etc/gabtab
/sbin/gabconfig -c -n2
/sbin/gabdiskhb -a /dev/dsk/c3t0d0s2 -p a -s 512
/sbin/gabdiskhb -a /dev/dsk/c3t0d0s2 -p h -s 1024
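
To check that GAB sees both nodes and the heartbeat port memberships (run on either node):
gabconfig -a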


# As /etc/llttab is read during LLT initialization, any changes become effective
# only after the following steps (execute step 1 on one node, execute steps 2-6 on all nodes):

1. hastop -all -force
2. gabconfig -U
3. lltconfig -U
4. lltconfig -c
5. /usr/bin/sh /etc/gabtab
6. hastart
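
For reference, an /etc/llttab looks roughly like this (node name, cluster id and interface names below are only examples):

set-node chip
set-cluster 7
link qfe0 /dev/qfe:0 - ether - -
link qfe1 /dev/qfe:1 - ether - -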

Symcli stuff

## Split / establish each device individually

symmir -g the-group split DEV001
symmir -g the-group split DEV002
symmir -g the-group query >/tmp/PPCREP
symmir -g the-group split DEV003
symmir -g the-group split DEV004
symmir -g the-group split DEV005
symmir -g the-group split DEV006
symmir -g the-group query
symmir -g the-group split DEV005
symmir -g the-group establish DEV005
symmir -g the-group query
symmir -g the-group split DEV005
symmir -g the-group establish # for the whole group

# to see the devices mapped on a given DMX port
symdev -sid 123 -SA 7b -p 1 list
# to see which LUN device 0313 is mapped as
symdev show 0313
# to see the WWN of a DMX port
symcfg -sid 123 -v -SA 10b list
# to see the devices masked to a WWN
symmaskdb -sid 123 -wwn 10000000C94A7638 list devs
# to see which WWN a device is assigned to
symmaskdb -sid 123 -dev 24b list assig
# to see the locks on the Symmetrix
symcfg -lockn all list
symcfg -sid 123 -force -lockn 15 release
# to see the devices presented on a port

# Manually create a BCV group
symdg -type RDF1 create MEDI_DMX
symld -sid 123 -g MEDI_DMX add dev 8D5
symld -sid 123 -g MEDI_DMX add dev 8D9
symbcv -sid 123 -g MEDI_DMX associate dev 8E5
symbcv -sid 123 -g MEDI_DMX associate dev 8E9
symdg list
symmir -g MEDI_DMX query
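
To actually start synchronizing the freshly associated BCVs, run an establish on the group (the first establish of a new BCV pairing is normally a full one; later ones can be incremental):

symmir -g MEDI_DMX establish -full
symmir -g MEDI_DMX query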

# find out the SRDF groups on a SID
symcfg -sid 123 -rdfg all list

# create metas
symconfigure -sid 123 -f /tmp/map_e10kp1 preview
symconfigure -sid 123 -f /tmp/map_e10kp1 commit
# map devices (metas)
symmask -sid 186 -wwn 10000000c934aaab -dir 10b -p 0 add devs 992,996,99A,99E,9A2,9A6,9AA,9AE,9B2,9B6,9BA,9BE,9C2,9C6,9CA,9CE,9D2,9D6,9DA
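
After changing the masking, refresh the VCM database so the directors pick up the change:

symmask -sid 186 refresh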

# break the SRDF pairings
symrdf -g RETAIL-SRDF -force deletepair
symdg -force delete RETAIL-SRDF