In order to upgrade GI we need to:
- review MOS note 2111010.1,
- prepare a new file system for the 18.4 GI binaries, and
- run $GI/gridSetup.sh
0. Read the documentation
The best note for this operation is 2111010.1:
12.2 Grid Infrastructure and Database Upgrade steps for Exadata Database Machine running 11.2.0.3 and later on Oracle Linux (Doc ID 2111010.1)
1. Create new file system
Go through the steps described below. I demonstrate these steps by creating a new file system for the GI 18c home on VM09 (the 9th guest VM). Log in
to the Dom0 where VM09 lives:
[root@Dom0 ~]# cd /EXAVMIMAGES/
[root@Dom0 EXAVMIMAGES]# ll
drwxr-xr-x 2 root root 3896 Aug 23 11:04 conf
-rw-r----- 1 root root 53687091200 Aug 1 16:38
db-klone-Linux-x86-64-12102170418.50.iso
-rw-r----- 1 root root 85 Aug 1 16:38
db-klone-Linux-x86-64-12102170418.50.md5
-rw-r--r-- 1 root root
4169738842 Aug 1 16:29
db-klone-Linux-x86-64-12102170418.zip
-rw-r----- 1 root root 53687091200 Aug 1 16:36 grid-klone-Linux-x86-64-12201180717.50.iso
-rw-r----- 1 root root 87 Aug 1 16:36
grid-klone-Linux-x86-64-12201180717.50.md5
-rw-r--r-- 1 root root
5010176802 Aug 1 16:29
grid-klone-Linux-x86-64-12201180717.zip
drwxr----- 4 root root 3896 Aug 23 11:06 GuestImages
-rw-r----- 1 root root 26843545600 Jun 13 2018 System.first.boot.18.1.5.0.0.180506.img
[root@Dom0 EXAVMIMAGES]# qemu-img create
/EXAVMIMAGES/grid-klone-Linux-x86-64-18c_vm09.iso 50G
Formatting '/EXAVMIMAGES/grid-klone-Linux-x86-64-18c_vm09.iso',
fmt=raw size=53687091200
[root@Dom0 EXAVMIMAGES]# parted
/EXAVMIMAGES/grid-klone-Linux-x86-64-18c_vm09.iso mklabel gpt
[root@Dom0 EXAVMIMAGES]# losetup -f
/dev/loop8
[root@Dom0 EXAVMIMAGES]# losetup /dev/loop8
/EXAVMIMAGES/grid-klone-Linux-x86-64-18c_vm09.iso
[root@Dom0 EXAVMIMAGES]# parted -s /dev/loop8
unit s print
Model: (file)
Disk /dev/loop8: 104857600s
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Number
Start End Size
File system Name Flags
[root@Dom0 EXAVMIMAGES]# parted -s /dev/loop8
mkpart primary 64s 104857566s set 1
Warning: The resulting partition is not properly
aligned for best performance.
[root@Dom0 EXAVMIMAGES]# mkfs -t ext4 -b 4096
/dev/loop8
mke2fs 1.43-WIP (20-Jun-2013)
Discarding device blocks: done
Filesystem label=
OS type: Linux
Block size=4096 (log=2)
Fragment size=4096 (log=2)
Stride=0 blocks, Stripe width=0 blocks
3276800 inodes, 13107200 blocks
655360 blocks (5.00%) reserved for the super user
First data block=0
Maximum filesystem blocks=4294967296
400 block groups
32768 blocks per group, 32768 fragments per group
8192 inodes per group
Superblock backups stored on blocks:
32768,
98304, 163840, 229376, 294912, 819200, 884736, 1605632, 2654208,
4096000,
7962624, 11239424
Allocating group tables: done
Writing inode tables: done
Creating journal (32768 blocks): done
Writing superblocks and filesystem accounting
information: done
[root@Dom0 EXAVMIMAGES]# /sbin/tune2fs -c 0 -i 0
/dev/loop8
tune2fs 1.43-WIP (20-Jun-2013)
Setting maximal mount count to -1
Setting interval between checks to 0 seconds
[root@Dom0 EXAVMIMAGES]# losetup -d /dev/loop8
[root@Dom0 EXAVMIMAGES]# sync
2. Put the binaries onto this file system
In this step we put the GI binaries into the new FS. We do it as the root@Dom0 user.
This is not a mistake: at the next step (after logging in to the VM) I'll run
# chown -R oracle:oinstall $GI_HOME
You may omit this step and unzip the GI distribution later as the oracle@VM OS user.
[root@Dom0 EXAVMIMAGES]# mkdir -p
/mnt/grid-klone-Linux-x86-64-18c_vm09
[root@Dom0 EXAVMIMAGES]# mount -o loop
/EXAVMIMAGES/grid-klone-Linux-x86-64-18c_vm09.iso
/mnt/grid-klone-Linux-x86-64-18c_vm09
Unzip the GI distribution to the appropriate FS:
[root@Dom0 EXAVMIMAGES]# ls
/EXAVMIMAGES/18c/LINUX.X64_18.3_grid_home.zip
/EXAVMIMAGES/18c/LINUX.X64_18.3_grid_home.zip
[root@Dom0 EXAVMIMAGES]# unzip -q -d
/mnt/grid-klone-Linux-x86-64-18c_vm09
/EXAVMIMAGES/18c/LINUX.X64_18.3_grid_home.zip
[root@Dom0 EXAVMIMAGES]# umount
/mnt/grid-klone-Linux-x86-64-18c_vm09
[root@Dom0 EXAVMIMAGES]# rm -rf
/mnt/grid-klone-Linux-x86-64-18c_vm09
3. Attach new file system to guest VM (execute as root@Dom0):
[root@Dom0 EXAVMIMAGES]# ls
/EXAVMIMAGES/grid-klone-Linux-x86-64-18c_vm09.iso
/EXAVMIMAGES/grid-klone-Linux-x86-64-18c_vm09.iso
[root@Dom0 EXAVMIMAGES]# ls -l
/EXAVMIMAGES/GuestImages/
total 8
drwxr----- 2 root root 3896 Nov 25 16:41
mradm01vm08.moex.com
drwxr----- 2 root root 3896 Nov 25 16:39
mradm01vm09.moex.com
[root@Dom0 EXAVMIMAGES]# ls -l /EXAVMIMAGES/GuestImages/mradm01vm09.moex.com/
total 82635782
-rw-r----- 1 root root 53687091200 Dec 21 12:58
db12.1.0.2.170418-3.img
-rw-r----- 1 root root 53687091200 Dec 21 12:58
grid12.2.0.1.180717.img
-rw-r----- 1 root root 2642 Aug 23 11:06
mradm01vm09.moex.com.cell.b0e1c27d1da94115b9344cd72290ed9d.conf
-rw-r----- 1 root root 4363 Aug 23 11:06
mradm01vm09.moex.com.virtualmachine.b0e1c27d1da94115b9344cd72290ed9d.conf
-rw-r----- 1 root root 66571993088 Dec 21 12:58
pv1_vgexadb.img
-rw-r----- 1 root root 26843545600 Dec 21 12:58
System.img
-rw-r----- 1 root root 2292 Nov 25 16:39 vm.cfg
[root@Dom0 EXAVMIMAGES]# reflink
/EXAVMIMAGES/grid-klone-Linux-x86-64-18c_vm09.iso /EXAVMIMAGES/GuestImages/mradm01vm09.moex.com/grid18c_vm09.img
[root@Dom0 EXAVMIMAGES]# ls
/EXAVMIMAGES/GuestImages/mradm01vm09.moex.com/grid18c_vm09.img
/EXAVMIMAGES/GuestImages/mradm01vm09.moex.com/grid18c_vm09.img
Log in to the VM (DomU) and determine an unused disk device name on this VM:
[root@VM ~]# lsblk -id
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
xvda 202:0 0 25G 0 disk
xvdb 202:16 0 50G 0 disk /u01/app/12.2.0.1/grid
xvdc 202:32 0 50G 0 disk /u01/app/oracle/product/12.1.0.2/dbhome_1
xvdd 202:48 0 62G 0 disk
The next free device name is "xvde"
Return again to Dom0 and attach the block device to the DomU:
[root@Dom0 EXAVMIMAGES]# xm block-attach
mradm01vm09.moex.com
file:/EXAVMIMAGES/GuestImages/mradm01vm09.moex.com/grid18c_vm09.img /dev/xvde w
[root@Dom0 EXAVMIMAGES]# grep ^uuid
/EXAVMIMAGES/GuestImages/mradm01vm09.moex.com/vm.cfg
uuid = 'b0e1c27d1da94115b9344cd72290ed9d'
[root@Dom0 EXAVMIMAGES]# uuidgen | tr -d '-'
fb281ca57229494aa886d83d3a4fa5f2
[root@Dom0 EXAVMIMAGES]# ls
/EXAVMIMAGES/GuestImages/mradm01vm09.moex.com/grid18c_vm09.img
/EXAVMIMAGES/GuestImages/mradm01vm09.moex.com/grid18c_vm09.img
[root@Dom0 EXAVMIMAGES]# ls /OVS/Repositories/
46baa46cf2bb40718892dc41f50c7b1e/
4f8f2aa8b83842da98b8d929691c8438/ a379bfb9e50b4038a4386bccf38ebc2c/
b0e1c27d1da94115b9344cd72290ed9d/
[root@Dom0 EXAVMIMAGES]# ln -sf
/EXAVMIMAGES/GuestImages/mradm01vm09.moex.com/grid18c_vm09.img
/OVS/Repositories/b0e1c27d1da94115b9344cd72290ed9d/VirtualDisks/fb281ca57229494aa886d83d3a4fa5f2.img
[root@Dom0 EXAVMIMAGES]# cd
/EXAVMIMAGES/GuestImages/mradm01vm09.moex.com/
[root@Dom0 mradm01vm09.moex.com]# cd ../../
18c/
db-klone-Linux-x86-64-12102170418.zip
grid-klone-Linux-x86-64-18c_vm08.iso System.first.boot.18.1.5.0.0.180506.img
conf/
grid-klone-Linux-x86-64-12201180717.50.iso grid-klone-Linux-x86-64-18c_vm09.iso
db-klone-Linux-x86-64-12102170418.50.iso
grid-klone-Linux-x86-64-12201180717.50.md5 GuestImages/
db-klone-Linux-x86-64-12102170418.50.md5
grid-klone-Linux-x86-64-12201180717.zip lost+found/
[root@Dom0 EXAVMIMAGES]# cd GuestImages/mradm01vm09.moex.com/
[root@Dom0 mradm01vm09.moex.com]# cp vm.cfg
vm.cfg.20181221
Edit vm.cfg and add a new line for the new disk device (shown in bold):
[root@Dom0 mradm01vm09.moex.com]# vi vm.cfg
[root@Dom0 mradm01vm09.moex.com]# cat vm.cfg
acpi = 1
apic = 1
pae = 1
builder = 'hvm'
kernel = '/usr/lib/xen/boot/hvmloader'
device_model = '/usr/lib/xen/bin/qemu-dm'
# To make VMs with more than 12 vCPUs work on exadata
server
# 1: Processor
Info and Feature Bits
# This returns
the CPU's stepping, model, and family information in EAX (also called the
signature of a CPU),
# feature
flags in EDX and ECX, and additional feature info in EBX.
# The format
of the information in EAX is as follows:
# 3:0 - Stepping
# 7:4 - Model
# 11:8 - Family
# 13:12 -
Processor Type
# 19:16 -
Extended Model
# 27:20 -
Extended Family
# Each register has 32 bits with 31st bit in the left
end and 0 bit in the right end.
# edx
register:
# 12 bit -
Memory Type Range Registers. Force to 0 set uncached access mode to memory
ranges.
# Each successive character represent a
lesser-significant bit:
# '1' ->
force the corresponding bit to 1
# '0' ->
force to 0
# 'x' -> Get a safe value (pass through and
mask with the default policy)
# 'k' ->
pass through the host bit value
# 's' ->
as 'k' but preserve across save/restore and migration
#
33222222222211111111110000000000
#
10987654321098765432109876543210
cpuid = ['1:edx=xxxxxxxxxxxxxxxxxxx0xxxxxxxxxxxx']
disk =
['file:/OVS/Repositories/b0e1c27d1da94115b9344cd72290ed9d/VirtualDisks/b2c152ef76ce489197f93e2727b216c4.img,xvda,w',
'file:/OVS/Repositories/b0e1c27d1da94115b9344cd72290ed9d/VirtualDisks/e8bbc556ef914757818f60adcd502d37.img,xvdb,w',
'file:/OVS/Repositories/b0e1c27d1da94115b9344cd72290ed9d/VirtualDisks/364ee8782fea430eb2bf6a024191d0d5.img,xvdc,w',
'file:/OVS/Repositories/b0e1c27d1da94115b9344cd72290ed9d/VirtualDisks/99cb835716034a37a79e199aa56b1841.img,xvdd,w',
'file:/OVS/Repositories/b0e1c27d1da94115b9344cd72290ed9d/VirtualDisks/fb281ca57229494aa886d83d3a4fa5f2.img,xvde,w']
memory = '376829'
maxmem = '376829'
OVM_simple_name = 'Exadata VM'
name = 'mradm01vm09.moex.com'
OVM_os_type = 'Oracle Linux 6'
vcpus = 16
maxvcpus = 16
uuid = 'b0e1c27d1da94115b9344cd72290ed9d'
on_crash = 'restart'
on_reboot = 'restart'
serial = 'pty'
keymap = 'en-us'
vif =
['type=netfront,mac=00:16:3e:2c:ec:be,bridge=vmbondeth0.1443','type=netfront,mac=00:16:3e:f4:28:bf,bridge=vmeth0']
timer_mode = 2
ib_pfs = ['3b:00.0']
ib_pkeys =
[{'pf':'3b:00.0','port':'1','pkey':['0xffff',]},{'pf':'3b:00.0','port':'2','pkey':['0xffff',]},]
4. Next steps will be executed on the guest VM.
We'll create mount point and mount new file system under VM.
Log in to the DomU:
[root@VM]# mkdir -p /u01/app/18c/grid
[root@VM]# mount /dev/xvde /u01/app/18c/grid
Add this line to /etc/fstab:
/dev/xvde /u01/app/18c/grid ext4 defaults 1 1
Use df -h to verify the new FS is mounted.
[root@VM]# df -h
.../dev/xvde 50G 8.0G 39G 17% /u01/app/18c/grid
No comments:
Post a Comment
Note: Only a member of this blog may post a comment.