#!/bin/ksh -p
#
# Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
#

#
# solaris-kz brand clone: clones a source kernel zone by adding the new
# zone's boot disks to the source zone, booting it, and running an
# auto-install CLONE deployment onto those disks from inside the source.
#

. /usr/lib/brand/solaris-kz/common.ksh
# Allows developers to override some things like PATH and PYTHONPATH
. /usr/lib/brand/solaris/developerenv.ksh

m_usage=$(gettext "clone [-c profile.xml | dir] [-x force-zpool-create=] {sourcezone}\n\tThe -c option gives a profile or a directory of profiles to be applied to the\n\tsystem after clone. The -x force-zpool-create= option allows\n\tre-use of bootable devices that contain an existing ZFS pool.")

f_boot=$(gettext "Failed to boot source zone")
f_boot_singleuser=$(gettext "Timed out awaiting source zone boot")
f_cp=$(gettext "Failed to copy %s to %s")
f_aimanifest_add=$(gettext "Failed to aimanifest add %s")
f_src_not_ready=$(gettext "Source zone %s not prepared for cloning; at least one multi-user boot is required.")
w_dubious_repo=$(gettext "WARNING: publisher %s may not be reachable at %s")

scdir=

# All zlogins in this script are "safe mode" logins, so that they work even
# when the zone is booted to milestone=none.
function zlogin {
	/usr/sbin/zlogin -S "$@"
}

#
# EXIT trap: clean up on any exit path.  Best-effort: each cleanup step that
# fails forces the zone to remain in the incomplete state rather than being
# marked retryable.
#
function trap_exit {
	typeset force_incomplete=false

	# Clean up devices only if source zone was initialized
	if [[ -n ${src.name} ]]; then
		remove_devices src idmap || force_incomplete=true
	fi
	if [[ -n $scdir ]]; then
		rm -rf "$scdir"
	fi
	if $src_booted; then
		zoneadm -z "${src.name}" halt || force_incomplete=true
	fi

	# Destroy any storage we created unless the clone succeeded.
	if [[ $EXIT_CODE == $ZONE_SUBPROC_OK ]]; then
		typeset destroy=false
	else
		typeset destroy=true
	fi
	cleanup_storage dst $destroy || force_incomplete=true

	# Clean up temporary directory
	fini_tmpdir

	# A fatal error with complete cleanup can safely be retried.
	if [[ $EXIT_CODE == $ZONE_SUBPROC_FATAL && \
	    $force_incomplete != true ]]; then
		EXIT_CODE=$ZONE_SUBPROC_TRYAGAIN
	fi
	exit $EXIT_CODE
}

#
# Creates an AI manifest from scratch.
#
# Arguments:	$1 - root pool name for the cloned zone
#		$2 - (nameref, out) path of the generated manifest
#		$3 - (nameref, in) diskmap: map[tid]=rid, temporary device id
#		     in the source zone -> real device id in the new zone
#
# Exits via fatal() on any aimanifest failure.
#
function create_manifest {
	[[ $# == 3 ]] || fail_internal "create_manifest: invalid arguments"
	typeset rpool=$1
	typeset -n out_manifest=$2
	typeset -n diskmap=$3
	typeset sw=/auto_install/ai_instance/software
	typeset target=/auto_install/ai_instance/target
	typeset disk=$target/disk
	typeset swinst
	typeset aiinst

	[[ -z $TMPDIR ]] && fail_internal "TMPDIR not set"
	[[ ! -d $TMPDIR ]] && fail_internal "TMPDIR missing"
	export AIM_MANIFEST=$TMPDIR/ai-manifest.xml

	# XXX temporary
	out_manifest=$AIM_MANIFEST

	# load in the manifest framework. we'll jettison most of it
	aimanifest load /usr/share/auto_install/manifest/default.xml

	# clear out the instance
	aimanifest delete $sw >/dev/null
	aimanifest delete $target >/dev/null

	swinst=$(aimanifest add -r "$sw" "") ||
	    fatal "$f_aimanifest_add" "$sw"
	aiinst=$(dirname "$swinst")
	aimanifest set "$aiinst@name" default >/dev/null ||
	    fatal "$f_aimanifest_set" "$aiinst@name"

	# Set newly initialized software instance for cloning.
	# (dropped a duplicated ">/dev/null" and normalized the misspelled
	# "$f_ai_manifest_set" to "$f_aimanifest_set" used elsewhere)
	aimanifest set "$swinst@type" CLONE >/dev/null ||
	    fatal "$f_aimanifest_set" "$swinst@type"

	aimanifest add "$aiinst/boot_mods" "" ||
	    fatal "$f_aimanifest_add" "$aiinst/boot_mods"
	if [[ $(uname -p) == i386 ]]; then
		aimanifest set "$aiinst/boot_mods[1]@firmware" "zvmm" \
		    >/dev/null ||
		    fatal "$f_aimanifest_set" "$aiinst/boot_mods[1]@firmware"
	else
		aimanifest set "$aiinst/boot_mods[1]@firmware" "obp" \
		    >/dev/null ||
		    fatal "$f_aimanifest_set" "$aiinst/boot_mods[1]@firmware"
	fi

	# Configure a root pool on the disks listed in diskmap
	for tid in "${!diskmap[@]}"; do
		typeset diskinst
		typeset rid=${diskmap["$tid"]}

		# bugfix: failure message used to name "$sw" instead of the
		# node actually being added ("$disk").
		diskinst=$(aimanifest add -r "$disk" "") ||
		    fatal "$f_aimanifest_add" "$disk"
		aimanifest set "$diskinst@whole_disk" true >/dev/null ||
		    fatal "$f_aimanifest_set" "$diskinst@whole_disk"
		aimanifest add "$diskinst/disk_name" "" ||
		    fatal "$f_aimanifest_add" "$diskinst/disk_name"
		aimanifest set "$diskinst/disk_name[1]@name" \
		    "c1d${tid}" >/dev/null ||
		    fatal "$f_aimanifest_set" "$diskinst/disk_name[1]@name"
		aimanifest set "$diskinst/disk_name[1]@name_type" ctd \
		    >/dev/null ||
		    fatal "$f_aimanifest_set" \
		    "$diskinst/disk_name[1]@name_type"

		# Be sure vdev labels get updated during export
		aimanifest add "$diskinst/vdev_label" "" ||
		    fatal "$f_aimanifest_add" "$diskinst/vdev_label"
		aimanifest set "$diskinst/vdev_label[1]@ctd" "c1d${rid}" \
		    >/dev/null ||
		    fatal "$f_aimanifest_set" "$diskinst/vdev_label[1]@ctd"
		aimanifest set "$diskinst/vdev_label[1]@devid" "" \
		    >/dev/null ||
		    fatal "$f_aimanifest_set" "$diskinst/vdev_label[1]@devid"

		# Device path format differs per platform: OBP path on
		# sparc, zvnex virtual block device on x86.
		typeset fmt
		if [[ $(uname -p) == sparc ]]; then
			fmt="/kz-devices@ff/disk@%d:a"
		else
			fmt="/zvnex/zvblk@%x"
		fi
		aimanifest set "$diskinst/vdev_label[1]@devpath" \
		    "$(printf "$fmt" "$rid")" >/dev/null ||
		    fatal "$f_aimanifest_set" \
		    "$diskinst/vdev_label[1]@devpath"
	done

	# Add the root pool
	typeset zp=$aiinst/target/logical/zpool
	zp=$(aimanifest add -r "$zp" "") ||
	    fatal "$f_aimanifest_add" "$zp"
	aimanifest set "$zp@name" "$rpool" >/dev/null ||
	    fatal "$f_aimanifest_set" "$zp@name"
	aimanifest set "$zp@is_root" true >/dev/null ||
	    fatal "$f_aimanifest_set" "$zp@is_root"

	out_manifest=$AIM_MANIFEST
}

#
# Remove the temporary devices listed in $2 (map[tid]=nid) from zone $1.
# Safe to call multiple times, as entries are removed from idmap as they are
# removed from the zone configuration.
#
function remove_devices {
	typeset -n zone=$1
	typeset -n map=$2
	typeset id
	typeset ret=0

	for id in "${!map[@]}"; do
		zonecfg -z "${zone.name}" "remove device id=$id"
		if (( $? != 0 )); then
			error "failed to remove device id=%d from zone %s" \
			    "$id" "${zone.name}"
			ret=1
			continue
		fi
		unset map["$id"]
	done
	return $ret
}

#
# Temporarily add the new zone's boot disks to the existing (source) zone so
# the source zone can install onto them.  Records the mapping of temporary
# ids to new-zone ids in $3 so trap_exit/remove_devices can undo the work.
#
function augment_devices {
	typeset -n nzone=$1	# New zone
	typeset -n ezone=$2	# Existing zone
	typeset -n map=$3	# map[tid]=nid
	typeset -i eid		# ID in existing zone
	typeset -i nid		# ID in new zone
	typeset -i tid=65535	# Temporary ID in existing zone
	typeset -a nids

	set -A nids "${!nzone.boot_disks[@]}"
	(( bootpri = exp2(31) - 1 ))

	for nid in "${nids[@]}"; do
		vlog "Adding device %s (id %d)" \
		    "${nzone.boot_disks[$nid][dev]}" "$nid"
		#
		# Find the next available device ID in the existing zone.
		# Allocate device ids starting high where there's unlikely to
		# be conflicts.
		#
		while (( tid >= 0 )) && \
		    [[ -n ${ezone.all_disks["$tid"]} ]]; do
			let tid--
		done
		# bugfix: the exhausted-search condition was "tid == 0",
		# which the loop above can never leave behind (it exits with
		# tid == -1 when every slot is taken) and which wrongly
		# rejected a free slot 0.
		if (( tid < 0 )); then
			remove_devices ezone map
			fatal "zone %s has too many devices" "${ezone.name}"
		fi

		#
		# Add the new device to the existing zone. add_install_disk()
		# has already mapped any storage uri, so we can just add the
		# device in all cases.
		#
		zonecfg -z "${ezone.name}" -f - <<-EOF
		add device
		set match=${nzone.boot_disks[$nid][dev]}
		set id=$tid
		set bootpri=$bootpri
		end
		EOF
		if (( $? != 0 )); then
			remove_devices ezone map
			fatal "failed to add device %s id %d to zone %s" \
			    "${nzone.boot_disks[$nid][dev]}" "$tid" \
			    "${ezone.name}"
		fi
		EXIT_CODE=$ZONE_SUBPROC_FATAL
		map["$tid"]=$nid
		ezone.all_disks["$tid"]=temporary
		let tid--
	done
}

#
# Boot zone $1 to milestone=none and discover its root pool name ($2) and
# the size of its boot disk ($3, as "<kilobytes>k", suitable for zvol
# creation).  The zone is halted again before returning.
#
function get_root_pool {
	typeset -n zone=$1
	typeset -n rootpool=$2
	typeset -n zvol_size=$3
	# NOTE(review): the original also re-typeset'ed "rootpool" here,
	# which would shadow the nameref; only bootdisk needs declaring.
	typeset bootdisk

	# consistency fix: use the zone nameref throughout instead of
	# hard-coding "src" (the sole caller passes src, so behavior is
	# unchanged).
	zoneadm -z "${zone.name}" boot -- -m milestone=none || fatal "$f_boot"
	src_booted=true
	zlogin "${zone.name}" /usr/sbin/devfsadm
	rootpool=$(zlogin "${zone.name}" \
	    "zfs list -Ho name / | cut -d/ -f 1")
	[[ -z $rootpool ]] && fatal "$f_root_pool"

	# First leaf vdev of the root pool; skip mirror-N container lines.
	bootdisk=$(zlogin "${zone.name}" zpool status "$rootpool" |
	    sed "1,/ $rootpool / d" |
	    nawk '$1 !~ /^mirror-/ {print $1; exit }')
	[[ -z $bootdisk ]] && fatal "$f_root_pool"

	# If the zone has never been booted after installation, the device
	# tree will not be populated. This is just one of many things that
	# will go wrong - the source zone must be booted once before cloning
	# it.
	zlogin "${zone.name}" test -c "/dev/rdsk/$bootdisk" ||
	    fatal "$f_src_not_ready" "${zone.name}"

	# Disk size in kilobytes = sectors * bytes/sector / 1024.
	zvol_size=$(zlogin "${zone.name}" prtvtoc "/dev/rdsk/$bootdisk" |
	    nawk '$1 == "*" && $3 == "bytes/sector" { bps=$2 }
		$1 == "*" && $3 == "sectors" { sectors=$2 }
		END { print sectors / 1024 * bps }')
	[[ -z $zvol_size ]] && fatal "$f_root_pool"

	zoneadm -z "${zone.name}" halt
	src_booted=false
	zvol_size=${zvol_size}k
}

#
# Best effort to ensure that the pkg cache in the source zone has the files
# with a revert tag. Do not error out if we can't get them all, as it is
# possible that they won't even be needed.
#
# Arguments:	$1 - (nameref) zone
#		$2 - publisher name
#		$3 - publisher origin, must be a file:/// uri
#
function seed_cache {
	typeset -n zone=$1
	typeset pub=$2
	typeset uri=$3

	[[ $uri == file:///* ]] || fail_internal "Invalid uri: %s" "$uri"
	typeset -a files

	# Cache paths are file/<first two hash chars>/<hash>.
	set -A files $(zlogin "${zone.name}" \
	    pkg contents -m "pkg://$pub/*" |
	    nawk '$1 == "file" && $0 ~ / revert-tag=/ {
		printf("file/%s/%s\n", substr($2,0,2), $2)
	    }' | sort -u)
	if (( ${#files[@]} == 0 )); then
		return
	fi
	vlog "Populating cache for publisher %s" "$pub"
	typeset indir="${uri#file://}/publisher/$pub"
	typeset outdir="/var/pkg/publisher/$pub"

	# Stream the files from the repository on the host into the zone's
	# pkg cache.
	(cd "$indir" && /usr/bin/tar cf - "${files[@]}") | \
	    zlogin "${zone.name}" "(cd \"$outdir\" && /usr/bin/tar xf -)"
}

set -A save_args "$0" "$@"

EXIT_CODE=$ZONE_SUBPROC_USAGE

unset sc_config dst_zonename opt_d
typeset src dst
typeset -A idmap
src_booted=false
storage_created=false
typeset -A x_opts=([force-zpool-create]= [storage-create-missing]=)

while getopts :z:c:dx: opt; do
	case $opt in
	z)	dst_zonename=$OPTARG ;;
	c)	sc_config=$(mk_abs_path $OPTARG) ;;
	d)	opt_d="-d" ;;
	x)	# May call fail_usage
		process_xopt x_opts "$OPTARG" ;;
	*)	fail_usage "option -%s not supported" "$opt" ;;
	esac
done
shift $((OPTIND - 1))

if (( $# < 1 )); then
	fail_usage ""
fi

trap trap_exit EXIT
EXIT_CODE=$ZONE_SUBPROC_TRYAGAIN

# Sets TMPDIR to a safe directory specific to this script.
init_tmpdir

check_sc_config "$sc_config"

init_zone dst "$dst_zonename"
init_zone src "$1"

# Log this script's output and aimanifest(1M) output to the same place
start_log dst clone "${save_args[@]}"
export AIM_LOGFILE=$ZONEADM_LOGFILE

get_install_disks dst
(( ${#dst.boot_disks[@]} == 0 )) && fail_internal "no dst boot disks"
get_install_disks src
(( ${#src.boot_disks[@]} == 0 )) && fail_internal "no src boot disks"

# Get the source zone's root pool name and root pool disk size. zvol_size is
# used by create_storage(), below.
get_root_pool src rootpool zvol_size

# If "-x force-zpool-create=" is not used, ensure that the boot disks
# are not already in use.
if [[ ${x_opts[force-zpool-create]} == $rootpool ]]; then
	dst.force_create_rootpool=true
else
	check_bootdisks_inuse dst "$rootpool"
fi

# Create storage for the new zone, if needed. Sets EXIT_CODE, uses zvol_size.
create_storage dst

# Add disks from dst to src, remembering the mapping of ids in idmap. Sets
# EXIT_CODE.
augment_devices dst src idmap

# By now EXIT_CODE is ZONE_SUBPROC_FATAL

# Initialize the AI manifest
typeset manifest
create_manifest "$rootpool" manifest idmap
[[ -n $manifest && -f $manifest ]] || fail_internal "manifest not created"
vlog "Generated clone manifest:\n%s" "$(cat $manifest)"

# Copy the AI manifest into the shared file system
if [[ ! -d ${src.root}/shared ]]; then
	mkdir -m 0555 "${src.root}/shared" ||
	    fatal "$f_mkdir" "${src.root}/shared"
fi
cp "$manifest" "${src.root}/shared/ai.xml" ||
    fatal "$f_cp" "$manifest" "${src.root}/shared/ai.xml"

# Boot the source zone and wait for single-user. We do a reconfiguration
# reboot to ensure that /dev has the device node for the newly-added disk(s).
zoneadm -z "${src.name}" boot -- -m milestone=single-user || fatal "$f_boot"
zlogin "${src.name}" /usr/sbin/devfsadm
src_booted=true

# Poll (up to 5 minutes) for the single-user milestone to come online.
typeset -i timeleft
for (( timeleft = 300; timeleft > 0; timeleft-- )); do
	state=$(zlogin "${src.name}" /usr/bin/svcs -H -o state \
	    svc:/milestone/single-user:default 2>/dev/null)
	[[ $state == online ]] && break
	sleep 1
done
(( timeleft == 0 )) && fatal "$f_boot_singleuser"

#
# NOTE(review): the source text was truncated in this region.  The original
# runs the auto-install CLONE deployment inside the source zone against the
# manifest copied above, echoing its output to the log (>&2) and capturing
# its exit status in $aires.  Reconstructed conservatively; confirm the
# exact auto-install invocation against the canonical clone.ksh.
#
zlogin "${src.name}" /usr/bin/auto-install -m /system/shared/ai.xml >&2
aires=$?
if (( $aires != 0 )); then
	fatal "$f_ai"
fi

zoneadm -z "${src.name}" halt
src_booted=false

# Initialize the zone's host data.
zoneadm -z "${dst.name}" attach -x initialize-hostdata-incomplete

EXIT_CODE=$ZONE_SUBPROC_OK
finish_log dst