#
# Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
#

#
# Only change PATH if you give full consideration to GNU or other variants
# of common commands having different arguments and output.
#
export PATH=/usr/bin:/usr/sbin
unset LD_LIBRARY_PATH

. /usr/lib/brand/shared/common.ksh

m_complete=$(gettext " Done: Installation completed in %s seconds.")

e_surimap=$(gettext "unable to map storage URI '%s'")
e_suriunmap=$(gettext "unable to unmap storage URI '%s'")
e_lofiadm_d=$(gettext "failed to delete lofi device %s.")
e_zpool_export=$(gettext "failed to export zpool %s")

f_ai=$(gettext "auto-install failed.")
f_ai_manifest_add=$(gettext "Could not add %s to AI manifest")
f_ai_manifest_has_disk=$(gettext "AI manifest cannot have disk specified")
f_ai_manifest_set=$(gettext "Could not set %s in AI manifest")
f_configuring=$(gettext "failed to configure zone.")
f_defds_mountprops=$(gettext "Unexpected mount properties on %s:\n\tmountpoint=%s mounted=%s canmount=%s")
f_did_dev_but_no_cluster=$(gettext "Solaris Cluster not installed: cannot use %s")
f_disk_in_use=$(gettext "One or more bootable disks is in use by an existing zpool.\nUse -x force-zpool-create=%s to overwrite existing pool.")
f_interrupted=$(gettext "installation cancelled due to interrupt.")
f_lofi_in_use=$(gettext "Device %s is already a lofi device")
f_lofi_a=$(gettext "Failed to add lofi device %s")
f_no_ancestor=$(gettext "No ancestor exists for %s")
f_no_did=$(gettext "No such did(7) device: %s (id %s)")
f_no_space=$(gettext "Insufficient space in %s:\n\trequire %s available %s shortfall %s")
f_root_pool=$(gettext "Unable to determine name of root zpool")
f_zfs_get=$(gettext "Failed to get value of %s property on %s")
f_zvol_in_use=$(gettext "zvol %s in use by zone(s) in boot environment(s): %s")
f_zvol_in_use_thisbe=$(gettext "zvol %s in use by zone %s")
f_zvol_resize=$(gettext "failed to resize %s from %ld bytes to %ld bytes")
f_mktemp=$(gettext "Cannot create temporary filename")

# Properties that may be set on zvols so that zoneadm uninstall only destroys
# zvols created by zoneadm install.
prop_prefix=com.oracle.zones.solaris-kz
prop_createdby=$prop_prefix:createdby
# The GZBE UUID will be appended to this property
prop_gzbe_prefix=$prop_prefix.gzbe
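#
# For illustration, with the definitions above a zvol created by
# "zoneadm install" for a zone named "kz1" would carry properties of the
# form below (the zone name and uuid are placeholders):
#
#	com.oracle.zones.solaris-kz:createdby=zoneadm
#	com.oracle.zones.solaris-kz.gzbe:<gzbe-uuid>=kz1
#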
#
# Overrides shared init_zone, as the special hooks for zfs paths and datasets
# are not appropriate for ephemeral zone roots.
#
function init_zone {
	typeset -n zone=$1
	typeset -C zone

	zone.name=$2
	zoneadm -z "${zone.name}" list -p | cut -d: -f 4,6 |
	    IFS=: read zone.path zone.brand
	[[ ${zone.brand} == solaris-kz ]] || \
	    fail_internal "unexpected brand '%s'" "${zone.brand}"
	zone.root=
	function zone.root.get {
		typeset -n pathref=${.sh.name%.root}.path
		.sh.value="$pathref/root"
	}
}

#
# We have nothing to check, but common code expects this.
#
function sanity_check {
	:
}

#
# get_active_be zone
#
# Gets the currently active BE, storing the name of the active BE's root
# dataset in zone.active_ds.  Kernel zones do not have boot environments in
# the twilight zone, hence a no-op.
#
# Arguments:
#
#	zone	zone structure initialized with init_zone
#
# Return:
#
#	Always returns 0
#
function get_active_be {
	return 0
}

#
# set_active_be zone bename
#
# Sets the specified BE as the active boot environment.  Kernel zones do not
# have boot environments in the twilight zone, hence a no-op.
#
# Arguments:
#
#	zone	A zone structure initialized with init_zone
#	bename	The name of the boot environment.  Should not contain "/".
#
function set_active_be {
	return 0
}

#
# discover_active_be zone
#
function discover_active_be {
	return 0
}

function tag_candidate_zbes {
	(( $# < 2 )) && return 0
	fail_internal "tag_candidate_zbes not supported with kernel zones"
}

#
# Trigger system configuration inside a zone.  For solaris-kz, we drop the
# profile files into the shared directory, where SMF will pick them up.
#
function reconfigure_zone {
	typeset sc_config=$1
	typeset profile_dir=$ZONEPATH/root/shared/sysconfig/

	vlog "$v_reconfig"

	if [[ -n $sc_config ]]; then
		/usr/sbin/sysconfig configure -g system \
		    -o "$profile_dir" -c "$sc_config" --destructive
	else
		/usr/sbin/sysconfig configure -g system \
		    -o "$profile_dir" --destructive
	fi
	if (( $? != 0 )); then
		error "$e_reconfig"
		failed=1
	fi

	[[ -n $failed ]] && fatal "$e_exitfail"
}
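#
# For reference, the storage URIs handled below are the forms accepted by
# suriadm(1M); hostnames, ids, and paths here are placeholders, e.g.:
#
#	dev:dsk/c1t0d0
#	iscsi://host/luname.naa.<id>
#	nfs://host/export/kz1/disk0.img
#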
#
# Adds the disk specified by match or suri to zone.boot_disks[].  If all goes
# well zone.boot_disks[] is updated and 0 is returned.  Otherwise, fatal() is
# called.
#
function add_install_disk {
	typeset -n zone=$1
	typeset match=$2 suri=$3 id=$4
	typeset lofidev dev lofi lofifile surimp
	typeset -a mpp

	[[ -z $match && -z $suri ]] && fail_internal "dev and suri not set"
	[[ -z $id ]] && fail_internal "id not set"

	if [[ -n $match ]]; then
		dev=$match
		[[ -b $dev ]] && fail_internal \
		    "zonecfg did not catch block device %s" "$dev"
	else
		[[ -n $suri ]] || fail_internal "suri not set"

		# Set mountpoint-prefix for zone NFS SURI
		surimp=$(suriadm parse -Ho mountpoint "$suri" 2>/dev/null)
		if [[ $surimp != "-" ]]; then
			mpp+=(-p \
			    "mountpoint-prefix=/system/volatile/zones/${zone.name}")
		fi

		# Get device and suri error output; if mktemp fails, then fail.
		errfile=$(mktemp -t suri-map-XXXXXX)
		if [[ -z $errfile ]]; then
			fatal "$f_mktemp"
		fi

		dev=$(suriadm map -Ho mapped-dev "${mpp[@]}" "$suri" \
		    2>$errfile)
		if (( $? != 0 )) || [[ -z $dev ]]; then
			surierr=$(cat "$errfile")
			rm -f "$errfile"
			# If it is a zvol, create_boot_disks will fix things
			# up in a bit.  Also, this is called in the uninstall
			# path, so it is best for it not to create things.
			typeset path=$(suriadm parse -Ho path "$suri")
			if [[ $path != @(/dev/|)zvol/dsk/* ]]; then
				# fail with suriadm map stderr info
				fatal "$e_surimap" "$surierr"
			fi
			# Path could be /dev/zvol/dsk/... or zvol/dsk/...
			# Normalize that to /dev/zvol/rdsk.
			dev=/dev/zvol/rdsk/${path#@(/dev/|)zvol/dsk/}
		# Convert mapped block to character device if needed.
		elif [[ -b $dev || -b ${dev}s2 ]]; then
			rm -f "$errfile"
			typeset bdev=$dev
			dev=${bdev/\/dsk\///rdsk/}
			if [[ ! -c $dev && ! -c ${dev}s2 ]]; then
				fatal "$e_suri_chardev" "$suri" "$bdev"
			fi
		fi
	fi

	# Solaris Cluster's did devices are not handled by auto-install so look
	# for the node-specific /dev/rdsk/ path and use that for direct install.
	if [[ ${zone.direct_install} == true && $dev == /dev/did/rdsk/* ]]; then
		typeset did did_id

		[[ -x /usr/cluster/bin/scdidadm ]] || \
		    fatal "$f_did_dev_but_no_cluster" "$dev"

		did=${dev%s+(\d)}
		did_id=${did/\/dev\/did\/rdsk\/d}
		[[ -z $did_id ]] && \
		    fail_internal "Could not convert '%s' to id" "$did"

		dev=$(/usr/cluster/bin/scdidadm -l -o path "$did_id")
		dev=${dev%%+(\s)}
		[[ -z $dev ]] && fatal "$f_no_did" "$did" "$did_id"
	fi

	# suriadm should have just created a labeled lofi if it is needed.
	# Figure out what that lofi device is.
	lofifile=$(lofiadm "$dev" 2>/dev/null)
	if (( $? == 0 )); then
		lofi=$(lofiadm "$lofifile" 2>/dev/null) && lofi=${lofi%p0}
	fi

	typeset -A zone.boot_disks[$id]
	zone.boot_disks[$id][dev]=$dev
	zone.boot_disks[$id][suri]=$suri
	zone.boot_disks[$id][lofi]=$lofi

	return 0
}
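#
# The parser below walks "zonecfg -z <zone> info device" output, which prints
# one property per line for each device resource, roughly along these lines
# (values are placeholders):
#
#	device:
#		match: /dev/zvol/dsk/rpool/kz1/disk0
#		storage: dev:dsk/c1t0d0
#		id: 1
#		bootpri: 0
#
# Unset properties may appear as "<prop> not specified"; those lines fall
# through the case statement below because the key lacks the trailing colon.
#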
#
# Look for devices with bootpri set and add them to zone.boot_disks[].
#
function get_install_disks {
	typeset -n zone=$1
	typeset dev suri id bootpri
	typeset -A zone.boot_disks
	typeset -A zone.all_disks

	zonecfg -z "${zone.name}" info device | while read key val; do
		case $key in
		device:)
			if [[ -n $bootpri ]]; then
				add_install_disk zone "$dev" "$suri" "$id"
				zone.all_disks["$id"]=present
			fi
			unset dev suri id bootpri
			;;
		id:)		id=$val ;;
		match:)
			if [[ $val == /dev/* ]]; then
				dev=$val
			else
				dev=/dev/$val
			fi
			;;
		storage:)	suri=$val ;;
		bootpri:)	bootpri=$val ;;
		esac
	done
	if [[ -n $bootpri ]]; then
		add_install_disk zone "$dev" "$suri" "$id"
		zone.all_disks["$id"]=present
	fi
}

# Determines whether zoneadm was responsible for the creation of this dataset.
function ds_created_by_zoneadm {
	typeset ds=$1
	typeset val=$(zfs list -Ho "$prop_createdby" "$ds")

	# Return 0 (true) if $val is set to anything.
	[[ $val != - ]]
}

# Determines if the dataset is already claimed by a zone in this GZBE.  If
# in use, the name of the zone that is using it is returned.
function ds_used_by_this_gzbe {
	typeset ds=$1
	typeset -n usedby=$2
	typeset uuid=$(beadm list -H | nawk -F\; '$4 == "/" { print $2 }')

	[[ -z $uuid ]] && fail_internal "Could not get gzbe uuid"

	usedby=$(zfs list -Ho "$prop_gzbe_prefix:$uuid" "$ds")

	# Return 0 (true) if $usedby is set to anything meaningful.
	[[ $usedby != - ]]
}

# ds_used_by_other_gzbe dataset [gzbe_array]
#
# Return true if the specified dataset has a property $prop_gzbe_prefix:<uuid>
# where <uuid> matches the uuid of a global zone BE that is present on this
# system and is not the currently running GZBE.  If gzbe_array is specified,
# the list of global zone BEs where the dataset is in use is returned via a
# variable by this name.
#
function ds_used_by_other_gzbe {
	typeset ds=$1
	typeset -A dsbes
	typeset -A gzbes
	typeset prop val
	typeset curgzbe
	typeset be uuid flags junk

	if [[ -n $2 ]]; then
		typeset -n retgzbes=$2
	else
		typeset -A retgzbes
	fi

	# Get a list of uuids of the global zones that have a hold on the
	# dataset.
	zfs get -Ho property,value all "$ds" 2>/dev/null |
	    while IFS=$'\t' read prop val; do
		[[ $prop == $prop_gzbe_prefix:* ]] || continue
		uuid=${prop#$prop_gzbe_prefix:}
		dsbes[$uuid]=$val
	done

	# If the dataset doesn't exist or has not been held for any
	# gzbes it is not in use.
	if (( ${#dsbes[@]} == 0 )); then
		return 1
	fi

	# Map gzbe uuids to BE names and figure out which gzbe is active.
	beadm list -H | while IFS=';' read be uuid flags junk; do
		gzbes[$uuid]=$be
		if [[ $flags == *N* ]]; then
			curgzbe=$be
		fi
	done

	# Of the gzbes tagged on the dataset, which ones point to other
	# gzbes on this system?
	for uuid in ${!dsbes[@]}; do
		typeset gzbe=${gzbes[$uuid]}

		# Skip gzbes that no longer exist on this system.
		[[ -z $gzbe ]] && continue

		# Skip the current gzbe
		[[ $gzbe == $curgzbe ]] && continue

		# Still active, save it for return
		retgzbes[$gzbe]=${dsbes[$uuid]}
	done

	if (( ${#retgzbes[@]} == 0 )); then
		return 1
	fi
	return 0
}
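#
# For illustration, the two parsers above consume output shaped roughly like
# the following (uuids and names are placeholders):
#
#	$ zfs get -Ho property,value all rpool/VARSHARE/zones/kz1/disk0
#	com.oracle.zones.solaris-kz.gzbe:<uuid-a>	kz1
#	...
#
#	$ beadm list -H
#	solaris-1;<uuid-a>;NR;/;2.50G;static;2015-01-01 12:00
#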
# Translates a human readable string (e.g. 1.5g) into the number of bytes.
# Handles locale-specific decimal point appropriately.
function human_to_bytes {
	typeset -l insize=$1
	integer outsize=0
	typeset dec=$(locale decimal_point)

	# Pattern to match a number that may have a decimal point in it.
	# Unfortunately, this pattern matches the empty string too.
	typeset pat_float="*([0-9]){0,1}($dec*([0-9]))"
	# Pattern to match an optional base-2 scaling factor
	typeset pat_scale="{0,1}([tgmkb])"

	if [[ -z $insize || $insize != ${pat_float}${pat_scale} ]]; then
		fatal "$m_bad_size" "$insize"
	fi

	integer pow
	case $insize in
	*t)	(( pow=40 )) ;;
	*g)	(( pow=30 )) ;;
	*m)	(( pow=20 )) ;;
	*k)	(( pow=10 )) ;;
	*b)	(( pow=0 )) ;;
	+(\d))	(( pow=0 )) ;;
	*)	fail_internal "unable to determine scale of %s" \
		    "$insize" ;;
	esac

	insize=$( echo "$insize" | sed 's/[tgmkb]$//' )
	print $(( insize * 2 ** pow ))
}

# Translates bytes to a scaled value
function bytes_to_human {
	integer insize=$1
	typeset scale
	integer div=1
	typeset res

	for scale in "" k m g t; do
		if (( (insize / div) < 1024 )); then
			break
		fi
		(( div *= 1024 ))
	done

	# bc doesn't know about locale-specific decimal points
	res=$(echo "$insize / $div" | bc -l | tr . "$(locale decimal_point)")

	printf "%0.1f%s" "$res" "$scale"
}

# Approximates the same function in libzfs.  Since it doesn't know the value of
# the copies property, it is not included in this calculation.  Once the caller
# knows where in the dataset hierarchy the new zvol will live, just multiply the
# result from this function by copies.  It will be a slight overestimate as
# metadnode replicas will be overcounted.
function zvol_compute_reservation {
	typeset volsize_human=$1
	integer volblocksize=8192
	integer numdb=7
	integer nblocks
	integer volsize
	integer dnodes_per_level=128

	volsize=$(human_to_bytes "$volsize_human")

	(( nblocks = volsize / volblocksize ))
	while (( nblocks > 1 )); do
		(( nblocks += dnodes_per_level - 1 ))
		(( nblocks /= dnodes_per_level ))
		(( numdb += nblocks ))
	done

	(( numdb *= 2 ))
	(( numdb *= 1 << 14 ))

	print $(( volsize + numdb ))
}
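# A quick sanity check of the helpers above (illustrative values; the actual
# decimal separator follows the locale):
#
#	human_to_bytes 1.5g		-> 1610612736
#	bytes_to_human 1610612736	-> 1.5g
#
# For a 16g volume with the default 8k volblocksize, the indirect-block
# estimate from zvol_compute_reservation works out to roughly an extra 0.5g
# on top of the 16g volsize.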
function find_ancestor {
	typeset ds=$1

	while [[ -n $ds && $ds != "." ]]; do
		# Do not use zfs wrapper function, as it would offer no value.
		/usr/sbin/zfs list "$ds" >/dev/null 2>&1
		if (( $? == 0 )); then
			print "$ds"
			return 0
		fi
		ds=$(dirname $ds)
	done
	fatal "$f_no_ancestor" "$1"
}

function get_available {
	typeset ds=$1

	zfs get -Hpo value available "$ds" || \
	    fatal "$f_zfs_get" available "$ds"
}

function get_copies {
	typeset ds=$1

	zfs get -Hpo value copies "$ds" || \
	    fatal "$f_zfs_get" copies "$ds"
}

function resize_zvol {
	typeset zvol=$1
	typeset size=$(human_to_bytes "$2")
	typeset cursize

	cursize=$(zfs get -Hpo value volsize "$zvol") || \
	    fatal "$f_zfs_get" volsize "$zvol"
	if (( $size == $cursize )); then
		return
	fi
	zfs set volsize=$size "$zvol" && \
	    zfs set refreservation=auto "$zvol" || \
	    fatal "$f_zvol_resize" "$zvol" "$cursize" "$size"
}

#
# Creates backing stores for devices and suspend images.
#
# If any dataset is created, EXIT_CODE is set to $ZONE_SUBPROC_FATAL.  Each
# dataset that is created is added to the zone.created_datasets indexed array
# to allow for cleanup in the event of failure later in the install process.
#
function create_storage {
	typeset -n zone=$1
	typeset zone_ds_needed=false
	typeset id
	typeset rootpool
	typeset defds
	integer zvol_size_bytes
	typeset -A avail
	typeset -A copies

	[[ -z $zvol_size ]] && fail_internal "zvol_size not defined"
	zvol_size_bytes=$(zvol_compute_reservation "$zvol_size")

	rootpool=$(zfs list -Ho name /) || fatal "$f_root_pool"
	rootpool=${rootpool/\/*}
	[[ -z $rootpool || $rootpool == */* ]] && \
	    fail_internal "Root pool '%s' invalid" "$rootpool"
	defds=$rootpool/VARSHARE/zones/${zone.name}

	for id in ${!zone.boot_disks[@]}; do
		typeset dev=${zone.boot_disks[$id][dev]}
		typeset zvol=${dev/\/dev\/zvol\/?(r)dsk\/}

		[[ $dev == /dev/zvol/?(r)dsk/* ]] || continue

		if [[ ${zone.force_create_rootpool} != true ]]; then
			# Prevent installation over a kernel zone installed in
			# another GZBE.
			typeset -A gzbes
			ds_used_by_other_gzbe "$zvol" gzbes
			if (( ${#gzbes[@]} != 0 )); then
				fatal "$f_zvol_in_use" "$dev" "${!gzbes[*]}"
			fi
		fi

		if [[ $dev == /dev/zvol/?(r)dsk/$defds/* ]]; then
			zone_ds_needed=true
		fi

		if [[ ! -b "$dev" && ! -c "$dev" ]]; then
			# The device doesn't exist, so we need to ensure that
			# the closest ancestor dataset has enough available
			# (not free!) space to accommodate the zvol.  Be sure
			# that multiple zvols created as a child of the same
			# ancestor are accounted for appropriately.  Also,
			# don't forget that zvols reserve space for metadata
			# and perhaps multiple copies.
			typeset ancestor
			ancestor=$(find_ancestor "$zvol")
			if [[ -z ${avail[$ancestor]} ]]; then
				integer avail[$ancestor]
				avail[$ancestor]=$(get_available "$ancestor")
				copies[$ancestor]=$(get_copies "$ancestor")
			fi
			integer sz
			(( sz = ${copies[$ancestor]} * $zvol_size_bytes ))
			if (( $sz <= 0 )); then
				fail_internal "Invalid size: %s (%s %s)" "$sz" \
				    "${copies[$ancestor]}" "$zvol_size_bytes"
			fi
			if (( ${avail[$ancestor]} < $sz )); then
				integer need
				(( need = sz - avail[$ancestor] ))
				fatal "$f_no_space" "$ancestor" \
				    "$(bytes_to_human "$sz")" \
				    "$(bytes_to_human "${avail[$ancestor]}")" \
				    "$(bytes_to_human "$need")"
			fi
			(( avail[$ancestor] -= sz ))
		fi
	done

	# Create default zone dataset and mount at /system/zones/$zonename if
	# any default datasets will need to be created.
	if $zone_ds_needed; then
		typeset mountpoint mounted canmount

		LC_ALL=C /usr/sbin/zfs list -Ho mountpoint,mounted,canmount \
		    "$defds" 2>/dev/null |
		    IFS=$'\t' read mountpoint mounted canmount
		if [[ -z $mountpoint ]]; then
			# Dataset does not exist, create it.
			zfs create -o "$prop_createdby=zoneadm" \
			    -o mountpoint="/system/zones/${zone.name}" \
			    "$defds" || fatal "$f_zfs_create" "$defds"
			EXIT_CODE=$ZONE_SUBPROC_FATAL
			zone.created_datasets+=("$defds")
		elif [[ $mountpoint != "/system/zones/${zone.name}" ||
		    $mounted != yes || $canmount != on ]]; then
			fatal "$f_defds_mountprops" "$defds" "$mountpoint" \
			    "$mounted" "$canmount"
		fi
	fi

	#
	# Create boot disk(s).  Each one gets a property
	# "$prop_gzbe_prefix:<gzbe_uuid>" to track usage across global zone
	# BEs.
	#
	typeset gzbe_uuid=$(beadm list -H | nawk -F\; '$4 == "/" { print $2 }')
	[[ -z $gzbe_uuid ]] && fail_internal "Could not get gzbe uuid"

	for id in ${!zone.boot_disks[@]}; do
		typeset dev=${zone.boot_disks[$id][dev]}
		typeset zvol=${dev/\/dev\/zvol\/?(r)dsk\/}
		typeset usedby

		[[ $dev == /dev/zvol/* ]] || continue

		if [[ ${zone.force_create_rootpool} == true ]] &&
		    [[ -b $dev || -c $dev ]]; then
			if [[ -n ${zone.xopts[install-size]} ]]; then
				resize_zvol "$zvol" \
				    "${zone.xopts[install-size]}"
			fi
		elif [[ ! -b $dev && ! -c $dev ]]; then
			zfs create -pV "$zvol_size" -o primarycache=metadata \
			    -o secondarycache=metadata \
			    -o "$prop_createdby=zoneadm" \
			    -o "$prop_gzbe_prefix:$gzbe_uuid=${zone.name}" \
			    "$zvol" || fatal "$f_zfs_create" "$zvol"
			zone.boot_disks[$id][created]=true
			EXIT_CODE=$ZONE_SUBPROC_FATAL
			zone.created_datasets+=("$zvol")
		elif [[ -d $dev ]]; then
			fatal "$e_dataset_exists" "$zvol"
		elif ds_used_by_this_gzbe "$zvol" usedby; then
			fatal "$f_zvol_in_use_thisbe" "$zvol" "$usedby"
		elif ds_created_by_zoneadm "$zvol"; then
			typeset prop=$prop_gzbe_prefix:$gzbe_uuid=${zone.name}
			zfs set "$prop" "$zvol" || \
			    fatal "$e_zfs_set" "$prop" "$zvol"
		fi

		# XXX do we really need to configure lofi devices?  Perhaps
		# only for direct install?
		if [[ -z ${zone.boot_disks[$id][lofi]} ]]; then
			lofiadm "$dev" >/dev/null 2>&1 && \
			    fatal "$f_lofi_in_use" "$dev"
			typeset lofi
			lofi=$(lofiadm -la "$dev") || fatal "$f_lofi_a" "$dev"
			zone.boot_disks[$id][lofi]=${lofi%p0}
		fi
	done
}

#
# Look for a zpool that matches the zpool name created by auto-install.  Be
# very careful to only muck with a pool that has a name and altroot that match
# the behavior of auto-install.
#
function export_install_zpool {
	typeset disk=$(basename "$1")
	typeset zpool
	typeset altroot

	[[ -z $disk ]] && fail_internal "invalid disk <%s>" "$1"

	zpool list -Ho name,altroot | while IFS=$'\t' read zpool altroot; do
		# auto-install creates pools as install_rpool_<id> and imports
		# them with an altroot of /system/volatile/install.<id>/<pool>.
		[[ $zpool == install_rpool_+(\d) ]] || continue
		[[ $altroot == /system/volatile/install.+(\d)/$zpool ]] || \
		    continue

		# This pool was imported by auto-install.  Be sure it is the
		# right one.
		typeset line
		typeset cfgsect=false
		typeset -i blanklines=0
		zpool status "$zpool" | while read line; do
			if [[ $cfgsect == false ]]; then
				if [[ $line == "config:" ]]; then
					cfgsect=true
				fi
				continue
			fi
			# The config section has one blank line at the
			# beginning and another just before the next section
			# starts.
			[[ -z $line ]] && (( blanklines++ ))
			(( blanklines > 1 )) && break

			# If the disk matches, export the zpool and return.
			set -- $line
			if [[ $1 == $disk ]]; then
				integer try
				integer maxtries=5
				for (( try = 0; try < maxtries; try++ )); do
					zpool export "$zpool" && break
					(( try == maxtries - 1 )) || sleep 1
				done
				zpool list "$zpool" >/dev/null 2>&1 && \
				    error "$e_zpool_export" "$zpool"
				return
			fi
		done
	done
}
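#
# For reference, the "config:" section scanned above looks roughly like this
# (pool and device names are placeholders); the first field of each vdev line
# is what gets compared against $disk:
#
#	config:
#
#		NAME                 STATE     READ WRITE CKSUM
#		install_rpool_1234   ONLINE       0     0     0
#		  c1t0d0             ONLINE       0     0     0
#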
#
# Used by cleanup routines to roll back the work of create_storage.  If the
# second argument is "true" the datasets listed in zone.created_datasets will
# be destroyed.
#
function cleanup_storage {
	typeset -n zone=$1
	typeset destroy=$2
	typeset id
	integer ret=0
	integer try

	[[ $destroy == @(true|false) ]] || \
	    fail_internal "invalid value (%s) for destroy" "$destroy"

	# Check for NFS and file SURI devices
	for id in ${!zone.boot_disks[@]}; do
		typeset suri=${zone.boot_disks[$id][suri]}
		typeset lofi=${zone.boot_disks[$id][lofi]}
		typeset -a mpp
		typeset suri_err

		[[ $suri == @(nfs|file):* ]] || continue

		export_install_zpool "$lofi"

		# Set mountpoint-prefix for zone NFS SURI
		surimp=$(suriadm parse -Ho mountpoint "$suri" 2>/dev/null)
		if [[ $surimp != "-" ]]; then
			mpp+=(-p \
			    "mountpoint-prefix=/system/volatile/zones/${zone.name}")
		fi

		# If suri is mapped, then unmap.  Suri may never have been
		# mapped or suri may have been unmapped due to attach
		# at the end of an install/clone.
		suriadm lookup-mapping "${mpp[@]}" "$suri" >/dev/null 2>&1
		if (( $? == 0 )); then
			suri_err=$(suriadm unmap "${mpp[@]}" "$suri" 2>&1)
			if (( $? != 0 )); then
				error "$e_suriunmap" "$suri_err"
				ret=1
			fi
		fi
	done

	# Check for lofi devices not already deleted by suriadm
	for id in ${!zone.boot_disks[@]}; do
		typeset lofi=${zone.boot_disks[$id][lofi]}

		[[ -e $lofi ]] || [[ -e ${lofi}p0 ]] || continue

		export_install_zpool "$lofi"

		[[ ! -e $lofi && -e ${lofi}p0 ]] && lofi=${lofi}p0
		for (( try=1; try <= 5; try++ )); do
			lofiadm -d "$lofi" && break
			log "Failed to delete lofi %s on try %d." "$lofi" $try
			if (( try == 5 )); then
				error "$e_lofiadm_d" "$lofi"
				ret=1
			else
				sleep $try
			fi
		done
	done

	$destroy || return $ret

	# Destroy in reverse order of creation.  If destruction is successful,
	# the array element is unset.
	integer i
	for (( i=${#zone.created_datasets[@]} - 1; i >= 0; i-- )); do
		# Sometimes zfs destroy is busy due to transient holds.  These
		# can come from someone running "zfs list" or similar commands.
		for (( try=1; try <= 5; try++ )); do
			typeset ds=${zone.created_datasets[i]}

			[[ -z $ds ]] && break
			if zfs destroy "$ds"; then
				unset zone.created_datasets[i]
				break
			fi
			if (( try == 5 )); then
				error "$e_zfs_destroy" "$ds"
				ret=1
			else
				sleep $try
			fi
		done
	done

	return $ret
}

function check_bootdisks_inuse {
	typeset -n zone=$1
	typeset rootpool=$2
	typeset -a disks
	typeset id dev
	typeset errmsg=
	typeset -i err
	typeset -a cmd
	typeset lofi
	typeset -a rmlofi

	for id in "${!zone.boot_disks[@]}"; do
		if [[ -n ${zone.boot_disks[$id][lofi]} ]]; then
			dev=${zone.boot_disks[$id][lofi]}
		elif [[ ${zone.boot_disks[$id][dev]} == /dev/zvol/* ]]; then
			dev=${zone.boot_disks[$id][dev]}
			dev=${dev/\/dev\/zvol\/rdsk//dev/zvol/dsk}
			# The device may not yet exist.
			[[ -b $dev ]] || continue
			lofi=$(lofiadm -la "$dev") || fatal "$f_lofi_a" "$dev"
			rmlofi+=($lofi)
			dev=${lofi%p0}
		else
			dev=${zone.boot_disks[$id][dev]}
		fi
		disks+=($dev)
	done

	set -A cmd zpool create -n "test_rpool_create_${zone.name}"
	case ${#disks[@]} in
	0)	return 0 ;;
	1)	cmd+=("${disks[@]}") ;;
	*)	cmd+=(mirror "${disks[@]}") ;;
	esac

	# Run zpool create in dry-run mode.
	errmsg=$("${cmd[@]}" 2>&1)
	err=$?

	# Clean up any temporary lofi devices
	for lofi in "${rmlofi[@]}"; do
		lofiadm -d "$lofi" || error "$e_lofiadm_d" "$lofi"
	done

	# If zpool create's dry run succeeded, we're done.
	(( err == 0 )) && return 0

	error "%s" "$(print -- "$errmsg" | grep /dev/)"
	vlog "Full error from '${cmd[*]}' follows:\n------\n%s\n------" \
	    "$errmsg"
	fatal "$f_disk_in_use" "$rootpool"
}
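#
# As an example of the in-use check above, for a zone named "kz1" with two
# boot disks the constructed dry run would be equivalent to the following
# (zone and device names are placeholders):
#
#	zpool create -n test_rpool_create_kz1 mirror /dev/rdsk/c1t0d0 \
#	    /dev/rdsk/c1t1d0
#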