From 9fdd7321a52ddca1650a46caf5ac215e239d6127 Mon Sep 17 00:00:00 2001
From: CentOS Sources
Date: Jan 11 2022 09:06:22 +0000
Subject: import rear-2.4-15.el7_9

---

diff --git a/SOURCES/rear-bz1930662.patch b/SOURCES/rear-bz1930662.patch
new file mode 100644
index 0000000..c453df3
--- /dev/null
+++ b/SOURCES/rear-bz1930662.patch
@@ -0,0 +1,147 @@
+diff --git a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh b/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh
+index 27e348ad..60330007 100644
+--- a/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh
++++ b/usr/share/rear/backup/RSYNC/default/200_check_rsync_relative_option.sh
+@@ -5,6 +5,6 @@
+ # for the default values see the standard definition in conf/default.conf file
+
+ if ! grep -q relative <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then
+- BACKUP_RSYNC_OPTIONS=( ${BACKUP_RSYNC_OPTIONS[@]} --relative )
++ BACKUP_RSYNC_OPTIONS+=( --relative )
+ Log "Added option '--relative' to the BACKUP_RSYNC_OPTIONS array during $WORKFLOW workflow"
+ fi
+diff --git a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh
+index 002bfa96..ed6097d3 100644
+--- a/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh
++++ b/usr/share/rear/backup/RSYNC/default/500_make_rsync_backup.sh
+@@ -37,7 +37,7 @@ ProgressStart "Running archive operation"
+ ;;
+
+ (*)
+- # no other backup programs foreseen then rsync so far
++ # no other backup programs foreseen than rsync so far
+ :
+ ;;
+
+@@ -117,13 +117,12 @@ _rc="$(cat $TMP_DIR/retval)"
+
+ sleep 1
+ # everyone should see this warning, even if not verbose
+-test "$_rc" -gt 0 && VERBOSE=1 LogPrint "WARNING !
++test "$_rc" -gt 0 && Error "
+ There was an error (${rsync_err_msg[$_rc]}) during archive creation.
+ Please check the archive and see '$RUNTIME_LOGFILE' for more information.
+
+-Since errors are often related to files that cannot be saved by
+-$BACKUP_PROG, we will continue the $WORKFLOW process. However, you MUST
+-verify the backup yourself before trusting it !
++If the error is related to files that cannot and should not be saved by
++$BACKUP_PROG, they should be excluded from the backup.
+
+ "
+
+diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf
+index 6e98b427..0c9bf37d 100644
+--- a/usr/share/rear/conf/default.conf
++++ b/usr/share/rear/conf/default.conf
+@@ -922,7 +922,8 @@ BACKUP_ONLY_EXCLUDE="no"
+ MANUAL_INCLUDE=NO
+ # Disable SELinux policy during backup with NETFS or RSYNC (default yes)
+ BACKUP_SELINUX_DISABLE=1
+-# Enable integrity check of the backup archive (only with BACKUP=NETFS and BACKUP_PROG=tar)
++# Enable integrity check of the backup archive (full check only with BACKUP=NETFS and BACKUP_PROG=tar,
++# with BACKUP=rsync or BACKUP_PROG=rsync it only checks whether rsync completed the restore successfully)
+ BACKUP_INTEGRITY_CHECK=
+ # Define BACKUP_TYPE.
+ # By default BACKUP_TYPE is empty which means "rear mkbackup" will create a full backup.
+diff --git a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh +index a3731fd9..f2a5782c 100644 +--- a/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh ++++ b/usr/share/rear/output/RSYNC/default/900_copy_result_files.sh +@@ -37,4 +37,4 @@ esac + + # cleanup the temporary space (need it for the log file during backup) + rm -rf "${TMP_DIR}/rsync/${RSYNC_PREFIX}/" +-LogIfError "Could not cleanup temoprary rsync space: ${TMP_DIR}/rsync/${RSYNC_PREFIX}/" ++LogIfError "Could not cleanup temporary rsync space: ${TMP_DIR}/rsync/${RSYNC_PREFIX}/" +diff --git a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh +index 1a2ffb8f..1e23eea2 100644 +--- a/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh ++++ b/usr/share/rear/prep/RSYNC/default/150_check_rsync_protocol_version.sh +@@ -30,7 +30,7 @@ else + + fi + +-if [ "${RSYNC_USER}" != "root" ]; then ++if [ "${RSYNC_USER}" != "root" -a $RSYNC_PROTO = "ssh" ]; then + if [ $RSYNC_PROTOCOL_VERSION -gt 29 ]; then + if grep -q "no xattrs" "$TMP_DIR/rsync_protocol"; then + # no xattrs available in remote rsync, so --fake-super is not possible +diff --git a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh +index 2a0bf15e..3c2ea9b1 100644 +--- a/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh ++++ b/usr/share/rear/restore/RSYNC/default/400_restore_rsync_backup.sh +@@ -33,9 +33,10 @@ ProgressStart "Restore operation" + ;; + + (*) +- # no other backup programs foreseen then rsync so far ++ # no other backup programs foreseen than rsync so far + : + ;; ++ + esac + echo $? >$TMP_DIR/retval + ) >"${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log" & +@@ -65,6 +66,7 @@ case "$(basename $BACKUP_PROG)" in + ProgressStep + done + ;; ++ + esac + ProgressStop + +@@ -72,15 +74,21 @@ transfertime="$((SECONDS-starttime))" + + # harvest return code from background job. The kill -0 $BackupPID loop above should + # have made sure that this wait won't do any real "waiting" :-) +-wait $BackupPID +-_rc=$? ++wait $BackupPID || LogPrintError "Restore job returned a nonzero exit code $?" ++# harvest the actual return code of rsync. Finishing the pipeline with an error code above is actually unlikely, ++# because rsync is not the last command in it. But error returns from rsync are common and must be handled. ++_rc="$(cat $TMP_DIR/retval)" + + sleep 1 +-test "$_rc" -gt 0 && LogPrint "WARNING ! ++if test "$_rc" -gt 0 ; then ++ # TODO: Shouldn't we tell the user to check ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log as well? ++ LogPrintError "WARNING ! + There was an error (${rsync_err_msg[$_rc]}) while restoring the archive. + Please check '$RUNTIME_LOGFILE' for more information. You should also + manually check the restored system to see whether it is complete. 
+ " ++ is_true "$BACKUP_INTEGRITY_CHECK" && Error "Integrity check failed, restore aborted because BACKUP_INTEGRITY_CHECK is enabled" ++fi + + _message="$(tail -14 ${TMP_DIR}/${BACKUP_PROG_ARCHIVE}-restore.log)" + +@@ -89,3 +97,5 @@ if [ $_rc -eq 0 -a "$_message" ] ; then + elif [ "$size" ]; then + LogPrint "Restored $((size/1024/1024)) MiB in $((transfertime)) seconds [avg $((size/1024/transfertime)) KiB/sec]" + fi ++ ++return $backup_prog_rc +diff --git a/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh b/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh +index 3622884a..336163fb 100644 +--- a/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh ++++ b/usr/share/rear/verify/RSYNC/GNU/Linux/600_check_rsync_xattr.sh +@@ -4,7 +4,7 @@ + + # if --xattrs is already set; no need to do it again + if ! grep -q xattrs <<< $(echo ${BACKUP_RSYNC_OPTIONS[@]}); then +- RSYNC_OPTIONS=( "${BACKUP_RSYNC_OPTIONS[@]}" --xattrs ) ++ BACKUP_RSYNC_OPTIONS+=( --xattrs ) + fi + + } diff --git a/SOURCES/rear-bz1945869.patch b/SOURCES/rear-bz1945869.patch new file mode 100644 index 0000000..46e8f66 --- /dev/null +++ b/SOURCES/rear-bz1945869.patch @@ -0,0 +1,239 @@ +diff --git a/usr/share/rear/finalize/Linux-i386/630_run_efibootmgr.sh b/usr/share/rear/finalize/Linux-i386/630_run_efibootmgr.sh +index dfbeddcb..7791312b +--- a/usr/share/rear/finalize/Linux-i386/630_run_efibootmgr.sh ++++ b/usr/share/rear/finalize/Linux-i386/630_run_efibootmgr.sh +@@ -1,28 +1,79 @@ + # only useful for UEFI systems in combination with grub[2]-efi + is_true $USING_UEFI_BOOTLOADER || return 0 # empty or 0 means using BIOS + +-# check if $TARGET_FS_ROOT/boot/efi is mounted +-[[ -d "$TARGET_FS_ROOT/boot/efi" ]] +-StopIfError "Could not find directory $TARGET_FS_ROOT/boot/efi" +- +-BootEfiDev="$( mount | grep "boot/efi" | awk '{print $1}' )" +-Dev=$( get_device_name $BootEfiDev ) # /dev/sda1 or /dev/mapper/vol34_part2 or /dev/mapper/mpath99p4 +-ParNr=$( get_partition_number $Dev ) # 1 (must anyway be a low nr <9) +-Disk=$( echo ${Dev%$ParNr} ) # /dev/sda or /dev/mapper/vol34_part or /dev/mapper/mpath99p +- +-if [[ ${Dev/mapper//} != $Dev ]] ; then # we have 'mapper' in devname +- # we only expect mpath_partX or mpathpX or mpath-partX +- case $Disk in +- (*p) Disk=${Disk%p} ;; +- (*-part) Disk=${Disk%-part} ;; +- (*_part) Disk=${Disk%_part} ;; +- (*) Log "Unsupported kpartx partition delimiter for $Dev" +- esac ++LogPrint "Creating EFI Boot Manager entries..." ++ ++local esp_mountpoint esp_mountpoint_inside boot_efi_parts boot_efi_dev ++ ++# When UEFI_BOOTLOADER is not a regular file in the restored target system ++# (cf. how esp_mountpoint is set below) it means BIOS is used ++# (cf. rescue/default/850_save_sysfs_uefi_vars.sh) ++# which includes that also an empty UEFI_BOOTLOADER means using BIOS ++# because when UEFI_BOOTLOADER is empty the test below evaluates to ++# test -f /mnt/local/ ++# which also returns false because /mnt/local/ is a directory ++# (cf. https://github.com/rear/rear/pull/2051/files#r258826856) ++# but using BIOS conflicts with USING_UEFI_BOOTLOADER is true ++# i.e. we should create EFI Boot Manager entries but we cannot: ++if ! 
test -f "$TARGET_FS_ROOT/$UEFI_BOOTLOADER" ; then ++ LogPrintError "Failed to create EFI Boot Manager entries (UEFI bootloader '$UEFI_BOOTLOADER' not found under target $TARGET_FS_ROOT)" ++ return 1 + fi +-BootLoader=$( echo $UEFI_BOOTLOADER | cut -d"/" -f4- | sed -e 's;/;\\;g' ) # EFI\fedora\shim.efi +-Log efibootmgr --create --gpt --disk ${Disk} --part ${ParNr} --write-signature --label \"${OS_VENDOR} ${OS_VERSION}\" --loader \"\\${BootLoader}\" +-efibootmgr --create --gpt --disk ${Disk} --part ${ParNr} --write-signature --label "${OS_VENDOR} ${OS_VERSION}" --loader "\\${BootLoader}" +-LogIfError "Problem occurred with creating an efibootmgr entry" + +-# ok, boot loader has been set-up - tell rear we are done using following var. +-NOBOOTLOADER= ++esp_mountpoint="$TARGET_FS_ROOT/boot/efi" ++# check if esp_mountpoint is mounted ++[[ -d "$esp_mountpoint" ]] ++StopIfError "Could not find directory $esp_mountpoint" ++ ++# Mount point inside the target system ++esp_mountpoint_inside=/boot/efi ++ ++boot_efi_parts=$( find_partition "fs:$esp_mountpoint_inside" fs ) ++if ! test "$boot_efi_parts" ; then ++ LogPrint "Unable to find ESP $esp_mountpoint_inside in layout" ++ LogPrint "Trying to determine device currently mounted at $esp_mountpoint as fallback" ++ boot_efi_dev="$( mount | grep "$esp_mountpoint" | awk '{print $1}' )" ++ if ! test "$boot_efi_dev" ; then ++ LogPrintError "Cannot create EFI Boot Manager entry (unable to find ESP $esp_mountpoint among mounted devices)" ++ return 1 ++ fi ++ if test $(get_component_type "$boot_efi_dev") = part ; then ++ boot_efi_parts="$boot_efi_dev" ++ else ++ boot_efi_parts=$( find_partition "$boot_efi_dev" ) ++ fi ++ if ! test "$boot_efi_parts" ; then ++ LogPrintError "Cannot create EFI Boot Manager entry (unable to find partition for $boot_efi_dev)" ++ return 1 ++ fi ++ LogPrint "Using fallback EFI boot partition(s) $boot_efi_parts (unable to find ESP $esp_mountpoint_inside in layout)" ++fi ++ ++local bootloader partition_block_device partition_number disk efipart ++ ++# EFI\fedora\shim.efi ++bootloader=$( echo $UEFI_BOOTLOADER | cut -d"/" -f4- | sed -e 's;/;\\;g' ) ++ ++for efipart in $boot_efi_parts ; do ++ # /dev/sda1 or /dev/mapper/vol34_part2 or /dev/mapper/mpath99p4 ++ partition_block_device=$( get_device_name $efipart ) ++ # 1 or 2 or 4 for the examples above ++ partition_number=$( get_partition_number $partition_block_device ) ++ if ! 
disk=$( get_device_from_partition $partition_block_device $partition_number ) ; then ++ LogPrintError "Cannot create EFI Boot Manager entry for ESP $partition_block_device (unable to find the underlying disk)" ++ # do not error out - we may be able to locate other disks if there are more of them ++ continue ++ fi ++ LogPrint "Creating EFI Boot Manager entry '$OS_VENDOR $OS_VERSION' for '$bootloader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER') " ++ Log efibootmgr --create --gpt --disk $disk --part $partition_number --write-signature --label \"${OS_VENDOR} ${OS_VERSION}\" --loader \"\\${bootloader}\" ++ if efibootmgr --create --gpt --disk $disk --part $partition_number --write-signature --label "${OS_VENDOR} ${OS_VERSION}" --loader "\\${bootloader}" ; then ++ # ok, boot loader has been set-up - continue with other disks (ESP can be on RAID) ++ NOBOOTLOADER='' ++ else ++ LogPrintError "efibootmgr failed to create EFI Boot Manager entry on $disk partition $partition_number (ESP $partition_block_device )" ++ fi ++done ++ ++is_true $NOBOOTLOADER || return 0 ++LogPrintError "efibootmgr failed to create EFI Boot Manager entry for '$bootloader' (UEFI_BOOTLOADER='$UEFI_BOOTLOADER')" ++return 1 +diff --git a/usr/share/rear/lib/layout-functions.sh b/usr/share/rear/lib/layout-functions.sh +index eacc708f..69f80ed7 100644 +--- a/usr/share/rear/lib/layout-functions.sh ++++ b/usr/share/rear/lib/layout-functions.sh +@@ -271,12 +271,20 @@ get_child_components() { + done + } + +-# Return all ancestors of component $1 [ of type $2 ] ++# Return all ancestors of component $1 [ of type $2 [ skipping types $3 during resolution ] ] + get_parent_components() { +- declare -a ancestors devlist +- declare current child parent ++ declare -a ancestors devlist ignoretypes ++ declare current child parent parenttype + + devlist=( "$1" ) ++ if [[ "$3" ]] ; then ++ # third argument should, if present, be a space-separated list ++ # of types to ignore when walking up the dependency tree. ++ # Convert it to array ++ ignoretypes=( $3 ) ++ else ++ ignoretypes=() ++ fi + while (( ${#devlist[@]} )) ; do + current=${devlist[0]} + +@@ -287,6 +295,13 @@ get_parent_components() { + if IsInArray "$parent" "${ancestors[@]}" ; then + continue + fi ++ ### ...test if parent is of a correct type if requested... ++ if [[ ${#ignoretypes[@]} -gt 0 ]] ; then ++ parenttype=$(get_component_type "$parent") ++ if IsInArray "$parenttype" "${ignoretypes[@]}" ; then ++ continue ++ fi ++ fi + ### ...and add them to the list + devlist=( "${devlist[@]}" "$parent" ) + ancestors=( "${ancestors[@]}" "$parent" ) +@@ -312,26 +327,28 @@ get_parent_components() { + } + + # find_devices ++# ${2+"$2"} in the following functions ensures that $2 gets passed down quoted if present ++# and ignored if not present + # Find the disk device(s) component $1 resides on. 
+ find_disk() { +- get_parent_components "$1" "disk" ++ get_parent_components "$1" "disk" ${2+"$2"} + } + + find_disk_and_multipath() { +- res=$(find_disk "$1") ++ res=$(find_disk "$1" ${2+"$2"}) + if [[ -n "$res" || "$AUTOEXCLUDE_MULTIPATH" =~ ^[yY1] ]]; then + echo $res + else +- find_multipath "$1" ++ find_multipath "$1" ${2+"$2"} + fi + } + + find_multipath() { +- get_parent_components "$1" "multipath" ++ get_parent_components "$1" "multipath" ${2+"$2"} + } + + find_partition() { +- get_parent_components "$1" "part" ++ get_parent_components "$1" "part" ${2+"$2"} + } + + # Function returns partition number of partition block device name +@@ -358,6 +375,55 @@ get_partition_number() { + + echo $number + } ++ ++# Extract the underlying device name from the full partition device name. ++# Underlying device may be a disk, a multipath device or other devices that can be partitioned. ++# Should we use the information in $LAYOUT_DEPS, like get_parent_component does, ++# instead of string munging? ++function get_device_from_partition() { ++ local partition_block_device ++ local device ++ local partition_number ++ ++ partition_block_device=$1 ++ test -b "$partition_block_device" || BugError "get_device_from_partition called with '$partition_block_device' that is no block device" ++ partition_number=${2-$(get_partition_number $partition_block_device )} ++ # /dev/sda or /dev/mapper/vol34_part or /dev/mapper/mpath99p or /dev/mmcblk0p ++ device=${partition_block_device%$partition_number} ++ ++ # Strip trailing partition remainders like '_part' or '-part' or 'p' ++ # if we have 'mapper' in disk device name: ++ if [[ ${partition_block_device/mapper//} != $partition_block_device ]] ; then ++ # we only expect mpath_partX or mpathpX or mpath-partX ++ case $device in ++ (*p) device=${device%p} ;; ++ (*-part) device=${device%-part} ;; ++ (*_part) device=${device%_part} ;; ++ (*) Log "Unsupported kpartx partition delimiter for $partition_block_device" ++ esac ++ fi ++ ++ # For eMMC devices the trailing 'p' in the $device value ++ # (as in /dev/mmcblk0p that is derived from /dev/mmcblk0p1) ++ # needs to be stripped (to get /dev/mmcblk0), otherwise the ++ # efibootmgr call fails because of a wrong disk device name. ++ # See also https://github.com/rear/rear/issues/2103 ++ if [[ $device = *'/mmcblk'+([0-9])p ]] ; then ++ device=${device%p} ++ fi ++ ++ # For NVMe devices the trailing 'p' in the $device value ++ # (as in /dev/nvme0n1p that is derived from /dev/nvme0n1p1) ++ # needs to be stripped (to get /dev/nvme0n1), otherwise the ++ # efibootmgr call fails because of a wrong disk device name. 
++ # See also https://github.com/rear/rear/issues/1564 ++ if [[ $device = *'/nvme'+([0-9])n+([0-9])p ]] ; then ++ device=${device%p} ++ fi ++ ++ test -b "$device" && echo $device ++} ++ + # Returns partition start block or 'unknown' + # sda/sda1 or + # dm-XX diff --git a/SOURCES/rear-bz1958247.patch b/SOURCES/rear-bz1958247.patch new file mode 100644 index 0000000..b270e8e --- /dev/null +++ b/SOURCES/rear-bz1958247.patch @@ -0,0 +1,1923 @@ +diff --git a/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh +index 64b7a792..6ba7d543 100644 +--- a/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh ++++ b/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh +@@ -1,10 +1,4 @@ +-# create mount point + if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then +- mkdir -p $v "$BUILD_DIR/outputfs" >&2 +- StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +- AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- + if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then + BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD" + fi +diff --git a/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh +index 185dbd95..8525ab1d 100644 +--- a/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh ++++ b/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh +@@ -6,10 +6,4 @@ if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" + fi + + umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs +- +- rmdir $v $BUILD_DIR/outputfs >&2 +- if [[ $? -eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- fi + fi +diff --git a/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh b/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh +index 5c7696db..b6a955db 100644 +--- a/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh ++++ b/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh +@@ -1,9 +1,3 @@ +-# create mount point +-mkdir -p $v "$BUILD_DIR/outputfs" >&2 +-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +-AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- + if [[ "$BACKUP_MOUNTCMD" ]] ; then + BACKUP_URL="var://BACKUP_MOUNTCMD" + fi +diff --git a/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh b/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh +index d79653b4..9bf8f76a 100644 +--- a/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh ++++ b/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh +@@ -3,20 +3,17 @@ + [ -z "${NETFS_KEEP_OLD_BACKUP_COPY}" ] && return + + # do not do this for tapes and special attention for file:///path +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(backup_path $scheme $path) ++local scheme=$( url_scheme $BACKUP_URL ) ++local path=$( url_path $BACKUP_URL ) ++local opath=$( backup_path $scheme $path ) + + # if $opath is empty return silently (e.g. scheme tape) + [ -z "$opath" ] && return 0 + + if ! 
test -f "${opath}/.lockfile" ; then + if test -d "${opath}" ; then +- rm -rf $v "${opath}.old" >&2 +- StopIfError "Could not remove '${opath}.old'" +- mv -f $v "${opath}" "${opath}.old" >&2 +- StopIfError "Could not move '${opath}'" ++ rm -rf $v "${opath}.old" || Error "Could not remove '${opath}.old'" ++ mv -f $v "${opath}" "${opath}.old" || Error "Could not move '${opath}'" + fi + else + # lockfile was already made through the output workflow (hands off) +diff --git a/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh b/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh +index db15bca2..43f5b651 100644 +--- a/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh ++++ b/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh +@@ -2,13 +2,14 @@ + # to $HOSTNAME + + # do not do this for tapes and special attention for file:///path +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(backup_path $scheme $path) ++local scheme=$( url_scheme $BACKUP_URL ) ++local path=$( url_path $BACKUP_URL ) ++local opath=$( backup_path $scheme $path ) + + # if $opath is empty return silently (e.g. scheme tape) + [ -z "$opath" ] && return 0 + +-mkdir -p $v -m0750 "${opath}" >&2 +-StopIfError "Could not mkdir '${opath}'" ++mkdir -p $v -m0750 "${opath}" && return ++ ++# A failure to create the $NETFS_PREFIX sub-directory is fatal: ++Error "Failed to create '$opath' directory for BACKUP_URL=$BACKUP_URL" +diff --git a/usr/share/rear/backup/NETFS/default/250_create_lock.sh b/usr/share/rear/backup/NETFS/default/250_create_lock.sh +index 59090a22..36d547ec 100644 +--- a/usr/share/rear/backup/NETFS/default/250_create_lock.sh ++++ b/usr/share/rear/backup/NETFS/default/250_create_lock.sh +@@ -2,15 +2,13 @@ + # made by a previous mkbackup run when the variable NETFS_KEEP_OLD_BACKUP_COPY has been set + + # do not do this for tapes and special attention for file:///path +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(backup_path $scheme $path) ++local scheme=$( url_scheme $BACKUP_URL ) ++local path=$( url_path $BACKUP_URL ) ++local opath=$( backup_path $scheme $path ) + + # if $opath is empty return silently (e.g. scheme tape) + [ -z "$opath" ] && return 0 + + if test -d "${opath}" ; then +- > "${opath}/.lockfile" +- StopIfError "Could not create '${opath}/.lockfile'" ++ > "${opath}/.lockfile" || Error "Could not create '${opath}/.lockfile'" + fi +diff --git a/usr/share/rear/backup/NETFS/default/970_remove_lock.sh b/usr/share/rear/backup/NETFS/default/970_remove_lock.sh +index f69f7bd8..7038f5b9 100644 +--- a/usr/share/rear/backup/NETFS/default/970_remove_lock.sh ++++ b/usr/share/rear/backup/NETFS/default/970_remove_lock.sh +@@ -1,8 +1,7 @@ + # remove the lockfile +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(backup_path $scheme $path) ++local scheme=$( url_scheme $BACKUP_URL ) ++local path=$( url_path $BACKUP_URL ) ++local opath=$( backup_path $scheme $path ) + + # if $opath is empty return silently (e.g. 
scheme tape) + [ -z "$opath" ] && return 0 +diff --git a/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh b/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh +index f28c6cbf..e1954dc5 100644 +--- a/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh ++++ b/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh +@@ -5,9 +5,3 @@ if [[ "$BACKUP_UMOUNTCMD" ]] ; then + fi + + umount_url $BACKUP_URL $BUILD_DIR/outputfs +- +-rmdir $v $BUILD_DIR/outputfs >&2 +-if [[ $? -eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +-fi +diff --git a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh +deleted file mode 100644 +index 6111f89b..00000000 +--- a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh ++++ /dev/null +@@ -1,33 +0,0 @@ +- +-# Backup all that is explicitly specified in BACKUP_PROG_INCLUDE: +-for backup_include_item in "${BACKUP_PROG_INCLUDE[@]}" ; do +- test "$backup_include_item" && echo "$backup_include_item" +-done > $TMP_DIR/backup-include.txt +- +-# Implicitly also backup all local filesystems as defined in mountpoint_device +-# except BACKUP_ONLY_INCLUDE or MANUAL_INCLUDE is set: +-if ! is_true "$BACKUP_ONLY_INCLUDE" ; then +- if [ "${MANUAL_INCLUDE:-NO}" != "YES" ] ; then +- # Add the mountpoints that will be recovered to the backup include list +- # unless a mountpoint is excluded: +- while read mountpoint device junk ; do +- if ! IsInArray "$mountpoint" "${EXCLUDE_MOUNTPOINTS[@]}" ; then +- echo "$mountpoint" +- fi +- done <"$VAR_DIR/recovery/mountpoint_device" >> $TMP_DIR/backup-include.txt +- fi +-fi +- +-# Exclude all that is explicitly specified in BACKUP_PROG_EXCLUDE: +-for backup_exclude_item in "${BACKUP_PROG_EXCLUDE[@]}" ; do +- test "$backup_exclude_item" && echo "$backup_exclude_item" +-done > $TMP_DIR/backup-exclude.txt +- +-# Implicitly also add excluded mountpoints to the backup exclude list +-# except BACKUP_ONLY_EXCLUDE is set: +-if ! is_true "$BACKUP_ONLY_EXCLUDE" ; then +- for excluded_mountpoint in "${EXCLUDE_MOUNTPOINTS[@]}" ; do +- test "$excluded_mountpoint" && echo "$excluded_mountpoint/" +- done >> $TMP_DIR/backup-exclude.txt +-fi +- +diff --git a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh +new file mode 120000 +index 00000000..d8d12c0b +--- /dev/null ++++ b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh +@@ -0,0 +1 @@ ++../../NETFS/default/400_create_include_exclude_files.sh +\ No newline at end of file +diff --git a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh +deleted file mode 100644 +index 29d85905..00000000 +--- a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh ++++ /dev/null +@@ -1,14 +0,0 @@ +-# Copied from ../../DUPLICITY/default/600_create_python_symlink.sh for YUM +-# make sure we have a symbolic link to the python binary +-( +- cd $ROOTFS_DIR/bin +- for py in $(find . 
-name "python*" ) +- do +- this_py=${py#./*} # should be without ./ +- case $this_py in +- python) break ;; +- python2*|python3*) ln -sf $v $this_py python >&2 ;; +- esac +- done +-) +- +diff --git a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh +new file mode 120000 +index 00000000..d776e5aa +--- /dev/null ++++ b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh +@@ -0,0 +1 @@ ++../../DUPLICITY/default/600_create_python_symlink.sh +\ No newline at end of file +diff --git a/usr/share/rear/lib/framework-functions.sh b/usr/share/rear/lib/framework-functions.sh +index d493e01c..4878216b 100644 +--- a/usr/share/rear/lib/framework-functions.sh ++++ b/usr/share/rear/lib/framework-functions.sh +@@ -107,7 +107,7 @@ function cleanup_build_area_and_end_program () { + # Cleanup build area + Log "Finished in $((SECONDS-STARTTIME)) seconds" + if is_true "$KEEP_BUILD_DIR" ; then +- LogPrint "You should also rm -Rf $BUILD_DIR" ++ LogPrint "You should also rm -Rf --one-file-system $BUILD_DIR" + else + Log "Removing build area $BUILD_DIR" + rm -Rf $TMP_DIR +@@ -118,15 +118,11 @@ function cleanup_build_area_and_end_program () { + mount | grep -q "$BUILD_DIR/outputfs" + if [[ $? -eq 0 ]]; then + # still mounted it seems +- LogPrint "Directory $BUILD_DIR/outputfs still mounted - trying lazy umount" + sleep 2 +- umount -f -l $BUILD_DIR/outputfs >&2 +- rm -Rf $v $BUILD_DIR/outputfs >&2 +- else +- # not mounted so we can safely delete $BUILD_DIR/outputfs +- rm -Rf $BUILD_DIR/outputfs ++ umount_mountpoint_lazy $BUILD_DIR/outputfs + fi +- rm -Rf $v $BUILD_DIR >&2 ++ remove_temporary_mountpoint '$BUILD_DIR/outputfs' || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove" ++ rmdir $v $BUILD_DIR >&2 + fi + Log "End of program reached" + } +diff --git a/usr/share/rear/lib/global-functions.sh b/usr/share/rear/lib/global-functions.sh +index 24545ff7..c1a11615 100644 +--- a/usr/share/rear/lib/global-functions.sh ++++ b/usr/share/rear/lib/global-functions.sh +@@ -315,7 +315,44 @@ function url_path() { + echo /${url_without_scheme#*/} + } + +-backup_path() { ++### Returns true if one can upload files to the URL ++function scheme_accepts_files() { ++ local scheme=$1 ++ case $scheme in ++ (null|tape|obdr) ++ # tapes do not support uploading arbitrary files, one has to handle them ++ # as special case (usually passing the tape device as argument to tar) ++ # null means do not upload anything anywhere, leave the files under /var/lib/rear/output ++ return 1 ++ ;; ++ (*) ++ # most URL schemes support uploading files ++ return 0 ++ ;; ++ esac ++} ++ ++### Returns true if URLs with the given scheme corresponds to a path inside ++### a mountable fileystem and one can put files directly into it. ++### The actual path will be returned by backup_path() / output_path(). ++### If returns false, using backup_path() / output_path() has no sense ++### and one must use a scheme-specific method (like lftp or writing them to a tape) ++### to upload files to the destination instead of just "cp" or other direct filesystem access. 
++### Returning true does not imply that the URL is currently mounted at a filesystem and usable, ++### only that it can be mounted (use mount_url() first) ++function scheme_supports_filesystem() { ++ local scheme=$1 ++ case $scheme in ++ (null|tape|obdr|rsync|fish|ftp|ftps|hftp|http|https|sftp) ++ return 1 ++ ;; ++ (*) ++ return 0 ++ ;; ++ esac ++} ++ ++function backup_path() { + local scheme=$1 + local path=$2 + case $scheme in +@@ -341,13 +378,21 @@ backup_path() { + echo "$path" + } + +-output_path() { ++function output_path() { + local scheme=$1 + local path=$2 ++ ++ # Abort for unmountable schemes ("tape-like" or "ftp-like" schemes). ++ # Returning an empty string for them is not satisfactory: it could lead to caller putting its files ++ # under / instead of the intended location if the result is not checked for emptiness. ++ # Returning ${BUILD_DIR}/outputfs/${OUTPUT_PREFIX} for unmountable URLs is also not satisfactory: ++ # caller could put its files there expecting them to be safely at their destination, ++ # but if the directory is not a mountpoint, they would get silently lost. ++ # The caller needs to check the URL/scheme using scheme_supports_filesystem() ++ # before calling this function. ++ scheme_supports_filesystem $scheme || BugError "output_path() called with scheme $scheme that does not support filesystem access" ++ + case $scheme in +- (null|tape) # no path for tape required +- path="" +- ;; + (file) # type file needs a local path (must be mounted by user) + path="$path/${OUTPUT_PREFIX}" + ;; +@@ -360,17 +405,33 @@ output_path() { + + + ### Mount URL $1 at mountpoint $2[, with options $3] +-mount_url() { ++function mount_url() { + local url=$1 + local mountpoint=$2 + local defaultoptions="rw,noatime" + local options=${3:-"$defaultoptions"} ++ local scheme ++ ++ scheme=$( url_scheme $url ) ++ ++ # The cases where we return 0 are those that do not need umount and also do not need ExitTask handling. ++ # They thus need to be kept in sync with umount_url() so that RemoveExitTasks is used ++ # iff AddExitTask was used in mount_url(). ++ ++ if ! scheme_supports_filesystem $scheme ; then ++ ### Stuff like null|tape|rsync|fish|ftp|ftps|hftp|http|https|sftp ++ ### Don't need to umount anything for these. ++ ### file: supports filesystem access, but is not mounted and unmounted, ++ ### so it has to be handled specially below. ++ ### Similarly for iso: which gets mounted and unmounted only during recovery. ++ return 0 ++ fi + + ### Generate a mount command + local mount_cmd +- case $(url_scheme $url) in +- (null|tape|file|rsync|fish|ftp|ftps|hftp|http|https|sftp) +- ### Don't need to mount anything for these ++ case $scheme in ++ (file) ++ ### Don't need to mount anything for file:, it is already mounted by user + return 0 + ;; + (iso) +@@ -435,23 +496,48 @@ mount_url() { + ;; + esac + ++ # create mount point ++ mkdir -p $v "$mountpoint" || Error "Could not mkdir '$mountpoint'" ++ AddExitTask "remove_temporary_mountpoint '$mountpoint'" ++ + Log "Mounting with '$mount_cmd'" + # eval is required when mount_cmd contains single quoted stuff (e.g. see the above mount_cmd for curlftpfs) + eval $mount_cmd >&2 + StopIfError "Mount command '$mount_cmd' failed." 
+ +- AddExitTask "umount -f $v '$mountpoint' >&2" ++ AddExitTask "perform_umount_url '$url' '$mountpoint' lazy" + return 0 + } + +-### Unmount url $1 at mountpoint $2 +-umount_url() { ++function remove_temporary_mountpoint() { ++ if test -d "$1" ; then ++ rmdir $v "$1" ++ fi ++} ++ ++### Unmount url $1 at mountpoint $2, perform mountpoint cleanup and exit task + error handling ++function umount_url() { + local url=$1 + local mountpoint=$2 ++ local scheme + +- case $(url_scheme $url) in +- (null|tape|file|rsync|fish|ftp|ftps|hftp|http|https|sftp) +- ### Don't need to umount anything for these ++ scheme=$( url_scheme $url ) ++ ++ # The cases where we return 0 are those that do not need umount and also do not need ExitTask handling. ++ # They thus need to be kept in sync with mount_url() so that RemoveExitTasks is used ++ # iff AddExitTask was used in mount_url(). ++ ++ if ! scheme_supports_filesystem $scheme ; then ++ ### Stuff like null|tape|rsync|fish|ftp|ftps|hftp|http|https|sftp ++ ### Don't need to umount anything for these. ++ ### file: supports filesystem access, but is not mounted and unmounted, ++ ### so it has to be handled specially below. ++ ### Similarly for iso: which gets mounted and unmounted only during recovery. ++ return 0 ++ fi ++ ++ case $scheme in ++ (file) + return 0 + ;; + (iso) +@@ -459,42 +545,106 @@ umount_url() { + return 0 + fi + ;; +- (sshfs) +- umount_cmd="fusermount -u $mountpoint" +- ;; +- (davfs) +- umount_cmd="umount $mountpoint" +- # Wait for 3 sek. then remove the cache-dir /var/cache/davfs +- sleep 30 +- # ToDo: put in here the cache-dir from /etc/davfs2/davfs.conf +- # and delete only the just used cache +- #rm -rf /var/cache/davfs2/** +- rm -rf /var/cache/davfs2/*outputfs* +- +- ;; +- (var) +- local var=$(url_host $url) +- umount_cmd="${!var} $mountpoint" ++ (*) ++ # Schemes that actually need nontrivial umount are handled below. ++ # We do not handle them in the default branch because in the case of iso: ++ # it depends on the current workflow whether umount is needed or not. ++ : ++ esac + +- Log "Unmounting with '$umount_cmd'" +- $umount_cmd +- StopIfError "Unmounting failed." ++ # umount_url() is a wrapper that takes care of exit tasks and error handling and mountpoint cleanup. ++ # Therefore it also determines if exit task and mountpoint handling is required and returns early if not. ++ # The actual umount job is performed inside perform_umount_url(). ++ # We do not request lazy umount here because we want umount errors to be reliably reported. ++ perform_umount_url $url $mountpoint || Error "Unmounting '$mountpoint' failed." + +- RemoveExitTask "umount -f $v '$mountpoint' >&2" +- return 0 ++ RemoveExitTask "perform_umount_url '$url' '$mountpoint' lazy" ++ ++ remove_temporary_mountpoint '$mountpoint' && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'" ++ return 0 ++} ++ ++### Unmount url $1 at mountpoint $2 [ lazily if $3 is set to 'lazy' and normal unmount fails ] ++function perform_umount_url() { ++ local url=$1 ++ local mountpoint=$2 ++ local lazy=${3:-} ++ ++ if test $lazy ; then ++ if test $lazy != "lazy" ; then ++ BugError "lazy = $lazy, but it must have the value of 'lazy' or empty" ++ fi ++ fi ++ ++ case $(url_scheme $url) in ++ (sshfs) ++ # does ftpfs need this special case as well? 
++ fusermount -u ${lazy:+'-z'} $mountpoint ++ ;; ++ (davfs) ++ umount_davfs $mountpoint $lazy ++ ;; ++ (var) ++ local var ++ var=$(url_host $url) ++ Log "Unmounting with '${!var} $mountpoint'" ++ # lazy unmount not supported with custom umount command ++ ${!var} $mountpoint + ;; ++ (*) ++ # usual umount command ++ umount_mountpoint $mountpoint $lazy + esac ++ # The switch above must be the last statement in this function and the umount commands must be ++ # the last commands (or part of) in each branch. This ensures proper exit code propagation ++ # to the caller even when set -e is used. ++} + +- umount_mountpoint $mountpoint +- StopIfError "Unmounting '$mountpoint' failed." ++### Helper which unmounts davfs mountpoint $1 and cleans up the cache, ++### performing lazy unmount if $2 = 'lazy' and normal unmount fails. ++function umount_davfs() { ++ local mountpoint=$1 ++ local lazy="${2:-}" + +- RemoveExitTask "umount -f $v '$mountpoint' >&2" +- return 0 ++ if test $lazy ; then ++ if test $lazy != "lazy" ; then ++ BugError "lazy = $lazy, but it must have the value of 'lazy' or empty" ++ fi ++ fi ++ ++ if umount_mountpoint $mountpoint ; then ++ # Wait for 3 sek. then remove the cache-dir /var/cache/davfs ++ sleep 30 ++ # TODO: put in here the cache-dir from /etc/davfs2/davfs.conf ++ # and delete only the just used cache ++ #rm -rf /var/cache/davfs2/** ++ rm -rf /var/cache/davfs2/*outputfs* ++ else ++ local retval=$? ++ ++ if test $lazy ; then ++ # try again to unmount lazily and this time do not delete the cache, it is still in use. ++ LogPrintError "davfs cache /var/cache/davfs2/*outputfs* needs to be cleaned up manually after the lazy unmount finishes" ++ umount_mountpoint_lazy $mountpoint ++ else ++ # propagate errors from umount ++ return $retval ++ fi ++ fi + } + +-### Unmount mountpoint $1 +-umount_mountpoint() { ++### Unmount mountpoint $1 [ lazily if $2 = 'lazy' ] ++### Default implementation for filesystems that don't need anything fancy ++### For special umount commands use perform_umount_url() ++function umount_mountpoint() { + local mountpoint=$1 ++ local lazy=${2:-} ++ ++ if test $lazy ; then ++ if test $lazy != "lazy" ; then ++ BugError "lazy = $lazy, but it must have the value of 'lazy' or empty" ++ fi ++ fi + + ### First, try a normal unmount, + Log "Unmounting '$mountpoint'" +@@ -514,7 +664,21 @@ umount_mountpoint() { + fi + + Log "Unmounting '$mountpoint' failed." +- return 1 ++ ++ if test $lazy ; then ++ umount_mountpoint_lazy $mountpoint ++ else ++ return 1 ++ fi ++} ++ ++### Unmount mountpoint $1 lazily ++### Preferably use "umount_mountpoint $mountpoint lazy", which attempts non-lazy unmount first. ++function umount_mountpoint_lazy() { ++ local mountpoint=$1 ++ ++ LogPrint "Directory $mountpoint still mounted - trying lazy umount" ++ umount $v -f -l $mountpoint >&2 + } + + # Change $1 to user input or leave default value on empty input +diff --git a/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh b/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh +index 97d9dc98..c1ca6d3c 100644 +--- a/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh ++++ b/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh +@@ -8,10 +8,12 @@ + if [[ ! -z "$PXE_TFTP_URL" ]] ; then + # E.g. 
PXE_TFTP_URL=nfs://server/export/nfs/tftpboot + local scheme=$( url_scheme $PXE_TFTP_URL ) +- local path=$( url_path $PXE_TFTP_URL ) +- mkdir -p $v "$BUILD_DIR/tftpbootfs" >&2 +- StopIfError "Could not mkdir '$BUILD_DIR/tftpbootfs'" +- AddExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" ++ ++ # We need filesystem access to the destination (schemes like ftp:// are not supported) ++ if ! scheme_supports_filesystem $scheme ; then ++ Error "Scheme $scheme for PXE output not supported, use a scheme that supports mounting (like nfs: )" ++ fi ++ + mount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs + # However, we copy under $OUTPUT_PREFIX_PXE directory (usually HOSTNAME) to have different clients on one pxe server + PXE_TFTP_LOCAL_PATH=$BUILD_DIR/tftpbootfs +@@ -67,10 +69,6 @@ fi + if [[ ! -z "$PXE_TFTP_URL" ]] ; then + LogPrint "Copied kernel+initrd $( du -shc $KERNEL_FILE "$TMP_DIR/$REAR_INITRD_FILENAME" | tail -n 1 | tr -s "\t " " " | cut -d " " -f 1 ) to $PXE_TFTP_URL/$OUTPUT_PREFIX_PXE" + umount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs +- rmdir $BUILD_DIR/tftpbootfs >&2 +- if [[ $? -eq 0 ]] ; then +- RemoveExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" +- fi + else + # legacy way PXE_TFTP_PATH + LogPrint "Copied kernel+initrd $( du -shc $KERNEL_FILE "$TMP_DIR/$REAR_INITRD_FILENAME" | tail -n 1 | tr -s "\t " " " | cut -d " " -f 1 ) to $PXE_TFTP_PATH" +diff --git a/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh b/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh +index a3add8ba..ab9483a8 100644 +--- a/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh ++++ b/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh +@@ -1,4 +1,4 @@ +-# 81_create_pxelinux_cfg.sh ++# 810_create_pxelinux_cfg.sh + # + # create pxelinux config on PXE server for Relax-and-Recover + # +@@ -11,10 +11,12 @@ if [[ ! -z "$PXE_CONFIG_URL" ]] ; then + # E.g. PXE_CONFIG_URL=nfs://server/export/nfs/tftpboot/pxelinux.cfg + # Better be sure that on 'server' the directory /export/nfs/tftpboot/pxelinux.cfg exists + local scheme=$( url_scheme $PXE_CONFIG_URL ) +- local path=$( url_path $PXE_CONFIG_URL ) +- mkdir -p $v "$BUILD_DIR/tftpbootfs" >&2 +- StopIfError "Could not mkdir '$BUILD_DIR/tftpbootfs'" +- AddExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" ++ ++ # We need filesystem access to the destination (schemes like ftp:// are not supported) ++ if ! scheme_supports_filesystem $scheme ; then ++ Error "Scheme $scheme for PXE output not supported, use a scheme that supports mounting (like nfs: )" ++ fi ++ + mount_url $PXE_CONFIG_URL $BUILD_DIR/tftpbootfs + PXE_LOCAL_PATH=$BUILD_DIR/tftpbootfs + else +@@ -105,10 +107,6 @@ popd >/dev/null + if [[ ! -z "$PXE_CONFIG_URL" ]] ; then + LogPrint "Created pxelinux config '${PXE_CONFIG_PREFIX}$HOSTNAME' and symlinks for $PXE_CREATE_LINKS adresses in $PXE_CONFIG_URL" + umount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs +- rmdir $BUILD_DIR/tftpbootfs >&2 +- if [[ $? 
-eq 0 ]] ; then +- RemoveExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2" +- fi + RESULT_FILES=( "${RESULT_FILES[@]}" ) + else + LogPrint "Created pxelinux config '${PXE_CONFIG_PREFIX}$HOSTNAME' and symlinks for $PXE_CREATE_LINKS adresses in $PXE_CONFIG_PATH" +diff --git a/usr/share/rear/output/default/100_mount_output_path.sh b/usr/share/rear/output/default/100_mount_output_path.sh +index 22ef36de..34ea8e5e 100644 +--- a/usr/share/rear/output/default/100_mount_output_path.sh ++++ b/usr/share/rear/output/default/100_mount_output_path.sh +@@ -1,9 +1,3 @@ +-# create mount point +-mkdir -p $v "$BUILD_DIR/outputfs" >&2 +-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +-AddExitTask "rm -Rf $v $BUILD_DIR/outputfs >&2" +- + if [[ "$OUTPUT_MOUNTCMD" ]] ; then + OUTPUT_URL="var://$OUTPUT_MOUNTCMD" + fi +diff --git a/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh b/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh +index 00339a96..06326114 100644 +--- a/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh ++++ b/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh +@@ -3,22 +3,20 @@ + [ -z "${KEEP_OLD_OUTPUT_COPY}" ] && return + + # do not do this for tapes and special attention for file:///path +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(output_path $scheme $path) ++local scheme=$( url_scheme $OUTPUT_URL ) ++local path=$( url_path $OUTPUT_URL ) + +-# if $opath is empty return silently (e.g. scheme tape) +-[ -z "$opath" ] && return 0 ++# if filesystem access to url is unsupported return silently (e.g. scheme tape) ++scheme_supports_filesystem $scheme || return 0 ++ ++local opath=$( output_path $scheme $path ) + + # an old lockfile from a previous run not cleaned up by output is possible + [[ -f ${opath}/.lockfile ]] && rm -f ${opath}/.lockfile >&2 + + if test -d "${opath}" ; then +- rm -rf $v "${opath}.old" >&2 +- StopIfError "Could not remove '${opath}.old'" ++ rm -rf $v "${opath}.old" || Error "Could not remove '${opath}.old'" + # below statement was 'cp -af' instead of 'mv -f' (see issue #192) +- mv -f $v "${opath}" "${opath}.old" >&2 +- StopIfError "Could not move '${opath}'" ++ mv -f $v "${opath}" "${opath}.old" || Error "Could not move '${opath}'" + fi + # the ${BUILD_DIR}/outputfs/${OUTPUT_PREFIX} will be created by output/default/200_make_prefix_dir.sh +diff --git a/usr/share/rear/output/default/200_make_prefix_dir.sh b/usr/share/rear/output/default/200_make_prefix_dir.sh +index d300e4af..e21716fe 100644 +--- a/usr/share/rear/output/default/200_make_prefix_dir.sh ++++ b/usr/share/rear/output/default/200_make_prefix_dir.sh +@@ -1,14 +1,17 @@ + # if set, create $OUTPUT_PREFIX under the mounted network filesystem share. This defaults + # to $HOSTNAME + +-# do not do this for tapes and special attention for file:///path +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(output_path $scheme $path) ++# Do not do this for tapes and special attention for file:///path ++local scheme=$( url_scheme $OUTPUT_URL ) ++local path=$( url_path $OUTPUT_URL ) + +-# if $opath is empty return silently (e.g. scheme tape) +-[ -z "$opath" ] && return 0 ++# If filesystem access to url is unsupported return silently (e.g. 
scheme tape) ++scheme_supports_filesystem $scheme || return 0 + +-mkdir -p $v -m0750 "${opath}" >&2 +-StopIfError "Could not mkdir '${opath}'" ++local opath=$( output_path $scheme $path ) ++ ++# Create $OUTPUT_PREFIX sub-directory: ++mkdir -p $v -m0750 "${opath}" && return ++ ++# A failure to create the $OUTPUT_PREFIX sub-directory is fatal: ++Error "Failed to create '$opath' directory for OUTPUT_URL=$OUTPUT_URL" +diff --git a/usr/share/rear/output/default/250_create_lock.sh b/usr/share/rear/output/default/250_create_lock.sh +index 49c75601..d792b036 100644 +--- a/usr/share/rear/output/default/250_create_lock.sh ++++ b/usr/share/rear/output/default/250_create_lock.sh +@@ -2,15 +2,14 @@ + # made by a previous mkrescue run when the variable KEEP_OLD_OUTPUT_COPY has been set + + # do not do this for tapes and special attention for file:///path +-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL" +-local scheme=$(url_scheme ${!url}) +-local path=$(url_path ${!url}) +-local opath=$(output_path $scheme $path) ++local scheme=$( url_scheme $OUTPUT_URL ) ++local path=$( url_path $OUTPUT_URL ) + +-# if $opath is empty return silently (e.g. scheme tape) +-[ -z "$opath" ] && return 0 ++# if filesystem access to url is unsupported return silently (e.g. scheme tape) ++scheme_supports_filesystem $scheme || return 0 ++ ++local opath=$( output_path $scheme $path ) + + if test -d "${opath}" ; then +- > "${opath}/.lockfile" +- StopIfError "Could not create '${opath}/.lockfile'" ++ > "${opath}/.lockfile" || Error "Could not create '${opath}/.lockfile'" + fi +diff --git a/usr/share/rear/output/default/950_copy_result_files.sh b/usr/share/rear/output/default/950_copy_result_files.sh +index ca553eed..fc4da30a 100644 +--- a/usr/share/rear/output/default/950_copy_result_files.sh ++++ b/usr/share/rear/output/default/950_copy_result_files.sh +@@ -1,14 +1,29 @@ + # +-# copy resulting files to network output location ++# output/default/950_copy_result_files.sh ++# Copy the resulting files to the output location. ++# ++ ++# For example for "rear mkbackuponly" there are usually no result files ++# that would need to be copied here to the output location: ++test "${RESULT_FILES[*]:-}" || return 0 + + local scheme=$( url_scheme $OUTPUT_URL ) + local host=$( url_host $OUTPUT_URL ) + local path=$( url_path $OUTPUT_URL ) +-local opath=$( output_path $scheme $path ) + +-# if $opath is empty return silently (e.g. scheme tape) +-if [[ -z "$opath" || -z "$OUTPUT_URL" || "$scheme" == "obdr" || "$scheme" == "tape" ]]; then +- return 0 ++if [ -z "$OUTPUT_URL" ] || ! scheme_accepts_files $scheme ; then ++ if [ "$scheme" == "null" -o -z "$OUTPUT_URL" ] ; then ++ # There are result files to copy, but OUTPUT_URL=null indicates that we are not interested in them ++ # TODO: empty OUTPUT_URL seems to be equivalent to null, should we continue to allow that, ++ # or enforce setting it explicitly? ++ return 0 ++ else ++ # There are files to copy, but schemes like tape: do not allow files to be stored. The files would be lost. ++ # Do not allow that. ++ # Schemes like obdr: that store the results themselves should clear RESULT_FILES to indicate that nothing is to be done. ++ # Is this considered a bug in ReaR (BugError), or a user misconfiguration (Error) when this happens? ++ BugError "Output scheme $scheme does not accept result files ${RESULT_FILES[*]}, use OUTPUT_URL=null if you don't want to copy them anywhere." 
++ fi + fi + + LogPrint "Copying resulting files to $scheme location" +@@ -30,35 +45,41 @@ LogPrint "Saving $RUNTIME_LOGFILE as $final_logfile_name to $scheme location" + # Add the README, VERSION and the final logfile to the RESULT_FILES array + RESULT_FILES=( "${RESULT_FILES[@]}" "$TMP_DIR/VERSION" "$TMP_DIR/README" "$TMP_DIR/$final_logfile_name" ) + +-# For example for "rear mkbackuponly" there are usually no result files +-# that would need to be copied here to the network output location: +-test "$RESULT_FILES" || return 0 +- + # The real work (actually copying resulting files to the network output location): ++if scheme_supports_filesystem $scheme ; then ++ # We can access the destination as a mounted filesystem. Do nothing special, ++ # simply copy the output files there. (Covers stuff like nfs|cifs|usb|file|sshfs|ftpfs|davfs.) ++ # This won't work for iso:// , but iso can't be a OUTPUT_URL scheme, this is checked in ++ # prep/default/040_check_backup_and_output_scheme.sh ++ # This covers also unknown schemes, because mount_url() will attempt to mount them and fail if this is not possible, ++ # so if we got here, the URL had been mounted successfully. ++ local opath ++ opath=$( output_path $scheme $path ) ++ LogPrint "Copying result files '${RESULT_FILES[*]}' to $opath at $scheme location" ++ # Copy each result file one by one to avoid usually false error exits as in ++ # https://github.com/rear/rear/issues/1711#issuecomment-380009044 ++ # where in case of an improper RESULT_FILES array member 'cp' can error out with something like ++ # cp: will not overwrite just-created '/tmp/rear.XXX/outputfs/f121/rear-f121.log' with '/tmp/rear.XXX/tmp/rear-f121.log' ++ # See ++ # https://stackoverflow.com/questions/4669420/have-you-ever-got-this-message-when-moving-a-file-mv-will-not-overwrite-just-c ++ # which is about the same for 'mv', how to reproduce it: ++ # mkdir a b c ++ # touch a/f b/f ++ # mv a/f b/f c/ ++ # mv: will not overwrite just-created 'c/f' with 'b/f' ++ # It happens because two different files with the same name would be moved to the same place with only one command. ++ # The -f option won't help for this case, it only applies when there already is a target file that will be overwritten. 
++ # Accordingly it is sufficient (even without '-f') to copy each result file one by one: ++ for result_file in "${RESULT_FILES[@]}" ; do ++ cp $v "$result_file" "${opath}/" >&2 || Error "Could not copy result file $result_file to $opath at $scheme location" ++ done ++ ++ return 0 ++fi ++ ++# Filesystem access to output destination not supported, use a scheme-specific tool (rsync, lftp) + case "$scheme" in +- (nfs|cifs|usb|file|sshfs|ftpfs|davfs) +- Log "Copying result files '${RESULT_FILES[@]}' to $opath at $scheme location" +- # Copy each result file one by one to avoid usually false error exits as in +- # https://github.com/rear/rear/issues/1711#issuecomment-380009044 +- # where in case of an improper RESULT_FILES array member 'cp' can error out with something like +- # cp: will not overwrite just-created '/tmp/rear.XXX/outputfs/f121/rear-f121.log' with '/tmp/rear.XXX/tmp/rear-f121.log' +- # See +- # https://stackoverflow.com/questions/4669420/have-you-ever-got-this-message-when-moving-a-file-mv-will-not-overwrite-just-c +- # which is about the same for 'mv', how to reproduce it: +- # mkdir a b c +- # touch a/f b/f +- # mv a/f b/f c/ +- # mv: will not overwrite just-created 'c/f' with 'b/f' +- # It happens because two different files with the same name would be moved to the same place with only one command. +- # The -f option won't help for this case, it only applies when there already is a target file that will be overwritten. +- # Accordingly it is sufficient (even without '-f') to copy each result file one by one: +- for result_file in "${RESULT_FILES[@]}" ; do +- cp $v "$result_file" "${opath}/" >&2 || Error "Could not copy result file $result_file to $opath at $scheme location" +- done +- ;; + (fish|ftp|ftps|hftp|http|https|sftp) +- # FIXME: Verify if usage of $array[*] instead of "${array[@]}" is actually intended here +- # see https://github.com/rear/rear/issues/1068 + LogPrint "Copying result files '${RESULT_FILES[*]}' to $scheme location" + Log "lftp -c open $OUTPUT_URL; mput ${RESULT_FILES[*]}" + lftp -c "open $OUTPUT_URL; mput ${RESULT_FILES[*]}" || Error "Problem transferring result files to $OUTPUT_URL" +@@ -66,12 +87,15 @@ case "$scheme" in + (rsync) + # If BACKUP = RSYNC output/RSYNC/default/900_copy_result_files.sh took care of it: + test "$BACKUP" = "RSYNC" && return 0 +- LogPrint "Copying result files '${RESULT_FILES[@]}' to $scheme location" +- Log "rsync -a $v ${RESULT_FILES[@]} ${host}:${path}" ++ LogPrint "Copying result files '${RESULT_FILES[*]}' to $scheme location" ++ Log "rsync -a $v ${RESULT_FILES[*]} ${host}:${path}" + rsync -a $v "${RESULT_FILES[@]}" "${host}:${path}" || Error "Problem transferring result files to $OUTPUT_URL" + ;; + (*) +- Error "Invalid scheme '$scheme' in '$OUTPUT_URL'." ++ # Should be unreachable, if we got here, it is a bug. ++ # Unknown schemes are handled in mount_url(), which tries to mount them and aborts if they are unsupported. ++ # If they can be mounted, they fall under the scheme_supports_filesystem branch above. ++ BugError "Invalid scheme '$scheme' in '$OUTPUT_URL'." 
+ ;; + esac + +diff --git a/usr/share/rear/output/default/970_remove_lock.sh b/usr/share/rear/output/default/970_remove_lock.sh +index 56640839..3b1b97cc 100644 +--- a/usr/share/rear/output/default/970_remove_lock.sh ++++ b/usr/share/rear/output/default/970_remove_lock.sh +@@ -1,10 +1,11 @@ + # remove the lockfile + local scheme=$(url_scheme $OUTPUT_URL) + local path=$(url_path $OUTPUT_URL) +-local opath=$(output_path $scheme $path) + +-# if $opath is empty return silently (e.g. scheme tape) +-[ -z "$opath" ] && return 0 ++# if filesystem access to url is unsupported return silently (e.g. scheme tape) ++scheme_supports_filesystem $scheme || return 0 ++ ++local opath=$( output_path $scheme $path ) + + # when OUTPUT_URL=BACKUP_URL we keep the lockfile to avoid double moves of the directory + [[ "$OUTPUT_URL" != "$BACKUP_URL" ]] && rm -f $v "${opath}/.lockfile" >&2 +diff --git a/usr/share/rear/output/default/980_umount_output_dir.sh b/usr/share/rear/output/default/980_umount_output_dir.sh +index 9a9995bd..abf0cd53 100644 +--- a/usr/share/rear/output/default/980_umount_output_dir.sh ++++ b/usr/share/rear/output/default/980_umount_output_dir.sh +@@ -9,12 +9,3 @@ if [[ -z "$OUTPUT_URL" ]] ; then + fi + + umount_url $OUTPUT_URL $BUILD_DIR/outputfs +- +-[[ -d $BUILD_DIR/outputfs/$NETFS_PREFIX ]] && rm -rf $v $BUILD_DIR/outputfs/$NETFS_PREFIX +-[[ -d $BUILD_DIR/outputfs/$RSYNC_PREFIX ]] && rm -rf $v $BUILD_DIR/outputfs/$RSYNC_PREFIX +- +-rmdir $v $BUILD_DIR/outputfs >&2 +-if [[ $? -eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rm -Rf $v $BUILD_DIR/outputfs >&2" +-fi +diff --git a/usr/share/rear/prep/BORG/default/250_mount_usb.sh b/usr/share/rear/prep/BORG/default/250_mount_usb.sh +index 53594105..05be0179 100644 +--- a/usr/share/rear/prep/BORG/default/250_mount_usb.sh ++++ b/usr/share/rear/prep/BORG/default/250_mount_usb.sh +@@ -8,8 +8,5 @@ + # When BORGBACKUP_HOST is set, we don't need to mount anything as SSH + # backup destination will be handled internally by Borg it self. + if [[ -z $BORGBACKUP_HOST ]]; then +- mkdir -p $v "$borg_dst_dev" >&2 +- StopIfError "Could not mkdir '$borg_dst_dev'" +- +- mount_url usb://$USB_DEVICE $borg_dst_dev ++ mount_url "usb://$USB_DEVICE" "$borg_dst_dev" + fi +diff --git a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh +deleted file mode 100644 +index 263445d3..00000000 +--- a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh ++++ /dev/null +@@ -1,300 +0,0 @@ +-# Copied from ../../NETFS/default/070_set_backup_archive.sh for YUM +-### Determine the name of the backup archive +-### This needs to be after we special case USB devices. 
+- +-# FIXME: backuparchive is no local variable (regardless that it is lowercased) +- +-# If TAPE_DEVICE is specified, use that: +-if test "$TAPE_DEVICE" ; then +- backuparchive="$TAPE_DEVICE" +- LogPrint "Using backup archive '$backuparchive'" +- return +-fi +- +-local backup_file_suffix="$BACKUP_PROG_SUFFIX$BACKUP_PROG_COMPRESS_SUFFIX" +-local backup_file_name="$BACKUP_PROG_ARCHIVE$backup_file_suffix" +- +-local scheme=$( url_scheme $BACKUP_URL ) +-local path=$( url_path $BACKUP_URL ) +-case "$scheme" in +- (file|iso) +- # Define the output path according to the scheme +- local outputpath=$( backup_path $scheme $path ) +- backuparchive="$outputpath/$backup_file_name" +- LogPrint "Using backup archive '$backuparchive'" +- return +- ;; +- (tape) +- # TODO: Check if that case is really needed. +- # Perhaps prep/default/030_translate_tape.sh does already all what is needed. +- backuparchive=$path +- LogPrint "Using backup archive '$backuparchive'" +- return +- ;; +-esac +- +-local backup_directory=$BUILD_DIR/outputfs/$NETFS_PREFIX +- +-# Normal (i.e. non-incremental/non-differential) backup: +-if ! test "incremental" = "$BACKUP_TYPE" -o "differential" = "$BACKUP_TYPE" ; then +- # In case of normal (i.e. non-incremental) backup there is only one restore archive +- # and its name is the same as the backup archive (usually 'backup.tar.gz'): +- backuparchive="$backup_directory/$backup_file_name" +- LogPrint "Using backup archive '$backuparchive'" +- # This script is also run during "rear recover/restoreonly" where RESTORE_ARCHIVES must be set. +- local backup_restore_workflows=( "recover" "restoreonly" ) +- if IsInArray $WORKFLOW ${backup_restore_workflows[@]} ; then +- # Only set RESTORE_ARCHIVES the backup archive is actually accessible +- # cf. https://github.com/rear/rear/issues/1166 +- if test -r "$backuparchive" ; then +- RESTORE_ARCHIVES=( "$backuparchive" ) +- else +- # In case of USB backup there is the subsequent 540_choose_backup_archive.sh script +- # that shows a backup selection dialog when RESTORE_ARCHIVES is not already set. +- if test "usb" = "$scheme" ; then +- LogPrint "Backup archive '$backuparchive' not readable. Need to select another one." +- else +- Error "Backup archive '$backuparchive' not readable." +- fi +- fi +- fi +- return +-fi +- +-# Incremental or differential backup: +-set -e -u -o pipefail +-# Incremental or differential backup only works for the NETFS backup method +-# and only with the 'tar' backup program: +-if ! test "NETFS" = "$BACKUP" -a "tar" = "$BACKUP_PROG" ; then +- Error "BACKUP_TYPE incremental or differential only works with BACKUP=NETFS and BACKUP_PROG=tar" +-fi +-# Incremental or differential backup is currently only known to work with BACKUP_URL=nfs://. +-# Other BACKUP_URL schemes may work and at least BACKUP_URL=usb:///... needs special setup +-# to work with incremental or differential backup (see https://github.com/rear/rear/issues/1145): +-if test "usb" = "$scheme" ; then +- # When USB_SUFFIX is set the compliance mode is used where +- # backup on USB works in compliance with backup on NFS which means +- # a fixed backup directory where incremental or differential backups work. 
+- # Use plain $USB_SUFFIX and not "$USB_SUFFIX" because when USB_SUFFIX contains only blanks +- # test "$USB_SUFFIX" would result true because test " " results true: +- test $USB_SUFFIX || Error "BACKUP_TYPE incremental or differential requires USB_SUFFIX for BACKUP_URL=usb" +-fi +-# Incremental or differential backup and keeping old backup contradict each other (mutual exclusive) +-# so that NETFS_KEEP_OLD_BACKUP_COPY must not be 'true' in case of incremental or differential backup: +-if test "$NETFS_KEEP_OLD_BACKUP_COPY" ; then +- NETFS_KEEP_OLD_BACKUP_COPY="" +- LogPrint "Disabled NETFS_KEEP_OLD_BACKUP_COPY because BACKUP_TYPE incremental or differential does not work with that" +-fi +-# For incremental or differential backup some date values (weekday, YYYY-MM-DD, HHMM) are needed +-# that must be consistent for one single point of the current time which means +-# one cannot call the 'date' command several times because then there would be +-# a small probability that e.g. weekday, YYYY-MM-DD, HHMM do not match +-# one single point in time (in particular when midnight passes in between). +-# Therefore the output of one single 'date' call is storend in an array and +-# the array elements are then assinged to individual variables as needed: +-local current_date_output=( $( date '+%a %Y-%m-%d %H%M' ) ) +-local current_weekday="${current_date_output[0]}" +-local current_yyyy_mm_dd="${current_date_output[1]}" +-local current_hhmm="${current_date_output[2]}" +-# The date FULLBACKUP_OUTDATED_DAYS ago is needed to check if the latest full backup is too old. +-# When the latest full backup is more than FULLBACKUP_OUTDATED_DAYS ago a new full backup is made. +-# This separated call of the 'date' command which is technically needed because is is +-# for another point in time (e.g. 7 days ago) is run after the above call of the 'date' +-# command for the current time to be on the safe side when midnight passes in between +-# both 'date' commands which would then result that a new full backup is made +-# when the latest full backup is basically right now FULLBACKUP_OUTDATED_DAYS ago because +-# the stored date of the latest full backup is the current date at the time when it was made. +-# Example (assuming FULLBACKUP_OUTDATED_DAYS=7 ): +-# The latest full backup was made on Sunday January 10 in 2016 (just before midnight). +-# One week later this script runs again while midnight passes between the two 'date' calls +-# so that current_date_output[@]="Sun 2016-01-17 0000" (still Sunday January 17 in 2016) +-# and yyyymmdd_max_days_ago=20160111 (already Monday January 11 in 2016), then +-# Sunday January 10 is older than Monday January 11 so that a new full backup is made: +-test "$FULLBACKUP_OUTDATED_DAYS" || FULLBACKUP_OUTDATED_DAYS="7" +-local yyyymmdd_max_days_ago=$( date '+%Y%m%d' --date="$FULLBACKUP_OUTDATED_DAYS days ago" ) +-# Full backup file names are of the form YYYY-MM-DD-HHMM-F.tar.gz +-# where the 'F' denotes a full backup: +-local full_backup_marker="F" +-# Incremental backup file names are of the form YYYY-MM-DD-HHMM-I.tar.gz +-# where the 'I' denotes an incremental backup: +-local incremental_backup_marker="I" +-# Differential backup file names are of the form YYYY-MM-DD-HHMM-D.tar.gz +-# where the last 'D' denotes a differential backup: +-local differential_backup_marker="D" +-# In case of incremental or differential backup the RESTORE_ARCHIVES contains +-# first the latest full backup file. 
+-# In case of incremental backup the RESTORE_ARCHIVES contains +-# after the latest full backup file each incremental backup +-# in the ordering how they must be restored. +-# For example when the latest full backup was made on Sunday +-# plus each subsequent weekday a separated incremental backup was made, +-# then during a "rear recover" on Wednesday morning +-# first the full backup from Sunday has to be restored, +-# then the incremental backup from Monday, and +-# finally the incremental backup from Tuesday. +-# In case of differential backup the RESTORE_ARCHIVES contains +-# after the latest full backup file the latest differential backup. +-# For example when the latest full backup was made on Sunday +-# plus each subsequent weekday a separated differential backup was made, +-# then during a "rear recover" on Wednesday morning +-# first the full backup from Sunday has to be restored, +-# and finally the differential backup from Tuesday +-# (i.e. the differential backup from Monday is skipped). +-# The date format YYYY-MM-DD that is used here is crucial. +-# It is the ISO 8601 format 'year-month-day' to specify a day of a year +-# that is accepted by 'tar' for the '--newer' option, +-# see the GNU tar manual section "Operating Only on New Files" +-# at https://www.gnu.org/software/tar/manual/html_node/after.html +-# and the GNU tar manual section "Calendar date items" +-# at https://www.gnu.org/software/tar/manual/html_node/Calendar-date-items.html#SEC124 +-local date_glob_regex="[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]" +-local date_time_glob_regex="$date_glob_regex-[0-9][0-9][0-9][0-9]" +-# Determine what kind of backup must be created, 'full' or 'incremental' or 'differential' +-# (the empty default means it is undecided what kind of backup must be created): +-local create_backup_type="" +-# Code regarding creating a backup is useless during "rear recover" and +-# messages about creating a backup are misleading during "rear recover": +-local recovery_workflows=( "recover" "layoutonly" "restoreonly" ) +-if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # When today is a specified full backup day, do a full backup in any case +- # (regardless if there is already a full backup of this day): +- if IsInArray "$current_weekday" "${FULLBACKUPDAY[@]}" ; then +- create_backup_type="full" +- LogPrint "Today's weekday ('$current_weekday') is a full backup day that triggers a new full backup in any case" +- fi +-fi +-# Get the latest full backup (if exists): +-local full_backup_glob_regex="$date_time_glob_regex-$full_backup_marker$backup_file_suffix" +-# Here things like 'find /path/to/dir -name '*.tar.gz' | sort' are used because +-# one cannot use bash globbing via commands like 'ls /path/to/dir/*.tar.gz' +-# because /usr/sbin/rear sets the nullglob bash option which leads to plain 'ls' +-# when '/path/to/dir/*.tar.gz' matches nothing (i.e. when no backup file exists) +-# so that then plain 'ls' would result nonsense. 
+-local latest_full_backup=$( find $backup_directory -name "$full_backup_glob_regex" | sort | tail -n1 ) +-# A latest full backup is found: +-if test "$latest_full_backup" ; then +- local latest_full_backup_file_name=$( basename "$latest_full_backup" ) +- # The full_or_incremental_backup_glob_regex is also needed below for non-"recover" WORKFLOWs +- # to set the right variables for creating an incremental backup: +- local full_or_incremental_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$incremental_backup_marker]$backup_file_suffix" +- # Code regarding creating a backup is useless during "rear recover" and +- # messages about creating a backup are misleading during "rear recover": +- if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # There is nothing to do here if it is already decided that +- # a full backup must be created (see "full backup day" above"): +- if ! test "full" = "$create_backup_type" ; then +- local latest_full_backup_date=$( echo $latest_full_backup_file_name | grep -o "$date_glob_regex" ) +- local yyyymmdd_latest_full_backup=$( echo $latest_full_backup_date | tr -d '-' ) +- # Check if the latest full backup is too old: +- if test $yyyymmdd_latest_full_backup -lt $yyyymmdd_max_days_ago ; then +- create_backup_type="full" +- LogPrint "Latest full backup date '$latest_full_backup_date' too old (more than $FULLBACKUP_OUTDATED_DAYS days ago) triggers new full backup" +- else +- # When a latest full backup is found that is not too old +- # a BACKUP_TYPE (incremental or differential) backup will be created: +- create_backup_type="$BACKUP_TYPE" +- LogPrint "Latest full backup found ($latest_full_backup_file_name) triggers $BACKUP_TYPE backup" +- fi +- fi +- else +- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: +- case "$BACKUP_TYPE" in +- (incremental) +- # When a latest full backup is found use that plus all later incremental backups for restore: +- # The following command is a bit tricky: +- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-I.tar.gz files in the backup directory and sorts them +- # and finally it outputs only those that match the latest full backup file name and incremental backups that got sorted after that +- # where it is mandatory that the backup file names sort by date (i.e. date must be the leading part of the backup file names): +- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" ) ) +- ;; +- (differential) +- # For differential backup use the latest full backup plus the one latest differential backup for restore: +- # The following command is a bit tricky: +- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-D.tar.gz files in the backup directory and sorts them +- # then it outputs only those that match the latest full backup file name and all differential backups that got sorted after that +- # and then it outputs only the first line (i.e. the full backup) and the last line (i.e. the latest differential backup) +- # but when no differential backup exists (i.e. 
when only the full backup exists) the first line is also the last line +- # so that "sed -n -e '1p;$p'" outputs the full backup twice which is corrected by the final "sort -u": +- local full_or_differential_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$differential_backup_marker]$backup_file_suffix" +- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_differential_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | sed -n -e '1p;$p' | sort -u ) ) +- ;; +- (*) +- BugError "Unexpected BACKUP_TYPE '$BACKUP_TYPE'" +- ;; +- esac +- # Tell the user what will be restored: +- local restore_archives_file_names="" +- for restore_archive in "${RESTORE_ARCHIVES[@]}" ; do +- restore_archives_file_names="$restore_archives_file_names $( basename "$restore_archive" )" +- done +- LogPrint "For backup restore using $restore_archives_file_names" +- fi +-# No latest full backup is found: +-else +- # Code regarding creating a backup is useless during "rear recover" and +- # messages about creating a backup are misleading during "rear recover": +- if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # If no latest full backup is found create one during "rear mkbackup": +- create_backup_type="full" +- LogPrint "No full backup found (YYYY-MM-DD-HHMM-F.tar.gz) triggers full backup" +- else +- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: +- # If no latest full backup is found (i.e. no file name matches the YYYY-MM-DD-HHMM-F.tar.gz form) +- # fall back to what is done in case of normal (i.e. non-incremental/non-differential) backup +- # and hope for the best (i.e. that a backup_directory/backup_file_name actually exists). +- # In case of normal (i.e. non-incremental/non-differential) backup there is only one restore archive +- # and its name is the same as the backup archive (usually 'backup.tar.gz'). +- # This is only a fallback setting to be more on the safe side for "rear recover". +- # Initially for the very fist run of incremental backup during "rear mkbackup" +- # a full backup file of the YYYY-MM-DD-HHMM-F.tar.gz form will be created. +- RESTORE_ARCHIVES=( "$backup_directory/$backup_file_name" ) +- LogPrint "Using $backup_file_name for backup restore" +- fi +-fi +-# Code regarding creating a backup is useless during "rear recover" and +-# messages about creating a backup are misleading during "rear recover": +-if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # Set the right variables for creating a backup (but do not actually do anything at this point): +- case "$create_backup_type" in +- (full) +- local new_full_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$full_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_full_backup_file_name" +- BACKUP_PROG_CREATE_NEWER_OPTIONS="-V $new_full_backup_file_name" +- LogPrint "Performing full backup using backup archive '$new_full_backup_file_name'" +- ;; +- (incremental) +- local new_incremental_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$incremental_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_incremental_backup_file_name" +- # Get the latest latest incremental backup that is based on the latest full backup (if exists): +- local incremental_backup_glob_regex="$date_time_glob_regex-$incremental_backup_marker$backup_file_suffix" +- # First get the latest full backup plus all later incremental backups (cf. 
how RESTORE_ARCHIVES is set in case of incremental backup) +- # then grep only the incremental backups and from the incremental backups use only the last one (if exists): +- local latest_incremental_backup=$( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | grep "$incremental_backup_glob_regex" | tail -n1 ) +- if test "$latest_incremental_backup" ; then +- # A latest incremental backup that is based on the latest full backup is found: +- local latest_incremental_backup_file_name=$( basename $latest_incremental_backup ) +- LogPrint "Latest incremental backup found ($latest_incremental_backup_file_name) that is newer than the latest full backup" +- local latest_incremental_backup_date=$( echo $latest_incremental_backup_file_name | grep -o "$date_glob_regex" ) +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_incremental_backup_date -V $latest_incremental_backup_file_name" +- LogPrint "Performing incremental backup for files newer than $latest_incremental_backup_date using backup archive '$new_incremental_backup_file_name'" +- else +- # When there is not yet an incremental backup that is based on the latest full backup +- # the new created incremental backup must be based on the latest full backup: +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" +- LogPrint "Performing incremental backup for files newer than $latest_full_backup_date using backup archive '$new_incremental_backup_file_name'" +- fi +- ;; +- (differential) +- local new_differential_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$differential_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_differential_backup_file_name" +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" +- LogPrint "Performing differential backup for files newer than $latest_full_backup_date using backup archive '$new_differential_backup_file_name'" +- ;; +- (*) +- BugError "Unexpected create_backup_type '$create_backup_type'" +- ;; +- esac +-fi +-# Go back from "set -e -u -o pipefail" to the defaults: +-apply_bash_flags_and_options_commands "$DEFAULT_BASH_FLAGS_AND_OPTIONS_COMMANDS" +- +diff --git a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh +new file mode 120000 +index 00000000..cdbdc31f +--- /dev/null ++++ b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh +@@ -0,0 +1 @@ ++../../NETFS/default/070_set_backup_archive.sh +\ No newline at end of file +diff --git a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh +deleted file mode 100644 +index 64b7a792..00000000 +--- a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh ++++ /dev/null +@@ -1,15 +0,0 @@ +-# create mount point +-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then +- mkdir -p $v "$BUILD_DIR/outputfs" >&2 +- StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +- AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- +- if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then +- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD" +- fi +- +- mount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs $BACKUP_DUPLICITY_NETFS_OPTIONS +- +- BACKUP_DUPLICITY_URL="file://$BUILD_DIR/outputfs" +-fi +diff --git 
a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh +new file mode 120000 +index 00000000..7f558c5d +--- /dev/null ++++ b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh +@@ -0,0 +1 @@ ++../../../backup/DUPLICITY/default/100_mount_duplicity_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh +deleted file mode 100644 +index 60aa811e..00000000 +--- a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh ++++ /dev/null +@@ -1,15 +0,0 @@ +-# umount mountpoint +-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]; then +- +- if [[ "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]] ; then +- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_UMOUNTCMD" +- fi +- +- umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs +- +- rmdir $v $BUILD_DIR/outputfs >&2 +- if [[ $? -eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- fi +-fi +diff --git a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh +new file mode 120000 +index 00000000..b7e47be1 +--- /dev/null ++++ b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh +@@ -0,0 +1 @@ ++../../../backup/DUPLICITY/default/980_unmount_duplicity_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh +deleted file mode 100644 +index 7de92af4..00000000 +--- a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh ++++ /dev/null +@@ -1,13 +0,0 @@ +-# Copied from ../../NETFS/default/100_mount_NETFS_path.sh a.k.a. ../../../backup/NETFS/default/100_mount_NETFS_path.sh for YUM +- +-# create mount point +-mkdir -p $v "$BUILD_DIR/outputfs" >&2 +-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +-AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- +-if [[ "$BACKUP_MOUNTCMD" ]] ; then +- BACKUP_URL="var://BACKUP_MOUNTCMD" +-fi +- +-mount_url $BACKUP_URL $BUILD_DIR/outputfs $BACKUP_OPTIONS +diff --git a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh +new file mode 120000 +index 00000000..60e0f83f +--- /dev/null ++++ b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh +@@ -0,0 +1 @@ ++../../NETFS/default/100_mount_NETFS_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh +deleted file mode 100644 +index d02dcf34..00000000 +--- a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh ++++ /dev/null +@@ -1,15 +0,0 @@ +-# Copied from ../../../backup/NETFS/default/980_umount_NETFS_dir.sh for YUM +- +-# umount NETFS mountpoint +- +-if [[ "$BACKUP_UMOUNTCMD" ]] ; then +- BACKUP_URL="var://BACKUP_UMOUNTCMD" +-fi +- +-umount_url $BACKUP_URL $BUILD_DIR/outputfs +- +-rmdir $v $BUILD_DIR/outputfs >&2 +-if [[ $? 
-eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +-fi +diff --git a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh +new file mode 120000 +index 00000000..2c29cb57 +--- /dev/null ++++ b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh +@@ -0,0 +1 @@ ++../../NETFS/default/980_umount_NETFS_dir.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh +deleted file mode 100644 +index 64b7a792..00000000 +--- a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh ++++ /dev/null +@@ -1,15 +0,0 @@ +-# create mount point +-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then +- mkdir -p $v "$BUILD_DIR/outputfs" >&2 +- StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +- AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- +- if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then +- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD" +- fi +- +- mount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs $BACKUP_DUPLICITY_NETFS_OPTIONS +- +- BACKUP_DUPLICITY_URL="file://$BUILD_DIR/outputfs" +-fi +diff --git a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh +new file mode 120000 +index 00000000..7f558c5d +--- /dev/null ++++ b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh +@@ -0,0 +1 @@ ++../../../backup/DUPLICITY/default/100_mount_duplicity_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh +deleted file mode 100644 +index 60aa811e..00000000 +--- a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh ++++ /dev/null +@@ -1,15 +0,0 @@ +-# umount mountpoint +-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]; then +- +- if [[ "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]] ; then +- BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_UMOUNTCMD" +- fi +- +- umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs +- +- rmdir $v $BUILD_DIR/outputfs >&2 +- if [[ $? 
-eq 0 ]] ; then +- # the argument to RemoveExitTask has to be identical to the one given to AddExitTask +- RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- fi +-fi +diff --git a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh +new file mode 120000 +index 00000000..b7e47be1 +--- /dev/null ++++ b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh +@@ -0,0 +1 @@ ++../../../backup/DUPLICITY/default/980_unmount_duplicity_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh +deleted file mode 100644 +index 014879db..00000000 +--- a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh ++++ /dev/null +@@ -1,116 +0,0 @@ +-# Copied from ../../../prep/NETFS/default/050_check_NETFS_requirements.sh for YUM +-# BACKUP_URL=[proto]://[host]/[share] +-# example: nfs://lucky/temp/backup +-# example: cifs://lucky/temp +-# example: usb:///dev/sdb1 +-# example: tape:///dev/nst0 +-# example: file:///path +-# example: iso://backup/ +-# example: sshfs://user@host/G/rear/ +-# example: ftpfs://user:password@host/rear/ (the password part is optional) +- +-[[ "$BACKUP_URL" || "$BACKUP_MOUNTCMD" ]] +-# FIXME: The above test does not match the error message below. +-# To match the the error message the test should be +-# [[ "$BACKUP_URL" || ( "$BACKUP_MOUNTCMD" && "$BACKUP_UMOUNTCMD" ) ]] +-# but I cannot decide if there is a subtle reason for the omission. +-StopIfError "You must specify either BACKUP_URL or BACKUP_MOUNTCMD and BACKUP_UMOUNTCMD !" +- +-if [[ "$BACKUP_URL" ]] ; then +- local scheme=$( url_scheme $BACKUP_URL ) +- local hostname=$( url_hostname $BACKUP_URL ) +- local path=$( url_path $BACKUP_URL ) +- +- ### check for vaild BACKUP_URL schemes +- ### see https://github.com/rear/rear/issues/842 +- case $scheme in +- (nfs|cifs|usb|tape|file|iso|sshfs|ftpfs) +- # do nothing for vaild BACKUP_URL schemes +- : +- ;; +- (*) +- Error "Invalid scheme '$scheme' in BACKUP_URL '$BACKUP_URL' valid schemes: nfs cifs usb tape file iso sshfs ftpfs" +- ;; +- esac +- +- ### set other variables from BACKUP_URL +- if [[ "usb" = "$scheme" ]] ; then +- # if USB_DEVICE is not explicitly specified it is the path from BACKUP_URL +- [[ -z "$USB_DEVICE" ]] && USB_DEVICE="$path" +- fi +- +- ### check if host is reachable +- if [[ "$PING" && "$hostname" ]] ; then +- # Only LogPrintIfError but no StopIfError because it is not a fatal error +- # (i.e. not a reason to abort) when a host does not respond to a 'ping' +- # because hosts can be accessible via certain ports but do not respond to a 'ping' +- # cf. https://bugzilla.opensuse.org/show_bug.cgi?id=616706 +- # TODO: it would be better to test if it is accessible via the actually needed port(s) +- ping -c 2 "$hostname" >/dev/null +- LogPrintIfError "Host '$hostname' in BACKUP_URL '$BACKUP_URL' does not respond to a 'ping'." +- else +- Log "Skipping 'ping' test for host '$hostname' in BACKUP_URL '$BACKUP_URL'" +- fi +- +-fi +- +-# some backup progs require a different backuparchive name +-case "$(basename $BACKUP_PROG)" in +- (rsync) +- # rsync creates a target directory instead of a file +- BACKUP_PROG_SUFFIX= +- BACKUP_PROG_COMPRESS_SUFFIX= +- ;; +- (*) +- : +- ;; +-esac +- +-# include required programs +-# the code below includes mount.* and umount.* programs for all non-empty schemes +-# (i.e. 
for any non-empty BACKUP_URL like usb tape file sshfs ftpfs) +-# and it includes 'mount.' for empty schemes (e.g. if BACKUP_URL is not set) +-# which is o.k. because it is a catch all rule so we do not miss any +-# important executable needed a certain scheme and it does not hurt +-# see https://github.com/rear/rear/pull/859 +-PROGS=( "${PROGS[@]}" +-showmount +-mount.$(url_scheme $BACKUP_URL) +-umount.$(url_scheme $BACKUP_URL) +-$( test "$BACKUP_MOUNTCMD" && echo "${BACKUP_MOUNTCMD%% *}" ) +-$( test "$BACKUP_UMOUNTCMD" && echo "${BACKUP_UMOUNTCMD%% *}" ) +-$BACKUP_PROG +-gzip +-bzip2 +-xz +-) +- +-# include required stuff for sshfs or ftpfs (via CurlFtpFS) +-if [[ "sshfs" = "$scheme" || "ftpfs" = "$scheme" ]] ; then +- # both sshfs and ftpfs (via CurlFtpFS) are based on FUSE +- PROGS=( "${PROGS[@]}" fusermount mount.fuse ) +- MODULES=( "${MODULES[@]}" fuse ) +- MODULES_LOAD=( "${MODULES_LOAD[@]}" fuse ) +- COPY_AS_IS=( "${COPY_AS_IS[@]}" /etc/fuse.conf ) +- # include what is specific for sshfs +- if [[ "sshfs" = "$scheme" ]] ; then +- # see http://sourceforge.net/apps/mediawiki/fuse/index.php?title=SshfsFaq +- REQUIRED_PROGS=( "${REQUIRED_PROGS[@]}" sshfs ssh ) +- # relying on 500_ssh.sh to take a long the SSH related files +- fi +- # include what is specific for ftpfs +- if [[ "ftpfs" = "$scheme" ]] ; then +- # see http://curlftpfs.sourceforge.net/ +- # and https://github.com/rear/rear/issues/845 +- REQUIRED_PROGS=( "${REQUIRED_PROGS[@]}" curlftpfs ) +- fi +-fi +- +-# include required modules, like nfs cifs ... +-# the code below includes modules for all non-empty schemes +-# (i.e. for any non-empty BACKUP_URL like usb tape file sshfs ftpfs) +-# which is o.k. because this must been seen as a catch all rule +-# (one never knows what one could miss) +-# see https://github.com/rear/rear/pull/859 +-MODULES=( "${MODULES[@]}" $(url_scheme $BACKUP_URL) ) +- +diff --git a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh +new file mode 120000 +index 00000000..af1512d6 +--- /dev/null ++++ b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh +@@ -0,0 +1 @@ ++../../NETFS/default/050_check_NETFS_requirements.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh +deleted file mode 100644 +index f7e31ed6..00000000 +--- a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh ++++ /dev/null +@@ -1,12 +0,0 @@ +-# Copied from ../../../backup/NETFS/default/100_mount_NETFS_path.sh for YUM +-# create mount point +-mkdir -p $v "$BUILD_DIR/outputfs" >&2 +-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'" +- +-AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2" +- +-if [[ "$BACKUP_MOUNTCMD" ]] ; then +- BACKUP_URL="var://BACKUP_MOUNTCMD" +-fi +- +-mount_url $BACKUP_URL $BUILD_DIR/outputfs $BACKUP_OPTIONS +diff --git a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh +new file mode 120000 +index 00000000..73dd4697 +--- /dev/null ++++ b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh +@@ -0,0 +1 @@ ++../../../restore/YUM/default/100_mount_YUM_path.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh +deleted file mode 100644 +index 3d8bdd8d..00000000 +--- a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh ++++ /dev/null +@@ 
-1,300 +0,0 @@ +-# Copied from ../../../prep/NETFS/default/070_set_backup_archive.sh for YUM +-### Determine the name of the backup archive +-### This needs to be after we special case USB devices. +- +-# FIXME: backuparchive is no local variable (regardless that it is lowercased) +- +-# If TAPE_DEVICE is specified, use that: +-if test "$TAPE_DEVICE" ; then +- backuparchive="$TAPE_DEVICE" +- LogPrint "Using backup archive '$backuparchive'" +- return +-fi +- +-local backup_file_suffix="$BACKUP_PROG_SUFFIX$BACKUP_PROG_COMPRESS_SUFFIX" +-local backup_file_name="$BACKUP_PROG_ARCHIVE$backup_file_suffix" +- +-local scheme=$( url_scheme $BACKUP_URL ) +-local path=$( url_path $BACKUP_URL ) +-case "$scheme" in +- (file|iso) +- # Define the output path according to the scheme +- local outputpath=$( backup_path $scheme $path ) +- backuparchive="$outputpath/$backup_file_name" +- LogPrint "Using backup archive '$backuparchive'" +- return +- ;; +- (tape) +- # TODO: Check if that case is really needed. +- # Perhaps prep/default/030_translate_tape.sh does already all what is needed. +- backuparchive=$path +- LogPrint "Using backup archive '$backuparchive'" +- return +- ;; +-esac +- +-local backup_directory=$BUILD_DIR/outputfs/$NETFS_PREFIX +- +-# Normal (i.e. non-incremental/non-differential) backup: +-if ! test "incremental" = "$BACKUP_TYPE" -o "differential" = "$BACKUP_TYPE" ; then +- # In case of normal (i.e. non-incremental) backup there is only one restore archive +- # and its name is the same as the backup archive (usually 'backup.tar.gz'): +- backuparchive="$backup_directory/$backup_file_name" +- LogPrint "Using backup archive '$backuparchive'" +- # This script is also run during "rear recover/restoreonly" where RESTORE_ARCHIVES must be set. +- local backup_restore_workflows=( "recover" "restoreonly" ) +- if IsInArray $WORKFLOW ${backup_restore_workflows[@]} ; then +- # Only set RESTORE_ARCHIVES the backup archive is actually accessible +- # cf. https://github.com/rear/rear/issues/1166 +- if test -r "$backuparchive" ; then +- RESTORE_ARCHIVES=( "$backuparchive" ) +- else +- # In case of USB backup there is the subsequent 540_choose_backup_archive.sh script +- # that shows a backup selection dialog when RESTORE_ARCHIVES is not already set. +- if test "usb" = "$scheme" ; then +- LogPrint "Backup archive '$backuparchive' not readable. Need to select another one." +- else +- Error "Backup archive '$backuparchive' not readable." +- fi +- fi +- fi +- return +-fi +- +-# Incremental or differential backup: +-set -e -u -o pipefail +-# Incremental or differential backup only works for the NETFS backup method +-# and only with the 'tar' backup program: +-if ! test "NETFS" = "$BACKUP" -a "tar" = "$BACKUP_PROG" ; then +- Error "BACKUP_TYPE incremental or differential only works with BACKUP=NETFS and BACKUP_PROG=tar" +-fi +-# Incremental or differential backup is currently only known to work with BACKUP_URL=nfs://. +-# Other BACKUP_URL schemes may work and at least BACKUP_URL=usb:///... needs special setup +-# to work with incremental or differential backup (see https://github.com/rear/rear/issues/1145): +-if test "usb" = "$scheme" ; then +- # When USB_SUFFIX is set the compliance mode is used where +- # backup on USB works in compliance with backup on NFS which means +- # a fixed backup directory where incremental or differential backups work. 
+- # Use plain $USB_SUFFIX and not "$USB_SUFFIX" because when USB_SUFFIX contains only blanks +- # test "$USB_SUFFIX" would result true because test " " results true: +- test $USB_SUFFIX || Error "BACKUP_TYPE incremental or differential requires USB_SUFFIX for BACKUP_URL=usb" +-fi +-# Incremental or differential backup and keeping old backup contradict each other (mutual exclusive) +-# so that NETFS_KEEP_OLD_BACKUP_COPY must not be 'true' in case of incremental or differential backup: +-if test "$NETFS_KEEP_OLD_BACKUP_COPY" ; then +- NETFS_KEEP_OLD_BACKUP_COPY="" +- LogPrint "Disabled NETFS_KEEP_OLD_BACKUP_COPY because BACKUP_TYPE incremental or differential does not work with that" +-fi +-# For incremental or differential backup some date values (weekday, YYYY-MM-DD, HHMM) are needed +-# that must be consistent for one single point of the current time which means +-# one cannot call the 'date' command several times because then there would be +-# a small probability that e.g. weekday, YYYY-MM-DD, HHMM do not match +-# one single point in time (in particular when midnight passes in between). +-# Therefore the output of one single 'date' call is storend in an array and +-# the array elements are then assinged to individual variables as needed: +-local current_date_output=( $( date '+%a %Y-%m-%d %H%M' ) ) +-local current_weekday="${current_date_output[0]}" +-local current_yyyy_mm_dd="${current_date_output[1]}" +-local current_hhmm="${current_date_output[2]}" +-# The date FULLBACKUP_OUTDATED_DAYS ago is needed to check if the latest full backup is too old. +-# When the latest full backup is more than FULLBACKUP_OUTDATED_DAYS ago a new full backup is made. +-# This separated call of the 'date' command which is technically needed because is is +-# for another point in time (e.g. 7 days ago) is run after the above call of the 'date' +-# command for the current time to be on the safe side when midnight passes in between +-# both 'date' commands which would then result that a new full backup is made +-# when the latest full backup is basically right now FULLBACKUP_OUTDATED_DAYS ago because +-# the stored date of the latest full backup is the current date at the time when it was made. +-# Example (assuming FULLBACKUP_OUTDATED_DAYS=7 ): +-# The latest full backup was made on Sunday January 10 in 2016 (just before midnight). +-# One week later this script runs again while midnight passes between the two 'date' calls +-# so that current_date_output[@]="Sun 2016-01-17 0000" (still Sunday January 17 in 2016) +-# and yyyymmdd_max_days_ago=20160111 (already Monday January 11 in 2016), then +-# Sunday January 10 is older than Monday January 11 so that a new full backup is made: +-test "$FULLBACKUP_OUTDATED_DAYS" || FULLBACKUP_OUTDATED_DAYS="7" +-local yyyymmdd_max_days_ago=$( date '+%Y%m%d' --date="$FULLBACKUP_OUTDATED_DAYS days ago" ) +-# Full backup file names are of the form YYYY-MM-DD-HHMM-F.tar.gz +-# where the 'F' denotes a full backup: +-local full_backup_marker="F" +-# Incremental backup file names are of the form YYYY-MM-DD-HHMM-I.tar.gz +-# where the 'I' denotes an incremental backup: +-local incremental_backup_marker="I" +-# Differential backup file names are of the form YYYY-MM-DD-HHMM-D.tar.gz +-# where the last 'D' denotes a differential backup: +-local differential_backup_marker="D" +-# In case of incremental or differential backup the RESTORE_ARCHIVES contains +-# first the latest full backup file. 
+-# In case of incremental backup the RESTORE_ARCHIVES contains +-# after the latest full backup file each incremental backup +-# in the ordering how they must be restored. +-# For example when the latest full backup was made on Sunday +-# plus each subsequent weekday a separated incremental backup was made, +-# then during a "rear recover" on Wednesday morning +-# first the full backup from Sunday has to be restored, +-# then the incremental backup from Monday, and +-# finally the incremental backup from Tuesday. +-# In case of differential backup the RESTORE_ARCHIVES contains +-# after the latest full backup file the latest differential backup. +-# For example when the latest full backup was made on Sunday +-# plus each subsequent weekday a separated differential backup was made, +-# then during a "rear recover" on Wednesday morning +-# first the full backup from Sunday has to be restored, +-# and finally the differential backup from Tuesday +-# (i.e. the differential backup from Monday is skipped). +-# The date format YYYY-MM-DD that is used here is crucial. +-# It is the ISO 8601 format 'year-month-day' to specify a day of a year +-# that is accepted by 'tar' for the '--newer' option, +-# see the GNU tar manual section "Operating Only on New Files" +-# at https://www.gnu.org/software/tar/manual/html_node/after.html +-# and the GNU tar manual section "Calendar date items" +-# at https://www.gnu.org/software/tar/manual/html_node/Calendar-date-items.html#SEC124 +-local date_glob_regex="[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]" +-local date_time_glob_regex="$date_glob_regex-[0-9][0-9][0-9][0-9]" +-# Determine what kind of backup must be created, 'full' or 'incremental' or 'differential' +-# (the empty default means it is undecided what kind of backup must be created): +-local create_backup_type="" +-# Code regarding creating a backup is useless during "rear recover" and +-# messages about creating a backup are misleading during "rear recover": +-local recovery_workflows=( "recover" "layoutonly" "restoreonly" ) +-if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # When today is a specified full backup day, do a full backup in any case +- # (regardless if there is already a full backup of this day): +- if IsInArray "$current_weekday" "${FULLBACKUPDAY[@]}" ; then +- create_backup_type="full" +- LogPrint "Today's weekday ('$current_weekday') is a full backup day that triggers a new full backup in any case" +- fi +-fi +-# Get the latest full backup (if exists): +-local full_backup_glob_regex="$date_time_glob_regex-$full_backup_marker$backup_file_suffix" +-# Here things like 'find /path/to/dir -name '*.tar.gz' | sort' are used because +-# one cannot use bash globbing via commands like 'ls /path/to/dir/*.tar.gz' +-# because /usr/sbin/rear sets the nullglob bash option which leads to plain 'ls' +-# when '/path/to/dir/*.tar.gz' matches nothing (i.e. when no backup file exists) +-# so that then plain 'ls' would result nonsense. 
+-local latest_full_backup=$( find $backup_directory -name "$full_backup_glob_regex" | sort | tail -n1 ) +-# A latest full backup is found: +-if test "$latest_full_backup" ; then +- local latest_full_backup_file_name=$( basename "$latest_full_backup" ) +- # The full_or_incremental_backup_glob_regex is also needed below for non-"recover" WORKFLOWs +- # to set the right variables for creating an incremental backup: +- local full_or_incremental_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$incremental_backup_marker]$backup_file_suffix" +- # Code regarding creating a backup is useless during "rear recover" and +- # messages about creating a backup are misleading during "rear recover": +- if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # There is nothing to do here if it is already decided that +- # a full backup must be created (see "full backup day" above"): +- if ! test "full" = "$create_backup_type" ; then +- local latest_full_backup_date=$( echo $latest_full_backup_file_name | grep -o "$date_glob_regex" ) +- local yyyymmdd_latest_full_backup=$( echo $latest_full_backup_date | tr -d '-' ) +- # Check if the latest full backup is too old: +- if test $yyyymmdd_latest_full_backup -lt $yyyymmdd_max_days_ago ; then +- create_backup_type="full" +- LogPrint "Latest full backup date '$latest_full_backup_date' too old (more than $FULLBACKUP_OUTDATED_DAYS days ago) triggers new full backup" +- else +- # When a latest full backup is found that is not too old +- # a BACKUP_TYPE (incremental or differential) backup will be created: +- create_backup_type="$BACKUP_TYPE" +- LogPrint "Latest full backup found ($latest_full_backup_file_name) triggers $BACKUP_TYPE backup" +- fi +- fi +- else +- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: +- case "$BACKUP_TYPE" in +- (incremental) +- # When a latest full backup is found use that plus all later incremental backups for restore: +- # The following command is a bit tricky: +- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-I.tar.gz files in the backup directory and sorts them +- # and finally it outputs only those that match the latest full backup file name and incremental backups that got sorted after that +- # where it is mandatory that the backup file names sort by date (i.e. date must be the leading part of the backup file names): +- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" ) ) +- ;; +- (differential) +- # For differential backup use the latest full backup plus the one latest differential backup for restore: +- # The following command is a bit tricky: +- # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-D.tar.gz files in the backup directory and sorts them +- # then it outputs only those that match the latest full backup file name and all differential backups that got sorted after that +- # and then it outputs only the first line (i.e. the full backup) and the last line (i.e. the latest differential backup) +- # but when no differential backup exists (i.e. 
when only the full backup exists) the first line is also the last line +- # so that "sed -n -e '1p;$p'" outputs the full backup twice which is corrected by the final "sort -u": +- local full_or_differential_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$differential_backup_marker]$backup_file_suffix" +- RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_differential_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | sed -n -e '1p;$p' | sort -u ) ) +- ;; +- (*) +- BugError "Unexpected BACKUP_TYPE '$BACKUP_TYPE'" +- ;; +- esac +- # Tell the user what will be restored: +- local restore_archives_file_names="" +- for restore_archive in "${RESTORE_ARCHIVES[@]}" ; do +- restore_archives_file_names="$restore_archives_file_names $( basename "$restore_archive" )" +- done +- LogPrint "For backup restore using $restore_archives_file_names" +- fi +-# No latest full backup is found: +-else +- # Code regarding creating a backup is useless during "rear recover" and +- # messages about creating a backup are misleading during "rear recover": +- if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # If no latest full backup is found create one during "rear mkbackup": +- create_backup_type="full" +- LogPrint "No full backup found (YYYY-MM-DD-HHMM-F.tar.gz) triggers full backup" +- else +- # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set: +- # If no latest full backup is found (i.e. no file name matches the YYYY-MM-DD-HHMM-F.tar.gz form) +- # fall back to what is done in case of normal (i.e. non-incremental/non-differential) backup +- # and hope for the best (i.e. that a backup_directory/backup_file_name actually exists). +- # In case of normal (i.e. non-incremental/non-differential) backup there is only one restore archive +- # and its name is the same as the backup archive (usually 'backup.tar.gz'). +- # This is only a fallback setting to be more on the safe side for "rear recover". +- # Initially for the very fist run of incremental backup during "rear mkbackup" +- # a full backup file of the YYYY-MM-DD-HHMM-F.tar.gz form will be created. +- RESTORE_ARCHIVES=( "$backup_directory/$backup_file_name" ) +- LogPrint "Using $backup_file_name for backup restore" +- fi +-fi +-# Code regarding creating a backup is useless during "rear recover" and +-# messages about creating a backup are misleading during "rear recover": +-if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then +- # Set the right variables for creating a backup (but do not actually do anything at this point): +- case "$create_backup_type" in +- (full) +- local new_full_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$full_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_full_backup_file_name" +- BACKUP_PROG_CREATE_NEWER_OPTIONS="-V $new_full_backup_file_name" +- LogPrint "Performing full backup using backup archive '$new_full_backup_file_name'" +- ;; +- (incremental) +- local new_incremental_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$incremental_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_incremental_backup_file_name" +- # Get the latest latest incremental backup that is based on the latest full backup (if exists): +- local incremental_backup_glob_regex="$date_time_glob_regex-$incremental_backup_marker$backup_file_suffix" +- # First get the latest full backup plus all later incremental backups (cf. 
how RESTORE_ARCHIVES is set in case of incremental backup) +- # then grep only the incremental backups and from the incremental backups use only the last one (if exists): +- local latest_incremental_backup=$( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | grep "$incremental_backup_glob_regex" | tail -n1 ) +- if test "$latest_incremental_backup" ; then +- # A latest incremental backup that is based on the latest full backup is found: +- local latest_incremental_backup_file_name=$( basename $latest_incremental_backup ) +- LogPrint "Latest incremental backup found ($latest_incremental_backup_file_name) that is newer than the latest full backup" +- local latest_incremental_backup_date=$( echo $latest_incremental_backup_file_name | grep -o "$date_glob_regex" ) +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_incremental_backup_date -V $latest_incremental_backup_file_name" +- LogPrint "Performing incremental backup for files newer than $latest_incremental_backup_date using backup archive '$new_incremental_backup_file_name'" +- else +- # When there is not yet an incremental backup that is based on the latest full backup +- # the new created incremental backup must be based on the latest full backup: +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" +- LogPrint "Performing incremental backup for files newer than $latest_full_backup_date using backup archive '$new_incremental_backup_file_name'" +- fi +- ;; +- (differential) +- local new_differential_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$differential_backup_marker$backup_file_suffix" +- backuparchive="$backup_directory/$new_differential_backup_file_name" +- BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name" +- LogPrint "Performing differential backup for files newer than $latest_full_backup_date using backup archive '$new_differential_backup_file_name'" +- ;; +- (*) +- BugError "Unexpected create_backup_type '$create_backup_type'" +- ;; +- esac +-fi +-# Go back from "set -e -u -o pipefail" to the defaults: +-apply_bash_flags_and_options_commands "$DEFAULT_BASH_FLAGS_AND_OPTIONS_COMMANDS" +- +diff --git a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh +new file mode 120000 +index 00000000..b8de3d9e +--- /dev/null ++++ b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh +@@ -0,0 +1 @@ ++../../../prep/YUM/default/070_set_backup_archive.sh +\ No newline at end of file +diff --git a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh +deleted file mode 100644 +index dc719e38..00000000 +--- a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh ++++ /dev/null +@@ -1,14 +0,0 @@ +-# Copied from ../../../backup/NETFS/default/980_umount_NETFS_dir.sh for YUM +-# umount NETFS mountpoint +- +-if [[ "$BACKUP_UMOUNTCMD" ]] ; then +- BACKUP_URL="var://BACKUP_UMOUNTCMD" +-fi +- +-umount_url $BACKUP_URL $BUILD_DIR/outputfs +- +-rmdir $v $BUILD_DIR/outputfs >&2 +-if [[ $? 
-eq 0 ]] ; then
+-    # the argument to RemoveExitTask has to be identical to the one given to AddExitTask
+-    RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
+-fi
+diff --git a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh
+new file mode 120000
+index 00000000..ada5ea50
+--- /dev/null
++++ b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh
+@@ -0,0 +1 @@
++../../../restore/YUM/default/980_umount_YUM_dir.sh
+\ No newline at end of file
diff --git a/SOURCES/rear-bz1983000.patch b/SOURCES/rear-bz1983000.patch
new file mode 100644
index 0000000..a57367f
--- /dev/null
+++ b/SOURCES/rear-bz1983000.patch
@@ -0,0 +1,18 @@
+diff --git a/usr/share/rear/finalize/Linux-ppc64le/620_install_grub2.sh b/usr/share/rear/finalize/Linux-ppc64le/620_install_grub2.sh
+index c025619d..93f45e2b 100644
+--- a/usr/share/rear/finalize/Linux-ppc64le/620_install_grub2.sh
++++ b/usr/share/rear/finalize/Linux-ppc64le/620_install_grub2.sh
+@@ -22,6 +22,7 @@ fi
+ 
+ LogPrint "Installing GRUB2 boot loader"
+ mount -t proc none $TARGET_FS_ROOT/proc
++mount -t sysfs sys $TARGET_FS_ROOT/sys
+ 
+ if [[ -r "$LAYOUT_FILE" ]]; then
+ 
+@@ -72,4 +73,5 @@ if [[ "$NOBOOTLOADER" ]]; then
+    LogIfError "No bootloader configuration found. Install boot partition manually"
+ fi
+ 
++umount $TARGET_FS_ROOT/sys
+ umount $TARGET_FS_ROOT/proc
diff --git a/SOURCES/rear-bz1983013.patch b/SOURCES/rear-bz1983013.patch
new file mode 100644
index 0000000..99ac700
--- /dev/null
+++ b/SOURCES/rear-bz1983013.patch
@@ -0,0 +1,71 @@
+diff --git a/usr/share/rear/conf/Linux-ppc64.conf b/usr/share/rear/conf/Linux-ppc64.conf
+index 71fa8fa3..85924971 100644
+--- a/usr/share/rear/conf/Linux-ppc64.conf
++++ b/usr/share/rear/conf/Linux-ppc64.conf
+@@ -1,6 +1,7 @@
+ REQUIRED_PROGS=(
+ "${REQUIRED_PROGS[@]}"
+ sfdisk
++ofpathname
+ )
+ 
+ PROGS=(
+@@ -9,14 +10,22 @@ mkofboot
+ ofpath
+ ybin
+ yabootconfig
+-bootlist
+ pseries_platform
+ nvram
+-ofpathname
+ bc
+ agetty
+ )
+ 
++if grep -q "emulated by qemu" /proc/cpuinfo ; then
++    # Qemu/KVM virtual machines don't need bootlist - don't complain if
++    # it is missing
++    PROGS+=( bootlist )
++else
++    # PowerVM environment, we need to run bootlist, otherwise
++    # we can't make the system bootable. Be strict about requiring it
++    REQUIRED_PROGS+=( bootlist )
++fi
++
+ COPY_AS_IS=(
+ "${COPY_AS_IS[@]}"
+ /usr/lib/yaboot/yaboot
+diff --git a/usr/share/rear/conf/Linux-ppc64le.conf b/usr/share/rear/conf/Linux-ppc64le.conf
+index 2ea82b85..52c15e75 100644
+--- a/usr/share/rear/conf/Linux-ppc64le.conf
++++ b/usr/share/rear/conf/Linux-ppc64le.conf
+@@ -5,10 +5,8 @@ sfdisk
+ 
+ PROGS=(
+ "${PROGS[@]}"
+-bootlist
+ pseries_platform
+ nvram
+-ofpathname
+ bc
+ agetty
+ )
+@@ -21,4 +19,18 @@ agetty
+ if [[ $(awk '/platform/ {print $NF}' < /proc/cpuinfo) != PowerNV ]] ; then
+    # No firmware files when ppc64le Linux is not run in BareMetal Mode (PowerNV):
+    test "${FIRMWARE_FILES[*]}" || FIRMWARE_FILES=( 'no' )
++   # grub2-install for powerpc-ieee1275 calls ofpathname, so without it,
++   # the rescue system can't make the recovered system bootable
++   REQUIRED_PROGS+=( ofpathname )
++   if grep -q "emulated by qemu" /proc/cpuinfo ; then
++       # Qemu/KVM virtual machines don't need bootlist - don't complain if
++       # it is missing
++       PROGS+=( bootlist )
++   else
++       # PowerVM environment, we need to run bootlist, otherwise
++       # we can't make the system bootable. 
Be strict about requiring it ++ REQUIRED_PROGS+=( bootlist ) ++ fi ++else ++ PROGS+=( ofpathname bootlist ) + fi diff --git a/SOURCES/rear-pr2675.patch b/SOURCES/rear-pr2675.patch new file mode 100644 index 0000000..7d11071 --- /dev/null +++ b/SOURCES/rear-pr2675.patch @@ -0,0 +1,60 @@ +diff --git a/usr/share/rear/lib/framework-functions.sh b/usr/share/rear/lib/framework-functions.sh +index 4878216b..e919bdbf 100644 +--- a/usr/share/rear/lib/framework-functions.sh ++++ b/usr/share/rear/lib/framework-functions.sh +@@ -121,7 +121,7 @@ function cleanup_build_area_and_end_program () { + sleep 2 + umount_mountpoint_lazy $BUILD_DIR/outputfs + fi +- remove_temporary_mountpoint '$BUILD_DIR/outputfs' || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove" ++ remove_temporary_mountpoint "$BUILD_DIR/outputfs" || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove" + rmdir $v $BUILD_DIR >&2 + fi + Log "End of program reached" +diff --git a/usr/share/rear/lib/global-functions.sh b/usr/share/rear/lib/global-functions.sh +index c1a11615..0f8f362d 100644 +--- a/usr/share/rear/lib/global-functions.sh ++++ b/usr/share/rear/lib/global-functions.sh +@@ -317,7 +317,20 @@ function url_path() { + + ### Returns true if one can upload files to the URL + function scheme_accepts_files() { +- local scheme=$1 ++ # Be safe against 'set -eu' which would exit 'rear' with "bash: $1: unbound variable" ++ # when scheme_accepts_files is called without an argument ++ # by bash parameter expansion with using an empty default value if $1 is unset or null. ++ # Bash parameter expansion with assigning a default value ${1:=} does not work ++ # (then it would still exit with "bash: $1: cannot assign in this way") ++ # but using a default value is practicable here because $1 is used only once ++ # cf. https://github.com/rear/rear/pull/2675#discussion_r705018956 ++ local scheme=${1:-} ++ # Return false if scheme is empty or blank (e.g. when OUTPUT_URL is unset or empty or blank) ++ # cf. https://github.com/rear/rear/issues/2676 ++ # and https://github.com/rear/rear/issues/2667#issuecomment-914447326 ++ # also return false if scheme is more than one word (so no quoted "$scheme" here) ++ # cf. https://github.com/rear/rear/pull/2675#discussion_r704401462 ++ test $scheme || return 1 + case $scheme in + (null|tape|obdr) + # tapes do not support uploading arbitrary files, one has to handle them +@@ -341,7 +354,10 @@ function scheme_accepts_files() { + ### Returning true does not imply that the URL is currently mounted at a filesystem and usable, + ### only that it can be mounted (use mount_url() first) + function scheme_supports_filesystem() { +- local scheme=$1 ++ # Be safe against 'set -eu' exit if scheme_supports_filesystem is called without argument ++ local scheme=${1:-} ++ # Return false if scheme is empty or blank or more than one word, cf. 
scheme_accepts_files() above ++ test $scheme || return 1 + case $scheme in + (null|tape|obdr|rsync|fish|ftp|ftps|hftp|http|https|sftp) + return 1 +@@ -560,7 +576,7 @@ function umount_url() { + + RemoveExitTask "perform_umount_url '$url' '$mountpoint' lazy" + +- remove_temporary_mountpoint '$mountpoint' && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'" ++ remove_temporary_mountpoint "$mountpoint" && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'" + return 0 + } + diff --git a/SOURCES/rear-sfdc02772301.patch b/SOURCES/rear-sfdc02772301.patch new file mode 100644 index 0000000..0e3fae4 --- /dev/null +++ b/SOURCES/rear-sfdc02772301.patch @@ -0,0 +1,38 @@ +diff --git a/usr/share/rear/conf/default.conf b/usr/share/rear/conf/default.conf +index 264f2217..313f4118 100644 +--- a/usr/share/rear/conf/default.conf ++++ b/usr/share/rear/conf/default.conf +@@ -1478,7 +1478,7 @@ OBDR_BLOCKSIZE=2048 + # BACKUP=NBU stuff (Symantec/Veritas NetBackup) + ## + # +-COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt ) ++COPY_AS_IS_NBU=( /usr/openv/bin/vnetd /usr/openv/bin/vopied /usr/openv/lib /usr/openv/netbackup /usr/openv/var/auth/[mn]*.txt /opt/VRTSpbx /etc/vx/VxICS /etc/vx/vrtslog.conf ) + COPY_AS_IS_EXCLUDE_NBU=( "/usr/openv/netbackup/logs/*" "/usr/openv/netbackup/bin/bpjava*" /usr/openv/netbackup/bin/xbp /usr/openv/netbackup/bin/private /usr/openv/lib/java /usr/openv/lib/shared/vddk /usr/openv/netbackup/baremetal ) + NBU_LD_LIBRARY_PATH="/usr/openv/lib:/usr/openv/netbackup/sec/at/lib/" + PROGS_NBU=( ) +diff --git a/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh b/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh +index cd48b8d9..ae5a3ccc 100644 +--- a/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh ++++ b/usr/share/rear/rescue/NBU/default/450_prepare_netbackup.sh +@@ -7,6 +7,12 @@ + + [[ $NBU_version -lt 7 ]] && return # NBU is using xinetd when version <7.x + ++if [ -e "/etc/init.d/vxpbx_exchanged" ]; then ++ cp $v /etc/init.d/vxpbx_exchanged $ROOTFS_DIR/etc/scripts/system-setup.d/vxpbx_exchanged.real ++ chmod $v +x $ROOTFS_DIR/etc/scripts/system-setup.d/vxpbx_exchanged.real ++ echo "( /etc/scripts/system-setup.d/vxpbx_exchanged.real start )" > $ROOTFS_DIR/etc/scripts/system-setup.d/89-vxpbx_exchanged.sh ++fi ++ + if [ -e "/etc/init.d/netbackup" ]; then + cp $v /etc/init.d/netbackup $ROOTFS_DIR/etc/scripts/system-setup.d/netbackup.real + chmod $v +x $ROOTFS_DIR/etc/scripts/system-setup.d/netbackup.real +diff --git a/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore b/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore +new file mode 100644 +index 00000000..d6b7ef32 +--- /dev/null ++++ b/usr/share/rear/skel/NBU/usr/openv/tmp/.gitignore +@@ -0,0 +1,2 @@ ++* ++!.gitignore diff --git a/SPECS/rear.spec b/SPECS/rear.spec index 063978a..135ae08 100644 --- a/SPECS/rear.spec +++ b/SPECS/rear.spec @@ -2,7 +2,7 @@ Summary: Relax-and-Recover is a Linux disaster recovery and system migration tool Name: rear Version: 2.4 -Release: 13%{?dist} +Release: 15%{?dist} License: GPLv3 Group: Applications/File URL: http://relax-and-recover.org/ @@ -28,6 +28,13 @@ Patch22: rear-bz1692575.patch Patch23: rear-sfdc02343208.patch Patch24: rear-bz1726043.patch Patch25: rear-bz1842984-skip-longhorn.patch +Patch30: rear-sfdc02772301.patch +Patch31: rear-bz1945869.patch +Patch32: rear-bz1958247.patch +Patch33: rear-bz1930662.patch +Patch35: rear-bz1983013.patch +Patch38: rear-bz1983000.patch +Patch39: 
 
 ExcludeArch: s390x
 ExcludeArch: s390
@@ -68,6 +75,13 @@ Requires: syslinux
 Requires: yaboot
 %endif
 
+%ifarch ppc ppc64 ppc64le
+# Called by grub2-install (except on PowerNV)
+Requires: /usr/sbin/ofpathname
+# Needed to make PowerVM LPARs bootable
+Requires: /usr/sbin/bootlist
+%endif
+
 Requires: crontabs
 Requires: iproute
 Requires: xorriso
@@ -132,6 +146,13 @@ fi
 %patch23 -p1
 %patch24 -p1
 %patch25 -p1
+%patch30 -p1
+%patch31 -p1
+%patch32 -p1
+%patch33 -p1
+%patch35 -p1
+%patch38 -p1
+%patch39 -p1
 
 echo "30 1 * * * root /usr/sbin/rear checklayout || /usr/sbin/rear mkrescue" >rear.cron
 
@@ -166,6 +187,27 @@ TZ=UTC %{__make} -C doc
 %{_sbindir}/rear
 
 %changelog
+* Thu Dec 16 2021 Pavel Cahyna - 2.4-15
+- Backport PR2608:
+ Fix setting boot path in case of UEFI partition (ESP) on MD RAID
+ Resolves: rhbz1945869
+- Backport PR2625 & 2675
+ Prevents accidental backup removal in case of errors
+ Resolves: rhbz1843585
+- Fix rsync error and option handling
+ Fixes metadata storage when rsync user is not root
+ Resolves: rhbz1947064
+- Changes for NetBackup (NBU) support, upstream PR2544
+ Resolves: rhbz2031833
+- On POWER add bootlist & ofpathname to the list of required programs
+ conditionally (bootlist only if running under PowerVM, ofpathname
+ always except on PowerNV) - upstream PR2665, add them to package
+ dependencies
+ Resolves: rhbz1983008
+- On POWER mount /sys in the chroot, otherwise ofpathname does not work
+ and we risk ending up with not installing the bootloader properly
+ Resolves: rhbz1983000
+
 * Thu Jun 04 2020 Václav Doležal - 2.4-13
 - Apply upstream PR2373: Skip Longhorn Engine replica devices
 Resolves: rhbz1842984