Blob Blame History Raw
diff --git a/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh
index 64b7a792..6ba7d543 100644
--- a/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh
+++ b/usr/share/rear/backup/DUPLICITY/default/100_mount_duplicity_path.sh
@@ -1,10 +1,4 @@
-# create mount point
 if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then
-	mkdir -p $v "$BUILD_DIR/outputfs" >&2
-	StopIfError "Could not mkdir '$BUILD_DIR/outputfs'"
-
-	AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-
 	if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then
 		BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD"
 	fi
diff --git a/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh
index 185dbd95..8525ab1d 100644
--- a/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh
+++ b/usr/share/rear/backup/DUPLICITY/default/980_unmount_duplicity_path.sh
@@ -6,10 +6,4 @@ if [  -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD"
 	fi
 
 	umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs
-
-	rmdir $v $BUILD_DIR/outputfs >&2
-	if [[ $? -eq 0 ]] ; then
-		# the argument to RemoveExitTask has to be identical to the one given to AddExitTask
-		RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-	fi
 fi
diff --git a/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh b/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh
index 5c7696db..b6a955db 100644
--- a/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh
+++ b/usr/share/rear/backup/NETFS/default/100_mount_NETFS_path.sh
@@ -1,9 +1,3 @@
-# create mount point
-mkdir -p $v "$BUILD_DIR/outputfs" >&2
-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'"
-
-AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-
 if [[ "$BACKUP_MOUNTCMD" ]] ; then
     BACKUP_URL="var://BACKUP_MOUNTCMD"
 fi
diff --git a/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh b/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh
index d79653b4..9bf8f76a 100644
--- a/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh
+++ b/usr/share/rear/backup/NETFS/default/150_save_copy_of_prefix_dir.sh
@@ -3,20 +3,17 @@
 [ -z "${NETFS_KEEP_OLD_BACKUP_COPY}" ] && return
 
 # do not do this for tapes and special attention for file:///path
-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL"
-local scheme=$(url_scheme ${!url})
-local path=$(url_path ${!url})
-local opath=$(backup_path $scheme $path)
+local scheme=$( url_scheme $BACKUP_URL )
+local path=$( url_path $BACKUP_URL )
+local opath=$( backup_path $scheme $path )
 
 # if $opath is empty return silently (e.g. scheme tape)
 [ -z "$opath" ] && return 0
 
 if ! test -f "${opath}/.lockfile" ; then
     if test -d "${opath}" ; then
-        rm -rf $v "${opath}.old" >&2
-        StopIfError "Could not remove '${opath}.old'"
-        mv -f $v "${opath}" "${opath}.old" >&2
-        StopIfError "Could not move '${opath}'"
+        rm -rf $v "${opath}.old" || Error "Could not remove '${opath}.old'"
+        mv -f $v "${opath}" "${opath}.old" || Error "Could not move '${opath}'"
     fi
 else
     # lockfile was already made through the output workflow (hands off)
diff --git a/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh b/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh
index db15bca2..43f5b651 100644
--- a/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh
+++ b/usr/share/rear/backup/NETFS/default/200_make_prefix_dir.sh
@@ -2,13 +2,14 @@
 # to $HOSTNAME
 
 # do not do this for tapes and special attention for file:///path
-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL"
-local scheme=$(url_scheme ${!url})
-local path=$(url_path ${!url})
-local opath=$(backup_path $scheme $path)
+local scheme=$( url_scheme $BACKUP_URL )
+local path=$( url_path $BACKUP_URL )
+local opath=$( backup_path $scheme $path )
 
 # if $opath is empty return silently (e.g. scheme tape)
 [ -z "$opath" ] && return 0
 
-mkdir -p $v -m0750 "${opath}" >&2
-StopIfError "Could not mkdir '${opath}'"
+mkdir -p $v -m0750 "${opath}" && return
+
+# A failure to create the $NETFS_PREFIX sub-directory is fatal:
+Error "Failed to create '$opath' directory for BACKUP_URL=$BACKUP_URL"
diff --git a/usr/share/rear/backup/NETFS/default/250_create_lock.sh b/usr/share/rear/backup/NETFS/default/250_create_lock.sh
index 59090a22..36d547ec 100644
--- a/usr/share/rear/backup/NETFS/default/250_create_lock.sh
+++ b/usr/share/rear/backup/NETFS/default/250_create_lock.sh
@@ -2,15 +2,13 @@
 # made by a previous mkbackup run when the variable NETFS_KEEP_OLD_BACKUP_COPY has been set
 
 # do not do this for tapes and special attention for file:///path
-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL"
-local scheme=$(url_scheme ${!url})
-local path=$(url_path ${!url})
-local opath=$(backup_path $scheme $path)
+local scheme=$( url_scheme $BACKUP_URL )
+local path=$( url_path $BACKUP_URL )
+local opath=$( backup_path $scheme $path )
 
 # if $opath is empty return silently (e.g. scheme tape)
 [ -z "$opath" ] && return 0
 
 if test -d "${opath}" ; then
-	> "${opath}/.lockfile"
-	StopIfError "Could not create '${opath}/.lockfile'"
+	> "${opath}/.lockfile" || Error "Could not create '${opath}/.lockfile'"
 fi
diff --git a/usr/share/rear/backup/NETFS/default/970_remove_lock.sh b/usr/share/rear/backup/NETFS/default/970_remove_lock.sh
index f69f7bd8..7038f5b9 100644
--- a/usr/share/rear/backup/NETFS/default/970_remove_lock.sh
+++ b/usr/share/rear/backup/NETFS/default/970_remove_lock.sh
@@ -1,8 +1,7 @@
 # remove the lockfile
-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL"
-local scheme=$(url_scheme ${!url})
-local path=$(url_path ${!url})
-local opath=$(backup_path $scheme $path)
+local scheme=$( url_scheme $BACKUP_URL )
+local path=$( url_path $BACKUP_URL )
+local opath=$( backup_path $scheme $path )
 
 # if $opath is empty return silently (e.g. scheme tape)
 [ -z "$opath" ] && return 0
diff --git a/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh b/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh
index f28c6cbf..e1954dc5 100644
--- a/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh
+++ b/usr/share/rear/backup/NETFS/default/980_umount_NETFS_dir.sh
@@ -5,9 +5,3 @@ if [[ "$BACKUP_UMOUNTCMD" ]] ; then
 fi
 
 umount_url $BACKUP_URL $BUILD_DIR/outputfs
-
-rmdir $v $BUILD_DIR/outputfs >&2
-if [[ $? -eq 0 ]] ; then
-    # the argument to RemoveExitTask has to be identical to the one given to AddExitTask
-    RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-fi
diff --git a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh
deleted file mode 100644
index 6111f89b..00000000
--- a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh
+++ /dev/null
@@ -1,33 +0,0 @@
-
-# Backup all that is explicitly specified in BACKUP_PROG_INCLUDE:
-for backup_include_item in "${BACKUP_PROG_INCLUDE[@]}" ; do
-    test "$backup_include_item" && echo "$backup_include_item"
-done > $TMP_DIR/backup-include.txt
-
-# Implicitly also backup all local filesystems as defined in mountpoint_device
-# except BACKUP_ONLY_INCLUDE or MANUAL_INCLUDE is set:
-if ! is_true "$BACKUP_ONLY_INCLUDE" ; then
-    if [ "${MANUAL_INCLUDE:-NO}" != "YES" ] ; then
-        # Add the mountpoints that will be recovered to the backup include list
-        # unless a mountpoint is excluded:
-        while read mountpoint device junk ; do
-            if ! IsInArray "$mountpoint" "${EXCLUDE_MOUNTPOINTS[@]}" ; then
-                echo "$mountpoint"
-            fi
-        done <"$VAR_DIR/recovery/mountpoint_device" >> $TMP_DIR/backup-include.txt
-    fi
-fi
-
-# Exclude all that is explicitly specified in BACKUP_PROG_EXCLUDE:
-for backup_exclude_item in "${BACKUP_PROG_EXCLUDE[@]}" ; do
-    test "$backup_exclude_item" && echo "$backup_exclude_item"
-done > $TMP_DIR/backup-exclude.txt
-
-# Implicitly also add excluded mountpoints to the backup exclude list
-# except BACKUP_ONLY_EXCLUDE is set:
-if ! is_true "$BACKUP_ONLY_EXCLUDE" ; then
-    for excluded_mountpoint in "${EXCLUDE_MOUNTPOINTS[@]}" ; do
-        test "$excluded_mountpoint" && echo "$excluded_mountpoint/"
-    done >> $TMP_DIR/backup-exclude.txt
-fi
-
diff --git a/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh
new file mode 120000
index 00000000..d8d12c0b
--- /dev/null
+++ b/usr/share/rear/backup/YUM/default/400_create_include_exclude_files.sh
@@ -0,0 +1 @@
+../../NETFS/default/400_create_include_exclude_files.sh
\ No newline at end of file
diff --git a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh
deleted file mode 100644
index 29d85905..00000000
--- a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copied from ../../DUPLICITY/default/600_create_python_symlink.sh for YUM
-# make sure we have a symbolic link to the python binary
-(
-    cd  $ROOTFS_DIR/bin
-    for py in $(find . -name "python*" )
-    do
-        this_py=${py#./*}   # should be without ./
-        case $this_py in
-            python) break ;;
-            python2*|python3*) ln -sf $v $this_py python >&2 ;;
-        esac
-    done
-)
-
diff --git a/usr/share/rear/build/YUM/default/600_create_python_symlink.sh b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh
new file mode 120000
index 00000000..d776e5aa
--- /dev/null
+++ b/usr/share/rear/build/YUM/default/600_create_python_symlink.sh
@@ -0,0 +1 @@
+../../DUPLICITY/default/600_create_python_symlink.sh
\ No newline at end of file
diff --git a/usr/share/rear/lib/framework-functions.sh b/usr/share/rear/lib/framework-functions.sh
index f245861a..b5324747 100644
--- a/usr/share/rear/lib/framework-functions.sh
+++ b/usr/share/rear/lib/framework-functions.sh
@@ -122,7 +122,7 @@ function cleanup_build_area_and_end_program () {
     # Cleanup build area
     Log "Finished in $((SECONDS-STARTTIME)) seconds"
     if is_true "$KEEP_BUILD_DIR" ; then
-        LogPrint "You should also rm -Rf $BUILD_DIR"
+        LogPrint "You should also rm -Rf --one-file-system $BUILD_DIR"
     else
         Log "Removing build area $BUILD_DIR"
         rm -Rf $TMP_DIR
@@ -132,15 +132,11 @@ function cleanup_build_area_and_end_program () {
         # in worst case it could not umount; so before remove the BUILD_DIR check if above outputfs is gone
         if mountpoint -q "$BUILD_DIR/outputfs" ; then
             # still mounted it seems
-            LogPrint "Directory $BUILD_DIR/outputfs still mounted - trying lazy umount"
             sleep 2
-            umount -f -l $BUILD_DIR/outputfs >&2
-            rm -Rf $v $BUILD_DIR/outputfs >&2
-        else
-            # not mounted so we can safely delete $BUILD_DIR/outputfs
-            rm -Rf $BUILD_DIR/outputfs
+            umount_mountpoint_lazy $BUILD_DIR/outputfs
         fi
-        rm -Rf $v $BUILD_DIR >&2
+        remove_temporary_mountpoint "$BUILD_DIR/outputfs" || BugError "Directory $BUILD_DIR/outputfs not empty, can not remove"
+        rmdir $v $BUILD_DIR >&2
     fi
     Log "End of program reached"
 }
diff --git a/usr/share/rear/lib/global-functions.sh b/usr/share/rear/lib/global-functions.sh
index 4264bb53..a1aec604 100644
--- a/usr/share/rear/lib/global-functions.sh
+++ b/usr/share/rear/lib/global-functions.sh
@@ -342,7 +342,44 @@ function url_path() {
     echo /${url_without_scheme#*/}
 }
 
-backup_path() {
+### Returns true if one can upload files to the URL
+function scheme_accepts_files() {
+    local scheme=$1
+    case $scheme in
+        (null|tape|obdr)
+            # tapes do not support uploading arbitrary files, one has to handle them
+            # as special case (usually passing the tape device as argument to tar)
+            # null means do not upload anything anywhere, leave the files under /var/lib/rear/output
+            return 1
+            ;;
+        (*)
+            # most URL schemes support uploading files
+            return 0
+            ;;
+    esac
+}
+
+### Returns true if URLs with the given scheme corresponds to a path inside
+### a mountable filesystem and one can put files directly into it.
+### The actual path will be returned by backup_path() / output_path().
+### If returns false, using backup_path() / output_path() has no sense
+### and one must use a scheme-specific method (like lftp or writing them to a tape)
+### to upload files to the destination instead of just "cp" or other direct filesystem access.
+### Returning true does not imply that the URL is currently mounted at a filesystem and usable,
+### only that it can be mounted (use mount_url() first)
+function scheme_supports_filesystem() {
+    local scheme=$1
+    case $scheme in
+        (null|tape|obdr|rsync|fish|ftp|ftps|hftp|http|https|sftp)
+            return 1
+            ;;
+        (*)
+            return 0
+            ;;
+    esac
+}
+
+function backup_path() {
     local scheme=$1
     local path=$2
     case $scheme in
@@ -368,13 +405,21 @@ backup_path() {
     echo "$path"
 }
 
-output_path() {
+function output_path() {
     local scheme=$1
     local path=$2
+
+    # Abort for unmountable schemes ("tape-like" or "ftp-like" schemes).
+    # Returning an empty string for them is not satisfactory: it could lead to caller putting its files
+    # under / instead of the intended location if the result is not checked for emptiness.
+    # Returning ${BUILD_DIR}/outputfs/${OUTPUT_PREFIX} for unmountable URLs is also not satisfactory:
+    # caller could put its files there expecting them to be safely at their destination,
+    # but if the directory is not a mountpoint, they would get silently lost.
+    # The caller needs to check the URL/scheme using scheme_supports_filesystem()
+    # before calling this function.
+    scheme_supports_filesystem $scheme || BugError "output_path() called with scheme $scheme that does not support filesystem access"
+
     case $scheme in
-       (null|tape)  # no path for tape required
-           path=""
-           ;;
        (file)  # type file needs a local path (must be mounted by user)
            path="$path/${OUTPUT_PREFIX}"
            ;;
@@ -387,17 +432,33 @@ output_path() {
 
 
 ### Mount URL $1 at mountpoint $2[, with options $3]
-mount_url() {
+function mount_url() {
     local url=$1
     local mountpoint=$2
     local defaultoptions="rw,noatime"
     local options=${3:-"$defaultoptions"}
+    local scheme
+
+    scheme=$( url_scheme $url )
+
+    # The cases where we return 0 are those that do not need umount and also do not need ExitTask handling.
+    # They thus need to be kept in sync with umount_url() so that RemoveExitTask is used
+    # iff AddExitTask was used in mount_url().
+
+    if ! scheme_supports_filesystem $scheme ; then
+        ### Stuff like null|tape|rsync|fish|ftp|ftps|hftp|http|https|sftp
+        ### Don't need to umount anything for these.
+        ### file: supports filesystem access, but is not mounted and unmounted,
+        ### so it has to be handled specially below.
+        ### Similarly for iso: which gets mounted and unmounted only during recovery.
+        return 0
+    fi
 
     ### Generate a mount command
     local mount_cmd
-    case $(url_scheme $url) in
-        (null|tape|file|rsync|fish|ftp|ftps|hftp|http|https|sftp)
-            ### Don't need to mount anything for these
+    case $scheme in
+        (file)
+            ### Don't need to mount anything for file:, it is already mounted by user
             return 0
             ;;
         (iso)
@@ -558,22 +619,47 @@ mount_url() {
             ;;
     esac
 
+    # create mount point
+    mkdir -p $v "$mountpoint" || Error "Could not mkdir '$mountpoint'"
+    AddExitTask "remove_temporary_mountpoint '$mountpoint'"
+
     Log "Mounting with '$mount_cmd'"
     # eval is required when mount_cmd contains single quoted stuff (e.g. see the above mount_cmd for curlftpfs)
     eval $mount_cmd || Error "Mount command '$mount_cmd' failed."
 
-    AddExitTask "umount -f $v '$mountpoint' >&2"
+    AddExitTask "perform_umount_url '$url' '$mountpoint' lazy"
     return 0
 }
 
-### Unmount url $1 at mountpoint $2
-umount_url() {
+function remove_temporary_mountpoint() {
+    if test -d "$1" ; then
+        rmdir $v "$1"
+    fi
+}
+
+### Unmount url $1 at mountpoint $2, perform mountpoint cleanup and exit task + error handling
+function umount_url() {
     local url=$1
     local mountpoint=$2
+    local scheme
 
-    case $(url_scheme $url) in
-        (null|tape|file|rsync|fish|ftp|ftps|hftp|http|https|sftp)
-            ### Don't need to umount anything for these
+    scheme=$( url_scheme $url )
+
+    # The cases where we return 0 are those that do not need umount and also do not need ExitTask handling.
+    # They thus need to be kept in sync with mount_url() so that RemoveExitTask is used
+    # iff AddExitTask was used in mount_url().
+
+    if ! scheme_supports_filesystem $scheme ; then
+        ### Stuff like null|tape|rsync|fish|ftp|ftps|hftp|http|https|sftp
+        ### Don't need to umount anything for these.
+        ### file: supports filesystem access, but is not mounted and unmounted,
+        ### so it has to be handled specially below.
+        ### Similarly for iso: which gets mounted and unmounted only during recovery.
+        return 0
+    fi
+
+    case $scheme in
+        (file)
             return 0
             ;;
         (iso)
@@ -581,42 +667,106 @@ umount_url() {
                 return 0
             fi
             ;;
-	    (sshfs)
-	        umount_cmd="fusermount -u $mountpoint"
-	    ;;
-	    (davfs)
-	        umount_cmd="umount $mountpoint"
-            # Wait for 3 sek. then remove the cache-dir /var/cache/davfs
-            sleep 30
-            # ToDo: put in here the cache-dir from /etc/davfs2/davfs.conf
-            # and delete only the just used cache
-            #rm -rf /var/cache/davfs2/*<mountpoint-hash>*
-            rm -rf /var/cache/davfs2/*outputfs*
-
-	    ;;
-        (var)
-            local var=$(url_host $url)
-            umount_cmd="${!var} $mountpoint"
+        (*)
+            # Schemes that actually need nontrivial umount are handled below.
+            # We do not handle them in the default branch because in the case of iso:
+            # it depends on the current workflow whether umount is needed or not.
+            :
+    esac
 
-            Log "Unmounting with '$umount_cmd'"
-            $umount_cmd
-            StopIfError "Unmounting failed."
+    # umount_url() is a wrapper that takes care of exit tasks and error handling and mountpoint cleanup.
+    # Therefore it also determines if exit task and mountpoint handling is required and returns early if not.
+    # The actual umount job is performed inside perform_umount_url().
+    # We do not request lazy umount here because we want umount errors to be reliably reported.
+    perform_umount_url $url $mountpoint || Error "Unmounting '$mountpoint' failed."
 
-            RemoveExitTask "umount -f $v '$mountpoint' >&2"
-            return 0
+    RemoveExitTask "perform_umount_url '$url' '$mountpoint' lazy"
+
+    remove_temporary_mountpoint "$mountpoint" && RemoveExitTask "remove_temporary_mountpoint '$mountpoint'"
+    return 0
+}
+
+### Unmount url $1 at mountpoint $2 [ lazily if $3 is set to 'lazy' and normal unmount fails ]
+function perform_umount_url() {
+    local url=$1
+    local mountpoint=$2
+    local lazy=${3:-}
+
+    if test $lazy ; then
+        if test $lazy != "lazy" ; then
+            BugError "lazy = $lazy, but it must have the value of 'lazy' or empty"
+        fi
+    fi
+
+    case $(url_scheme $url) in
+        (sshfs)
+            # does ftpfs need this special case as well?
+            fusermount -u ${lazy:+'-z'} $mountpoint
+            ;;
+        (davfs)
+            umount_davfs $mountpoint $lazy
+            ;;
+        (var)
+            local var
+            var=$(url_host $url)
+            Log "Unmounting with '${!var} $mountpoint'"
+            # lazy unmount not supported with custom umount command
+            ${!var} $mountpoint
             ;;
+        (*)
+            # usual umount command
+            umount_mountpoint $mountpoint $lazy
     esac
+    # The switch above must be the last statement in this function and the umount commands must be
+    # the last commands (or part of) in each branch. This ensures proper exit code propagation
+    # to the caller even when set -e is used.
+}
 
-    umount_mountpoint $mountpoint
-    StopIfError "Unmounting '$mountpoint' failed."
+### Helper which unmounts davfs mountpoint $1 and cleans up the cache,
+### performing lazy unmount if $2 = 'lazy' and normal unmount fails.
+function umount_davfs() {
+    local mountpoint=$1
+    local lazy="${2:-}"
 
-    RemoveExitTask "umount -f $v '$mountpoint' >&2"
-    return 0
+    if test $lazy ; then
+        if test $lazy != "lazy" ; then
+            BugError "lazy = $lazy, but it must have the value of 'lazy' or empty"
+        fi
+    fi
+
+    if umount_mountpoint $mountpoint ; then
+        # Wait for 30 seconds, then remove the cache-dir /var/cache/davfs2
+        sleep 30
+        # TODO: put in here the cache-dir from /etc/davfs2/davfs.conf
+        # and delete only the just used cache
+        #rm -rf /var/cache/davfs2/*<mountpoint-hash>*
+        rm -rf /var/cache/davfs2/*outputfs*
+    else
+        local retval=$?
+
+        if test $lazy ; then
+            # try again to unmount lazily and this time do not delete the cache, it is still in use.
+            LogPrintError "davfs cache /var/cache/davfs2/*outputfs* needs to be cleaned up manually after the lazy unmount finishes"
+            umount_mountpoint_lazy $mountpoint
+        else
+            # propagate errors from umount
+            return $retval
+        fi
+    fi
 }
 
-### Unmount mountpoint $1
-umount_mountpoint() {
+### Unmount mountpoint $1 [ lazily if $2 = 'lazy' ]
+### Default implementation for filesystems that don't need anything fancy
+### For special umount commands use perform_umount_url()
+function umount_mountpoint() {
     local mountpoint=$1
+    local lazy=${2:-}
+
+    if test $lazy ; then
+        if test $lazy != "lazy" ; then
+            BugError "lazy = $lazy, but it must have the value of 'lazy' or empty"
+        fi
+    fi
 
     ### First, try a normal unmount,
     Log "Unmounting '$mountpoint'"
@@ -636,7 +786,21 @@ umount_mountpoint() {
     fi
 
     Log "Unmounting '$mountpoint' failed."
-    return 1
+
+    if test $lazy ; then
+        umount_mountpoint_lazy $mountpoint
+    else
+        return 1
+    fi
+}
+
+### Unmount mountpoint $1 lazily
+### Preferably use "umount_mountpoint $mountpoint lazy", which attempts non-lazy unmount first.
+function umount_mountpoint_lazy() {
+    local mountpoint=$1
+
+    LogPrint "Directory $mountpoint still mounted - trying lazy umount"
+    umount $v -f -l $mountpoint >&2
 }
 
 # Change $1 to user input or leave default value on empty input
diff --git a/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh b/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh
index a43dff13..3e7512ee 100644
--- a/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh
+++ b/usr/share/rear/output/PXE/default/800_copy_to_tftp.sh
@@ -8,10 +8,12 @@
 if [[ ! -z "$PXE_TFTP_URL" ]] ; then
     # E.g. PXE_TFTP_URL=nfs://server/export/nfs/tftpboot
     local scheme=$( url_scheme $PXE_TFTP_URL )
-    local path=$( url_path $PXE_TFTP_URL )
-    mkdir -p $v "$BUILD_DIR/tftpbootfs" >&2
-    StopIfError "Could not mkdir '$BUILD_DIR/tftpbootfs'"
-    AddExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2"
+
+    # We need filesystem access to the destination (schemes like ftp:// are not supported)
+    if ! scheme_supports_filesystem $scheme ; then
+        Error "Scheme $scheme for PXE output not supported, use a scheme that supports mounting (like nfs: )"
+    fi
+
     mount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs $BACKUP_OPTIONS
     # However, we copy under $OUTPUT_PREFIX_PXE directory (usually HOSTNAME) to have different clients on one pxe server
     PXE_TFTP_LOCAL_PATH=$BUILD_DIR/tftpbootfs
@@ -74,10 +76,6 @@ fi
 if [[ ! -z "$PXE_TFTP_URL" ]] ; then
     LogPrint "Copied kernel+initrd $( du -shc $KERNEL_FILE "$TMP_DIR/$REAR_INITRD_FILENAME" | tail -n 1 | tr -s "\t " " " | cut -d " " -f 1 ) to $PXE_TFTP_URL/$OUTPUT_PREFIX_PXE"
     umount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs
-    rmdir $BUILD_DIR/tftpbootfs >&2
-    if [[ $? -eq 0 ]] ; then
-        RemoveExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2"
-    fi
 else
     # legacy way PXE_TFTP_PATH
     LogPrint "Copied kernel+initrd $( du -shc $KERNEL_FILE "$TMP_DIR/$REAR_INITRD_FILENAME" | tail -n 1 | tr -s "\t " " " | cut -d " " -f 1 ) to $PXE_TFTP_PATH"
diff --git a/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh b/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh
index fce4bcf1..5041a3bc 100644
--- a/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh
+++ b/usr/share/rear/output/PXE/default/810_create_pxelinux_cfg.sh
@@ -1,4 +1,4 @@
-# 81_create_pxelinux_cfg.sh
+# 810_create_pxelinux_cfg.sh
 #
 # create pxelinux config on PXE server for Relax-and-Recover
 #
@@ -11,10 +11,12 @@ if [[ ! -z "$PXE_CONFIG_URL" ]] ; then
     # E.g. PXE_CONFIG_URL=nfs://server/export/nfs/tftpboot/pxelinux.cfg
     # Better be sure that on 'server' the directory /export/nfs/tftpboot/pxelinux.cfg exists
     local scheme=$( url_scheme $PXE_CONFIG_URL )
-    local path=$( url_path $PXE_CONFIG_URL )
-    mkdir -p $v "$BUILD_DIR/tftpbootfs" >&2
-    StopIfError "Could not mkdir '$BUILD_DIR/tftpbootfs'"
-    AddExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2"
+
+    # We need filesystem access to the destination (schemes like ftp:// are not supported)
+    if ! scheme_supports_filesystem $scheme ; then
+        Error "Scheme $scheme for PXE output not supported, use a scheme that supports mounting (like nfs: )"
+    fi
+
     mount_url $PXE_CONFIG_URL $BUILD_DIR/tftpbootfs $BACKUP_OPTIONS
     PXE_LOCAL_PATH=$BUILD_DIR/tftpbootfs
 else
@@ -105,10 +107,6 @@ popd >/dev/null
 if [[ ! -z "$PXE_CONFIG_URL" ]] ; then
     LogPrint "Created pxelinux config '${PXE_CONFIG_PREFIX}$HOSTNAME' and symlinks for $PXE_CREATE_LINKS adresses in $PXE_CONFIG_URL"
     umount_url $PXE_TFTP_URL $BUILD_DIR/tftpbootfs
-    rmdir $BUILD_DIR/tftpbootfs >&2
-    if [[ $? -eq 0 ]] ; then
-        RemoveExitTask "rm -Rf $v $BUILD_DIR/tftpbootfs >&2"
-    fi
 else
     LogPrint "Created pxelinux config '${PXE_CONFIG_PREFIX}$HOSTNAME' and symlinks for $PXE_CREATE_LINKS adresses in $PXE_CONFIG_PATH"
     # Add to result files
diff --git a/usr/share/rear/output/PXE/default/820_copy_to_net.sh b/usr/share/rear/output/PXE/default/820_copy_to_net.sh
deleted file mode 100644
index 39cd316d..00000000
--- a/usr/share/rear/output/PXE/default/820_copy_to_net.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-
-# 820_copy_to_net.sh
-
-# Check if we have a target location OUTPUT_URL
-test "$OUTPUT_URL" || return 0
-
-local scheme=$( url_scheme $OUTPUT_URL )
-local result_file=""
-local path=""
-
-case "$scheme" in
-    (nfs|cifs|usb|tape|file|davfs)
-        # The ISO has already been transferred by NETFS.
-        return 0
-        ;;
-    (fish|ftp|ftps|hftp|http|https|sftp)
-        LogPrint "Transferring PXE files to $OUTPUT_URL"
-        for result_file in "${RESULT_FILES[@]}" ; do
-            path=$(url_path $OUTPUT_URL)
-
-            # Make sure that destination directory exists, otherwise lftp would copy
-            # RESULT_FILES into last available directory in the path.
-            # e.g. OUTPUT_URL=sftp://<host_name>/iso/server1 and have "/iso/server1"
-            # directory missing, would upload RESULT_FILES into sftp://<host_name>/iso/
-            lftp -c "$OUTPUT_LFTP_OPTIONS; open $OUTPUT_URL; mkdir -fp ${path}"
-
-            LogPrint "Transferring file: $result_file"
-            lftp -c "$OUTPUT_LFTP_OPTIONS; open $OUTPUT_URL; mput $result_file" || Error "lftp failed to transfer '$result_file' to '$OUTPUT_URL' (lftp exit code: $?)"
-        done
-        ;;
-    (rsync)
-        LogPrint "Transferring PXE files to $OUTPUT_URL"
-        for result_file in "${RESULT_FILES[@]}" ; do
-            LogPrint "Transferring file: $result_file"
-            rsync -a $v "$result_file" "$OUTPUT_URL" || Error "Problem transferring '$result_file' to $OUTPUT_URL"
-        done
-        ;;
-    (*) Error "Invalid scheme '$scheme' in '$OUTPUT_URL'."
-        ;;
-esac
-
diff --git a/usr/share/rear/output/default/100_mount_output_path.sh b/usr/share/rear/output/default/100_mount_output_path.sh
index 22ef36de..34ea8e5e 100644
--- a/usr/share/rear/output/default/100_mount_output_path.sh
+++ b/usr/share/rear/output/default/100_mount_output_path.sh
@@ -1,9 +1,3 @@
-# create mount point
-mkdir -p $v "$BUILD_DIR/outputfs" >&2
-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'"
-
-AddExitTask "rm -Rf $v $BUILD_DIR/outputfs >&2"
-
 if [[ "$OUTPUT_MOUNTCMD" ]] ; then
     OUTPUT_URL="var://$OUTPUT_MOUNTCMD"
 fi
diff --git a/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh b/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh
index 00339a96..06326114 100644
--- a/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh
+++ b/usr/share/rear/output/default/150_save_copy_of_prefix_dir.sh
@@ -3,22 +3,20 @@
 [ -z "${KEEP_OLD_OUTPUT_COPY}" ] && return
 
 # do not do this for tapes and special attention for file:///path
-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL"
-local scheme=$(url_scheme ${!url})
-local path=$(url_path ${!url})
-local opath=$(output_path $scheme $path)
+local scheme=$( url_scheme $OUTPUT_URL )
+local path=$( url_path $OUTPUT_URL )
 
-# if $opath is empty return silently (e.g. scheme tape)
-[ -z "$opath" ] && return 0
+# if filesystem access to url is unsupported return silently (e.g. scheme tape)
+scheme_supports_filesystem $scheme || return 0
+
+local opath=$( output_path $scheme $path )
 
 # an old lockfile from a previous run not cleaned up by output is possible
 [[ -f ${opath}/.lockfile ]] && rm -f ${opath}/.lockfile >&2
 
 if test -d "${opath}" ; then
-    rm -rf $v "${opath}.old" >&2
-    StopIfError "Could not remove '${opath}.old'"
+    rm -rf $v "${opath}.old" || Error "Could not remove '${opath}.old'"
     # below statement was 'cp -af' instead of 'mv -f' (see issue #192)
-    mv -f $v "${opath}" "${opath}.old" >&2
-    StopIfError "Could not move '${opath}'"
+    mv -f $v "${opath}" "${opath}.old" || Error "Could not move '${opath}'"
 fi
 # the ${BUILD_DIR}/outputfs/${OUTPUT_PREFIX} will be created by output/default/200_make_prefix_dir.sh
diff --git a/usr/share/rear/output/default/200_make_prefix_dir.sh b/usr/share/rear/output/default/200_make_prefix_dir.sh
index b8892f2f..606e1c86 100644
--- a/usr/share/rear/output/default/200_make_prefix_dir.sh
+++ b/usr/share/rear/output/default/200_make_prefix_dir.sh
@@ -3,25 +3,21 @@
 # The $OUTPUT_PREFIX directory defaults to $HOSTNAME.
 #
 # This happens usually under a mounted network filesystem share
-# e.g. in case of BACKUP_URL=nfs://NFS.server.IP.address/remote/nfs/share
-# but it is also happens for local stuff like BACKUP_URL=usb:///dev/disk/by-label/REAR-000
+# e.g. in case of OUTPUT_URL=nfs://NFS.server.IP.address/remote/nfs/share
+# but it also happens for local stuff like OUTPUT_URL=usb:///dev/disk/by-label/REAR-000
 #
 # Do not do this for tapes and special attention for file:///path
+local scheme=$( url_scheme $OUTPUT_URL )
+local path=$( url_path $OUTPUT_URL )
 
-# Generate url variable name that depends on the current stage,
-# e.g. BACKUP_URL or OUTPUT_URL:
-url="$( echo $stage | tr '[:lower:]' '[:upper:]' )_URL"
+# If filesystem access to url is unsupported return silently (e.g. scheme tape)
+scheme_supports_filesystem $scheme || return 0
 
-local scheme=$( url_scheme ${!url} )
-local path=$( url_path ${!url} )
 local opath=$( output_path $scheme $path )
 
-# If $opath is empty return silently (e.g. scheme tape):
-test "$opath" || return 0
-
 # Create $OUTPUT_PREFIX sub-directory:
 mkdir -p $v -m0750 "$opath" && return
 
-# A failure to cerate the $OUTPUT_PREFIX sub-directory is fatal: 
-Error "Failed to create '$opath' directory for $url=${!url}"
+# A failure to create the $OUTPUT_PREFIX sub-directory is fatal:
+Error "Failed to create '$opath' directory for OUTPUT_URL=$OUTPUT_URL"
 
diff --git a/usr/share/rear/output/default/250_create_lock.sh b/usr/share/rear/output/default/250_create_lock.sh
index 49c75601..d792b036 100644
--- a/usr/share/rear/output/default/250_create_lock.sh
+++ b/usr/share/rear/output/default/250_create_lock.sh
@@ -2,15 +2,14 @@
 # made by a previous mkrescue run when the variable KEEP_OLD_OUTPUT_COPY has been set
 
 # do not do this for tapes and special attention for file:///path
-url="$( echo $stage | tr '[:lower:]' '[:upper:]')_URL"
-local scheme=$(url_scheme ${!url})
-local path=$(url_path ${!url})
-local opath=$(output_path $scheme $path)
+local scheme=$( url_scheme $OUTPUT_URL )
+local path=$( url_path $OUTPUT_URL )
 
-# if $opath is empty return silently (e.g. scheme tape)
-[ -z "$opath" ] && return 0
+# if filesystem access to url is unsupported return silently (e.g. scheme tape)
+scheme_supports_filesystem $scheme || return 0
+
+local opath=$( output_path $scheme $path )
 
 if test -d "${opath}" ; then
-    > "${opath}/.lockfile"
-    StopIfError "Could not create '${opath}/.lockfile'"
+    > "${opath}/.lockfile" || Error "Could not create '${opath}/.lockfile'"
 fi
diff --git a/usr/share/rear/output/default/950_copy_result_files.sh b/usr/share/rear/output/default/950_copy_result_files.sh
index 545b3f7d..77f54d51 100644
--- a/usr/share/rear/output/default/950_copy_result_files.sh
+++ b/usr/share/rear/output/default/950_copy_result_files.sh
@@ -5,16 +5,25 @@
 
 # For example for "rear mkbackuponly" there are usually no result files
 # that would need to be copied here to the output location:
-test "$RESULT_FILES" || return 0
+test "${RESULT_FILES[*]:-}" || return 0
 
 local scheme=$( url_scheme $OUTPUT_URL )
 local host=$( url_host $OUTPUT_URL )
 local path=$( url_path $OUTPUT_URL )
-local opath=$( output_path $scheme $path )
 
-# if $opath is empty return silently (e.g. scheme tape)
-if [[ -z "$opath" || -z "$OUTPUT_URL" || "$scheme" == "obdr" || "$scheme" == "tape" ]] ; then
-    return 0
+if [ -z "$OUTPUT_URL" ] || ! scheme_accepts_files $scheme ; then
+    if [ "$scheme" == "null" -o -z "$OUTPUT_URL" ] ; then
+        # There are result files to copy, but OUTPUT_URL=null indicates that we are not interested in them
+        # TODO: empty OUTPUT_URL seems to be equivalent to null, should we continue to allow that,
+        # or enforce setting it explicitly?
+        return 0
+    else
+        # There are files to copy, but schemes like tape: do not allow files to be stored. The files would be lost.
+        # Do not allow that.
+        # Schemes like obdr: that store the results themselves should clear RESULT_FILES to indicate that nothing is to be done.
+        # Is this considered a bug in ReaR (BugError), or a user misconfiguration (Error) when this happens?
+        BugError "Output scheme $scheme does not accept result files ${RESULT_FILES[*]}, use OUTPUT_URL=null if you don't want to copy them anywhere."
+    fi
 fi
 
 LogPrint "Copying resulting files to $scheme location"
@@ -38,66 +47,76 @@ RESULT_FILES+=( "$TMP_DIR/$final_logfile_name" )
 LogPrint "Saving $RUNTIME_LOGFILE as $final_logfile_name to $scheme location"
 
 # The real work (actually copying resulting files to the output location):
+if scheme_supports_filesystem $scheme ; then
+    # We can access the destination as a mounted filesystem. Do nothing special,
+    # simply copy the output files there. (Covers stuff like nfs|cifs|usb|file|sshfs|ftpfs|davfs.)
+    # This won't work for iso:// , but iso can't be a OUTPUT_URL scheme, this is checked in
+    # prep/default/040_check_backup_and_output_scheme.sh
+    # This covers also unknown schemes, because mount_url() will attempt to mount them and fail if this is not possible,
+    # so if we got here, the URL has been mounted successfully.
+    local opath
+    opath=$( output_path $scheme $path )
+    LogPrint "Copying result files '${RESULT_FILES[*]}' to $opath at $scheme location"
+    # Copy each result file one by one to avoid usually false error exits as in
+    # https://github.com/rear/rear/issues/1711#issuecomment-380009044
+    # where in case of an improper RESULT_FILES array member 'cp' can error out with something like
+    #   cp: will not overwrite just-created '/tmp/rear.XXX/outputfs/f121/rear-f121.log' with '/tmp/rear.XXX/tmp/rear-f121.log'
+    # See
+    # https://stackoverflow.com/questions/4669420/have-you-ever-got-this-message-when-moving-a-file-mv-will-not-overwrite-just-c
+    # which is about the same for 'mv', how to reproduce it:
+    #   mkdir a b c
+    #   touch a/f b/f
+    #   mv a/f b/f c/
+    #     mv: will not overwrite just-created 'c/f' with 'b/f'
+    # It happens because two different files with the same name would be moved to the same place with only one command.
+    # The -f option won't help for this case, it only applies when there already is a target file that will be overwritten.
+    # Accordingly it is sufficient (even without '-f') to copy each result file one by one:
+    for result_file in "${RESULT_FILES[@]}" ; do
+
+        # note: s390 kernel copy is only through nfs
+        #
+        # s390 optional naming override of initrd and kernel to match the s390 filesystem naming conventions
+        # on s390a there is an option to name the initrd and kernel in the form of
+        # file name on s390 are in the form of name type mode
+        # the name is the userid or vm name and the type is initrd or kernel
+        # if the vm name (cp q userid) is HOSTA then the files written will be HOSTA kernel and HOSTA initrd
+        # vars needed:
+        # ZVM_NAMING      - set in local.conf, if Y then enable naming override
+        # ZVM_KERNEL_NAME - keeps track of kernel name in results array
+        # ARCH            - override only if ARCH is Linux-s390
+        #
+        # initrd name override is handled in 900_create_initramfs.sh
+        # kernel name override is handled in 400_guess_kernel.sh
+        # kernel name override is handled in 950_copy_result_files.sh
+
+        if [[ "$ZVM_NAMING" == "Y" && "$ARCH" == "Linux-s390" ]] ; then
+           if [[ -z $opath ]] ; then
+              Error "Output path is not set, please check OUTPUT_URL in local.conf."
+           fi
+
+           if [ "$ZVM_KERNEL_NAME" == "$result_file" ] ; then
+              VM_UID=$(vmcp q userid |awk '{ print $1 }')
+
+              if [[ -z $VM_UID ]] ; then
+                 Error "VM UID is not set, VM UID is set from call to vmcp.  Please make sure vmcp is available and 'vmcp q userid' returns VM ID"
+              fi
+
+              LogPrint "s390 kernel naming override: $result_file will be written as $VM_UID.kernel"
+              cp $v "$result_file" $opath/$VM_UID.kernel || Error "Could not copy result file $result_file to $opath/$VM_UID.kernel at $scheme location"
+           else
+              cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location"
+           fi
+        else
+           cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location"
+        fi
+    done
+
+    return 0
+fi
+
+# Filesystem access to output destination not supported, use a scheme-specific tool (rsync, lftp)
 case "$scheme" in
-    (nfs|cifs|usb|file|sshfs|ftpfs|davfs)
-        LogPrint "Copying result files '${RESULT_FILES[@]}' to $opath at $scheme location"
-        # Copy each result file one by one to avoid usually false error exits as in
-        # https://github.com/rear/rear/issues/1711#issuecomment-380009044
-        # where in case of an improper RESULT_FILES array member 'cp' can error out with something like
-        #   cp: will not overwrite just-created '/tmp/rear.XXX/outputfs/f121/rear-f121.log' with '/tmp/rear.XXX/tmp/rear-f121.log'
-        # See
-        # https://stackoverflow.com/questions/4669420/have-you-ever-got-this-message-when-moving-a-file-mv-will-not-overwrite-just-c
-        # which is about the same for 'mv', how to reproduce it:
-        #   mkdir a b c
-        #   touch a/f b/f
-        #   mv a/f b/f c/
-        #     mv: will not overwrite just-created 'c/f' with 'b/f'
-        # It happens because two different files with the same name would be moved to the same place with only one command.
-        # The -f option won't help for this case, it only applies when there already is a target file that will be overwritten.
-        # Accordingly it is sufficient (even without '-f') to copy each result file one by one:
-        for result_file in "${RESULT_FILES[@]}" ; do
-
-            # note: s390 kernel copy is only through nfs
-            #
-            # s390 optional naming override of initrd and kernel to match the s390 filesytem naming conventions
-            # on s390a there is an option to name the initrd and kernel in the form of
-            # file name on s390 are in the form of name type mode
-            # the name is the userid or vm name and the type is initrd or kernel
-            # if the vm name (cp q userid) is HOSTA then the files written will be HOSTA kernel and HOSTA initrd
-            # vars needed:
-            # ZVM_NAMING      - set in local.conf, if Y then enable naming override
-            # ZVM_KERNEL_NAME - keeps track of kernel name in results array
-            # ARCH            - override only if ARCH is Linux-s390
-            #
-            # initrd name override is handled in 900_create_initramfs.sh
-            # kernel name override is handled in 400_guess_kernel.sh
-            # kernel name override is handled in 950_copy_result_files.sh
-
-            if [[ "$ZVM_NAMING" == "Y" && "$ARCH" == "Linux-s390" ]] ; then
-               if [[ -z $opath ]] ; then
-                  Error "Output path is not set, please check OUTPUT_URL in local.conf."
-               fi
-
-               if [ "$ZVM_KERNEL_NAME" == "$result_file" ] ; then
-                  VM_UID=$(vmcp q userid |awk '{ print $1 }')
-
-                  if [[ -z $VM_UID ]] ; then
-                     Error "VM UID is not set, VM UID is set from call to vmcp.  Please make sure vmcp is available and 'vmcp q userid' returns VM ID"
-                  fi
-
-                  LogPrint "s390 kernel naming override: $result_file will be written as $VM_UID.kernel"
-                  cp $v "$result_file" $opath/$VM_UID.kernel || Error "Could not copy result file $result_file to $opath/$VM_UID.kernel at $scheme location"
-               else
-                  cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location"
-               fi
-            else
-               cp $v "$result_file" $opath/ || Error "Could not copy result file $result_file to $opath at $scheme location"
-            fi
-        done
-        ;;
     (fish|ftp|ftps|hftp|http|https|sftp)
-        # FIXME: Verify if usage of $array[*] instead of "${array[@]}" is actually intended here
-        # see https://github.com/rear/rear/issues/1068
         LogPrint "Copying result files '${RESULT_FILES[*]}' to $scheme location"
         Log "lftp -c $OUTPUT_LFTP_OPTIONS; open $OUTPUT_URL; mput ${RESULT_FILES[*]}"
 
@@ -111,12 +130,15 @@ case "$scheme" in
     (rsync)
         # If BACKUP = RSYNC output/RSYNC/default/900_copy_result_files.sh took care of it:
         test "$BACKUP" = "RSYNC" && return 0
-        LogPrint "Copying result files '${RESULT_FILES[@]}' to $scheme location"
-        Log "rsync -a $v ${RESULT_FILES[@]} ${host}:${path}"
+        LogPrint "Copying result files '${RESULT_FILES[*]}' to $scheme location"
+        Log "rsync -a $v ${RESULT_FILES[*]} ${host}:${path}"
         rsync -a $v "${RESULT_FILES[@]}" "${host}:${path}" || Error "Problem transferring result files to $OUTPUT_URL"
         ;;
     (*)
-        Error "Invalid scheme '$scheme' in '$OUTPUT_URL'."
+        # Should be unreachable, if we got here, it is a bug.
+        # Unknown schemes are handled in mount_url(), which tries to mount them and aborts if they are unsupported.
+        # If they can be mounted, they fall under the scheme_supports_filesystem branch above.
+        BugError "Invalid scheme '$scheme' in '$OUTPUT_URL'."
         ;;
 esac
 
diff --git a/usr/share/rear/output/default/970_remove_lock.sh b/usr/share/rear/output/default/970_remove_lock.sh
index 56640839..3b1b97cc 100644
--- a/usr/share/rear/output/default/970_remove_lock.sh
+++ b/usr/share/rear/output/default/970_remove_lock.sh
@@ -1,10 +1,11 @@
 # remove the lockfile
 local scheme=$(url_scheme $OUTPUT_URL)
 local path=$(url_path $OUTPUT_URL)
-local opath=$(output_path $scheme $path)
 
-# if $opath is empty return silently (e.g. scheme tape)
-[ -z "$opath" ] && return 0
+# if filesystem access to url is unsupported return silently (e.g. scheme tape)
+scheme_supports_filesystem $scheme || return 0
+
+local opath=$( output_path $scheme $path )
 
 # when OUTPUT_URL=BACKUP_URL we keep the lockfile to avoid double moves of the directory
 [[ "$OUTPUT_URL" != "$BACKUP_URL" ]] && rm -f $v "${opath}/.lockfile" >&2
diff --git a/usr/share/rear/output/default/980_umount_output_dir.sh b/usr/share/rear/output/default/980_umount_output_dir.sh
index 9a9995bd..abf0cd53 100644
--- a/usr/share/rear/output/default/980_umount_output_dir.sh
+++ b/usr/share/rear/output/default/980_umount_output_dir.sh
@@ -9,12 +9,3 @@ if [[ -z "$OUTPUT_URL" ]] ; then
 fi
 
 umount_url $OUTPUT_URL $BUILD_DIR/outputfs
-
-[[ -d $BUILD_DIR/outputfs/$NETFS_PREFIX ]] && rm -rf $v $BUILD_DIR/outputfs/$NETFS_PREFIX
-[[ -d $BUILD_DIR/outputfs/$RSYNC_PREFIX ]] && rm -rf $v $BUILD_DIR/outputfs/$RSYNC_PREFIX
-
-rmdir $v $BUILD_DIR/outputfs >&2
-if [[ $? -eq 0 ]] ; then
-    # the argument to RemoveExitTask has to be identical to the one given to AddExitTask
-    RemoveExitTask "rm -Rf $v $BUILD_DIR/outputfs >&2"
-fi
diff --git a/usr/share/rear/prep/BORG/default/250_mount_usb.sh b/usr/share/rear/prep/BORG/default/250_mount_usb.sh
index c13fd088..05be0179 100644
--- a/usr/share/rear/prep/BORG/default/250_mount_usb.sh
+++ b/usr/share/rear/prep/BORG/default/250_mount_usb.sh
@@ -8,10 +8,5 @@
 # When BORGBACKUP_HOST is set, we don't need to mount anything as SSH
 # backup destination will be handled internally by Borg it self.
 if [[ -z $BORGBACKUP_HOST ]]; then
-    # Has to be $verbose, not "$verbose", since it's used as option.
-    # shellcheck disable=SC2086,SC2154
-    mkdir -p $verbose "$borg_dst_dev" >&2
-    StopIfError "Could not mkdir '$borg_dst_dev'"
-
     mount_url "usb://$USB_DEVICE" "$borg_dst_dev"
 fi
diff --git a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh
deleted file mode 100644
index 2fbcc6cd..00000000
--- a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh
+++ /dev/null
@@ -1,300 +0,0 @@
-# Copied from ../../NETFS/default/070_set_backup_archive.sh for YUM
-### Determine the name of the backup archive
-### This needs to be after we special case USB devices.
-
-# FIXME: backuparchive is no local variable (regardless that it is lowercased)
-
-# If TAPE_DEVICE is specified, use that:
-if test "$TAPE_DEVICE" ; then
-    backuparchive="$TAPE_DEVICE"
-    LogPrint "Using backup archive '$backuparchive'"
-    return
-fi
-
-local backup_file_suffix="$BACKUP_PROG_SUFFIX$BACKUP_PROG_COMPRESS_SUFFIX"
-local backup_file_name="$BACKUP_PROG_ARCHIVE$backup_file_suffix"
-
-local scheme=$( url_scheme $BACKUP_URL )
-local path=$( url_path $BACKUP_URL )
-case "$scheme" in
-    (file|iso)
-        # Define the output path according to the scheme
-        local outputpath=$( backup_path $scheme $path )
-        backuparchive="$outputpath/$backup_file_name"
-        LogPrint "Using backup archive '$backuparchive'"
-        return
-        ;;
-    (tape)
-        # TODO: Check if that case is really needed.
-        # Perhaps prep/default/030_translate_tape.sh does already all what is needed.
-        backuparchive=$path
-        LogPrint "Using backup archive '$backuparchive'"
-        return
-        ;;
-esac
-
-local backup_directory=$BUILD_DIR/outputfs/$NETFS_PREFIX
-
-# Normal (i.e. non-incremental/non-differential) backup:
-if ! test "incremental" = "$BACKUP_TYPE" -o "differential" = "$BACKUP_TYPE" ; then
-    # In case of normal (i.e. non-incremental) backup there is only one restore archive
-    # and its name is the same as the backup archive (usually 'backup.tar.gz'):
-    backuparchive="$backup_directory/$backup_file_name"
-    LogPrint "Using backup archive '$backuparchive'"
-    # This script is also run during "rear recover/restoreonly" where RESTORE_ARCHIVES must be set.
-    local backup_restore_workflows=( "recover" "restoreonly" )
-    if IsInArray $WORKFLOW ${backup_restore_workflows[@]} ; then
-        # Only set RESTORE_ARCHIVES the backup archive is actually accessible
-        # cf. https://github.com/rear/rear/issues/1166
-        if test -r "$backuparchive" ; then
-            RESTORE_ARCHIVES=( "$backuparchive" )
-        else
-            # In case of USB backup there is the subsequent 540_choose_backup_archive.sh script
-            # that shows a backup selection dialog when RESTORE_ARCHIVES is not already set.
-            if test "usb" = "$scheme" ; then
-                LogPrint "Backup archive '$backuparchive' not readable. Need to select another one."
-            else
-                Error "Backup archive '$backuparchive' not readable."
-            fi
-        fi
-    fi
-    return
-fi
-
-# Incremental or differential backup:
-set -e -u -o pipefail
-# Incremental or differential backup only works for the NETFS backup method
-# and only with the 'tar' backup program:
-if ! test "NETFS" = "$BACKUP" -a "tar" = "$BACKUP_PROG" ; then
-    Error "BACKUP_TYPE incremental or differential only works with BACKUP=NETFS and BACKUP_PROG=tar"
-fi
-# Incremental or differential backup is currently only known to work with BACKUP_URL=nfs://.
-# Other BACKUP_URL schemes may work and at least BACKUP_URL=usb:///... needs special setup
-# to work with incremental or differential backup (see https://github.com/rear/rear/issues/1145):
-if test "usb" = "$scheme" ; then
-    # When USB_SUFFIX is set the compliance mode is used where
-    # backup on USB works in compliance with backup on NFS which means
-    # a fixed backup directory where incremental or differential backups work.
-    # Use plain $USB_SUFFIX and not "$USB_SUFFIX" because when USB_SUFFIX contains only blanks
-    # test "$USB_SUFFIX" would result true because test " " results true:
-    test $USB_SUFFIX || Error "BACKUP_TYPE incremental or differential requires USB_SUFFIX for BACKUP_URL=usb"
-fi
-# Incremental or differential backup and keeping old backup contradict each other (mutual exclusive)
-# so that NETFS_KEEP_OLD_BACKUP_COPY must not be 'true' in case of incremental or differential backup:
-if test "$NETFS_KEEP_OLD_BACKUP_COPY" ; then
-    NETFS_KEEP_OLD_BACKUP_COPY=""
-    LogPrint "Disabled NETFS_KEEP_OLD_BACKUP_COPY because BACKUP_TYPE incremental or differential does not work with that"
-fi
-# For incremental or differential backup some date values (weekday, YYYY-MM-DD, HHMM) are needed
-# that must be consistent for one single point of the current time which means
-# one cannot call the 'date' command several times because then there would be
-# a small probability that e.g. weekday, YYYY-MM-DD, HHMM do not match
-# one single point in time (in particular when midnight passes in between).
-# Therefore the output of one single 'date' call is storend in an array and
-# the array elements are then assinged to individual variables as needed:
-local current_date_output=( $( date '+%a %Y-%m-%d %H%M' ) )
-local current_weekday="${current_date_output[0]}"
-local current_yyyy_mm_dd="${current_date_output[1]}"
-local current_hhmm="${current_date_output[2]}"
-# The date FULLBACKUP_OUTDATED_DAYS ago is needed to check if the latest full backup is too old.
-# When the latest full backup is more than FULLBACKUP_OUTDATED_DAYS ago a new full backup is made.
-# This separated call of the 'date' command which is technically needed because it is
-# for another point in time (e.g. 7 days ago) is run after the above call of the 'date'
-# command for the current time to be on the safe side when midnight passes in between
-# both 'date' commands which would then result that a new full backup is made
-# when the latest full backup is basically right now FULLBACKUP_OUTDATED_DAYS ago because
-# the stored date of the latest full backup is the current date at the time when it was made.
-# Example (assuming FULLBACKUP_OUTDATED_DAYS=7 ):
-# The latest full backup was made on Sunday January 10 in 2016 (just before midnight).
-# One week later this script runs again while midnight passes between the two 'date' calls
-# so that current_date_output[@]="Sun 2016-01-17 0000" (still Sunday January 17 in 2016)
-# and yyyymmdd_max_days_ago=20160111 (already Monday January 11 in 2016), then
-# Sunday January 10 is older than Monday January 11 so that a new full backup is made:
-test "$FULLBACKUP_OUTDATED_DAYS" || FULLBACKUP_OUTDATED_DAYS="7"
-local yyyymmdd_max_days_ago=$( date '+%Y%m%d' --date="$FULLBACKUP_OUTDATED_DAYS days ago" )
-# Full backup file names are of the form YYYY-MM-DD-HHMM-F.tar.gz
-# where the 'F' denotes a full backup:
-local full_backup_marker="F"
-# Incremental backup file names are of the form YYYY-MM-DD-HHMM-I.tar.gz
-# where the 'I' denotes an incremental backup:
-local incremental_backup_marker="I"
-# Differential backup file names are of the form YYYY-MM-DD-HHMM-D.tar.gz
-# where the last 'D' denotes a differential backup:
-local differential_backup_marker="D"
-# In case of incremental or differential backup the RESTORE_ARCHIVES contains
-# first the latest full backup file.
-# In case of incremental backup the RESTORE_ARCHIVES contains
-# after the latest full backup file each incremental backup
-# in the ordering how they must be restored.
-# For example when the latest full backup was made on Sunday
-# plus each subsequent weekday a separated incremental backup was made,
-# then during a "rear recover" on Wednesday morning
-# first the full backup from Sunday has to be restored,
-# then the incremental backup from Monday, and
-# finally the incremental backup from Tuesday.
-# In case of differential backup the RESTORE_ARCHIVES contains
-# after the latest full backup file the latest differential backup.
-# For example when the latest full backup was made on Sunday
-# plus each subsequent weekday a separated differential backup was made,
-# then during a "rear recover" on Wednesday morning
-# first the full backup from Sunday has to be restored,
-# and finally the differential backup from Tuesday
-# (i.e. the differential backup from Monday is skipped).
-# The date format YYYY-MM-DD that is used here is crucial.
-# It is the ISO 8601 format 'year-month-day' to specify a day of a year
-# that is accepted by 'tar' for the '--newer' option,
-# see the GNU tar manual section "Operating Only on New Files"
-# at https://www.gnu.org/software/tar/manual/html_node/after.html
-# and the GNU tar manual section "Calendar date items"
-# at https://www.gnu.org/software/tar/manual/html_node/Calendar-date-items.html#SEC124
-local date_glob_regex="[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]"
-local date_time_glob_regex="$date_glob_regex-[0-9][0-9][0-9][0-9]"
-# Determine what kind of backup must be created, 'full' or 'incremental' or 'differential'
-# (the empty default means it is undecided what kind of backup must be created):
-local create_backup_type=""
-# Code regarding creating a backup is useless during "rear recover" and
-# messages about creating a backup are misleading during "rear recover":
-local recovery_workflows=( "recover" "layoutonly" "restoreonly" )
-if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then
-    # When today is a specified full backup day, do a full backup in any case
-    # (regardless if there is already a full backup of this day):
-    if IsInArray "$current_weekday" "${FULLBACKUPDAY[@]}" ; then
-        create_backup_type="full"
-        LogPrint "Today's weekday ('$current_weekday') is a full backup day that triggers a new full backup in any case"
-    fi
-fi
-# Get the latest full backup (if exists):
-local full_backup_glob_regex="$date_time_glob_regex-$full_backup_marker$backup_file_suffix"
-# Here things like 'find /path/to/dir -name '*.tar.gz' | sort' are used because
-# one cannot use bash globbing via commands like 'ls /path/to/dir/*.tar.gz'
-# because /usr/sbin/rear sets the nullglob bash option which leads to plain 'ls'
-# when '/path/to/dir/*.tar.gz' matches nothing (i.e. when no backup file exists)
-# so that then plain 'ls' would result nonsense.
-local latest_full_backup=$( find $backup_directory -name "$full_backup_glob_regex" | sort | tail -n1 )
-# A latest full backup is found:
-if test "$latest_full_backup" ; then
-    local latest_full_backup_file_name=$( basename "$latest_full_backup" )
-    # The full_or_incremental_backup_glob_regex is also needed below for non-"recover" WORKFLOWs
-    # to set the right variables for creating an incremental backup:
-    local full_or_incremental_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$incremental_backup_marker]$backup_file_suffix"
-    # Code regarding creating a backup is useless during "rear recover" and
-    # messages about creating a backup are misleading during "rear recover":
-    if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then
-        # There is nothing to do here if it is already decided that
-        # a full backup must be created (see "full backup day" above"):
-        if ! test "full" = "$create_backup_type" ; then
-            local latest_full_backup_date=$( echo $latest_full_backup_file_name | grep -o "$date_glob_regex" )
-            local yyyymmdd_latest_full_backup=$( echo $latest_full_backup_date | tr -d '-' )
-            # Check if the latest full backup is too old:
-            if test $yyyymmdd_latest_full_backup -lt $yyyymmdd_max_days_ago ; then
-                create_backup_type="full"
-                LogPrint "Latest full backup date '$latest_full_backup_date' too old (more than $FULLBACKUP_OUTDATED_DAYS days ago) triggers new full backup"
-            else
-                # When a latest full backup is found that is not too old
-                # a BACKUP_TYPE (incremental or differential) backup will be created:
-                create_backup_type="$BACKUP_TYPE"
-                LogPrint "Latest full backup found ($latest_full_backup_file_name) triggers $BACKUP_TYPE backup"
-            fi
-        fi
-    else
-        # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set:
-        case "$BACKUP_TYPE" in
-            (incremental)
-                # When a latest full backup is found use that plus all later incremental backups for restore:
-                # The following command is a bit tricky:
-                # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-I.tar.gz files in the backup directory and sorts them
-                # and finally it outputs only those that match the latest full backup file name and incremental backups that got sorted after that
-                # where it is mandatory that the backup file names sort by date (i.e. date must be the leading part of the backup file names):
-                RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" ) )
-                ;;
-            (differential)
-                # For differential backup use the latest full backup plus the one latest differential backup for restore:
-                # The following command is a bit tricky:
-                # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-D.tar.gz files in the backup directory and sorts them
-                # then it outputs only those that match the latest full backup file name and all differential backups that got sorted after that
-                # and then it outputs only the first line (i.e. the full backup) and the last line (i.e. the latest differential backup)
-                # but when no differential backup exists (i.e. when only the full backup exists) the first line is also the last line
-                # so that "sed -n -e '1p;$p'" outputs the full backup twice which is corrected by the final "sort -u":
-                local full_or_differential_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$differential_backup_marker]$backup_file_suffix"
-                RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_differential_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | sed -n -e '1p;$p' | sort -u ) )
-                ;;
-            (*)
-                BugError "Unexpected BACKUP_TYPE '$BACKUP_TYPE'"
-                ;;
-        esac
-        # Tell the user what will be restored:
-        local restore_archives_file_names=""
-        for restore_archive in "${RESTORE_ARCHIVES[@]}" ; do
-            restore_archives_file_names="$restore_archives_file_names $( basename "$restore_archive" )"
-        done
-        LogPrint "For backup restore using $restore_archives_file_names"
-    fi
-# No latest full backup is found:
-else
-    # Code regarding creating a backup is useless during "rear recover" and
-    # messages about creating a backup are misleading during "rear recover":
-    if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then
-        # If no latest full backup is found create one during "rear mkbackup":
-        create_backup_type="full"
-        LogPrint "No full backup found (YYYY-MM-DD-HHMM-F.tar.gz) triggers full backup"
-    else
-        # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set:
-        # If no latest full backup is found (i.e. no file name matches the YYYY-MM-DD-HHMM-F.tar.gz form)
-        # fall back to what is done in case of normal (i.e. non-incremental/non-differential) backup
-        # and hope for the best (i.e. that a backup_directory/backup_file_name actually exists).
-        # In case of normal (i.e. non-incremental/non-differential) backup there is only one restore archive
-        # and its name is the same as the backup archive (usually 'backup.tar.gz').
-        # This is only a fallback setting to be more on the safe side for "rear recover".
-        # Initially for the very fist run of incremental backup during "rear mkbackup"
-        # a full backup file of the YYYY-MM-DD-HHMM-F.tar.gz form will be created.
-        RESTORE_ARCHIVES=( "$backup_directory/$backup_file_name" )
-        LogPrint "Using $backup_file_name for backup restore"
-    fi
-fi
-# Code regarding creating a backup is useless during "rear recover" and
-# messages about creating a backup are misleading during "rear recover":
-if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then
-    # Set the right variables for creating a backup (but do not actually do anything at this point):
-    case "$create_backup_type" in
-        (full)
-            local new_full_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$full_backup_marker$backup_file_suffix"
-            backuparchive="$backup_directory/$new_full_backup_file_name"
-            BACKUP_PROG_CREATE_NEWER_OPTIONS="-V $new_full_backup_file_name"
-            LogPrint "Performing full backup using backup archive '$new_full_backup_file_name'"
-            ;;
-        (incremental)
-            local new_incremental_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$incremental_backup_marker$backup_file_suffix"
-            backuparchive="$backup_directory/$new_incremental_backup_file_name"
-            # Get the latest latest incremental backup that is based on the latest full backup (if exists):
-            local incremental_backup_glob_regex="$date_time_glob_regex-$incremental_backup_marker$backup_file_suffix"
-            # First get the latest full backup plus all later incremental backups (cf. how RESTORE_ARCHIVES is set in case of incremental backup)
-            # then grep only the incremental backups and from the incremental backups use only the last one (if exists):
-            local latest_incremental_backup=$( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | grep "$incremental_backup_glob_regex" | tail -n1 )
-            if test "$latest_incremental_backup" ; then
-                # A latest incremental backup that is based on the latest full backup is found:
-                local latest_incremental_backup_file_name=$( basename $latest_incremental_backup )
-                LogPrint "Latest incremental backup found ($latest_incremental_backup_file_name) that is newer than the latest full backup"
-                local latest_incremental_backup_date=$( echo $latest_incremental_backup_file_name | grep -o "$date_glob_regex" )
-                BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_incremental_backup_date -V $latest_incremental_backup_file_name"
-                LogPrint "Performing incremental backup for files newer than $latest_incremental_backup_date using backup archive '$new_incremental_backup_file_name'"
-            else
-                # When there is not yet an incremental backup that is based on the latest full backup
-                # the new created incremental backup must be based on the latest full backup:
-                BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name"
-                LogPrint "Performing incremental backup for files newer than $latest_full_backup_date using backup archive '$new_incremental_backup_file_name'"
-            fi
-            ;;
-        (differential)
-            local new_differential_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$differential_backup_marker$backup_file_suffix"
-            backuparchive="$backup_directory/$new_differential_backup_file_name"
-            BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name"
-            LogPrint "Performing differential backup for files newer than $latest_full_backup_date using backup archive '$new_differential_backup_file_name'"
-            ;;
-        (*)
-            BugError "Unexpected create_backup_type '$create_backup_type'"
-            ;;
-    esac
-fi
-# Go back from "set -e -u -o pipefail" to the defaults:
-apply_bash_flags_and_options_commands "$DEFAULT_BASH_FLAGS_AND_OPTIONS_COMMANDS"
-
diff --git a/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh
new file mode 120000
index 00000000..cdbdc31f
--- /dev/null
+++ b/usr/share/rear/prep/YUM/default/070_set_backup_archive.sh
@@ -0,0 +1 @@
+../../NETFS/default/070_set_backup_archive.sh
\ No newline at end of file
diff --git a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh
deleted file mode 100644
index 64b7a792..00000000
--- a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-# create mount point
-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then
-	mkdir -p $v "$BUILD_DIR/outputfs" >&2
-	StopIfError "Could not mkdir '$BUILD_DIR/outputfs'"
-
-	AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-
-	if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then
-		BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD"
-	fi
-
-	mount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs $BACKUP_DUPLICITY_NETFS_OPTIONS
-	
-	BACKUP_DUPLICITY_URL="file://$BUILD_DIR/outputfs"
-fi
diff --git a/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh
new file mode 120000
index 00000000..7f558c5d
--- /dev/null
+++ b/usr/share/rear/restore/DUPLICITY/default/100_mount_duplicity_path.sh
@@ -0,0 +1 @@
+../../../backup/DUPLICITY/default/100_mount_duplicity_path.sh
\ No newline at end of file
diff --git a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh
deleted file mode 100644
index 60aa811e..00000000
--- a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-# umount mountpoint
-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]; then
-
-	if [[ "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]] ; then
-		BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_UMOUNTCMD"
-	fi
-
-	umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs
-
-	rmdir $v $BUILD_DIR/outputfs >&2
-	if [[ $? -eq 0 ]] ; then
-		# the argument to RemoveExitTask has to be identical to the one given to AddExitTask
-		RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-	fi
-fi
diff --git a/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh
new file mode 120000
index 00000000..b7e47be1
--- /dev/null
+++ b/usr/share/rear/restore/DUPLICITY/default/980_unmount_duplicity_path.sh
@@ -0,0 +1 @@
+../../../backup/DUPLICITY/default/980_unmount_duplicity_path.sh
\ No newline at end of file
diff --git a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh
deleted file mode 100644
index 7de92af4..00000000
--- a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copied from ../../NETFS/default/100_mount_NETFS_path.sh a.k.a. ../../../backup/NETFS/default/100_mount_NETFS_path.sh for YUM
-
-# create mount point
-mkdir -p $v "$BUILD_DIR/outputfs" >&2
-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'"
-
-AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-
-if [[ "$BACKUP_MOUNTCMD" ]] ; then
-    BACKUP_URL="var://BACKUP_MOUNTCMD"
-fi
-
-mount_url $BACKUP_URL $BUILD_DIR/outputfs $BACKUP_OPTIONS
diff --git a/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh
new file mode 120000
index 00000000..60e0f83f
--- /dev/null
+++ b/usr/share/rear/restore/YUM/default/100_mount_YUM_path.sh
@@ -0,0 +1 @@
+../../NETFS/default/100_mount_NETFS_path.sh
\ No newline at end of file
diff --git a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh
deleted file mode 100644
index d02dcf34..00000000
--- a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-# Copied from ../../../backup/NETFS/default/980_umount_NETFS_dir.sh for YUM
-
-# umount NETFS mountpoint
-
-if [[ "$BACKUP_UMOUNTCMD" ]] ; then
-    BACKUP_URL="var://BACKUP_UMOUNTCMD"
-fi
-
-umount_url $BACKUP_URL $BUILD_DIR/outputfs
-
-rmdir $v $BUILD_DIR/outputfs >&2
-if [[ $? -eq 0 ]] ; then
-    # the argument to RemoveExitTask has to be identical to the one given to AddExitTask
-    RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-fi
diff --git a/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh
new file mode 120000
index 00000000..2c29cb57
--- /dev/null
+++ b/usr/share/rear/restore/YUM/default/980_umount_YUM_dir.sh
@@ -0,0 +1 @@
+../../NETFS/default/980_umount_NETFS_dir.sh
\ No newline at end of file
diff --git a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh
deleted file mode 100644
index 64b7a792..00000000
--- a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-# create mount point
-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]; then
-	mkdir -p $v "$BUILD_DIR/outputfs" >&2
-	StopIfError "Could not mkdir '$BUILD_DIR/outputfs'"
-
-	AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-
-	if [[ "$BACKUP_DUPLICITY_NETFS_MOUNTCMD" ]] ; then
-		BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_MOUNTCMD"
-	fi
-
-	mount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs $BACKUP_DUPLICITY_NETFS_OPTIONS
-	
-	BACKUP_DUPLICITY_URL="file://$BUILD_DIR/outputfs"
-fi
diff --git a/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh
new file mode 120000
index 00000000..7f558c5d
--- /dev/null
+++ b/usr/share/rear/verify/DUPLICITY/default/100_mount_duplicity_path.sh
@@ -0,0 +1 @@
+../../../backup/DUPLICITY/default/100_mount_duplicity_path.sh
\ No newline at end of file
diff --git a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh
deleted file mode 100644
index 60aa811e..00000000
--- a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh
+++ /dev/null
@@ -1,15 +0,0 @@
-# umount mountpoint
-if [ -n "$BACKUP_DUPLICITY_NETFS_URL" -o -n "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]; then
-
-	if [[ "$BACKUP_DUPLICITY_NETFS_UMOUNTCMD" ]] ; then
-		BACKUP_DUPLICITY_NETFS_URL="var://BACKUP_DUPLICITY_NETFS_UMOUNTCMD"
-	fi
-
-	umount_url $BACKUP_DUPLICITY_NETFS_URL $BUILD_DIR/outputfs
-
-	rmdir $v $BUILD_DIR/outputfs >&2
-	if [[ $? -eq 0 ]] ; then
-		# the argument to RemoveExitTask has to be identical to the one given to AddExitTask
-		RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-	fi
-fi
diff --git a/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh
new file mode 120000
index 00000000..b7e47be1
--- /dev/null
+++ b/usr/share/rear/verify/DUPLICITY/default/980_unmount_duplicity_path.sh
@@ -0,0 +1 @@
+../../../backup/DUPLICITY/default/980_unmount_duplicity_path.sh
\ No newline at end of file
diff --git a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh
deleted file mode 100644
index cfd70026..00000000
--- a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh
+++ /dev/null
@@ -1,116 +0,0 @@
-# Copied from ../../../prep/NETFS/default/050_check_NETFS_requirements.sh for YUM
-# BACKUP_URL=[proto]://[host]/[share]
-# example: nfs://lucky/temp/backup
-# example: cifs://lucky/temp
-# example: usb:///dev/sdb1
-# example: tape:///dev/nst0
-# example: file:///path
-# example: iso://backup/
-# example: sshfs://user@host/G/rear/
-# example: ftpfs://user:password@host/rear/ (the password part is optional)
-
-[[ "$BACKUP_URL" || "$BACKUP_MOUNTCMD" ]]
-# FIXME: The above test does not match the error message below.
-# To match the the error message the test should be
-# [[ "$BACKUP_URL" || ( "$BACKUP_MOUNTCMD" && "$BACKUP_UMOUNTCMD" ) ]]
-# but I <jsmeix@suse.de> cannot decide if there is a subtle reason for the omission.
-StopIfError "You must specify either BACKUP_URL or BACKUP_MOUNTCMD and BACKUP_UMOUNTCMD !"
-
-if [[ "$BACKUP_URL" ]] ; then
-    local scheme=$( url_scheme $BACKUP_URL )
-    local hostname=$( url_hostname $BACKUP_URL )
-    local path=$( url_path $BACKUP_URL )
-
-    ### check for vaild BACKUP_URL schemes
-    ### see https://github.com/rear/rear/issues/842
-    case $scheme in
-        (nfs|cifs|usb|tape|file|iso|sshfs|ftpfs)
-            # do nothing for vaild BACKUP_URL schemes
-            :
-            ;;
-        (*)
-            Error "Invalid scheme '$scheme' in BACKUP_URL '$BACKUP_URL' valid schemes: nfs cifs usb tape file iso sshfs ftpfs"
-            ;;
-    esac
-
-    ### set other variables from BACKUP_URL
-    if [[ "usb" = "$scheme" ]] ; then
-        # if USB_DEVICE is not explicitly specified it is the path from BACKUP_URL
-        [[ -z "$USB_DEVICE" ]] && USB_DEVICE="$path"
-    fi
-
-    ### check if host is reachable
-    if [[ "$PING" && "$hostname" ]] ; then
-        # Only LogPrintIfError but no StopIfError because it is not a fatal error
-        # (i.e. not a reason to abort) when a host does not respond to a 'ping'
-        # because hosts can be accessible via certain ports but do not respond to a 'ping'
-        # cf. https://bugzilla.opensuse.org/show_bug.cgi?id=616706
-        # TODO: it would be better to test if it is accessible via the actually needed port(s)
-        ping -c 2 "$hostname" >/dev/null
-        LogPrintIfError "Host '$hostname' in BACKUP_URL '$BACKUP_URL' does not respond to a 'ping'."
-    else
-        Log "Skipping 'ping' test for host '$hostname' in BACKUP_URL '$BACKUP_URL'"
-    fi
-
-fi
-
-# some backup progs require a different backuparchive name
-case "$(basename $BACKUP_PROG)" in
-    (rsync)
-        # rsync creates a target directory instead of a file
-        BACKUP_PROG_SUFFIX=
-        BACKUP_PROG_COMPRESS_SUFFIX=
-        ;;
-    (*)
-        :
-        ;;
-esac
-
-# include required programs
-# the code below includes mount.* and umount.* programs for all non-empty schemes
-# (i.e. for any non-empty BACKUP_URL like usb tape file sshfs ftpfs)
-# and it includes 'mount.' for empty schemes (e.g. if BACKUP_URL is not set)
-# which is o.k. because it is a catch all rule so we do not miss any
-# important executable needed a certain scheme and it does not hurt
-# see https://github.com/rear/rear/pull/859
-PROGS+=(
-showmount
-mount.$(url_scheme $BACKUP_URL)
-umount.$(url_scheme $BACKUP_URL)
-$( test "$BACKUP_MOUNTCMD" && echo "${BACKUP_MOUNTCMD%% *}" )
-$( test "$BACKUP_UMOUNTCMD" && echo "${BACKUP_UMOUNTCMD%% *}" )
-$BACKUP_PROG
-gzip
-bzip2
-xz
-)
-
-# include required stuff for sshfs or ftpfs (via CurlFtpFS)
-if [[ "sshfs" = "$scheme" || "ftpfs" = "$scheme" ]] ; then
-    # both sshfs and ftpfs (via CurlFtpFS) are based on FUSE
-    PROGS+=( fusermount mount.fuse )
-    MODULES+=( fuse )
-    MODULES_LOAD+=( fuse )
-    COPY_AS_IS+=( /etc/fuse.conf )
-    # include what is specific for sshfs
-    if [[ "sshfs" = "$scheme" ]] ; then
-        # see http://sourceforge.net/apps/mediawiki/fuse/index.php?title=SshfsFaq
-        REQUIRED_PROGS+=( sshfs ssh )
-        # relying on 500_ssh.sh to take a long the SSH related files
-    fi
-    # include what is specific for ftpfs
-    if [[ "ftpfs" = "$scheme" ]] ; then
-        # see http://curlftpfs.sourceforge.net/
-        # and https://github.com/rear/rear/issues/845
-        REQUIRED_PROGS+=( curlftpfs )
-    fi
-fi
-
-# include required modules, like nfs cifs ...
-# the code below includes modules for all non-empty schemes
-# (i.e. for any non-empty BACKUP_URL like usb tape file sshfs ftpfs)
-# which is o.k. because this must been seen as a catch all rule
-# (one never knows what one could miss)
-# see https://github.com/rear/rear/pull/859
-MODULES+=( $(url_scheme $BACKUP_URL) )
-
diff --git a/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh
new file mode 120000
index 00000000..af1512d6
--- /dev/null
+++ b/usr/share/rear/verify/YUM/default/050_check_YUM_requirements.sh
@@ -0,0 +1 @@
+../../NETFS/default/050_check_NETFS_requirements.sh
\ No newline at end of file
diff --git a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh
deleted file mode 100644
index f7e31ed6..00000000
--- a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copied from ../../../backup/NETFS/default/100_mount_NETFS_path.sh for YUM
-# create mount point
-mkdir -p $v "$BUILD_DIR/outputfs" >&2
-StopIfError "Could not mkdir '$BUILD_DIR/outputfs'"
-
-AddExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-
-if [[ "$BACKUP_MOUNTCMD" ]] ; then
-    BACKUP_URL="var://BACKUP_MOUNTCMD"
-fi
-
-mount_url $BACKUP_URL $BUILD_DIR/outputfs $BACKUP_OPTIONS
diff --git a/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh
new file mode 120000
index 00000000..73dd4697
--- /dev/null
+++ b/usr/share/rear/verify/YUM/default/060_mount_YUM_path.sh
@@ -0,0 +1 @@
+../../../restore/YUM/default/100_mount_YUM_path.sh
\ No newline at end of file
diff --git a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh
deleted file mode 100644
index 86d1708d..00000000
--- a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh
+++ /dev/null
@@ -1,300 +0,0 @@
-# Copied from ../../../prep/NETFS/default/070_set_backup_archive.sh for YUM
-### Determine the name of the backup archive
-### This needs to be after we special case USB devices.
-
-# FIXME: backuparchive is no local variable (regardless that it is lowercased)
-
-# If TAPE_DEVICE is specified, use that:
-if test "$TAPE_DEVICE" ; then
-    backuparchive="$TAPE_DEVICE"
-    LogPrint "Using backup archive '$backuparchive'"
-    return
-fi
-
-local backup_file_suffix="$BACKUP_PROG_SUFFIX$BACKUP_PROG_COMPRESS_SUFFIX"
-local backup_file_name="$BACKUP_PROG_ARCHIVE$backup_file_suffix"
-
-local scheme=$( url_scheme $BACKUP_URL )
-local path=$( url_path $BACKUP_URL )
-case "$scheme" in
-    (file|iso)
-        # Define the output path according to the scheme
-        local outputpath=$( backup_path $scheme $path )
-        backuparchive="$outputpath/$backup_file_name"
-        LogPrint "Using backup archive '$backuparchive'"
-        return
-        ;;
-    (tape)
-        # TODO: Check if that case is really needed.
-        # Perhaps prep/default/030_translate_tape.sh does already all what is needed.
-        backuparchive=$path
-        LogPrint "Using backup archive '$backuparchive'"
-        return
-        ;;
-esac
-
-local backup_directory=$BUILD_DIR/outputfs/$NETFS_PREFIX
-
-# Normal (i.e. non-incremental/non-differential) backup:
-if ! test "incremental" = "$BACKUP_TYPE" -o "differential" = "$BACKUP_TYPE" ; then
-    # In case of normal (i.e. non-incremental) backup there is only one restore archive
-    # and its name is the same as the backup archive (usually 'backup.tar.gz'):
-    backuparchive="$backup_directory/$backup_file_name"
-    LogPrint "Using backup archive '$backuparchive'"
-    # This script is also run during "rear recover/restoreonly" where RESTORE_ARCHIVES must be set.
-    local backup_restore_workflows=( "recover" "restoreonly" )
-    if IsInArray $WORKFLOW ${backup_restore_workflows[@]} ; then
-        # Only set RESTORE_ARCHIVES the backup archive is actually accessible
-        # cf. https://github.com/rear/rear/issues/1166
-        if test -r "$backuparchive" ; then
-            RESTORE_ARCHIVES=( "$backuparchive" )
-        else
-            # In case of USB backup there is the subsequent 540_choose_backup_archive.sh script
-            # that shows a backup selection dialog when RESTORE_ARCHIVES is not already set.
-            if test "usb" = "$scheme" ; then
-                LogPrint "Backup archive '$backuparchive' not readable. Need to select another one."
-            else
-                Error "Backup archive '$backuparchive' not readable."
-            fi
-        fi
-    fi
-    return
-fi
-
-# Incremental or differential backup:
-set -e -u -o pipefail
-# Incremental or differential backup only works for the NETFS backup method
-# and only with the 'tar' backup program:
-if ! test "NETFS" = "$BACKUP" -a "tar" = "$BACKUP_PROG" ; then
-    Error "BACKUP_TYPE incremental or differential only works with BACKUP=NETFS and BACKUP_PROG=tar"
-fi
-# Incremental or differential backup is currently only known to work with BACKUP_URL=nfs://.
-# Other BACKUP_URL schemes may work and at least BACKUP_URL=usb:///... needs special setup
-# to work with incremental or differential backup (see https://github.com/rear/rear/issues/1145):
-if test "usb" = "$scheme" ; then
-    # When USB_SUFFIX is set the compliance mode is used where
-    # backup on USB works in compliance with backup on NFS which means
-    # a fixed backup directory where incremental or differential backups work.
-    # Use plain $USB_SUFFIX and not "$USB_SUFFIX" because when USB_SUFFIX contains only blanks
-    # test "$USB_SUFFIX" would result true because test " " results true:
-    test $USB_SUFFIX || Error "BACKUP_TYPE incremental or differential requires USB_SUFFIX for BACKUP_URL=usb"
-fi
-# Incremental or differential backup and keeping old backup contradict each other (mutual exclusive)
-# so that NETFS_KEEP_OLD_BACKUP_COPY must not be 'true' in case of incremental or differential backup:
-if test "$NETFS_KEEP_OLD_BACKUP_COPY" ; then
-    NETFS_KEEP_OLD_BACKUP_COPY=""
-    LogPrint "Disabled NETFS_KEEP_OLD_BACKUP_COPY because BACKUP_TYPE incremental or differential does not work with that"
-fi
-# For incremental or differential backup some date values (weekday, YYYY-MM-DD, HHMM) are needed
-# that must be consistent for one single point of the current time which means
-# one cannot call the 'date' command several times because then there would be
-# a small probability that e.g. weekday, YYYY-MM-DD, HHMM do not match
-# one single point in time (in particular when midnight passes in between).
-# Therefore the output of one single 'date' call is storend in an array and
-# the array elements are then assinged to individual variables as needed:
-local current_date_output=( $( date '+%a %Y-%m-%d %H%M' ) )
-local current_weekday="${current_date_output[0]}"
-local current_yyyy_mm_dd="${current_date_output[1]}"
-local current_hhmm="${current_date_output[2]}"
-# The date FULLBACKUP_OUTDATED_DAYS ago is needed to check if the latest full backup is too old.
-# When the latest full backup is more than FULLBACKUP_OUTDATED_DAYS ago a new full backup is made.
-# This separated call of the 'date' command which is technically needed because it is
-# for another point in time (e.g. 7 days ago) is run after the above call of the 'date'
-# command for the current time to be on the safe side when midnight passes in between
-# both 'date' commands which would then result that a new full backup is made
-# when the latest full backup is basically right now FULLBACKUP_OUTDATED_DAYS ago because
-# the stored date of the latest full backup is the current date at the time when it was made.
-# Example (assuming FULLBACKUP_OUTDATED_DAYS=7 ):
-# The latest full backup was made on Sunday January 10 in 2016 (just before midnight).
-# One week later this script runs again while midnight passes between the two 'date' calls
-# so that current_date_output[@]="Sun 2016-01-17 0000" (still Sunday January 17 in 2016)
-# and yyyymmdd_max_days_ago=20160111 (already Monday January 11 in 2016), then
-# Sunday January 10 is older than Monday January 11 so that a new full backup is made:
-test "$FULLBACKUP_OUTDATED_DAYS" || FULLBACKUP_OUTDATED_DAYS="7"
-local yyyymmdd_max_days_ago=$( date '+%Y%m%d' --date="$FULLBACKUP_OUTDATED_DAYS days ago" )
-# Full backup file names are of the form YYYY-MM-DD-HHMM-F.tar.gz
-# where the 'F' denotes a full backup:
-local full_backup_marker="F"
-# Incremental backup file names are of the form YYYY-MM-DD-HHMM-I.tar.gz
-# where the 'I' denotes an incremental backup:
-local incremental_backup_marker="I"
-# Differential backup file names are of the form YYYY-MM-DD-HHMM-D.tar.gz
-# where the last 'D' denotes a differential backup:
-local differential_backup_marker="D"
-# In case of incremental or differential backup the RESTORE_ARCHIVES contains
-# first the latest full backup file.
-# In case of incremental backup the RESTORE_ARCHIVES contains
-# after the latest full backup file each incremental backup
-# in the ordering how they must be restored.
-# For example when the latest full backup was made on Sunday
-# plus each subsequent weekday a separated incremental backup was made,
-# then during a "rear recover" on Wednesday morning
-# first the full backup from Sunday has to be restored,
-# then the incremental backup from Monday, and
-# finally the incremental backup from Tuesday.
-# In case of differential backup the RESTORE_ARCHIVES contains
-# after the latest full backup file the latest differential backup.
-# For example when the latest full backup was made on Sunday
-# plus each subsequent weekday a separated differential backup was made,
-# then during a "rear recover" on Wednesday morning
-# first the full backup from Sunday has to be restored,
-# and finally the differential backup from Tuesday
-# (i.e. the differential backup from Monday is skipped).
-# The date format YYYY-MM-DD that is used here is crucial.
-# It is the ISO 8601 format 'year-month-day' to specify a day of a year
-# that is accepted by 'tar' for the '--newer' option,
-# see the GNU tar manual section "Operating Only on New Files"
-# at https://www.gnu.org/software/tar/manual/html_node/after.html
-# and the GNU tar manual section "Calendar date items"
-# at https://www.gnu.org/software/tar/manual/html_node/Calendar-date-items.html#SEC124
-local date_glob_regex="[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]"
-local date_time_glob_regex="$date_glob_regex-[0-9][0-9][0-9][0-9]"
-# Determine what kind of backup must be created, 'full' or 'incremental' or 'differential'
-# (the empty default means it is undecided what kind of backup must be created):
-local create_backup_type=""
-# Code regarding creating a backup is useless during "rear recover" and
-# messages about creating a backup are misleading during "rear recover":
-local recovery_workflows=( "recover" "layoutonly" "restoreonly" )
-if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then
-    # When today is a specified full backup day, do a full backup in any case
-    # (regardless if there is already a full backup of this day):
-    if IsInArray "$current_weekday" "${FULLBACKUPDAY[@]}" ; then
-        create_backup_type="full"
-        LogPrint "Today's weekday ('$current_weekday') is a full backup day that triggers a new full backup in any case"
-    fi
-fi
-# Get the latest full backup (if exists):
-local full_backup_glob_regex="$date_time_glob_regex-$full_backup_marker$backup_file_suffix"
-# Here things like 'find /path/to/dir -name '*.tar.gz' | sort' are used because
-# one cannot use bash globbing via commands like 'ls /path/to/dir/*.tar.gz'
-# because /usr/sbin/rear sets the nullglob bash option which leads to plain 'ls'
-# when '/path/to/dir/*.tar.gz' matches nothing (i.e. when no backup file exists)
-# so that then plain 'ls' would result nonsense.
-local latest_full_backup=$( find $backup_directory -name "$full_backup_glob_regex" | sort | tail -n1 )
-# A latest full backup is found:
-if test "$latest_full_backup" ; then
-    local latest_full_backup_file_name=$( basename "$latest_full_backup" )
-    # The full_or_incremental_backup_glob_regex is also needed below for non-"recover" WORKFLOWs
-    # to set the right variables for creating an incremental backup:
-    local full_or_incremental_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$incremental_backup_marker]$backup_file_suffix"
-    # Code regarding creating a backup is useless during "rear recover" and
-    # messages about creating a backup are misleading during "rear recover":
-    if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then
-        # There is nothing to do here if it is already decided that
-        # a full backup must be created (see "full backup day" above"):
-        if ! test "full" = "$create_backup_type" ; then
-            local latest_full_backup_date=$( echo $latest_full_backup_file_name | grep -o "$date_glob_regex" )
-            local yyyymmdd_latest_full_backup=$( echo $latest_full_backup_date | tr -d '-' )
-            # Check if the latest full backup is too old:
-            if test $yyyymmdd_latest_full_backup -lt $yyyymmdd_max_days_ago ; then
-                create_backup_type="full"
-                LogPrint "Latest full backup date '$latest_full_backup_date' too old (more than $FULLBACKUP_OUTDATED_DAYS days ago) triggers new full backup"
-            else
-                # When a latest full backup is found that is not too old
-                # a BACKUP_TYPE (incremental or differential) backup will be created:
-                create_backup_type="$BACKUP_TYPE"
-                LogPrint "Latest full backup found ($latest_full_backup_file_name) triggers $BACKUP_TYPE backup"
-            fi
-        fi
-    else
-        # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set:
-        case "$BACKUP_TYPE" in
-            (incremental)
-                # When a latest full backup is found use that plus all later incremental backups for restore:
-                # The following command is a bit tricky:
-                # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-I.tar.gz files in the backup directory and sorts them
-                # and finally it outputs only those that match the latest full backup file name and incremental backups that got sorted after that
-                # where it is mandatory that the backup file names sort by date (i.e. date must be the leading part of the backup file names):
-                RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" ) )
-                ;;
-            (differential)
-                # For differential backup use the latest full backup plus the one latest differential backup for restore:
-                # The following command is a bit tricky:
-                # It lists all YYYY-MM-DD-HHMM-F.tar.gz and all YYYY-MM-DD-HHMM-D.tar.gz files in the backup directory and sorts them
-                # then it outputs only those that match the latest full backup file name and all differential backups that got sorted after that
-                # and then it outputs only the first line (i.e. the full backup) and the last line (i.e. the latest differential backup)
-                # but when no differential backup exists (i.e. when only the full backup exists) the first line is also the last line
-                # so that "sed -n -e '1p;$p'" outputs the full backup twice which is corrected by the final "sort -u":
-                local full_or_differential_backup_glob_regex="$date_time_glob_regex-[$full_backup_marker$differential_backup_marker]$backup_file_suffix"
-                RESTORE_ARCHIVES=( $( find $backup_directory -name "$full_or_differential_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | sed -n -e '1p;$p' | sort -u ) )
-                ;;
-            (*)
-                BugError "Unexpected BACKUP_TYPE '$BACKUP_TYPE'"
-                ;;
-        esac
-        # Tell the user what will be restored:
-        local restore_archives_file_names=""
-        for restore_archive in "${RESTORE_ARCHIVES[@]}" ; do
-            restore_archives_file_names="$restore_archives_file_names $( basename "$restore_archive" )"
-        done
-        LogPrint "For backup restore using $restore_archives_file_names"
-    fi
-# No latest full backup is found:
-else
-    # Code regarding creating a backup is useless during "rear recover" and
-    # messages about creating a backup are misleading during "rear recover":
-    if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then
-        # If no latest full backup is found create one during "rear mkbackup":
-        create_backup_type="full"
-        LogPrint "No full backup found (YYYY-MM-DD-HHMM-F.tar.gz) triggers full backup"
-    else
-        # This script is also run during "rear recover" where RESTORE_ARCHIVES must be set:
-        # If no latest full backup is found (i.e. no file name matches the YYYY-MM-DD-HHMM-F.tar.gz form)
-        # fall back to what is done in case of normal (i.e. non-incremental/non-differential) backup
-        # and hope for the best (i.e. that a backup_directory/backup_file_name actually exists).
-        # In case of normal (i.e. non-incremental/non-differential) backup there is only one restore archive
-        # and its name is the same as the backup archive (usually 'backup.tar.gz').
-        # This is only a fallback setting to be more on the safe side for "rear recover".
-        # Initially for the very fist run of incremental backup during "rear mkbackup"
-        # a full backup file of the YYYY-MM-DD-HHMM-F.tar.gz form will be created.
-        RESTORE_ARCHIVES=( "$backup_directory/$backup_file_name" )
-        LogPrint "Using $backup_file_name for backup restore"
-    fi
-fi
-# Code regarding creating a backup is useless during "rear recover" and
-# messages about creating a backup are misleading during "rear recover":
-if ! IsInArray $WORKFLOW ${recovery_workflows[@]} ; then
-    # Set the right variables for creating a backup (but do not actually do anything at this point):
-    case "$create_backup_type" in
-        (full)
-            local new_full_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$full_backup_marker$backup_file_suffix"
-            backuparchive="$backup_directory/$new_full_backup_file_name"
-            BACKUP_PROG_CREATE_NEWER_OPTIONS="-V $new_full_backup_file_name"
-            LogPrint "Performing full backup using backup archive '$new_full_backup_file_name'"
-            ;;
-        (incremental)
-            local new_incremental_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$incremental_backup_marker$backup_file_suffix"
-            backuparchive="$backup_directory/$new_incremental_backup_file_name"
-            # Get the latest latest incremental backup that is based on the latest full backup (if exists):
-            local incremental_backup_glob_regex="$date_time_glob_regex-$incremental_backup_marker$backup_file_suffix"
-            # First get the latest full backup plus all later incremental backups (cf. how RESTORE_ARCHIVES is set in case of incremental backup)
-            # then grep only the incremental backups and from the incremental backups use only the last one (if exists):
-            local latest_incremental_backup=$( find $backup_directory -name "$full_or_incremental_backup_glob_regex" | sort | sed -n -e "/$latest_full_backup_file_name/,\$p" | grep "$incremental_backup_glob_regex" | tail -n1 )
-            if test "$latest_incremental_backup" ; then
-                # A latest incremental backup that is based on the latest full backup is found:
-                local latest_incremental_backup_file_name=$( basename $latest_incremental_backup )
-                LogPrint "Latest incremental backup found ($latest_incremental_backup_file_name) that is newer than the latest full backup"
-                local latest_incremental_backup_date=$( echo $latest_incremental_backup_file_name | grep -o "$date_glob_regex" )
-                BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_incremental_backup_date -V $latest_incremental_backup_file_name"
-                LogPrint "Performing incremental backup for files newer than $latest_incremental_backup_date using backup archive '$new_incremental_backup_file_name'"
-            else
-                # When there is not yet an incremental backup that is based on the latest full backup
-                # the new created incremental backup must be based on the latest full backup:
-                BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name"
-                LogPrint "Performing incremental backup for files newer than $latest_full_backup_date using backup archive '$new_incremental_backup_file_name'"
-            fi
-            ;;
-        (differential)
-            local new_differential_backup_file_name="$current_yyyy_mm_dd-$current_hhmm-$differential_backup_marker$backup_file_suffix"
-            backuparchive="$backup_directory/$new_differential_backup_file_name"
-            BACKUP_PROG_CREATE_NEWER_OPTIONS="--newer=$latest_full_backup_date -V $latest_full_backup_file_name"
-            LogPrint "Performing differential backup for files newer than $latest_full_backup_date using backup archive '$new_differential_backup_file_name'"
-            ;;
-        (*)
-            BugError "Unexpected create_backup_type '$create_backup_type'"
-            ;;
-    esac
-fi
-# Go back from "set -e -u -o pipefail" to the defaults:
-apply_bash_flags_and_options_commands "$DEFAULT_BASH_FLAGS_AND_OPTIONS_COMMANDS"
-
diff --git a/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh
new file mode 120000
index 00000000..b8de3d9e
--- /dev/null
+++ b/usr/share/rear/verify/YUM/default/070_set_backup_archive.sh
@@ -0,0 +1 @@
+../../../prep/YUM/default/070_set_backup_archive.sh
\ No newline at end of file
diff --git a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh
deleted file mode 100644
index dc719e38..00000000
--- a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-# Copied from ../../../backup/NETFS/default/980_umount_NETFS_dir.sh for YUM
-# umount NETFS mountpoint
-
-if [[ "$BACKUP_UMOUNTCMD" ]] ; then
-    BACKUP_URL="var://BACKUP_UMOUNTCMD"
-fi
-
-umount_url $BACKUP_URL $BUILD_DIR/outputfs
-
-rmdir $v $BUILD_DIR/outputfs >&2
-if [[ $? -eq 0 ]] ; then
-    # the argument to RemoveExitTask has to be identical to the one given to AddExitTask
-    RemoveExitTask "rmdir $v $BUILD_DIR/outputfs >&2"
-fi
diff --git a/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh
new file mode 120000
index 00000000..ada5ea50
--- /dev/null
+++ b/usr/share/rear/verify/YUM/default/980_umount_YUM_dir.sh
@@ -0,0 +1 @@
+../../../restore/YUM/default/980_umount_YUM_dir.sh
\ No newline at end of file