diff --git a/.cryptsetup.metadata b/.cryptsetup.metadata
index 9b26198..462e48a 100644
--- a/.cryptsetup.metadata
+++ b/.cryptsetup.metadata
@@ -1 +1 @@
-135dc2e7b84bc8c74f01447f93d1392485b47a37 SOURCES/cryptsetup-2.2.2.tar.xz
+d847934311112c7220435b94b44c2b2bb3dca0e1 SOURCES/cryptsetup-2.3.3.tar.xz
diff --git a/.gitignore b/.gitignore
index af155c9..16b2a5f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1 @@
-SOURCES/cryptsetup-2.2.2.tar.xz
+SOURCES/cryptsetup-2.3.3.tar.xz
diff --git a/SOURCES/cryptsetup-2.3.4-luks2-validation-32bit-fix.patch b/SOURCES/cryptsetup-2.3.4-luks2-validation-32bit-fix.patch
new file mode 100644
index 0000000..f708ba9
--- /dev/null
+++ b/SOURCES/cryptsetup-2.3.4-luks2-validation-32bit-fix.patch
@@ -0,0 +1,341 @@
+From 52f5cb8cedf22fb3e14c744814ec8af7614146c7 Mon Sep 17 00:00:00 2001
+From: Tobias Stoeckmann
+Date: Mon, 24 Aug 2020 19:21:43 +0200
+Subject: [PATCH 01/11] Check segment gaps regardless of heap space.
+
+Segments are validated in hdr_validate_segments. Gaps in segment keys
+are detected when collecting offsets. But if an invalid segment is very
+large, larger than count, it could happen that cryptsetup is unable to
+allocate enough memory, giving no clue about what the actual
+problem is.
+
+Therefore check for gaps even if not enough memory is available. This
+gives much more information with debug output enabled.
+
+Obviously cryptsetup still fails if segments are perfectly fine but not
+enough RAM is available. But at that stage, the user knows that it's the
+fault of the system, not of an invalid segment.
+---
+ lib/luks2/luks2_json_metadata.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/lib/luks2/luks2_json_metadata.c b/lib/luks2/luks2_json_metadata.c
+index 23556f2..ea5c7f7 100644
+--- a/lib/luks2/luks2_json_metadata.c
++++ b/lib/luks2/luks2_json_metadata.c
+@@ -679,11 +679,10 @@ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
+ 	if (first_backup < 0)
+ 		first_backup = count;
+ 
+-	intervals = malloc(first_backup * sizeof(*intervals));
+-	if (!intervals) {
+-		log_dbg(cd, "Not enough memory.");
+-		return 1;
+-	}
++	if (first_backup <= count && (size_t)first_backup < SIZE_MAX / sizeof(*intervals))
++		intervals = malloc(first_backup * sizeof(*intervals));
++	else
++		intervals = NULL;
+ 
+ 	for (i = 0; i < first_backup; i++) {
+ 		jobj = json_segments_get_segment(jobj_segments, i);
+@@ -692,8 +691,14 @@ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
+ 			free(intervals);
+ 			return 1;
+ 		}
+-		intervals[i].offset = json_segment_get_offset(jobj, 0);
+-		intervals[i].length = json_segment_get_size(jobj, 0) ?: UINT64_MAX;
++		if (intervals != NULL) {
++			intervals[i].offset = json_segment_get_offset(jobj, 0);
++			intervals[i].length = json_segment_get_size(jobj, 0) ?: UINT64_MAX;
++		}
++	}
++	if (intervals == NULL) {
++		log_dbg(cd, "Not enough memory.");
++		return 1;
+ 	}
+ 
+ 	r = !validate_segment_intervals(cd, first_backup, intervals);
+--
+1.8.3.1
+
+From 46ee71edcd13e1dad50815ad65c28779aa6f7503 Mon Sep 17 00:00:00 2001
+From: Ondrej Kozina
+Date: Tue, 25 Aug 2020 19:32:48 +0200
+Subject: [PATCH 07/11] Avoid needlessly large allocations in LUKS2 validation
+ code.
+
+If a LUKS2 backup segment creates a gap between the last regular
+segment and the backup segment, report invalid metadata immediately. We
+stop on the first error, so there's no need to allocate large memory on
+the heap (we may run with mlock(MCL_FUTURE) set).
+
+Example:
+- total segments count is 3
+- regular segments have keys "0" and "1"
+- first backup segment has key "42"
+---
+ lib/luks2/luks2_json_metadata.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/lib/luks2/luks2_json_metadata.c b/lib/luks2/luks2_json_metadata.c
+index ea5c7f7..5b73b34 100644
+--- a/lib/luks2/luks2_json_metadata.c
++++ b/lib/luks2/luks2_json_metadata.c
+@@ -676,10 +676,16 @@ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
+ 		return 1;
+ 	}
+ 
++	/* avoid needlessly large allocation when first backup segment is invalid */
++	if (first_backup >= count) {
++		log_dbg(cd, "Gap between last regular segment and backup segment at key %d.", first_backup);
++		return 1;
++	}
++
+ 	if (first_backup < 0)
+ 		first_backup = count;
+ 
+-	if (first_backup <= count && (size_t)first_backup < SIZE_MAX / sizeof(*intervals))
++	if ((size_t)first_backup < SIZE_MAX / sizeof(*intervals))
+ 		intervals = malloc(first_backup * sizeof(*intervals));
+ 	else
+ 		intervals = NULL;
+--
+1.8.3.1
+
+From 752c9a52798f11d3b765b673ebaa3058eb25316e Mon Sep 17 00:00:00 2001
+From: Ondrej Kozina
+Date: Tue, 25 Aug 2020 19:23:21 +0200
+Subject: [PATCH 08/11] Simplify validation code a bit.
+
+Keep it simple. If there's not enough memory we can't validate
+segments. The LUKS2 specification does not recommend continuing to
+process LUKS2 metadata if it cannot be properly validated.
+---
+ lib/luks2/luks2_json_metadata.c | 19 ++++++++-----------
+ 1 file changed, 8 insertions(+), 11 deletions(-)
+
+diff --git a/lib/luks2/luks2_json_metadata.c b/lib/luks2/luks2_json_metadata.c
+index 5b73b34..df89401 100644
+--- a/lib/luks2/luks2_json_metadata.c
++++ b/lib/luks2/luks2_json_metadata.c
+@@ -594,9 +594,9 @@ static bool validate_segment_intervals(struct crypt_device *cd,
+ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
+ {
+ 	json_object *jobj_segments, *jobj_digests, *jobj_offset, *jobj_size, *jobj_type, *jobj_flags, *jobj;
+-	struct interval *intervals;
+ 	uint64_t offset, size;
+ 	int i, r, count, first_backup = -1;
++	struct interval *intervals = NULL;
+ 
+ 	if (!json_object_object_get_ex(hdr_jobj, "segments", &jobj_segments)) {
+ 		log_dbg(cd, "Missing segments section.");
+@@ -687,8 +687,11 @@ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
+ 
+ 	if ((size_t)first_backup < SIZE_MAX / sizeof(*intervals))
+ 		intervals = malloc(first_backup * sizeof(*intervals));
+-	else
+-		intervals = NULL;
++
++	if (!intervals) {
++		log_dbg(cd, "Not enough memory.");
++		return 1;
++	}
+ 
+ 	for (i = 0; i < first_backup; i++) {
+ 		jobj = json_segments_get_segment(jobj_segments, i);
+@@ -697,14 +700,8 @@ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
+ 			free(intervals);
+ 			return 1;
+ 		}
+-		if (intervals != NULL) {
+-			intervals[i].offset = json_segment_get_offset(jobj, 0);
+-			intervals[i].length = json_segment_get_size(jobj, 0) ?: UINT64_MAX;
+-		}
+-	}
+-	if (intervals == NULL) {
+-		log_dbg(cd, "Not enough memory.");
+-		return 1;
++		intervals[i].offset = json_segment_get_offset(jobj, 0);
++		intervals[i].length = json_segment_get_size(jobj, 0) ?: UINT64_MAX;
+ 	}
+ 
+ 	r = !validate_segment_intervals(cd, first_backup, intervals);
+--
+1.8.3.1
+
+From 96da06430b6f5c2588c619082f2c5880df9cc776 Mon Sep 17 00:00:00 2001
+From: Ondrej Kozina
+Date: Wed, 26 Aug 2020 17:24:05 +0200
+Subject: [PATCH 09/11] Add test for LUKS2 segments validation code fix.
+
+---
+ ...enerate-luks2-segment-wrong-backup-key-0.img.sh | 67 ++++++++++++++++++++++
+ ...enerate-luks2-segment-wrong-backup-key-1.img.sh | 67 ++++++++++++++++++++++
+ tests/luks2-validation-test | 2 +
+ 3 files changed, 136 insertions(+)
+ create mode 100755 tests/generators/generate-luks2-segment-wrong-backup-key-0.img.sh
+ create mode 100755 tests/generators/generate-luks2-segment-wrong-backup-key-1.img.sh
+
+diff --git a/tests/generators/generate-luks2-segment-wrong-backup-key-0.img.sh b/tests/generators/generate-luks2-segment-wrong-backup-key-0.img.sh
+new file mode 100755
+index 0000000..2499a5e
+--- /dev/null
++++ b/tests/generators/generate-luks2-segment-wrong-backup-key-0.img.sh
+@@ -0,0 +1,67 @@
++#!/bin/bash
++
++. lib.sh
++
++#
++# *** Description ***
++#
++# generate primary header with wrong backup segment id
++#
++# secondary header is corrupted on purpose as well
++#
++
++# $1 full target dir
++# $2 full source luks2 image
++
++function prepare()
++{
++	cp $SRC_IMG $TGT_IMG
++	test -d $TMPDIR || mkdir $TMPDIR
++	read_luks2_json0 $TGT_IMG $TMPDIR/json0
++	read_luks2_bin_hdr0 $TGT_IMG $TMPDIR/hdr0
++	read_luks2_bin_hdr1 $TGT_IMG $TMPDIR/hdr1
++}
++
++function generate()
++{
++	# create illegal backup segment key (used to be bug in 32bit implementations)
++	json_str=$(jq -c '.segments[(.segments | length + 1 | tostring)] = { "type" : "linear", "offset" : "512", "size" : "512", "flags":["backup-x"]}' $TMPDIR/json0)
++	test ${#json_str} -lt $((LUKS2_JSON_SIZE*512)) || exit 2
++
++	write_luks2_json "$json_str" $TMPDIR/json0
++
++	merge_bin_hdr_with_json $TMPDIR/hdr0 $TMPDIR/json0 $TMPDIR/area0
++	erase_checksum $TMPDIR/area0
++	chks0=$(calc_sha256_checksum_file $TMPDIR/area0)
++	write_checksum $chks0 $TMPDIR/area0
++	write_luks2_hdr0 $TMPDIR/area0 $TGT_IMG
++	kill_bin_hdr $TMPDIR/hdr1
++	write_luks2_hdr1 $TMPDIR/hdr1 $TGT_IMG
++}
++
++function check()
++{
++	read_luks2_bin_hdr1 $TGT_IMG $TMPDIR/hdr_res1
++	local str_res1=$(head -c 6 $TMPDIR/hdr_res1)
++	test "$str_res1" = "VACUUM" || exit 2
++
++	read_luks2_json0 $TGT_IMG $TMPDIR/json_res0
++	jq -c 'if .segments | length < 2
++		then error("Unexpected segments count") else empty end' $TMPDIR/json_res0 || exit 5
++}
++
++function cleanup()
++{
++	rm -f $TMPDIR/*
++	rm -fd $TMPDIR
++}
++
++test $# -eq 2 || exit 1
++
++TGT_IMG=$1/$(test_img_name $0)
++SRC_IMG=$2
++
++prepare
++generate
++check
++cleanup
+diff --git a/tests/generators/generate-luks2-segment-wrong-backup-key-1.img.sh b/tests/generators/generate-luks2-segment-wrong-backup-key-1.img.sh
+new file mode 100755
+index 0000000..702fe71
+--- /dev/null
++++ b/tests/generators/generate-luks2-segment-wrong-backup-key-1.img.sh
+@@ -0,0 +1,67 @@
++#!/bin/bash
++
++. lib.sh
++
++#
++# *** Description ***
++#
++# generate primary header with wrong backup segment id
++#
++# secondary header is corrupted on purpose as well
++#
++
++# $1 full target dir
++# $2 full source luks2 image
++
++function prepare()
++{
++	cp $SRC_IMG $TGT_IMG
++	test -d $TMPDIR || mkdir $TMPDIR
++	read_luks2_json0 $TGT_IMG $TMPDIR/json0
++	read_luks2_bin_hdr0 $TGT_IMG $TMPDIR/hdr0
++	read_luks2_bin_hdr1 $TGT_IMG $TMPDIR/hdr1
++}
++
++function generate()
++{
++	# create illegal backup segment key (used to be bug in 32bit implementations)
++	json_str=$(jq -c '(.segments."0".offset | tonumber) as $i | .segments[range(1;65) | tostring] = { "type" : "linear", "offset" : ($i + 512 | tostring), "size" : "512" } | .segments."268435472" = { "type":"linear","offset":"512","size":"512","flags":["backup-x"]}' $TMPDIR/json0)
++	test ${#json_str} -lt $((LUKS2_JSON_SIZE*512)) || exit 2
++
++	write_luks2_json "$json_str" $TMPDIR/json0
++
++	merge_bin_hdr_with_json $TMPDIR/hdr0 $TMPDIR/json0 $TMPDIR/area0
++	erase_checksum $TMPDIR/area0
++	chks0=$(calc_sha256_checksum_file $TMPDIR/area0)
++	write_checksum $chks0 $TMPDIR/area0
++	write_luks2_hdr0 $TMPDIR/area0 $TGT_IMG
++	kill_bin_hdr $TMPDIR/hdr1
++	write_luks2_hdr1 $TMPDIR/hdr1 $TGT_IMG
++}
++
++function check()
++{
++	read_luks2_bin_hdr1 $TGT_IMG $TMPDIR/hdr_res1
++	local str_res1=$(head -c 6 $TMPDIR/hdr_res1)
++	test "$str_res1" = "VACUUM" || exit 2
++
++	read_luks2_json0 $TGT_IMG $TMPDIR/json_res0
++	jq -c 'if .segments | length < 64
++		then error("Unexpected segments count") else empty end' $TMPDIR/json_res0 || exit 5
++}
++
++function cleanup()
++{
++	rm -f $TMPDIR/*
++	rm -fd $TMPDIR
++}
++
++test $# -eq 2 || exit 1
++
++TGT_IMG=$1/$(test_img_name $0)
++SRC_IMG=$2
++
++prepare
++generate
++check
++cleanup
+diff --git a/tests/luks2-validation-test b/tests/luks2-validation-test
+index 52945ba..04183fb 100755
+--- a/tests/luks2-validation-test
++++ b/tests/luks2-validation-test
+@@ -199,6 +199,8 @@ RUN luks2-segment-unknown-type.img "R" "Validation rejected segment with all m
+ RUN luks2-segment-two.img "R" "Validation rejected two valid segments"
+ RUN luks2-segment-wrong-flags.img "F" "Failed to detect invalid flags field"
+ RUN luks2-segment-wrong-flags-element.img "F" "Failed to detect invalid flags content"
++RUN luks2-segment-wrong-backup-key-0.img "F" "Failed to detect gap in backup segments"
++RUN luks2-segment-wrong-backup-key-1.img "F" "Failed to detect gap in backup segments"
+ 
+ echo "[6] Test metadata size and keyslots size (config section)"
+ RUN luks2-invalid-keyslots-size-c0.img "F" "Failed to detect too large keyslots_size in config section"
+--
+1.8.3.1
+
diff --git a/SPECS/cryptsetup.spec b/SPECS/cryptsetup.spec
index e78d0f7..d1b86dc 100644
--- a/SPECS/cryptsetup.spec
+++ b/SPECS/cryptsetup.spec
@@ -4,8 +4,8 @@ Obsoletes: cryptsetup-python3
 
 Summary: A utility for setting up encrypted disks
 Name: cryptsetup
-Version: 2.2.2
-Release: 1%{?dist}
+Version: 2.3.3
+Release: 2%{?dist}
 License: GPLv2+ and LGPLv2+
 Group: Applications/System
 URL: https://gitlab.com/cryptsetup/cryptsetup
@@ -23,6 +23,7 @@ Source0: https://www.kernel.org/pub/linux/utils/cryptsetup/v2.0/cryptsetup-%{ups
 Patch0: %{name}-add-system-library-paths.patch
 # Remove the patch when (if ever) osci infrastructure gets stable enough
 Patch1: %{name}-disable-verity-compat-test.patch
+Patch2: %{name}-2.3.4-luks2-validation-32bit-fix.patch
 
 %description
 The cryptsetup package contains a utility for setting up
@@ -79,6 +80,7 @@ can be used for offline reencryption of disk in situ.
 %prep
 %setup -q -n cryptsetup-%{upstream_version}
 %patch1 -p1
+%patch2 -p1
 %patch0 -p1
 
 chmod -x misc/dracut_90reencrypt/*
@@ -138,6 +140,19 @@ rm -rf %{buildroot}/%{_libdir}/*.la
 %clean
 
 %changelog
+* Thu Aug 27 2020 Ondrej Kozina - 2.3.3-2
+- patch: Fix possible memory corruption in LUKS2 validation
+  code in 32bit library.
+- Resolves: #1872294
+
+* Thu May 28 2020 Ondrej Kozina - 2.3.3-1
+- Update to cryptsetup 2.3.3
+- Resolves: #1796826 #1743891 #1785748
+
+* Fri Apr 03 2020 Ondrej Kozina - 2.3.1-1
+- Update to cryptsetup 2.3.1
+- Resolves: #1796826 #1743891 #1785748
+
 * Mon Nov 18 2019 Ondrej Kozina - 2.2.2-1
 - Update to cryptsetup 2.2.2
 - LUKS2 reencryption honors activation flags (one time and persistent).
 - LUKS2 reencryption store reencryption parameters in LUKS2 after
   keyring service.
 - Resolves: #1757783 #1750680 #1753597 #1743399
-- Resolves: #1742815 #1746532
 * Fri Aug 30 2019 Ondrej Kozina - 2.2.0-2
 - patch: Fix mapped segments overflow on 32bit architectures.
 - patch: Take optimal io size in account with LUKS2 reencryption.
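
Background sketch (illustrative, not part of the packaged diff above): the 32-bit corruption these patches guard against comes from an unchecked size_t multiplication before malloc(). The standalone C program below is a minimal sketch under stated assumptions: struct interval is assumed to hold only the two uint64_t fields the patches assign, and the oversized segment key 268435472 is taken from the second test generator. It is not cryptsetup code, only a demonstration of why the SIZE_MAX / sizeof(*intervals) guard matters.

#include <stdint.h>
#include <stdio.h>

/* Assumption: mirrors the two fields the patches assign (offset/length). */
struct interval {
	uint64_t offset;
	uint64_t length;
};

int main(void)
{
	/* Segment key taken from generate-luks2-segment-wrong-backup-key-1.img.sh */
	int first_backup = 268435472;

	/* Unchecked product: with a 32-bit size_t, 268435472 * 16 wraps to 256,
	 * so malloc() would return a tiny buffer and the later per-segment
	 * writes would run far past it. On 64-bit the product does not wrap. */
	size_t nbytes = (size_t)first_backup * sizeof(struct interval);
	printf("requested bytes (possibly wrapped): %zu\n", nbytes);

	/* The guard added by the patches rejects the request before malloc(). */
	if ((size_t)first_backup < SIZE_MAX / sizeof(struct interval))
		printf("guard: allocation size is representable\n");
	else
		printf("guard: segment key too large, metadata rejected\n");

	return 0;
}

On a 32-bit build SIZE_MAX / 16 is 268435455, so the guard rejects the crafted key before any allocation; the later patch in the series additionally reports the gap between the last regular segment and the backup segment without allocating at all.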