Blob Blame History Raw
From 52f5cb8cedf22fb3e14c744814ec8af7614146c7 Mon Sep 17 00:00:00 2001
From: Tobias Stoeckmann <tobias@stoeckmann.org>
Date: Mon, 24 Aug 2020 19:21:43 +0200
Subject: [PATCH 01/11] Check segment gaps regardless of heap space.

Segments are validated in hdr_validate_segments. Gaps in segment keys
are detected when collecting offsets. But if an invalid segment key is very
large, larger than count, it could happen that cryptsetup is unable to
allocate enough memory, without giving any clue about what the actual
problem is.

Therefore check for gaps even if not enough memory is available. This
gives much more information with debug output enabled.

Obviously cryptsetup still fails if segments are perfectly fine but not
enough RAM available. But at that stage, the user knows that it's the
fault of the system, not of an invalid segment.
---
 lib/luks2/luks2_json_metadata.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/lib/luks2/luks2_json_metadata.c b/lib/luks2/luks2_json_metadata.c
index 23556f2..ea5c7f7 100644
--- a/lib/luks2/luks2_json_metadata.c
+++ b/lib/luks2/luks2_json_metadata.c
@@ -679,11 +679,10 @@ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
 	if (first_backup < 0)
 		first_backup = count;
 
-	intervals = malloc(first_backup * sizeof(*intervals));
-	if (!intervals) {
-		log_dbg(cd, "Not enough memory.");
-		return 1;
-	}
+	if (first_backup <= count && (size_t)first_backup < SIZE_MAX / sizeof(*intervals))
+		intervals = malloc(first_backup * sizeof(*intervals));
+	else
+		intervals = NULL;
 
 	for (i = 0; i < first_backup; i++) {
 		jobj = json_segments_get_segment(jobj_segments, i);
@@ -692,8 +691,14 @@ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
 			free(intervals);
 			return 1;
 		}
-		intervals[i].offset = json_segment_get_offset(jobj, 0);
-		intervals[i].length = json_segment_get_size(jobj, 0) ?: UINT64_MAX;
+		if (intervals != NULL) {
+			intervals[i].offset = json_segment_get_offset(jobj, 0);
+			intervals[i].length = json_segment_get_size(jobj, 0) ?: UINT64_MAX;
+		}
+	}
+	if (intervals == NULL) {
+		log_dbg(cd, "Not enough memory.");
+		return 1;
 	}
 
 	r = !validate_segment_intervals(cd, first_backup, intervals);
-- 
1.8.3.1

From 46ee71edcd13e1dad50815ad65c28779aa6f7503 Mon Sep 17 00:00:00 2001
From: Ondrej Kozina <okozina@redhat.com>
Date: Tue, 25 Aug 2020 19:32:48 +0200
Subject: [PATCH 07/11] Avoid needlessly large allocations in LUKS2 validation
 code.

In case a LUKS2 backup segment creates a gap between the last regular
segment and the backup segment, report invalid metadata immediately. We stop
on the first error, so there is no need to allocate a large chunk of memory
on the heap (we may run with mlock(MCL_FUTURE) set).

Example:
- total segments count is 3
- regular segments have keys "0" and "1"
- first backup segment has key "42"
---
 lib/luks2/luks2_json_metadata.c | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/lib/luks2/luks2_json_metadata.c b/lib/luks2/luks2_json_metadata.c
index ea5c7f7..5b73b34 100644
--- a/lib/luks2/luks2_json_metadata.c
+++ b/lib/luks2/luks2_json_metadata.c
@@ -676,10 +676,16 @@ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
 		return 1;
 	}
 
+	/* avoid needlessly large allocation when first backup segment is invalid */
+	if (first_backup >= count) {
+		log_dbg(cd, "Gap between last regular segment and backup segment at key %d.", first_backup);
+		return 1;
+	}
+
 	if (first_backup < 0)
 		first_backup = count;
 
-	if (first_backup <= count && (size_t)first_backup < SIZE_MAX / sizeof(*intervals))
+	if ((size_t)first_backup < SIZE_MAX / sizeof(*intervals))
 		intervals = malloc(first_backup * sizeof(*intervals));
 	else
 		intervals = NULL;
-- 
1.8.3.1

From 752c9a52798f11d3b765b673ebaa3058eb25316e Mon Sep 17 00:00:00 2001
From: Ondrej Kozina <okozina@redhat.com>
Date: Tue, 25 Aug 2020 19:23:21 +0200
Subject: [PATCH 08/11] Simplify validation code a bit.

Keep it simple. If there is not enough memory, we cannot validate the
segments. The LUKS2 specification does not recommend continuing to process
LUKS2 metadata if it cannot be properly validated.
---
 lib/luks2/luks2_json_metadata.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/lib/luks2/luks2_json_metadata.c b/lib/luks2/luks2_json_metadata.c
index 5b73b34..df89401 100644
--- a/lib/luks2/luks2_json_metadata.c
+++ b/lib/luks2/luks2_json_metadata.c
@@ -594,9 +594,9 @@ static bool validate_segment_intervals(struct crypt_device *cd,
 static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
 {
 	json_object *jobj_segments, *jobj_digests, *jobj_offset, *jobj_size, *jobj_type, *jobj_flags, *jobj;
-	struct interval *intervals;
 	uint64_t offset, size;
 	int i, r, count, first_backup = -1;
+	struct interval *intervals = NULL;
 
 	if (!json_object_object_get_ex(hdr_jobj, "segments", &jobj_segments)) {
 		log_dbg(cd, "Missing segments section.");
@@ -687,8 +687,11 @@ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
 
 	if ((size_t)first_backup < SIZE_MAX / sizeof(*intervals))
 		intervals = malloc(first_backup * sizeof(*intervals));
-	else
-		intervals = NULL;
+
+	if (!intervals) {
+		log_dbg(cd, "Not enough memory.");
+		return 1;
+	}
 
 	for (i = 0; i < first_backup; i++) {
 		jobj = json_segments_get_segment(jobj_segments, i);
@@ -697,14 +700,8 @@ static int hdr_validate_segments(struct crypt_device *cd, json_object *hdr_jobj)
 			free(intervals);
 			return 1;
 		}
-		if (intervals != NULL) {
-			intervals[i].offset = json_segment_get_offset(jobj, 0);
-			intervals[i].length = json_segment_get_size(jobj, 0) ?: UINT64_MAX;
-		}
-	}
-	if (intervals == NULL) {
-		log_dbg(cd, "Not enough memory.");
-		return 1;
+		intervals[i].offset = json_segment_get_offset(jobj, 0);
+		intervals[i].length = json_segment_get_size(jobj, 0) ?: UINT64_MAX;
 	}
 
 	r = !validate_segment_intervals(cd, first_backup, intervals);
-- 
1.8.3.1

From 96da06430b6f5c2588c619082f2c5880df9cc776 Mon Sep 17 00:00:00 2001
From: Ondrej Kozina <okozina@redhat.com>
Date: Wed, 26 Aug 2020 17:24:05 +0200
Subject: [PATCH 09/11] Add test for LUKS2 segments validation code fix.

---
 ...enerate-luks2-segment-wrong-backup-key-0.img.sh | 67 ++++++++++++++++++++++
 ...enerate-luks2-segment-wrong-backup-key-1.img.sh | 67 ++++++++++++++++++++++
 tests/luks2-validation-test                        |  2 +
 3 files changed, 136 insertions(+)
 create mode 100755 tests/generators/generate-luks2-segment-wrong-backup-key-0.img.sh
 create mode 100755 tests/generators/generate-luks2-segment-wrong-backup-key-1.img.sh

diff --git a/tests/generators/generate-luks2-segment-wrong-backup-key-0.img.sh b/tests/generators/generate-luks2-segment-wrong-backup-key-0.img.sh
new file mode 100755
index 0000000..2499a5e
--- /dev/null
+++ b/tests/generators/generate-luks2-segment-wrong-backup-key-0.img.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+. lib.sh
+
+#
+# *** Description ***
+#
+# generate primary header with wrong backup segment id
+#
+# secondary header is corrupted on purpose as well
+#
+
+# $1 full target dir
+# $2 full source luks2 image
+
+function prepare()
+{
+	cp $SRC_IMG $TGT_IMG
+	test -d $TMPDIR || mkdir $TMPDIR
+	read_luks2_json0 $TGT_IMG $TMPDIR/json0
+	read_luks2_bin_hdr0 $TGT_IMG $TMPDIR/hdr0
+	read_luks2_bin_hdr1 $TGT_IMG $TMPDIR/hdr1
+}
+
+function generate()
+{
+	# create illegal backup segment key (used to be bug in 32bit implementations)
+	json_str=$(jq -c '.segments[(.segments | length + 1 | tostring)] = { "type" : "linear", "offset" : "512", "size" : "512", "flags":["backup-x"]}' $TMPDIR/json0)
+	test ${#json_str} -lt $((LUKS2_JSON_SIZE*512)) || exit 2
+
+	write_luks2_json "$json_str" $TMPDIR/json0
+
+	merge_bin_hdr_with_json $TMPDIR/hdr0 $TMPDIR/json0 $TMPDIR/area0
+	erase_checksum $TMPDIR/area0
+	chks0=$(calc_sha256_checksum_file $TMPDIR/area0)
+	write_checksum $chks0 $TMPDIR/area0
+	write_luks2_hdr0 $TMPDIR/area0 $TGT_IMG
+	kill_bin_hdr $TMPDIR/hdr1
+	write_luks2_hdr1 $TMPDIR/hdr1 $TGT_IMG
+}
+
+function check()
+{
+	read_luks2_bin_hdr1 $TGT_IMG $TMPDIR/hdr_res1
+	local str_res1=$(head -c 6 $TMPDIR/hdr_res1)
+	test "$str_res1" = "VACUUM" || exit 2
+
+	read_luks2_json0 $TGT_IMG $TMPDIR/json_res0
+	jq -c 'if .segments | length < 2
+	       then error("Unexpected segments count") else empty end' $TMPDIR/json_res0 || exit 5
+}
+
+function cleanup()
+{
+	rm -f $TMPDIR/*
+	rm -fd $TMPDIR
+}
+
+test $# -eq 2 || exit 1
+
+TGT_IMG=$1/$(test_img_name $0)
+SRC_IMG=$2
+
+prepare
+generate
+check
+cleanup
diff --git a/tests/generators/generate-luks2-segment-wrong-backup-key-1.img.sh b/tests/generators/generate-luks2-segment-wrong-backup-key-1.img.sh
new file mode 100755
index 0000000..702fe71
--- /dev/null
+++ b/tests/generators/generate-luks2-segment-wrong-backup-key-1.img.sh
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+. lib.sh
+
+#
+# *** Description ***
+#
+# generate primary header with wrong backup segment id
+#
+# secondary header is corrupted on purpose as well
+#
+
+# $1 full target dir
+# $2 full source luks2 image
+
+function prepare()
+{
+	cp $SRC_IMG $TGT_IMG
+	test -d $TMPDIR || mkdir $TMPDIR
+	read_luks2_json0 $TGT_IMG $TMPDIR/json0
+	read_luks2_bin_hdr0 $TGT_IMG $TMPDIR/hdr0
+	read_luks2_bin_hdr1 $TGT_IMG $TMPDIR/hdr1
+}
+
+function generate()
+{
+	# create illegal backup segment key (used to be bug in 32bit implementations)
+	json_str=$(jq -c '(.segments."0".offset | tonumber) as $i | .segments[range(1;65) | tostring] = { "type" : "linear", "offset" : ($i + 512 | tostring), "size" : "512" } | .segments."268435472" = { "type":"linear","offset":"512","size":"512","flags":["backup-x"]}' $TMPDIR/json0)
+	test ${#json_str} -lt $((LUKS2_JSON_SIZE*512)) || exit 2
+
+	write_luks2_json "$json_str" $TMPDIR/json0
+
+	merge_bin_hdr_with_json $TMPDIR/hdr0 $TMPDIR/json0 $TMPDIR/area0
+	erase_checksum $TMPDIR/area0
+	chks0=$(calc_sha256_checksum_file $TMPDIR/area0)
+	write_checksum $chks0 $TMPDIR/area0
+	write_luks2_hdr0 $TMPDIR/area0 $TGT_IMG
+	kill_bin_hdr $TMPDIR/hdr1
+	write_luks2_hdr1 $TMPDIR/hdr1 $TGT_IMG
+}
+
+function check()
+{
+	read_luks2_bin_hdr1 $TGT_IMG $TMPDIR/hdr_res1
+	local str_res1=$(head -c 6 $TMPDIR/hdr_res1)
+	test "$str_res1" = "VACUUM" || exit 2
+
+	read_luks2_json0 $TGT_IMG $TMPDIR/json_res0
+	jq -c 'if .segments | length < 64
+	       then error("Unexpected segments count") else empty end' $TMPDIR/json_res0 || exit 5
+}
+
+function cleanup()
+{
+	rm -f $TMPDIR/*
+	rm -fd $TMPDIR
+}
+
+test $# -eq 2 || exit 1
+
+TGT_IMG=$1/$(test_img_name $0)
+SRC_IMG=$2
+
+prepare
+generate
+check
+cleanup
diff --git a/tests/luks2-validation-test b/tests/luks2-validation-test
index 52945ba..04183fb 100755
--- a/tests/luks2-validation-test
+++ b/tests/luks2-validation-test
@@ -199,6 +199,8 @@ RUN luks2-segment-unknown-type.img			"R" "Validation rejected segment with all m
 RUN luks2-segment-two.img				"R" "Validation rejected two valid segments"
 RUN luks2-segment-wrong-flags.img			"F" "Failed to detect invalid flags field"
 RUN luks2-segment-wrong-flags-element.img		"F" "Failed to detect invalid flags content"
+RUN luks2-segment-wrong-backup-key-0.img		"F" "Failed to detect gap in backup segments"
+RUN luks2-segment-wrong-backup-key-1.img		"F" "Failed to detect gap in backup segments"
 
 echo "[6] Test metadata size and keyslots size (config section)"
 RUN luks2-invalid-keyslots-size-c0.img			"F" "Failed to detect too large keyslots_size in config section"
-- 
1.8.3.1