From 0e453ede1f248a004965d0d368e2c4beb83f2ce1 Mon Sep 17 00:00:00 2001
From: Vinayakswami Hariharmath <vharihar@redhat.com>
Date: Mon, 25 Jan 2021 17:32:14 +0530
Subject: [PATCH 573/584] features/shard: unlink fails due to nospace to mknod
 marker file
|
When we hit the maximum capacity of the storage space, shard_unlink()
starts failing if there is no space left on the brick to create a
marker file.

shard_unlink() happens in the following steps:

1. create a marker file named after the gfid of the base file under
   BRICK_PATH/.shard/.remove_me
2. unlink the base file
3. shard_delete_shards() deletes the shards in the background by
   picking up the entries in BRICK_PATH/.shard/.remove_me

If creation of the marker file fails, we can't really delete the
shards, which eventually becomes a problem for a user who is trying to
free up space by deleting unwanted data.
|
Solution:
Create the marker file with xdata = GLUSTERFS_INTERNAL_FOP_KEY set, so
that the fop is treated as an internal op and is therefore allowed to
create the file even within the reserved space.
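
For illustration, the core of the change is setting this key on the
xattr_req dict just before winding the marker-file mknod down to the
next xlator (condensed from the shard.c hunks below; frame, local and
xattr_req are the values the existing shard code has already prepared
at that point, so this is a sketch of the flow rather than a standalone
snippet):

    ret = dict_set_int32_sizen(xattr_req, GLUSTERFS_INTERNAL_FOP_KEY, 1);
    if (ret < 0) {
        gf_msg(this->name, GF_LOG_WARNING, 0, SHARD_MSG_DICT_OP_FAILED,
               "Failed to set key: %s on path %s", GLUSTERFS_INTERNAL_FOP_KEY,
               local->newloc.path);
        goto err;
    }

    /* The mknod now carries the internal-fop flag, so the brick-side
     * storage.reserve check lets the marker file be created. */
    STACK_WIND(frame, shard_create_marker_file_under_remove_me_cbk,
               FIRST_CHILD(this), FIRST_CHILD(this)->fops->mknod,
               &local->newloc, 0, 0, 0644, xattr_req);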
|
Backport of:
> Upstream-patch: https://github.com/gluster/glusterfs/pull/2057
> Fixes: #2038
> Change-Id: I7facebab940f9aeee81d489df429e00ef4fb7c5d
> Signed-off-by: Vinayakswami Hariharmath <vharihar@redhat.com>
|
BUG: 1891403
Change-Id: I7facebab940f9aeee81d489df429e00ef4fb7c5d
Signed-off-by: Vinayakswami Hariharmath <vharihar@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/c/rhs-glusterfs/+/244966
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Sunil Kumar Heggodu Gopala Acharya <sheggodu@redhat.com>
---
 tests/bugs/shard/issue-2038.t      | 56 ++++++++++++++++++++++++++++++++++++++
 xlators/features/shard/src/shard.c | 20 ++++++++++++++
 2 files changed, 76 insertions(+)
 create mode 100644 tests/bugs/shard/issue-2038.t
|
diff --git a/tests/bugs/shard/issue-2038.t b/tests/bugs/shard/issue-2038.t
new file mode 100644
index 0000000..fc3e7f9
--- /dev/null
+++ b/tests/bugs/shard/issue-2038.t
@@ -0,0 +1,56 @@
+#!/bin/bash
+
+. $(dirname $0)/../../include.rc
+. $(dirname $0)/../../volume.rc
+. $(dirname $0)/../../snapshot.rc
+
+cleanup
+
+FILE_COUNT_TIME=5
+
+function get_file_count {
+    ls $1* | wc -l
+}
+
+TEST verify_lvm_version
+TEST glusterd
+TEST pidof glusterd
+TEST init_n_bricks 1
+TEST setup_lvm 1
+
+TEST $CLI volume create $V0 $H0:$L1
+TEST $CLI volume start $V0
+
+$CLI volume info
+
+TEST $CLI volume set $V0 features.shard on
+TEST glusterfs --volfile-id=/$V0 --volfile-server=$H0 $M0
+
+#Setting the size in percentage
+TEST $CLI volume set $V0 storage.reserve 40
+
+#wait 5s to reset disk_space_full flag
+sleep 5
+
+TEST touch $M0/test
+TEST unlink $M0/test
+
+TEST dd if=/dev/zero of=$M0/a bs=80M count=1
+TEST dd if=/dev/zero of=$M0/b bs=10M count=1
+
+gfid_new=$(get_gfid_string $M0/a)
+
+# Wait 5s to update the disk_space_full flag, because the thread checks
+# the disk space only once every 5s
+
+sleep 5
+# setup_lvm creates an lvm partition of 150M and 40M is reserved, so after
+# consuming more than 110M the next unlink should not fail
+# Delete the base file and check that the shards get cleaned up
+TEST unlink $M0/a
+TEST ! stat $M0/a
+
+TEST $CLI volume stop $V0
+TEST $CLI volume delete $V0
+
+cleanup
diff --git a/xlators/features/shard/src/shard.c b/xlators/features/shard/src/shard.c
index d1d7d7a..8d4a970 100644
--- a/xlators/features/shard/src/shard.c
+++ b/xlators/features/shard/src/shard.c
@@ -4078,6 +4078,16 @@ shard_create_marker_file_under_remove_me(call_frame_t *frame, xlator_t *this,
     SHARD_INODE_CREATE_INIT(this, bs, xattr_req, &local->newloc,
                             local->prebuf.ia_size, 0, err);
 
+    /* Mark this as an internal operation, so that in case of disk full,
+     * the marker file will be created as part of reserve space */
+    ret = dict_set_int32_sizen(xattr_req, GLUSTERFS_INTERNAL_FOP_KEY, 1);
+    if (ret < 0) {
+        gf_msg(this->name, GF_LOG_WARNING, 0, SHARD_MSG_DICT_OP_FAILED,
+               "Failed to set key: %s on path %s", GLUSTERFS_INTERNAL_FOP_KEY,
+               local->newloc.path);
+        goto err;
+    }
+
     STACK_WIND(frame, shard_create_marker_file_under_remove_me_cbk,
                FIRST_CHILD(this), FIRST_CHILD(this)->fops->mknod,
                &local->newloc, 0, 0, 0644, xattr_req);
@@ -5843,6 +5853,16 @@ shard_mkdir_internal_dir(call_frame_t *frame, xlator_t *this,
 
     SHARD_SET_ROOT_FS_ID(frame, local);
 
+    /* Mark this as an internal operation, so that in case of disk full
+     * the internal dir will be created as part of reserve space */
+    ret = dict_set_int32_sizen(xattr_req, GLUSTERFS_INTERNAL_FOP_KEY, 1);
+    if (ret < 0) {
+        gf_msg(this->name, GF_LOG_WARNING, 0, SHARD_MSG_DICT_OP_FAILED,
+               "Failed to set key: %s on path %s", GLUSTERFS_INTERNAL_FOP_KEY,
+               loc->path);
+        goto err;
+    }
+
     STACK_WIND_COOKIE(frame, shard_mkdir_internal_dir_cbk, (void *)(long)type,
                       FIRST_CHILD(this), FIRST_CHILD(this)->fops->mkdir, loc,
                       0755, 0, xattr_req);
-- 
1.8.3.1
|