From c0d88596bda4eb5c7e942e621a7d38c7ae6d737a Mon Sep 17 00:00:00 2001
From: Sanju Rakonde <srakonde@redhat.com>
Date: Wed, 28 Nov 2018 16:13:58 +0530
Subject: [PATCH 477/493] glusterd: perform rcu_read_lock/unlock() under
cleanup_lock mutex
Problem: glusterd should not try to acquire locks on any resources
once it has received a SIGTERM and cleanup has started. Otherwise we
might hit a segfault, since the thread going through the cleanup path
will be freeing up the resources while some other thread might still
be trying to acquire locks on the freed resources.

Solution: perform rcu_read_lock/unlock() under the cleanup_lock mutex.
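
For context, the RCU_READ_LOCK/RCU_READ_UNLOCK macros introduced in
glusterd.h (see the final hunk of this patch) serialize entry to and
exit from RCU read-side critical sections against the cleanup path.
A minimal sketch of the pattern, assuming the cleanup thread holds
ctx->cleanup_lock while it frees resources:

    /* Sketch only, not part of the patch. A reader blocked on
     * cleanup_lock here can never enter an RCU read-side critical
     * section while cleanup is freeing peer resources. */
    pthread_mutex_lock(&(THIS->ctx)->cleanup_lock);
    rcu_read_lock();        /* cleanup cannot be running right now */
    pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);

    /* ... read peerinfo structures under RCU protection ... */

    pthread_mutex_lock(&(THIS->ctx)->cleanup_lock);
    rcu_read_unlock();
    pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock);

Note that the mutex is only held around the lock/unlock transitions,
not for the duration of the read-side critical section itself.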
> fixes: bz#1654270
> Change-Id: I87a97cfe4f272f74f246d688660934638911ce54
> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
upstream patch: https://review.gluster.org/#/c/glusterfs/+/21743/
Change-Id: I87a97cfe4f272f74f246d688660934638911ce54
BUG: 1654161
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/158647
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
---
xlators/mgmt/glusterd/src/glusterd-brick-ops.c | 8 +--
xlators/mgmt/glusterd/src/glusterd-handler.c | 75 +++++++++++-----------
xlators/mgmt/glusterd/src/glusterd-handshake.c | 32 ++++-----
xlators/mgmt/glusterd/src/glusterd-mgmt.c | 28 ++++----
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 30 ++++-----
xlators/mgmt/glusterd/src/glusterd-peer-utils.c | 40 ++++++------
xlators/mgmt/glusterd/src/glusterd-replace-brick.c | 4 +-
xlators/mgmt/glusterd/src/glusterd-reset-brick.c | 4 +-
xlators/mgmt/glusterd/src/glusterd-rpc-ops.c | 48 +++++++-------
xlators/mgmt/glusterd/src/glusterd-server-quorum.c | 4 +-
xlators/mgmt/glusterd/src/glusterd-sm.c | 64 +++++++++---------
xlators/mgmt/glusterd/src/glusterd-snapshot.c | 6 +-
xlators/mgmt/glusterd/src/glusterd-store.c | 4 +-
xlators/mgmt/glusterd/src/glusterd-syncop.c | 40 ++++++------
xlators/mgmt/glusterd/src/glusterd-utils.c | 8 +--
xlators/mgmt/glusterd/src/glusterd.h | 20 +++++-
16 files changed, 215 insertions(+), 200 deletions(-)
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
index 416412e..5ad8ab8 100644
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
@@ -2097,7 +2097,7 @@ check:
continue;
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_uuid
(brickinfo->uuid);
if (!peerinfo) {
@@ -2105,7 +2105,7 @@ check:
"brick %s is not in cluster", brick);
*errstr = gf_strdup (msg);
ret = -1;
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
goto out;
}
if (!peerinfo->connected) {
@@ -2113,10 +2113,10 @@ check:
"brick %s is down", brick);
*errstr = gf_strdup (msg);
ret = -1;
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
goto out;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
}
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
index c71bf3c..d40de89 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
@@ -105,7 +105,7 @@ glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
ret = glusterd_remote_hostname_get (req, rhost, sizeof (rhost));
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (uuid, rhost);
@@ -179,7 +179,7 @@ glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
ret = GLUSTERD_CONNECTION_AWAITED;
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
if (ctx && ctx->hostname)
@@ -198,7 +198,6 @@ out:
GF_FREE (event);
}
-
return ret;
}
@@ -214,7 +213,7 @@ glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid,
if (!port)
port = GF_DEFAULT_BASE_PORT;
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (uuid, hostname);
@@ -269,7 +268,7 @@ glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid,
ret = 0;
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (0 != ret) {
if (ctx && ctx->hostname)
@@ -902,9 +901,9 @@ __glusterd_handle_cluster_lock (rpcsvc_request_t *req)
gf_msg_debug (this->name, 0, "Received LOCK from uuid: %s",
uuid_utoa (lock_req.uuid));
- rcu_read_lock ();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid (lock_req.uuid) == NULL);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg (this->name, GF_LOG_WARNING, 0,
GD_MSG_PEER_NOT_FOUND, "%s doesn't "
@@ -1060,9 +1059,9 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
gf_msg_debug (this->name, 0, "transaction ID = %s",
uuid_utoa (*txn_id));
- rcu_read_lock ();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg (this->name, GF_LOG_WARNING, 0,
GD_MSG_PEER_NOT_FOUND, "%s doesn't "
@@ -1144,9 +1143,9 @@ __glusterd_handle_commit_op (rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg (this->name, GF_LOG_WARNING, 0,
GD_MSG_PEER_NOT_FOUND, "%s doesn't "
@@ -1270,12 +1269,12 @@ __glusterd_handle_cli_probe (rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_hostname (hostname);
ret = (peerinfo && gd_peer_has_address (peerinfo, hostname));
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg_debug ("glusterd", 0, "Probe host %s port %d "
@@ -2329,7 +2328,7 @@ __glusterd_handle_fsm_log (rpcsvc_request_t *req)
conf = this->private;
ret = glusterd_sm_tr_log_add_to_dict (dict, &conf->op_sm_log);
} else {
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_hostname (cli_req.name);
if (!peerinfo) {
@@ -2341,7 +2340,7 @@ __glusterd_handle_fsm_log (rpcsvc_request_t *req)
(dict, &peerinfo->sm_log);
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
}
out:
@@ -2482,9 +2481,9 @@ __glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
gf_msg_debug (this->name, 0,
"Received UNLOCK from uuid: %s", uuid_utoa (unlock_req.uuid));
- rcu_read_lock ();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find_by_uuid (unlock_req.uuid) == NULL);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg (this->name, GF_LOG_WARNING, 0,
GD_MSG_PEER_NOT_FOUND, "%s doesn't "
@@ -2786,11 +2785,11 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
}
ret = 0;
- rcu_read_lock ();
+ RCU_READ_LOCK;
if (glusterd_peerinfo_find (friend_req.uuid, NULL) == NULL) {
ret = -1;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg (this->name, GF_LOG_CRITICAL, 0,
GD_MSG_REQ_FROM_UNKNOWN_PEER,
@@ -2856,7 +2855,7 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
memset (key, 0, sizeof (key));
snprintf (key, sizeof (key), "friend%d", i);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (uuid, NULL);
if (peerinfo == NULL) {
/* Create a new peer and add it to the list as there is
@@ -2903,7 +2902,7 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
}
}
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret)
break;
@@ -3002,7 +3001,7 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
goto out;
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (probe_req.uuid, remote_hostname);
if ((peerinfo == NULL) && (!cds_list_empty (&conf->peers))) {
rsp.op_ret = -1;
@@ -3024,7 +3023,7 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
rsp.op_errno = GF_PROBE_ADD_FAILED;
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
respond:
gf_uuid_copy (rsp.uuid, MY_UUID);
@@ -3370,11 +3369,11 @@ glusterd_friend_remove (uuid_t uuid, char *hostname)
int ret = -1;
glusterd_peerinfo_t *peerinfo = NULL;
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (uuid, hostname);
if (peerinfo == NULL) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
goto out;
}
@@ -3382,7 +3381,7 @@ glusterd_friend_remove (uuid_t uuid, char *hostname)
if (ret)
gf_msg (THIS->name, GF_LOG_WARNING, 0,
GD_MSG_VOL_CLEANUP_FAIL, "Volumes cleanup failed");
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
/* Giving up the critical section here as glusterd_peerinfo_cleanup must
* be called from outside a critical section
*/
@@ -3715,7 +3714,7 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
GF_ASSERT (hoststr);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (NULL, hoststr);
if (peerinfo == NULL) {
@@ -3763,7 +3762,7 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
}
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
gf_msg_debug ("glusterd", 0, "returning %d", ret);
return ret;
}
@@ -3780,7 +3779,7 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
GF_ASSERT (hoststr);
GF_ASSERT (req);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (uuid, hoststr);
if (peerinfo == NULL) {
@@ -3840,7 +3839,7 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
peerinfo->detaching = _gf_true;
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
return ret;
}
@@ -4162,7 +4161,7 @@ glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
/* Reset ret to 0, needed to prevent failure incase no peers exist */
ret = 0;
- rcu_read_lock ();
+ RCU_READ_LOCK;
if (!cds_list_empty (&priv->peers)) {
cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
count++;
@@ -4173,7 +4172,7 @@ glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
}
}
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret)
goto out;
@@ -5592,7 +5591,7 @@ glusterd_get_state (rpcsvc_request_t *req, dict_t *dict)
if (priv->opts)
dict_foreach (priv->opts, glusterd_print_global_options, fp);
- rcu_read_lock ();
+ RCU_READ_LOCK;
fprintf (fp, "\n[Peers]\n");
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
@@ -5621,7 +5620,7 @@ glusterd_get_state (rpcsvc_request_t *req, dict_t *dict)
count_bkp = 0;
fprintf (fp, "\n");
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
count = 0;
fprintf (fp, "\n[Volumes]\n");
@@ -6259,7 +6258,7 @@ glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx, int32_t op_errno)
GF_ASSERT (peerctx);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
if (!peerinfo) {
gf_msg_debug (THIS->name, 0, "Could not find peer %s(%s). "
@@ -6300,7 +6299,7 @@ glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx, int32_t op_errno)
}
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
return ret;
}
@@ -6340,7 +6339,7 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
peerctx->peername);
return 0;
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -6466,7 +6465,7 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
}
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
glusterd_friend_sm ();
glusterd_op_sm ();
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
index b2a9b20..d18a7a3 100644
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
@@ -1140,9 +1140,9 @@ gd_validate_mgmt_hndsk_req (rpcsvc_request_t *req, dict_t *dict)
*/
if (!ret) {
gf_uuid_parse (uuid_str, peer_uuid);
- rcu_read_lock ();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find (peer_uuid, NULL) != NULL);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret)
return _gf_true;
}
@@ -1158,7 +1158,7 @@ gd_validate_mgmt_hndsk_req (rpcsvc_request_t *req, dict_t *dict)
* is available in the peerinfo list but the uuid has changed of the
* node due to a reinstall, in that case the validation should fail!
*/
- rcu_read_lock ();
+ RCU_READ_LOCK;
if (!uuid_str) {
ret = (glusterd_peerinfo_find (NULL, hostname) == NULL);
} else {
@@ -1177,7 +1177,7 @@ gd_validate_mgmt_hndsk_req (rpcsvc_request_t *req, dict_t *dict)
ret = -1;
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_HANDSHAKE_REQ_REJECTED, "Rejecting management "
@@ -1728,7 +1728,7 @@ glusterd_event_connected_inject (glusterd_peerctx_t *peerctx)
goto out;
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -1754,7 +1754,7 @@ glusterd_event_connected_inject (glusterd_peerctx_t *peerctx)
GD_MSG_EVENT_INJECT_FAIL, "Unable to inject "
"EVENT_CONNECTED ret = %d", ret);
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
out:
gf_msg_debug ("glusterd", 0, "returning %d", ret);
@@ -1824,7 +1824,7 @@ __glusterd_mgmt_hndsk_version_ack_cbk (struct rpc_req *req, struct iovec *iov,
frame = myframe;
peerctx = frame->local;
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
if (!peerinfo) {
gf_msg_debug (this->name, 0, "Could not find peer %s(%s)",
@@ -1887,7 +1887,7 @@ out:
if (ret != 0 && peerinfo)
rpc_transport_disconnect (peerinfo->rpc->conn.trans, _gf_false);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
frame->local = NULL;
STACK_DESTROY (frame->root);
@@ -1930,7 +1930,7 @@ __glusterd_mgmt_hndsk_version_cbk (struct rpc_req *req, struct iovec *iov,
frame = myframe;
peerctx = frame->local;
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -2014,7 +2014,7 @@ out:
_gf_false);
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (rsp.hndsk.hndsk_val)
free (rsp.hndsk.hndsk_val);
@@ -2070,7 +2070,7 @@ glusterd_mgmt_handshake (xlator_t *this, glusterd_peerctx_t *peerctx)
GF_PROTOCOL_DICT_SERIALIZE (this, req_dict, (&req.hndsk.hndsk_val),
req.hndsk.hndsk_len, ret, out);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -2086,7 +2086,7 @@ glusterd_mgmt_handshake (xlator_t *this, glusterd_peerctx_t *peerctx)
(xdrproc_t)xdr_gf_mgmt_hndsk_req);
ret = 0;
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
out:
if (ret && frame)
STACK_DESTROY (frame->root);
@@ -2202,7 +2202,7 @@ __glusterd_peer_dump_version_cbk (struct rpc_req *req, struct iovec *iov,
frame = myframe;
peerctx = frame->local;
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -2282,7 +2282,7 @@ out:
if (ret != 0 && peerinfo)
rpc_transport_disconnect (peerinfo->rpc->conn.trans, _gf_false);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
glusterd_friend_sm ();
glusterd_op_sm ();
@@ -2330,7 +2330,7 @@ glusterd_peer_dump_version (xlator_t *this, struct rpc_clnt *rpc,
if (!peerctx)
goto out;
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
if (!peerinfo) {
@@ -2347,7 +2347,7 @@ glusterd_peer_dump_version (xlator_t *this, struct rpc_clnt *rpc,
glusterd_peer_dump_version_cbk,
(xdrproc_t)xdr_gf_dump_req);
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
out:
if (ret && frame)
STACK_DESTROY (frame->root);
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
index 751d6e4..d98c6bc 100644
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
@@ -52,14 +52,14 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
args->op_ret = op_ret;
args->op_errno = op_errno;
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (peerid, NULL);
if (peerinfo)
peer_str = gf_strdup (peerinfo->hostname);
else
peer_str = gf_strdup (uuid_utoa (uuid));
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
is_operrstr_blk = (op_errstr && strcmp (op_errstr, ""));
err_string = (is_operrstr_blk) ? op_errstr : err_str;
@@ -761,7 +761,7 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -779,7 +779,7 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict,
MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1126,7 +1126,7 @@ glusterd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *req_dict,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -1144,7 +1144,7 @@ glusterd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *req_dict,
MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1401,7 +1401,7 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *rsp_dict, dict_t *req_dict,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -1419,7 +1419,7 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *rsp_dict, dict_t *req_dict,
MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1667,7 +1667,7 @@ glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -1702,7 +1702,7 @@ glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1912,7 +1912,7 @@ glusterd_mgmt_v3_post_validate (glusterd_op_t op, int32_t op_ret, dict_t *dict,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -1930,7 +1930,7 @@ glusterd_mgmt_v3_post_validate (glusterd_op_t op, int32_t op_ret, dict_t *dict,
&args, MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -2094,7 +2094,7 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_op_t op, dict_t *dict,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -2112,7 +2112,7 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_op_t op, dict_t *dict,
MY_UUID, peer_uuid);
peer_cnt++;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
index 9f76ab3..6414a4e 100644
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
@@ -1825,7 +1825,7 @@ glusterd_op_stage_sync_volume (dict_t *dict, char **op_errstr)
ret = 0;
}
} else {
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (NULL, hostname);
if (peerinfo == NULL) {
@@ -1841,7 +1841,7 @@ glusterd_op_stage_sync_volume (dict_t *dict, char **op_errstr)
ret = -1;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
}
out:
@@ -3964,7 +3964,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
priv = this->private;
GF_ASSERT (priv);
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -3985,7 +3985,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = proc->fn (NULL, this, peerinfo);
if (ret) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
gf_msg (this->name, GF_LOG_WARNING, 0,
GD_MSG_LOCK_REQ_SEND_FAIL,
"Failed to send lock request "
@@ -4009,7 +4009,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
ret = dict_set_static_ptr (dict, "peerinfo",
peerinfo);
if (ret) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED,
"failed to set peerinfo");
@@ -4019,7 +4019,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
ret = proc->fn (NULL, this, dict);
if (ret) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
gf_msg (this->name, GF_LOG_WARNING, 0,
GD_MSG_MGMTV3_LOCK_REQ_SEND_FAIL,
"Failed to send mgmt_v3 lock "
@@ -4036,7 +4036,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
}
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
@@ -4074,7 +4074,7 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
priv = this->private;
GF_ASSERT (priv);
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -4152,7 +4152,7 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
}
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
@@ -4762,7 +4762,7 @@ glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
goto out;
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -4781,7 +4781,7 @@ glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = dict_set_static_ptr (dict, "peerinfo", peerinfo);
if (ret) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED, "failed to "
"set peerinfo");
@@ -4800,7 +4800,7 @@ glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
pending_count++;
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
out:
@@ -5413,7 +5413,7 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
goto out;
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -5432,7 +5432,7 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
if (proc->fn) {
ret = dict_set_static_ptr (dict, "peerinfo", peerinfo);
if (ret) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_DICT_SET_FAILED,
"failed to set peerinfo");
@@ -5451,7 +5451,7 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
pending_count++;
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
opinfo.pending_count = pending_count;
gf_msg_debug (this->name, 0, "Sent commit op req for 'Volume %s' "
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
index 592aa16..6ed5831 100644
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
@@ -190,7 +190,7 @@ glusterd_peerinfo_find_by_uuid (uuid_t uuid)
if (gf_uuid_is_null (uuid))
return NULL;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
if (!gf_uuid_compare (entry->uuid, uuid)) {
@@ -201,7 +201,7 @@ glusterd_peerinfo_find_by_uuid (uuid_t uuid)
break;
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (!found)
gf_msg_debug (this->name, 0,
@@ -330,7 +330,7 @@ glusterd_chk_peers_connected_befriended (uuid_t skip_uuid)
priv= THIS->private;
GF_ASSERT (priv);
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
if (!gf_uuid_is_null (skip_uuid) && !gf_uuid_compare (skip_uuid,
@@ -343,7 +343,7 @@ glusterd_chk_peers_connected_befriended (uuid_t skip_uuid)
break;
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
gf_msg_debug (THIS->name, 0, "Returning %s",
(ret?"TRUE":"FALSE"));
@@ -366,7 +366,7 @@ glusterd_uuid_to_hostname (uuid_t uuid)
if (!gf_uuid_compare (MY_UUID, uuid)) {
hostname = gf_strdup ("localhost");
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
if (!cds_list_empty (&priv->peers)) {
cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
if (!gf_uuid_compare (entry->uuid, uuid)) {
@@ -375,7 +375,7 @@ glusterd_uuid_to_hostname (uuid_t uuid)
}
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
return hostname;
}
@@ -406,14 +406,14 @@ glusterd_are_all_peers_up ()
conf = this->private;
GF_VALIDATE_OR_GOTO (this->name, conf, out);
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
if (!peerinfo->connected) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
goto out;
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
peers_up = _gf_true;
@@ -434,7 +434,7 @@ glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
if (!gf_uuid_compare (brickinfo->uuid, MY_UUID))
continue;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, peers, uuid_list) {
if (gf_uuid_compare (peerinfo->uuid, brickinfo->uuid))
continue;
@@ -447,11 +447,11 @@ glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
*down_peerstr = gf_strdup (peerinfo->hostname);
gf_msg_debug (THIS->name, 0, "Peer %s is down. ",
peerinfo->hostname);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
goto out;
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
}
ret = _gf_true;
@@ -664,7 +664,7 @@ gd_peerinfo_find_from_hostname (const char *hoststr)
GF_VALIDATE_OR_GOTO (this->name, (hoststr != NULL), out);
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peer, &priv->peers, uuid_list) {
cds_list_for_each_entry_rcu (tmphost, &peer->hostnames,
hostname_list) {
@@ -679,7 +679,7 @@ gd_peerinfo_find_from_hostname (const char *hoststr)
}
}
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
out:
return found;
}
@@ -713,7 +713,7 @@ gd_peerinfo_find_from_addrinfo (const struct addrinfo *addr)
GF_VALIDATE_OR_GOTO (this->name, (addr != NULL), out);
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peer, &conf->peers, uuid_list) {
cds_list_for_each_entry_rcu (address, &peer->hostnames,
hostname_list) {
@@ -747,7 +747,7 @@ gd_peerinfo_find_from_addrinfo (const struct addrinfo *addr)
}
}
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
out:
return found;
}
@@ -1014,7 +1014,7 @@ glusterd_peerinfo_find_by_generation (uint32_t generation) {
GF_ASSERT (priv);
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
if (entry->generation == generation) {
@@ -1025,7 +1025,7 @@ glusterd_peerinfo_find_by_generation (uint32_t generation) {
break;
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (!found)
gf_msg_debug (this->name, 0,
@@ -1047,10 +1047,10 @@ glusterd_get_peers_count () {
conf = this->private;
GF_VALIDATE_OR_GOTO (this->name, conf, out);
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peer, &conf->peers, uuid_list)
count++;
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
out:
return count;
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
index 5fc3669..f9ad524 100644
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
@@ -278,7 +278,7 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
}
if (!gf_is_local_addr (host)) {
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (NULL, host);
if (peerinfo == NULL) {
@@ -300,7 +300,7 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
*op_errstr = gf_strdup (msg);
ret = -1;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-reset-brick.c b/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
index c1de043..60c5716 100644
--- a/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
+++ b/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
@@ -165,7 +165,7 @@ glusterd_reset_brick_prevalidate (dict_t *dict, char **op_errstr,
if (ret)
goto out;
} else {
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (NULL, host);
if (peerinfo == NULL) {
@@ -190,7 +190,7 @@ glusterd_reset_brick_prevalidate (dict_t *dict, char **op_errstr,
*op_errstr = gf_strdup (msg);
ret = -1;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret)
goto out;
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
index 86e1256..c669240 100644
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
@@ -280,7 +280,7 @@ __glusterd_probe_cbk (struct rpc_req *req, struct iovec *iov,
goto out;
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (rsp.uuid, rsp.hostname);
if (peerinfo == NULL) {
ret = -1;
@@ -422,7 +422,7 @@ cont:
GD_MSG_PROBE_REQ_RESP_RCVD, "Received resp to probe req");
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
out:
free (rsp.hostname);//malloced by xdr
@@ -485,7 +485,7 @@ __glusterd_friend_add_cbk (struct rpc_req * req, struct iovec *iov,
"Received %s from uuid: %s, host: %s, port: %d",
(op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid), rsp.hostname, rsp.port);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (rsp.uuid, rsp.hostname);
if (peerinfo == NULL) {
@@ -527,7 +527,7 @@ __glusterd_friend_add_cbk (struct rpc_req * req, struct iovec *iov,
ret = glusterd_friend_sm_inject_event (event);
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
out:
ctx = ((call_frame_t *)myframe)->local;
((call_frame_t *)myframe)->local = NULL;
@@ -605,7 +605,7 @@ __glusterd_friend_remove_cbk (struct rpc_req * req, struct iovec *iov,
(op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid), rsp.hostname, rsp.port);
inject:
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (rsp.uuid, ctx->hostname);
if (peerinfo == NULL) {
@@ -640,7 +640,7 @@ inject:
op_ret = 0;
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
respond:
ret = glusterd_xfer_cli_deprobe_resp (ctx->req, op_ret, op_errno, NULL,
@@ -769,9 +769,9 @@ __glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
uuid_utoa (rsp.uuid));
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == NULL);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -889,9 +889,9 @@ glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
uuid_utoa (rsp.uuid));
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == NULL);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -1000,9 +1000,9 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
uuid_utoa (rsp.uuid));
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == NULL);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -1109,9 +1109,9 @@ __glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
uuid_utoa (rsp.uuid));
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == NULL);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -1239,7 +1239,7 @@ out:
uuid_utoa (rsp.uuid));
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
if (peerinfo == NULL) {
gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -1268,7 +1268,7 @@ out:
event_type = GD_OP_EVENT_RCVD_ACC;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
ret = glusterd_set_txn_opinfo (txn_id, &opinfo);
@@ -1399,7 +1399,7 @@ __glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
"for txn_id = %s", uuid_utoa (*txn_id));
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
if (peerinfo == NULL) {
gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -1450,7 +1450,7 @@ __glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
}
}
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
out:
@@ -1554,11 +1554,11 @@ glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this,
GF_ASSERT (priv);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
if (!peerinfo) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
ret = -1;
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_PEER_NOT_FOUND, "Could not find peer %s(%s)",
@@ -1570,7 +1570,7 @@ glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this,
req.hostname = gf_strdup (peerinfo->hostname);
req.port = peerinfo->port;
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
ret = glusterd_add_volumes_to_export_dict (&peer_data);
if (ret) {
@@ -1653,11 +1653,11 @@ glusterd_rpc_friend_remove (call_frame_t *frame, xlator_t *this,
GF_ASSERT (priv);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
if (!peerinfo) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
ret = -1;
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_PEER_NOT_FOUND, "Could not find peer %s(%s)",
@@ -1674,7 +1674,7 @@ glusterd_rpc_friend_remove (call_frame_t *frame, xlator_t *this,
this, glusterd_friend_remove_cbk,
(xdrproc_t)xdr_gd1_mgmt_friend_req);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
out:
GF_FREE (req.hostname);
diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
index b01bfaa..ef97bfd 100644
--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
+++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
@@ -224,14 +224,14 @@ glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count,
if (active_count)
*active_count = 1;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
if (_is_contributing_to_quorum (peerinfo->quorum_contrib))
inquorum_count = inquorum_count + 1;
if (active_count && (peerinfo->quorum_contrib == QUORUM_UP))
*active_count = *active_count + 1;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
ret = dict_get_str (conf->opts, GLUSTERD_QUORUM_RATIO_KEY, &val);
if (ret == 0) {
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
index 6c56837..a2ef9f7 100644
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
@@ -157,7 +157,7 @@ glusterd_broadcast_friend_delete (char *hostname, uuid_t uuid)
if (ret)
goto out;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
if (!peerinfo->connected || !peerinfo->peer)
continue;
@@ -180,7 +180,7 @@ glusterd_broadcast_friend_delete (char *hostname, uuid_t uuid)
}
}
unlock:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
@@ -224,7 +224,7 @@ glusterd_ac_reverse_probe_begin (glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT (event);
GF_ASSERT (ctx);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
if (!peerinfo) {
@@ -271,7 +271,7 @@ glusterd_ac_reverse_probe_begin (glusterd_friend_sm_event_t *event, void *ctx)
}
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
if (new_event)
@@ -302,7 +302,7 @@ glusterd_ac_friend_add (glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT (conf);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
if (!peerinfo) {
@@ -326,7 +326,7 @@ glusterd_ac_friend_add (glusterd_friend_sm_event_t *event, void *ctx)
}
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret && frame)
STACK_DESTROY (frame->root);
@@ -359,7 +359,7 @@ glusterd_ac_friend_probe (glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT (conf);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (NULL, probe_ctx->hostname);
if (peerinfo == NULL) {
//We should not reach this state ideally
@@ -406,7 +406,7 @@ glusterd_ac_friend_probe (glusterd_friend_sm_event_t *event, void *ctx)
}
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (dict)
dict_unref (dict);
@@ -439,7 +439,7 @@ glusterd_ac_send_friend_remove_req (glusterd_friend_sm_event_t *event,
GF_ASSERT (conf);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
if (!peerinfo) {
@@ -489,7 +489,7 @@ glusterd_ac_send_friend_remove_req (glusterd_friend_sm_event_t *event,
}
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
@@ -533,7 +533,7 @@ glusterd_ac_send_friend_update (glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT (priv);
- rcu_read_lock ();
+ RCU_READ_LOCK;
cur_peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
if (!cur_peerinfo) {
@@ -596,7 +596,7 @@ glusterd_ac_send_friend_update (glusterd_friend_sm_event_t *event, void *ctx)
gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (friends)
dict_unref (friends);
@@ -628,7 +628,7 @@ glusterd_ac_update_friend (glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT (priv);
- rcu_read_lock ();
+ RCU_READ_LOCK;
cur_peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
if (!cur_peerinfo) {
@@ -690,7 +690,7 @@ glusterd_ac_update_friend (glusterd_friend_sm_event_t *event, void *ctx)
gf_msg_debug (this->name, 0, "Returning with %d", ret);
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (friends)
dict_unref (friends);
@@ -790,13 +790,13 @@ glusterd_ac_handle_friend_remove_req (glusterd_friend_sm_event_t *event,
ret = glusterd_xfer_friend_remove_resp (ev_ctx->req, ev_ctx->hostname,
ev_ctx->port);
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_REMOVE_FRIEND,
&new_event);
if (ret) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
goto out;
}
@@ -805,13 +805,13 @@ glusterd_ac_handle_friend_remove_req (glusterd_friend_sm_event_t *event,
ret = glusterd_friend_sm_inject_event (new_event);
if (ret) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
goto out;
}
new_event = NULL;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
glusterd_peer_detach_cleanup (priv);
out:
@@ -831,7 +831,7 @@ glusterd_ac_friend_remove (glusterd_friend_sm_event_t *event, void *ctx)
GF_ASSERT (event);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
if (!peerinfo) {
@@ -839,7 +839,7 @@ glusterd_ac_friend_remove (glusterd_friend_sm_event_t *event, void *ctx)
GD_MSG_PEER_NOT_FOUND,
"Could not find peer %s(%s)",
event->peername, uuid_utoa (event->peerid));
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
goto out;
}
ret = glusterd_friend_remove_cleanup_vols (peerinfo->uuid);
@@ -847,7 +847,7 @@ glusterd_ac_friend_remove (glusterd_friend_sm_event_t *event, void *ctx)
gf_msg (THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_CLEANUP_FAIL,
"Volumes cleanup failed");
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
/* Exiting read critical section as glusterd_peerinfo_cleanup calls
* synchronize_rcu before freeing the peerinfo
*/
@@ -896,14 +896,14 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
ev_ctx = ctx;
gf_uuid_copy (uuid, ev_ctx->uuid);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
if (!peerinfo) {
gf_msg (this->name, GF_LOG_ERROR, 0,
GD_MSG_PEER_NOT_FOUND, "Could not find peer %s(%s)",
event->peername, uuid_utoa (event->peerid));
ret = -1;
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
goto out;
}
@@ -913,7 +913,7 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
*/
gf_uuid_copy (peerinfo->uuid, ev_ctx->uuid);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
conf = this->private;
GF_ASSERT (conf);
@@ -1032,7 +1032,7 @@ glusterd_friend_sm_transition_state (uuid_t peerid, char *peername,
GF_ASSERT (state);
GF_ASSERT (peername);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (peerid, peername);
if (!peerinfo) {
goto out;
@@ -1047,7 +1047,7 @@ glusterd_friend_sm_transition_state (uuid_t peerid, char *peername,
ret = 0;
out:
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
return ret;
}
@@ -1357,7 +1357,7 @@ glusterd_friend_sm ()
cds_list_del_init (&event->list);
event_type = event->event;
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (event->peerid,
event->peername);
@@ -1368,7 +1368,7 @@ glusterd_friend_sm ()
glusterd_friend_sm_event_name_get (event_type));
GF_FREE (event);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
continue;
}
gf_msg_debug ("glusterd", 0, "Dequeued event of type: '%s'",
@@ -1377,7 +1377,7 @@ glusterd_friend_sm ()
old_state = peerinfo->state.state;
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
/* Giving up read-critical section here as we only need
* the current state to call the handler.
*
@@ -1435,11 +1435,11 @@ glusterd_friend_sm ()
/* We need to obtain peerinfo reference once again as we
* had exited the read critical section above.
*/
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (event->peerid,
event->peername);
if (!peerinfo) {
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
/* A peer can only be deleted as a effect of
* this state machine, and two such state
* machines can never run at the same time.
@@ -1463,7 +1463,7 @@ glusterd_friend_sm ()
}
ret = glusterd_store_peerinfo (peerinfo);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
glusterd_destroy_friend_event_context (event);
GF_FREE (event);
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
index 830a67f..3c362e1 100644
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
@@ -185,7 +185,7 @@ glusterd_find_missed_snap (dict_t *rsp_dict, glusterd_volinfo_t *vol,
continue;
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, peers, uuid_list) {
if (gf_uuid_compare (peerinfo->uuid, brickinfo->uuid)) {
/* If the brick doesnt belong to this peer */
@@ -210,12 +210,12 @@ glusterd_find_missed_snap (dict_t *rsp_dict, glusterd_volinfo_t *vol,
"info for %s:%s in the "
"rsp_dict", brickinfo->hostname,
brickinfo->path);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
goto out;
}
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
brick_count++;
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
index b3c4d9a..1db2c7c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
@@ -4593,13 +4593,13 @@ glusterd_store_retrieve_peers (xlator_t *this)
args.mode = GD_MODE_ON;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
ret = glusterd_friend_rpc_create (this, peerinfo, &args);
if (ret)
break;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
peerinfo = NULL;
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
index 5aaa7f8..9a67d1c 100644
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
@@ -52,13 +52,13 @@ gd_collate_errors (struct syncargs *args, int op_ret, int op_errno,
args->op_ret = op_ret;
args->op_errno = op_errno;
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (peerid, NULL);
if (peerinfo)
peer_str = gf_strdup (peerinfo->hostname);
else
peer_str = gf_strdup (uuid_utoa (uuid));
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (op_errstr && strcmp (op_errstr, "")) {
len = snprintf (err_str, sizeof(err_str) - 1,
@@ -571,7 +571,7 @@ _gd_syncop_mgmt_lock_cbk (struct rpc_req *req, struct iovec *iov,
gf_uuid_copy (args->uuid, rsp.uuid);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (*peerid, NULL);
if (peerinfo) {
/* Set peer as locked, so we unlock only the locked peers */
@@ -584,7 +584,7 @@ _gd_syncop_mgmt_lock_cbk (struct rpc_req *req, struct iovec *iov,
"Could not find peer with "
"ID %s", uuid_utoa (*peerid));
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
@@ -670,7 +670,7 @@ _gd_syncop_mgmt_unlock_cbk (struct rpc_req *req, struct iovec *iov,
gf_uuid_copy (args->uuid, rsp.uuid);
- rcu_read_lock ();
+ RCU_READ_LOCK;
peerinfo = glusterd_peerinfo_find (*peerid, NULL);
if (peerinfo) {
peerinfo->locked = _gf_false;
@@ -680,7 +680,7 @@ _gd_syncop_mgmt_unlock_cbk (struct rpc_req *req, struct iovec *iov,
GD_MSG_PEER_NOT_FOUND, "Could not find peer with "
"ID %s", uuid_utoa (*peerid));
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
op_ret = rsp.op_ret;
op_errno = rsp.op_errno;
@@ -780,9 +780,9 @@ _gd_syncop_stage_op_cbk (struct rpc_req *req, struct iovec *iov,
}
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == NULL);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
ret = -1;
gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -1110,9 +1110,9 @@ _gd_syncop_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
}
}
- rcu_read_lock ();
+ RCU_READ_LOCK;
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == 0);
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (ret) {
ret = -1;
gf_msg (this->name, GF_LOG_CRITICAL, 0,
@@ -1227,7 +1227,7 @@ gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -1252,7 +1252,7 @@ gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
MY_UUID, peer_uuid, txn_id);
peer_cnt++;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1360,7 +1360,7 @@ stage_done:
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -1379,7 +1379,7 @@ stage_done:
op, req_dict, op_ctx);
peer_cnt++;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1491,7 +1491,7 @@ commit_done:
synctask_barrier_init((&args));
peer_cnt = 0;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
/* Only send requests to peers who were available before the
* transaction started
@@ -1510,7 +1510,7 @@ commit_done:
op, req_dict, op_ctx);
peer_cnt++;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
if (0 == peer_cnt) {
ret = 0;
@@ -1568,7 +1568,7 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
peer_cnt = 0;
if (cluster_lock) {
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers,
uuid_list) {
/* Only send requests to peers who were available before
@@ -1590,7 +1590,7 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
peer_cnt++;
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
} else {
ret = dict_get_int32 (op_ctx, "hold_global_locks", &global);
@@ -1599,7 +1599,7 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
else
type = "vol";
if (volname || global) {
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers,
uuid_list) {
/* Only send requests to peers who were
@@ -1620,7 +1620,7 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
tmp_uuid, txn_id);
peer_cnt++;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
}
}
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
index d789c53..2290343 100644
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
@@ -10934,7 +10934,7 @@ glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
node_uuid_str = gf_strdup (node_uuid);
/* Finding the index of the node-uuid in the peer-list */
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers,
uuid_list) {
peer_uuid_str = gd_peer_uuid_str (peerinfo);
@@ -10943,7 +10943,7 @@ glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
current_index++;
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
/* Setting the largest index value as the total count. */
ret = dict_get_int32 (ctx_dict, "count", &count);
@@ -13716,7 +13716,7 @@ glusterd_count_connected_peers (int32_t *count)
*count = 1;
- rcu_read_lock ();
+ RCU_READ_LOCK;
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
/* Find peer who is connected and is a friend */
if ((peerinfo->connected) &&
@@ -13724,7 +13724,7 @@ glusterd_count_connected_peers (int32_t *count)
(*count)++;
}
}
- rcu_read_unlock ();
+ RCU_READ_UNLOCK;
ret = 0;
out:
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
index cbdca52..42c8821 100644
--- a/xlators/mgmt/glusterd/src/glusterd.h
+++ b/xlators/mgmt/glusterd/src/glusterd.h
@@ -804,6 +804,22 @@ do { \
*snap_volname_ptr = '\0'; \
} while (0)
+#define RCU_READ_LOCK do { \
+ pthread_mutex_lock(&(THIS->ctx)->cleanup_lock); \
+ { \
+ rcu_read_lock(); \
+ } \
+ pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock); \
+ } while (0)
+
+#define RCU_READ_UNLOCK do { \
+ pthread_mutex_lock(&(THIS->ctx)->cleanup_lock); \
+ { \
+ rcu_read_unlock(); \
+ } \
+ pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock); \
+ } while (0)
+
#define GLUSTERD_DUMP_PEERS(head, member, xpeers) do { \
glusterd_peerinfo_t *_peerinfo = NULL; \
int index = 1; \
@@ -815,7 +831,7 @@ do { \
snprintf (key, sizeof (key), \
"glusterd.xaction_peer"); \
\
- rcu_read_lock (); \
+ RCU_READ_LOCK; \
cds_list_for_each_entry_rcu (_peerinfo, head, member) { \
glusterd_dump_peer (_peerinfo, key, index, xpeers); \
if (!xpeers) \
@@ -823,7 +839,7 @@ do { \
index); \
index++; \
} \
- rcu_read_unlock (); \
+ RCU_READ_UNLOCK; \
\
} while (0)
--
1.8.3.1