|
|
887953 |
From c0d88596bda4eb5c7e942e621a7d38c7ae6d737a Mon Sep 17 00:00:00 2001
|
|
|
887953 |
From: Sanju Rakonde <srakonde@redhat.com>
|
|
|
887953 |
Date: Wed, 28 Nov 2018 16:13:58 +0530
|
|
|
887953 |
Subject: [PATCH 477/493] glusterd: perform rcu_read_lock/unlock() under
|
|
|
887953 |
cleanup_lock mutex
|
|
|
887953 |
|
|
|
887953 |
Problem: glusterd should not try to acquire locks on any resources,
|
|
|
887953 |
when it already received a SIGTERM and cleanup is started. Otherwise
|
|
|
887953 |
we might hit a segfault, since the thread which is going through
|
|
|
887953 |
cleanup path will be freeing up the resources and some other thread
|
|
|
887953 |
might be trying to acquire locks on freed resources.
|
|
|
887953 |
|
|
|
887953 |
Solution: perform rcu_read_lock/unlock() under cleanup_lock mutex.
|
|
|
887953 |
|
|
|
887953 |
> fixes: bz#1654270
|
|
|
887953 |
> Change-Id: I87a97cfe4f272f74f246d688660934638911ce54
|
|
|
887953 |
> Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
|
|
|
887953 |
|
|
|
887953 |
upstream patch: https://review.gluster.org/#/c/glusterfs/+/21743/
|
|
|
887953 |
|
|
|
887953 |
Change-Id: I87a97cfe4f272f74f246d688660934638911ce54
|
|
|
887953 |
BUG: 1654161
|
|
|
887953 |
Signed-off-by: Sanju Rakonde <srakonde@redhat.com>
|
|
|
887953 |
Reviewed-on: https://code.engineering.redhat.com/gerrit/158647
|
|
|
887953 |
Tested-by: RHGS Build Bot <nigelb@redhat.com>
|
|
|
887953 |
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
|
|
|
887953 |
---
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-brick-ops.c | 8 +--
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-handler.c | 75 +++++++++++-----------
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-handshake.c | 32 ++++-----
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-mgmt.c | 28 ++++----
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-op-sm.c | 30 ++++-----
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-peer-utils.c | 40 ++++++------
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-replace-brick.c | 4 +-
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-reset-brick.c | 4 +-
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-rpc-ops.c | 48 +++++++-------
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-server-quorum.c | 4 +-
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-sm.c | 64 +++++++++---------
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-snapshot.c | 6 +-
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-store.c | 4 +-
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-syncop.c | 40 ++++++------
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd-utils.c | 8 +--
|
|
|
887953 |
xlators/mgmt/glusterd/src/glusterd.h | 20 +++++-
|
|
|
887953 |
16 files changed, 215 insertions(+), 200 deletions(-)
|
|
|
887953 |
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
|
|
|
887953 |
index 416412e..5ad8ab8 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-brick-ops.c
|
|
|
887953 |
@@ -2097,7 +2097,7 @@ check:
|
|
|
887953 |
continue;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find_by_uuid
|
|
|
887953 |
(brickinfo->uuid);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -2105,7 +2105,7 @@ check:
|
|
|
887953 |
"brick %s is not in cluster", brick);
|
|
|
887953 |
*errstr = gf_strdup (msg);
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
if (!peerinfo->connected) {
|
|
|
887953 |
@@ -2113,10 +2113,10 @@ check:
|
|
|
887953 |
"brick %s is down", brick);
|
|
|
887953 |
*errstr = gf_strdup (msg);
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-handler.c b/xlators/mgmt/glusterd/src/glusterd-handler.c
|
|
|
887953 |
index c71bf3c..d40de89 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-handler.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-handler.c
|
|
|
887953 |
@@ -105,7 +105,7 @@ glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
|
|
|
887953 |
|
|
|
887953 |
ret = glusterd_remote_hostname_get (req, rhost, sizeof (rhost));
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (uuid, rhost);
|
|
|
887953 |
|
|
|
887953 |
@@ -179,7 +179,7 @@ glusterd_handle_friend_req (rpcsvc_request_t *req, uuid_t uuid,
|
|
|
887953 |
ret = GLUSTERD_CONNECTION_AWAITED;
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (ret && (ret != GLUSTERD_CONNECTION_AWAITED)) {
|
|
|
887953 |
if (ctx && ctx->hostname)
|
|
|
887953 |
@@ -198,7 +198,6 @@ out:
|
|
|
887953 |
GF_FREE (event);
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
-
|
|
|
887953 |
return ret;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
@@ -214,7 +213,7 @@ glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid,
|
|
|
887953 |
if (!port)
|
|
|
887953 |
port = GF_DEFAULT_BASE_PORT;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (uuid, hostname);
|
|
|
887953 |
|
|
|
887953 |
@@ -269,7 +268,7 @@ glusterd_handle_unfriend_req (rpcsvc_request_t *req, uuid_t uuid,
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (0 != ret) {
|
|
|
887953 |
if (ctx && ctx->hostname)
|
|
|
887953 |
@@ -902,9 +901,9 @@ __glusterd_handle_cluster_lock (rpcsvc_request_t *req)
|
|
|
887953 |
gf_msg_debug (this->name, 0, "Received LOCK from uuid: %s",
|
|
|
887953 |
uuid_utoa (lock_req.uuid));
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
ret = (glusterd_peerinfo_find_by_uuid (lock_req.uuid) == NULL);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_WARNING, 0,
|
|
|
887953 |
GD_MSG_PEER_NOT_FOUND, "%s doesn't "
|
|
|
887953 |
@@ -1060,9 +1059,9 @@ __glusterd_handle_stage_op (rpcsvc_request_t *req)
|
|
|
887953 |
gf_msg_debug (this->name, 0, "transaction ID = %s",
|
|
|
887953 |
uuid_utoa (*txn_id));
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_WARNING, 0,
|
|
|
887953 |
GD_MSG_PEER_NOT_FOUND, "%s doesn't "
|
|
|
887953 |
@@ -1144,9 +1143,9 @@ __glusterd_handle_commit_op (rpcsvc_request_t *req)
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
ret = (glusterd_peerinfo_find_by_uuid (op_req.uuid) == NULL);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_WARNING, 0,
|
|
|
887953 |
GD_MSG_PEER_NOT_FOUND, "%s doesn't "
|
|
|
887953 |
@@ -1270,12 +1269,12 @@ __glusterd_handle_cli_probe (rpcsvc_request_t *req)
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find_by_hostname (hostname);
|
|
|
887953 |
ret = (peerinfo && gd_peer_has_address (peerinfo, hostname));
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
gf_msg_debug ("glusterd", 0, "Probe host %s port %d "
|
|
|
887953 |
@@ -2329,7 +2328,7 @@ __glusterd_handle_fsm_log (rpcsvc_request_t *req)
|
|
|
887953 |
conf = this->private;
|
|
|
887953 |
ret = glusterd_sm_tr_log_add_to_dict (dict, &conf->op_sm_log);
|
|
|
887953 |
} else {
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find_by_hostname (cli_req.name);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -2341,7 +2340,7 @@ __glusterd_handle_fsm_log (rpcsvc_request_t *req)
|
|
|
887953 |
(dict, &peerinfo->sm_log);
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
@@ -2482,9 +2481,9 @@ __glusterd_handle_cluster_unlock (rpcsvc_request_t *req)
|
|
|
887953 |
gf_msg_debug (this->name, 0,
|
|
|
887953 |
"Received UNLOCK from uuid: %s", uuid_utoa (unlock_req.uuid));
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
ret = (glusterd_peerinfo_find_by_uuid (unlock_req.uuid) == NULL);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_WARNING, 0,
|
|
|
887953 |
GD_MSG_PEER_NOT_FOUND, "%s doesn't "
|
|
|
887953 |
@@ -2786,11 +2785,11 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
if (glusterd_peerinfo_find (friend_req.uuid, NULL) == NULL) {
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_CRITICAL, 0,
|
|
|
887953 |
GD_MSG_REQ_FROM_UNKNOWN_PEER,
|
|
|
887953 |
@@ -2856,7 +2855,7 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
|
|
|
887953 |
memset (key, 0, sizeof (key));
|
|
|
887953 |
snprintf (key, sizeof (key), "friend%d", i);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (uuid, NULL);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
/* Create a new peer and add it to the list as there is
|
|
|
887953 |
@@ -2903,7 +2902,7 @@ __glusterd_handle_friend_update (rpcsvc_request_t *req)
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
if (ret)
|
|
|
887953 |
break;
|
|
|
887953 |
|
|
|
887953 |
@@ -3002,7 +3001,7 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (probe_req.uuid, remote_hostname);
|
|
|
887953 |
if ((peerinfo == NULL) && (!cds_list_empty (&conf->peers))) {
|
|
|
887953 |
rsp.op_ret = -1;
|
|
|
887953 |
@@ -3024,7 +3023,7 @@ __glusterd_handle_probe_query (rpcsvc_request_t *req)
|
|
|
887953 |
rsp.op_errno = GF_PROBE_ADD_FAILED;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
respond:
|
|
|
887953 |
gf_uuid_copy (rsp.uuid, MY_UUID);
|
|
|
887953 |
@@ -3370,11 +3369,11 @@ glusterd_friend_remove (uuid_t uuid, char *hostname)
|
|
|
887953 |
int ret = -1;
|
|
|
887953 |
glusterd_peerinfo_t *peerinfo = NULL;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (uuid, hostname);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
@@ -3382,7 +3381,7 @@ glusterd_friend_remove (uuid_t uuid, char *hostname)
|
|
|
887953 |
if (ret)
|
|
|
887953 |
gf_msg (THIS->name, GF_LOG_WARNING, 0,
|
|
|
887953 |
GD_MSG_VOL_CLEANUP_FAIL, "Volumes cleanup failed");
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
/* Giving up the critical section here as glusterd_peerinfo_cleanup must
|
|
|
887953 |
* be called from outside a critical section
|
|
|
887953 |
*/
|
|
|
887953 |
@@ -3715,7 +3714,7 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
|
|
|
887953 |
|
|
|
887953 |
GF_ASSERT (hoststr);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (NULL, hoststr);
|
|
|
887953 |
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
@@ -3763,7 +3762,7 @@ glusterd_probe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
gf_msg_debug ("glusterd", 0, "returning %d", ret);
|
|
|
887953 |
return ret;
|
|
|
887953 |
}
|
|
|
887953 |
@@ -3780,7 +3779,7 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
|
|
|
887953 |
GF_ASSERT (hoststr);
|
|
|
887953 |
GF_ASSERT (req);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (uuid, hoststr);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
@@ -3840,7 +3839,7 @@ glusterd_deprobe_begin (rpcsvc_request_t *req, const char *hoststr, int port,
|
|
|
887953 |
peerinfo->detaching = _gf_true;
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
return ret;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
@@ -4162,7 +4161,7 @@ glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
|
|
|
887953 |
|
|
|
887953 |
/* Reset ret to 0, needed to prevent failure incase no peers exist */
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
if (!cds_list_empty (&priv->peers)) {
|
|
|
887953 |
cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
|
|
|
887953 |
count++;
|
|
|
887953 |
@@ -4173,7 +4172,7 @@ glusterd_list_friends (rpcsvc_request_t *req, dict_t *dict, int32_t flags)
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
if (ret)
|
|
|
887953 |
goto out;
|
|
|
887953 |
|
|
|
887953 |
@@ -5592,7 +5591,7 @@ glusterd_get_state (rpcsvc_request_t *req, dict_t *dict)
|
|
|
887953 |
if (priv->opts)
|
|
|
887953 |
dict_foreach (priv->opts, glusterd_print_global_options, fp);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
fprintf (fp, "\n[Peers]\n");
|
|
|
887953 |
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
|
|
|
887953 |
@@ -5621,7 +5620,7 @@ glusterd_get_state (rpcsvc_request_t *req, dict_t *dict)
|
|
|
887953 |
count_bkp = 0;
|
|
|
887953 |
fprintf (fp, "\n");
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
count = 0;
|
|
|
887953 |
fprintf (fp, "\n[Volumes]\n");
|
|
|
887953 |
@@ -6259,7 +6258,7 @@ glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx, int32_t op_errno)
|
|
|
887953 |
|
|
|
887953 |
GF_ASSERT (peerctx);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
gf_msg_debug (THIS->name, 0, "Could not find peer %s(%s). "
|
|
|
887953 |
@@ -6300,7 +6299,7 @@ glusterd_friend_remove_notify (glusterd_peerctx_t *peerctx, int32_t op_errno)
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
return ret;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
@@ -6340,7 +6339,7 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
|
|
|
887953 |
peerctx->peername);
|
|
|
887953 |
return 0;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -6466,7 +6465,7 @@ __glusterd_peer_rpc_notify (struct rpc_clnt *rpc, void *mydata,
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
glusterd_friend_sm ();
|
|
|
887953 |
glusterd_op_sm ();
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-handshake.c b/xlators/mgmt/glusterd/src/glusterd-handshake.c
|
|
|
887953 |
index b2a9b20..d18a7a3 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-handshake.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-handshake.c
|
|
|
887953 |
@@ -1140,9 +1140,9 @@ gd_validate_mgmt_hndsk_req (rpcsvc_request_t *req, dict_t *dict)
|
|
|
887953 |
*/
|
|
|
887953 |
if (!ret) {
|
|
|
887953 |
gf_uuid_parse (uuid_str, peer_uuid);
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
ret = (glusterd_peerinfo_find (peer_uuid, NULL) != NULL);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
if (ret)
|
|
|
887953 |
return _gf_true;
|
|
|
887953 |
}
|
|
|
887953 |
@@ -1158,7 +1158,7 @@ gd_validate_mgmt_hndsk_req (rpcsvc_request_t *req, dict_t *dict)
|
|
|
887953 |
* is available in the peerinfo list but the uuid has changed of the
|
|
|
887953 |
* node due to a reinstall, in that case the validation should fail!
|
|
|
887953 |
*/
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
if (!uuid_str) {
|
|
|
887953 |
ret = (glusterd_peerinfo_find (NULL, hostname) == NULL);
|
|
|
887953 |
} else {
|
|
|
887953 |
@@ -1177,7 +1177,7 @@ gd_validate_mgmt_hndsk_req (rpcsvc_request_t *req, dict_t *dict)
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_ERROR, 0,
|
|
|
887953 |
GD_MSG_HANDSHAKE_REQ_REJECTED, "Rejecting management "
|
|
|
887953 |
@@ -1728,7 +1728,7 @@ glusterd_event_connected_inject (glusterd_peerctx_t *peerctx)
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -1754,7 +1754,7 @@ glusterd_event_connected_inject (glusterd_peerctx_t *peerctx)
|
|
|
887953 |
GD_MSG_EVENT_INJECT_FAIL, "Unable to inject "
|
|
|
887953 |
"EVENT_CONNECTED ret = %d", ret);
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
gf_msg_debug ("glusterd", 0, "returning %d", ret);
|
|
|
887953 |
@@ -1824,7 +1824,7 @@ __glusterd_mgmt_hndsk_version_ack_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
frame = myframe;
|
|
|
887953 |
peerctx = frame->local;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
gf_msg_debug (this->name, 0, "Could not find peer %s(%s)",
|
|
|
887953 |
@@ -1887,7 +1887,7 @@ out:
|
|
|
887953 |
if (ret != 0 && peerinfo)
|
|
|
887953 |
rpc_transport_disconnect (peerinfo->rpc->conn.trans, _gf_false);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
frame->local = NULL;
|
|
|
887953 |
STACK_DESTROY (frame->root);
|
|
|
887953 |
@@ -1930,7 +1930,7 @@ __glusterd_mgmt_hndsk_version_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
frame = myframe;
|
|
|
887953 |
peerctx = frame->local;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -2014,7 +2014,7 @@ out:
|
|
|
887953 |
_gf_false);
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (rsp.hndsk.hndsk_val)
|
|
|
887953 |
free (rsp.hndsk.hndsk_val);
|
|
|
887953 |
@@ -2070,7 +2070,7 @@ glusterd_mgmt_handshake (xlator_t *this, glusterd_peerctx_t *peerctx)
|
|
|
887953 |
GF_PROTOCOL_DICT_SERIALIZE (this, req_dict, (&req.hndsk.hndsk_val),
|
|
|
887953 |
req.hndsk.hndsk_len, ret, out);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -2086,7 +2086,7 @@ glusterd_mgmt_handshake (xlator_t *this, glusterd_peerctx_t *peerctx)
|
|
|
887953 |
(xdrproc_t)xdr_gf_mgmt_hndsk_req);
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
out:
|
|
|
887953 |
if (ret && frame)
|
|
|
887953 |
STACK_DESTROY (frame->root);
|
|
|
887953 |
@@ -2202,7 +2202,7 @@ __glusterd_peer_dump_version_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
frame = myframe;
|
|
|
887953 |
peerctx = frame->local;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -2282,7 +2282,7 @@ out:
|
|
|
887953 |
if (ret != 0 && peerinfo)
|
|
|
887953 |
rpc_transport_disconnect (peerinfo->rpc->conn.trans, _gf_false);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
glusterd_friend_sm ();
|
|
|
887953 |
glusterd_op_sm ();
|
|
|
887953 |
@@ -2330,7 +2330,7 @@ glusterd_peer_dump_version (xlator_t *this, struct rpc_clnt *rpc,
|
|
|
887953 |
if (!peerctx)
|
|
|
887953 |
goto out;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find_by_generation (peerctx->peerinfo_gen);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -2347,7 +2347,7 @@ glusterd_peer_dump_version (xlator_t *this, struct rpc_clnt *rpc,
|
|
|
887953 |
glusterd_peer_dump_version_cbk,
|
|
|
887953 |
(xdrproc_t)xdr_gf_dump_req);
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
out:
|
|
|
887953 |
if (ret && frame)
|
|
|
887953 |
STACK_DESTROY (frame->root);
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-mgmt.c b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
|
|
|
887953 |
index 751d6e4..d98c6bc 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-mgmt.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-mgmt.c
|
|
|
887953 |
@@ -52,14 +52,14 @@ gd_mgmt_v3_collate_errors (struct syncargs *args, int op_ret, int op_errno,
|
|
|
887953 |
args->op_ret = op_ret;
|
|
|
887953 |
args->op_errno = op_errno;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (peerid, NULL);
|
|
|
887953 |
if (peerinfo)
|
|
|
887953 |
peer_str = gf_strdup (peerinfo->hostname);
|
|
|
887953 |
else
|
|
|
887953 |
peer_str = gf_strdup (uuid_utoa (uuid));
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
is_operrstr_blk = (op_errstr && strcmp (op_errstr, ""));
|
|
|
887953 |
err_string = (is_operrstr_blk) ? op_errstr : err_str;
|
|
|
887953 |
@@ -761,7 +761,7 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict,
|
|
|
887953 |
synctask_barrier_init((&args));
|
|
|
887953 |
peer_cnt = 0;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -779,7 +779,7 @@ glusterd_mgmt_v3_initiate_lockdown (glusterd_op_t op, dict_t *dict,
|
|
|
887953 |
MY_UUID, peer_uuid);
|
|
|
887953 |
peer_cnt++;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (0 == peer_cnt) {
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
@@ -1126,7 +1126,7 @@ glusterd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *req_dict,
|
|
|
887953 |
synctask_barrier_init((&args));
|
|
|
887953 |
peer_cnt = 0;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -1144,7 +1144,7 @@ glusterd_mgmt_v3_pre_validate (glusterd_op_t op, dict_t *req_dict,
|
|
|
887953 |
MY_UUID, peer_uuid);
|
|
|
887953 |
peer_cnt++;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (0 == peer_cnt) {
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
@@ -1401,7 +1401,7 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *rsp_dict, dict_t *req_dict,
|
|
|
887953 |
synctask_barrier_init((&args));
|
|
|
887953 |
peer_cnt = 0;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -1419,7 +1419,7 @@ glusterd_mgmt_v3_brick_op (glusterd_op_t op, dict_t *rsp_dict, dict_t *req_dict,
|
|
|
887953 |
MY_UUID, peer_uuid);
|
|
|
887953 |
peer_cnt++;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (0 == peer_cnt) {
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
@@ -1667,7 +1667,7 @@ glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
|
|
|
887953 |
synctask_barrier_init((&args));
|
|
|
887953 |
peer_cnt = 0;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -1702,7 +1702,7 @@ glusterd_mgmt_v3_commit (glusterd_op_t op, dict_t *op_ctx, dict_t *req_dict,
|
|
|
887953 |
MY_UUID, peer_uuid);
|
|
|
887953 |
peer_cnt++;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (0 == peer_cnt) {
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
@@ -1912,7 +1912,7 @@ glusterd_mgmt_v3_post_validate (glusterd_op_t op, int32_t op_ret, dict_t *dict,
|
|
|
887953 |
synctask_barrier_init((&args));
|
|
|
887953 |
peer_cnt = 0;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -1930,7 +1930,7 @@ glusterd_mgmt_v3_post_validate (glusterd_op_t op, int32_t op_ret, dict_t *dict,
|
|
|
887953 |
&args, MY_UUID, peer_uuid);
|
|
|
887953 |
peer_cnt++;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (0 == peer_cnt) {
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
@@ -2094,7 +2094,7 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_op_t op, dict_t *dict,
|
|
|
887953 |
synctask_barrier_init((&args));
|
|
|
887953 |
peer_cnt = 0;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -2112,7 +2112,7 @@ glusterd_mgmt_v3_release_peer_locks (glusterd_op_t op, dict_t *dict,
|
|
|
887953 |
MY_UUID, peer_uuid);
|
|
|
887953 |
peer_cnt++;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (0 == peer_cnt) {
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-op-sm.c b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
|
|
|
887953 |
index 9f76ab3..6414a4e 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-op-sm.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-op-sm.c
|
|
|
887953 |
@@ -1825,7 +1825,7 @@ glusterd_op_stage_sync_volume (dict_t *dict, char **op_errstr)
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
}
|
|
|
887953 |
} else {
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (NULL, hostname);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
@@ -1841,7 +1841,7 @@ glusterd_op_stage_sync_volume (dict_t *dict, char **op_errstr)
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
@@ -3964,7 +3964,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
priv = this->private;
|
|
|
887953 |
GF_ASSERT (priv);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -3985,7 +3985,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
if (proc->fn) {
|
|
|
887953 |
ret = proc->fn (NULL, this, peerinfo);
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
gf_msg (this->name, GF_LOG_WARNING, 0,
|
|
|
887953 |
GD_MSG_LOCK_REQ_SEND_FAIL,
|
|
|
887953 |
"Failed to send lock request "
|
|
|
887953 |
@@ -4009,7 +4009,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
ret = dict_set_static_ptr (dict, "peerinfo",
|
|
|
887953 |
peerinfo);
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
gf_msg (this->name, GF_LOG_ERROR, 0,
|
|
|
887953 |
GD_MSG_DICT_SET_FAILED,
|
|
|
887953 |
"failed to set peerinfo");
|
|
|
887953 |
@@ -4019,7 +4019,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
|
|
|
887953 |
ret = proc->fn (NULL, this, dict);
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
gf_msg (this->name, GF_LOG_WARNING, 0,
|
|
|
887953 |
GD_MSG_MGMTV3_LOCK_REQ_SEND_FAIL,
|
|
|
887953 |
"Failed to send mgmt_v3 lock "
|
|
|
887953 |
@@ -4036,7 +4036,7 @@ glusterd_op_ac_send_lock (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
opinfo.pending_count = pending_count;
|
|
|
887953 |
|
|
|
887953 |
@@ -4074,7 +4074,7 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
priv = this->private;
|
|
|
887953 |
GF_ASSERT (priv);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -4152,7 +4152,7 @@ glusterd_op_ac_send_unlock (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
opinfo.pending_count = pending_count;
|
|
|
887953 |
|
|
|
887953 |
@@ -4762,7 +4762,7 @@ glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -4781,7 +4781,7 @@ glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
if (proc->fn) {
|
|
|
887953 |
ret = dict_set_static_ptr (dict, "peerinfo", peerinfo);
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
gf_msg (this->name, GF_LOG_ERROR, 0,
|
|
|
887953 |
GD_MSG_DICT_SET_FAILED, "failed to "
|
|
|
887953 |
"set peerinfo");
|
|
|
887953 |
@@ -4800,7 +4800,7 @@ glusterd_op_ac_send_stage_op (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
pending_count++;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
opinfo.pending_count = pending_count;
|
|
|
887953 |
out:
|
|
|
887953 |
@@ -5413,7 +5413,7 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -5432,7 +5432,7 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
if (proc->fn) {
|
|
|
887953 |
ret = dict_set_static_ptr (dict, "peerinfo", peerinfo);
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
gf_msg (this->name, GF_LOG_ERROR, 0,
|
|
|
887953 |
GD_MSG_DICT_SET_FAILED,
|
|
|
887953 |
"failed to set peerinfo");
|
|
|
887953 |
@@ -5451,7 +5451,7 @@ glusterd_op_ac_send_commit_op (glusterd_op_sm_event_t *event, void *ctx)
|
|
|
887953 |
pending_count++;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
opinfo.pending_count = pending_count;
|
|
|
887953 |
gf_msg_debug (this->name, 0, "Sent commit op req for 'Volume %s' "
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
|
|
|
887953 |
index 592aa16..6ed5831 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-peer-utils.c
|
|
|
887953 |
@@ -190,7 +190,7 @@ glusterd_peerinfo_find_by_uuid (uuid_t uuid)
|
|
|
887953 |
if (gf_uuid_is_null (uuid))
|
|
|
887953 |
return NULL;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
|
|
|
887953 |
if (!gf_uuid_compare (entry->uuid, uuid)) {
|
|
|
887953 |
|
|
|
887953 |
@@ -201,7 +201,7 @@ glusterd_peerinfo_find_by_uuid (uuid_t uuid)
|
|
|
887953 |
break;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (!found)
|
|
|
887953 |
gf_msg_debug (this->name, 0,
|
|
|
887953 |
@@ -330,7 +330,7 @@ glusterd_chk_peers_connected_befriended (uuid_t skip_uuid)
|
|
|
887953 |
priv= THIS->private;
|
|
|
887953 |
GF_ASSERT (priv);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
|
|
|
887953 |
|
|
|
887953 |
if (!gf_uuid_is_null (skip_uuid) && !gf_uuid_compare (skip_uuid,
|
|
|
887953 |
@@ -343,7 +343,7 @@ glusterd_chk_peers_connected_befriended (uuid_t skip_uuid)
|
|
|
887953 |
break;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
gf_msg_debug (THIS->name, 0, "Returning %s",
|
|
|
887953 |
(ret?"TRUE":"FALSE"));
|
|
|
887953 |
@@ -366,7 +366,7 @@ glusterd_uuid_to_hostname (uuid_t uuid)
|
|
|
887953 |
if (!gf_uuid_compare (MY_UUID, uuid)) {
|
|
|
887953 |
hostname = gf_strdup ("localhost");
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
if (!cds_list_empty (&priv->peers)) {
|
|
|
887953 |
cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
|
|
|
887953 |
if (!gf_uuid_compare (entry->uuid, uuid)) {
|
|
|
887953 |
@@ -375,7 +375,7 @@ glusterd_uuid_to_hostname (uuid_t uuid)
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
return hostname;
|
|
|
887953 |
}
|
|
|
887953 |
@@ -406,14 +406,14 @@ glusterd_are_all_peers_up ()
|
|
|
887953 |
conf = this->private;
|
|
|
887953 |
GF_VALIDATE_OR_GOTO (this->name, conf, out);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
if (!peerinfo->connected) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
peers_up = _gf_true;
|
|
|
887953 |
|
|
|
887953 |
@@ -434,7 +434,7 @@ glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
|
|
|
887953 |
if (!gf_uuid_compare (brickinfo->uuid, MY_UUID))
|
|
|
887953 |
continue;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, peers, uuid_list) {
|
|
|
887953 |
if (gf_uuid_compare (peerinfo->uuid, brickinfo->uuid))
|
|
|
887953 |
continue;
|
|
|
887953 |
@@ -447,11 +447,11 @@ glusterd_are_vol_all_peers_up (glusterd_volinfo_t *volinfo,
|
|
|
887953 |
*down_peerstr = gf_strdup (peerinfo->hostname);
|
|
|
887953 |
gf_msg_debug (THIS->name, 0, "Peer %s is down. ",
|
|
|
887953 |
peerinfo->hostname);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
ret = _gf_true;
|
|
|
887953 |
@@ -664,7 +664,7 @@ gd_peerinfo_find_from_hostname (const char *hoststr)
|
|
|
887953 |
|
|
|
887953 |
GF_VALIDATE_OR_GOTO (this->name, (hoststr != NULL), out);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peer, &priv->peers, uuid_list) {
|
|
|
887953 |
cds_list_for_each_entry_rcu (tmphost, &peer->hostnames,
|
|
|
887953 |
hostname_list) {
|
|
|
887953 |
@@ -679,7 +679,7 @@ gd_peerinfo_find_from_hostname (const char *hoststr)
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
out:
|
|
|
887953 |
return found;
|
|
|
887953 |
}
|
|
|
887953 |
@@ -713,7 +713,7 @@ gd_peerinfo_find_from_addrinfo (const struct addrinfo *addr)
|
|
|
887953 |
|
|
|
887953 |
GF_VALIDATE_OR_GOTO (this->name, (addr != NULL), out);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peer, &conf->peers, uuid_list) {
|
|
|
887953 |
cds_list_for_each_entry_rcu (address, &peer->hostnames,
|
|
|
887953 |
hostname_list) {
|
|
|
887953 |
@@ -747,7 +747,7 @@ gd_peerinfo_find_from_addrinfo (const struct addrinfo *addr)
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
out:
|
|
|
887953 |
return found;
|
|
|
887953 |
}
|
|
|
887953 |
@@ -1014,7 +1014,7 @@ glusterd_peerinfo_find_by_generation (uint32_t generation) {
|
|
|
887953 |
|
|
|
887953 |
GF_ASSERT (priv);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (entry, &priv->peers, uuid_list) {
|
|
|
887953 |
if (entry->generation == generation) {
|
|
|
887953 |
|
|
|
887953 |
@@ -1025,7 +1025,7 @@ glusterd_peerinfo_find_by_generation (uint32_t generation) {
|
|
|
887953 |
break;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (!found)
|
|
|
887953 |
gf_msg_debug (this->name, 0,
|
|
|
887953 |
@@ -1047,10 +1047,10 @@ glusterd_get_peers_count () {
|
|
|
887953 |
conf = this->private;
|
|
|
887953 |
GF_VALIDATE_OR_GOTO (this->name, conf, out);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peer, &conf->peers, uuid_list)
|
|
|
887953 |
count++;
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
return count;
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
|
|
|
887953 |
index 5fc3669..f9ad524 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-replace-brick.c
|
|
|
887953 |
@@ -278,7 +278,7 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
if (!gf_is_local_addr (host)) {
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (NULL, host);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
@@ -300,7 +300,7 @@ glusterd_op_stage_replace_brick (dict_t *dict, char **op_errstr,
|
|
|
887953 |
*op_errstr = gf_strdup (msg);
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (ret)
|
|
|
887953 |
goto out;
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-reset-brick.c b/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
|
|
|
887953 |
index c1de043..60c5716 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-reset-brick.c
|
|
|
887953 |
@@ -165,7 +165,7 @@ glusterd_reset_brick_prevalidate (dict_t *dict, char **op_errstr,
|
|
|
887953 |
if (ret)
|
|
|
887953 |
goto out;
|
|
|
887953 |
} else {
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (NULL, host);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
@@ -190,7 +190,7 @@ glusterd_reset_brick_prevalidate (dict_t *dict, char **op_errstr,
|
|
|
887953 |
*op_errstr = gf_strdup (msg);
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (ret)
|
|
|
887953 |
goto out;
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
|
|
|
887953 |
index 86e1256..c669240 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-rpc-ops.c
|
|
|
887953 |
@@ -280,7 +280,7 @@ __glusterd_probe_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (rsp.uuid, rsp.hostname);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
@@ -422,7 +422,7 @@ cont:
|
|
|
887953 |
GD_MSG_PROBE_REQ_RESP_RCVD, "Received resp to probe req");
|
|
|
887953 |
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
free (rsp.hostname);//malloced by xdr
|
|
|
887953 |
@@ -485,7 +485,7 @@ __glusterd_friend_add_cbk (struct rpc_req * req, struct iovec *iov,
|
|
|
887953 |
"Received %s from uuid: %s, host: %s, port: %d",
|
|
|
887953 |
(op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid), rsp.hostname, rsp.port);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (rsp.uuid, rsp.hostname);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
@@ -527,7 +527,7 @@ __glusterd_friend_add_cbk (struct rpc_req * req, struct iovec *iov,
|
|
|
887953 |
ret = glusterd_friend_sm_inject_event (event);
|
|
|
887953 |
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
out:
|
|
|
887953 |
ctx = ((call_frame_t *)myframe)->local;
|
|
|
887953 |
((call_frame_t *)myframe)->local = NULL;
|
|
|
887953 |
@@ -605,7 +605,7 @@ __glusterd_friend_remove_cbk (struct rpc_req * req, struct iovec *iov,
|
|
|
887953 |
(op_ret)?"RJT":"ACC", uuid_utoa (rsp.uuid), rsp.hostname, rsp.port);
|
|
|
887953 |
|
|
|
887953 |
inject:
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (rsp.uuid, ctx->hostname);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
@@ -640,7 +640,7 @@ inject:
|
|
|
887953 |
op_ret = 0;
|
|
|
887953 |
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
respond:
|
|
|
887953 |
ret = glusterd_xfer_cli_deprobe_resp (ctx->req, op_ret, op_errno, NULL,
|
|
|
887953 |
@@ -769,9 +769,9 @@ __glusterd_cluster_lock_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
uuid_utoa (rsp.uuid));
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == NULL);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_CRITICAL, 0,
|
|
|
887953 |
@@ -889,9 +889,9 @@ glusterd_mgmt_v3_lock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
uuid_utoa (rsp.uuid));
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == NULL);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_CRITICAL, 0,
|
|
|
887953 |
@@ -1000,9 +1000,9 @@ glusterd_mgmt_v3_unlock_peers_cbk_fn (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
uuid_utoa (rsp.uuid));
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == NULL);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_CRITICAL, 0,
|
|
|
887953 |
@@ -1109,9 +1109,9 @@ __glusterd_cluster_unlock_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
uuid_utoa (rsp.uuid));
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == NULL);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_CRITICAL, 0,
|
|
|
887953 |
@@ -1239,7 +1239,7 @@ out:
|
|
|
887953 |
uuid_utoa (rsp.uuid));
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_CRITICAL, 0,
|
|
|
887953 |
@@ -1268,7 +1268,7 @@ out:
|
|
|
887953 |
event_type = GD_OP_EVENT_RCVD_ACC;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
|
|
|
887953 |
ret = glusterd_set_txn_opinfo (txn_id, &opinfo);
|
|
|
887953 |
@@ -1399,7 +1399,7 @@ __glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
"for txn_id = %s", uuid_utoa (*txn_id));
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (rsp.uuid, NULL);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_CRITICAL, 0,
|
|
|
887953 |
@@ -1450,7 +1450,7 @@ __glusterd_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
|
|
|
887953 |
@@ -1554,11 +1554,11 @@ glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this,
|
|
|
887953 |
|
|
|
887953 |
GF_ASSERT (priv);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
gf_msg (this->name, GF_LOG_ERROR, 0,
|
|
|
887953 |
GD_MSG_PEER_NOT_FOUND, "Could not find peer %s(%s)",
|
|
|
887953 |
@@ -1570,7 +1570,7 @@ glusterd_rpc_friend_add (call_frame_t *frame, xlator_t *this,
|
|
|
887953 |
req.hostname = gf_strdup (peerinfo->hostname);
|
|
|
887953 |
req.port = peerinfo->port;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
ret = glusterd_add_volumes_to_export_dict (&peer_data);
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
@@ -1653,11 +1653,11 @@ glusterd_rpc_friend_remove (call_frame_t *frame, xlator_t *this,
|
|
|
887953 |
|
|
|
887953 |
GF_ASSERT (priv);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
gf_msg (this->name, GF_LOG_ERROR, 0,
|
|
|
887953 |
GD_MSG_PEER_NOT_FOUND, "Could not find peer %s(%s)",
|
|
|
887953 |
@@ -1674,7 +1674,7 @@ glusterd_rpc_friend_remove (call_frame_t *frame, xlator_t *this,
|
|
|
887953 |
this, glusterd_friend_remove_cbk,
|
|
|
887953 |
(xdrproc_t)xdr_gd1_mgmt_friend_req);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
out:
|
|
|
887953 |
GF_FREE (req.hostname);
|
|
|
887953 |
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
|
|
|
887953 |
index b01bfaa..ef97bfd 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-server-quorum.c
|
|
|
887953 |
@@ -224,14 +224,14 @@ glusterd_get_quorum_cluster_counts (xlator_t *this, int *active_count,
|
|
|
887953 |
if (active_count)
|
|
|
887953 |
*active_count = 1;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
if (_is_contributing_to_quorum (peerinfo->quorum_contrib))
|
|
|
887953 |
inquorum_count = inquorum_count + 1;
|
|
|
887953 |
if (active_count && (peerinfo->quorum_contrib == QUORUM_UP))
|
|
|
887953 |
*active_count = *active_count + 1;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
ret = dict_get_str (conf->opts, GLUSTERD_QUORUM_RATIO_KEY, &val;;
|
|
|
887953 |
if (ret == 0) {
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-sm.c b/xlators/mgmt/glusterd/src/glusterd-sm.c
|
|
|
887953 |
index 6c56837..a2ef9f7 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-sm.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-sm.c
|
|
|
887953 |
@@ -157,7 +157,7 @@ glusterd_broadcast_friend_delete (char *hostname, uuid_t uuid)
|
|
|
887953 |
if (ret)
|
|
|
887953 |
goto out;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
|
|
|
887953 |
if (!peerinfo->connected || !peerinfo->peer)
|
|
|
887953 |
continue;
|
|
|
887953 |
@@ -180,7 +180,7 @@ glusterd_broadcast_friend_delete (char *hostname, uuid_t uuid)
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
unlock:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
|
|
|
887953 |
|
|
|
887953 |
@@ -224,7 +224,7 @@ glusterd_ac_reverse_probe_begin (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
GF_ASSERT (event);
|
|
|
887953 |
GF_ASSERT (ctx);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -271,7 +271,7 @@ glusterd_ac_reverse_probe_begin (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
if (new_event)
|
|
|
887953 |
@@ -302,7 +302,7 @@ glusterd_ac_friend_add (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
|
|
|
887953 |
GF_ASSERT (conf);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -326,7 +326,7 @@ glusterd_ac_friend_add (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (ret && frame)
|
|
|
887953 |
STACK_DESTROY (frame->root);
|
|
|
887953 |
@@ -359,7 +359,7 @@ glusterd_ac_friend_probe (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
|
|
|
887953 |
GF_ASSERT (conf);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (NULL, probe_ctx->hostname);
|
|
|
887953 |
if (peerinfo == NULL) {
|
|
|
887953 |
//We should not reach this state ideally
|
|
|
887953 |
@@ -406,7 +406,7 @@ glusterd_ac_friend_probe (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (dict)
|
|
|
887953 |
dict_unref (dict);
|
|
|
887953 |
@@ -439,7 +439,7 @@ glusterd_ac_send_friend_remove_req (glusterd_friend_sm_event_t *event,
|
|
|
887953 |
|
|
|
887953 |
GF_ASSERT (conf);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -489,7 +489,7 @@ glusterd_ac_send_friend_remove_req (glusterd_friend_sm_event_t *event,
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
|
|
|
887953 |
|
|
|
887953 |
@@ -533,7 +533,7 @@ glusterd_ac_send_friend_update (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
|
|
|
887953 |
GF_ASSERT (priv);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
cur_peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
|
|
|
887953 |
if (!cur_peerinfo) {
|
|
|
887953 |
@@ -596,7 +596,7 @@ glusterd_ac_send_friend_update (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
gf_msg_debug ("glusterd", 0, "Returning with %d", ret);
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (friends)
|
|
|
887953 |
dict_unref (friends);
|
|
|
887953 |
@@ -628,7 +628,7 @@ glusterd_ac_update_friend (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
|
|
|
887953 |
GF_ASSERT (priv);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
cur_peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
|
|
|
887953 |
if (!cur_peerinfo) {
|
|
|
887953 |
@@ -690,7 +690,7 @@ glusterd_ac_update_friend (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
gf_msg_debug (this->name, 0, "Returning with %d", ret);
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (friends)
|
|
|
887953 |
dict_unref (friends);
|
|
|
887953 |
@@ -790,13 +790,13 @@ glusterd_ac_handle_friend_remove_req (glusterd_friend_sm_event_t *event,
|
|
|
887953 |
ret = glusterd_xfer_friend_remove_resp (ev_ctx->req, ev_ctx->hostname,
|
|
|
887953 |
ev_ctx->port);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
|
|
|
887953 |
|
|
|
887953 |
ret = glusterd_friend_sm_new_event (GD_FRIEND_EVENT_REMOVE_FRIEND,
|
|
|
887953 |
&new_event);
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
@@ -805,13 +805,13 @@ glusterd_ac_handle_friend_remove_req (glusterd_friend_sm_event_t *event,
|
|
|
887953 |
|
|
|
887953 |
ret = glusterd_friend_sm_inject_event (new_event);
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
new_event = NULL;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
glusterd_peer_detach_cleanup (priv);
|
|
|
887953 |
out:
|
|
|
887953 |
@@ -831,7 +831,7 @@ glusterd_ac_friend_remove (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
|
|
|
887953 |
GF_ASSERT (event);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
@@ -839,7 +839,7 @@ glusterd_ac_friend_remove (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
GD_MSG_PEER_NOT_FOUND,
|
|
|
887953 |
"Could not find peer %s(%s)",
|
|
|
887953 |
event->peername, uuid_utoa (event->peerid));
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
ret = glusterd_friend_remove_cleanup_vols (peerinfo->uuid);
|
|
|
887953 |
@@ -847,7 +847,7 @@ glusterd_ac_friend_remove (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
gf_msg (THIS->name, GF_LOG_WARNING, 0, GD_MSG_VOL_CLEANUP_FAIL,
|
|
|
887953 |
"Volumes cleanup failed");
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
/* Exiting read critical section as glusterd_peerinfo_cleanup calls
|
|
|
887953 |
* synchronize_rcu before freeing the peerinfo
|
|
|
887953 |
*/
|
|
|
887953 |
@@ -896,14 +896,14 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
ev_ctx = ctx;
|
|
|
887953 |
gf_uuid_copy (uuid, ev_ctx->uuid);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (event->peerid, event->peername);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
gf_msg (this->name, GF_LOG_ERROR, 0,
|
|
|
887953 |
GD_MSG_PEER_NOT_FOUND, "Could not find peer %s(%s)",
|
|
|
887953 |
event->peername, uuid_utoa (event->peerid));
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
@@ -913,7 +913,7 @@ glusterd_ac_handle_friend_add_req (glusterd_friend_sm_event_t *event, void *ctx)
|
|
|
887953 |
*/
|
|
|
887953 |
gf_uuid_copy (peerinfo->uuid, ev_ctx->uuid);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
conf = this->private;
|
|
|
887953 |
GF_ASSERT (conf);
|
|
|
887953 |
@@ -1032,7 +1032,7 @@ glusterd_friend_sm_transition_state (uuid_t peerid, char *peername,
|
|
|
887953 |
GF_ASSERT (state);
|
|
|
887953 |
GF_ASSERT (peername);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (peerid, peername);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
goto out;
|
|
|
887953 |
@@ -1047,7 +1047,7 @@ glusterd_friend_sm_transition_state (uuid_t peerid, char *peername,
|
|
|
887953 |
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
out:
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
return ret;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
@@ -1357,7 +1357,7 @@ glusterd_friend_sm ()
|
|
|
887953 |
cds_list_del_init (&event->list);
|
|
|
887953 |
event_type = event->event;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (event->peerid,
|
|
|
887953 |
event->peername);
|
|
|
887953 |
@@ -1368,7 +1368,7 @@ glusterd_friend_sm ()
|
|
|
887953 |
glusterd_friend_sm_event_name_get (event_type));
|
|
|
887953 |
|
|
|
887953 |
GF_FREE (event);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
continue;
|
|
|
887953 |
}
|
|
|
887953 |
gf_msg_debug ("glusterd", 0, "Dequeued event of type: '%s'",
|
|
|
887953 |
@@ -1377,7 +1377,7 @@ glusterd_friend_sm ()
|
|
|
887953 |
|
|
|
887953 |
old_state = peerinfo->state.state;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
/* Giving up read-critical section here as we only need
|
|
|
887953 |
* the current state to call the handler.
|
|
|
887953 |
*
|
|
|
887953 |
@@ -1435,11 +1435,11 @@ glusterd_friend_sm ()
|
|
|
887953 |
/* We need to obtain peerinfo reference once again as we
|
|
|
887953 |
* had exited the read critical section above.
|
|
|
887953 |
*/
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (event->peerid,
|
|
|
887953 |
event->peername);
|
|
|
887953 |
if (!peerinfo) {
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
/* A peer can only be deleted as a effect of
|
|
|
887953 |
* this state machine, and two such state
|
|
|
887953 |
* machines can never run at the same time.
|
|
|
887953 |
@@ -1463,7 +1463,7 @@ glusterd_friend_sm ()
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
ret = glusterd_store_peerinfo (peerinfo);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
glusterd_destroy_friend_event_context (event);
|
|
|
887953 |
GF_FREE (event);
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-snapshot.c b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
|
|
|
887953 |
index 830a67f..3c362e1 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-snapshot.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-snapshot.c
|
|
|
887953 |
@@ -185,7 +185,7 @@ glusterd_find_missed_snap (dict_t *rsp_dict, glusterd_volinfo_t *vol,
|
|
|
887953 |
continue;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, peers, uuid_list) {
|
|
|
887953 |
if (gf_uuid_compare (peerinfo->uuid, brickinfo->uuid)) {
|
|
|
887953 |
/* If the brick doesnt belong to this peer */
|
|
|
887953 |
@@ -210,12 +210,12 @@ glusterd_find_missed_snap (dict_t *rsp_dict, glusterd_volinfo_t *vol,
|
|
|
887953 |
"info for %s:%s in the "
|
|
|
887953 |
"rsp_dict", brickinfo->hostname,
|
|
|
887953 |
brickinfo->path);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
goto out;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
brick_count++;
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-store.c b/xlators/mgmt/glusterd/src/glusterd-store.c
|
|
|
887953 |
index b3c4d9a..1db2c7c 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-store.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-store.c
|
|
|
887953 |
@@ -4593,13 +4593,13 @@ glusterd_store_retrieve_peers (xlator_t *this)
|
|
|
887953 |
|
|
|
887953 |
args.mode = GD_MODE_ON;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &priv->peers, uuid_list) {
|
|
|
887953 |
ret = glusterd_friend_rpc_create (this, peerinfo, &args);
|
|
|
887953 |
if (ret)
|
|
|
887953 |
break;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
peerinfo = NULL;
|
|
|
887953 |
|
|
|
887953 |
out:
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-syncop.c b/xlators/mgmt/glusterd/src/glusterd-syncop.c
|
|
|
887953 |
index 5aaa7f8..9a67d1c 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-syncop.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-syncop.c
|
|
|
887953 |
@@ -52,13 +52,13 @@ gd_collate_errors (struct syncargs *args, int op_ret, int op_errno,
|
|
|
887953 |
args->op_ret = op_ret;
|
|
|
887953 |
args->op_errno = op_errno;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (peerid, NULL);
|
|
|
887953 |
if (peerinfo)
|
|
|
887953 |
peer_str = gf_strdup (peerinfo->hostname);
|
|
|
887953 |
else
|
|
|
887953 |
peer_str = gf_strdup (uuid_utoa (uuid));
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (op_errstr && strcmp (op_errstr, "")) {
|
|
|
887953 |
len = snprintf (err_str, sizeof(err_str) - 1,
|
|
|
887953 |
@@ -571,7 +571,7 @@ _gd_syncop_mgmt_lock_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
|
|
|
887953 |
gf_uuid_copy (args->uuid, rsp.uuid);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (*peerid, NULL);
|
|
|
887953 |
if (peerinfo) {
|
|
|
887953 |
/* Set peer as locked, so we unlock only the locked peers */
|
|
|
887953 |
@@ -584,7 +584,7 @@ _gd_syncop_mgmt_lock_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
"Could not find peer with "
|
|
|
887953 |
"ID %s", uuid_utoa (*peerid));
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
op_ret = rsp.op_ret;
|
|
|
887953 |
op_errno = rsp.op_errno;
|
|
|
887953 |
@@ -670,7 +670,7 @@ _gd_syncop_mgmt_unlock_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
|
|
|
887953 |
gf_uuid_copy (args->uuid, rsp.uuid);
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
peerinfo = glusterd_peerinfo_find (*peerid, NULL);
|
|
|
887953 |
if (peerinfo) {
|
|
|
887953 |
peerinfo->locked = _gf_false;
|
|
|
887953 |
@@ -680,7 +680,7 @@ _gd_syncop_mgmt_unlock_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
GD_MSG_PEER_NOT_FOUND, "Could not find peer with "
|
|
|
887953 |
"ID %s", uuid_utoa (*peerid));
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
op_ret = rsp.op_ret;
|
|
|
887953 |
op_errno = rsp.op_errno;
|
|
|
887953 |
@@ -780,9 +780,9 @@ _gd_syncop_stage_op_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == NULL);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
gf_msg (this->name, GF_LOG_CRITICAL, 0,
|
|
|
887953 |
@@ -1110,9 +1110,9 @@ _gd_syncop_commit_op_cbk (struct rpc_req *req, struct iovec *iov,
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
ret = (glusterd_peerinfo_find (rsp.uuid, NULL) == 0);
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
if (ret) {
|
|
|
887953 |
ret = -1;
|
|
|
887953 |
gf_msg (this->name, GF_LOG_CRITICAL, 0,
|
|
|
887953 |
@@ -1227,7 +1227,7 @@ gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
|
|
|
887953 |
synctask_barrier_init((&args));
|
|
|
887953 |
peer_cnt = 0;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -1252,7 +1252,7 @@ gd_lock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, dict_t *op_ctx,
|
|
|
887953 |
MY_UUID, peer_uuid, txn_id);
|
|
|
887953 |
peer_cnt++;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (0 == peer_cnt) {
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
@@ -1360,7 +1360,7 @@ stage_done:
|
|
|
887953 |
synctask_barrier_init((&args));
|
|
|
887953 |
peer_cnt = 0;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -1379,7 +1379,7 @@ stage_done:
|
|
|
887953 |
op, req_dict, op_ctx);
|
|
|
887953 |
peer_cnt++;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (0 == peer_cnt) {
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
@@ -1491,7 +1491,7 @@ commit_done:
|
|
|
887953 |
synctask_barrier_init((&args));
|
|
|
887953 |
peer_cnt = 0;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before the
|
|
|
887953 |
* transaction started
|
|
|
887953 |
@@ -1510,7 +1510,7 @@ commit_done:
|
|
|
887953 |
op, req_dict, op_ctx);
|
|
|
887953 |
peer_cnt++;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
if (0 == peer_cnt) {
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
@@ -1568,7 +1568,7 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
|
|
|
887953 |
peer_cnt = 0;
|
|
|
887953 |
|
|
|
887953 |
if (cluster_lock) {
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers,
|
|
|
887953 |
uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were available before
|
|
|
887953 |
@@ -1590,7 +1590,7 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
|
|
|
887953 |
peer_cnt++;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
} else {
|
|
|
887953 |
|
|
|
887953 |
ret = dict_get_int32 (op_ctx, "hold_global_locks", &global);
|
|
|
887953 |
@@ -1599,7 +1599,7 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
|
|
|
887953 |
else
|
|
|
887953 |
type = "vol";
|
|
|
887953 |
if (volname || global) {
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers,
|
|
|
887953 |
uuid_list) {
|
|
|
887953 |
/* Only send requests to peers who were
|
|
|
887953 |
@@ -1620,7 +1620,7 @@ gd_unlock_op_phase (glusterd_conf_t *conf, glusterd_op_t op, int *op_ret,
|
|
|
887953 |
tmp_uuid, txn_id);
|
|
|
887953 |
peer_cnt++;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd-utils.c b/xlators/mgmt/glusterd/src/glusterd-utils.c
|
|
|
887953 |
index d789c53..2290343 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd-utils.c
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd-utils.c
|
|
|
887953 |
@@ -10934,7 +10934,7 @@ glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
|
|
|
887953 |
node_uuid_str = gf_strdup (node_uuid);
|
|
|
887953 |
|
|
|
887953 |
/* Finding the index of the node-uuid in the peer-list */
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers,
|
|
|
887953 |
uuid_list) {
|
|
|
887953 |
peer_uuid_str = gd_peer_uuid_str (peerinfo);
|
|
|
887953 |
@@ -10943,7 +10943,7 @@ glusterd_volume_rebalance_use_rsp_dict (dict_t *aggr, dict_t *rsp_dict)
|
|
|
887953 |
|
|
|
887953 |
current_index++;
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
/* Setting the largest index value as the total count. */
|
|
|
887953 |
ret = dict_get_int32 (ctx_dict, "count", &count);
|
|
|
887953 |
@@ -13716,7 +13716,7 @@ glusterd_count_connected_peers (int32_t *count)
|
|
|
887953 |
|
|
|
887953 |
*count = 1;
|
|
|
887953 |
|
|
|
887953 |
- rcu_read_lock ();
|
|
|
887953 |
+ RCU_READ_LOCK;
|
|
|
887953 |
cds_list_for_each_entry_rcu (peerinfo, &conf->peers, uuid_list) {
|
|
|
887953 |
/* Find peer who is connected and is a friend */
|
|
|
887953 |
if ((peerinfo->connected) &&
|
|
|
887953 |
@@ -13724,7 +13724,7 @@ glusterd_count_connected_peers (int32_t *count)
|
|
|
887953 |
(*count)++;
|
|
|
887953 |
}
|
|
|
887953 |
}
|
|
|
887953 |
- rcu_read_unlock ();
|
|
|
887953 |
+ RCU_READ_UNLOCK;
|
|
|
887953 |
|
|
|
887953 |
ret = 0;
|
|
|
887953 |
out:
|
|
|
887953 |
diff --git a/xlators/mgmt/glusterd/src/glusterd.h b/xlators/mgmt/glusterd/src/glusterd.h
|
|
|
887953 |
index cbdca52..42c8821 100644
|
|
|
887953 |
--- a/xlators/mgmt/glusterd/src/glusterd.h
|
|
|
887953 |
+++ b/xlators/mgmt/glusterd/src/glusterd.h
|
|
|
887953 |
@@ -804,6 +804,22 @@ do { \
|
|
|
887953 |
*snap_volname_ptr = '\0'; \
|
|
|
887953 |
} while (0)
|
|
|
887953 |
|
|
|
887953 |
+#define RCU_READ_LOCK do { \
|
|
|
887953 |
+ pthread_mutex_lock(&(THIS->ctx)->cleanup_lock); \
|
|
|
887953 |
+ { \
|
|
|
887953 |
+ rcu_read_lock(); \
|
|
|
887953 |
+ } \
|
|
|
887953 |
+ pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock); \
|
|
|
887953 |
+ } while (0)
|
|
|
887953 |
+
|
|
|
887953 |
+#define RCU_READ_UNLOCK do { \
|
|
|
887953 |
+ pthread_mutex_lock(&(THIS->ctx)->cleanup_lock); \
|
|
|
887953 |
+ { \
|
|
|
887953 |
+ rcu_read_unlock(); \
|
|
|
887953 |
+ } \
|
|
|
887953 |
+ pthread_mutex_unlock(&(THIS->ctx)->cleanup_lock); \
|
|
|
887953 |
+ } while (0)
|
|
|
887953 |
+
|
|
|
887953 |
#define GLUSTERD_DUMP_PEERS(head, member, xpeers) do { \
|
|
|
887953 |
glusterd_peerinfo_t *_peerinfo = NULL; \
|
|
|
887953 |
int index = 1; \
|
|
|
887953 |
@@ -815,7 +831,7 @@ do { \
|
|
|
887953 |
snprintf (key, sizeof (key), \
|
|
|
887953 |
"glusterd.xaction_peer"); \
|
|
|
887953 |
\
|
|
|
887953 |
- rcu_read_lock (); \
|
|
|
887953 |
+ RCU_READ_LOCK; \
|
|
|
887953 |
cds_list_for_each_entry_rcu (_peerinfo, head, member) { \
|
|
|
887953 |
glusterd_dump_peer (_peerinfo, key, index, xpeers); \
|
|
|
887953 |
if (!xpeers) \
|
|
|
887953 |
@@ -823,7 +839,7 @@ do { \
|
|
|
887953 |
index); \
|
|
|
887953 |
index++; \
|
|
|
887953 |
} \
|
|
|
887953 |
- rcu_read_unlock (); \
|
|
|
887953 |
+ RCU_READ_UNLOCK; \
|
|
|
887953 |
\
|
|
|
887953 |
} while (0)
|
|
|
887953 |
|
|
|
887953 |
--
|
|
|
887953 |
1.8.3.1
|
|
|
887953 |
|