From 8a3035bf612943694a3cd1c6a857bd009e84f55d Mon Sep 17 00:00:00 2001
From: Milind Changire <mchangir@redhat.com>
Date: Tue, 10 Oct 2017 09:58:24 +0530
Subject: [PATCH 19/52] build: launch glusterd upgrade after all new bits are
 installed

Problem:
glusterd upgrade mode needs new bits from glusterfs-rdma which is
optional and causes the dependency graph to break since it is
not tied into glusterfs-server requirements

Solution:
Run glusterd upgrade mode after all new bits are installed
i.e. in %posttrans server section

Label: DOWNSTREAM ONLY

Change-Id: I356e02d0bf0eaaef43c20ce07b388262f63093a4
Signed-off-by: Milind Changire <mchangir@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/120094
Reviewed-by: Atin Mukherjee <amukherj@redhat.com>
Tested-by: RHGS Build Bot <nigelb@redhat.com>
Reviewed-by: Raghavendra Talur <rtalur@redhat.com>
---
 glusterfs.spec.in | 51 +++++++++++++++++++++++++++++----------------------
 1 file changed, 29 insertions(+), 22 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 3a98822..208a82d 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -946,28 +946,6 @@ fi
     %firewalld_reload
 %endif
 
-pidof -c -o %PPID -x glusterd &> /dev/null
-if [ $? -eq 0 ]; then
-    kill -9 `pgrep -f gsyncd.py` &> /dev/null
-
-    killall --wait glusterd &> /dev/null
-    glusterd --xlator-option *.upgrade=on -N
-
-    #Cleaning leftover glusterd socket file which is created by glusterd in
-    #rpm_script_t context.
-    rm -f %{_rundir}/glusterd.socket
-
-    # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
-    # so start it again
-    %service_start glusterd
-else
-    glusterd --xlator-option *.upgrade=on -N
-
-    #Cleaning leftover glusterd socket file which is created by glusterd in
-    #rpm_script_t context.
-    rm -f %{_rundir}/glusterd.socket
-fi
-exit 0
 %endif
 
 ##-----------------------------------------------------------------------------
@@ -2027,6 +2005,35 @@ os.remove(tmpname)
 if not (ok == 0) then
    error("Detected running glusterfs processes", ok)
 end
+
+%posttrans server
+pidof -c -o %PPID -x glusterd &> /dev/null
+if [ $? -eq 0 ]; then
+    kill -9 `pgrep -f gsyncd.py` &> /dev/null
+
+    killall --wait -SIGTERM glusterd &> /dev/null
+
+    if [ "$?" != "0" ]; then
+        echo "killall failed while killing glusterd"
+    fi
+
+    glusterd --xlator-option *.upgrade=on -N
+
+    #Cleaning leftover glusterd socket file which is created by glusterd in
+    #rpm_script_t context.
+    rm -rf /var/run/glusterd.socket
+
+    # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
+    # so start it again
+    %service_start glusterd
+else
+    glusterd --xlator-option *.upgrade=on -N
+
+    #Cleaning leftover glusterd socket file which is created by glusterd in
+    #rpm_script_t context.
+    rm -rf /var/run/glusterd.socket
+fi
+
 %endif
 
 %changelog
-- 
1.8.3.1