Blob Blame History Raw
From cd770b8697ea79bd884925bcfdf451d98a2d5c25 Mon Sep 17 00:00:00 2001
From: Bala.FA <barumuga@redhat.com>
Date: Fri, 28 Feb 2014 15:28:44 +0530
Subject: [PATCH 08/86] build: add RHGS specific changes

Label: DOWNSTREAM ONLY

Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1074947
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1097782
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1115267
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1221743
Change-Id: I08333334745adf2350e772c6454ffcfe9c08cb89
Reviewed-on: https://code.engineering.redhat.com/gerrit/24983
Reviewed-on: https://code.engineering.redhat.com/gerrit/25451
Reviewed-on: https://code.engineering.redhat.com/gerrit/25518
Reviewed-on: https://code.engineering.redhat.com/gerrit/25983
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60134
Tested-by: Milind Changire <mchangir@redhat.com>
---
 glusterfs.spec.in |  567 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 561 insertions(+), 6 deletions(-)

diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 011150d..b0c4cb9 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -72,6 +72,23 @@
 %global _without_tiering --disable-tiering
 %endif
 
+# if you wish not to build server rpms, compile like this.
+# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without server
+
+%global _build_server 1
+%if ( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" )
+%global _build_server 1
+%else
+%global _build_server 0
+%endif
+
+%if "%{?_without_server}"
+%global _build_server 0
+%endif
+
+%global _without_extra_xlators 1
+%global _without_regression_tests 1
+
 ##-----------------------------------------------------------------------------
 ## All %%global definitions should be placed here and keep them sorted
 ##
@@ -162,8 +179,8 @@ Vendor:           Fedora Project
 %else
 Name:             @PACKAGE_NAME@
 Version:          @PACKAGE_VERSION@
-Release:          0.@PACKAGE_RELEASE@%{?dist}
-Vendor:           Gluster Community
+Release:          @PACKAGE_RELEASE@%{?dist}
+ExclusiveArch:    x86_64 aarch64
 %endif
 License:          GPLv2 or LGPLv3+
 Group:            System Environment/Base
@@ -298,7 +315,9 @@ Summary:          Development Libraries
 Group:            Development/Libraries
 Requires:         %{name}%{?_isa} = %{version}-%{release}
 # Needed for the Glupy examples to work
-Requires:         %{name}-extra-xlators%{?_isa} = %{version}-%{release}
+%if ( 0%{!?_without_extra_xlators:1} )
+Requires:         %{name}-extra-xlators = %{version}-%{release}
+%endif
 
 %description devel
 GlusterFS is a distributed file-system capable of scaling to several
@@ -311,6 +330,7 @@ is in user space and easily manageable.
 
 This package provides the development libraries and include files.
 
+%if ( 0%{!?_without_extra_xlators:1} )
 %package extra-xlators
 Summary:          Extra Gluster filesystem Translators
 Group:            Applications/File
@@ -330,6 +350,7 @@ is in user space and easily manageable.
 
 This package provides extra filesystem Translators, such as Glupy,
 for GlusterFS.
+%endif
 
 %package fuse
 Summary:          Fuse client
@@ -356,6 +377,7 @@ is in user space and easily manageable.
 This package provides support to FUSE based clients and inlcudes the
 glusterfs(d) binary.
 
+%if ( 0%{?_build_server} )
 %package ganesha
 Summary:          NFS-Ganesha configuration
 Group:            Applications/File
@@ -377,7 +399,9 @@ is in user space and easily manageable.
 
 This package provides the configuration and related files for using
 NFS-Ganesha as the NFS server using GlusterFS
+%endif
 
+%if ( 0%{?_build_server} )
 %if ( 0%{!?_without_georeplication:1} )
 %package geo-replication
 Summary:          GlusterFS Geo-replication
@@ -398,6 +422,7 @@ is in userspace and easily manageable.
 
 This package provides support to geo-replication.
 %endif
+%endif
 
 %package libs
 Summary:          GlusterFS common libraries
@@ -456,6 +481,8 @@ is in user space and easily manageable.
 This package provides support to ib-verbs library.
 %endif
 
+%if ( 0%{?_build_server} )
+%if ( 0%{!?_without_regression_tests:1} )
 %package regression-tests
 Summary:          Development Tools
 Group:            Development/Tools
@@ -471,7 +498,10 @@ Requires:         nfs-utils xfsprogs yajl
 %description regression-tests
 The Gluster Test Framework, is a suite of scripts used for
 regression testing of Gluster.
+%endif
+%endif
 
+%if ( 0%{?_build_server} )
 %if ( 0%{!?_without_ocf:1} )
 %package resource-agents
 Summary:          OCF Resource Agents for GlusterFS
@@ -504,7 +534,9 @@ This package provides the resource agents which plug glusterd into
 Open Cluster Framework (OCF) compliant cluster resource managers,
 like Pacemaker.
 %endif
+%endif
 
+%if ( 0%{?_build_server} )
 %package server
 Summary:          Clustered file-system server
 Group:            System Environment/Daemons
@@ -554,6 +586,7 @@ called Translators from GNU Hurd kernel. Much of the code in GlusterFS
 is in user space and easily manageable.
 
 This package provides the glusterfs server daemon.
+%endif
 
 %package client-xlators
 Summary:          GlusterFS client-side translators
@@ -753,6 +786,7 @@ modprobe fuse
 exit 0
 %endif
 
+%if ( 0%{?_build_server} )
 %if ( 0%{!?_without_georeplication:1} )
 %post geo-replication
 if [ $1 -ge 1 ]; then
@@ -760,10 +794,12 @@ if [ $1 -ge 1 ]; then
 fi
 exit 0
 %endif
+%endif
 
 %post libs
 /sbin/ldconfig
 
+%if ( 0%{?_build_server} )
 %post server
 # Legacy server
 %_init_enable glusterd
@@ -838,11 +874,12 @@ else
     #rpm_script_t context.
     rm -rf /var/run/glusterd.socket
 fi
-exit 0
+%endif
 
 ##-----------------------------------------------------------------------------
 ## All %%preun should be placed here and keep them sorted
 ##
+%if ( 0%{?_build_server} )
 %preun server
 if [ $1 -eq 0 ]; then
     if [ -f %_init_glusterfsd ]; then
@@ -860,7 +897,7 @@ if [ $1 -ge 1 ]; then
     fi
     %_init_restart glusterd
 fi
-exit 0
+%endif
 
 ##-----------------------------------------------------------------------------
 ## All %%postun should be placed here and keep them sorted
@@ -893,6 +930,80 @@ exit 0
 ## All %%files should be placed here and keep them grouped
 ##
 %files
+# exclude extra-xlators files
+%if ( ! 0%{!?_without_extra_xlators:1} )
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/mac-compat.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_client.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_dht.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_server.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/features/template.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
+%exclude %{python_sitelib}/*
+%endif
+# exclude regression-tests files
+%if ( ! 0%{!?_without_regression_tests:1} )
+%exclude %{_prefix}/share/glusterfs/run-tests.sh
+%exclude %{_prefix}/share/glusterfs/tests/*
+%endif
+%if ( ! 0%{?_build_server} )
+# exclude ganesha files
+%exclude %{_sysconfdir}/ganesha/*
+%exclude %{_libexecdir}/ganesha/*
+%exclude %{_prefix}/lib/ocf/*
+# exclude geo-replication files
+%exclude %{_sysconfdir}/logrotate.d/glusterfs-georep
+%exclude %{_libexecdir}/glusterfs/*
+%exclude %{_sbindir}/gfind_missing_files
+%exclude %{_datadir}/glusterfs/scripts/get-gfid.sh
+%exclude %{_datadir}/glusterfs/scripts/slave-upgrade.sh
+%exclude %{_datadir}/glusterfs/scripts/gsync-upgrade.sh
+%exclude %{_datadir}/glusterfs/scripts/generate-gfid-file.sh
+%exclude %{_datadir}/glusterfs/scripts/gsync-sync-gfid
+%exclude %{_sharedstatedir}/glusterd/*
+# exclude server files
+%exclude %{_sysconfdir}/glusterfs
+%exclude %{_sysconfdir}/glusterfs/glusterd.vol
+%exclude %{_sysconfdir}/glusterfs/glusterfs-georep-logrotate
+%exclude %{_sysconfdir}/glusterfs/glusterfs-logrotate
+%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-5.8.conf
+%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-7.2.conf
+%exclude %{_sysconfdir}/glusterfs/group-virt.example
+%exclude %{_sysconfdir}/glusterfs/logger.conf.example
+%exclude %_init_glusterd
+%exclude %{_sysconfdir}/sysconfig/glusterd
+%exclude %{_bindir}/glusterfind
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/pump.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt*
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server*
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage*
+%exclude %{_libdir}/libgfdb.so.*
+%exclude %{_sbindir}/gcron.py
+%exclude %{_sbindir}/glfsheal
+%exclude %{_sbindir}/glusterd
+%exclude %{_sbindir}/snap_scheduler.py
+%exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
+%if 0%{?_tmpfilesdir:1}
+%exclude %{_tmpfilesdir}/gluster.conf
+%endif
+%endif
 %doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS
 %{_mandir}/man8/*gluster*.8*
 %exclude %{_mandir}/man8/gluster.8*
@@ -979,6 +1090,7 @@ exit 0
 %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/ganesha.so
 %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so
 
+%if ( 0%{!?_without_extra_xlators:1} )
 %files extra-xlators
 %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
 %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy.so
@@ -991,6 +1103,11 @@ exit 0
 %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
 # Glupy Python files
 %{python_sitelib}/gluster/glupy/*
+# Don't expect a .egg-info file on EL5
+%if ( ! ( 0%{?rhel} && 0%{?rhel} < 6 ) )
+%{python_sitelib}/glusterfs_glupy*.egg-info
+%endif
+%endif
 
 %files fuse
 # glusterfs is a symlink to glusterfsd, -server depends on -fuse.
@@ -1008,13 +1125,16 @@ exit 0
 %endif
 %endif
 
+%if ( 0%{?_build_server} )
 %files ganesha
 %{_sysconfdir}/ganesha/*
 %{_libexecdir}/ganesha/*
 %{_prefix}/lib/ocf/resource.d/heartbeat/*
 %{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
 %{_sharedstatedir}/glusterd/hooks/1/reset/post/S31ganesha-reset.sh
+%endif
 
+%if ( 0%{?_build_server} )
 %if ( 0%{!?_without_georeplication:1} )
 %files geo-replication
 %config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep
@@ -1042,6 +1162,7 @@ exit 0
 %{_datadir}/glusterfs/scripts/gsync-sync-gfid
 %{_datadir}/glusterfs/scripts/schedule_georep.py*
 %endif
+%endif
 
 %files libs
 %{_libdir}/*.so.*
@@ -1061,19 +1182,25 @@ exit 0
 %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
 %endif
 
+%if ( 0%{?_build_server} )
 %files regression-tests
 %{_prefix}/share/glusterfs/run-tests.sh
 %{_prefix}/share/glusterfs/tests
 %exclude %{_prefix}/share/glusterfs/tests/basic/rpm.t
 %exclude %{_prefix}/share/glusterfs/tests/vagrant
+%endif
 
+%if ( 0%{?_build_server} )
 %if ( 0%{!?_without_ocf:1} )
 %files resource-agents
 # /usr/lib is the standard for OCF, also on x86_64
 %{_prefix}/lib/ocf/resource.d/glusterfs
 %endif
+%endif
 
+%if ( 0%{?_build_server} )
 %files server
+%exclude %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
 %doc extras/clear_xattrs.sh
 # sysconf
 %config(noreplace) %{_sysconfdir}/glusterfs
@@ -1123,7 +1250,6 @@ exit 0
 %{_sbindir}/snap_scheduler.py
 %{_sbindir}/gcron.py
 
-<<<<<<< 2944c7b6656a36a79551f9f9f24ab7a10467f13a
 # /var/lib/glusterd, e.g. hookscripts, etc.
 %ghost      %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
        %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd
@@ -1194,6 +1320,435 @@ exit 0
 %if ( 0%{?_with_firewalld:1} )
 %{_prefix}/lib/firewalld/services/glusterfs.xml
 %endif
+%endif
+
+
+##-----------------------------------------------------------------------------
+## All %pretrans should be placed here and keep them sorted
+##
+%if 0%{?_build_server}
+%pretrans -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped."
+          echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!"
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime."
+   echo "WARNING: Ensure to wait for the upgraded server to finish healing before proceeding."
+   echo "WARNING: Refer upgrade section of install guide for more details"
+   echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;"
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans api -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-api_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans api-devel -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-api-devel_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans devel -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-devel_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans fuse -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-fuse_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%if 0%{?_can_georeplicate}
+%if ( 0%{!?_without_georeplication:1} )
+%pretrans geo-replication -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-geo-replication_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+%endif
+%endif
+
+
+
+%pretrans libs -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-libs_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%if ( 0%{!?_without_rdma:1} )
+%pretrans rdma -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-rdma_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+%endif
+
+
+
+%if ( 0%{!?_without_ocf:1} )
+%pretrans resource-agents -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-resource-agents_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+%endif
+
+
+
+%pretrans server -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-server_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+%endif
 
 %changelog
 * Mon Aug 22 2016 Milind Changire <mchangir@redhat.com>
-- 
1.7.1