From cd770b8697ea79bd884925bcfdf451d98a2d5c25 Mon Sep 17 00:00:00 2001
From: Bala.FA <barumuga@redhat.com>
Date: Fri, 28 Feb 2014 15:28:44 +0530
Subject: [PATCH 08/86] build: add RHGS specific changes

Label: DOWNSTREAM ONLY

Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1074947
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1097782
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1115267
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1221743
Change-Id: I08333334745adf2350e772c6454ffcfe9c08cb89
Reviewed-on: https://code.engineering.redhat.com/gerrit/24983
Reviewed-on: https://code.engineering.redhat.com/gerrit/25451
Reviewed-on: https://code.engineering.redhat.com/gerrit/25518
Reviewed-on: https://code.engineering.redhat.com/gerrit/25983
Signed-off-by: Bala.FA <barumuga@redhat.com>
Reviewed-on: https://code.engineering.redhat.com/gerrit/60134
Tested-by: Milind Changire <mchangir@redhat.com>
---
 glusterfs.spec.in |  567 ++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 561 insertions(+), 6 deletions(-)
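
Build note: the spec comment added below points at rpmbuild's standard
--without switch for skipping the server-side subpackages. A minimal sketch of
the two invocations, assuming a hypothetical tarball name glusterfs-3.8.4.tar.gz
in place of @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz:

    # client-only build: --without server defines %_without_server, which the
    # first new conditional maps to %_build_server 0
    rpmbuild -ta glusterfs-3.8.4.tar.gz --without server

    # default build; note that, as written, the dist-tag conditional that
    # follows re-sets %_build_server unconditionally: .el6rhs/.el7rhs/.el7rhgs
    # always end up with %_build_server 1 and every other dist tag with 0
    rpmbuild -ta glusterfs-3.8.4.tar.gz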
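
Scriptlet note: each %pretrans scriptlet added below is written in Lua so it can
still run on a fresh installation where /bin/bash is not yet present (in that
case it just returns); once bash is available it writes the same check to a
temporary file and executes it, because rpm's Lua on RHEL5 lacks os.tmpname().
The main glusterfs scriptlet prints ERROR/WARNING messages, the subpackage
variants exit silently. A standalone sketch of that check, assuming the
/var/lib/glusterd/vols/<vol>/info layout the scriptlets read, where type=0 and
status=1 mark a started distribute volume:

    #!/bin/sh
    # refuse to proceed while brick processes are running
    if pidof -c -o %PPID -x glusterfsd > /dev/null 2>&1; then
        for info in /var/lib/glusterd/vols/*/info; do
            [ -f "$info" ] || continue
            vol_type=$(grep '^type=' "$info" | awk -F'=' '{print $2}')
            started=$(grep '^status=' "$info" | awk -F'=' '{print $2}')
            if [ "$vol_type" -eq 0 ] && [ "$started" -eq 1 ]; then
                echo "ERROR: stop distribute volume(s) before upgrading" >&2
                exit 1
            fi
        done
        echo "WARNING: stop glusterd, glusterfs, glusterfsd and gsyncd.py before upgrading" >&2
        exit 1
    fi
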
diff --git a/glusterfs.spec.in b/glusterfs.spec.in
index 011150d..b0c4cb9 100644
--- a/glusterfs.spec.in
+++ b/glusterfs.spec.in
@@ -72,6 +72,23 @@
 %global _without_tiering --disable-tiering
 %endif
 
+# if you wish not to build server rpms, compile like this.
+# rpmbuild -ta @PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz --without server
+
+%global _build_server 1
+%if "%{?_without_server}"
+%global _build_server 0
+%endif
+
+%if ( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" )
+%global _build_server 1
+%else
+%global _build_server 0
+%endif
+
+%global _without_extra_xlators 1
+%global _without_regression_tests 1
+
 ##-----------------------------------------------------------------------------
 ## All %%global definitions should be placed here and keep them sorted
 ##
@@ -162,8 +179,8 @@ Vendor:           Fedora Project
 %else
 Name:             @PACKAGE_NAME@
 Version:          @PACKAGE_VERSION@
-Release:          0.@PACKAGE_RELEASE@%{?dist}
-Vendor:           Gluster Community
+Release:          @PACKAGE_RELEASE@%{?dist}
+ExclusiveArch:    x86_64 aarch64
 %endif
 License:          GPLv2 or LGPLv3+
 Group:            System Environment/Base
@@ -298,7 +315,9 @@ Summary:          Development Libraries
 Group:            Development/Libraries
 Requires:         %{name}%{?_isa} = %{version}-%{release}
 # Needed for the Glupy examples to work
-Requires:         %{name}-extra-xlators%{?_isa} = %{version}-%{release}
+%if ( 0%{!?_without_extra_xlators:1} )
+Requires:         %{name}-extra-xlators = %{version}-%{release}
+%endif
 
 %description devel
 GlusterFS is a distributed file-system capable of scaling to several
@@ -311,6 +330,7 @@ is in user space and easily manageable.
 
 This package provides the development libraries and include files.
 
+%if ( 0%{!?_without_extra_xlators:1} )
 %package extra-xlators
 Summary:          Extra Gluster filesystem Translators
 Group:            Applications/File
@@ -330,6 +350,7 @@ is in user space and easily manageable.
 
 This package provides extra filesystem Translators, such as Glupy,
 for GlusterFS.
+%endif
 
 %package fuse
 Summary:          Fuse client
@@ -356,6 +377,7 @@ is in user space and easily manageable.
 This package provides support to FUSE based clients and inlcudes the
 glusterfs(d) binary.
 
+%if ( 0%{?_build_server} )
 %package ganesha
 Summary:          NFS-Ganesha configuration
 Group:            Applications/File
@@ -377,7 +399,9 @@ is in user space and easily manageable.
 
 This package provides the configuration and related files for using
 NFS-Ganesha as the NFS server using GlusterFS
+%endif
 
+%if ( 0%{?_build_server} )
 %if ( 0%{!?_without_georeplication:1} )
 %package geo-replication
 Summary:          GlusterFS Geo-replication
@@ -398,6 +422,7 @@ is in userspace and easily manageable.
 
 This package provides support to geo-replication.
 %endif
+%endif
 
 %package libs
 Summary:          GlusterFS common libraries
@@ -456,6 +481,8 @@ is in user space and easily manageable.
 This package provides support to ib-verbs library.
 %endif
 
+%if ( 0%{?_build_server} )
+%if ( 0%{!?_without_regression_tests:1} )
 %package regression-tests
 Summary:          Development Tools
 Group:            Development/Tools
@@ -471,7 +498,10 @@ Requires:         nfs-utils xfsprogs yajl
 %description regression-tests
 The Gluster Test Framework, is a suite of scripts used for
 regression testing of Gluster.
+%endif
+%endif
 
+%if ( 0%{?_build_server} )
 %if ( 0%{!?_without_ocf:1} )
 %package resource-agents
 Summary:          OCF Resource Agents for GlusterFS
@@ -504,7 +534,9 @@ This package provides the resource agents which plug glusterd into
 Open Cluster Framework (OCF) compliant cluster resource managers,
 like Pacemaker.
 %endif
+%endif
 
+%if ( 0%{?_build_server} )
 %package server
 Summary:          Clustered file-system server
 Group:            System Environment/Daemons
@@ -554,6 +586,7 @@ called Translators from GNU Hurd kernel. Much of the code in GlusterFS
 is in user space and easily manageable.
 
 This package provides the glusterfs server daemon.
+%endif
 
 %package client-xlators
 Summary:          GlusterFS client-side translators
@@ -753,6 +786,7 @@ modprobe fuse
 exit 0
 %endif
 
+%if ( 0%{?_build_server} )
 %if ( 0%{!?_without_georeplication:1} )
 %post geo-replication
 if [ $1 -ge 1 ]; then
@@ -760,10 +794,12 @@ if [ $1 -ge 1 ]; then
 fi
 exit 0
 %endif
+%endif
 
 %post libs
 /sbin/ldconfig
 
+%if ( 0%{?_build_server} )
 %post server
 # Legacy server
 %_init_enable glusterd
@@ -838,11 +874,12 @@ else
     #rpm_script_t context.
     rm -rf /var/run/glusterd.socket
 fi
-exit 0
+%endif
 
 ##-----------------------------------------------------------------------------
 ## All %%preun should be placed here and keep them sorted
 ##
+%if ( 0%{?_build_server} )
 %preun server
 if [ $1 -eq 0 ]; then
     if [ -f %_init_glusterfsd ]; then
@@ -860,7 +897,7 @@ if [ $1 -ge 1 ]; then
     fi
     %_init_restart glusterd
 fi
-exit 0
+%endif
 
 ##-----------------------------------------------------------------------------
 ## All %%postun should be placed here and keep them sorted
@@ -893,6 +930,80 @@ exit 0
 ## All %%files should be placed here and keep them grouped
 ##
 %files
+# exclude extra-xlators files
+%if ( ! 0%{!?_without_extra_xlators:1} )
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/mac-compat.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_client.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_dht.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_server.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/features/template.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
+%exclude %{python_sitelib}/*
+%endif
+# exclude regression-tests files
+%if ( ! 0%{!?_without_regression_tests:1} )
+%exclude %{_prefix}/share/glusterfs/run-tests.sh
+%exclude %{_prefix}/share/glusterfs/tests/*
+%endif
+%if ( ! 0%{?_build_server} )
+# exclude ganesha files
+%exclude %{_sysconfdir}/ganesha/*
+%exclude %{_libexecdir}/ganesha/*
+%exclude %{_prefix}/lib/ocf/*
+# exclude geo-replication files
+%exclude %{_sysconfdir}/logrotate.d/glusterfs-georep
+%exclude %{_libexecdir}/glusterfs/*
+%exclude %{_sbindir}/gfind_missing_files
+%exclude %{_datadir}/glusterfs/scripts/get-gfid.sh
+%exclude %{_datadir}/glusterfs/scripts/slave-upgrade.sh
+%exclude %{_datadir}/glusterfs/scripts/gsync-upgrade.sh
+%exclude %{_datadir}/glusterfs/scripts/generate-gfid-file.sh
+%exclude %{_datadir}/glusterfs/scripts/gsync-sync-gfid
+%exclude %{_sharedstatedir}/glusterd/*
+# exclude server files
+%exclude %{_sysconfdir}/glusterfs
+%exclude %{_sysconfdir}/glusterfs/glusterd.vol
+%exclude %{_sysconfdir}/glusterfs/glusterfs-georep-logrotate
+%exclude %{_sysconfdir}/glusterfs/glusterfs-logrotate
+%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-5.8.conf
+%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-7.2.conf
+%exclude %{_sysconfdir}/glusterfs/group-virt.example
+%exclude %{_sysconfdir}/glusterfs/logger.conf.example
+%exclude %_init_glusterd
+%exclude %{_sysconfdir}/sysconfig/glusterd
+%exclude %{_bindir}/glusterfind
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/pump.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt*
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server*
+%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage*
+%exclude %{_libdir}/libgfdb.so.*
+%exclude %{_sbindir}/gcron.py
+%exclude %{_sbindir}/glfsheal
+%exclude %{_sbindir}/glusterd
+%exclude %{_sbindir}/snap_scheduler.py
+%exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
+%if 0%{?_tmpfilesdir:1}
+%exclude %{_tmpfilesdir}/gluster.conf
+%endif
+%endif
 %doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS
 %{_mandir}/man8/*gluster*.8*
 %exclude %{_mandir}/man8/gluster.8*
@@ -979,6 +1090,7 @@ exit 0
 %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/ganesha.so
 %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so
 
+%if ( 0%{!?_without_extra_xlators:1} )
 %files extra-xlators
 %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
 %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy.so
@@ -991,6 +1103,11 @@ exit 0
 %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
 # Glupy Python files
 %{python_sitelib}/gluster/glupy/*
+# Don't expect a .egg-info file on EL5
+%if ( ! ( 0%{?rhel} && 0%{?rhel} < 6 ) )
+%{python_sitelib}/glusterfs_glupy*.egg-info
+%endif
+%endif
 
 %files fuse
 # glusterfs is a symlink to glusterfsd, -server depends on -fuse.
@@ -1008,13 +1125,16 @@ exit 0
 %endif
 %endif
 
+%if ( 0%{?_build_server} )
 %files ganesha
 %{_sysconfdir}/ganesha/*
 %{_libexecdir}/ganesha/*
 %{_prefix}/lib/ocf/resource.d/heartbeat/*
 %{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
 %{_sharedstatedir}/glusterd/hooks/1/reset/post/S31ganesha-reset.sh
+%endif
 
+%if ( 0%{?_build_server} )
 %if ( 0%{!?_without_georeplication:1} )
 %files geo-replication
 %config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep
@@ -1042,6 +1162,7 @@ exit 0
 %{_datadir}/glusterfs/scripts/gsync-sync-gfid
 %{_datadir}/glusterfs/scripts/schedule_georep.py*
 %endif
+%endif
 
 %files libs
 %{_libdir}/*.so.*
@@ -1061,19 +1182,25 @@ exit 0
 %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
 %endif
 
+%if ( 0%{?_build_server} )
 %files regression-tests
 %{_prefix}/share/glusterfs/run-tests.sh
 %{_prefix}/share/glusterfs/tests
 %exclude %{_prefix}/share/glusterfs/tests/basic/rpm.t
 %exclude %{_prefix}/share/glusterfs/tests/vagrant
+%endif
 
+%if ( 0%{?_build_server} )
 %if ( 0%{!?_without_ocf:1} )
 %files resource-agents
 # /usr/lib is the standard for OCF, also on x86_64
 %{_prefix}/lib/ocf/resource.d/glusterfs
 %endif
+%endif
 
+%if ( 0%{?_build_server} )
 %files server
+%exclude %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
 %doc extras/clear_xattrs.sh
 # sysconf
 %config(noreplace) %{_sysconfdir}/glusterfs
@@ -1123,7 +1250,6 @@ exit 0
 %{_sbindir}/snap_scheduler.py
 %{_sbindir}/gcron.py
 
-<<<<<<< 2944c7b6656a36a79551f9f9f24ab7a10467f13a
 # /var/lib/glusterd, e.g. hookscripts, etc.
 %ghost      %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
        %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd
@@ -1194,6 +1320,435 @@ exit 0
 %if ( 0%{?_with_firewalld:1} )
 %{_prefix}/lib/firewalld/services/glusterfs.xml
 %endif
+%endif
+
+
+##-----------------------------------------------------------------------------
+## All %pretrans should be placed here and keep them sorted
+##
+%if 0%{?_build_server}
+%pretrans -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped."
+          echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!"
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime."
+   echo "WARNING: Ensure to wait for the upgraded server to finish healing before proceeding."
+   echo "WARNING: Refer upgrade section of install guide for more details"
+   echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;"
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans api -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-api_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans api-devel -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-api-devel_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans devel -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-devel_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%pretrans fuse -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-fuse_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%if 0%{?_can_georeplicate}
+%if ( 0%{!?_without_georeplication:1} )
+%pretrans geo-replication -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-geo-replication_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+%endif
+%endif
+
+
+
+%pretrans libs -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-libs_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+
+
+
+%if ( 0%{!?_without_rdma:1} )
+%pretrans rdma -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-rdma_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+%endif
+
+
+
+%if ( 0%{!?_without_ocf:1} )
+%pretrans resource-agents -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-resource-agents_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+%endif
+
+
+
+%pretrans server -p <lua>
+if not posix.access("/bin/bash", "x") then
+    -- initial installation, no shell, no running glusterfsd
+    return 0
+end
+
+-- TODO: move this completely to a lua script
+-- For now, we write a temporary bash script and execute that.
+
+script = [[#!/bin/sh
+pidof -c -o %PPID -x glusterfsd &>/dev/null
+
+if [ $? -eq 0 ]; then
+   pushd . > /dev/null 2>&1
+   for volume in /var/lib/glusterd/vols/*; do cd $volume;
+       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
+       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
+       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
+          exit 1;
+       fi
+   done
+
+   popd > /dev/null 2>&1
+   exit 1;
+fi
+]]
+
+-- rpm in RHEL5 does not have os.tmpname()
+-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
+tmpname = "/tmp/glusterfs-server_pretrans_" .. os.date("%s")
+tmpfile = io.open(tmpname, "w")
+tmpfile:write(script)
+tmpfile:close()
+ok, how, val = os.execute("/bin/bash " .. tmpname)
+os.remove(tmpname)
+if not (ok == 0) then
+   error("Detected running glusterfs processes", ok)
+end
+%endif
 
 %changelog
 * Mon Aug 22 2016 Milind Changire <mchangir@redhat.com>
-- 
1.7.1