%global _hardened_build 1

%global _for_fedora_koji_builds 0

# uncomment and add '%' to use the prereltag for pre-releases
# %%global prereltag qa3

##-----------------------------------------------------------------------------
## All argument definitions should be placed here and kept sorted
##

# asan
# if you wish to compile an rpm with address sanitizer...
# rpmbuild -ta glusterfs-6.0.tar.gz --with asan
%{?_with_asan:%global _with_asan --enable-asan}

%if ( 0%{?rhel} && 0%{?rhel} < 7 )
%global _with_asan %{nil}
%endif
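
# How these knobs work (assuming the stock rpmbuild --with/--without
# macros): "rpmbuild --with asan" predefines %%_with_asan as "--with-asan",
# and the conditional above then rewrites it to the configure flag
# "--enable-asan". The remaining --with/--without knobs in this section
# follow the same pattern.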

# bd
# if you wish to compile an rpm without the BD map support...
# rpmbuild -ta glusterfs-6.0.tar.gz --without bd
%{?_without_bd:%global _without_bd --disable-bd-xlator}

%if ( 0%{?rhel} && 0%{?rhel} > 7 )
%global _without_bd --without-bd
%endif

# cmocka
# if you wish to compile an rpm with cmocka unit testing...
# rpmbuild -ta glusterfs-6.0.tar.gz --with cmocka
%{?_with_cmocka:%global _with_cmocka --enable-cmocka}

# debug
# if you wish to compile an rpm with debugging...
# rpmbuild -ta glusterfs-6.0.tar.gz --with debug
%{?_with_debug:%global _with_debug --enable-debug}

# epoll
# if you wish to compile an rpm without epoll...
# rpmbuild -ta glusterfs-6.0.tar.gz --without epoll
%{?_without_epoll:%global _without_epoll --disable-epoll}

# fusermount
# if you wish to compile an rpm without fusermount...
# rpmbuild -ta glusterfs-6.0.tar.gz --without fusermount
%{?_without_fusermount:%global _without_fusermount --disable-fusermount}

# geo-rep
# if you wish to compile an rpm without geo-replication support, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without georeplication
%{?_without_georeplication:%global _without_georeplication --disable-georeplication}

# ipv6default
# if you wish to compile an rpm with IPv6 default...
# rpmbuild -ta glusterfs-6.0.tar.gz --with ipv6default
%{?_with_ipv6default:%global _with_ipv6default --with-ipv6-default}

# libtirpc
# if you wish to compile an rpm without TIRPC (i.e. use the legacy glibc rpc)...
# rpmbuild -ta glusterfs-6.0.tar.gz --without libtirpc
%{?_without_libtirpc:%global _without_libtirpc --without-libtirpc}

# Do not use libtirpc on EL6, it does not have xdr_uint64_t() and xdr_uint32_t()
# Do not use libtirpc on EL7, it does not have xdr_sizeof()
%if ( 0%{?rhel} && 0%{?rhel} <= 7 )
%global _without_libtirpc --without-libtirpc
%endif

# ocf
# if you wish to compile an rpm without the OCF resource agents...
# rpmbuild -ta glusterfs-6.0.tar.gz --without ocf
%{?_without_ocf:%global _without_ocf --without-ocf}

# rdma
# if you wish to compile an rpm without rdma support, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without rdma
%{?_without_rdma:%global _without_rdma --disable-ibverbs}

# No RDMA support on 32-bit ARM
%ifarch armv7hl
%global _without_rdma --disable-ibverbs
%endif

# server
# if you wish to build rpms without server components, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without server
%{?_without_server:%global _without_server --without-server}

# forcefully disable the server components on RHEL unless this is an RHGS
# build (i.e. one of the .el6rhs, .el7rhs, .el7rhgs or .el8rhgs dist tags)
%if ( 0%{?rhel} )
%if (!(( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" ) || ( "%{?dist}" == ".el8rhgs" )))
%global _without_server --without-server
%endif
%endif

%global _without_extra_xlators 1
%global _without_regression_tests 1

# syslog
# if you wish to build rpms without syslog logging, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without syslog
%{?_without_syslog:%global _without_syslog --disable-syslog}

# disable syslog forcefully because rhel <= 6 doesn't have rsyslog or
# rsyslog-mmcount
# Fedora deprecated syslog, see
#  https://fedoraproject.org/wiki/Changes/NoDefaultSyslog
# (And what about RHEL7?)
%if ( 0%{?fedora} && 0%{?fedora} >= 20 ) || ( 0%{?rhel} && 0%{?rhel} <= 6 )
%global _without_syslog --disable-syslog
%endif

# tsan
# if you wish to compile an rpm with thread sanitizer...
# rpmbuild -ta glusterfs-6.0.tar.gz --with tsan
%{?_with_tsan:%global _with_tsan --enable-tsan}

%if ( 0%{?rhel} && 0%{?rhel} < 7 )
%global _with_tsan %{nil}
%endif

# valgrind
# if you wish to compile an rpm to run all processes under valgrind...
# rpmbuild -ta glusterfs-6.0.tar.gz --with valgrind
%{?_with_valgrind:%global _with_valgrind --enable-valgrind}
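
# A minimal sketch of how these per-feature flags are presumably consumed
# later in %%build (the usual autotools flow; the actual configure
# invocation lives outside this section):
#   %%configure %%{?_with_asan} %%{?_with_debug} %%{?_without_epoll} \
#       %%{?_without_rdma} %%{?_with_valgrind} ...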

##-----------------------------------------------------------------------------
## All %%global definitions should be placed here and kept sorted
##

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
%global _with_systemd true
%endif

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 7 )
%global _with_firewalld --enable-firewalld
%endif

%if 0%{?_tmpfilesdir:1}
%global _with_tmpfilesdir --with-tmpfilesdir=%{_tmpfilesdir}
%else
%global _with_tmpfilesdir --without-tmpfilesdir
%endif

# building --without server should also disable some server-only components
%if 0%{?_without_server:1}
%global _without_events --disable-events
%global _without_georeplication --disable-georeplication
%global _without_tiering --disable-tiering
%global _without_ocf --without-ocf
%endif

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
%global _usepython3 1
%global _pythonver 3
%else
%global _usepython3 0
%global _pythonver 2
%endif

# From https://fedoraproject.org/wiki/Packaging:Python#Macros
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
%{!?python2_sitelib: %global python2_sitelib %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%{!?python2_sitearch: %global python2_sitearch %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
%global _rundir %{_localstatedir}/run
%endif

%if ( 0%{?_with_systemd:1} )
%global service_enable()   /bin/systemctl --quiet enable %1.service || : \
%{nil}
%global service_start()   /bin/systemctl --quiet start %1.service || : \
%{nil}
%global service_stop()    /bin/systemctl --quiet stop %1.service || : \
%{nil}
%global service_install() install -D -p -m 0644 %1.service %{buildroot}%2 \
%{nil}
# can't seem to make a generic macro that works
%global glusterd_svcfile   %{_unitdir}/glusterd.service
%global glusterfsd_svcfile %{_unitdir}/glusterfsd.service
%global glusterta_svcfile  %{_unitdir}/gluster-ta-volume.service
%global glustereventsd_svcfile %{_unitdir}/glustereventsd.service
%global glusterfssharedstorage_svcfile %{_unitdir}/glusterfssharedstorage.service
%else
%global service_enable()  /sbin/chkconfig --add %1 >/dev/null 2>&1 || : \
%{nil}
%global systemd_preun() /sbin/chkconfig --del %1 >/dev/null 2>&1 || : \
%{nil}
%global systemd_postun_with_restart() /sbin/service %1 condrestart >/dev/null 2>&1 || : \
%{nil}
%global service_start()   /sbin/service %1 start >/dev/null 2>&1 || : \
%{nil}
%global service_stop()    /sbin/service %1 stop >/dev/null 2>&1 || : \
%{nil}
%global service_install() install -D -p -m 0755 %1.init %{buildroot}%2 \
%{nil}
# can't seem to make a generic macro that works
%global glusterd_svcfile   %{_sysconfdir}/init.d/glusterd
%global glusterfsd_svcfile %{_sysconfdir}/init.d/glusterfsd
%global glustereventsd_svcfile %{_sysconfdir}/init.d/glustereventsd
%endif
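
# Illustrative usage of the wrappers above (an example matching their
# definitions, not a scriptlet from this spec):
#   %%post server
#   %%service_enable glusterd
# expands to '/bin/systemctl --quiet enable glusterd.service || :' on
# systemd systems, and to the chkconfig equivalent on sysvinit systems.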

%{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}}

# We do not want rpm to generate useless Provides and Requires for the
# xlator .so files in the glusterfs packages.
# Filter out everything it auto-generates for them:
#
# TODO: RHEL5 does not have a convenient solution
%if ( 0%{?rhel} == 6 )
# filter_setup exists in RHEL6 only
%filter_provides_in %{_libdir}/glusterfs/%{version}/
%global __filter_from_req %{?__filter_from_req} | grep -v -P '^(?!lib).*\.so.*$'
%filter_setup
%else
# modern rpm and current Fedora do not generate requires when the
# provides are filtered
%global __provides_exclude_from ^%{_libdir}/glusterfs/%{version}/.*$
%endif
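
# For example (illustrative only, with version 6.0 on a 64-bit system):
# a module such as %%{_libdir}/glusterfs/6.0/xlator/cluster/distribute.so
# matches the exclude pattern above, so rpm emits no automatic Provides
# for it.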

##-----------------------------------------------------------------------------
## All package definitions should be placed here in alphabetical order
##
Summary:          Distributed File System
%if ( 0%{_for_fedora_koji_builds} )
Name:             glusterfs
Version:          3.8.0
Release:          0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name:             glusterfs
Version:          6.0
Release:          29%{?dist}
ExcludeArch:      i686
%endif
License:          GPLv2 or LGPLv3+
URL:              http://docs.gluster.org/
%if ( 0%{_for_fedora_koji_builds} )
Source0:          http://bits.gluster.org/pub/gluster/glusterfs/src/glusterfs-%{version}%{?prereltag}.tar.gz
Source1:          glusterd.sysconfig
Source2:          glusterfsd.sysconfig
Source7:          glusterfsd.service
Source8:          glusterfsd.init
%else
Source0:          glusterfs-6.0.tar.gz
%endif

Requires(pre):    shadow-utils
%if ( 0%{?_with_systemd:1} )
BuildRequires:    systemd
%endif

Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif
%if 0%{?_with_asan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
BuildRequires:    libasan
%endif
%if 0%{?_with_tsan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
BuildRequires:    libtsan
%endif
BuildRequires:    git
BuildRequires:    bison flex
BuildRequires:    gcc make libtool
BuildRequires:    ncurses-devel readline-devel
BuildRequires:    libxml2-devel openssl-devel
BuildRequires:    libaio-devel libacl-devel
BuildRequires:    python%{_pythonver}-devel
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
BuildRequires:    python-ctypes
%endif
%if ( 0%{?_with_ipv6default:1} ) || ( 0%{!?_without_libtirpc:1} ) || ( 0%{?rhel} && ( 0%{?rhel} >= 8 ) )
BuildRequires:    libtirpc-devel
%endif
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
BuildRequires:    rpcgen
%endif
BuildRequires:    userspace-rcu-devel >= 0.7
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
BuildRequires:    automake
%endif
BuildRequires:    libuuid-devel
%if ( 0%{?_with_cmocka:1} )
BuildRequires:    libcmocka-devel >= 1.0.1
%endif
%if ( 0%{!?_without_tiering:1} )
BuildRequires:    sqlite-devel
%endif
%if ( 0%{!?_without_georeplication:1} )
BuildRequires:    libattr-devel
%endif

%if (0%{?_with_firewalld:1})
BuildRequires:    firewalld
%endif

Obsoletes:        hekafs
Obsoletes:        %{name}-common < %{version}-%{release}
Obsoletes:        %{name}-core < %{version}-%{release}
Obsoletes:        %{name}-ufo
%if ( 0%{!?_with_gnfs:1} )
Obsoletes:        %{name}-gnfs
%endif
%if ( 0%{?rhel} < 7 )
Obsoletes:        %{name}-ganesha
%endif
Provides:         %{name}-common = %{version}-%{release}
Provides:         %{name}-core = %{version}-%{release}

# Patch0001: 0001-Update-rfc.sh-to-rhgs-3.5.0.patch
Patch0002: 0002-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch
Patch0003: 0003-rpc-set-bind-insecure-to-off-by-default.patch
Patch0004: 0004-glusterd-spec-fixing-autogen-issue.patch
Patch0005: 0005-libglusterfs-glusterd-Fix-compilation-errors.patch
Patch0006: 0006-build-remove-ghost-directory-entries.patch
Patch0007: 0007-build-add-RHGS-specific-changes.patch
Patch0008: 0008-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch
Patch0009: 0009-build-introduce-security-hardening-flags-in-gluster.patch
Patch0010: 0010-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch
Patch0011: 0011-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch
Patch0012: 0012-build-add-pretrans-check.patch
Patch0013: 0013-glusterd-fix-info-file-checksum-mismatch-during-upgr.patch
Patch0014: 0014-build-spec-file-conflict-resolution.patch
Patch0015: 0015-build-randomize-temp-file-names-in-pretrans-scriptle.patch
Patch0016: 0016-glusterd-parallel-readdir-Change-the-op-version-of-p.patch
Patch0017: 0017-glusterd-Revert-op-version-for-cluster.max-brick-per.patch
Patch0018: 0018-cli-Add-message-for-user-before-modifying-brick-mult.patch
Patch0019: 0019-build-launch-glusterd-upgrade-after-all-new-bits-are.patch
Patch0020: 0020-spec-unpackaged-files-found-for-RHEL-7-client-build.patch
Patch0021: 0021-cli-glusterfsd-remove-copyright-information.patch
Patch0022: 0022-cli-Remove-upstream-doc-reference.patch
Patch0023: 0023-hooks-remove-selinux-hooks.patch
Patch0024: 0024-glusterd-Make-localtime-logging-option-invisible-in-.patch
Patch0025: 0025-build-make-RHGS-version-available-for-server.patch
Patch0026: 0026-glusterd-Introduce-daemon-log-level-cluster-wide-opt.patch
Patch0027: 0027-glusterd-change-op-version-of-fips-mode-rchecksum.patch
Patch0028: 0028-glusterd-Reset-op-version-for-features.shard-deletio.patch
Patch0029: 0029-glusterd-Reset-op-version-for-features.shard-lru-lim.patch
Patch0030: 0030-selinux-glusterd-add-features.selinux-to-glusterd-vo.patch
Patch0031: 0031-glusterd-turn-off-selinux-feature-in-downstream.patch
Patch0032: 0032-glusterd-update-gd-op-version-to-3_7_0.patch
Patch0033: 0033-build-add-missing-explicit-package-dependencies.patch
Patch0034: 0034-glusterd-introduce-a-new-op-version-for-rhgs-3.4.3.patch
Patch0035: 0035-glusterd-tag-rebalance-mgmt_v3-command-to-op-version.patch
Patch0036: 0036-build-add-conditional-dependency-on-server-for-devel.patch
Patch0037: 0037-cli-change-the-warning-message.patch
Patch0038: 0038-spec-avoid-creation-of-temp-file-in-lua-script.patch
Patch0039: 0039-cli-fix-query-to-user-during-brick-mux-selection.patch
Patch0040: 0040-build-Remove-unsupported-test-cases-failing-consiste.patch
Patch0041: 0041-tests-geo-rep-Build-failed-in-Jenkins-for-test-bug-1.patch
Patch0042: 0042-spec-client-server-Builds-are-failing-on-rhel-6.patch
Patch0043: 0043-inode-don-t-dump-the-whole-table-to-CLI.patch
Patch0044: 0044-cluster-ec-Don-t-enqueue-an-entry-if-it-is-already-h.patch
Patch0045: 0045-glusterd-fix-txn-id-mem-leak.patch
Patch0046: 0046-protocol-client-Do-not-fallback-to-anon-fd-if-fd-is-.patch
Patch0047: 0047-client-rpc-Fix-the-payload-being-sent-on-the-wire.patch
Patch0048: 0048-gfapi-Unblock-epoll-thread-for-upcall-processing.patch
Patch0049: 0049-transport-socket-log-shutdown-msg-occasionally.patch
Patch0050: 0050-geo-rep-Fix-syncing-multiple-rename-of-symlink.patch
Patch0051: 0051-spec-update-rpm-install-condition.patch
Patch0052: 0052-geo-rep-IPv6-support.patch
Patch0053: 0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch
Patch0054: 0054-Revert-glusterd-storhaug-remove-ganesha.patch
Patch0055: 0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch
Patch0056: 0056-common-ha-fixes-for-Debian-based-systems.patch
Patch0057: 0057-ganesha-scripts-Remove-export-entries-from-ganesha.c.patch
Patch0058: 0058-glusterd-ganesha-During-volume-delete-remove-the-gan.patch
Patch0059: 0059-glusterd-ganesha-throw-proper-error-for-gluster-nfs-.patch
Patch0060: 0060-ganesha-scripts-Stop-ganesha-process-on-all-nodes-if.patch
Patch0061: 0061-ganesha-allow-refresh-config-and-volume-export-unexp.patch
Patch0062: 0062-glusterd-ganesha-perform-removal-of-ganesha.conf-on-.patch
Patch0063: 0063-glusterd-ganesha-update-cache-invalidation-properly-.patch
Patch0064: 0064-glusterd-ganesha-return-proper-value-in-pre_setup.patch
Patch0065: 0065-ganesha-scripts-remove-dependency-over-export-config.patch
Patch0066: 0066-glusterd-ganesha-add-proper-NULL-check-in-manage_exp.patch
Patch0067: 0067-ganesha-minor-improvments-for-commit-e91cdf4-17081.patch
Patch0068: 0068-common-ha-surviving-ganesha.nfsd-not-put-in-grace-on.patch
Patch0069: 0069-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
Patch0070: 0070-packaging-glusterfs-ganesha-update-sometimes-fails-s.patch
Patch0071: 0071-common-ha-enable-and-disable-selinux-gluster_use_exe.patch
Patch0072: 0072-ganesha-ha-don-t-set-SELinux-booleans-if-SELinux-is-.patch
Patch0073: 0073-build-remove-ganesha-dependency-on-selinux-policy.patch
Patch0074: 0074-common-ha-enable-pacemaker-at-end-of-setup.patch
Patch0075: 0075-common-ha-Fix-an-incorrect-syntax-during-setup.patch
Patch0076: 0076-glusterd-ganesha-change-voltype-for-ganesha.enable-i.patch
Patch0077: 0077-glusterd-ganesha-create-remove-export-file-only-from.patch
Patch0078: 0078-common-ha-scripts-pass-the-list-of-servers-properly-.patch
Patch0079: 0079-common-ha-All-statd-related-files-need-to-be-owned-b.patch
Patch0080: 0080-glusterd-ganesha-Skip-non-ganesha-nodes-properly-for.patch
Patch0081: 0081-ganesha-ha-ensure-pacemaker-is-enabled-after-setup.patch
Patch0082: 0082-build-Add-dependency-on-netstat-for-glusterfs-ganesh.patch
Patch0083: 0083-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
Patch0084: 0084-glusterd-Fix-duplicate-client_op_version-in-info-fil.patch
Patch0085: 0085-Revert-all-remove-code-which-is-not-being-considered.patch
Patch0086: 0086-Revert-tiering-remove-the-translator-from-build-and-.patch
Patch0087: 0087-ganesha-fixing-minor-issues-after-the-backport-from-.patch
Patch0088: 0088-tier-fix-failures-noticed-during-tier-start-and-tier.patch
Patch0089: 0089-glusterd-gNFS-On-post-upgrade-to-3.2-disable-gNFS-fo.patch
Patch0090: 0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch
Patch0091: 0091-glusterd-gNFS-explicitly-set-nfs.disable-to-off-afte.patch
Patch0092: 0092-logging-Fix-GF_LOG_OCCASSIONALLY-API.patch
Patch0093: 0093-glusterd-Change-op-version-of-cache-invalidation-in-.patch
Patch0094: 0094-glusterd-load-ctime-in-the-client-graph-only-if-it-s.patch
Patch0095: 0095-cluster-afr-Remove-local-from-owners_list-on-failure.patch
Patch0096: 0096-core-Brick-is-not-able-to-detach-successfully-in-bri.patch
Patch0097: 0097-glusterd-tier-while-doing-an-attach-tier-the-self-he.patch
Patch0098: 0098-mgmt-shd-Implement-multiplexing-in-self-heal-daemon.patch
Patch0099: 0099-client-fini-return-fini-after-rpc-cleanup.patch
Patch0100: 0100-clnt-rpc-ref-leak-during-disconnect.patch
Patch0101: 0101-shd-mux-Fix-coverity-issues-introduced-by-shd-mux-pa.patch
Patch0102: 0102-rpc-transport-Missing-a-ref-on-dict-while-creating-t.patch
Patch0103: 0103-dht-NULL-check-before-setting-error-flag.patch
Patch0104: 0104-afr-shd-Cleanup-self-heal-daemon-resources-during-af.patch
Patch0105: 0105-core-Log-level-changes-do-not-effect-on-running-clie.patch
Patch0106: 0106-libgfchangelog-use-find_library-to-locate-shared-lib.patch
Patch0107: 0107-gfapi-add-function-to-set-client-pid.patch
Patch0108: 0108-afr-add-client-pid-to-all-gf_event-calls.patch
Patch0109: 0109-glusterd-Optimize-glusterd-handshaking-code-path.patch
Patch0110: 0110-tier-shd-glusterd-with-shd-mux-the-shd-volfile-path-.patch
Patch0111: 0111-glusterd-fix-loading-ctime-in-client-graph-logic.patch
Patch0112: 0112-geo-rep-fix-incorrectly-formatted-authorized_keys.patch
Patch0113: 0113-spec-Glusterd-did-not-start-by-default-after-node-re.patch
Patch0114: 0114-core-fix-hang-issue-in-__gf_free.patch
Patch0115: 0115-core-only-log-seek-errors-if-SEEK_HOLE-SEEK_DATA-is-.patch
Patch0116: 0116-cluster-ec-fix-fd-reopen.patch
Patch0117: 0117-spec-Remove-thin-arbiter-package.patch
Patch0118: 0118-tests-mark-thin-arbiter-test-ta.t-as-bad.patch
Patch0119: 0119-glusterd-provide-a-way-to-detach-failed-node.patch
Patch0120: 0120-glusterd-shd-Keep-a-ref-on-volinfo-until-attach-rpc-.patch
Patch0121: 0121-spec-glusterfs-devel-for-client-build-should-not-dep.patch
Patch0122: 0122-posix-ctime-Fix-stat-time-attributes-inconsistency-d.patch
Patch0123: 0123-ctime-Fix-log-repeated-logging-during-open.patch
Patch0124: 0124-spec-remove-duplicate-references-to-files.patch
Patch0125: 0125-glusterd-define-dumpops-in-the-xlator_api-of-gluster.patch
Patch0126: 0126-cluster-dht-refactor-dht-lookup-functions.patch
Patch0127: 0127-cluster-dht-Refactor-dht-lookup-functions.patch
Patch0128: 0128-glusterd-Fix-bulkvoldict-thread-logic-in-brick-multi.patch
Patch0129: 0129-core-handle-memory-accounting-correctly.patch
Patch0130: 0130-tier-test-new-tier-cmds.t-fails-after-a-glusterd-res.patch
Patch0131: 0131-tests-dht-Test-that-lookups-are-sent-post-brick-up.patch
Patch0132: 0132-glusterd-remove-duplicate-occurrence-of-features.sel.patch
Patch0133: 0133-glusterd-enable-fips-mode-rchecksum-for-new-volumes.patch
Patch0134: 0134-performance-write-behind-remove-request-from-wip-lis.patch
Patch0135: 0135-geo-rep-fix-incorrectly-formatted-authorized_keys.patch
Patch0136: 0136-glusterd-fix-inconsistent-global-option-output-in-vo.patch
Patch0137: 0137-shd-glusterd-Serialize-shd-manager-to-prevent-race-c.patch
Patch0138: 0138-glusterd-Add-gluster-volume-stop-operation-to-gluste.patch
Patch0139: 0139-ec-shd-Cleanup-self-heal-daemon-resources-during-ec-.patch
Patch0140: 0140-cluster-ec-Reopen-shouldn-t-happen-with-O_TRUNC.patch
Patch0141: 0141-socket-ssl-fix-crl-handling.patch
Patch0142: 0142-lock-check-null-value-of-dict-to-avoid-log-flooding.patch
Patch0143: 0143-packaging-Change-the-dependency-on-nfs-ganesha-to-2..patch
Patch0144: 0144-cluster-ec-honor-contention-notifications-for-partia.patch
Patch0145: 0145-core-Capture-process-memory-usage-at-the-time-of-cal.patch
Patch0146: 0146-dht-Custom-xattrs-are-not-healed-in-case-of-add-bric.patch
Patch0147: 0147-glusterd-bulkvoldict-thread-is-not-handling-all-volu.patch
Patch0148: 0148-cluster-dht-Lookup-all-files-when-processing-directo.patch
Patch0149: 0149-glusterd-Optimize-code-to-copy-dictionary-in-handsha.patch
Patch0150: 0150-libglusterfs-define-macros-needed-for-cloudsync.patch
Patch0151: 0151-mgmt-glusterd-Make-changes-related-to-cloudsync-xlat.patch
Patch0152: 0152-storage-posix-changes-with-respect-to-cloudsync.patch
Patch0153: 0153-features-cloudsync-Added-some-new-functions.patch
Patch0154: 0154-cloudsync-cvlt-Cloudsync-plugin-for-commvault-store.patch
Patch0155: 0155-cloudsync-Make-readdirp-return-stat-info-of-all-the-.patch
Patch0156: 0156-cloudsync-Fix-bug-in-cloudsync-fops-c.py.patch
Patch0157: 0157-afr-frame-Destroy-frame-after-afr_selfheal_entry_gra.patch
Patch0158: 0158-glusterfsd-cleanup-Protect-graph-object-under-a-lock.patch
Patch0159: 0159-glusterd-add-an-op-version-check.patch
Patch0160: 0160-geo-rep-Geo-rep-help-text-issue.patch
Patch0161: 0161-geo-rep-Fix-rename-with-existing-destination-with-sa.patch
Patch0162: 0162-geo-rep-Fix-sync-method-config.patch
Patch0163: 0163-geo-rep-Fix-sync-hang-with-tarssh.patch
Patch0164: 0164-cluster-ec-Fix-handling-of-heal-info-cases-without-l.patch
Patch0165: 0165-tests-shd-Add-test-coverage-for-shd-mux.patch
Patch0166: 0166-glusterd-svc-glusterd_svcs_stop-should-call-individu.patch
Patch0167: 0167-glusterd-shd-Optimize-the-glustershd-manager-to-send.patch
Patch0168: 0168-cluster-dht-Fix-directory-perms-during-selfheal.patch
Patch0169: 0169-Build-Fix-spec-to-enable-rhel8-client-build.patch
Patch0170: 0170-geo-rep-Convert-gfid-conflict-resolutiong-logs-into-.patch
Patch0171: 0171-posix-add-storage.reserve-size-option.patch
Patch0172: 0172-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
Patch0173: 0173-glusterd-store-fips-mode-rchecksum-option-in-the-inf.patch
Patch0174: 0174-xlator-log-Add-more-logging-in-xlator_is_cleanup_sta.patch
Patch0175: 0175-ec-fini-Fix-race-between-xlator-cleanup-and-on-going.patch
Patch0176: 0176-features-shard-Fix-crash-during-background-shard-del.patch
Patch0177: 0177-features-shard-Fix-extra-unref-when-inode-object-is-.patch
Patch0178: 0178-Cluster-afr-Don-t-treat-all-bricks-having-metadata-p.patch
Patch0179: 0179-tests-Fix-split-brain-favorite-child-policy.t-failur.patch
Patch0180: 0180-ganesha-scripts-Make-generate-epoch.py-python3-compa.patch
Patch0181: 0181-afr-log-before-attempting-data-self-heal.patch
Patch0182: 0182-geo-rep-fix-mountbroker-setup.patch
Patch0183: 0183-glusterd-svc-Stop-stale-process-using-the-glusterd_p.patch
Patch0184: 0184-tests-Add-gating-configuration-file-for-rhel8.patch
Patch0185: 0185-gfapi-provide-an-api-for-setting-statedump-path.patch
Patch0186: 0186-cli-Remove-brick-warning-seems-unnecessary.patch
Patch0187: 0187-gfapi-statedump_path-add-proper-version-number.patch
Patch0188: 0188-features-shard-Fix-integer-overflow-in-block-count-a.patch
Patch0189: 0189-features-shard-Fix-block-count-accounting-upon-trunc.patch
Patch0190: 0190-Build-removing-the-hardcoded-usage-of-python3.patch
Patch0191: 0191-Build-Update-python-shebangs-based-on-version.patch
Patch0192: 0192-build-Ensure-gluster-cli-package-is-built-as-part-of.patch
Patch0193: 0193-spec-fixed-python-dependency-for-rhel6.patch
Patch0194: 0194-stack-Make-sure-to-have-unique-call-stacks-in-all-ca.patch
Patch0195: 0195-build-package-glusterfs-ganesha-for-rhel7-and-above.patch
Patch0196: 0196-posix-ctime-Fix-ctime-upgrade-issue.patch
Patch0197: 0197-posix-fix-crash-in-posix_cs_set_state.patch
Patch0198: 0198-cluster-ec-Prevent-double-pre-op-xattrops.patch
Patch0199: 0199-upcall-Avoid-sending-notifications-for-invalid-inode.patch
Patch0200: 0200-gfapi-fix-incorrect-initialization-of-upcall-syncop-.patch
Patch0201: 0201-geo-rep-Fix-permissions-for-GEOREP_DIR-in-non-root-s.patch
Patch0202: 0202-shd-mux-Fix-race-between-mux_proc-unlink-and-stop.patch
Patch0203: 0203-glusterd-shd-Change-shd-logfile-to-a-unique-name.patch
Patch0204: 0204-glusterd-conditionally-clear-txn_opinfo-in-stage-op.patch
Patch0205: 0205-glusterd-Can-t-run-rebalance-due-to-long-unix-socket.patch
Patch0206: 0206-glusterd-ignore-user.-options-from-compatibility-che.patch
Patch0207: 0207-glusterd-fix-use-after-free-of-a-dict_t.patch
Patch0208: 0208-mem-pool-remove-dead-code.patch
Patch0209: 0209-core-avoid-dynamic-TLS-allocation-when-possible.patch
Patch0210: 0210-mem-pool.-c-h-minor-changes.patch
Patch0211: 0211-libglusterfs-Fix-compilation-when-disable-mempool-is.patch
Patch0212: 0212-core-fix-memory-allocation-issues.patch
Patch0213: 0213-cluster-dht-Strip-out-dht-xattrs.patch
Patch0214: 0214-geo-rep-Upgrading-config-file-to-new-version.patch
Patch0215: 0215-posix-modify-storage.reserve-option-to-take-size-and.patch
Patch0216: 0216-Test-case-fixe-for-downstream-3.5.0.patch
Patch0217: 0217-uss-Fix-tar-issue-with-ctime-and-uss-enabled.patch
Patch0218: 0218-graph-shd-Use-glusterfs_graph_deactivate-to-free-the.patch
Patch0219: 0219-posix-add-posix_set_ctime-in-posix_ftruncate.patch
Patch0220: 0220-graph-shd-Use-top-down-approach-while-cleaning-xlato.patch
Patch0221: 0221-protocol-client-propagte-GF_EVENT_CHILD_PING-only-fo.patch
Patch0222: 0222-cluster-dht-Fixed-a-memleak-in-dht_rename_cbk.patch
Patch0223: 0223-change-get_real_filename-implementation-to-use-ENOAT.patch
Patch0224: 0224-core-replace-inet_addr-with-inet_pton.patch
Patch0225: 0225-tests-utils-Fix-py2-py3-util-python-scripts.patch
Patch0226: 0226-geo-rep-fix-gluster-command-path-for-non-root-sessio.patch
Patch0227: 0227-glusterd-svc-update-pid-of-mux-volumes-from-the-shd-.patch
Patch0228: 0228-locks-enable-notify-contention-by-default.patch
Patch0229: 0229-glusterd-Show-the-correct-brick-status-in-get-state.patch
Patch0230: 0230-Revert-glusterd-svc-update-pid-of-mux-volumes-from-t.patch
Patch0231: 0231-Revert-graph-shd-Use-top-down-approach-while-cleanin.patch
Patch0232: 0232-cluster-afr-Fix-incorrect-reporting-of-gfid-type-mis.patch
Patch0233: 0233-Revert-graph-shd-Use-glusterfs_graph_deactivate-to-f.patch
Patch0234: 0234-Revert-glusterd-shd-Change-shd-logfile-to-a-unique-n.patch
Patch0235: 0235-Revert-glusterd-svc-Stop-stale-process-using-the-glu.patch
Patch0236: 0236-Revert-shd-mux-Fix-race-between-mux_proc-unlink-and-.patch
Patch0237: 0237-Revert-ec-fini-Fix-race-between-xlator-cleanup-and-o.patch
Patch0238: 0238-Revert-xlator-log-Add-more-logging-in-xlator_is_clea.patch
Patch0239: 0239-Revert-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
Patch0240: 0240-Revert-glusterd-shd-Optimize-the-glustershd-manager-.patch
Patch0241: 0241-Revert-glusterd-svc-glusterd_svcs_stop-should-call-i.patch
Patch0242: 0242-Revert-tests-shd-Add-test-coverage-for-shd-mux.patch
Patch0243: 0243-Revert-glusterfsd-cleanup-Protect-graph-object-under.patch
Patch0244: 0244-Revert-ec-shd-Cleanup-self-heal-daemon-resources-dur.patch
Patch0245: 0245-Revert-shd-glusterd-Serialize-shd-manager-to-prevent.patch
Patch0246: 0246-Revert-glusterd-shd-Keep-a-ref-on-volinfo-until-atta.patch
Patch0247: 0247-Revert-afr-shd-Cleanup-self-heal-daemon-resources-du.patch
Patch0248: 0248-Revert-shd-mux-Fix-coverity-issues-introduced-by-shd.patch
Patch0249: 0249-Revert-client-fini-return-fini-after-rpc-cleanup.patch
Patch0250: 0250-Revert-mgmt-shd-Implement-multiplexing-in-self-heal-.patch
Patch0251: 0251-tests-Fix-bug-1717819-metadata-split-brain-detection.patch
Patch0252: 0252-glusterd-do-not-mark-skip_locking-as-true-for-geo-re.patch
Patch0253: 0253-core-fix-deadlock-between-statedump-and-fd_anonymous.patch
Patch0254: 0254-Detach-iot_worker-to-release-its-resources.patch
Patch0255: 0255-Revert-tier-shd-glusterd-with-shd-mux-the-shd-volfil.patch
Patch0256: 0256-features-snapview-server-use-the-same-volfile-server.patch
Patch0257: 0257-geo-rep-Test-case-for-upgrading-config-file.patch
Patch0258: 0258-geo-rep-Fix-mount-broker-setup-issue.patch
Patch0259: 0259-gluster-block-tuning-perf-options.patch
Patch0260: 0260-ctime-Set-mdata-xattr-on-legacy-files.patch
Patch0261: 0261-features-utime-Fix-mem_put-crash.patch
Patch0262: 0262-glusterd-ctime-Disable-ctime-by-default.patch
Patch0263: 0263-tests-fix-ctime-related-tests.patch
Patch0264: 0264-gfapi-Fix-deadlock-while-processing-upcall.patch
Patch0265: 0265-fuse-add-missing-GF_FREE-to-fuse_interrupt.patch
Patch0266: 0266-geo-rep-Fix-mount-broker-setup-issue.patch
Patch0267: 0267-posix-ctime-Fix-race-during-lookup-ctime-xattr-heal.patch
Patch0268: 0268-rpc-transport-have-default-listen-port.patch
Patch0269: 0269-ec-fix-truncate-lock-to-cover-the-write-in-tuncate-c.patch
Patch0270: 0270-cluster-ec-inherit-healing-from-lock-when-it-has-inf.patch
Patch0271: 0271-cluster-ec-fix-EIO-error-for-concurrent-writes-on-sp.patch
Patch0272: 0272-cluster-ec-Always-read-from-good-mask.patch
Patch0273: 0273-cluster-ec-Fix-reopen-flags-to-avoid-misbehavior.patch
Patch0274: 0274-cluster-ec-Update-lock-good_mask-on-parent-fop-failu.patch
Patch0275: 0275-cluster-ec-Create-heal-task-with-heal-process-id.patch
Patch0276: 0276-features-utime-always-update-ctime-at-setattr.patch
Patch0277: 0277-geo-rep-Fix-Config-Get-Race.patch
Patch0278: 0278-geo-rep-Fix-worker-connection-issue.patch
Patch0279: 0279-posix-In-brick_mux-brick-is-crashed-while-start-stop.patch
Patch0280: 0280-performance-md-cache-Do-not-skip-caching-of-null-cha.patch
Patch0281: 0281-ctime-Fix-incorrect-realtime-passed-to-frame-root-ct.patch
Patch0282: 0282-geo-rep-Fix-the-name-of-changelog-archive-file.patch
Patch0283: 0283-ctime-Fix-ctime-issue-with-utime-family-of-syscalls.patch
Patch0284: 0284-posix-log-aio_error-return-codes-in-posix_fs_health_.patch
Patch0285: 0285-glusterd-glusterd-service-is-getting-timed-out-on-sc.patch
Patch0286: 0286-glusterfs.spec.in-added-script-files-for-machine-com.patch
Patch0287: 0287-cluster-ec-Fail-fsync-flush-for-files-on-update-size.patch
Patch0288: 0288-cluster-ec-Fix-coverity-issues.patch
Patch0289: 0289-cluster-ec-quorum-count-implementation.patch
Patch0290: 0290-glusterd-tag-disperse.quorum-count-for-31306.patch
Patch0291: 0291-cluster-ec-Mark-release-only-when-it-is-acquired.patch
Patch0292: 0292-rpc-Update-address-family-if-it-is-not-provide-in-cm.patch
Patch0293: 0293-glusterd-IPV6-hostname-address-is-not-parsed-correct.patch
Patch0294: 0294-eventsapi-Set-IPv4-IPv6-family-based-on-input-IP.patch
Patch0295: 0295-ctime-rebalance-Heal-ctime-xattr-on-directory-during.patch
Patch0296: 0296-glusterfind-pre-command-failure-on-a-modify.patch
Patch0297: 0297-rpmbuild-fixing-the-build-errors-with-2a905a8ae.patch
Patch0298: 0298-geo-rep-fix-sub-command-during-worker-connection.patch
Patch0299: 0299-geo-rep-performance-improvement-while-syncing-rename.patch
Patch0300: 0300-cli-remove-the-warning-displayed-when-remove-brick-s.patch
Patch0301: 0301-posix-Brick-is-going-down-unexpectedly.patch
Patch0302: 0302-cluster-ec-prevent-filling-shd-log-with-table-not-fo.patch
Patch0303: 0303-posix-heketidbstorage-bricks-go-down-during-PVC-crea.patch
Patch0304: 0304-cluster-dht-Correct-fd-processing-loop.patch
Patch0305: 0305-glusterd-rebalance-start-should-fail-when-quorum-is-.patch
Patch0306: 0306-cli-fix-distCount-value.patch
Patch0307: 0307-ssl-fix-RHEL8-regression-failure.patch
Patch0308: 0308-dht-Rebalance-causing-IO-Error-File-descriptor-in-ba.patch
Patch0309: 0309-geo-rep-Fix-config-upgrade-on-non-participating-node.patch
Patch0310: 0310-tests-test-case-for-non-root-geo-rep-setup.patch
Patch0311: 0311-geo-rep-Fix-Permission-denied-traceback-on-non-root-.patch
Patch0312: 0312-Scripts-quota_fsck-script-KeyError-contri_size.patch
Patch0313: 0313-extras-Cgroup-CPU-Mem-restriction-are-not-working-on.patch
Patch0314: 0314-glusterd-tier-is_tier_enabled-inserted-causing-check.patch
Patch0315: 0315-geo-rep-Fix-py2-py3-compatibility-in-repce.patch
Patch0316: 0316-spec-fixed-python-prettytable-dependency-for-rhel6.patch
Patch0317: 0317-Update-rfc.sh-to-rhgs-3.5.1.patch
Patch0318: 0318-Update-rfc.sh-to-rhgs-3.5.1.patch
Patch0319: 0319-features-snapview-server-obtain-the-list-of-snapshot.patch
Patch0320: 0320-gf-event-Handle-unix-volfile-servers.patch
Patch0321: 0321-Adding-white-spaces-to-description-of-set-group.patch
Patch0322: 0322-glusterd-display-correct-rebalance-data-size-after-g.patch
Patch0323: 0323-cli-display-detailed-rebalance-info.patch
Patch0324: 0324-extras-hooks-Add-SELinux-label-on-new-bricks-during-.patch
Patch0325: 0325-extras-hooks-Install-and-package-newly-added-post-ad.patch
Patch0326: 0326-tests-subdir-mount.t-is-failing-for-brick_mux-regrss.patch
Patch0327: 0327-glusterfind-integrate-with-gfid2path.patch
Patch0328: 0328-glusterd-Add-warning-and-abort-in-case-of-failures-i.patch
Patch0329: 0329-cluster-afr-Heal-entries-when-there-is-a-source-no-h.patch
Patch0330: 0330-mount.glusterfs-change-the-error-message.patch
Patch0331: 0331-features-locks-Do-special-handling-for-op-version-3..patch
Patch0332: 0332-Removing-one-top-command-from-gluster-v-help.patch
Patch0333: 0333-rpc-Synchronize-slot-allocation-code.patch
Patch0334: 0334-dht-log-getxattr-failure-for-node-uuid-at-DEBUG.patch
Patch0335: 0335-tests-RHEL8-test-failure-fixes-for-RHGS.patch
Patch0336: 0336-spec-check-and-return-exit-code-in-rpm-scripts.patch
Patch0337: 0337-fuse-Set-limit-on-invalidate-queue-size.patch
Patch0338: 0338-glusterfs-fuse-Reduce-the-default-lru-limit-value.patch
Patch0339: 0339-geo-rep-fix-integer-config-validation.patch
Patch0340: 0340-rpc-event_slot_alloc-converted-infinite-loop-after-r.patch
Patch0341: 0341-socket-fix-error-handling.patch
Patch0342: 0342-Revert-hooks-remove-selinux-hooks.patch
Patch0343: 0343-extras-hooks-syntactical-errors-in-SELinux-hooks-sci.patch
Patch0344: 0344-Revert-all-fixes-to-include-SELinux-hook-scripts.patch
Patch0345: 0345-read-ahead-io-cache-turn-off-by-default.patch
Patch0346: 0346-fuse-degrade-logging-of-write-failure-to-fuse-device.patch
Patch0347: 0347-tools-glusterfind-handle-offline-bricks.patch
Patch0348: 0348-glusterfind-Fix-py2-py3-issues.patch
Patch0349: 0349-glusterfind-python3-compatibility.patch
Patch0350: 0350-tools-glusterfind-Remove-an-extra-argument.patch
Patch0351: 0351-server-Mount-fails-after-reboot-1-3-gluster-nodes.patch

%description
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package includes the glusterfs binary, the glusterfsd daemon and the
libglusterfs and glusterfs translator modules common to both GlusterFS server
and client framework.

%package api
Summary:          GlusterFS api library
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description api
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the glusterfs libgfapi library.

%package api-devel
Summary:          Development Libraries
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-devel%{?_isa} = %{version}-%{release}
Requires:         libacl-devel
Requires:         %{name}-api%{?_isa} = %{version}-%{release}

%description api-devel
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the api include files.

%package cli
Summary:          GlusterFS CLI
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description cli
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the GlusterFS CLI application and its man page.

%package cloudsync-plugins
Summary:          Cloudsync Plugins
BuildRequires:    libcurl-devel

%description cloudsync-plugins
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides cloudsync plugins for the archival feature.

%package devel
Summary:          Development Libraries
Requires:         %{name}%{?_isa} = %{version}-%{release}
# Needed for the Glupy examples to work
%if ( 0%{!?_without_extra_xlators:1} )
Requires:         %{name}-extra-xlators%{?_isa} = %{version}-%{release}
%endif
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
%if ( 0%{!?_without_server:1} )
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
%endif

%description devel
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the development libraries and include files.

%if ( 0%{!?_without_extra_xlators:1} )
%package extra-xlators
Summary:          Extra Gluster filesystem Translators
# We need the python-gluster rpm for the gluster module's __init__.py in
# the Python site-packages area
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
Requires:         python%{_pythonver}

%description extra-xlators
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides extra filesystem Translators, such as Glupy,
for GlusterFS.
%endif

%package fuse
Summary:          Fuse client
BuildRequires:    fuse-devel
Requires:         attr
Requires:         psmisc

Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}

Obsoletes:        %{name}-client < %{version}-%{release}
Provides:         %{name}-client = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description fuse
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for FUSE-based clients and includes the
glusterfs(d) binary.

%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
%package ganesha
Summary:          NFS-Ganesha configuration
Group:            Applications/File

Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         nfs-ganesha-gluster >= 2.7.3
Requires:         pcs, dbus
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
Requires:         cman, pacemaker, corosync
%endif

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 5 )
# we need the portblock resource-agent in 3.9.5 and later.
Requires:         resource-agents >= 3.9.5
Requires:         net-tools
%endif

%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%if ( 0%{?rhel} )
Requires: selinux-policy >= 3.13.1-160
Requires(post):   policycoreutils-python
Requires(postun): policycoreutils-python
%else
Requires(post):   policycoreutils-python-utils
Requires(postun): policycoreutils-python-utils
%endif
%endif

%description ganesha
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the configuration and related files for using
NFS-Ganesha as the NFS server with GlusterFS.
%endif

%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
Summary:          GlusterFS Geo-replication
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         python%{_pythonver}
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
Requires:         python-prettytable
%else
Requires:         python%{_pythonver}-prettytable
%endif
Requires:         python%{_pythonver}-gluster = %{version}-%{release}

Requires:         rsync
Requires:         util-linux
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description geo-replication
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for geo-replication.
%endif

%package libs
Summary:          GlusterFS common libraries

%description libs
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the base GlusterFS libraries.

%package -n python%{_pythonver}-gluster
Summary:          GlusterFS python library
Requires:         python%{_pythonver}
%if ( ! %{_usepython3} )
%{?python_provide:%python_provide python-gluster}
Provides:         python-gluster = %{version}-%{release}
Obsoletes:        python-gluster < 3.10
%endif

%description -n python%{_pythonver}-gluster
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package contains the Python modules of GlusterFS, installed under
their own gluster namespace.

%if ( 0%{!?_without_rdma:1} )
%package rdma
Summary:          GlusterFS rdma support for ib-verbs
%if ( 0%{?fedora} && 0%{?fedora} > 26 )
BuildRequires:    rdma-core-devel
%else
BuildRequires:    libibverbs-devel
BuildRequires:    librdmacm-devel >= 1.0.15
%endif
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description rdma
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for the ib-verbs library.
%endif

%if ( 0%{!?_without_regression_tests:1} )
%package regression-tests
Summary:          Development Tools
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
## thin provisioning support
Requires:         lvm2 >= 2.02.89
Requires:         perl(App::Prove) perl(Test::Harness) gcc util-linux-ng
Requires:         python%{_pythonver}
Requires:         attr dbench file git libacl-devel net-tools
Requires:         nfs-utils xfsprogs yajl psmisc bc

%description regression-tests
The Gluster Test Framework is a suite of scripts used for
regression testing of Gluster.
%endif

%if ( 0%{!?_without_ocf:1} )
%package resource-agents
Summary:          OCF Resource Agents for GlusterFS
License:          GPLv3+
BuildArch:        noarch
# this Group handling comes from the Fedora resource-agents package
# for glusterd
Requires:         %{name}-server = %{version}-%{release}
# depending on the distribution, we need pacemaker or resource-agents
Requires:         %{_prefix}/lib/ocf/resource.d

%description resource-agents
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the resource agents which plug glusterd into
Open Cluster Framework (OCF) compliant cluster resource managers,
like Pacemaker.
%endif

%if ( 0%{!?_without_server:1} )
%package server
Summary:          Clustered file-system server
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-cli%{?_isa} = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
# some daemons (like quota) use a fuse-mount, glusterfsd is part of -fuse
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
# self-heal daemon, rebalance, nfs-server etc. are actually clients
Requires:         %{name}-api%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
# lvm2 for snapshot, and nfs-utils and rpcbind/portmap for the gnfs server
Requires:         lvm2
Requires:         nfs-utils
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%else
Requires(post):   /sbin/chkconfig
Requires(preun):  /sbin/service
Requires(preun):  /sbin/chkconfig
Requires(postun): /sbin/service
%endif
%if (0%{?_with_firewalld:1})
# we install firewalld rules, so we need to have the directory owned
%if ( 0%{!?rhel} )
# not on RHEL because firewalld-filesystem appeared in 7.3
# when EL7 rpm gets weak dependencies we can add a Suggests:
Requires:         firewalld-filesystem
%endif
%endif
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
Requires:         rpcbind
%else
Requires:         portmap
%endif
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
Requires:         python-argparse
%endif
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
Requires:         python%{_pythonver}-pyxattr
%else
Requires:         pyxattr
%endif
%if (0%{?_with_valgrind:1})
Requires:         valgrind
%endif

%description server
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the glusterfs server daemon.
%endif

%package client-xlators
Summary:          GlusterFS client-side translators
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description client-xlators
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in
GlusterFS is in user space and easily manageable.

This package provides the translators needed on any GlusterFS client.

%if ( 0%{!?_without_events:1} )
%package events
Summary:          GlusterFS Events
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         python%{_pythonver}
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
Requires:         python-requests
%else
Requires:         python%{_pythonver}-requests
%endif
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
Requires:         python-prettytable
Requires:         python-argparse
%else
Requires:         python%{_pythonver}-prettytable
%endif
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif

%description events
GlusterFS Events

%endif

%prep
%setup -q -n %{name}-%{version}%{?prereltag}

# sanitization scriptlet for patches with file renames
ls %{_topdir}/SOURCES/*.patch | sort | \
while read p
do
    # if the destination file exists, it's most probably stale
    # so we must remove it
    rename_to=( $(grep -i 'rename to' $p | cut -f 3 -d ' ') )
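    # A git-format patch records a rename as, e.g. (hypothetical paths):
    #   rename from xlators/foo/src/old-name.c
    #   rename to xlators/foo/src/new-name.c
    # so the grep above collects the 'rename to' destinations, letting us
    # drop stale copies before the patch is applied.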
    if [ ${#rename_to[*]} -gt 0 ]; then
        for f in ${rename_to[*]}
        do
            if [ -f $f ]; then
                rm -f $f
            elif [ -d $f ]; then
                rm -rf $f
            fi
        done
    fi

    SOURCE_FILES=( $(egrep '^\-\-\- a/' $p | cut -f 2- -d '/') )
    DEST_FILES=( $(egrep '^\+\+\+ b/' $p | cut -f 2- -d '/') )
    EXCLUDE_DOCS=()
    for idx in ${!SOURCE_FILES[@]}; do
        # skip doc files that are not yet present in the tree
        source_file=${SOURCE_FILES[$idx]}
        dest_file=${DEST_FILES[$idx]}
        if [[ "$dest_file" =~ ^doc/.+ ]]; then
            if [ "$source_file" != "dev/null" ] && [ ! -f "$dest_file" ]; then
                # if patch is being applied to a doc file and if the doc file
                # hasn't been added so far then we need to exclude it
                EXCLUDE_DOCS=( ${EXCLUDE_DOCS[*]} "$dest_file" )
            fi
        fi
    done

    EXCLUDE_DOCS_OPT=""
    for doc in ${EXCLUDE_DOCS[*]}; do
        EXCLUDE_DOCS_OPT="--exclude=$doc $EXCLUDE_DOCS_OPT"
    done

    # HACK to fix build
    bn=$(basename $p)
    if [ "$bn" == "0085-Revert-all-remove-code-which-is-not-being-considered.patch" ]; then
        (patch -p1 -u -F3 < $p || :)
        if [ -f libglusterfs/Makefile.am.rej ]; then
            sed -i -e 's/^SUBDIRS = src/SUBDIRS = src src\/gfdb/g;s/^CLEANFILES = /CLEANFILES =/g' libglusterfs/Makefile.am
        fi
    elif [ "$bn" == "0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0117-spec-Remove-thin-arbiter-package.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0023-hooks-remove-selinux-hooks.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0042-spec-client-server-Builds-are-failing-on-rhel-6.patch" ]; then
        (patch -p1 < $p || :)
    else
        # apply the patch with 'git apply'
        git apply -p1 --exclude=rfc.sh \
                      --exclude=.gitignore \
                      --exclude=.testignore \
                      --exclude=MAINTAINERS \
                      --exclude=extras/checkpatch.pl \
                      --exclude=build-aux/checkpatch.pl \
                      --exclude='tests/*' \
                      ${EXCLUDE_DOCS_OPT} \
                      $p
    fi

done

echo "fixing python shebangs..."
%if ( %{_usepython3} )
    for i in `find . -type f -exec bash -c "if file {} | grep 'Python script, ASCII text executable' >/dev/null; then echo {}; fi" ';'`; do
        sed -i -e 's|^#!/usr/bin/python.*|#!%{__python3}|' -e 's|^#!/usr/bin/env python.*|#!%{__python3}|' $i
    done
%else
    for f in api events extras geo-replication libglusterfs tools xlators; do
        find $f -type f -exec sed -i 's|/usr/bin/python3|/usr/bin/python2|' {} \;
    done
%endif
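
# As an illustration, a script whose first line is '#!/usr/bin/python' or
# '#!/usr/bin/env python' ends up with the expansion of '%%{__python3}' as
# its shebang after the sed expressions above run on a python3 build.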

%build

# In RHEL7 a few hardening flags are enabled by default, but the default
# RELRO behaviour is partial; convert it to full
%if ( 0%{?rhel} && 0%{?rhel} >= 7 )
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
export LDFLAGS
%else
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
CFLAGS="$RPM_OPT_FLAGS -fPIE -DPIE"
LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro,-z,now"
%else
# It appears that with gcc-4.1.2 in RHEL5 there is an issue using both -fPIC
# and -fPIE that makes -z relro not work; -fPIE seems to undo what -fPIC does
CFLAGS="$CFLAGS $RPM_OPT_FLAGS"
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
%endif
export CFLAGS
export LDFLAGS
%endif
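
# A hedged sanity check (run manually on a built binary): full RELRO
# shows up as both a GNU_RELRO segment and the BIND_NOW flag, e.g.
#   readelf -d -l /usr/sbin/glusterfsd | grep -E 'BIND_NOW|GNU_RELRO'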

./autogen.sh && %configure \
        %{?_with_asan} \
        %{?_with_cmocka} \
        %{?_with_debug} \
        %{?_with_firewalld} \
        %{?_with_tmpfilesdir} \
        %{?_with_tsan} \
        %{?_with_valgrind} \
        %{?_without_epoll} \
        %{?_without_events} \
        %{?_without_fusermount} \
        %{?_without_georeplication} \
        %{?_without_ocf} \
        %{?_without_rdma} \
        %{?_without_server} \
        %{?_without_syslog} \
        %{?_without_tiering} \
        %{?_with_ipv6default} \
        %{?_without_libtirpc}

# fix hardening and remove rpath in shlibs
%if ( 0%{?fedora} && 0%{?fedora} > 17 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
sed -i 's| \\\$compiler_flags |&\\\$LDFLAGS |' libtool
%endif
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|' libtool
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|' libtool
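# To confirm the rpath was really stripped from the installed libraries,
# one can run (hedged example, library name may vary):
#   readelf -d /usr/lib64/libglusterfs.so.0 | grep -E 'RPATH|RUNPATH'
# which should produce no output.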

make %{?_smp_mflags}

%check
make check

%install
rm -rf %{buildroot}
make install DESTDIR=%{buildroot}
%if ( 0%{!?_without_server:1} )
%if ( 0%{_for_fedora_koji_builds} )
install -D -p -m 0644 %{SOURCE1} \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
install -D -p -m 0644 %{SOURCE2} \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterfsd
%else
install -D -p -m 0644 extras/glusterd-sysconfig \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
%endif
%endif

mkdir -p %{buildroot}%{_localstatedir}/log/glusterd
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfs
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfsd
mkdir -p %{buildroot}%{_rundir}/gluster

# Remove unwanted files from all the shared libraries
find %{buildroot}%{_libdir} -name '*.a' -delete
find %{buildroot}%{_libdir} -name '*.la' -delete

# Remove installed docs, the ones we want are included by %%doc, in
# /usr/share/doc/glusterfs or /usr/share/doc/glusterfs-x.y.z depending
# on the distribution
%if ( 0%{?fedora} && 0%{?fedora} > 19 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
rm -rf %{buildroot}%{_pkgdocdir}/*
%else
rm -rf %{buildroot}%{_defaultdocdir}/%{name}
mkdir -p %{buildroot}%{_pkgdocdir}
%endif
head -50 ChangeLog > ChangeLog.head && mv ChangeLog.head ChangeLog
cat << EOM >> ChangeLog

More commit messages for this ChangeLog can be found at
https://forge.gluster.org/glusterfs-core/glusterfs/commits/v%{version}%{?prereltag}
EOM

# Remove benchmarking and other unpackaged files
# make install always puts these in %%{_defaultdocdir}/%%{name} so don't
# use %%{_pkgdocdir}; that will be wrong on later Fedora distributions
rm -rf %{buildroot}%{_defaultdocdir}/%{name}/benchmarking
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs-mode.el
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs.vim

%if ( 0%{!?_without_server:1} )
# Create working directory
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd

# Update configuration file to /var/lib working directory
sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sharedstatedir}/glusterd|g' \
    %{buildroot}%{_sysconfdir}/glusterfs/glusterd.vol
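# After the sed above, glusterd.vol carries a line like (illustrative):
#   option working-directory /var/lib/glusterd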
%endif

# Install glusterfsd .service or init.d file
%if ( 0%{!?_without_server:1} )
%if ( 0%{_for_fedora_koji_builds} )
%service_install glusterfsd %{glusterfsd_svcfile}
%endif
%endif

install -D -p -m 0644 extras/glusterfs-logrotate \
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs

# ganesha ghosts
%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
mkdir -p %{buildroot}%{_sysconfdir}/ganesha
touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf
mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/
touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
%endif

%if ( 0%{!?_without_georeplication:1} )
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
install -D -p -m 0644 extras/glusterfs-georep-logrotate \
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
%endif

%if ( 0%{!?_without_server:1} )
touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
touch %{buildroot}%{_sharedstatedir}/glusterd/options
subdirs=(add-brick create copy-file delete gsync-create remove-brick reset set start stop)
for dir in ${subdirs[@]}; do
    mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/"$dir"/{pre,post}
done
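# e.g. the loop above creates pairs such as
# %%{_sharedstatedir}/glusterd/hooks/1/add-brick/pre and .../post
# for every hook type listed in $subdirs.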
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/glustershd
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/peers
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/vols
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/nfs/run
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/bitd
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/quotad
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/scrub
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/snaps
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/ss_brick
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/nfs-server.vol
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%endif

find ./tests ./run-tests.sh -type f | cpio -pd %{buildroot}%{_prefix}/share/glusterfs
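# The regression tests are packaged under %%{_prefix}/share/glusterfs;
# once installed they can be run with (hedged example):
#   /usr/share/glusterfs/run-tests.sh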

## Install bash completion for cli
install -p -m 0744 -D extras/command-completion/gluster.bash \
    %{buildroot}%{_sysconfdir}/bash_completion.d/gluster
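# Once the package is installed, the completion can be exercised in a
# fresh shell (hedged example):
#   . /etc/bash_completion.d/gluster
#   gluster vol<TAB>      # completes to 'volume'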

%if ( 0%{!?_without_server:1} )
echo "RHGS 3.5" > %{buildroot}%{_datadir}/glusterfs/release
%endif

%clean
rm -rf %{buildroot}

##-----------------------------------------------------------------------------
## All %%post should be placed here and keep them sorted
##
%post
/sbin/ldconfig
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%systemd_postun_with_restart rsyslog
%endif
%endif
exit 0

%post api
/sbin/ldconfig

%if ( 0%{!?_without_events:1} )
%post events
%service_enable glustereventsd
%endif

%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%post ganesha
semanage boolean -m ganesha_use_fusefs --on
exit 0
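# The boolean change can be verified manually with, e.g.:
#   getsebool ganesha_use_fusefs
# which should report 'on' after installation.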
%endif
%endif

%if ( 0%{!?_without_georeplication:1} )
%post geo-replication
if [ $1 -ge 1 ]; then
    %systemd_postun_with_restart glusterd
fi
exit 0
%endif

%post libs
/sbin/ldconfig

%if ( 0%{!?_without_server:1} )
%post server
# Legacy server
%service_enable glusterd
%if ( 0%{_for_fedora_koji_builds} )
%service_enable glusterfsd
%endif
# ".cmd_log_history" was renamed to "cmd_history.log" in GlusterFS 3.7.
# While upgrading the glusterfs-server package from GlusterFS version <= 3.6
# to GlusterFS version 3.7, ".cmd_log_history" should be renamed to
# "cmd_history.log" to retain the cli command history contents.
if [ -f %{_localstatedir}/log/glusterfs/.cmd_log_history ]; then
    mv %{_localstatedir}/log/glusterfs/.cmd_log_history \
       %{_localstatedir}/log/glusterfs/cmd_history.log
fi

# Genuine Fedora (and EPEL) builds never put gluster files in /etc; if
# there are any files in /etc from a prior gluster.org install, move them
# to /var/lib. (N.B. Starting with 3.3.0 all gluster files are in /var/lib
# in gluster.org RPMs.) Be careful to copy them on the off chance that
# /etc and /var/lib are on separate file systems
if [ -d /etc/glusterd -a ! -h %{_sharedstatedir}/glusterd ]; then
    mkdir -p %{_sharedstatedir}/glusterd
    cp -a /etc/glusterd %{_sharedstatedir}/glusterd
    rm -rf /etc/glusterd
    ln -sf %{_sharedstatedir}/glusterd /etc/glusterd
fi

# Rename old volfiles in an RPM-standard way.  These aren't actually
# considered package config files, so %%config doesn't work for them.
if [ -d %{_sharedstatedir}/glusterd/vols ]; then
    for file in $(find %{_sharedstatedir}/glusterd/vols -name '*.vol'); do
        newfile=${file}.rpmsave
        echo "warning: ${file} saved as ${newfile}"
        cp ${file} ${newfile}
    done
fi

# add marker translator
# but first make certain that there are no old libs around to bite us
# BZ 834847
if [ -e /etc/ld.so.conf.d/glusterfs.conf ]; then
    rm -f /etc/ld.so.conf.d/glusterfs.conf
    /sbin/ldconfig
fi

%if (0%{?_with_firewalld:1})
    %firewalld_reload
%endif
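
# With firewalld active, the shipped service definition can be inspected
# and enabled manually (hedged example):
#   firewall-cmd --info-service=glusterfs
#   firewall-cmd --permanent --add-service=glusterfs && firewall-cmd --reload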

%endif

##-----------------------------------------------------------------------------
## All %%pre should be placed here and keep them sorted
##
%pre
getent group gluster > /dev/null || groupadd -r gluster
getent passwd gluster > /dev/null || useradd -r -g gluster -d %{_rundir}/gluster -s /sbin/nologin -c "GlusterFS daemons" gluster
exit 0
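
# The resulting service account can be inspected with (hedged example):
#   getent passwd gluster
# expected shape: gluster:x:...:...:GlusterFS daemons:/run/gluster:/sbin/nologin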

##-----------------------------------------------------------------------------
## All %%preun should be placed here and keep them sorted
##
%if ( 0%{!?_without_events:1} )
%preun events
if [ $1 -eq 0 ]; then
    if [ -f %glustereventsd_svcfile ]; then
        %service_stop glustereventsd
        %systemd_preun glustereventsd
    fi
fi
exit 0
%endif

%if ( 0%{!?_without_server:1} )
%preun server
if [ $1 -eq 0 ]; then
    if [ -f %glusterfsd_svcfile ]; then
        %service_stop glusterfsd
    fi
    %service_stop glusterd
    if [ -f %glusterfsd_svcfile ]; then
        %systemd_preun glusterfsd
    fi
    %systemd_preun glusterd
fi
if [ $1 -ge 1 ]; then
    if [ -f %glusterfsd_svcfile ]; then
        %systemd_postun_with_restart glusterfsd
    fi
    %systemd_postun_with_restart glusterd
fi
exit 0
%endif

##-----------------------------------------------------------------------------
## All %%postun should be placed here and keep them sorted
##
%postun
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%systemd_postun_with_restart rsyslog
%endif
%endif

%if ( 0%{!?_without_server:1} )
%postun server
%if (0%{?_with_firewalld:1})
    %firewalld_reload
%endif
exit 0
%endif

%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%postun ganesha
semanage boolean -m ganesha_use_fusefs --off
exit 0
%endif
%endif

##-----------------------------------------------------------------------------
## All %%trigger should be placed here and keep them sorted
##
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%trigger ganesha -- selinux-policy-targeted
semanage boolean -m ganesha_use_fusefs --on
exit 0
%endif
%endif

##-----------------------------------------------------------------------------
## All %%triggerun should be placed here and keep them sorted
##
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%triggerun ganesha -- selinux-policy-targeted
semanage boolean -m ganesha_use_fusefs --off
exit 0
%endif
%endif

##-----------------------------------------------------------------------------
## All %%files should be placed here and keep them grouped
##
%files
%doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS COMMITMENT
%{_mandir}/man8/*gluster*.8*
%if ( 0%{!?_without_server:1} )
%exclude %{_mandir}/man8/gluster.8*
%endif
%dir %{_localstatedir}/log/glusterfs
%if ( 0%{!?_without_rdma:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif
%if 0%{?!_without_server:1}
%dir %{_datadir}/glusterfs
%dir %{_datadir}/glusterfs/scripts
     %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
     %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
%endif
%{_datadir}/glusterfs/scripts/identify-hangs.sh
%{_datadir}/glusterfs/scripts/collect-system-stats.sh
%{_datadir}/glusterfs/scripts/log_accounting.sh
# xlators that are needed on the client- and on the server-side
%dir %{_libdir}/glusterfs
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/auth
     %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/addr.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/login.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
     %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/socket.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/error-gen.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/delay-gen.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/io-stats.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/sink.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/trace.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/access-control.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/barrier.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cdc.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changelog.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/utime.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/gfid-access.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/namespace.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/read-only.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/shard.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-client.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/worm.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cloudsync.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/meta.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-cache.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-threads.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/md-cache.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/open-behind.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/quick-read.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/read-ahead.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/readdir-ahead.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/stat-prefetch.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/write-behind.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/nl-cache.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system/posix-acl.so
%dir %attr(0775,gluster,gluster) %{_rundir}/gluster
%if 0%{?_tmpfilesdir:1} && 0%{!?_without_server:1}
%{_tmpfilesdir}/gluster.conf
%endif
%if ( 0%{?_without_extra_xlators:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
%endif
%if ( 0%{?_without_regression_tests:1} )
%exclude %{_datadir}/glusterfs/run-tests.sh
%exclude %{_datadir}/glusterfs/tests
%endif
%if 0%{?_without_server:1}
%if ( 0%{?_with_systemd:1} )
%exclude %{_datadir}/glusterfs/scripts/control-cpu-load.sh
%exclude %{_datadir}/glusterfs/scripts/control-mem.sh
%endif
%endif

%if ( 0%{?_without_server:1} || 0%{?rhel} < 7 )
# exclude ganesha-related files for RHEL 6 and client builds
%exclude %{_sysconfdir}/ganesha/ganesha-ha.conf.sample
%exclude %{_libexecdir}/ganesha/*
%exclude %{_prefix}/lib/ocf/resource.d/heartbeat/*
%if ( 0%{!?_without_server:1} )
%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
%endif
%endif

%exclude %{_datadir}/glusterfs/scripts/setup-thin-arbiter.sh

%if ( 0%{?_without_server:1} )
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
%endif

%files api
%exclude %{_libdir}/*.so
# libgfapi files
%{_libdir}/libgfapi.*
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so

%files api-devel
%{_libdir}/pkgconfig/glusterfs-api.pc
%{_libdir}/libgfapi.so
%dir %{_includedir}/glusterfs
%dir %{_includedir}/glusterfs/api
     %{_includedir}/glusterfs/api/*

%files cli
%{_sbindir}/gluster
%{_mandir}/man8/gluster.8*
%{_sysconfdir}/bash_completion.d/gluster

%files cloudsync-plugins
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins
     %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsyncs3.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsynccvlt.so

%files devel
%dir %{_includedir}/glusterfs
     %{_includedir}/glusterfs/*
%exclude %{_includedir}/glusterfs/api
%exclude %{_libdir}/libgfapi.so
%{_libdir}/*.so
%if ( 0%{?_without_server:1} )
%exclude %{_libdir}/pkgconfig/libgfchangelog.pc
%exclude %{_libdir}/libgfchangelog.so
%if ( 0%{!?_without_tiering:1} )
%exclude %{_libdir}/pkgconfig/libgfdb.pc
%endif
%else
%{_libdir}/pkgconfig/libgfchangelog.pc
%if ( 0%{!?_without_tiering:1} )
%{_libdir}/pkgconfig/libgfdb.pc
%endif
%endif

%files client-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/*.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so

%if ( 0%{!?_without_extra_xlators:1} )
%files extra-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
%endif

%files fuse
# glusterfs is a symlink to glusterfsd, -server depends on -fuse.
%{_sbindir}/glusterfs
%{_sbindir}/glusterfsd
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/fuse.so
/sbin/mount.glusterfs
%if ( 0%{!?_without_fusermount:1} )
%{_bindir}/fusermount-glusterfs
%endif

%if ( 0%{!?_without_georeplication:1} )
%files geo-replication
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep

%{_sbindir}/gfind_missing_files
%{_sbindir}/gluster-mountbroker
%dir %{_libexecdir}/glusterfs
%dir %{_libexecdir}/glusterfs/python
%dir %{_libexecdir}/glusterfs/python/syncdaemon
     %{_libexecdir}/glusterfs/gsyncd
     %{_libexecdir}/glusterfs/python/syncdaemon/*
     %{_libexecdir}/glusterfs/gverify.sh
     %{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
     %{_libexecdir}/glusterfs/peer_gsec_create
     %{_libexecdir}/glusterfs/peer_mountbroker
     %{_libexecdir}/glusterfs/peer_mountbroker.py*
     %{_libexecdir}/glusterfs/gfind_missing_files
     %{_libexecdir}/glusterfs/peer_georep-sshkey.py*
%{_sbindir}/gluster-georep-sshkey

       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/geo-replication
%ghost      %attr(0644,-,-) %{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/pre

%dir %{_datadir}/glusterfs
%dir %{_datadir}/glusterfs/scripts
     %{_datadir}/glusterfs/scripts/get-gfid.sh
     %{_datadir}/glusterfs/scripts/slave-upgrade.sh
     %{_datadir}/glusterfs/scripts/gsync-upgrade.sh
     %{_datadir}/glusterfs/scripts/generate-gfid-file.sh
     %{_datadir}/glusterfs/scripts/gsync-sync-gfid
     %{_datadir}/glusterfs/scripts/schedule_georep.py*
%endif

%files libs
%{_libdir}/*.so.*
%exclude %{_libdir}/libgfapi.*
%if ( 0%{!?_without_tiering:1} )
# libgfdb is only needed server-side
%exclude %{_libdir}/libgfdb.*
%endif

%files -n python%{_pythonver}-gluster
# introduce the glusterfs module in site-packages so that all other
# gluster submodules can reside in the same namespace
%if ( %{_usepython3} )
%dir %{python3_sitelib}/gluster
     %{python3_sitelib}/gluster/__init__.*
     %{python3_sitelib}/gluster/__pycache__
     %{python3_sitelib}/gluster/cliutils
%else
%dir %{python2_sitelib}/gluster
     %{python2_sitelib}/gluster/__init__.*
     %{python2_sitelib}/gluster/cliutils
%endif

%if ( 0%{!?_without_rdma:1} )
%files rdma
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
     %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif

%if ( 0%{!?_without_regression_tests:1} )
%files regression-tests
%dir %{_datadir}/glusterfs
     %{_datadir}/glusterfs/run-tests.sh
     %{_datadir}/glusterfs/tests
%exclude %{_datadir}/glusterfs/tests/vagrant
%endif

%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
%files ganesha
%dir %{_libexecdir}/ganesha
%{_sysconfdir}/ganesha/ganesha-ha.conf.sample
%{_libexecdir}/ganesha/*
%{_prefix}/lib/ocf/resource.d/heartbeat/*
%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
%ghost      %attr(0644,-,-) %config(noreplace) %{_sysconfdir}/ganesha/ganesha-ha.conf
%ghost %dir %attr(0755,-,-) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha
%ghost      %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
%ghost      %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
%endif

%if ( 0%{!?_without_ocf:1} )
%files resource-agents
# /usr/lib is the standard for OCF, also on x86_64
%{_prefix}/lib/ocf/resource.d/glusterfs
%endif

%if ( 0%{!?_without_server:1} )
%files server
%doc extras/clear_xattrs.sh
%{_datadir}/glusterfs/scripts/xattr_analysis.py*
%{_datadir}/glusterfs/scripts/quota_fsck.py*
# sysconf
%config(noreplace) %{_sysconfdir}/glusterfs
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
%exclude %{_sysconfdir}/glusterfs/eventsconfig.json
%config(noreplace) %{_sysconfdir}/sysconfig/glusterd
%if ( 0%{_for_fedora_koji_builds} )
%config(noreplace) %{_sysconfdir}/sysconfig/glusterfsd
%endif

# init files
%glusterd_svcfile
%if ( 0%{_for_fedora_koji_builds} )
%glusterfsd_svcfile
%endif
%if ( 0%{?_with_systemd:1} )
%glusterfssharedstorage_svcfile
%endif

# binaries
%{_sbindir}/glusterd
%{_sbindir}/glfsheal
%{_sbindir}/gf_attach
%{_sbindir}/gluster-setgfid2path
# {_sbindir}/glusterfsd is the actual binary, but glusterfs (client) is a
# symlink. The binary itself (and symlink) are part of the glusterfs-fuse
# package, because glusterfs-server depends on that anyway.

# Manpages
%{_mandir}/man8/gluster-setgfid2path.8*

# xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/sdfs.so
%if ( 0%{!?_without_tiering:1} )
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
     %{_libdir}/libgfdb.so.*
%endif
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/selinux.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage/posix.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so

# snap_scheduler
%{_sbindir}/snap_scheduler.py
%{_sbindir}/gcron.py
%{_sbindir}/conf.py

# /var/lib/glusterd, e.g. hookscripts, etc.
%ghost      %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
%ghost      %attr(0600,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/options
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/bitd
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/groups
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/virt
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/metadata-cache
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/gluster-block
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/nl-cache
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/db-workload
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/distributed-virt
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/samba
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind/.keys
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glustershd
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/disabled-quota-root-xattr-heal.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/S13create-subdir-mounts.sh
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/post
                            %{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S30samba-set.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S32gluster_enable_shared_storage.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/post
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/peers
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/quotad
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/scrub
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/snaps
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/ss_brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/vols

# Extra utility script
%dir %{_libexecdir}/glusterfs
     %{_datadir}/glusterfs/release
%dir %{_datadir}/glusterfs/scripts
     %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
%if ( 0%{?_with_systemd:1} )
     %{_libexecdir}/glusterfs/mount-shared-storage.sh
     %{_datadir}/glusterfs/scripts/control-cpu-load.sh
     %{_datadir}/glusterfs/scripts/control-mem.sh
%endif

# Incremental API (glusterfind)
     %{_libexecdir}/glusterfs/glusterfind
%{_bindir}/glusterfind
     %{_libexecdir}/glusterfs/peer_add_secret_pub

%if ( 0%{?_with_firewalld:1} )
%{_prefix}/lib/firewalld/services/glusterfs.xml
%endif
# end of server files
%endif

# Events
%if ( 0%{!?_without_events:1} )
%files events
%config(noreplace) %{_sysconfdir}/glusterfs/eventsconfig.json
%dir %{_sharedstatedir}/glusterd
%dir %{_sharedstatedir}/glusterd/events
%dir %{_libexecdir}/glusterfs
     %{_libexecdir}/glusterfs/gfevents
     %{_libexecdir}/glusterfs/peer_eventsapi.py*
%{_sbindir}/glustereventsd
%{_sbindir}/gluster-eventsapi
%{_datadir}/glusterfs/scripts/eventsdash.py*
%if ( 0%{?_with_systemd:1} )
%{_unitdir}/glustereventsd.service
%else
%{_sysconfdir}/init.d/glustereventsd
%endif
%endif

##-----------------------------------------------------------------------------
## All %%pretrans should be placed here and keep them sorted
##
%if 0%{!?_without_server:1}
%pretrans -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped."
          echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!"
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime."
   echo "WARNING: Be sure to wait for the upgraded server to finish healing before proceeding."
   echo "WARNING: Refer to the upgrade section of the install guide for more details."
   echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;"
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end
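
-- Note: in /var/lib/glusterd/vols/<vol>/info, type=0 denotes a plain
-- distribute volume and status=1 means the volume is started; that is
-- what the shell fragment above keys on when it refuses an in-service
-- upgrade while started distribute volumes exist.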


%pretrans api -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans api-devel -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans cli -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans client-xlators -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans fuse -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%if ( 0%{!?_without_georeplication:1} )
%pretrans geo-replication -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end
%endif


%pretrans libs -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%if ( 0%{!?_without_rdma:1} )
%pretrans rdma -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end
%endif


%if ( 0%{!?_without_ocf:1} )
%pretrans resource-agents -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end
%endif


%pretrans server -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end

%posttrans server
pidof -c -o %PPID -x glusterd &> /dev/null
if [ $? -eq 0 ]; then
    kill -9 `pgrep -f gsyncd.py` &> /dev/null

    killall --wait -SIGTERM glusterd &> /dev/null

    if [ "$?" != "0" ]; then
        echo "killall failed while killing glusterd"
    fi

    glusterd --xlator-option *.upgrade=on -N

    # Clean up the leftover glusterd socket file, which glusterd created
    # in rpm_script_t context.
    rm -rf /var/run/glusterd.socket

    # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
    # so start it again
    %service_start glusterd
else
    glusterd --xlator-option *.upgrade=on -N

    # Clean up the leftover glusterd socket file, which glusterd created
    # in rpm_script_t context.
    rm -rf /var/run/glusterd.socket
fi
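
# Note: 'glusterd --xlator-option *.upgrade=on -N' runs glusterd once in
# the foreground (-N) purely to regenerate the volfiles for the new
# version and then exits on its own, which is why it is started again
# above only when it had been running before the upgrade.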

%endif

%changelog
* Wed May 20 2020 CentOS Sources <bugs@centos.org> - 6.0-29.el7.centos
- remove vendor and/or packager lines

* Thu Jan 23 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-29
- fixes bugs bz#1793035

* Tue Jan 14 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-28
- fixes bugs bz#1789447

* Mon Jan 13 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-27
- fixes bugs bz#1789447

* Fri Jan 10 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-26
- fixes bugs bz#1763208 bz#1788656

* Mon Dec 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-25
- fixes bugs bz#1686800 bz#1763208 bz#1779696 bz#1781444 bz#1782162

* Thu Nov 28 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-24
- fixes bugs bz#1768786

* Thu Nov 21 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-23
- fixes bugs bz#1344758 bz#1599802 bz#1685406 bz#1686800 bz#1724021
  bz#1726058 bz#1727755 bz#1731513 bz#1741193 bz#1758923 bz#1761326 bz#1761486
  bz#1762180 bz#1764095 bz#1766640

* Thu Nov 14 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-22
- fixes bugs bz#1771524 bz#1771614

* Fri Oct 25 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-21
- fixes bugs bz#1765555

* Wed Oct 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-20
- fixes bugs bz#1719171 bz#1763412 bz#1764202

* Thu Oct 17 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-19
- fixes bugs bz#1760939

* Wed Oct 16 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-18
- fixes bugs bz#1758432

* Fri Oct 11 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-17
- fixes bugs bz#1704562 bz#1758618 bz#1760261

* Wed Oct 09 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-16
- fixes bugs bz#1752713 bz#1756325

* Fri Sep 27 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-15
- fixes bugs bz#1726000 bz#1731826 bz#1754407 bz#1754790 bz#1755227

* Fri Sep 20 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-14
- fixes bugs bz#1719171 bz#1728673 bz#1731896 bz#1732443 bz#1733970
  bz#1745107 bz#1746027 bz#1748688 bz#1750241 bz#1572163

* Fri Aug 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-13
- fixes bugs bz#1729915 bz#1732376 bz#1743611 bz#1743627 bz#1743634 bz#1744518

* Fri Aug 09 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-12
- fixes bugs bz#1730914 bz#1731448 bz#1732770 bz#1732792 bz#1733531
  bz#1734305 bz#1734534 bz#1734734 bz#1735514 bz#1737705 bz#1732774
  bz#1732793

* Tue Aug 06 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-11
- fixes bugs bz#1733520 bz#1734423

* Fri Aug 02 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-10
- fixes bugs bz#1713890

* Tue Jul 23 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-9
- fixes bugs bz#1708064 bz#1708180 bz#1715422 bz#1720992 bz#1722757

* Tue Jul 16 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-8
- fixes bugs bz#1698435 bz#1712591 bz#1715447 bz#1720488 bz#1722209
  bz#1722512 bz#1724089 bz#1726991 bz#1727785 bz#1729108

* Fri Jun 28 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-7
- fixes bugs bz#1573077 bz#1600918 bz#1703423 bz#1704207 bz#1708064
  bz#1709301 bz#1713664 bz#1716760 bz#1717784 bz#1720163 bz#1720192
  bz#1720551 bz#1721351 bz#1721357 bz#1721477 bz#1722131 bz#1722331
  bz#1722509 bz#1722801 bz#1720248

* Fri Jun 14 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-6
233933
- fixes bugs bz#1668001 bz#1708043 bz#1708183 bz#1710701 
233933
  bz#1719640 bz#1720079 bz#1720248 bz#1720318 bz#1720461
233933
233933
* Tue Jun 11 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-5
233933
- fixes bugs bz#1573077 bz#1694595 bz#1703434 bz#1714536 bz#1714588 
233933
  bz#1715407 bz#1715438 bz#1705018
233933
233933
* Fri Jun 07 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-4
233933
- fixes bugs bz#1480907 bz#1702298 bz#1703455 bz#1704181 bz#1707246
233933
  bz#1708067 bz#1708116 bz#1708121 bz#1709087 bz#1711249 bz#1711296 
233933
  bz#1714078 bz#1714124 bz#1716385 bz#1716626 bz#1716821 bz#1716865 bz#1717927
233933
233933
* Tue May 14 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-3
233933
- fixes bugs bz#1583585 bz#1671862 bz#1702686 bz#1703434 bz#1703753 
233933
  bz#1703897 bz#1704562 bz#1704769 bz#1704851 bz#1706683 bz#1706776 bz#1706893
233933
233933
* Thu Apr 25 2019 Milind Changire <mchangir@redhat.com> - 6.0-2
233933
- fixes bugs bz#1471742 bz#1652461 bz#1671862 bz#1676495 bz#1691620 
233933
  bz#1696334 bz#1696903 bz#1697820 bz#1698436 bz#1698728 bz#1699709 bz#1699835 
233933
  bz#1702240
233933
233933
* Mon Apr 08 2019 Milind Changire <mchangir@redhat.com> - 6.0-1
233933
- rebase to upstream glusterfs at v6.0
233933
- fixes bugs bz#1493284 bz#1578703 bz#1600918 bz#1670415 bz#1691620 
233933
  bz#1693935 bz#1695057
233933