%global _hardened_build 1

%global _for_fedora_koji_builds 0

# uncomment and add '%' to use the prereltag for pre-releases
# %%global prereltag qa3

##-----------------------------------------------------------------------------
## All argument definitions should be placed here and keep them sorted
##
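
# Note: the --with/--without conditionals below can be combined in a single
# rpmbuild invocation, for example (illustrative only):
# rpmbuild -ta glusterfs-6.0.tar.gz --with debug --without server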

# asan
# if you wish to compile an rpm with address sanitizer...
# rpmbuild -ta glusterfs-6.0.tar.gz --with asan
%{?_with_asan:%global _with_asan --enable-asan}

%if ( 0%{?rhel} && 0%{?rhel} < 7 )
%global _with_asan %{nil}
%endif

# bd
# if you wish to compile an rpm without the BD map support...
# rpmbuild -ta glusterfs-6.0.tar.gz --without bd
%{?_without_bd:%global _without_bd --disable-bd-xlator}

%if ( 0%{?rhel} && 0%{?rhel} > 7 )
%global _without_bd --without-bd
%endif

# cmocka
# if you wish to compile an rpm with cmocka unit testing...
# rpmbuild -ta glusterfs-6.0.tar.gz --with cmocka
%{?_with_cmocka:%global _with_cmocka --enable-cmocka}

# debug
# if you wish to compile an rpm with debugging...
# rpmbuild -ta glusterfs-6.0.tar.gz --with debug
%{?_with_debug:%global _with_debug --enable-debug}

# epoll
# if you wish to compile an rpm without epoll...
# rpmbuild -ta glusterfs-6.0.tar.gz --without epoll
%{?_without_epoll:%global _without_epoll --disable-epoll}

# fusermount
# if you wish to compile an rpm without fusermount...
# rpmbuild -ta glusterfs-6.0.tar.gz --without fusermount
%{?_without_fusermount:%global _without_fusermount --disable-fusermount}

# geo-rep
# if you wish to compile an rpm without geo-replication support, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without georeplication
%{?_without_georeplication:%global _without_georeplication --disable-georeplication}

# ipv6default
# if you wish to compile an rpm with IPv6 default...
# rpmbuild -ta glusterfs-6.0.tar.gz --with ipv6default
%{?_with_ipv6default:%global _with_ipv6default --with-ipv6-default}

# libtirpc
# if you wish to compile an rpm without TIRPC (i.e. use legacy glibc rpc)
# rpmbuild -ta glusterfs-6.0.tar.gz --without libtirpc
%{?_without_libtirpc:%global _without_libtirpc --without-libtirpc}

# Do not use libtirpc on EL6, it does not have xdr_uint64_t() and xdr_uint32_t()
# Do not use libtirpc on EL7, it does not have xdr_sizeof()
%if ( 0%{?rhel} && 0%{?rhel} <= 7 )
%global _without_libtirpc --without-libtirpc
%endif


# ocf
# if you wish to compile an rpm without the OCF resource agents...
# rpmbuild -ta glusterfs-6.0.tar.gz --without ocf
%{?_without_ocf:%global _without_ocf --without-ocf}

# rdma
# if you wish to compile an rpm without rdma support, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without rdma
%{?_without_rdma:%global _without_rdma --disable-ibverbs}

# No RDMA Support on 32-bit ARM
%ifarch armv7hl
%global _without_rdma --disable-ibverbs
%endif

# server
# if you wish to build rpms without server components, compile like this
# rpmbuild -ta glusterfs-6.0.tar.gz --without server
%{?_without_server:%global _without_server --without-server}

# forcefully disable server components on RHEL unless building with an RHGS dist tag
%if ( 0%{?rhel} )
%if (!(( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" ) || ( "%{?dist}" == ".el8rhgs" )))
%global _without_server --without-server
%endif
%endif

%global _without_extra_xlators 1
%global _without_regression_tests 1

# syslog
# if you wish to build rpms without syslog logging, compile like this
# rpmbuild -ta glusterfs-6.0.tar.gz --without syslog
%{?_without_syslog:%global _without_syslog --disable-syslog}

# disable syslog forcefully as rhel <= 6 doesn't have rsyslog or rsyslog-mmcount
# Fedora deprecated syslog, see
#  https://fedoraproject.org/wiki/Changes/NoDefaultSyslog
# (And what about RHEL7?)
%if ( 0%{?fedora} && 0%{?fedora} >= 20 ) || ( 0%{?rhel} && 0%{?rhel} <= 6 )
%global _without_syslog --disable-syslog
%endif

# tsan
# if you wish to compile an rpm with thread sanitizer...
# rpmbuild -ta glusterfs-6.0.tar.gz --with tsan
%{?_with_tsan:%global _with_tsan --enable-tsan}

%if ( 0%{?rhel} && 0%{?rhel} < 7 )
%global _with_tsan %{nil}
%endif

# valgrind
# if you wish to compile an rpm to run all processes under valgrind...
# rpmbuild -ta glusterfs-6.0.tar.gz --with valgrind
%{?_with_valgrind:%global _with_valgrind --enable-valgrind}

##-----------------------------------------------------------------------------
## All %%global definitions should be placed here and keep them sorted
##

# selinux booleans whose default value needs modification
# these booleans will be consumed by the "%%selinux_set_booleans" macro.
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
%global selinuxbooleans rsync_full_access=1 rsync_client=1
%endif
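# illustrative only: a scriptlet on these releases would typically apply the
# booleans with the selinux-policy macro, e.g.
#   %%selinux_set_booleans %%{selinuxbooleans}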

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
%global _with_systemd true
%endif

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 7 )
%global _with_firewalld --enable-firewalld
%endif

%if 0%{?_tmpfilesdir:1}
%global _with_tmpfilesdir --with-tmpfilesdir=%{_tmpfilesdir}
%else
%global _with_tmpfilesdir --without-tmpfilesdir
%endif

# without server should also disable some server-only components
%if 0%{?_without_server:1}
%global _without_events --disable-events
%global _without_georeplication --disable-georeplication
%global _without_tiering --disable-tiering
%global _without_ocf --without-ocf
%endif
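# e.g. building with '--without server' therefore adds, via the globals above:
#   --without-server --disable-events --disable-georeplication --disable-tiering --without-ocf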

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
%global _usepython3 1
%global _pythonver 3
%else
%global _usepython3 0
%global _pythonver 2
%endif
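# e.g. %%{_pythonver} is 3 on Fedora and on RHEL > 7, so
# 'BuildRequires: python%%{_pythonver}-devel' below resolves to python3-devel
# (and to python2-devel on older RHEL)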

# From https://fedoraproject.org/wiki/Packaging:Python#Macros
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
%{!?python2_sitelib: %global python2_sitelib %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%{!?python2_sitearch: %global python2_sitearch %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
%global _rundir %{_localstatedir}/run
%endif

%if ( 0%{?_with_systemd:1} )
%global service_enable()   /bin/systemctl --quiet enable %1.service || : \
%{nil}
%global service_start()   /bin/systemctl --quiet start %1.service || : \
%{nil}
%global service_stop()    /bin/systemctl --quiet stop %1.service || : \
%{nil}
%global service_install() install -D -p -m 0644 %1.service %{buildroot}%2 \
%{nil}
# can't seem to make a generic macro that works
%global glusterd_svcfile   %{_unitdir}/glusterd.service
%global glusterfsd_svcfile %{_unitdir}/glusterfsd.service
%global glusterta_svcfile %{_unitdir}/gluster-ta-volume.service
%global glustereventsd_svcfile %{_unitdir}/glustereventsd.service
%global glusterfssharedstorage_svcfile %{_unitdir}/glusterfssharedstorage.service
%else
%global service_enable()  /sbin/chkconfig --add %1 >/dev/null 2>&1 || : \
%{nil}
%global systemd_preun() /sbin/chkconfig --del %1 >/dev/null 2>&1 || : \
%{nil}
%global systemd_postun_with_restart() /sbin/service %1 condrestart >/dev/null 2>&1 || : \
%{nil}
%global service_start()   /sbin/service %1 start >/dev/null 2>&1 || : \
%{nil}
%global service_stop()    /sbin/service %1 stop >/dev/null 2>&1 || : \
%{nil}
%global service_install() install -D -p -m 0755 %1.init %{buildroot}%2 \
%{nil}
# can't seem to make a generic macro that works
%global glusterd_svcfile   %{_sysconfdir}/init.d/glusterd
%global glusterfsd_svcfile %{_sysconfdir}/init.d/glusterfsd
%global glustereventsd_svcfile %{_sysconfdir}/init.d/glustereventsd
%endif
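# illustrative expansion of the wrappers above (derived from the definitions,
# not additional behaviour): '%%service_enable glusterd' becomes
#   /bin/systemctl --quiet enable glusterd.service || :        (systemd builds)
#   /sbin/chkconfig --add glusterd >/dev/null 2>&1 || :        (sysvinit builds)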

%{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}}

# We do not want to generate useless provides and requires for xlator
# .so files to be set for glusterfs packages.
# Filter all generated:
#
# TODO: RHEL5 does not have a convenient solution
%if ( 0%{?rhel} == 6 )
# filter_setup exists in RHEL6 only
%filter_provides_in %{_libdir}/glusterfs/%{version}/
%global __filter_from_req %{?__filter_from_req} | grep -v -P '^(?!lib).*\.so.*$'
%filter_setup
%else
# modern rpm and current Fedora do not generate requires when the
# provides are filtered
%global __provides_exclude_from ^%{_libdir}/glusterfs/%{version}/.*$
%endif
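# e.g. a translator installed as
#   %%{_libdir}/glusterfs/%%{version}/xlator/cluster/replicate.so   (path for illustration)
# matches the pattern above, so no automatic Provides is generated for it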


##-----------------------------------------------------------------------------
## All package definitions should be placed here in alphabetical order
##
Summary:          Distributed File System
%if ( 0%{_for_fedora_koji_builds} )
Name:             glusterfs
Version:          3.8.0
Release:          0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name:             glusterfs
Version:          6.0
Release:          56.4%{?dist}
ExcludeArch:      i686
%endif
License:          GPLv2 or LGPLv3+
URL:              http://docs.gluster.org/
%if ( 0%{_for_fedora_koji_builds} )
Source0:          http://bits.gluster.org/pub/gluster/glusterfs/src/glusterfs-%{version}%{?prereltag}.tar.gz
Source1:          glusterd.sysconfig
Source2:          glusterfsd.sysconfig
Source7:          glusterfsd.service
Source8:          glusterfsd.init
%else
Source0:          glusterfs-6.0.tar.gz
%endif

Requires(pre):    shadow-utils
%if ( 0%{?_with_systemd:1} )
BuildRequires:    systemd
%endif

Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif
%if 0%{?_with_asan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
BuildRequires:    libasan
%endif
%if 0%{?_with_tsan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
BuildRequires:    libtsan
%endif
BuildRequires:    git
BuildRequires:    bison flex
BuildRequires:    gcc make libtool
BuildRequires:    ncurses-devel readline-devel
BuildRequires:    libxml2-devel openssl-devel
BuildRequires:    libaio-devel libacl-devel
BuildRequires:    python%{_pythonver}-devel
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
BuildRequires:    python-ctypes
%endif
%if ( 0%{?_with_ipv6default:1} ) || ( 0%{!?_without_libtirpc:1} ) || ( 0%{?rhel} && ( 0%{?rhel} >= 8 ) )
BuildRequires:    libtirpc-devel
%endif
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
BuildRequires:    rpcgen
%endif
BuildRequires:    userspace-rcu-devel >= 0.7
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
BuildRequires:    automake
%endif
BuildRequires:    libuuid-devel
%if ( 0%{?_with_cmocka:1} )
BuildRequires:    libcmocka-devel >= 1.0.1
%endif
%if ( 0%{!?_without_tiering:1} )
BuildRequires:    sqlite-devel
%endif
%if ( 0%{!?_without_georeplication:1} )
BuildRequires:    libattr-devel
%endif

%if (0%{?_with_firewalld:1})
BuildRequires:    firewalld
%endif

Obsoletes:        hekafs
Obsoletes:        %{name}-common < %{version}-%{release}
Obsoletes:        %{name}-core < %{version}-%{release}
Obsoletes:        %{name}-ufo
%if ( 0%{!?_with_gnfs:1} )
Obsoletes:        %{name}-gnfs
%endif
%if ( 0%{?rhel} < 7 )
Obsoletes:        %{name}-ganesha
%endif
Provides:         %{name}-common = %{version}-%{release}
Provides:         %{name}-core = %{version}-%{release}

# Patch0001: 0001-Update-rfc.sh-to-rhgs-3.5.0.patch
Patch0002: 0002-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch
Patch0003: 0003-rpc-set-bind-insecure-to-off-by-default.patch
Patch0004: 0004-glusterd-spec-fixing-autogen-issue.patch
Patch0005: 0005-libglusterfs-glusterd-Fix-compilation-errors.patch
Patch0006: 0006-build-remove-ghost-directory-entries.patch
Patch0007: 0007-build-add-RHGS-specific-changes.patch
Patch0008: 0008-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch
Patch0009: 0009-build-introduce-security-hardening-flags-in-gluster.patch
Patch0010: 0010-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch
Patch0011: 0011-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch
Patch0012: 0012-build-add-pretrans-check.patch
Patch0013: 0013-glusterd-fix-info-file-checksum-mismatch-during-upgr.patch
Patch0014: 0014-build-spec-file-conflict-resolution.patch
Patch0015: 0015-build-randomize-temp-file-names-in-pretrans-scriptle.patch
Patch0016: 0016-glusterd-parallel-readdir-Change-the-op-version-of-p.patch
Patch0017: 0017-glusterd-Revert-op-version-for-cluster.max-brick-per.patch
Patch0018: 0018-cli-Add-message-for-user-before-modifying-brick-mult.patch
Patch0019: 0019-build-launch-glusterd-upgrade-after-all-new-bits-are.patch
Patch0020: 0020-spec-unpackaged-files-found-for-RHEL-7-client-build.patch
Patch0021: 0021-cli-glusterfsd-remove-copyright-information.patch
Patch0022: 0022-cli-Remove-upstream-doc-reference.patch
Patch0023: 0023-hooks-remove-selinux-hooks.patch
Patch0024: 0024-glusterd-Make-localtime-logging-option-invisible-in-.patch
Patch0025: 0025-build-make-RHGS-version-available-for-server.patch
Patch0026: 0026-glusterd-Introduce-daemon-log-level-cluster-wide-opt.patch
Patch0027: 0027-glusterd-change-op-version-of-fips-mode-rchecksum.patch
Patch0028: 0028-glusterd-Reset-op-version-for-features.shard-deletio.patch
Patch0029: 0029-glusterd-Reset-op-version-for-features.shard-lru-lim.patch
Patch0030: 0030-selinux-glusterd-add-features.selinux-to-glusterd-vo.patch
Patch0031: 0031-glusterd-turn-off-selinux-feature-in-downstream.patch
Patch0032: 0032-glusterd-update-gd-op-version-to-3_7_0.patch
Patch0033: 0033-build-add-missing-explicit-package-dependencies.patch
Patch0034: 0034-glusterd-introduce-a-new-op-version-for-rhgs-3.4.3.patch
Patch0035: 0035-glusterd-tag-rebalance-mgmt_v3-command-to-op-version.patch
Patch0036: 0036-build-add-conditional-dependency-on-server-for-devel.patch
Patch0037: 0037-cli-change-the-warning-message.patch
Patch0038: 0038-spec-avoid-creation-of-temp-file-in-lua-script.patch
Patch0039: 0039-cli-fix-query-to-user-during-brick-mux-selection.patch
Patch0040: 0040-build-Remove-unsupported-test-cases-failing-consiste.patch
Patch0041: 0041-tests-geo-rep-Build-failed-in-Jenkins-for-test-bug-1.patch
Patch0042: 0042-spec-client-server-Builds-are-failing-on-rhel-6.patch
Patch0043: 0043-inode-don-t-dump-the-whole-table-to-CLI.patch
Patch0044: 0044-cluster-ec-Don-t-enqueue-an-entry-if-it-is-already-h.patch
Patch0045: 0045-glusterd-fix-txn-id-mem-leak.patch
Patch0046: 0046-protocol-client-Do-not-fallback-to-anon-fd-if-fd-is-.patch
Patch0047: 0047-client-rpc-Fix-the-payload-being-sent-on-the-wire.patch
Patch0048: 0048-gfapi-Unblock-epoll-thread-for-upcall-processing.patch
Patch0049: 0049-transport-socket-log-shutdown-msg-occasionally.patch
Patch0050: 0050-geo-rep-Fix-syncing-multiple-rename-of-symlink.patch
Patch0051: 0051-spec-update-rpm-install-condition.patch
Patch0052: 0052-geo-rep-IPv6-support.patch
Patch0053: 0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch
Patch0054: 0054-Revert-glusterd-storhaug-remove-ganesha.patch
Patch0055: 0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch
Patch0056: 0056-common-ha-fixes-for-Debian-based-systems.patch
Patch0057: 0057-ganesha-scripts-Remove-export-entries-from-ganesha.c.patch
Patch0058: 0058-glusterd-ganesha-During-volume-delete-remove-the-gan.patch
Patch0059: 0059-glusterd-ganesha-throw-proper-error-for-gluster-nfs-.patch
Patch0060: 0060-ganesha-scripts-Stop-ganesha-process-on-all-nodes-if.patch
Patch0061: 0061-ganesha-allow-refresh-config-and-volume-export-unexp.patch
Patch0062: 0062-glusterd-ganesha-perform-removal-of-ganesha.conf-on-.patch
Patch0063: 0063-glusterd-ganesha-update-cache-invalidation-properly-.patch
Patch0064: 0064-glusterd-ganesha-return-proper-value-in-pre_setup.patch
Patch0065: 0065-ganesha-scripts-remove-dependency-over-export-config.patch
Patch0066: 0066-glusterd-ganesha-add-proper-NULL-check-in-manage_exp.patch
Patch0067: 0067-ganesha-minor-improvments-for-commit-e91cdf4-17081.patch
Patch0068: 0068-common-ha-surviving-ganesha.nfsd-not-put-in-grace-on.patch
Patch0069: 0069-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
Patch0070: 0070-packaging-glusterfs-ganesha-update-sometimes-fails-s.patch
Patch0071: 0071-common-ha-enable-and-disable-selinux-gluster_use_exe.patch
Patch0072: 0072-ganesha-ha-don-t-set-SELinux-booleans-if-SELinux-is-.patch
Patch0073: 0073-build-remove-ganesha-dependency-on-selinux-policy.patch
Patch0074: 0074-common-ha-enable-pacemaker-at-end-of-setup.patch
Patch0075: 0075-common-ha-Fix-an-incorrect-syntax-during-setup.patch
Patch0076: 0076-glusterd-ganesha-change-voltype-for-ganesha.enable-i.patch
Patch0077: 0077-glusterd-ganesha-create-remove-export-file-only-from.patch
Patch0078: 0078-common-ha-scripts-pass-the-list-of-servers-properly-.patch
Patch0079: 0079-common-ha-All-statd-related-files-need-to-be-owned-b.patch
Patch0080: 0080-glusterd-ganesha-Skip-non-ganesha-nodes-properly-for.patch
Patch0081: 0081-ganesha-ha-ensure-pacemaker-is-enabled-after-setup.patch
Patch0082: 0082-build-Add-dependency-on-netstat-for-glusterfs-ganesh.patch
Patch0083: 0083-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
Patch0084: 0084-glusterd-Fix-duplicate-client_op_version-in-info-fil.patch
Patch0085: 0085-Revert-all-remove-code-which-is-not-being-considered.patch
Patch0086: 0086-Revert-tiering-remove-the-translator-from-build-and-.patch
Patch0087: 0087-ganesha-fixing-minor-issues-after-the-backport-from-.patch
Patch0088: 0088-tier-fix-failures-noticed-during-tier-start-and-tier.patch
Patch0089: 0089-glusterd-gNFS-On-post-upgrade-to-3.2-disable-gNFS-fo.patch
Patch0090: 0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch
Patch0091: 0091-glusterd-gNFS-explicitly-set-nfs.disable-to-off-afte.patch
Patch0092: 0092-logging-Fix-GF_LOG_OCCASSIONALLY-API.patch
Patch0093: 0093-glusterd-Change-op-version-of-cache-invalidation-in-.patch
Patch0094: 0094-glusterd-load-ctime-in-the-client-graph-only-if-it-s.patch
Patch0095: 0095-cluster-afr-Remove-local-from-owners_list-on-failure.patch
Patch0096: 0096-core-Brick-is-not-able-to-detach-successfully-in-bri.patch
Patch0097: 0097-glusterd-tier-while-doing-an-attach-tier-the-self-he.patch
Patch0098: 0098-mgmt-shd-Implement-multiplexing-in-self-heal-daemon.patch
Patch0099: 0099-client-fini-return-fini-after-rpc-cleanup.patch
Patch0100: 0100-clnt-rpc-ref-leak-during-disconnect.patch
Patch0101: 0101-shd-mux-Fix-coverity-issues-introduced-by-shd-mux-pa.patch
Patch0102: 0102-rpc-transport-Missing-a-ref-on-dict-while-creating-t.patch
Patch0103: 0103-dht-NULL-check-before-setting-error-flag.patch
Patch0104: 0104-afr-shd-Cleanup-self-heal-daemon-resources-during-af.patch
Patch0105: 0105-core-Log-level-changes-do-not-effect-on-running-clie.patch
Patch0106: 0106-libgfchangelog-use-find_library-to-locate-shared-lib.patch
Patch0107: 0107-gfapi-add-function-to-set-client-pid.patch
Patch0108: 0108-afr-add-client-pid-to-all-gf_event-calls.patch
Patch0109: 0109-glusterd-Optimize-glusterd-handshaking-code-path.patch
Patch0110: 0110-tier-shd-glusterd-with-shd-mux-the-shd-volfile-path-.patch
Patch0111: 0111-glusterd-fix-loading-ctime-in-client-graph-logic.patch
Patch0112: 0112-geo-rep-fix-incorrectly-formatted-authorized_keys.patch
Patch0113: 0113-spec-Glusterd-did-not-start-by-default-after-node-re.patch
Patch0114: 0114-core-fix-hang-issue-in-__gf_free.patch
Patch0115: 0115-core-only-log-seek-errors-if-SEEK_HOLE-SEEK_DATA-is-.patch
Patch0116: 0116-cluster-ec-fix-fd-reopen.patch
Patch0117: 0117-spec-Remove-thin-arbiter-package.patch
Patch0118: 0118-tests-mark-thin-arbiter-test-ta.t-as-bad.patch
Patch0119: 0119-glusterd-provide-a-way-to-detach-failed-node.patch
Patch0120: 0120-glusterd-shd-Keep-a-ref-on-volinfo-until-attach-rpc-.patch
Patch0121: 0121-spec-glusterfs-devel-for-client-build-should-not-dep.patch
Patch0122: 0122-posix-ctime-Fix-stat-time-attributes-inconsistency-d.patch
Patch0123: 0123-ctime-Fix-log-repeated-logging-during-open.patch
Patch0124: 0124-spec-remove-duplicate-references-to-files.patch
Patch0125: 0125-glusterd-define-dumpops-in-the-xlator_api-of-gluster.patch
Patch0126: 0126-cluster-dht-refactor-dht-lookup-functions.patch
Patch0127: 0127-cluster-dht-Refactor-dht-lookup-functions.patch
Patch0128: 0128-glusterd-Fix-bulkvoldict-thread-logic-in-brick-multi.patch
Patch0129: 0129-core-handle-memory-accounting-correctly.patch
Patch0130: 0130-tier-test-new-tier-cmds.t-fails-after-a-glusterd-res.patch
Patch0131: 0131-tests-dht-Test-that-lookups-are-sent-post-brick-up.patch
Patch0132: 0132-glusterd-remove-duplicate-occurrence-of-features.sel.patch
Patch0133: 0133-glusterd-enable-fips-mode-rchecksum-for-new-volumes.patch
Patch0134: 0134-performance-write-behind-remove-request-from-wip-lis.patch
Patch0135: 0135-geo-rep-fix-incorrectly-formatted-authorized_keys.patch
Patch0136: 0136-glusterd-fix-inconsistent-global-option-output-in-vo.patch
Patch0137: 0137-shd-glusterd-Serialize-shd-manager-to-prevent-race-c.patch
Patch0138: 0138-glusterd-Add-gluster-volume-stop-operation-to-gluste.patch
Patch0139: 0139-ec-shd-Cleanup-self-heal-daemon-resources-during-ec-.patch
Patch0140: 0140-cluster-ec-Reopen-shouldn-t-happen-with-O_TRUNC.patch
Patch0141: 0141-socket-ssl-fix-crl-handling.patch
Patch0142: 0142-lock-check-null-value-of-dict-to-avoid-log-flooding.patch
Patch0143: 0143-packaging-Change-the-dependency-on-nfs-ganesha-to-2..patch
Patch0144: 0144-cluster-ec-honor-contention-notifications-for-partia.patch
Patch0145: 0145-core-Capture-process-memory-usage-at-the-time-of-cal.patch
Patch0146: 0146-dht-Custom-xattrs-are-not-healed-in-case-of-add-bric.patch
Patch0147: 0147-glusterd-bulkvoldict-thread-is-not-handling-all-volu.patch
Patch0148: 0148-cluster-dht-Lookup-all-files-when-processing-directo.patch
Patch0149: 0149-glusterd-Optimize-code-to-copy-dictionary-in-handsha.patch
Patch0150: 0150-libglusterfs-define-macros-needed-for-cloudsync.patch
Patch0151: 0151-mgmt-glusterd-Make-changes-related-to-cloudsync-xlat.patch
Patch0152: 0152-storage-posix-changes-with-respect-to-cloudsync.patch
Patch0153: 0153-features-cloudsync-Added-some-new-functions.patch
Patch0154: 0154-cloudsync-cvlt-Cloudsync-plugin-for-commvault-store.patch
Patch0155: 0155-cloudsync-Make-readdirp-return-stat-info-of-all-the-.patch
Patch0156: 0156-cloudsync-Fix-bug-in-cloudsync-fops-c.py.patch
Patch0157: 0157-afr-frame-Destroy-frame-after-afr_selfheal_entry_gra.patch
Patch0158: 0158-glusterfsd-cleanup-Protect-graph-object-under-a-lock.patch
Patch0159: 0159-glusterd-add-an-op-version-check.patch
Patch0160: 0160-geo-rep-Geo-rep-help-text-issue.patch
Patch0161: 0161-geo-rep-Fix-rename-with-existing-destination-with-sa.patch
Patch0162: 0162-geo-rep-Fix-sync-method-config.patch
Patch0163: 0163-geo-rep-Fix-sync-hang-with-tarssh.patch
Patch0164: 0164-cluster-ec-Fix-handling-of-heal-info-cases-without-l.patch
Patch0165: 0165-tests-shd-Add-test-coverage-for-shd-mux.patch
Patch0166: 0166-glusterd-svc-glusterd_svcs_stop-should-call-individu.patch
Patch0167: 0167-glusterd-shd-Optimize-the-glustershd-manager-to-send.patch
Patch0168: 0168-cluster-dht-Fix-directory-perms-during-selfheal.patch
Patch0169: 0169-Build-Fix-spec-to-enable-rhel8-client-build.patch
Patch0170: 0170-geo-rep-Convert-gfid-conflict-resolutiong-logs-into-.patch
Patch0171: 0171-posix-add-storage.reserve-size-option.patch
Patch0172: 0172-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
Patch0173: 0173-glusterd-store-fips-mode-rchecksum-option-in-the-inf.patch
Patch0174: 0174-xlator-log-Add-more-logging-in-xlator_is_cleanup_sta.patch
Patch0175: 0175-ec-fini-Fix-race-between-xlator-cleanup-and-on-going.patch
Patch0176: 0176-features-shard-Fix-crash-during-background-shard-del.patch
Patch0177: 0177-features-shard-Fix-extra-unref-when-inode-object-is-.patch
Patch0178: 0178-Cluster-afr-Don-t-treat-all-bricks-having-metadata-p.patch
Patch0179: 0179-tests-Fix-split-brain-favorite-child-policy.t-failur.patch
Patch0180: 0180-ganesha-scripts-Make-generate-epoch.py-python3-compa.patch
Patch0181: 0181-afr-log-before-attempting-data-self-heal.patch
Patch0182: 0182-geo-rep-fix-mountbroker-setup.patch
Patch0183: 0183-glusterd-svc-Stop-stale-process-using-the-glusterd_p.patch
Patch0184: 0184-tests-Add-gating-configuration-file-for-rhel8.patch
Patch0185: 0185-gfapi-provide-an-api-for-setting-statedump-path.patch
Patch0186: 0186-cli-Remove-brick-warning-seems-unnecessary.patch
Patch0187: 0187-gfapi-statedump_path-add-proper-version-number.patch
Patch0188: 0188-features-shard-Fix-integer-overflow-in-block-count-a.patch
Patch0189: 0189-features-shard-Fix-block-count-accounting-upon-trunc.patch
Patch0190: 0190-Build-removing-the-hardcoded-usage-of-python3.patch
Patch0191: 0191-Build-Update-python-shebangs-based-on-version.patch
Patch0192: 0192-build-Ensure-gluster-cli-package-is-built-as-part-of.patch
Patch0193: 0193-spec-fixed-python-dependency-for-rhel6.patch
Patch0194: 0194-stack-Make-sure-to-have-unique-call-stacks-in-all-ca.patch
Patch0195: 0195-build-package-glusterfs-ganesha-for-rhel7-and-above.patch
Patch0196: 0196-posix-ctime-Fix-ctime-upgrade-issue.patch
Patch0197: 0197-posix-fix-crash-in-posix_cs_set_state.patch
Patch0198: 0198-cluster-ec-Prevent-double-pre-op-xattrops.patch
Patch0199: 0199-upcall-Avoid-sending-notifications-for-invalid-inode.patch
Patch0200: 0200-gfapi-fix-incorrect-initialization-of-upcall-syncop-.patch
Patch0201: 0201-geo-rep-Fix-permissions-for-GEOREP_DIR-in-non-root-s.patch
Patch0202: 0202-shd-mux-Fix-race-between-mux_proc-unlink-and-stop.patch
Patch0203: 0203-glusterd-shd-Change-shd-logfile-to-a-unique-name.patch
Patch0204: 0204-glusterd-conditionally-clear-txn_opinfo-in-stage-op.patch
Patch0205: 0205-glusterd-Can-t-run-rebalance-due-to-long-unix-socket.patch
Patch0206: 0206-glusterd-ignore-user.-options-from-compatibility-che.patch
Patch0207: 0207-glusterd-fix-use-after-free-of-a-dict_t.patch
Patch0208: 0208-mem-pool-remove-dead-code.patch
Patch0209: 0209-core-avoid-dynamic-TLS-allocation-when-possible.patch
Patch0210: 0210-mem-pool.-c-h-minor-changes.patch
Patch0211: 0211-libglusterfs-Fix-compilation-when-disable-mempool-is.patch
Patch0212: 0212-core-fix-memory-allocation-issues.patch
Patch0213: 0213-cluster-dht-Strip-out-dht-xattrs.patch
Patch0214: 0214-geo-rep-Upgrading-config-file-to-new-version.patch
Patch0215: 0215-posix-modify-storage.reserve-option-to-take-size-and.patch
Patch0216: 0216-Test-case-fixe-for-downstream-3.5.0.patch
Patch0217: 0217-uss-Fix-tar-issue-with-ctime-and-uss-enabled.patch
Patch0218: 0218-graph-shd-Use-glusterfs_graph_deactivate-to-free-the.patch
Patch0219: 0219-posix-add-posix_set_ctime-in-posix_ftruncate.patch
Patch0220: 0220-graph-shd-Use-top-down-approach-while-cleaning-xlato.patch
Patch0221: 0221-protocol-client-propagte-GF_EVENT_CHILD_PING-only-fo.patch
Patch0222: 0222-cluster-dht-Fixed-a-memleak-in-dht_rename_cbk.patch
Patch0223: 0223-change-get_real_filename-implementation-to-use-ENOAT.patch
Patch0224: 0224-core-replace-inet_addr-with-inet_pton.patch
Patch0225: 0225-tests-utils-Fix-py2-py3-util-python-scripts.patch
Patch0226: 0226-geo-rep-fix-gluster-command-path-for-non-root-sessio.patch
Patch0227: 0227-glusterd-svc-update-pid-of-mux-volumes-from-the-shd-.patch
Patch0228: 0228-locks-enable-notify-contention-by-default.patch
Patch0229: 0229-glusterd-Show-the-correct-brick-status-in-get-state.patch
Patch0230: 0230-Revert-glusterd-svc-update-pid-of-mux-volumes-from-t.patch
Patch0231: 0231-Revert-graph-shd-Use-top-down-approach-while-cleanin.patch
Patch0232: 0232-cluster-afr-Fix-incorrect-reporting-of-gfid-type-mis.patch
Patch0233: 0233-Revert-graph-shd-Use-glusterfs_graph_deactivate-to-f.patch
Patch0234: 0234-Revert-glusterd-shd-Change-shd-logfile-to-a-unique-n.patch
Patch0235: 0235-Revert-glusterd-svc-Stop-stale-process-using-the-glu.patch
Patch0236: 0236-Revert-shd-mux-Fix-race-between-mux_proc-unlink-and-.patch
Patch0237: 0237-Revert-ec-fini-Fix-race-between-xlator-cleanup-and-o.patch
Patch0238: 0238-Revert-xlator-log-Add-more-logging-in-xlator_is_clea.patch
Patch0239: 0239-Revert-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
Patch0240: 0240-Revert-glusterd-shd-Optimize-the-glustershd-manager-.patch
Patch0241: 0241-Revert-glusterd-svc-glusterd_svcs_stop-should-call-i.patch
Patch0242: 0242-Revert-tests-shd-Add-test-coverage-for-shd-mux.patch
Patch0243: 0243-Revert-glusterfsd-cleanup-Protect-graph-object-under.patch
Patch0244: 0244-Revert-ec-shd-Cleanup-self-heal-daemon-resources-dur.patch
Patch0245: 0245-Revert-shd-glusterd-Serialize-shd-manager-to-prevent.patch
Patch0246: 0246-Revert-glusterd-shd-Keep-a-ref-on-volinfo-until-atta.patch
Patch0247: 0247-Revert-afr-shd-Cleanup-self-heal-daemon-resources-du.patch
Patch0248: 0248-Revert-shd-mux-Fix-coverity-issues-introduced-by-shd.patch
Patch0249: 0249-Revert-client-fini-return-fini-after-rpc-cleanup.patch
Patch0250: 0250-Revert-mgmt-shd-Implement-multiplexing-in-self-heal-.patch
Patch0251: 0251-tests-Fix-bug-1717819-metadata-split-brain-detection.patch
Patch0252: 0252-glusterd-do-not-mark-skip_locking-as-true-for-geo-re.patch
Patch0253: 0253-core-fix-deadlock-between-statedump-and-fd_anonymous.patch
Patch0254: 0254-Detach-iot_worker-to-release-its-resources.patch
Patch0255: 0255-Revert-tier-shd-glusterd-with-shd-mux-the-shd-volfil.patch
Patch0256: 0256-features-snapview-server-use-the-same-volfile-server.patch
Patch0257: 0257-geo-rep-Test-case-for-upgrading-config-file.patch
Patch0258: 0258-geo-rep-Fix-mount-broker-setup-issue.patch
Patch0259: 0259-gluster-block-tuning-perf-options.patch
Patch0260: 0260-ctime-Set-mdata-xattr-on-legacy-files.patch
Patch0261: 0261-features-utime-Fix-mem_put-crash.patch
Patch0262: 0262-glusterd-ctime-Disable-ctime-by-default.patch
Patch0263: 0263-tests-fix-ctime-related-tests.patch
Patch0264: 0264-gfapi-Fix-deadlock-while-processing-upcall.patch
Patch0265: 0265-fuse-add-missing-GF_FREE-to-fuse_interrupt.patch
Patch0266: 0266-geo-rep-Fix-mount-broker-setup-issue.patch
Patch0267: 0267-posix-ctime-Fix-race-during-lookup-ctime-xattr-heal.patch
Patch0268: 0268-rpc-transport-have-default-listen-port.patch
Patch0269: 0269-ec-fix-truncate-lock-to-cover-the-write-in-tuncate-c.patch
Patch0270: 0270-cluster-ec-inherit-healing-from-lock-when-it-has-inf.patch
Patch0271: 0271-cluster-ec-fix-EIO-error-for-concurrent-writes-on-sp.patch
Patch0272: 0272-cluster-ec-Always-read-from-good-mask.patch
Patch0273: 0273-cluster-ec-Fix-reopen-flags-to-avoid-misbehavior.patch
Patch0274: 0274-cluster-ec-Update-lock-good_mask-on-parent-fop-failu.patch
Patch0275: 0275-cluster-ec-Create-heal-task-with-heal-process-id.patch
Patch0276: 0276-features-utime-always-update-ctime-at-setattr.patch
Patch0277: 0277-geo-rep-Fix-Config-Get-Race.patch
Patch0278: 0278-geo-rep-Fix-worker-connection-issue.patch
Patch0279: 0279-posix-In-brick_mux-brick-is-crashed-while-start-stop.patch
Patch0280: 0280-performance-md-cache-Do-not-skip-caching-of-null-cha.patch
Patch0281: 0281-ctime-Fix-incorrect-realtime-passed-to-frame-root-ct.patch
Patch0282: 0282-geo-rep-Fix-the-name-of-changelog-archive-file.patch
Patch0283: 0283-ctime-Fix-ctime-issue-with-utime-family-of-syscalls.patch
Patch0284: 0284-posix-log-aio_error-return-codes-in-posix_fs_health_.patch
Patch0285: 0285-glusterd-glusterd-service-is-getting-timed-out-on-sc.patch
Patch0286: 0286-glusterfs.spec.in-added-script-files-for-machine-com.patch
Patch0287: 0287-cluster-ec-Fail-fsync-flush-for-files-on-update-size.patch
Patch0288: 0288-cluster-ec-Fix-coverity-issues.patch
Patch0289: 0289-cluster-ec-quorum-count-implementation.patch
Patch0290: 0290-glusterd-tag-disperse.quorum-count-for-31306.patch
Patch0291: 0291-cluster-ec-Mark-release-only-when-it-is-acquired.patch
Patch0292: 0292-rpc-Update-address-family-if-it-is-not-provide-in-cm.patch
Patch0293: 0293-glusterd-IPV6-hostname-address-is-not-parsed-correct.patch
Patch0294: 0294-eventsapi-Set-IPv4-IPv6-family-based-on-input-IP.patch
Patch0295: 0295-ctime-rebalance-Heal-ctime-xattr-on-directory-during.patch
Patch0296: 0296-glusterfind-pre-command-failure-on-a-modify.patch
Patch0297: 0297-rpmbuild-fixing-the-build-errors-with-2a905a8ae.patch
Patch0298: 0298-geo-rep-fix-sub-command-during-worker-connection.patch
Patch0299: 0299-geo-rep-performance-improvement-while-syncing-rename.patch
Patch0300: 0300-cli-remove-the-warning-displayed-when-remove-brick-s.patch
Patch0301: 0301-posix-Brick-is-going-down-unexpectedly.patch
Patch0302: 0302-cluster-ec-prevent-filling-shd-log-with-table-not-fo.patch
Patch0303: 0303-posix-heketidbstorage-bricks-go-down-during-PVC-crea.patch
Patch0304: 0304-cluster-dht-Correct-fd-processing-loop.patch
Patch0305: 0305-glusterd-rebalance-start-should-fail-when-quorum-is-.patch
Patch0306: 0306-cli-fix-distCount-value.patch
Patch0307: 0307-ssl-fix-RHEL8-regression-failure.patch
Patch0308: 0308-dht-Rebalance-causing-IO-Error-File-descriptor-in-ba.patch
Patch0309: 0309-geo-rep-Fix-config-upgrade-on-non-participating-node.patch
Patch0310: 0310-tests-test-case-for-non-root-geo-rep-setup.patch
Patch0311: 0311-geo-rep-Fix-Permission-denied-traceback-on-non-root-.patch
Patch0312: 0312-Scripts-quota_fsck-script-KeyError-contri_size.patch
Patch0313: 0313-extras-Cgroup-CPU-Mem-restriction-are-not-working-on.patch
Patch0314: 0314-glusterd-tier-is_tier_enabled-inserted-causing-check.patch
Patch0315: 0315-geo-rep-Fix-py2-py3-compatibility-in-repce.patch
Patch0316: 0316-spec-fixed-python-prettytable-dependency-for-rhel6.patch
Patch0317: 0317-Update-rfc.sh-to-rhgs-3.5.1.patch
Patch0318: 0318-Update-rfc.sh-to-rhgs-3.5.1.patch
Patch0319: 0319-features-snapview-server-obtain-the-list-of-snapshot.patch
Patch0320: 0320-gf-event-Handle-unix-volfile-servers.patch
Patch0321: 0321-Adding-white-spaces-to-description-of-set-group.patch
Patch0322: 0322-glusterd-display-correct-rebalance-data-size-after-g.patch
Patch0323: 0323-cli-display-detailed-rebalance-info.patch
Patch0324: 0324-extras-hooks-Add-SELinux-label-on-new-bricks-during-.patch
Patch0325: 0325-extras-hooks-Install-and-package-newly-added-post-ad.patch
Patch0326: 0326-tests-subdir-mount.t-is-failing-for-brick_mux-regrss.patch
Patch0327: 0327-glusterfind-integrate-with-gfid2path.patch
Patch0328: 0328-glusterd-Add-warning-and-abort-in-case-of-failures-i.patch
Patch0329: 0329-cluster-afr-Heal-entries-when-there-is-a-source-no-h.patch
Patch0330: 0330-mount.glusterfs-change-the-error-message.patch
Patch0331: 0331-features-locks-Do-special-handling-for-op-version-3..patch
Patch0332: 0332-Removing-one-top-command-from-gluster-v-help.patch
Patch0333: 0333-rpc-Synchronize-slot-allocation-code.patch
Patch0334: 0334-dht-log-getxattr-failure-for-node-uuid-at-DEBUG.patch
Patch0335: 0335-tests-RHEL8-test-failure-fixes-for-RHGS.patch
Patch0336: 0336-spec-check-and-return-exit-code-in-rpm-scripts.patch
Patch0337: 0337-fuse-Set-limit-on-invalidate-queue-size.patch
Patch0338: 0338-glusterfs-fuse-Reduce-the-default-lru-limit-value.patch
Patch0339: 0339-geo-rep-fix-integer-config-validation.patch
Patch0340: 0340-rpc-event_slot_alloc-converted-infinite-loop-after-r.patch
Patch0341: 0341-socket-fix-error-handling.patch
Patch0342: 0342-Revert-hooks-remove-selinux-hooks.patch
Patch0343: 0343-extras-hooks-syntactical-errors-in-SELinux-hooks-sci.patch
Patch0344: 0344-Revert-all-fixes-to-include-SELinux-hook-scripts.patch
Patch0345: 0345-read-ahead-io-cache-turn-off-by-default.patch
Patch0346: 0346-fuse-degrade-logging-of-write-failure-to-fuse-device.patch
Patch0347: 0347-tools-glusterfind-handle-offline-bricks.patch
Patch0348: 0348-glusterfind-Fix-py2-py3-issues.patch
Patch0349: 0349-glusterfind-python3-compatibility.patch
Patch0350: 0350-tools-glusterfind-Remove-an-extra-argument.patch
Patch0351: 0351-server-Mount-fails-after-reboot-1-3-gluster-nodes.patch
Patch0352: 0352-spec-fixed-missing-dependencies-for-glusterfs-clouds.patch
Patch0353: 0353-build-glusterfs-ganesha-pkg-requires-python3-policyc.patch
Patch0354: 0354-core-fix-memory-pool-management-races.patch
Patch0355: 0355-core-Prevent-crash-on-process-termination.patch
Patch0356: 0356-Update-rfc.sh-to-rhgs-3.5.1-rhel-8.patch
Patch0357: 0357-ganesha-ha-updates-for-pcs-0.10.x-i.e.-in-Fedora-29-.patch
Patch0358: 0358-inode-fix-wrong-loop-count-in-__inode_ctx_free.patch
Patch0359: 0359-dht-gf_defrag_process_dir-is-called-even-if-gf_defra.patch
Patch0360: 0360-rpc-Make-ssl-log-more-useful.patch
Patch0361: 0361-snap_scheduler-python3-compatibility-and-new-test-ca.patch
Patch0362: 0362-write-behind-fix-data-corruption.patch
Patch0363: 0363-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
Patch0364: 0364-dht-fixing-rebalance-failures-for-files-with-holes.patch
Patch0365: 0365-build-geo-rep-requires-relevant-selinux-permission-f.patch
Patch0366: 0366-snapshot-fix-python3-issue-in-gcron.patch
Patch0367: 0367-dht-Handle-setxattr-and-rm-race-for-directory-in-reb.patch
Patch0368: 0368-Update-rfc.sh-to-rhgs-3.5.2.patch
Patch0369: 0369-cluster-ec-Return-correct-error-code-and-log-message.patch
Patch0370: 0370-dht-Do-opendir-selectively-in-gf_defrag_process_dir.patch
Patch0371: 0371-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
Patch0372: 0372-posix-fix-seek-functionality.patch
Patch0373: 0373-build-geo-rep-sub-pkg-requires-policycoreutils-pytho.patch
Patch0374: 0374-open-behind-fix-missing-fd-reference.patch
Patch0375: 0375-features-shard-Send-correct-size-when-reads-are-sent.patch
Patch0376: 0376-features-shard-Fix-crash-during-shards-cleanup-in-er.patch
Patch0377: 0377-syncop-improve-scaling-and-implement-more-tools.patch
Patch0378: 0378-Revert-open-behind-fix-missing-fd-reference.patch
Patch0379: 0379-glusterd-add-missing-synccond_broadcast.patch
Patch0380: 0380-features-shard-Aggregate-size-block-count-in-iatt-be.patch
Patch0381: 0381-dht-add-null-check-in-gf_defrag_free_dir_dfmeta.patch
Patch0382: 0382-features-shard-Aggregate-file-size-block-count-befor.patch
Patch0383: 0383-common-ha-ganesha-ha.sh-bad-test-for-rhel-centos-for.patch
Patch0384: 0384-Update-rfc.sh-to-rhgs-3.5.3.patch
Patch0385: 0385-glusterd-start-glusterd-automatically-on-abnormal-sh.patch
Patch0386: 0386-glusterd-increase-the-StartLimitBurst.patch
Patch0387: 0387-To-fix-readdir-ahead-memory-leak.patch
Patch0388: 0388-rpc-Cleanup-SSL-specific-data-at-the-time-of-freeing.patch
Patch0389: 0389-posix-Avoid-diskpace-error-in-case-of-overwriting-th.patch
Patch0390: 0390-glusterd-deafult-options-after-volume-reset.patch
Patch0391: 0391-glusterd-unlink-the-file-after-killing-the-process.patch
Patch0392: 0392-glusterd-Brick-process-fails-to-come-up-with-brickmu.patch
Patch0393: 0393-afr-restore-timestamp-of-files-during-metadata-heal.patch
Patch0394: 0394-man-gluster-Add-volume-top-command-to-gluster-man-pa.patch
Patch0395: 0395-Cli-Removing-old-log-rotate-command.patch
Patch0396: 0396-Updating-gluster-manual.patch
Patch0397: 0397-mgmt-brick-mux-Avoid-sending-two-response-when-attac.patch
Patch0398: 0398-ec-change-error-message-for-heal-commands-for-disper.patch
Patch0399: 0399-glusterd-coverity-fixes.patch
Patch0400: 0400-cli-throw-a-warning-if-replica-count-greater-than-3.patch
Patch0401: 0401-cli-change-the-warning-message.patch
Patch0402: 0402-afr-wake-up-index-healer-threads.patch
Patch0403: 0403-Fix-spurious-failure-in-bug-1744548-heal-timeout.t.patch
Patch0404: 0404-tests-Fix-spurious-failure.patch
Patch0405: 0405-core-fix-return-of-local-in-__nlc_inode_ctx_get.patch
Patch0406: 0406-afr-support-split-brain-CLI-for-replica-3.patch
Patch0407: 0407-geo-rep-Improving-help-message-in-schedule_georep.py.patch
Patch0408: 0408-geo-rep-Fix-ssh-port-validation.patch
Patch0409: 0409-system-posix-acl-update-ctx-only-if-iatt-is-non-NULL.patch
Patch0410: 0410-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch
Patch0411: 0411-tools-glusterfind-validate-session-name.patch
Patch0412: 0412-gluster-smb-add-smb-parameter-when-access-gluster-by.patch
Patch0413: 0413-extras-hooks-Remove-smb.conf-parameter-allowing-gues.patch
Patch0414: 0414-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch
Patch0415: 0415-dht-Fix-stale-layout-and-create-issue.patch
Patch0416: 0416-tests-fix-spurious-failure-of-bug-1402841.t-mt-dir-s.patch
Patch0417: 0417-events-fix-IPv6-memory-corruption.patch
Patch0418: 0418-md-cache-avoid-clearing-cache-when-not-necessary.patch
Patch0419: 0419-cluster-afr-fix-race-when-bricks-come-up.patch
Patch0420: 0420-scripts-quota_fsck-script-TypeError-d-format-not-dic.patch
Patch0421: 0421-Improve-logging-in-EC-client-and-lock-translator.patch
Patch0422: 0422-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch
Patch0423: 0423-ctime-Fix-ctime-inconsisteny-with-utimensat.patch
Patch0424: 0424-afr-make-heal-info-lockless.patch
Patch0425: 0425-tests-Fix-spurious-self-heald.t-failure.patch
Patch0426: 0426-geo-rep-Fix-for-Transport-End-Point-not-connected-is.patch
Patch0427: 0427-storage-posix-Fixing-a-coverity-issue.patch
Patch0428: 0428-glusterd-ganesha-fixing-resource-leak-in-tear_down_c.patch
Patch0429: 0429-dht-rebalance-fixing-failure-occurace-due-to-rebalan.patch
Patch0430: 0430-Fix-some-Null-pointer-dereference-coverity-issues.patch
Patch0431: 0431-glusterd-check-for-same-node-while-adding-bricks-in-.patch
Patch0432: 0432-glusterd-Fix-coverity-defects-put-coverity-annotatio.patch
Patch0433: 0433-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch
Patch0434: 0434-glusterd-ganesha-fix-Coverity-CID-1405785.patch
Patch0435: 0435-glusterd-coverity-fix.patch
Patch0436: 0436-glusterd-coverity-fixes.patch
Patch0437: 0437-glusterd-prevent-use-after-free-in-glusterd_op_ac_se.patch
Patch0438: 0438-dht-sparse-files-rebalance-enhancements.patch
Patch0439: 0439-cluster-afr-Delay-post-op-for-fsync.patch
Patch0440: 0440-glusterd-snapshot-Improve-log-message-during-snapsho.patch
Patch0441: 0441-fuse-occasional-logging-for-fuse-device-weird-write-.patch
Patch0442: 0442-fuse-correctly-handle-setxattr-values.patch
Patch0443: 0443-fuse-fix-high-sev-coverity-issue.patch
Patch0444: 0444-mount-fuse-Fixing-a-coverity-issue.patch
Patch0445: 0445-feature-changelog-Avoid-thread-creation-if-xlator-is.patch
Patch0446: 0446-bitrot-Make-number-of-signer-threads-configurable.patch
Patch0447: 0447-core-brick_mux-brick-crashed-when-creating-and-delet.patch
Patch0448: 0448-Posix-Use-simple-approach-to-close-fd.patch
Patch0449: 0449-test-Test-case-brick-mux-validation-in-cluster.t-is-.patch
Patch0450: 0450-tests-basic-ctime-enable-ctime-before-testing.patch
Patch0451: 0451-extras-Modify-group-virt-to-include-network-related-.patch
Patch0452: 0452-Tier-DHT-Handle-the-pause-case-missed-out.patch
Patch0453: 0453-glusterd-add-brick-command-failure.patch
Patch0454: 0454-features-locks-avoid-use-after-freed-of-frame-for-bl.patch
Patch0455: 0455-locks-prevent-deletion-of-locked-entries.patch
Patch0456: 0456-add-clean-local-after-grant-lock.patch
Patch0457: 0457-cluster-ec-Improve-detection-of-new-heals.patch
Patch0458: 0458-features-bit-rot-stub-clean-the-mutex-after-cancelli.patch
Patch0459: 0459-features-bit-rot-Unconditionally-sign-the-files-duri.patch
Patch0460: 0460-cluster-ec-Remove-stale-entries-from-indices-xattrop.patch
Patch0461: 0461-geo-replication-Fix-IPv6-parsing.patch
Patch0462: 0462-Issue-with-gf_fill_iatt_for_dirent.patch
Patch0463: 0463-cluster-ec-Change-handling-of-heal-failure-to-avoid-.patch
Patch0464: 0464-storage-posix-Remove-nr_files-usage.patch
Patch0465: 0465-posix-Implement-a-janitor-thread-to-close-fd.patch
Patch0466: 0466-cluster-ec-Change-stale-index-handling.patch
Patch0467: 0467-build-Added-dependency-for-glusterfs-selinux.patch
Patch0468: 0468-build-Update-the-glusterfs-selinux-version.patch
Patch0469: 0469-cluster-ec-Don-t-trigger-heal-for-stale-index.patch
Patch0470: 0470-extras-snap_scheduler-changes-in-gluster-shared-stor.patch
Patch0471: 0471-nfs-ganesha-gluster_shared_storage-fails-to-automoun.patch
Patch0472: 0472-geo-rep-gluster_shared_storage-fails-to-automount-on.patch
Patch0473: 0473-glusterd-Fix-Add-brick-with-increasing-replica-count.patch
Patch0474: 0474-features-locks-posixlk-clear-lock-should-set-error-a.patch
Patch0475: 0475-fuse-lock-interrupt-fix-flock_interrupt.t.patch
Patch0476: 0476-mount-fuse-use-cookies-to-get-fuse-interrupt-record-.patch
Patch0477: 0477-glusterd-snapshot-Snapshot-prevalidation-failure-not.patch
Patch0478: 0478-DHT-Fixing-rebalance-failure-on-issuing-stop-command.patch
Patch0479: 0479-ganesha-ha-revised-regex-exprs-for-status.patch
Patch0480: 0480-DHT-Rebalance-Ensure-Rebalance-reports-status-only-o.patch
Patch0481: 0481-Update-rfc.sh-to-rhgs-3.5.4.patch
Patch0482: 0482-logger-Always-print-errors-in-english.patch
Patch0483: 0483-afr-more-quorum-checks-in-lookup-and-new-entry-marki.patch
Patch0484: 0484-glusterd-rebalance-status-displays-stats-as-0-after-.patch
Patch0485: 0485-cli-rpc-conditional-init-of-global-quota-rpc-1578.patch
Patch0486: 0486-glusterd-brick-sock-file-deleted-log-error-1560.patch
Patch0487: 0487-Events-Log-file-not-re-opened-after-logrotate.patch
Patch0488: 0488-glusterd-afr-enable-granular-entry-heal-by-default.patch
Patch0489: 0489-glusterd-fix-bug-in-enabling-granular-entry-heal.patch
Patch0490: 0490-Segmentation-fault-occurs-during-truncate.patch
Patch0491: 0491-glusterd-mount-directory-getting-truncated-on-mounti.patch
Patch0492: 0492-afr-lookup-Pass-xattr_req-in-while-doing-a-selfheal-.patch
Patch0493: 0493-geo-rep-Note-section-is-required-for-ignore_deletes.patch
Patch0494: 0494-glusterd-start-the-brick-on-a-different-port.patch
Patch0495: 0495-geo-rep-descriptive-message-when-worker-crashes-due-.patch
Patch0496: 0496-posix-Use-MALLOC-instead-of-alloca-to-allocate-memor.patch
Patch0497: 0497-socket-Use-AES128-cipher-in-SSL-if-AES-is-supported-.patch
Patch0498: 0498-geo-rep-Fix-corner-case-in-rename-on-mkdir-during-hy.patch
Patch0499: 0499-gfapi-give-appropriate-error-when-size-exceeds.patch
Patch0500: 0500-features-shard-Convert-shard-block-indices-to-uint64.patch
Patch0501: 0501-Cli-Removing-old-syntax-of-tier-cmds-from-help-menu.patch
Patch0502: 0502-dht-fixing-a-permission-update-issue.patch
Patch0503: 0503-gfapi-Suspend-synctasks-instead-of-blocking-them.patch
Patch0504: 0504-io-stats-Configure-ios_sample_buf_size-based-on-samp.patch
Patch0505: 0505-trash-Create-inode_table-only-while-feature-is-enabl.patch
Patch0506: 0506-posix-Attach-a-posix_spawn_disk_thread-with-glusterf.patch
Patch0507: 0507-inode-make-critical-section-smaller.patch
Patch0508: 0508-fuse-fetch-arbitrary-number-of-groups-from-proc-pid-.patch
Patch0509: 0509-core-configure-optimum-inode-table-hash_size-for-shd.patch
Patch0510: 0510-glusterd-brick_mux-Optimize-friend-handshake-code-to.patch
Patch0511: 0511-features-shard-Missing-format-specifier.patch
Patch0512: 0512-glusterd-shared-storage-mount-fails-in-ipv6-environm.patch
Patch0513: 0513-afr-mark-pending-xattrs-as-a-part-of-metadata-heal.patch
Patch0514: 0514-afr-event-gen-changes.patch
Patch0515: 0515-cluster-afr-Heal-directory-rename-without-rmdir-mkdi.patch
Patch0516: 0516-afr-return-EIO-for-gfid-split-brains.patch
Patch0517: 0517-gfapi-glfs_h_creat_open-new-API-to-create-handle-and.patch
Patch0518: 0518-glusterd-Fix-for-shared-storage-in-ipv6-env.patch
Patch0519: 0519-glusterfs-events-Fix-incorrect-attribute-access-2002.patch
Patch0520: 0520-performance-open-behind-seek-fop-should-open_and_res.patch
Patch0521: 0521-open-behind-fix-missing-fd-reference.patch
Patch0522: 0522-lcov-improve-line-coverage.patch
Patch0523: 0523-open-behind-rewrite-of-internal-logic.patch
Patch0524: 0524-open-behind-fix-call_frame-leak.patch
Patch0525: 0525-open-behind-implement-create-fop.patch
Patch0526: 0526-Quota-quota_fsck.py-converting-byte-string-to-string.patch
Patch0527: 0527-Events-Socket-creation-after-getaddrinfo-and-IPv4-an.patch
Patch0528: 0528-Extras-Removing-xattr_analysis-script.patch
Patch0529: 0529-geo-rep-prompt-should-work-for-ignore_deletes.patch
Patch0530: 0530-gfapi-avoid-crash-while-logging-message.patch
Patch0531: 0531-Glustereventsd-Default-port-change-2091.patch
Patch0532: 0532-glusterd-fix-for-starting-brick-on-new-port.patch
Patch0533: 0533-glusterd-Rebalance-cli-is-not-showing-correct-status.patch
Patch0534: 0534-glusterd-Resolve-use-after-free-bug-2181.patch
Patch0535: 0535-multiple-files-use-dict_allocate_and_serialize-where.patch
Patch0536: 0536-dht-Ongoing-IO-is-failed-during-volume-shrink-operat.patch
Patch0537: 0537-cluster-afr-Fix-race-in-lockinfo-f-getxattr.patch
Patch0538: 0538-afr-fix-coverity-issue-introduced-by-90cefde.patch
Patch0539: 0539-extras-disable-lookup-optimize-in-virt-and-block-gro.patch
Patch0540: 0540-extras-Disable-write-behind-for-group-samba.patch
Patch0541: 0541-glusterd-volgen-Add-functionality-to-accept-any-cust.patch
Patch0542: 0542-xlaotrs-mgmt-Fixing-coverity-issue-1445996.patch
Patch0543: 0543-glusterd-handle-custom-xlator-failure-cases.patch
Patch0544: 0544-RHGS-3.5.4-rebuild-to-ship-with-RHEL-8.5.patch

%description
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package includes the glusterfs binary, the glusterfsd daemon and the
e3c68b
libglusterfs and glusterfs translator modules common to both GlusterFS server
e3c68b
and client framework.
e3c68b
e3c68b
%package api
e3c68b
Summary:          GlusterFS api library
e3c68b
Requires:         %{name}%{?_isa} = %{version}-%{release}
e3c68b
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
e3c68b
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e3c68b
e3c68b
%description api
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides the glusterfs libgfapi library.
e3c68b
e3c68b
%package api-devel
e3c68b
Summary:          Development Libraries
e3c68b
Requires:         %{name}%{?_isa} = %{version}-%{release}
e3c68b
Requires:         %{name}-devel%{?_isa} = %{version}-%{release}
e3c68b
Requires:         libacl-devel
e3c68b
Requires:         %{name}-api%{?_isa} = %{version}-%{release}
e3c68b
e3c68b
%description api-devel
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides the api include files.
e3c68b
e3c68b
%package cli
e3c68b
Summary:          GlusterFS CLI
e3c68b
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e3c68b
e3c68b
%description cli
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides the GlusterFS CLI application and its man page.
e3c68b
e3c68b
%package cloudsync-plugins
e3c68b
Summary:          Cloudsync Plugins
e3c68b
BuildRequires:    libcurl-devel
e3c68b
Requires:         glusterfs-libs = %{version}-%{release}
e3c68b
e3c68b
%description cloudsync-plugins
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides cloudsync plugins for the archival feature.
e3c68b
e3c68b
%package devel
e3c68b
Summary:          Development Libraries
e3c68b
Requires:         %{name}%{?_isa} = %{version}-%{release}
e3c68b
# Needed for the Glupy examples to work
e3c68b
%if ( 0%{!?_without_extra_xlators:1} )
e3c68b
Requires:         %{name}-extra-xlators%{?_isa} = %{version}-%{release}
e3c68b
%endif
e3c68b
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
e3c68b
%endif
e3c68b
e3c68b
%description devel
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides the development libraries and include files.
e3c68b
e3c68b
%if ( 0%{!?_without_extra_xlators:1} )
e3c68b
%package extra-xlators
e3c68b
Summary:          Extra Gluster filesystem Translators
e3c68b
# We need python-gluster rpm for gluster module's __init__.py in Python
e3c68b
# site-packages area
e3c68b
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
e3c68b
Requires:         python%{_pythonver}
e3c68b
e3c68b
%description extra-xlators
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides extra filesystem Translators, such as Glupy,
e3c68b
for GlusterFS.
e3c68b
%endif
e3c68b
e3c68b
%package fuse
e3c68b
Summary:          Fuse client
e3c68b
BuildRequires:    fuse-devel
e3c68b
Requires:         attr
e3c68b
Requires:         psmisc
e3c68b
e3c68b
Requires:         %{name}%{?_isa} = %{version}-%{release}
e3c68b
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
e3c68b
e3c68b
Obsoletes:        %{name}-client < %{version}-%{release}
e3c68b
Provides:         %{name}-client = %{version}-%{release}
e3c68b
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e3c68b
e3c68b
%description fuse
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides support for FUSE-based clients and includes the
e3c68b
glusterfs(d) binary.
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
e3c68b
%package ganesha
e3c68b
Summary:          NFS-Ganesha configuration
e3c68b
Group:            Applications/File
e3c68b
e3c68b
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
e3c68b
Requires:         nfs-ganesha-selinux >= 2.7.3
e3c68b
Requires:         nfs-ganesha-gluster >= 2.7.3
e3c68b
Requires:         pcs, dbus
e3c68b
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
e3c68b
Requires:         cman, pacemaker, corosync
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 5 )
e3c68b
# we need portblock resource-agent in 3.9.5 and later.
e3c68b
Requires:         resource-agents >= 3.9.5
e3c68b
Requires:         net-tools
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
e3c68b
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
e3c68b
Requires: selinux-policy >= 3.13.1-160
e3c68b
Requires(post):   policycoreutils-python
e3c68b
Requires(postun): policycoreutils-python
e3c68b
%else
e3c68b
Requires(post):   policycoreutils-python-utils
e3c68b
Requires(postun): policycoreutils-python-utils
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
%description ganesha
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides the configuration and related files for using
e3c68b
NFS-Ganesha as the NFS server with GlusterFS.
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_georeplication:1} )
e3c68b
%package geo-replication
e3c68b
Summary:          GlusterFS Geo-replication
e3c68b
Requires:         %{name}%{?_isa} = %{version}-%{release}
e3c68b
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
e3c68b
Requires:         python%{_pythonver}
e3c68b
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
e3c68b
Requires:         python-prettytable
e3c68b
%else
e3c68b
Requires:         python%{_pythonver}-prettytable
e3c68b
%endif
e3c68b
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
e3c68b
e3c68b
Requires:         rsync
e3c68b
Requires:         util-linux
e3c68b
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e3c68b
# required for setting selinux bools
e3c68b
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
e3c68b
Requires(post):      policycoreutils-python-utils
e3c68b
Requires(postun):    policycoreutils-python-utils
e3c68b
Requires:            selinux-policy-targeted
e3c68b
Requires(post):      selinux-policy-targeted
e3c68b
BuildRequires:       selinux-policy-devel
e3c68b
%endif
e3c68b
e3c68b
%description geo-replication
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides support for geo-replication.
e3c68b
%endif
e3c68b
e3c68b
%package libs
e3c68b
Summary:          GlusterFS common libraries
e3c68b
e3c68b
%description libs
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides the base GlusterFS libraries.
e3c68b
e3c68b
%package -n python%{_pythonver}-gluster
e3c68b
Summary:          GlusterFS python library
e3c68b
Requires:         python%{_pythonver}
e3c68b
%if ( ! %{_usepython3} )
e3c68b
%{?python_provide:%python_provide python-gluster}
e3c68b
Provides:         python-gluster = %{version}-%{release}
e3c68b
Obsoletes:        python-gluster < 3.10
e3c68b
%endif
e3c68b
e3c68b
%description -n python%{_pythonver}-gluster
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package contains the Python modules of GlusterFS under its own gluster
e3c68b
namespace.
e3c68b
e3c68b
%if ( 0%{!?_without_rdma:1} )
e3c68b
%package rdma
e3c68b
Summary:          GlusterFS rdma support for ib-verbs
e3c68b
%if ( 0%{?fedora} && 0%{?fedora} > 26 )
e3c68b
BuildRequires:    rdma-core-devel
e3c68b
%else
e3c68b
BuildRequires:    libibverbs-devel
e3c68b
BuildRequires:    librdmacm-devel >= 1.0.15
e3c68b
%endif
e3c68b
Requires:         %{name}%{?_isa} = %{version}-%{release}
e3c68b
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e3c68b
e3c68b
%description rdma
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides support for the ib-verbs library.
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_regression_tests:1} )
e3c68b
%package regression-tests
e3c68b
Summary:          Development Tools
e3c68b
Requires:         %{name}%{?_isa} = %{version}-%{release}
e3c68b
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
e3c68b
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
e3c68b
## thin provisioning support
e3c68b
Requires:         lvm2 >= 2.02.89
e3c68b
Requires:         perl(App::Prove) perl(Test::Harness) gcc util-linux-ng
e3c68b
Requires:         python%{_pythonver}
e3c68b
Requires:         attr dbench file git libacl-devel net-tools
e3c68b
Requires:         nfs-utils xfsprogs yajl psmisc bc
e3c68b
e3c68b
%description regression-tests
e3c68b
The Gluster Test Framework is a suite of scripts used for
e3c68b
regression testing of Gluster.
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_ocf:1} )
e3c68b
%package resource-agents
e3c68b
Summary:          OCF Resource Agents for GlusterFS
e3c68b
License:          GPLv3+
e3c68b
BuildArch:        noarch
e3c68b
# this Group handling comes from the Fedora resource-agents package
e3c68b
# for glusterd
e3c68b
Requires:         %{name}-server = %{version}-%{release}
e3c68b
# depending on the distribution, we need pacemaker or resource-agents
e3c68b
Requires:         %{_prefix}/lib/ocf/resource.d
e3c68b
e3c68b
%description resource-agents
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides the resource agents which plug glusterd into
e3c68b
Open Cluster Framework (OCF) compliant cluster resource managers,
e3c68b
like Pacemaker.
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%package server
e3c68b
Summary:          Clustered file-system server
e3c68b
Requires:         %{name}%{?_isa} = %{version}-%{release}
e3c68b
Requires:         %{name}-cli%{?_isa} = %{version}-%{release}
e3c68b
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
d84cf8
%if ( 0%{?fedora} && 0%{?fedora} >= 30  || ( 0%{?rhel} && 0%{?rhel} >= 8 ) )
d84cf8
Requires:         glusterfs-selinux >= 1.0-1
d84cf8
%endif
e3c68b
# some daemons (like quota) use a fuse-mount; glusterfsd is part of -fuse
e3c68b
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
e3c68b
# self-heal daemon, rebalance, nfs-server etc. are actually clients
e3c68b
Requires:         %{name}-api%{?_isa} = %{version}-%{release}
e3c68b
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
e3c68b
# lvm2 for snapshot, and nfs-utils and rpcbind/portmap for gnfs server
e3c68b
Requires:         lvm2
e3c68b
Requires:         nfs-utils
e3c68b
%if ( 0%{?_with_systemd:1} )
e3c68b
%{?systemd_requires}
e3c68b
%else
e3c68b
Requires(post):   /sbin/chkconfig
e3c68b
Requires(preun):  /sbin/service
e3c68b
Requires(preun):  /sbin/chkconfig
e3c68b
Requires(postun): /sbin/service
e3c68b
%endif
e3c68b
%if (0%{?_with_firewalld:1})
e3c68b
# we install firewalld rules, so we need to have the directory owned
e3c68b
%if ( 0%{!?rhel} )
e3c68b
# not on RHEL because firewalld-filesystem appeared in 7.3
e3c68b
# when EL7 rpm gets weak dependencies we can add a Suggests:
e3c68b
Requires:         firewalld-filesystem
e3c68b
%endif
e3c68b
%endif
e3c68b
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
e3c68b
Requires:         rpcbind
e3c68b
%else
e3c68b
Requires:         portmap
e3c68b
%endif
e3c68b
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
e3c68b
Requires:         python-argparse
e3c68b
%endif
e3c68b
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
e3c68b
Requires:         python%{_pythonver}-pyxattr
e3c68b
%else
e3c68b
Requires:         pyxattr
e3c68b
%endif
e3c68b
%if (0%{?_with_valgrind:1})
e3c68b
Requires:         valgrind
e3c68b
%endif
e3c68b
e3c68b
%description server
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides the glusterfs server daemon.
e3c68b
%endif
e3c68b
e3c68b
%package client-xlators
e3c68b
Summary:          GlusterFS client-side translators
e3c68b
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e3c68b
e3c68b
%description client-xlators
e3c68b
GlusterFS is a distributed file-system capable of scaling to several
e3c68b
petabytes. It aggregates various storage bricks over Infiniband RDMA
e3c68b
or TCP/IP interconnect into one large parallel network file
e3c68b
system. GlusterFS is one of the most sophisticated file systems in
e3c68b
terms of features and extensibility.  It borrows a powerful concept
e3c68b
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e3c68b
is in user space and easily manageable.
e3c68b
e3c68b
This package provides the translators needed on any GlusterFS client.
e3c68b
e3c68b
%if ( 0%{!?_without_events:1} )
e3c68b
%package events
e3c68b
Summary:          GlusterFS Events
e3c68b
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
e3c68b
Requires:         python%{_pythonver}
e3c68b
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
e3c68b
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
e3c68b
Requires:         python-requests
e3c68b
%else
e3c68b
Requires:         python%{_pythonver}-requests
e3c68b
%endif
e3c68b
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
e3c68b
Requires:         python-prettytable
e3c68b
Requires:         python-argparse
e3c68b
%else
e3c68b
Requires:         python%{_pythonver}-prettytable
e3c68b
%endif
e3c68b
%if ( 0%{?_with_systemd:1} )
e3c68b
%{?systemd_requires}
e3c68b
%endif
e3c68b
e3c68b
%description events
e3c68b
This package provides the GlusterFS events framework, including the
glustereventsd daemon.
e3c68b
e3c68b
%endif
e3c68b
e3c68b
%prep
e3c68b
%setup -q -n %{name}-%{version}%{?prereltag}
e3c68b
e3c68b
# sanitization scriptlet for patches with file renames
e3c68b
ls %{_topdir}/SOURCES/*.patch | sort | \
e3c68b
while read p
e3c68b
do
e3c68b
    # if the destination file exists, it's most probably stale
e3c68b
    # so we must remove it
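    # "rename to <path>" lines come from git-format-patch rename metadata; we
    # collect the destination paths so any stale copies can be deleted first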
e3c68b
    rename_to=( $(grep -i 'rename to' $p | cut -f 3 -d ' ') )
e3c68b
    if [ ${#rename_to[*]} -gt 0 ]; then
e3c68b
        for f in ${rename_to[*]}
e3c68b
        do
e3c68b
            if [ -f $f ]; then
e3c68b
                rm -f $f
e3c68b
            elif [ -d $f ]; then
e3c68b
                rm -rf $f
e3c68b
            fi
e3c68b
        done
e3c68b
    fi
e3c68b
e3c68b
    SOURCE_FILES=( $(egrep '^\-\-\- a/' $p | cut -f 2- -d '/') )
e3c68b
    DEST_FILES=( $(egrep '^\+\+\+ b/' $p | cut -f 2- -d '/') )
e3c68b
    EXCLUDE_DOCS=()
e3c68b
    for idx in ${!SOURCE_FILES[@]}; do
87c3ef
        # skip the doc
e3c68b
        source_file=${SOURCE_FILES[$idx]}
e3c68b
        dest_file=${DEST_FILES[$idx]}
e3c68b
        if [[ "$dest_file" =~ ^doc/.+ ]]; then
e3c68b
            if [ "$source_file" != "dev/null" ] && [ ! -f "$dest_file" ]; then
e3c68b
                # if patch is being applied to a doc file and if the doc file
e3c68b
                # hasn't been added so far then we need to exclude it
e3c68b
                EXCLUDE_DOCS=( ${EXCLUDE_DOCS[*]} "$dest_file" )
e3c68b
            fi
e3c68b
        fi
e3c68b
    done
e3c68b
    EXCLUDE_DOCS_OPT=""
e3c68b
    for doc in ${EXCLUDE_DOCS[@]}; do
e3c68b
        EXCLUDE_DOCS_OPT="--exclude=$doc $EXCLUDE_DOCS_OPT"
e3c68b
    done
e3c68b
e3c68b
    # HACK to fix build
e3c68b
    bn=$(basename $p)
e3c68b
    if [ "$bn" == "0085-Revert-all-remove-code-which-is-not-being-considered.patch" ]; then
e3c68b
        (patch -p1 -u -F3 < $p || :)
e3c68b
        if [ -f libglusterfs/Makefile.am.rej ]; then
e3c68b
            sed -i -e 's/^SUBDIRS = src/SUBDIRS = src src\/gfdb/g;s/^CLEANFILES = /CLEANFILES =/g' libglusterfs/Makefile.am
e3c68b
        fi
e3c68b
    elif [ "$bn" == "0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch" ]; then
e3c68b
        (patch -p1 < $p || :)
e3c68b
    elif [ "$bn" == "0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch" ]; then
e3c68b
        (patch -p1 < $p || :)
e3c68b
    elif [ "$bn" == "0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch" ]; then
e3c68b
        (patch -p1 < $p || :)
e3c68b
    elif [ "$bn" == "0117-spec-Remove-thin-arbiter-package.patch" ]; then
e3c68b
        (patch -p1 < $p || :)
e3c68b
    elif [ "$bn" == "0023-hooks-remove-selinux-hooks.patch" ]; then
e3c68b
        (patch -p1 < $p || :)
e3c68b
    elif [ "$bn" == "0042-spec-client-server-Builds-are-failing-on-rhel-6.patch" ]; then
e3c68b
        (patch -p1 < $p || :)
e3c68b
    else
e3c68b
        # apply the patch with 'git apply'
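        # 'git apply' handles renames and mode changes; development-only files
        # (rfc.sh, tests, checkpatch, etc.) are excluded, presumably because
        # they are not shipped in the release tarball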
e3c68b
        git apply -p1 --exclude=rfc.sh \
e3c68b
                      --exclude=.gitignore \
e3c68b
                      --exclude=.testignore \
e3c68b
                      --exclude=MAINTAINERS \
e3c68b
                      --exclude=extras/checkpatch.pl \
e3c68b
                      --exclude=build-aux/checkpatch.pl \
e3c68b
                      --exclude='tests/*' \
e3c68b
                      ${EXCLUDE_DOCS_OPT} \
e3c68b
                      $p
e3c68b
    fi
e3c68b
e3c68b
done
e3c68b
e3c68b
echo "fixing python shebangs..."
e3c68b
%if ( %{_usepython3} )
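    # rewrite '#!/usr/bin/python*' and '#!/usr/bin/env python*' shebangs to the
    # python3 interpreter chosen at build time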
e3c68b
    for i in `find . -type f -exec bash -c "if file {} | grep 'Python script, ASCII text executable' >/dev/null; then echo {}; fi" ';'`; do
e3c68b
        sed -i -e 's|^#!/usr/bin/python.*|#!%{__python3}|' -e 's|^#!/usr/bin/env python.*|#!%{__python3}|' $i
e3c68b
    done
e3c68b
%else
e3c68b
    for f in api events extras geo-replication libglusterfs tools xlators; do
e3c68b
        find $f -type f -exec sed -i 's|/usr/bin/python3|/usr/bin/python2|' {} \;
e3c68b
    done
e3c68b
%endif
e3c68b
e3c68b
%build
e3c68b
e3c68b
# In RHEL7, a few hardening flags are available by default; however, the default
e3c68b
# RELRO behaviour is partial, so convert it to full
e3c68b
%if ( 0%{?rhel} && 0%{?rhel} >= 7 )
e3c68b
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
e3c68b
export LDFLAGS
e3c68b
%else
e3c68b
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
e3c68b
CFLAGS="$RPM_OPT_FLAGS -fPIE -DPIE"
e3c68b
LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro,-z,now"
e3c68b
%else
e3c68b
# It appears that with gcc-4.1.2 in RHEL5 there is an issue using both -fPIC and
e3c68b
# -fPIE that makes -z relro not work; -fPIE seems to undo what -fPIC does
e3c68b
CFLAGS="$CFLAGS $RPM_OPT_FLAGS"
e3c68b
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
e3c68b
%endif
e3c68b
export CFLAGS
e3c68b
export LDFLAGS
e3c68b
%endif
e3c68b
e3c68b
./autogen.sh && %configure \
e3c68b
        %{?_with_asan} \
e3c68b
        %{?_with_cmocka} \
e3c68b
        %{?_with_debug} \
e3c68b
        %{?_with_firewalld} \
e3c68b
        %{?_with_tmpfilesdir} \
e3c68b
        %{?_with_tsan} \
e3c68b
        %{?_with_valgrind} \
e3c68b
        %{?_without_epoll} \
e3c68b
        %{?_without_events} \
e3c68b
        %{?_without_fusermount} \
e3c68b
        %{?_without_georeplication} \
e3c68b
        %{?_without_ocf} \
e3c68b
        %{?_without_rdma} \
e3c68b
        %{?_without_server} \
e3c68b
        %{?_without_syslog} \
e3c68b
        %{?_without_tiering} \
e3c68b
        %{?_with_ipv6default} \
e3c68b
        %{?_without_libtirpc}
e3c68b
e3c68b
# fix hardening and remove rpath in shlibs
e3c68b
%if ( 0%{?fedora} && 0%{?fedora} > 17 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
e3c68b
sed -i 's| \\\$compiler_flags |&\\\$LDFLAGS |' libtool
e3c68b
%endif
e3c68b
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|' libtool
e3c68b
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|' libtool
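# the hardcode_libdir_flag_spec and runpath_var edits above prevent libtool
# from embedding RPATH entries in the built shared objects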
e3c68b
e3c68b
make %{?_smp_mflags}
e3c68b
e3c68b
%check
e3c68b
make check
e3c68b
e3c68b
%install
e3c68b
rm -rf %{buildroot}
e3c68b
make install DESTDIR=%{buildroot}
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%if ( 0%{_for_fedora_koji_builds} )
e3c68b
install -D -p -m 0644 %{SOURCE1} \
e3c68b
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
e3c68b
install -D -p -m 0644 %{SOURCE2} \
e3c68b
    %{buildroot}%{_sysconfdir}/sysconfig/glusterfsd
e3c68b
%else
e3c68b
install -D -p -m 0644 extras/glusterd-sysconfig \
e3c68b
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
mkdir -p %{buildroot}%{_localstatedir}/log/glusterd
e3c68b
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfs
e3c68b
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfsd
e3c68b
mkdir -p %{buildroot}%{_rundir}/gluster
e3c68b
e3c68b
# Remove unwanted files from all the shared libraries
e3c68b
find %{buildroot}%{_libdir} -name '*.a' -delete
e3c68b
find %{buildroot}%{_libdir} -name '*.la' -delete
e3c68b
e3c68b
# Remove installed docs, the ones we want are included by %%doc, in
e3c68b
# /usr/share/doc/glusterfs or /usr/share/doc/glusterfs-x.y.z depending
e3c68b
# on the distribution
e3c68b
%if ( 0%{?fedora} && 0%{?fedora} > 19 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
e3c68b
rm -rf %{buildroot}%{_pkgdocdir}/*
e3c68b
%else
e3c68b
rm -rf %{buildroot}%{_defaultdocdir}/%{name}
e3c68b
mkdir -p %{buildroot}%{_pkgdocdir}
e3c68b
%endif
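# keep only the first 50 lines of the ChangeLog and append a pointer to the
# full commit history online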
e3c68b
head -50 ChangeLog > ChangeLog.head && mv ChangeLog.head ChangeLog
e3c68b
cat << EOM >> ChangeLog
e3c68b
e3c68b
More commit messages for this ChangeLog can be found at
e3c68b
https://forge.gluster.org/glusterfs-core/glusterfs/commits/v%{version}%{?prereltag}
e3c68b
EOM
e3c68b
e3c68b
# Remove benchmarking and other unpackaged files
e3c68b
# make install always puts these in %%{_defaultdocdir}/%%{name} so don't
e3c68b
# use %%{_pkgdocdir}; that will be wrong on later Fedora distributions
e3c68b
rm -rf %{buildroot}%{_defaultdocdir}/%{name}/benchmarking
e3c68b
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs-mode.el
e3c68b
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs.vim
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
# Create working directory
e3c68b
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd
e3c68b
e3c68b
# Update configuration file to /var/lib working directory
e3c68b
sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sharedstatedir}/glusterd|g' \
e3c68b
    %{buildroot}%{_sysconfdir}/glusterfs/glusterd.vol
e3c68b
%endif
e3c68b
e3c68b
# Install glusterfsd .service or init.d file
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%if ( 0%{_for_fedora_koji_builds} )
e3c68b
%service_install glusterfsd %{glusterfsd_svcfile}
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
install -D -p -m 0644 extras/glusterfs-logrotate \
e3c68b
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs
e3c68b
e3c68b
# ganesha ghosts
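# create empty placeholder files so they can be packaged as ghost entries in
# the ganesha sub-package; their real content is generated later, e.g. by the
# NFS-Ganesha HA setup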
e3c68b
%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
e3c68b
mkdir -p %{buildroot}%{_sysconfdir}/ganesha
e3c68b
touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf
e3c68b
mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/
e3c68b
touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
e3c68b
touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_georeplication:1} )
e3c68b
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
e3c68b
touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
e3c68b
install -D -p -m 0644 extras/glusterfs-georep-logrotate \
e3c68b
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
e3c68b
touch %{buildroot}%{_sharedstatedir}/glusterd/options
e3c68b
subdirs=(add-brick create copy-file delete gsync-create remove-brick reset set start stop)
e3c68b
for dir in ${subdirs[@]}; do
e3c68b
    mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/"$dir"/{pre,post}
e3c68b
done
e3c68b
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/glustershd
e3c68b
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/peers
e3c68b
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/vols
e3c68b
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/nfs/run
e3c68b
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/bitd
e3c68b
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/quotad
e3c68b
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/scrub
e3c68b
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/snaps
e3c68b
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/ss_brick
e3c68b
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/nfs-server.vol
e3c68b
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/run/nfs.pid
e3c68b
%endif
e3c68b
e3c68b
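# copy the regression test suite into the data directory, preserving the
# directory layout (cpio -pd is pass-through mode and creates directories)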
find ./tests ./run-tests.sh -type f | cpio -pd %{buildroot}%{_prefix}/share/glusterfs
e3c68b
e3c68b
## Install bash completion for cli
e3c68b
install -p -m 0744 -D extras/command-completion/gluster.bash \
e3c68b
    %{buildroot}%{_sysconfdir}/bash_completion.d/gluster
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
echo "RHGS 3.5" > %{buildroot}%{_datadir}/glusterfs/release
e3c68b
%endif
e3c68b
e3c68b
%clean
e3c68b
rm -rf %{buildroot}
e3c68b
e3c68b
##-----------------------------------------------------------------------------
e3c68b
## All %%post should be placed here and keep them sorted
e3c68b
##
e3c68b
%post
e3c68b
/sbin/ldconfig
e3c68b
%if ( 0%{!?_without_syslog:1} )
e3c68b
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
e3c68b
%systemd_postun_with_restart rsyslog
e3c68b
%endif
e3c68b
%endif
e3c68b
exit 0
e3c68b
e3c68b
%post api
e3c68b
/sbin/ldconfig
e3c68b
e3c68b
%if ( 0%{!?_without_events:1} )
e3c68b
%post events
e3c68b
%service_enable glustereventsd
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
e3c68b
%post ganesha
e3c68b
semanage boolean -m ganesha_use_fusefs --on
e3c68b
exit 0
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_georeplication:1} )
e3c68b
%post geo-replication
e3c68b
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
e3c68b
%selinux_set_booleans %{selinuxbooleans}
e3c68b
%endif
e3c68b
if [ $1 -ge 1 ]; then
e3c68b
    %systemd_postun_with_restart glusterd
e3c68b
fi
e3c68b
exit 0
e3c68b
%endif
e3c68b
e3c68b
%post libs
e3c68b
/sbin/ldconfig
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%post server
e3c68b
# Legacy server
e3c68b
%service_enable glusterd
e3c68b
%if ( 0%{_for_fedora_koji_builds} )
e3c68b
%service_enable glusterfsd
e3c68b
%endif
e3c68b
# ".cmd_log_history" is renamed to "cmd_history.log" in GlusterFS-3.7 .
e3c68b
# While upgrading the glusterfs-server package from GlusterFS version <= 3.6 to
e3c68b
# GlusterFS version 3.7, ".cmd_log_history" should be renamed to
e3c68b
# "cmd_history.log" to retain cli command history contents.
e3c68b
if [ -f %{_localstatedir}/log/glusterfs/.cmd_log_history ]; then
e3c68b
    mv %{_localstatedir}/log/glusterfs/.cmd_log_history \
e3c68b
       %{_localstatedir}/log/glusterfs/cmd_history.log
e3c68b
fi
e3c68b
e3c68b
# Genuine Fedora (and EPEL) builds never put gluster files in /etc; if
e3c68b
# there are any files in /etc from a prior gluster.org install, move them
e3c68b
# to /var/lib. (N.B. Starting with 3.3.0 all gluster files are in /var/lib
e3c68b
# in gluster.org RPMs.) Be careful to copy them on the off chance that
e3c68b
# /etc and /var/lib are on separate file systems
e3c68b
if [ -d /etc/glusterd -a ! -h %{_sharedstatedir}/glusterd ]; then
e3c68b
    mkdir -p %{_sharedstatedir}/glusterd
e3c68b
    cp -a /etc/glusterd %{_sharedstatedir}/glusterd
e3c68b
    rm -rf /etc/glusterd
e3c68b
    ln -sf %{_sharedstatedir}/glusterd /etc/glusterd
e3c68b
fi
e3c68b
e3c68b
# Rename old volfiles in an RPM-standard way.  These aren't actually
e3c68b
# considered package config files, so %%config doesn't work for them.
e3c68b
if [ -d %{_sharedstatedir}/glusterd/vols ]; then
e3c68b
    for file in $(find %{_sharedstatedir}/glusterd/vols -name '*.vol'); do
e3c68b
        newfile=${file}.rpmsave
e3c68b
        echo "warning: ${file} saved as ${newfile}"
e3c68b
        cp ${file} ${newfile}
e3c68b
    done
e3c68b
fi
e3c68b
e3c68b
# add marker translator
e3c68b
# but first make certain that there are no old libs around to bite us
e3c68b
# BZ 834847
e3c68b
if [ -e /etc/ld.so.conf.d/glusterfs.conf ]; then
e3c68b
    rm -f /etc/ld.so.conf.d/glusterfs.conf
e3c68b
    /sbin/ldconfig
e3c68b
fi
e3c68b
e3c68b
%if (0%{?_with_firewalld:1})
e3c68b
    %firewalld_reload
e3c68b
%endif
e3c68b
e3c68b
%endif
e3c68b
e3c68b
##-----------------------------------------------------------------------------
e3c68b
## All %%pre should be placed here and keep them sorted
e3c68b
##
e3c68b
%pre
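# create the unprivileged 'gluster' group and user used by the daemons, unless
# they already exist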
e3c68b
getent group gluster > /dev/null || groupadd -r gluster
e3c68b
getent passwd gluster > /dev/null || useradd -r -g gluster -d %{_rundir}/gluster -s /sbin/nologin -c "GlusterFS daemons" gluster
e3c68b
exit 0
e3c68b
e3c68b
##-----------------------------------------------------------------------------
e3c68b
## All %%preun should be placed here and keep them sorted
e3c68b
##
e3c68b
%if ( 0%{!?_without_events:1} )
e3c68b
%preun events
e3c68b
if [ $1 -eq 0 ]; then
e3c68b
    if [ -f %glustereventsd_svcfile ]; then
e3c68b
        %service_stop glustereventsd
e3c68b
        %systemd_preun glustereventsd
e3c68b
    fi
e3c68b
fi
e3c68b
exit 0
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%preun server
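# $1 == 0 means the package is being erased; $1 >= 1 means an upgrade is in
# progress and the services only need to be restarted afterwards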
e3c68b
if [ $1 -eq 0 ]; then
e3c68b
    if [ -f %glusterfsd_svcfile ]; then
e3c68b
        %service_stop glusterfsd
e3c68b
    fi
e3c68b
    %service_stop glusterd
e3c68b
    if [ -f %glusterfsd_svcfile ]; then
e3c68b
        %systemd_preun glusterfsd
e3c68b
    fi
e3c68b
    %systemd_preun glusterd
e3c68b
fi
e3c68b
if [ $1 -ge 1 ]; then
e3c68b
    if [ -f %glusterfsd_svcfile ]; then
e3c68b
        %systemd_postun_with_restart glusterfsd
e3c68b
    fi
e3c68b
    %systemd_postun_with_restart glusterd
e3c68b
fi
e3c68b
exit 0
e3c68b
%endif
e3c68b
e3c68b
##-----------------------------------------------------------------------------
e3c68b
## All %%postun should be placed here and keep them sorted
e3c68b
##
e3c68b
%postun
e3c68b
%if ( 0%{!?_without_syslog:1} )
e3c68b
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
e3c68b
%systemd_postun_with_restart rsyslog
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%postun server
e3c68b
%if (0%{?_with_firewalld:1})
e3c68b
    %firewalld_reload
e3c68b
%endif
e3c68b
exit 0
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
e3c68b
%postun ganesha
e3c68b
semanage boolean -m ganesha_use_fusefs --off
e3c68b
exit 0
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
##-----------------------------------------------------------------------------
e3c68b
## All %%trigger should be placed here and keep them sorted
e3c68b
##
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
e3c68b
%trigger ganesha -- selinux-policy-targeted
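# re-assert the boolean whenever selinux-policy-targeted is installed or
# upgraded, since a policy update may reset it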
e3c68b
semanage boolean -m ganesha_use_fusefs --on
e3c68b
exit 0
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
##-----------------------------------------------------------------------------
e3c68b
## All %%triggerun should be placed here and keep them sorted
e3c68b
##
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
e3c68b
%triggerun ganesha -- selinux-policy-targeted
e3c68b
semanage boolean -m ganesha_use_fusefs --off
e3c68b
exit 0
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
##-----------------------------------------------------------------------------
e3c68b
## All %%files should be placed here and keep them grouped
e3c68b
##
e3c68b
%files
e3c68b
%doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS COMMITMENT
e3c68b
%{_mandir}/man8/*gluster*.8*
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%exclude %{_mandir}/man8/gluster.8*
e3c68b
%endif
e3c68b
%dir %{_localstatedir}/log/glusterfs
e3c68b
%if ( 0%{!?_without_rdma:1} )
e3c68b
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
e3c68b
%endif
e3c68b
%if 0%{?!_without_server:1}
e3c68b
%dir %{_datadir}/glusterfs
e3c68b
%dir %{_datadir}/glusterfs/scripts
e3c68b
     %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
e3c68b
     %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
e3c68b
%endif
e3c68b
%{_datadir}/glusterfs/scripts/identify-hangs.sh
e3c68b
%{_datadir}/glusterfs/scripts/collect-system-stats.sh
e3c68b
%{_datadir}/glusterfs/scripts/log_accounting.sh
e3c68b
# xlators that are needed on the client- and on the server-side
e3c68b
%dir %{_libdir}/glusterfs
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/auth
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/addr.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/login.so
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/socket.so
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/error-gen.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/delay-gen.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/io-stats.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/sink.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/trace.so
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/access-control.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/barrier.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cdc.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changelog.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/utime.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/gfid-access.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/namespace.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/read-only.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/shard.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-client.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/worm.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cloudsync.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/meta.so
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-cache.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-threads.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/md-cache.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/open-behind.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/quick-read.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/read-ahead.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/readdir-ahead.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/stat-prefetch.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/write-behind.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/nl-cache.so
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system/posix-acl.so
e3c68b
%dir %attr(0775,gluster,gluster) %{_rundir}/gluster
e3c68b
%if 0%{?_tmpfilesdir:1} && 0%{!?_without_server:1}
e3c68b
%{_tmpfilesdir}/gluster.conf
e3c68b
%endif
e3c68b
%if ( 0%{?_without_extra_xlators:1} )
e3c68b
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
e3c68b
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
e3c68b
%endif
e3c68b
%if ( 0%{?_without_regression_tests:1} )
e3c68b
%exclude %{_datadir}/glusterfs/run-tests.sh
e3c68b
%exclude %{_datadir}/glusterfs/tests
e3c68b
%endif
e3c68b
%if 0%{?_without_server:1}
e3c68b
%if ( 0%{?_with_systemd:1} )
e3c68b
%exclude %{_datadir}/glusterfs/scripts/control-cpu-load.sh
e3c68b
%exclude %{_datadir}/glusterfs/scripts/control-mem.sh
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{?_without_server:1} || 0%{?rhel} < 7 )
e3c68b
# exclude ganesha-related files for RHEL 6 and client builds
e3c68b
%exclude %{_sysconfdir}/ganesha/ganesha-ha.conf.sample
e3c68b
%exclude %{_libexecdir}/ganesha/*
e3c68b
%exclude %{_prefix}/lib/ocf/resource.d/heartbeat/*
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
%exclude %{_datadir}/glusterfs/scripts/setup-thin-arbiter.sh
e3c68b
e3c68b
%if ( 0%{?_without_server:1} )
e3c68b
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
e3c68b
%endif
e3c68b
e3c68b
%files api
e3c68b
%exclude %{_libdir}/*.so
e3c68b
# libgfapi files
e3c68b
%{_libdir}/libgfapi.*
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so
e3c68b
e3c68b
%files api-devel
e3c68b
%{_libdir}/pkgconfig/glusterfs-api.pc
e3c68b
%{_libdir}/libgfapi.so
e3c68b
%dir %{_includedir}/glusterfs
e3c68b
%dir %{_includedir}/glusterfs/api
e3c68b
     %{_includedir}/glusterfs/api/*
e3c68b
e3c68b
%files cli
e3c68b
%{_sbindir}/gluster
e3c68b
%{_mandir}/man8/gluster.8*
e3c68b
%{_sysconfdir}/bash_completion.d/gluster
e3c68b
e3c68b
%files cloudsync-plugins
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsyncs3.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsynccvlt.so
e3c68b
e3c68b
%files devel
e3c68b
%dir %{_includedir}/glusterfs
e3c68b
     %{_includedir}/glusterfs/*
e3c68b
%exclude %{_includedir}/glusterfs/api
e3c68b
%exclude %{_libdir}/libgfapi.so
e3c68b
%{_libdir}/*.so
e3c68b
%if ( 0%{?_without_server:1} )
e3c68b
%exclude %{_libdir}/pkgconfig/libgfchangelog.pc
e3c68b
%exclude %{_libdir}/libgfchangelog.so
e3c68b
%if ( 0%{!?_without_tiering:1} )
e3c68b
%exclude %{_libdir}/pkgconfig/libgfdb.pc
e3c68b
%endif
e3c68b
%else
e3c68b
%{_libdir}/pkgconfig/libgfchangelog.pc
e3c68b
%if ( 0%{!?_without_tiering:1} )
e3c68b
%{_libdir}/pkgconfig/libgfdb.pc
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
%files client-xlators
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/*.so
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so
e3c68b
e3c68b
%if ( 0%{!?_without_extra_xlators:1} )
e3c68b
%files extra-xlators
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
e3c68b
%endif
e3c68b
e3c68b
%files fuse
e3c68b
# glusterfs is a symlink to glusterfsd, -server depends on -fuse.
e3c68b
%{_sbindir}/glusterfs
e3c68b
%{_sbindir}/glusterfsd
e3c68b
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/fuse.so
e3c68b
/sbin/mount.glusterfs
e3c68b
%if ( 0%{!?_without_fusermount:1} )
e3c68b
%{_bindir}/fusermount-glusterfs
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_georeplication:1} )
e3c68b
%files geo-replication
e3c68b
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep
e3c68b
e3c68b
%{_sbindir}/gfind_missing_files
e3c68b
%{_sbindir}/gluster-mountbroker
e3c68b
%dir %{_libexecdir}/glusterfs
e3c68b
%dir %{_libexecdir}/glusterfs/python
e3c68b
%dir %{_libexecdir}/glusterfs/python/syncdaemon
e3c68b
     %{_libexecdir}/glusterfs/gsyncd
e3c68b
     %{_libexecdir}/glusterfs/python/syncdaemon/*
e3c68b
     %{_libexecdir}/glusterfs/gverify.sh
e3c68b
     %{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
e3c68b
     %{_libexecdir}/glusterfs/peer_gsec_create
e3c68b
     %{_libexecdir}/glusterfs/peer_mountbroker
e3c68b
     %{_libexecdir}/glusterfs/peer_mountbroker.py*
e3c68b
     %{_libexecdir}/glusterfs/gfind_missing_files
e3c68b
     %{_libexecdir}/glusterfs/peer_georep-sshkey.py*
e3c68b
%{_sbindir}/gluster-georep-sshkey
e3c68b
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/geo-replication
e3c68b
%ghost      %attr(0644,-,-) %{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post
e3c68b
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/pre
e3c68b
e3c68b
%dir %{_datadir}/glusterfs
e3c68b
%dir %{_datadir}/glusterfs/scripts
e3c68b
     %{_datadir}/glusterfs/scripts/get-gfid.sh
e3c68b
     %{_datadir}/glusterfs/scripts/slave-upgrade.sh
e3c68b
     %{_datadir}/glusterfs/scripts/gsync-upgrade.sh
e3c68b
     %{_datadir}/glusterfs/scripts/generate-gfid-file.sh
e3c68b
     %{_datadir}/glusterfs/scripts/gsync-sync-gfid
e3c68b
     %{_datadir}/glusterfs/scripts/schedule_georep.py*
e3c68b
%endif
e3c68b
e3c68b
%files libs
e3c68b
%{_libdir}/*.so.*
e3c68b
%exclude %{_libdir}/libgfapi.*
e3c68b
%if ( 0%{!?_without_tiering:1} )
e3c68b
# libgfdb is only needed server-side
e3c68b
%exclude %{_libdir}/libgfdb.*
e3c68b
%endif
e3c68b
e3c68b
%files -n python%{_pythonver}-gluster
e3c68b
# introduce the gluster module in site-packages
e3c68b
# so that all other gluster submodules can reside in the same namespace.
e3c68b
%if ( %{_usepython3} )
e3c68b
%dir %{python3_sitelib}/gluster
e3c68b
     %{python3_sitelib}/gluster/__init__.*
e3c68b
     %{python3_sitelib}/gluster/__pycache__
e3c68b
     %{python3_sitelib}/gluster/cliutils
e3c68b
%else
e3c68b
%dir %{python2_sitelib}/gluster
e3c68b
     %{python2_sitelib}/gluster/__init__.*
e3c68b
     %{python2_sitelib}/gluster/cliutils
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_rdma:1} )
e3c68b
%files rdma
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_regression_tests:1} )
e3c68b
%files regression-tests
e3c68b
%dir %{_datadir}/glusterfs
e3c68b
     %{_datadir}/glusterfs/run-tests.sh
e3c68b
     %{_datadir}/glusterfs/tests
e3c68b
%exclude %{_datadir}/glusterfs/tests/vagrant
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
e3c68b
%files ganesha
e3c68b
%dir %{_libexecdir}/ganesha
e3c68b
%{_sysconfdir}/ganesha/ganesha-ha.conf.sample
e3c68b
%{_libexecdir}/ganesha/*
e3c68b
%{_prefix}/lib/ocf/resource.d/heartbeat/*
e3c68b
%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
e3c68b
%ghost      %attr(0644,-,-) %config(noreplace) %{_sysconfdir}/ganesha/ganesha-ha.conf
e3c68b
%ghost %dir %attr(0755,-,-) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha
e3c68b
%ghost      %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
e3c68b
%ghost      %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_ocf:1} )
e3c68b
%files resource-agents
e3c68b
# /usr/lib is the standard for OCF, also on x86_64
e3c68b
%{_prefix}/lib/ocf/resource.d/glusterfs
e3c68b
%endif
e3c68b
e3c68b
%if ( 0%{!?_without_server:1} )
e3c68b
%files server
e3c68b
%doc extras/clear_xattrs.sh
e3c68b
%{_datadir}/glusterfs/scripts/quota_fsck.py*
e3c68b
# sysconf
e3c68b
%config(noreplace) %{_sysconfdir}/glusterfs
e3c68b
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
e3c68b
%exclude %{_sysconfdir}/glusterfs/eventsconfig.json
e3c68b
%config(noreplace) %{_sysconfdir}/sysconfig/glusterd
e3c68b
%if ( 0%{_for_fedora_koji_builds} )
e3c68b
%config(noreplace) %{_sysconfdir}/sysconfig/glusterfsd
e3c68b
%endif
e3c68b
e3c68b
# init files
e3c68b
%glusterd_svcfile
e3c68b
%if ( 0%{_for_fedora_koji_builds} )
e3c68b
%glusterfsd_svcfile
e3c68b
%endif
e3c68b
%if ( 0%{?_with_systemd:1} )
e3c68b
%glusterfssharedstorage_svcfile
e3c68b
%endif
e3c68b
e3c68b
# binaries
e3c68b
%{_sbindir}/glusterd
e3c68b
%{_sbindir}/glfsheal
e3c68b
%{_sbindir}/gf_attach
e3c68b
%{_sbindir}/gluster-setgfid2path
e3c68b
# {_sbindir}/glusterfsd is the actual binary, but glusterfs (client) is a
e3c68b
# symlink. The binary itself (and symlink) are part of the glusterfs-fuse
e3c68b
# package, because glusterfs-server depends on that anyway.
e3c68b
e3c68b
# Manpages
e3c68b
%{_mandir}/man8/gluster-setgfid2path.8*
e3c68b
e3c68b
# xlators
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/sdfs.so
e3c68b
%if ( 0%{!?_without_tiering:1} )
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
e3c68b
     %{_libdir}/libgfdb.so.*
e3c68b
%endif
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/selinux.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server.so
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage/posix.so
e3c68b
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
e3c68b
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
e3c68b
e3c68b
# snap_scheduler
e3c68b
%{_sbindir}/snap_scheduler.py
e3c68b
%{_sbindir}/gcron.py
e3c68b
%{_sbindir}/conf.py
e3c68b
e3c68b
# /var/lib/glusterd, e.g. hookscripts, etc.
e3c68b
%ghost      %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
e3c68b
%ghost      %attr(0600,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/options
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/bitd
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/groups
e3c68b
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/virt
e3c68b
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/metadata-cache
e3c68b
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/gluster-block
e3c68b
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/nl-cache
e3c68b
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/db-workload
e3c68b
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/distributed-virt
e3c68b
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/samba
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind/.keys
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glustershd
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post
e3c68b
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/disabled-quota-root-xattr-heal.sh
e3c68b
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/S13create-subdir-mounts.sh
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre
e3c68b
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/pre
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/post
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/pre
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/post
e3c68b
                            %{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/post
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/pre
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/post
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/pre
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post
e3c68b
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S30samba-set.sh
e3c68b
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S32gluster_enable_shared_storage.sh
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/pre
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post
e3c68b
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
e3c68b
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/pre
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/post
e3c68b
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre
e3c68b
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh
e3c68b
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs
e3c68b
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run
e3c68b
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/peers
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/quotad
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/scrub
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/snaps
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/ss_brick
e3c68b
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/vols
e3c68b
e3c68b
# Extra utility script
e3c68b
%dir %{_libexecdir}/glusterfs
e3c68b
     %{_datadir}/glusterfs/release
e3c68b
%dir %{_datadir}/glusterfs/scripts
e3c68b
     %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
e3c68b
%if ( 0%{?_with_systemd:1} )
e3c68b
     %{_libexecdir}/glusterfs/mount-shared-storage.sh
e3c68b
     %{_datadir}/glusterfs/scripts/control-cpu-load.sh
e3c68b
     %{_datadir}/glusterfs/scripts/control-mem.sh
e3c68b
%endif
e3c68b
e3c68b
# Incremental API (glusterfind)
e3c68b
     %{_libexecdir}/glusterfs/glusterfind
e3c68b
%{_bindir}/glusterfind
e3c68b
     %{_libexecdir}/glusterfs/peer_add_secret_pub
e3c68b
e3c68b
%if ( 0%{?_with_firewalld:1} )
e3c68b
%{_prefix}/lib/firewalld/services/glusterfs.xml
e3c68b
%endif
e3c68b
# end of server files
e3c68b
%endif
e3c68b
e3c68b
# Events
e3c68b
%if ( 0%{!?_without_events:1} )
e3c68b
%files events
e3c68b
%config(noreplace) %{_sysconfdir}/glusterfs/eventsconfig.json
e3c68b
%dir %{_sharedstatedir}/glusterd
e3c68b
%dir %{_sharedstatedir}/glusterd/events
e3c68b
%dir %{_libexecdir}/glusterfs
e3c68b
     %{_libexecdir}/glusterfs/gfevents
e3c68b
     %{_libexecdir}/glusterfs/peer_eventsapi.py*
e3c68b
%{_sbindir}/glustereventsd
e3c68b
%{_sbindir}/gluster-eventsapi
e3c68b
%{_datadir}/glusterfs/scripts/eventsdash.py*
e3c68b
%if ( 0%{?_with_systemd:1} )
e3c68b
%{_unitdir}/glustereventsd.service
e3c68b
%else
e3c68b
%{_sysconfdir}/init.d/glustereventsd
e3c68b
%endif
e3c68b
%endif
e3c68b
e3c68b
##-----------------------------------------------------------------------------
e3c68b
## All %pretrans should be placed here and keep them sorted
e3c68b
##
e3c68b
%if 0%{!?_without_server:1}
e3c68b
%pretrans -p <lua>
e3c68b
if not posix.access("/bin/bash", "x") then
e3c68b
    -- initial installation, no shell, no running glusterfsd
e3c68b
    return 0
e3c68b
end
e3c68b
e3c68b
-- TODO: move this completely to a lua script
e3c68b
-- For now, we write a temporary bash script and execute that.
e3c68b
e3c68b
script = [[#!/bin/sh
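# When a glusterfsd brick process is still running, inspect each volume's
# 'info' file under /var/lib/glusterd/vols: type=0 denotes a plain distribute
# volume and status=1 a started one.  A started distribute volume fails the
# check with the ERROR below; otherwise the script still exits non-zero after
# printing the upgrade WARNINGs, so the rpm transaction is aborted either way.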
e3c68b
pidof -c -o %PPID -x glusterfsd &>/dev/null
e3c68b
e3c68b
if [ $? -eq 0 ]; then
e3c68b
   pushd . > /dev/null 2>&1
e3c68b
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e3c68b
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e3c68b
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e3c68b
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e3c68b
          echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped."
e3c68b
          echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!"
e3c68b
          exit 1;
e3c68b
       fi
e3c68b
   done
e3c68b
e3c68b
   popd > /dev/null 2>&1
e3c68b
   echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime."
e3c68b
   echo "WARNING: Ensure to wait for the upgraded server to finish healing before proceeding."
e3c68b
   echo "WARNING: Refer upgrade section of install guide for more details"
e3c68b
   echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;"
e3c68b
   exit 1;
e3c68b
fi
e3c68b
]]
e3c68b
e3c68b
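-- os.execute() returns a single numeric status on Lua 5.1, but a boolean plus
-- ("exit"|"signal", code) on Lua 5.2 and later; 'val or ok' picks whichever
-- form rpm's embedded Lua provides, so any non-zero result raises an error
-- and aborts the transaction.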
ok, how, val = os.execute(script)
e3c68b
rc = val or ok
e3c68b
if not (rc == 0) then
e3c68b
   error("Detected running glusterfs processes", rc)
e3c68b
end
e3c68b
e3c68b
e3c68b
e3c68b
%pretrans api -p <lua>
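-- Same running-glusterfsd check as the main package's scriptlet above, minus
-- the user-facing ERROR/WARNING messages; the subpackage scriptlets below
-- repeat it verbatim so that installing any one of them is blocked as well.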
e3c68b
if not posix.access("/bin/bash", "x") then
e3c68b
    -- initial installation, no shell, no running glusterfsd
e3c68b
    return 0
e3c68b
end
e3c68b
e3c68b
-- TODO: move this completely to a lua script
e3c68b
-- For now, we write a temporary bash script and execute that.
e3c68b
e3c68b
script = [[#!/bin/sh
e3c68b
pidof -c -o %PPID -x glusterfsd &>/dev/null
e3c68b
e3c68b
if [ $? -eq 0 ]; then
e3c68b
   pushd . > /dev/null 2>&1
e3c68b
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e3c68b
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e3c68b
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e3c68b
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e3c68b
          exit 1;
e3c68b
       fi
e3c68b
   done
e3c68b
e3c68b
   popd > /dev/null 2>&1
e3c68b
   exit 1;
e3c68b
fi
e3c68b
]]
e3c68b
e3c68b
ok, how, val = os.execute(script)
e3c68b
rc = val or ok
e3c68b
if not (rc == 0) then
e3c68b
   error("Detected running glusterfs processes", rc)
e3c68b
end
e3c68b
e3c68b
e3c68b
e3c68b
%pretrans api-devel -p <lua>
e3c68b
if not posix.access("/bin/bash", "x") then
e3c68b
    -- initial installation, no shell, no running glusterfsd
e3c68b
    return 0
e3c68b
end
e3c68b
e3c68b
-- TODO: move this completely to a lua script
e3c68b
-- For now, we write a temporary bash script and execute that.
e3c68b
e3c68b
script = [[#!/bin/sh
e3c68b
pidof -c -o %PPID -x glusterfsd &>/dev/null
e3c68b
e3c68b
if [ $? -eq 0 ]; then
e3c68b
   pushd . > /dev/null 2>&1
e3c68b
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e3c68b
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e3c68b
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e3c68b
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e3c68b
          exit 1;
e3c68b
       fi
e3c68b
   done
e3c68b
e3c68b
   popd > /dev/null 2>&1
e3c68b
   exit 1;
e3c68b
fi
e3c68b
]]
e3c68b
e3c68b
ok, how, val = os.execute(script)
e3c68b
rc = val or ok
e3c68b
if not (rc == 0) then
e3c68b
   error("Detected running glusterfs processes", rc)
e3c68b
end
e3c68b
e3c68b
e3c68b
e3c68b
%pretrans cli -p <lua>
e3c68b
if not posix.access("/bin/bash", "x") then
e3c68b
    -- initial installation, no shell, no running glusterfsd
e3c68b
    return 0
e3c68b
end
e3c68b
e3c68b
-- TODO: move this completely to a lua script
e3c68b
-- For now, we write a temporary bash script and execute that.
e3c68b
e3c68b
script = [[#!/bin/sh
e3c68b
pidof -c -o %PPID -x glusterfsd &>/dev/null
e3c68b
e3c68b
if [ $? -eq 0 ]; then
e3c68b
   pushd . > /dev/null 2>&1
e3c68b
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e3c68b
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e3c68b
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e3c68b
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e3c68b
          exit 1;
e3c68b
       fi
e3c68b
   done
e3c68b
e3c68b
   popd > /dev/null 2>&1
e3c68b
   exit 1;
e3c68b
fi
e3c68b
]]
e3c68b
e3c68b
ok, how, val = os.execute(script)
e3c68b
rc = val or ok
e3c68b
if not (rc == 0) then
e3c68b
   error("Detected running glusterfs processes", rc)
e3c68b
end
e3c68b
e3c68b
e3c68b
%pretrans client-xlators -p <lua>
e3c68b
if not posix.access("/bin/bash", "x") then
e3c68b
    -- initial installation, no shell, no running glusterfsd
e3c68b
    return 0
e3c68b
end
e3c68b
e3c68b
-- TODO: move this completely to a lua script
e3c68b
-- For now, we write a temporary bash script and execute that.
e3c68b
e3c68b
script = [[#!/bin/sh
e3c68b
pidof -c -o %PPID -x glusterfsd &>/dev/null
e3c68b
e3c68b
if [ $? -eq 0 ]; then
e3c68b
   pushd . > /dev/null 2>&1
e3c68b
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e3c68b
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e3c68b
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e3c68b
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e3c68b
          exit 1;
e3c68b
       fi
e3c68b
   done
e3c68b
e3c68b
   popd > /dev/null 2>&1
e3c68b
   exit 1;
e3c68b
fi
e3c68b
]]
e3c68b
e3c68b
ok, how, val = os.execute(script)
e3c68b
rc = val or ok
e3c68b
if not (rc == 0) then
e3c68b
   error("Detected running glusterfs processes", rc)
e3c68b
end
e3c68b
e3c68b
e3c68b
%pretrans fuse -p <lua>
e3c68b
if not posix.access("/bin/bash", "x") then
e3c68b
    -- initial installation, no shell, no running glusterfsd
e3c68b
    return 0
e3c68b
end
e3c68b
e3c68b
-- TODO: move this completely to a lua script
e3c68b
-- For now, we write a temporary bash script and execute that.
e3c68b
e3c68b
script = [[#!/bin/sh
e3c68b
pidof -c -o %PPID -x glusterfsd &>/dev/null
e3c68b
e3c68b
if [ $? -eq 0 ]; then
e3c68b
   pushd . > /dev/null 2>&1
e3c68b
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e3c68b
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e3c68b
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e3c68b
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e3c68b
          exit 1;
e3c68b
       fi
e3c68b
   done
e3c68b
e3c68b
   popd > /dev/null 2>&1
e3c68b
   exit 1;
e3c68b
fi
e3c68b
]]
e3c68b
e3c68b
ok, how, val = os.execute(script)
e3c68b
rc = val or ok
e3c68b
if not (rc == 0) then
e3c68b
   error("Detected running glusterfs processes", rc)
e3c68b
end
e3c68b
e3c68b
e3c68b
e3c68b
%if ( 0%{!?_without_georeplication:1} )
e3c68b
%pretrans geo-replication -p <lua>
e3c68b
if not posix.access("/bin/bash", "x") then
e3c68b
    -- initial installation, no shell, no running glusterfsd
e3c68b
    return 0
e3c68b
end
e3c68b
e3c68b
-- TODO: move this completely to a lua script
e3c68b
-- For now, we write a temporary bash script and execute that.
e3c68b
e3c68b
script = [[#!/bin/sh
e3c68b
pidof -c -o %PPID -x glusterfsd &>/dev/null
e3c68b
e3c68b
if [ $? -eq 0 ]; then
e3c68b
   pushd . > /dev/null 2>&1
e3c68b
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e3c68b
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e3c68b
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e3c68b
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e3c68b
          exit 1;
e3c68b
       fi
e3c68b
   done
e3c68b
e3c68b
   popd > /dev/null 2>&1
e3c68b
   exit 1;
e3c68b
fi
e3c68b
]]
e3c68b
e3c68b
ok, how, val = os.execute(script)
e3c68b
rc = val or ok
e3c68b
if not (rc == 0) then
e3c68b
   error("Detected running glusterfs processes", rc)
e3c68b
end
e3c68b
%endif
e3c68b
e3c68b
e3c68b
e3c68b
%pretrans libs -p <lua>
e3c68b
if not posix.access("/bin/bash", "x") then
e3c68b
    -- initial installation, no shell, no running glusterfsd
e3c68b
    return 0
e3c68b
end
e3c68b
e3c68b
-- TODO: move this completely to a lua script
e3c68b
-- For now, we write a temporary bash script and execute that.
e3c68b
e3c68b
script = [[#!/bin/sh
e3c68b
pidof -c -o %PPID -x glusterfsd &>/dev/null
e3c68b
e3c68b
if [ $? -eq 0 ]; then
e3c68b
   pushd . > /dev/null 2>&1
e3c68b
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e3c68b
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e3c68b
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e3c68b
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e3c68b
          exit 1;
e3c68b
       fi
e3c68b
   done
e3c68b
e3c68b
   popd > /dev/null 2>&1
e3c68b
   exit 1;
e3c68b
fi
e3c68b
]]
e3c68b
e3c68b
ok, how, val = os.execute(script)
e3c68b
rc = val or ok
e3c68b
if not (rc == 0) then
e3c68b
   error("Detected running glusterfs processes", rc)
e3c68b
end
e3c68b
e3c68b
e3c68b
e3c68b
%if ( 0%{!?_without_rdma:1} )
e3c68b
%pretrans rdma -p <lua>
e3c68b
if not posix.access("/bin/bash", "x") then
e3c68b
    -- initial installation, no shell, no running glusterfsd
e3c68b
    return 0
e3c68b
end
e3c68b
e3c68b
-- TODO: move this completely to a lua script
e3c68b
-- For now, we write a temporary bash script and execute that.
e3c68b
e3c68b
script = [[#!/bin/sh
e3c68b
pidof -c -o %PPID -x glusterfsd &>/dev/null
e3c68b
e3c68b
if [ $? -eq 0 ]; then
e3c68b
   pushd . > /dev/null 2>&1
e3c68b
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e3c68b
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e3c68b
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e3c68b
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e3c68b
          exit 1;
e3c68b
       fi
e3c68b
   done
e3c68b
e3c68b
   popd > /dev/null 2>&1
e3c68b
   exit 1;
e3c68b
fi
e3c68b
]]
e3c68b
e3c68b
ok, how, val = os.execute(script)
e3c68b
rc = val or ok
e3c68b
if not (rc == 0) then
e3c68b
   error("Detected running glusterfs processes", rc)
e3c68b
end
e3c68b
%endif
e3c68b
e3c68b
e3c68b
e3c68b
%if ( 0%{!?_without_ocf:1} )
e3c68b
%pretrans resource-agents -p <lua>
e3c68b
if not posix.access("/bin/bash", "x") then
e3c68b
    -- initial installation, no shell, no running glusterfsd
e3c68b
    return 0
e3c68b
end
e3c68b
e3c68b
-- TODO: move this completely to a lua script
e3c68b
-- For now, we write a temporary bash script and execute that.
e3c68b
e3c68b
script = [[#!/bin/sh
e3c68b
pidof -c -o %PPID -x glusterfsd &>/dev/null
e3c68b
e3c68b
if [ $? -eq 0 ]; then
e3c68b
   pushd . > /dev/null 2>&1
e3c68b
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e3c68b
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e3c68b
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e3c68b
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e3c68b
          exit 1;
e3c68b
       fi
e3c68b
   done
e3c68b
e3c68b
   popd > /dev/null 2>&1
e3c68b
   exit 1;
e3c68b
fi
e3c68b
]]
e3c68b
e3c68b
ok, how, val = os.execute(script)
e3c68b
rc = val or ok
e3c68b
if not (rc == 0) then
e3c68b
   error("Detected running glusterfs processes", rc)
e3c68b
end
e3c68b
%endif
e3c68b
e3c68b
e3c68b
e3c68b
%pretrans server -p <lua>
e3c68b
if not posix.access("/bin/bash", "x") then
e3c68b
    -- initial installation, no shell, no running glusterfsd
e3c68b
    return 0
e3c68b
end
e3c68b
e3c68b
-- TODO: move this completely to a lua script
e3c68b
-- For now, we write a temporary bash script and execute that.
e3c68b
e3c68b
script = [[#!/bin/sh
e3c68b
pidof -c -o %PPID -x glusterfsd &>/dev/null
e3c68b
e3c68b
if [ $? -eq 0 ]; then
e3c68b
   pushd . > /dev/null 2>&1
e3c68b
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e3c68b
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e3c68b
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e3c68b
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e3c68b
          exit 1;
e3c68b
       fi
e3c68b
   done
e3c68b
e3c68b
   popd > /dev/null 2>&1
e3c68b
   exit 1;
e3c68b
fi
e3c68b
]]
e3c68b
e3c68b
ok, how, val = os.execute(script)
e3c68b
rc = val or ok
e3c68b
if not (rc == 0) then
e3c68b
   error("Detected running glusterfs processes", rc)
e3c68b
end
e3c68b
e3c68b
%posttrans server
e3c68b
pidof -c -o %PPID -x glusterd &> /dev/null
e3c68b
if [ $? -eq 0 ]; then
e3c68b
    kill -9 `pgrep -f gsyncd.py` &> /dev/null
e3c68b
e3c68b
    killall --wait -SIGTERM glusterd &> /dev/null
e3c68b
e3c68b
    if [ "$?" != "0" ]; then
e3c68b
        echo "killall failed while killing glusterd"
e3c68b
    fi
e3c68b
e3c68b
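    # Run glusterd once in the foreground (-N) in upgrade mode so that it
    # regenerates the volfiles for the new version and then exits.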
    glusterd --xlator-option '*.upgrade=on' -N
e3c68b
e3c68b
    # Clean up the leftover glusterd socket file, which glusterd creates in
e3c68b
    # the rpm_script_t SELinux context.
e3c68b
    rm -rf /var/run/glusterd.socket
e3c68b
e3c68b
    # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
e3c68b
    # so start it again
e3c68b
    %service_start glusterd
e3c68b
else
e3c68b
    glusterd --xlator-option '*.upgrade=on' -N
e3c68b
e3c68b
    # Clean up the leftover glusterd socket file, which glusterd creates in
e3c68b
    # the rpm_script_t SELinux context.
e3c68b
    rm -rf /var/run/glusterd.socket
e3c68b
fi
e3c68b
e3c68b
%endif
e3c68b
e3c68b
%changelog
b51a1f
* Mon Aug 30 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.4
b51a1f
- Add gating.yaml, fixes bugs bz#1996984
b51a1f
b51a1f
* Tue Aug 24 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.3
b51a1f
- fixes bugs bz#1996984
b51a1f
b51a1f
* Thu May 06 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.2
b51a1f
- fixes bugs bz#1953901
b51a1f
b51a1f
* Thu Apr 22 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.1
b51a1f
- fixes bugs bz#1927235
b51a1f
b51a1f
* Wed Apr 14 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56
b51a1f
- fixes bugs bz#1948547
b51a1f
b51a1f
* Fri Mar 19 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-55
b51a1f
- fixes bugs bz#1939372
b51a1f
b51a1f
* Wed Mar 03 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-54
b51a1f
- fixes bugs bz#1832306 bz#1911292 bz#1924044
b51a1f
b51a1f
* Thu Feb 11 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-53
b51a1f
- fixes bugs bz#1224906 bz#1691320 bz#1719171 bz#1814744 bz#1865796
b51a1f
b51a1f
* Thu Jan 28 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-52
b51a1f
- fixes bugs bz#1600459 bz#1719171 bz#1830713 bz#1856574
b51a1f
b51a1f
* Mon Dec 28 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-51
b51a1f
- fixes bugs bz#1640148 bz#1856574 bz#1910119
b51a1f
b51a1f
* Tue Dec 15 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-50
b51a1f
- fixes bugs bz#1224906 bz#1412494 bz#1612973 bz#1663821 bz#1691320 
b51a1f
  bz#1726673 bz#1749304 bz#1752739 bz#1779238 bz#1813866 bz#1814744 bz#1821599 
b51a1f
  bz#1832306 bz#1835229 bz#1842449 bz#1865796 bz#1878077 bz#1882923 bz#1885966 
b51a1f
  bz#1890506 bz#1896425 bz#1898776 bz#1898777 bz#1898778 bz#1898781 bz#1898784 
b51a1f
  bz#1903468
d84cf8
d84cf8
* Wed Nov 25 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-49
d84cf8
- fixes bugs bz#1286171
d84cf8
d84cf8
* Tue Nov 10 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-48
d84cf8
- fixes bugs bz#1895301
d84cf8
d84cf8
* Thu Nov 05 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-47
d84cf8
- fixes bugs bz#1286171 bz#1821743 bz#1837926
d84cf8
d84cf8
* Wed Oct 21 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-46
d84cf8
- fixes bugs bz#1873469 bz#1881823
d84cf8
d84cf8
* Wed Sep 09 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-45
d84cf8
- fixes bugs bz#1785714
d84cf8
d84cf8
* Thu Sep 03 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-44
d84cf8
- fixes bugs bz#1460657
d84cf8
d84cf8
* Thu Sep 03 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-43
d84cf8
- fixes bugs bz#1460657
d84cf8
d84cf8
* Wed Sep 02 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-42
d84cf8
- fixes bugs bz#1785714
d84cf8
d84cf8
* Tue Aug 25 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-41
d84cf8
- fixes bugs bz#1785714 bz#1851424 bz#1851989 bz#1852736 bz#1853189 bz#1855966
d84cf8
d84cf8
* Tue Jul 21 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-40
d84cf8
- fixes bugs bz#1812789 bz#1844359 bz#1847081 bz#1854165
d84cf8
d84cf8
* Wed Jun 17 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-39
d84cf8
- fixes bugs bz#1844359 bz#1845064
7c14e0
d84cf8
* Wed Jun 10 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-38
d84cf8
- fixes bugs bz#1234220 bz#1286171 bz#1487177 bz#1524457 bz#1640573 
d84cf8
  bz#1663557 bz#1667954 bz#1683602 bz#1686897 bz#1721355 bz#1748865 bz#1750211 
d84cf8
  bz#1754391 bz#1759875 bz#1761531 bz#1761932 bz#1763124 bz#1763129 bz#1764091 
d84cf8
  bz#1775637 bz#1776901 bz#1781550 bz#1781649 bz#1781710 bz#1783232 bz#1784211 
d84cf8
  bz#1784415 bz#1786516 bz#1786681 bz#1787294 bz#1787310 bz#1787331 bz#1787994 
d84cf8
  bz#1790336 bz#1792873 bz#1794663 bz#1796814 bz#1804164 bz#1810924 bz#1815434 
d84cf8
  bz#1836099 bz#1837467 bz#1837926 bz#1838479 bz#1839137 bz#1844359
87c3ef
e3c68b
* Fri May 29 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-37
e3c68b
- fixes bugs bz#1840794
e3c68b
e3c68b
* Wed May 27 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-36
e3c68b
- fixes bugs bz#1812789 bz#1823423
e3c68b
e3c68b
* Fri May 22 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-35
e3c68b
- fixes bugs bz#1810516 bz#1830713 bz#1836233
e3c68b
e3c68b
* Sun May 17 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-34
e3c68b
- fixes bugs bz#1802013 bz#1823706 bz#1825177 bz#1830713 bz#1831403 bz#1833017
e3c68b
e3c68b
* Wed Apr 29 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-33
e3c68b
- fixes bugs bz#1812789 bz#1813917 bz#1823703 bz#1823706 bz#1825195
e3c68b
e3c68b
* Sat Apr 04 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-32
e3c68b
- fixes bugs bz#1781543 bz#1812789 bz#1812824 bz#1817369 bz#1819059
e3c68b
e3c68b
* Tue Mar 17 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-31
e3c68b
- fixes bugs bz#1802727
e3c68b
e3c68b
* Thu Feb 20 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-30.1
e3c68b
- fixes bugs bz#1800703
e3c68b
e3c68b
* Sat Feb 01 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-30
e3c68b
- fixes bugs bz#1775564 bz#1794153
e3c68b
e3c68b
* Thu Jan 23 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-29
e3c68b
- fixes bugs bz#1793035
e3c68b
e3c68b
* Tue Jan 14 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-28
e3c68b
- fixes bugs bz#1789447
e3c68b
e3c68b
* Mon Jan 13 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-27
e3c68b
- fixes bugs bz#1789447
e3c68b
e3c68b
* Fri Jan 10 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-26
e3c68b
- fixes bugs bz#1763208 bz#1788656
e3c68b
e3c68b
* Mon Dec 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-25
e3c68b
- fixes bugs bz#1686800 bz#1763208 bz#1779696 bz#1781444 bz#1782162
e3c68b
e3c68b
* Thu Nov 28 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-24
e3c68b
- fixes bugs bz#1768786
e3c68b
e3c68b
* Thu Nov 21 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-23
e3c68b
- fixes bugs bz#1344758 bz#1599802 bz#1685406 bz#1686800 bz#1724021 
e3c68b
  bz#1726058 bz#1727755 bz#1731513 bz#1741193 bz#1758923 bz#1761326 bz#1761486 
e3c68b
  bz#1762180 bz#1764095 bz#1766640
e3c68b
e3c68b
* Thu Nov 14 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-22
e3c68b
- fixes bugs bz#1771524 bz#1771614
e3c68b
e3c68b
* Fri Oct 25 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-21
e3c68b
- fixes bugs bz#1765555
e3c68b
e3c68b
* Wed Oct 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-20
e3c68b
- fixes bugs bz#1719171 bz#1763412 bz#1764202
e3c68b
e3c68b
* Thu Oct 17 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-19
e3c68b
- fixes bugs bz#1760939
e3c68b
e3c68b
* Wed Oct 16 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-18
e3c68b
- fixes bugs bz#1758432
e3c68b
e3c68b
* Fri Oct 11 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-17
e3c68b
- fixes bugs bz#1704562 bz#1758618 bz#1760261
e3c68b
e3c68b
* Wed Oct 09 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-16
e3c68b
- fixes bugs bz#1752713 bz#1756325
e3c68b
e3c68b
* Fri Sep 27 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-15
e3c68b
- fixes bugs bz#1726000 bz#1731826 bz#1754407 bz#1754790 bz#1755227
e3c68b
e3c68b
* Fri Sep 20 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-14
e3c68b
- fixes bugs bz#1719171 bz#1728673 bz#1731896 bz#1732443 bz#1733970 
e3c68b
  bz#1745107 bz#1746027 bz#1748688 bz#1750241 bz#1572163
e3c68b
e3c68b
* Fri Aug 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-13
e3c68b
- fixes bugs bz#1729915 bz#1732376 bz#1743611 bz#1743627 bz#1743634 bz#1744518
e3c68b
e3c68b
* Fri Aug 09 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-12
e3c68b
- fixes bugs bz#1730914 bz#1731448 bz#1732770 bz#1732792 bz#1733531 
e3c68b
  bz#1734305 bz#1734534 bz#1734734 bz#1735514 bz#1737705 bz#1732774
e3c68b
  bz#1732793
e3c68b
e3c68b
* Tue Aug 06 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-11
e3c68b
- fixes bugs bz#1733520 bz#1734423
e3c68b
e3c68b
* Fri Aug 02 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-10
e3c68b
- fixes bugs bz#1713890
e3c68b
e3c68b
* Tue Jul 23 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-9
e3c68b
- fixes bugs bz#1708064 bz#1708180 bz#1715422 bz#1720992 bz#1722757
e3c68b
e3c68b
* Tue Jul 16 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-8
e3c68b
- fixes bugs bz#1698435 bz#1712591 bz#1715447 bz#1720488 bz#1722209
e3c68b
  bz#1722512 bz#1724089 bz#1726991 bz#1727785 bz#1729108
e3c68b
e3c68b
* Fri Jun 28 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-7
e3c68b
- fixes bugs bz#1573077 bz#1600918 bz#1703423 bz#1704207 bz#1708064
e3c68b
  bz#1709301 bz#1713664 bz#1716760 bz#1717784 bz#1720163 bz#1720192
e3c68b
  bz#1720551 bz#1721351 bz#1721357 bz#1721477 bz#1722131 bz#1722331
e3c68b
  bz#1722509 bz#1722801 bz#1720248
e3c68b
e3c68b
* Fri Jun 14 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-6
e3c68b
- fixes bugs bz#1668001 bz#1708043 bz#1708183 bz#1710701 
e3c68b
  bz#1719640 bz#1720079 bz#1720248 bz#1720318 bz#1720461
e3c68b
e3c68b
* Tue Jun 11 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-5
e3c68b
- fixes bugs bz#1573077 bz#1694595 bz#1703434 bz#1714536 bz#1714588 
e3c68b
  bz#1715407 bz#1715438 bz#1705018
e3c68b
e3c68b
* Fri Jun 07 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-4
e3c68b
- fixes bugs bz#1480907 bz#1702298 bz#1703455 bz#1704181 bz#1707246
e3c68b
  bz#1708067 bz#1708116 bz#1708121 bz#1709087 bz#1711249 bz#1711296 
e3c68b
  bz#1714078 bz#1714124 bz#1716385 bz#1716626 bz#1716821 bz#1716865 bz#1717927
e3c68b
e3c68b
* Tue May 14 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-3
e3c68b
- fixes bugs bz#1583585 bz#1671862 bz#1702686 bz#1703434 bz#1703753 
e3c68b
  bz#1703897 bz#1704562 bz#1704769 bz#1704851 bz#1706683 bz#1706776 bz#1706893
e3c68b
e3c68b
* Thu Apr 25 2019 Milind Changire <mchangir@redhat.com> - 6.0-2
e3c68b
- fixes bugs bz#1471742 bz#1652461 bz#1671862 bz#1676495 bz#1691620 
e3c68b
  bz#1696334 bz#1696903 bz#1697820 bz#1698436 bz#1698728 bz#1699709 bz#1699835 
e3c68b
  bz#1702240
e3c68b
e3c68b
* Mon Apr 08 2019 Milind Changire <mchangir@redhat.com> - 6.0-1
e3c68b
- rebase to upstream glusterfs at v6.0
e3c68b
- fixes bugs bz#1493284 bz#1578703 bz#1600918 bz#1670415 bz#1691620 
e3c68b
  bz#1693935 bz#1695057
e3c68b