%global _hardened_build 1

%global _for_fedora_koji_builds 0

# uncomment and add '%' to use the prereltag for pre-releases
# %%global prereltag qa3

##-----------------------------------------------------------------------------
## All argument definitions should be placed here and kept sorted
##

# asan
# if you wish to compile an rpm with address sanitizer...
# rpmbuild -ta glusterfs-6.0.tar.gz --with asan
%{?_with_asan:%global _with_asan --enable-asan}

%if ( 0%{?rhel} && 0%{?rhel} < 7 )
%global _with_asan %{nil}
%endif

# bd
# if you wish to compile an rpm without the BD map support...
# rpmbuild -ta glusterfs-6.0.tar.gz --without bd
%{?_without_bd:%global _without_bd --disable-bd-xlator}

%if ( 0%{?rhel} && 0%{?rhel} > 7 )
%global _without_bd --without-bd
%endif

# cmocka
# if you wish to compile an rpm with cmocka unit testing...
# rpmbuild -ta glusterfs-6.0.tar.gz --with cmocka
%{?_with_cmocka:%global _with_cmocka --enable-cmocka}

# debug
# if you wish to compile an rpm with debugging...
# rpmbuild -ta glusterfs-6.0.tar.gz --with debug
%{?_with_debug:%global _with_debug --enable-debug}

# epoll
# if you wish to compile an rpm without epoll...
# rpmbuild -ta glusterfs-6.0.tar.gz --without epoll
%{?_without_epoll:%global _without_epoll --disable-epoll}

# fusermount
# if you wish to compile an rpm without fusermount...
# rpmbuild -ta glusterfs-6.0.tar.gz --without fusermount
%{?_without_fusermount:%global _without_fusermount --disable-fusermount}

# geo-rep
# if you wish to compile an rpm without geo-replication support, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without georeplication
%{?_without_georeplication:%global _without_georeplication --disable-georeplication}

# ipv6default
# if you wish to compile an rpm with IPv6 as the default...
# rpmbuild -ta glusterfs-6.0.tar.gz --with ipv6default
%{?_with_ipv6default:%global _with_ipv6default --with-ipv6-default}

# libtirpc
# if you wish to compile an rpm without TIRPC (i.e. use the legacy glibc rpc)...
# rpmbuild -ta glusterfs-6.0.tar.gz --without libtirpc
%{?_without_libtirpc:%global _without_libtirpc --without-libtirpc}

# Do not use libtirpc on EL6, it does not have xdr_uint64_t() and xdr_uint32_t()
# Do not use libtirpc on EL7, it does not have xdr_sizeof()
%if ( 0%{?rhel} && 0%{?rhel} <= 7 )
%global _without_libtirpc --without-libtirpc
%endif


# ocf
# if you wish to compile an rpm without the OCF resource agents...
# rpmbuild -ta glusterfs-6.0.tar.gz --without ocf
%{?_without_ocf:%global _without_ocf --without-ocf}

# rdma
# if you wish to compile an rpm without rdma support, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without rdma
%{?_without_rdma:%global _without_rdma --disable-ibverbs}

# no RDMA support on 32-bit ARM
%ifarch armv7hl
%global _without_rdma --disable-ibverbs
%endif

# server
# if you wish to build rpms without the server components, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without server
%{?_without_server:%global _without_server --without-server}

# forcefully disable the server components on RHEL builds unless this is an
# RHS/RHGS dist
%if ( 0%{?rhel} )
%if (!(( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" ) || ( "%{?dist}" == ".el8rhgs" )))
%global _without_server --without-server
%endif
%endif

%global _without_extra_xlators 1
%global _without_regression_tests 1

# syslog
# if you wish to build rpms without syslog logging, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without syslog
%{?_without_syslog:%global _without_syslog --disable-syslog}

# forcefully disable syslog on rhel <= 6, which doesn't have rsyslog or
# rsyslog-mmcount
# Fedora deprecated syslog, see
#  https://fedoraproject.org/wiki/Changes/NoDefaultSyslog
# (And what about RHEL7?)
%if ( 0%{?fedora} && 0%{?fedora} >= 20 ) || ( 0%{?rhel} && 0%{?rhel} <= 6 )
%global _without_syslog --disable-syslog
%endif

# tsan
# if you wish to compile an rpm with thread sanitizer...
# rpmbuild -ta glusterfs-6.0.tar.gz --with tsan
%{?_with_tsan:%global _with_tsan --enable-tsan}

%if ( 0%{?rhel} && 0%{?rhel} < 7 )
%global _with_tsan %{nil}
%endif

# valgrind
# if you wish to compile an rpm to run all processes under valgrind...
# rpmbuild -ta glusterfs-6.0.tar.gz --with valgrind
%{?_with_valgrind:%global _with_valgrind --enable-valgrind}

##-----------------------------------------------------------------------------
## All %%global definitions should be placed here and kept sorted
##

# selinux booleans whose default values need modification
# these booleans will be consumed by the "%%selinux_set_booleans" macro.
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
%global selinuxbooleans rsync_full_access=1 rsync_client=1
%endif
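
# For illustration only (nothing here executes at build time): a server
# %%post scriptlet would typically apply these via the selinux-policy macro,
#   %%selinux_set_booleans %%{selinuxbooleans}
# and revert them in %%postun with %%selinux_unset_booleans.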
1df6c8
256ebe
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
256ebe
%global _with_systemd true
256ebe
%endif
256ebe
256ebe
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 7 )
256ebe
%global _with_firewalld --enable-firewalld
256ebe
%endif
256ebe
256ebe
%if 0%{?_tmpfilesdir:1}
256ebe
%global _with_tmpfilesdir --with-tmpfilesdir=%{_tmpfilesdir}
256ebe
%else
256ebe
%global _with_tmpfilesdir --without-tmpfilesdir
256ebe
%endif
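
# Illustrative note (the configure invocation itself lives in %%build, which
# is outside this section): on systemd distributions this passes
#   --with-tmpfilesdir=/usr/lib/tmpfiles.d
# (the expanded %%{_tmpfilesdir}) to ./configure; elsewhere it passes
# --without-tmpfilesdir.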

# building without the server should also disable some server-only components
%if 0%{?_without_server:1}
%global _without_events --disable-events
%global _without_georeplication --disable-georeplication
%global _without_tiering --disable-tiering
%global _without_ocf --without-ocf
%endif

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
%global _usepython3 1
%global _pythonver 3
%else
%global _usepython3 0
%global _pythonver 2
%endif

# From https://fedoraproject.org/wiki/Packaging:Python#Macros
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
%{!?python2_sitelib: %global python2_sitelib %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%{!?python2_sitearch: %global python2_sitearch %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
%global _rundir %{_localstatedir}/run
%endif

%if ( 0%{?_with_systemd:1} )
%global service_enable()   /bin/systemctl --quiet enable %1.service || : \
%{nil}
%global service_start()   /bin/systemctl --quiet start %1.service || : \
%{nil}
%global service_stop()    /bin/systemctl --quiet stop %1.service || : \
%{nil}
%global service_install() install -D -p -m 0644 %1.service %{buildroot}%2 \
%{nil}
# can't seem to make a generic macro that works
%global glusterd_svcfile   %{_unitdir}/glusterd.service
%global glusterfsd_svcfile %{_unitdir}/glusterfsd.service
%global glusterta_svcfile %{_unitdir}/gluster-ta-volume.service
%global glustereventsd_svcfile %{_unitdir}/glustereventsd.service
%global glusterfssharedstorage_svcfile %{_unitdir}/glusterfssharedstorage.service
%else
%global service_enable()  /sbin/chkconfig --add %1 >/dev/null 2>&1 || : \
%{nil}
%global systemd_preun() /sbin/chkconfig --del %1 >/dev/null 2>&1 || : \
%{nil}
%global systemd_postun_with_restart() /sbin/service %1 condrestart >/dev/null 2>&1 || : \
%{nil}
%global service_start()   /sbin/service %1 start >/dev/null 2>&1 || : \
%{nil}
%global service_stop()    /sbin/service %1 stop >/dev/null 2>&1 || : \
%{nil}
%global service_install() install -D -p -m 0755 %1.init %{buildroot}%2 \
%{nil}
# can't seem to make a generic macro that works
%global glusterd_svcfile   %{_sysconfdir}/init.d/glusterd
%global glusterfsd_svcfile %{_sysconfdir}/init.d/glusterfsd
%global glustereventsd_svcfile %{_sysconfdir}/init.d/glustereventsd
%endif
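
# Illustrative example (not part of the spec logic): given the definitions
# above, "%%service_enable glusterd" in a %%post scriptlet expands to
#   /bin/systemctl --quiet enable glusterd.service || :
# on systemd systems, and to the equivalent chkconfig invocation on SysV
# systems.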

%{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}}

# We do not want to generate useless provides and requires for the xlator
# .so files shipped in the glusterfs packages.
# Filter everything that is generated:
#
# TODO: RHEL5 does not have a convenient solution
%if ( 0%{?rhel} == 6 )
# filter_setup exists in RHEL6 only
%filter_provides_in %{_libdir}/glusterfs/%{version}/
%global __filter_from_req %{?__filter_from_req} | grep -v -P '^(?!lib).*\.so.*$'
%filter_setup
%else
# modern rpm and current Fedora do not generate requires when the
# provides are filtered
%global __provides_exclude_from ^%{_libdir}/glusterfs/%{version}/.*$
%endif
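
# Illustrative example: with the exclude pattern above, a translator such as
#   %%{_libdir}/glusterfs/%%{version}/xlator/cluster/distribute.so
# no longer generates an automatic Provides entry in the built rpms.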


##-----------------------------------------------------------------------------
## All package definitions should be placed here in alphabetical order
##
Summary:          Distributed File System
%if ( 0%{_for_fedora_koji_builds} )
Name:             glusterfs
Version:          3.8.0
Release:          0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name:             glusterfs
Version:          6.0
Release:          56.4%{?dist}
ExcludeArch:      i686
%endif
License:          GPLv2 or LGPLv3+
URL:              http://docs.gluster.org/
%if ( 0%{_for_fedora_koji_builds} )
Source0:          http://bits.gluster.org/pub/gluster/glusterfs/src/glusterfs-%{version}%{?prereltag}.tar.gz
Source1:          glusterd.sysconfig
Source2:          glusterfsd.sysconfig
Source7:          glusterfsd.service
Source8:          glusterfsd.init
%else
Source0:          glusterfs-6.0.tar.gz
%endif

Requires(pre):    shadow-utils
%if ( 0%{?_with_systemd:1} )
BuildRequires:    systemd
%endif

Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif
%if 0%{?_with_asan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
BuildRequires:    libasan
%endif
%if 0%{?_with_tsan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
BuildRequires:    libtsan
%endif
BuildRequires:    git
BuildRequires:    bison flex
BuildRequires:    gcc make libtool
BuildRequires:    ncurses-devel readline-devel
BuildRequires:    libxml2-devel openssl-devel
BuildRequires:    libaio-devel libacl-devel
BuildRequires:    python%{_pythonver}-devel
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
BuildRequires:    python-ctypes
%endif
%if ( 0%{?_with_ipv6default:1} ) || ( 0%{!?_without_libtirpc:1} ) || ( 0%{?rhel} && ( 0%{?rhel} >= 8 ) )
BuildRequires:    libtirpc-devel
%endif
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
BuildRequires:    rpcgen
%endif
BuildRequires:    userspace-rcu-devel >= 0.7
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
BuildRequires:    automake
%endif
BuildRequires:    libuuid-devel
%if ( 0%{?_with_cmocka:1} )
BuildRequires:    libcmocka-devel >= 1.0.1
%endif
%if ( 0%{!?_without_tiering:1} )
BuildRequires:    sqlite-devel
%endif
%if ( 0%{!?_without_georeplication:1} )
BuildRequires:    libattr-devel
%endif

%if (0%{?_with_firewalld:1})
BuildRequires:    firewalld
%endif

Obsoletes:        hekafs
Obsoletes:        %{name}-common < %{version}-%{release}
Obsoletes:        %{name}-core < %{version}-%{release}
Obsoletes:        %{name}-ufo
%if ( 0%{!?_with_gnfs:1} )
Obsoletes:        %{name}-gnfs
%endif
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
Obsoletes:        %{name}-ganesha
%endif
Provides:         %{name}-common = %{version}-%{release}
Provides:         %{name}-core = %{version}-%{release}
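
# Note (descriptive only): the Obsoletes/Provides pairs above implement the
# standard RPM rename pattern, letting yum/dnf replace the retired
# glusterfs-common and glusterfs-core subpackages with this package on
# upgrade while still satisfying dependencies on the old names.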

# Patch0001: 0001-Update-rfc.sh-to-rhgs-3.5.0.patch
Patch0002: 0002-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch
Patch0003: 0003-rpc-set-bind-insecure-to-off-by-default.patch
Patch0004: 0004-glusterd-spec-fixing-autogen-issue.patch
Patch0005: 0005-libglusterfs-glusterd-Fix-compilation-errors.patch
Patch0006: 0006-build-remove-ghost-directory-entries.patch
Patch0007: 0007-build-add-RHGS-specific-changes.patch
Patch0008: 0008-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch
Patch0009: 0009-build-introduce-security-hardening-flags-in-gluster.patch
Patch0010: 0010-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch
Patch0011: 0011-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch
Patch0012: 0012-build-add-pretrans-check.patch
Patch0013: 0013-glusterd-fix-info-file-checksum-mismatch-during-upgr.patch
Patch0014: 0014-build-spec-file-conflict-resolution.patch
Patch0015: 0015-build-randomize-temp-file-names-in-pretrans-scriptle.patch
Patch0016: 0016-glusterd-parallel-readdir-Change-the-op-version-of-p.patch
Patch0017: 0017-glusterd-Revert-op-version-for-cluster.max-brick-per.patch
Patch0018: 0018-cli-Add-message-for-user-before-modifying-brick-mult.patch
Patch0019: 0019-build-launch-glusterd-upgrade-after-all-new-bits-are.patch
Patch0020: 0020-spec-unpackaged-files-found-for-RHEL-7-client-build.patch
Patch0021: 0021-cli-glusterfsd-remove-copyright-information.patch
Patch0022: 0022-cli-Remove-upstream-doc-reference.patch
Patch0023: 0023-hooks-remove-selinux-hooks.patch
Patch0024: 0024-glusterd-Make-localtime-logging-option-invisible-in-.patch
Patch0025: 0025-build-make-RHGS-version-available-for-server.patch
Patch0026: 0026-glusterd-Introduce-daemon-log-level-cluster-wide-opt.patch
Patch0027: 0027-glusterd-change-op-version-of-fips-mode-rchecksum.patch
Patch0028: 0028-glusterd-Reset-op-version-for-features.shard-deletio.patch
Patch0029: 0029-glusterd-Reset-op-version-for-features.shard-lru-lim.patch
Patch0030: 0030-selinux-glusterd-add-features.selinux-to-glusterd-vo.patch
Patch0031: 0031-glusterd-turn-off-selinux-feature-in-downstream.patch
Patch0032: 0032-glusterd-update-gd-op-version-to-3_7_0.patch
Patch0033: 0033-build-add-missing-explicit-package-dependencies.patch
Patch0034: 0034-glusterd-introduce-a-new-op-version-for-rhgs-3.4.3.patch
Patch0035: 0035-glusterd-tag-rebalance-mgmt_v3-command-to-op-version.patch
Patch0036: 0036-build-add-conditional-dependency-on-server-for-devel.patch
Patch0037: 0037-cli-change-the-warning-message.patch
Patch0038: 0038-spec-avoid-creation-of-temp-file-in-lua-script.patch
Patch0039: 0039-cli-fix-query-to-user-during-brick-mux-selection.patch
Patch0040: 0040-build-Remove-unsupported-test-cases-failing-consiste.patch
Patch0041: 0041-tests-geo-rep-Build-failed-in-Jenkins-for-test-bug-1.patch
Patch0042: 0042-spec-client-server-Builds-are-failing-on-rhel-6.patch
Patch0043: 0043-inode-don-t-dump-the-whole-table-to-CLI.patch
Patch0044: 0044-cluster-ec-Don-t-enqueue-an-entry-if-it-is-already-h.patch
Patch0045: 0045-glusterd-fix-txn-id-mem-leak.patch
Patch0046: 0046-protocol-client-Do-not-fallback-to-anon-fd-if-fd-is-.patch
Patch0047: 0047-client-rpc-Fix-the-payload-being-sent-on-the-wire.patch
Patch0048: 0048-gfapi-Unblock-epoll-thread-for-upcall-processing.patch
Patch0049: 0049-transport-socket-log-shutdown-msg-occasionally.patch
Patch0050: 0050-geo-rep-Fix-syncing-multiple-rename-of-symlink.patch
Patch0051: 0051-spec-update-rpm-install-condition.patch
Patch0052: 0052-geo-rep-IPv6-support.patch
Patch0053: 0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch
Patch0054: 0054-Revert-glusterd-storhaug-remove-ganesha.patch
Patch0055: 0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch
Patch0056: 0056-common-ha-fixes-for-Debian-based-systems.patch
Patch0057: 0057-ganesha-scripts-Remove-export-entries-from-ganesha.c.patch
Patch0058: 0058-glusterd-ganesha-During-volume-delete-remove-the-gan.patch
Patch0059: 0059-glusterd-ganesha-throw-proper-error-for-gluster-nfs-.patch
Patch0060: 0060-ganesha-scripts-Stop-ganesha-process-on-all-nodes-if.patch
Patch0061: 0061-ganesha-allow-refresh-config-and-volume-export-unexp.patch
Patch0062: 0062-glusterd-ganesha-perform-removal-of-ganesha.conf-on-.patch
Patch0063: 0063-glusterd-ganesha-update-cache-invalidation-properly-.patch
Patch0064: 0064-glusterd-ganesha-return-proper-value-in-pre_setup.patch
Patch0065: 0065-ganesha-scripts-remove-dependency-over-export-config.patch
Patch0066: 0066-glusterd-ganesha-add-proper-NULL-check-in-manage_exp.patch
Patch0067: 0067-ganesha-minor-improvments-for-commit-e91cdf4-17081.patch
Patch0068: 0068-common-ha-surviving-ganesha.nfsd-not-put-in-grace-on.patch
Patch0069: 0069-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
Patch0070: 0070-packaging-glusterfs-ganesha-update-sometimes-fails-s.patch
Patch0071: 0071-common-ha-enable-and-disable-selinux-gluster_use_exe.patch
Patch0072: 0072-ganesha-ha-don-t-set-SELinux-booleans-if-SELinux-is-.patch
Patch0073: 0073-build-remove-ganesha-dependency-on-selinux-policy.patch
Patch0074: 0074-common-ha-enable-pacemaker-at-end-of-setup.patch
Patch0075: 0075-common-ha-Fix-an-incorrect-syntax-during-setup.patch
Patch0076: 0076-glusterd-ganesha-change-voltype-for-ganesha.enable-i.patch
Patch0077: 0077-glusterd-ganesha-create-remove-export-file-only-from.patch
Patch0078: 0078-common-ha-scripts-pass-the-list-of-servers-properly-.patch
Patch0079: 0079-common-ha-All-statd-related-files-need-to-be-owned-b.patch
Patch0080: 0080-glusterd-ganesha-Skip-non-ganesha-nodes-properly-for.patch
Patch0081: 0081-ganesha-ha-ensure-pacemaker-is-enabled-after-setup.patch
Patch0082: 0082-build-Add-dependency-on-netstat-for-glusterfs-ganesh.patch
Patch0083: 0083-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
Patch0084: 0084-glusterd-Fix-duplicate-client_op_version-in-info-fil.patch
Patch0085: 0085-Revert-all-remove-code-which-is-not-being-considered.patch
Patch0086: 0086-Revert-tiering-remove-the-translator-from-build-and-.patch
Patch0087: 0087-ganesha-fixing-minor-issues-after-the-backport-from-.patch
Patch0088: 0088-tier-fix-failures-noticed-during-tier-start-and-tier.patch
Patch0089: 0089-glusterd-gNFS-On-post-upgrade-to-3.2-disable-gNFS-fo.patch
Patch0090: 0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch
Patch0091: 0091-glusterd-gNFS-explicitly-set-nfs.disable-to-off-afte.patch
Patch0092: 0092-logging-Fix-GF_LOG_OCCASSIONALLY-API.patch
Patch0093: 0093-glusterd-Change-op-version-of-cache-invalidation-in-.patch
Patch0094: 0094-glusterd-load-ctime-in-the-client-graph-only-if-it-s.patch
Patch0095: 0095-cluster-afr-Remove-local-from-owners_list-on-failure.patch
Patch0096: 0096-core-Brick-is-not-able-to-detach-successfully-in-bri.patch
Patch0097: 0097-glusterd-tier-while-doing-an-attach-tier-the-self-he.patch
Patch0098: 0098-mgmt-shd-Implement-multiplexing-in-self-heal-daemon.patch
Patch0099: 0099-client-fini-return-fini-after-rpc-cleanup.patch
Patch0100: 0100-clnt-rpc-ref-leak-during-disconnect.patch
Patch0101: 0101-shd-mux-Fix-coverity-issues-introduced-by-shd-mux-pa.patch
Patch0102: 0102-rpc-transport-Missing-a-ref-on-dict-while-creating-t.patch
Patch0103: 0103-dht-NULL-check-before-setting-error-flag.patch
Patch0104: 0104-afr-shd-Cleanup-self-heal-daemon-resources-during-af.patch
Patch0105: 0105-core-Log-level-changes-do-not-effect-on-running-clie.patch
Patch0106: 0106-libgfchangelog-use-find_library-to-locate-shared-lib.patch
Patch0107: 0107-gfapi-add-function-to-set-client-pid.patch
Patch0108: 0108-afr-add-client-pid-to-all-gf_event-calls.patch
Patch0109: 0109-glusterd-Optimize-glusterd-handshaking-code-path.patch
Patch0110: 0110-tier-shd-glusterd-with-shd-mux-the-shd-volfile-path-.patch
Patch0111: 0111-glusterd-fix-loading-ctime-in-client-graph-logic.patch
Patch0112: 0112-geo-rep-fix-incorrectly-formatted-authorized_keys.patch
Patch0113: 0113-spec-Glusterd-did-not-start-by-default-after-node-re.patch
Patch0114: 0114-core-fix-hang-issue-in-__gf_free.patch
Patch0115: 0115-core-only-log-seek-errors-if-SEEK_HOLE-SEEK_DATA-is-.patch
Patch0116: 0116-cluster-ec-fix-fd-reopen.patch
Patch0117: 0117-spec-Remove-thin-arbiter-package.patch
Patch0118: 0118-tests-mark-thin-arbiter-test-ta.t-as-bad.patch
Patch0119: 0119-glusterd-provide-a-way-to-detach-failed-node.patch
Patch0120: 0120-glusterd-shd-Keep-a-ref-on-volinfo-until-attach-rpc-.patch
Patch0121: 0121-spec-glusterfs-devel-for-client-build-should-not-dep.patch
Patch0122: 0122-posix-ctime-Fix-stat-time-attributes-inconsistency-d.patch
Patch0123: 0123-ctime-Fix-log-repeated-logging-during-open.patch
Patch0124: 0124-spec-remove-duplicate-references-to-files.patch
Patch0125: 0125-glusterd-define-dumpops-in-the-xlator_api-of-gluster.patch
Patch0126: 0126-cluster-dht-refactor-dht-lookup-functions.patch
Patch0127: 0127-cluster-dht-Refactor-dht-lookup-functions.patch
Patch0128: 0128-glusterd-Fix-bulkvoldict-thread-logic-in-brick-multi.patch
Patch0129: 0129-core-handle-memory-accounting-correctly.patch
Patch0130: 0130-tier-test-new-tier-cmds.t-fails-after-a-glusterd-res.patch
Patch0131: 0131-tests-dht-Test-that-lookups-are-sent-post-brick-up.patch
Patch0132: 0132-glusterd-remove-duplicate-occurrence-of-features.sel.patch
Patch0133: 0133-glusterd-enable-fips-mode-rchecksum-for-new-volumes.patch
Patch0134: 0134-performance-write-behind-remove-request-from-wip-lis.patch
Patch0135: 0135-geo-rep-fix-incorrectly-formatted-authorized_keys.patch
Patch0136: 0136-glusterd-fix-inconsistent-global-option-output-in-vo.patch
Patch0137: 0137-shd-glusterd-Serialize-shd-manager-to-prevent-race-c.patch
Patch0138: 0138-glusterd-Add-gluster-volume-stop-operation-to-gluste.patch
Patch0139: 0139-ec-shd-Cleanup-self-heal-daemon-resources-during-ec-.patch
Patch0140: 0140-cluster-ec-Reopen-shouldn-t-happen-with-O_TRUNC.patch
Patch0141: 0141-socket-ssl-fix-crl-handling.patch
Patch0142: 0142-lock-check-null-value-of-dict-to-avoid-log-flooding.patch
Patch0143: 0143-packaging-Change-the-dependency-on-nfs-ganesha-to-2..patch
Patch0144: 0144-cluster-ec-honor-contention-notifications-for-partia.patch
Patch0145: 0145-core-Capture-process-memory-usage-at-the-time-of-cal.patch
Patch0146: 0146-dht-Custom-xattrs-are-not-healed-in-case-of-add-bric.patch
Patch0147: 0147-glusterd-bulkvoldict-thread-is-not-handling-all-volu.patch
Patch0148: 0148-cluster-dht-Lookup-all-files-when-processing-directo.patch
Patch0149: 0149-glusterd-Optimize-code-to-copy-dictionary-in-handsha.patch
Patch0150: 0150-libglusterfs-define-macros-needed-for-cloudsync.patch
Patch0151: 0151-mgmt-glusterd-Make-changes-related-to-cloudsync-xlat.patch
Patch0152: 0152-storage-posix-changes-with-respect-to-cloudsync.patch
Patch0153: 0153-features-cloudsync-Added-some-new-functions.patch
Patch0154: 0154-cloudsync-cvlt-Cloudsync-plugin-for-commvault-store.patch
Patch0155: 0155-cloudsync-Make-readdirp-return-stat-info-of-all-the-.patch
Patch0156: 0156-cloudsync-Fix-bug-in-cloudsync-fops-c.py.patch
Patch0157: 0157-afr-frame-Destroy-frame-after-afr_selfheal_entry_gra.patch
Patch0158: 0158-glusterfsd-cleanup-Protect-graph-object-under-a-lock.patch
Patch0159: 0159-glusterd-add-an-op-version-check.patch
Patch0160: 0160-geo-rep-Geo-rep-help-text-issue.patch
Patch0161: 0161-geo-rep-Fix-rename-with-existing-destination-with-sa.patch
Patch0162: 0162-geo-rep-Fix-sync-method-config.patch
Patch0163: 0163-geo-rep-Fix-sync-hang-with-tarssh.patch
Patch0164: 0164-cluster-ec-Fix-handling-of-heal-info-cases-without-l.patch
Patch0165: 0165-tests-shd-Add-test-coverage-for-shd-mux.patch
Patch0166: 0166-glusterd-svc-glusterd_svcs_stop-should-call-individu.patch
Patch0167: 0167-glusterd-shd-Optimize-the-glustershd-manager-to-send.patch
Patch0168: 0168-cluster-dht-Fix-directory-perms-during-selfheal.patch
Patch0169: 0169-Build-Fix-spec-to-enable-rhel8-client-build.patch
Patch0170: 0170-geo-rep-Convert-gfid-conflict-resolutiong-logs-into-.patch
Patch0171: 0171-posix-add-storage.reserve-size-option.patch
Patch0172: 0172-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
Patch0173: 0173-glusterd-store-fips-mode-rchecksum-option-in-the-inf.patch
Patch0174: 0174-xlator-log-Add-more-logging-in-xlator_is_cleanup_sta.patch
Patch0175: 0175-ec-fini-Fix-race-between-xlator-cleanup-and-on-going.patch
Patch0176: 0176-features-shard-Fix-crash-during-background-shard-del.patch
Patch0177: 0177-features-shard-Fix-extra-unref-when-inode-object-is-.patch
Patch0178: 0178-Cluster-afr-Don-t-treat-all-bricks-having-metadata-p.patch
Patch0179: 0179-tests-Fix-split-brain-favorite-child-policy.t-failur.patch
Patch0180: 0180-ganesha-scripts-Make-generate-epoch.py-python3-compa.patch
Patch0181: 0181-afr-log-before-attempting-data-self-heal.patch
Patch0182: 0182-geo-rep-fix-mountbroker-setup.patch
Patch0183: 0183-glusterd-svc-Stop-stale-process-using-the-glusterd_p.patch
Patch0184: 0184-tests-Add-gating-configuration-file-for-rhel8.patch
Patch0185: 0185-gfapi-provide-an-api-for-setting-statedump-path.patch
Patch0186: 0186-cli-Remove-brick-warning-seems-unnecessary.patch
Patch0187: 0187-gfapi-statedump_path-add-proper-version-number.patch
Patch0188: 0188-features-shard-Fix-integer-overflow-in-block-count-a.patch
Patch0189: 0189-features-shard-Fix-block-count-accounting-upon-trunc.patch
Patch0190: 0190-Build-removing-the-hardcoded-usage-of-python3.patch
Patch0191: 0191-Build-Update-python-shebangs-based-on-version.patch
Patch0192: 0192-build-Ensure-gluster-cli-package-is-built-as-part-of.patch
Patch0193: 0193-spec-fixed-python-dependency-for-rhel6.patch
Patch0194: 0194-stack-Make-sure-to-have-unique-call-stacks-in-all-ca.patch
Patch0195: 0195-build-package-glusterfs-ganesha-for-rhel7-and-above.patch
Patch0196: 0196-posix-ctime-Fix-ctime-upgrade-issue.patch
Patch0197: 0197-posix-fix-crash-in-posix_cs_set_state.patch
Patch0198: 0198-cluster-ec-Prevent-double-pre-op-xattrops.patch
Patch0199: 0199-upcall-Avoid-sending-notifications-for-invalid-inode.patch
Patch0200: 0200-gfapi-fix-incorrect-initialization-of-upcall-syncop-.patch
Patch0201: 0201-geo-rep-Fix-permissions-for-GEOREP_DIR-in-non-root-s.patch
Patch0202: 0202-shd-mux-Fix-race-between-mux_proc-unlink-and-stop.patch
Patch0203: 0203-glusterd-shd-Change-shd-logfile-to-a-unique-name.patch
Patch0204: 0204-glusterd-conditionally-clear-txn_opinfo-in-stage-op.patch
Patch0205: 0205-glusterd-Can-t-run-rebalance-due-to-long-unix-socket.patch
Patch0206: 0206-glusterd-ignore-user.-options-from-compatibility-che.patch
Patch0207: 0207-glusterd-fix-use-after-free-of-a-dict_t.patch
Patch0208: 0208-mem-pool-remove-dead-code.patch
Patch0209: 0209-core-avoid-dynamic-TLS-allocation-when-possible.patch
Patch0210: 0210-mem-pool.-c-h-minor-changes.patch
Patch0211: 0211-libglusterfs-Fix-compilation-when-disable-mempool-is.patch
Patch0212: 0212-core-fix-memory-allocation-issues.patch
Patch0213: 0213-cluster-dht-Strip-out-dht-xattrs.patch
Patch0214: 0214-geo-rep-Upgrading-config-file-to-new-version.patch
Patch0215: 0215-posix-modify-storage.reserve-option-to-take-size-and.patch
Patch0216: 0216-Test-case-fixe-for-downstream-3.5.0.patch
Patch0217: 0217-uss-Fix-tar-issue-with-ctime-and-uss-enabled.patch
Patch0218: 0218-graph-shd-Use-glusterfs_graph_deactivate-to-free-the.patch
Patch0219: 0219-posix-add-posix_set_ctime-in-posix_ftruncate.patch
Patch0220: 0220-graph-shd-Use-top-down-approach-while-cleaning-xlato.patch
Patch0221: 0221-protocol-client-propagte-GF_EVENT_CHILD_PING-only-fo.patch
Patch0222: 0222-cluster-dht-Fixed-a-memleak-in-dht_rename_cbk.patch
Patch0223: 0223-change-get_real_filename-implementation-to-use-ENOAT.patch
Patch0224: 0224-core-replace-inet_addr-with-inet_pton.patch
Patch0225: 0225-tests-utils-Fix-py2-py3-util-python-scripts.patch
Patch0226: 0226-geo-rep-fix-gluster-command-path-for-non-root-sessio.patch
Patch0227: 0227-glusterd-svc-update-pid-of-mux-volumes-from-the-shd-.patch
Patch0228: 0228-locks-enable-notify-contention-by-default.patch
Patch0229: 0229-glusterd-Show-the-correct-brick-status-in-get-state.patch
Patch0230: 0230-Revert-glusterd-svc-update-pid-of-mux-volumes-from-t.patch
Patch0231: 0231-Revert-graph-shd-Use-top-down-approach-while-cleanin.patch
Patch0232: 0232-cluster-afr-Fix-incorrect-reporting-of-gfid-type-mis.patch
Patch0233: 0233-Revert-graph-shd-Use-glusterfs_graph_deactivate-to-f.patch
Patch0234: 0234-Revert-glusterd-shd-Change-shd-logfile-to-a-unique-n.patch
Patch0235: 0235-Revert-glusterd-svc-Stop-stale-process-using-the-glu.patch
Patch0236: 0236-Revert-shd-mux-Fix-race-between-mux_proc-unlink-and-.patch
Patch0237: 0237-Revert-ec-fini-Fix-race-between-xlator-cleanup-and-o.patch
Patch0238: 0238-Revert-xlator-log-Add-more-logging-in-xlator_is_clea.patch
Patch0239: 0239-Revert-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
Patch0240: 0240-Revert-glusterd-shd-Optimize-the-glustershd-manager-.patch
Patch0241: 0241-Revert-glusterd-svc-glusterd_svcs_stop-should-call-i.patch
Patch0242: 0242-Revert-tests-shd-Add-test-coverage-for-shd-mux.patch
Patch0243: 0243-Revert-glusterfsd-cleanup-Protect-graph-object-under.patch
Patch0244: 0244-Revert-ec-shd-Cleanup-self-heal-daemon-resources-dur.patch
Patch0245: 0245-Revert-shd-glusterd-Serialize-shd-manager-to-prevent.patch
Patch0246: 0246-Revert-glusterd-shd-Keep-a-ref-on-volinfo-until-atta.patch
Patch0247: 0247-Revert-afr-shd-Cleanup-self-heal-daemon-resources-du.patch
Patch0248: 0248-Revert-shd-mux-Fix-coverity-issues-introduced-by-shd.patch
Patch0249: 0249-Revert-client-fini-return-fini-after-rpc-cleanup.patch
Patch0250: 0250-Revert-mgmt-shd-Implement-multiplexing-in-self-heal-.patch
Patch0251: 0251-tests-Fix-bug-1717819-metadata-split-brain-detection.patch
Patch0252: 0252-glusterd-do-not-mark-skip_locking-as-true-for-geo-re.patch
Patch0253: 0253-core-fix-deadlock-between-statedump-and-fd_anonymous.patch
Patch0254: 0254-Detach-iot_worker-to-release-its-resources.patch
Patch0255: 0255-Revert-tier-shd-glusterd-with-shd-mux-the-shd-volfil.patch
Patch0256: 0256-features-snapview-server-use-the-same-volfile-server.patch
Patch0257: 0257-geo-rep-Test-case-for-upgrading-config-file.patch
Patch0258: 0258-geo-rep-Fix-mount-broker-setup-issue.patch
Patch0259: 0259-gluster-block-tuning-perf-options.patch
Patch0260: 0260-ctime-Set-mdata-xattr-on-legacy-files.patch
Patch0261: 0261-features-utime-Fix-mem_put-crash.patch
Patch0262: 0262-glusterd-ctime-Disable-ctime-by-default.patch
Patch0263: 0263-tests-fix-ctime-related-tests.patch
Patch0264: 0264-gfapi-Fix-deadlock-while-processing-upcall.patch
Patch0265: 0265-fuse-add-missing-GF_FREE-to-fuse_interrupt.patch
Patch0266: 0266-geo-rep-Fix-mount-broker-setup-issue.patch
Patch0267: 0267-posix-ctime-Fix-race-during-lookup-ctime-xattr-heal.patch
Patch0268: 0268-rpc-transport-have-default-listen-port.patch
Patch0269: 0269-ec-fix-truncate-lock-to-cover-the-write-in-tuncate-c.patch
Patch0270: 0270-cluster-ec-inherit-healing-from-lock-when-it-has-inf.patch
Patch0271: 0271-cluster-ec-fix-EIO-error-for-concurrent-writes-on-sp.patch
Patch0272: 0272-cluster-ec-Always-read-from-good-mask.patch
Patch0273: 0273-cluster-ec-Fix-reopen-flags-to-avoid-misbehavior.patch
Patch0274: 0274-cluster-ec-Update-lock-good_mask-on-parent-fop-failu.patch
Patch0275: 0275-cluster-ec-Create-heal-task-with-heal-process-id.patch
Patch0276: 0276-features-utime-always-update-ctime-at-setattr.patch
Patch0277: 0277-geo-rep-Fix-Config-Get-Race.patch
Patch0278: 0278-geo-rep-Fix-worker-connection-issue.patch
Patch0279: 0279-posix-In-brick_mux-brick-is-crashed-while-start-stop.patch
Patch0280: 0280-performance-md-cache-Do-not-skip-caching-of-null-cha.patch
Patch0281: 0281-ctime-Fix-incorrect-realtime-passed-to-frame-root-ct.patch
Patch0282: 0282-geo-rep-Fix-the-name-of-changelog-archive-file.patch
Patch0283: 0283-ctime-Fix-ctime-issue-with-utime-family-of-syscalls.patch
Patch0284: 0284-posix-log-aio_error-return-codes-in-posix_fs_health_.patch
Patch0285: 0285-glusterd-glusterd-service-is-getting-timed-out-on-sc.patch
Patch0286: 0286-glusterfs.spec.in-added-script-files-for-machine-com.patch
Patch0287: 0287-cluster-ec-Fail-fsync-flush-for-files-on-update-size.patch
Patch0288: 0288-cluster-ec-Fix-coverity-issues.patch
Patch0289: 0289-cluster-ec-quorum-count-implementation.patch
Patch0290: 0290-glusterd-tag-disperse.quorum-count-for-31306.patch
Patch0291: 0291-cluster-ec-Mark-release-only-when-it-is-acquired.patch
Patch0292: 0292-rpc-Update-address-family-if-it-is-not-provide-in-cm.patch
Patch0293: 0293-glusterd-IPV6-hostname-address-is-not-parsed-correct.patch
Patch0294: 0294-eventsapi-Set-IPv4-IPv6-family-based-on-input-IP.patch
Patch0295: 0295-ctime-rebalance-Heal-ctime-xattr-on-directory-during.patch
Patch0296: 0296-glusterfind-pre-command-failure-on-a-modify.patch
Patch0297: 0297-rpmbuild-fixing-the-build-errors-with-2a905a8ae.patch
Patch0298: 0298-geo-rep-fix-sub-command-during-worker-connection.patch
Patch0299: 0299-geo-rep-performance-improvement-while-syncing-rename.patch
Patch0300: 0300-cli-remove-the-warning-displayed-when-remove-brick-s.patch
Patch0301: 0301-posix-Brick-is-going-down-unexpectedly.patch
Patch0302: 0302-cluster-ec-prevent-filling-shd-log-with-table-not-fo.patch
Patch0303: 0303-posix-heketidbstorage-bricks-go-down-during-PVC-crea.patch
Patch0304: 0304-cluster-dht-Correct-fd-processing-loop.patch
Patch0305: 0305-glusterd-rebalance-start-should-fail-when-quorum-is-.patch
Patch0306: 0306-cli-fix-distCount-value.patch
Patch0307: 0307-ssl-fix-RHEL8-regression-failure.patch
Patch0308: 0308-dht-Rebalance-causing-IO-Error-File-descriptor-in-ba.patch
Patch0309: 0309-geo-rep-Fix-config-upgrade-on-non-participating-node.patch
Patch0310: 0310-tests-test-case-for-non-root-geo-rep-setup.patch
Patch0311: 0311-geo-rep-Fix-Permission-denied-traceback-on-non-root-.patch
Patch0312: 0312-Scripts-quota_fsck-script-KeyError-contri_size.patch
Patch0313: 0313-extras-Cgroup-CPU-Mem-restriction-are-not-working-on.patch
Patch0314: 0314-glusterd-tier-is_tier_enabled-inserted-causing-check.patch
Patch0315: 0315-geo-rep-Fix-py2-py3-compatibility-in-repce.patch
Patch0316: 0316-spec-fixed-python-prettytable-dependency-for-rhel6.patch
Patch0317: 0317-Update-rfc.sh-to-rhgs-3.5.1.patch
Patch0318: 0318-Update-rfc.sh-to-rhgs-3.5.1.patch
Patch0319: 0319-features-snapview-server-obtain-the-list-of-snapshot.patch
Patch0320: 0320-gf-event-Handle-unix-volfile-servers.patch
Patch0321: 0321-Adding-white-spaces-to-description-of-set-group.patch
Patch0322: 0322-glusterd-display-correct-rebalance-data-size-after-g.patch
Patch0323: 0323-cli-display-detailed-rebalance-info.patch
Patch0324: 0324-extras-hooks-Add-SELinux-label-on-new-bricks-during-.patch
Patch0325: 0325-extras-hooks-Install-and-package-newly-added-post-ad.patch
Patch0326: 0326-tests-subdir-mount.t-is-failing-for-brick_mux-regrss.patch
Patch0327: 0327-glusterfind-integrate-with-gfid2path.patch
Patch0328: 0328-glusterd-Add-warning-and-abort-in-case-of-failures-i.patch
Patch0329: 0329-cluster-afr-Heal-entries-when-there-is-a-source-no-h.patch
Patch0330: 0330-mount.glusterfs-change-the-error-message.patch
Patch0331: 0331-features-locks-Do-special-handling-for-op-version-3..patch
Patch0332: 0332-Removing-one-top-command-from-gluster-v-help.patch
Patch0333: 0333-rpc-Synchronize-slot-allocation-code.patch
Patch0334: 0334-dht-log-getxattr-failure-for-node-uuid-at-DEBUG.patch
Patch0335: 0335-tests-RHEL8-test-failure-fixes-for-RHGS.patch
Patch0336: 0336-spec-check-and-return-exit-code-in-rpm-scripts.patch
Patch0337: 0337-fuse-Set-limit-on-invalidate-queue-size.patch
Patch0338: 0338-glusterfs-fuse-Reduce-the-default-lru-limit-value.patch
Patch0339: 0339-geo-rep-fix-integer-config-validation.patch
Patch0340: 0340-rpc-event_slot_alloc-converted-infinite-loop-after-r.patch
Patch0341: 0341-socket-fix-error-handling.patch
Patch0342: 0342-Revert-hooks-remove-selinux-hooks.patch
Patch0343: 0343-extras-hooks-syntactical-errors-in-SELinux-hooks-sci.patch
Patch0344: 0344-Revert-all-fixes-to-include-SELinux-hook-scripts.patch
Patch0345: 0345-read-ahead-io-cache-turn-off-by-default.patch
Patch0346: 0346-fuse-degrade-logging-of-write-failure-to-fuse-device.patch
Patch0347: 0347-tools-glusterfind-handle-offline-bricks.patch
Patch0348: 0348-glusterfind-Fix-py2-py3-issues.patch
Patch0349: 0349-glusterfind-python3-compatibility.patch
Patch0350: 0350-tools-glusterfind-Remove-an-extra-argument.patch
Patch0351: 0351-server-Mount-fails-after-reboot-1-3-gluster-nodes.patch
Patch0352: 0352-spec-fixed-missing-dependencies-for-glusterfs-clouds.patch
Patch0353: 0353-build-glusterfs-ganesha-pkg-requires-python3-policyc.patch
Patch0354: 0354-core-fix-memory-pool-management-races.patch
Patch0355: 0355-core-Prevent-crash-on-process-termination.patch
Patch0356: 0356-Update-rfc.sh-to-rhgs-3.5.1-rhel-8.patch
Patch0357: 0357-ganesha-ha-updates-for-pcs-0.10.x-i.e.-in-Fedora-29-.patch
Patch0358: 0358-inode-fix-wrong-loop-count-in-__inode_ctx_free.patch
Patch0359: 0359-dht-gf_defrag_process_dir-is-called-even-if-gf_defra.patch
Patch0360: 0360-rpc-Make-ssl-log-more-useful.patch
Patch0361: 0361-snap_scheduler-python3-compatibility-and-new-test-ca.patch
Patch0362: 0362-write-behind-fix-data-corruption.patch
Patch0363: 0363-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
Patch0364: 0364-dht-fixing-rebalance-failures-for-files-with-holes.patch
Patch0365: 0365-build-geo-rep-requires-relevant-selinux-permission-f.patch
Patch0366: 0366-snapshot-fix-python3-issue-in-gcron.patch
Patch0367: 0367-dht-Handle-setxattr-and-rm-race-for-directory-in-reb.patch
Patch0368: 0368-Update-rfc.sh-to-rhgs-3.5.2.patch
Patch0369: 0369-cluster-ec-Return-correct-error-code-and-log-message.patch
Patch0370: 0370-dht-Do-opendir-selectively-in-gf_defrag_process_dir.patch
Patch0371: 0371-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
Patch0372: 0372-posix-fix-seek-functionality.patch
Patch0373: 0373-build-geo-rep-sub-pkg-requires-policycoreutils-pytho.patch
Patch0374: 0374-open-behind-fix-missing-fd-reference.patch
Patch0375: 0375-features-shard-Send-correct-size-when-reads-are-sent.patch
Patch0376: 0376-features-shard-Fix-crash-during-shards-cleanup-in-er.patch
Patch0377: 0377-syncop-improve-scaling-and-implement-more-tools.patch
Patch0378: 0378-Revert-open-behind-fix-missing-fd-reference.patch
Patch0379: 0379-glusterd-add-missing-synccond_broadcast.patch
Patch0380: 0380-features-shard-Aggregate-size-block-count-in-iatt-be.patch
Patch0381: 0381-dht-add-null-check-in-gf_defrag_free_dir_dfmeta.patch
Patch0382: 0382-features-shard-Aggregate-file-size-block-count-befor.patch
Patch0383: 0383-common-ha-ganesha-ha.sh-bad-test-for-rhel-centos-for.patch
Patch0384: 0384-Update-rfc.sh-to-rhgs-3.5.3.patch
Patch0385: 0385-glusterd-start-glusterd-automatically-on-abnormal-sh.patch
Patch0386: 0386-glusterd-increase-the-StartLimitBurst.patch
Patch0387: 0387-To-fix-readdir-ahead-memory-leak.patch
Patch0388: 0388-rpc-Cleanup-SSL-specific-data-at-the-time-of-freeing.patch
Patch0389: 0389-posix-Avoid-diskpace-error-in-case-of-overwriting-th.patch
Patch0390: 0390-glusterd-deafult-options-after-volume-reset.patch
Patch0391: 0391-glusterd-unlink-the-file-after-killing-the-process.patch
Patch0392: 0392-glusterd-Brick-process-fails-to-come-up-with-brickmu.patch
Patch0393: 0393-afr-restore-timestamp-of-files-during-metadata-heal.patch
Patch0394: 0394-man-gluster-Add-volume-top-command-to-gluster-man-pa.patch
Patch0395: 0395-Cli-Removing-old-log-rotate-command.patch
Patch0396: 0396-Updating-gluster-manual.patch
Patch0397: 0397-mgmt-brick-mux-Avoid-sending-two-response-when-attac.patch
Patch0398: 0398-ec-change-error-message-for-heal-commands-for-disper.patch
Patch0399: 0399-glusterd-coverity-fixes.patch
Patch0400: 0400-cli-throw-a-warning-if-replica-count-greater-than-3.patch
Patch0401: 0401-cli-change-the-warning-message.patch
Patch0402: 0402-afr-wake-up-index-healer-threads.patch
Patch0403: 0403-Fix-spurious-failure-in-bug-1744548-heal-timeout.t.patch
Patch0404: 0404-tests-Fix-spurious-failure.patch
Patch0405: 0405-core-fix-return-of-local-in-__nlc_inode_ctx_get.patch
Patch0406: 0406-afr-support-split-brain-CLI-for-replica-3.patch
Patch0407: 0407-geo-rep-Improving-help-message-in-schedule_georep.py.patch
Patch0408: 0408-geo-rep-Fix-ssh-port-validation.patch
Patch0409: 0409-system-posix-acl-update-ctx-only-if-iatt-is-non-NULL.patch
Patch0410: 0410-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch
Patch0411: 0411-tools-glusterfind-validate-session-name.patch
Patch0412: 0412-gluster-smb-add-smb-parameter-when-access-gluster-by.patch
Patch0413: 0413-extras-hooks-Remove-smb.conf-parameter-allowing-gues.patch
Patch0414: 0414-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch
Patch0415: 0415-dht-Fix-stale-layout-and-create-issue.patch
Patch0416: 0416-tests-fix-spurious-failure-of-bug-1402841.t-mt-dir-s.patch
Patch0417: 0417-events-fix-IPv6-memory-corruption.patch
Patch0418: 0418-md-cache-avoid-clearing-cache-when-not-necessary.patch
Patch0419: 0419-cluster-afr-fix-race-when-bricks-come-up.patch
Patch0420: 0420-scripts-quota_fsck-script-TypeError-d-format-not-dic.patch
Patch0421: 0421-Improve-logging-in-EC-client-and-lock-translator.patch
Patch0422: 0422-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch
Patch0423: 0423-ctime-Fix-ctime-inconsisteny-with-utimensat.patch
Patch0424: 0424-afr-make-heal-info-lockless.patch
Patch0425: 0425-tests-Fix-spurious-self-heald.t-failure.patch
Patch0426: 0426-geo-rep-Fix-for-Transport-End-Point-not-connected-is.patch
Patch0427: 0427-storage-posix-Fixing-a-coverity-issue.patch
Patch0428: 0428-glusterd-ganesha-fixing-resource-leak-in-tear_down_c.patch
Patch0429: 0429-dht-rebalance-fixing-failure-occurace-due-to-rebalan.patch
Patch0430: 0430-Fix-some-Null-pointer-dereference-coverity-issues.patch
Patch0431: 0431-glusterd-check-for-same-node-while-adding-bricks-in-.patch
Patch0432: 0432-glusterd-Fix-coverity-defects-put-coverity-annotatio.patch
Patch0433: 0433-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch
Patch0434: 0434-glusterd-ganesha-fix-Coverity-CID-1405785.patch
Patch0435: 0435-glusterd-coverity-fix.patch
Patch0436: 0436-glusterd-coverity-fixes.patch
Patch0437: 0437-glusterd-prevent-use-after-free-in-glusterd_op_ac_se.patch
Patch0438: 0438-dht-sparse-files-rebalance-enhancements.patch
Patch0439: 0439-cluster-afr-Delay-post-op-for-fsync.patch
Patch0440: 0440-glusterd-snapshot-Improve-log-message-during-snapsho.patch
Patch0441: 0441-fuse-occasional-logging-for-fuse-device-weird-write-.patch
Patch0442: 0442-fuse-correctly-handle-setxattr-values.patch
Patch0443: 0443-fuse-fix-high-sev-coverity-issue.patch
Patch0444: 0444-mount-fuse-Fixing-a-coverity-issue.patch
Patch0445: 0445-feature-changelog-Avoid-thread-creation-if-xlator-is.patch
Patch0446: 0446-bitrot-Make-number-of-signer-threads-configurable.patch
Patch0447: 0447-core-brick_mux-brick-crashed-when-creating-and-delet.patch
Patch0448: 0448-Posix-Use-simple-approach-to-close-fd.patch
Patch0449: 0449-test-Test-case-brick-mux-validation-in-cluster.t-is-.patch
Patch0450: 0450-tests-basic-ctime-enable-ctime-before-testing.patch
Patch0451: 0451-extras-Modify-group-virt-to-include-network-related-.patch
Patch0452: 0452-Tier-DHT-Handle-the-pause-case-missed-out.patch
Patch0453: 0453-glusterd-add-brick-command-failure.patch
Patch0454: 0454-features-locks-avoid-use-after-freed-of-frame-for-bl.patch
Patch0455: 0455-locks-prevent-deletion-of-locked-entries.patch
Patch0456: 0456-add-clean-local-after-grant-lock.patch
Patch0457: 0457-cluster-ec-Improve-detection-of-new-heals.patch
Patch0458: 0458-features-bit-rot-stub-clean-the-mutex-after-cancelli.patch
Patch0459: 0459-features-bit-rot-Unconditionally-sign-the-files-duri.patch
Patch0460: 0460-cluster-ec-Remove-stale-entries-from-indices-xattrop.patch
Patch0461: 0461-geo-replication-Fix-IPv6-parsing.patch
Patch0462: 0462-Issue-with-gf_fill_iatt_for_dirent.patch
Patch0463: 0463-cluster-ec-Change-handling-of-heal-failure-to-avoid-.patch
Patch0464: 0464-storage-posix-Remove-nr_files-usage.patch
Patch0465: 0465-posix-Implement-a-janitor-thread-to-close-fd.patch
Patch0466: 0466-cluster-ec-Change-stale-index-handling.patch
Patch0467: 0467-build-Added-dependency-for-glusterfs-selinux.patch
Patch0468: 0468-build-Update-the-glusterfs-selinux-version.patch
Patch0469: 0469-cluster-ec-Don-t-trigger-heal-for-stale-index.patch
Patch0470: 0470-extras-snap_scheduler-changes-in-gluster-shared-stor.patch
Patch0471: 0471-nfs-ganesha-gluster_shared_storage-fails-to-automoun.patch
Patch0472: 0472-geo-rep-gluster_shared_storage-fails-to-automount-on.patch
Patch0473: 0473-glusterd-Fix-Add-brick-with-increasing-replica-count.patch
Patch0474: 0474-features-locks-posixlk-clear-lock-should-set-error-a.patch
Patch0475: 0475-fuse-lock-interrupt-fix-flock_interrupt.t.patch
Patch0476: 0476-mount-fuse-use-cookies-to-get-fuse-interrupt-record-.patch
Patch0477: 0477-glusterd-snapshot-Snapshot-prevalidation-failure-not.patch
Patch0478: 0478-DHT-Fixing-rebalance-failure-on-issuing-stop-command.patch
Patch0479: 0479-ganesha-ha-revised-regex-exprs-for-status.patch
Patch0480: 0480-DHT-Rebalance-Ensure-Rebalance-reports-status-only-o.patch
Patch0481: 0481-Update-rfc.sh-to-rhgs-3.5.4.patch
Patch0482: 0482-logger-Always-print-errors-in-english.patch
Patch0483: 0483-afr-more-quorum-checks-in-lookup-and-new-entry-marki.patch
Patch0484: 0484-glusterd-rebalance-status-displays-stats-as-0-after-.patch
Patch0485: 0485-cli-rpc-conditional-init-of-global-quota-rpc-1578.patch
Patch0486: 0486-glusterd-brick-sock-file-deleted-log-error-1560.patch
Patch0487: 0487-Events-Log-file-not-re-opened-after-logrotate.patch
Patch0488: 0488-glusterd-afr-enable-granular-entry-heal-by-default.patch
Patch0489: 0489-glusterd-fix-bug-in-enabling-granular-entry-heal.patch
Patch0490: 0490-Segmentation-fault-occurs-during-truncate.patch
Patch0491: 0491-glusterd-mount-directory-getting-truncated-on-mounti.patch
Patch0492: 0492-afr-lookup-Pass-xattr_req-in-while-doing-a-selfheal-.patch
Patch0493: 0493-geo-rep-Note-section-is-required-for-ignore_deletes.patch
Patch0494: 0494-glusterd-start-the-brick-on-a-different-port.patch
Patch0495: 0495-geo-rep-descriptive-message-when-worker-crashes-due-.patch
Patch0496: 0496-posix-Use-MALLOC-instead-of-alloca-to-allocate-memor.patch
Patch0497: 0497-socket-Use-AES128-cipher-in-SSL-if-AES-is-supported-.patch
Patch0498: 0498-geo-rep-Fix-corner-case-in-rename-on-mkdir-during-hy.patch
Patch0499: 0499-gfapi-give-appropriate-error-when-size-exceeds.patch
Patch0500: 0500-features-shard-Convert-shard-block-indices-to-uint64.patch
Patch0501: 0501-Cli-Removing-old-syntax-of-tier-cmds-from-help-menu.patch
Patch0502: 0502-dht-fixing-a-permission-update-issue.patch
Patch0503: 0503-gfapi-Suspend-synctasks-instead-of-blocking-them.patch
Patch0504: 0504-io-stats-Configure-ios_sample_buf_size-based-on-samp.patch
Patch0505: 0505-trash-Create-inode_table-only-while-feature-is-enabl.patch
Patch0506: 0506-posix-Attach-a-posix_spawn_disk_thread-with-glusterf.patch
Patch0507: 0507-inode-make-critical-section-smaller.patch
Patch0508: 0508-fuse-fetch-arbitrary-number-of-groups-from-proc-pid-.patch
Patch0509: 0509-core-configure-optimum-inode-table-hash_size-for-shd.patch
Patch0510: 0510-glusterd-brick_mux-Optimize-friend-handshake-code-to.patch
Patch0511: 0511-features-shard-Missing-format-specifier.patch
Patch0512: 0512-glusterd-shared-storage-mount-fails-in-ipv6-environm.patch
Patch0513: 0513-afr-mark-pending-xattrs-as-a-part-of-metadata-heal.patch
Patch0514: 0514-afr-event-gen-changes.patch
Patch0515: 0515-cluster-afr-Heal-directory-rename-without-rmdir-mkdi.patch
Patch0516: 0516-afr-return-EIO-for-gfid-split-brains.patch
Patch0517: 0517-gfapi-glfs_h_creat_open-new-API-to-create-handle-and.patch
Patch0518: 0518-glusterd-Fix-for-shared-storage-in-ipv6-env.patch
Patch0519: 0519-glusterfs-events-Fix-incorrect-attribute-access-2002.patch
Patch0520: 0520-performance-open-behind-seek-fop-should-open_and_res.patch
Patch0521: 0521-open-behind-fix-missing-fd-reference.patch
Patch0522: 0522-lcov-improve-line-coverage.patch
Patch0523: 0523-open-behind-rewrite-of-internal-logic.patch
Patch0524: 0524-open-behind-fix-call_frame-leak.patch
Patch0525: 0525-open-behind-implement-create-fop.patch
Patch0526: 0526-Quota-quota_fsck.py-converting-byte-string-to-string.patch
Patch0527: 0527-Events-Socket-creation-after-getaddrinfo-and-IPv4-an.patch
Patch0528: 0528-Extras-Removing-xattr_analysis-script.patch
Patch0529: 0529-geo-rep-prompt-should-work-for-ignore_deletes.patch
Patch0530: 0530-gfapi-avoid-crash-while-logging-message.patch
Patch0531: 0531-Glustereventsd-Default-port-change-2091.patch
Patch0532: 0532-glusterd-fix-for-starting-brick-on-new-port.patch
Patch0533: 0533-glusterd-Rebalance-cli-is-not-showing-correct-status.patch
Patch0534: 0534-glusterd-Resolve-use-after-free-bug-2181.patch
Patch0535: 0535-multiple-files-use-dict_allocate_and_serialize-where.patch
Patch0536: 0536-dht-Ongoing-IO-is-failed-during-volume-shrink-operat.patch
Patch0537: 0537-cluster-afr-Fix-race-in-lockinfo-f-getxattr.patch
Patch0538: 0538-afr-fix-coverity-issue-introduced-by-90cefde.patch
Patch0539: 0539-extras-disable-lookup-optimize-in-virt-and-block-gro.patch
Patch0540: 0540-extras-Disable-write-behind-for-group-samba.patch
Patch0541: 0541-glusterd-volgen-Add-functionality-to-accept-any-cust.patch
Patch0542: 0542-xlaotrs-mgmt-Fixing-coverity-issue-1445996.patch
Patch0543: 0543-glusterd-handle-custom-xlator-failure-cases.patch
Patch0544: 0544-RHGS-3.5.4-rebuild-to-ship-with-RHEL-8.5.patch

%description
256ebe
GlusterFS is a distributed file-system capable of scaling to several
256ebe
petabytes. It aggregates various storage bricks over Infiniband RDMA
256ebe
or TCP/IP interconnect into one large parallel network file
256ebe
system. GlusterFS is one of the most sophisticated file systems in
256ebe
terms of features and extensibility.  It borrows a powerful concept
256ebe
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
256ebe
is in user space and easily manageable.
256ebe
256ebe
This package includes the glusterfs binary, the glusterfsd daemon and the
256ebe
libglusterfs and glusterfs translator modules common to both GlusterFS server
256ebe
and client framework.
256ebe
256ebe
%package api
Summary:          GlusterFS api library
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description api
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the glusterfs libgfapi library.

%package api-devel
Summary:          Development Libraries
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-devel%{?_isa} = %{version}-%{release}
Requires:         libacl-devel
Requires:         %{name}-api%{?_isa} = %{version}-%{release}

%description api-devel
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the api include files.

%package cli
Summary:          GlusterFS CLI
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description cli
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the GlusterFS CLI application and its man page.

%package cloudsync-plugins
Summary:          Cloudsync Plugins
BuildRequires:    libcurl-devel
Requires:         glusterfs-libs = %{version}-%{release}

%description cloudsync-plugins
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides cloudsync plugins for the archival feature.

%package devel
Summary:          Development Libraries
Requires:         %{name}%{?_isa} = %{version}-%{release}
# Needed for the Glupy examples to work
%if ( 0%{!?_without_extra_xlators:1} )
Requires:         %{name}-extra-xlators%{?_isa} = %{version}-%{release}
%endif
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
%if ( 0%{!?_without_server:1} )
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
%endif

%description devel
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the development libraries and include files.

%if ( 0%{!?_without_extra_xlators:1} )
%package extra-xlators
Summary:          Extra Gluster filesystem Translators
# We need the python-gluster rpm for the gluster module's __init__.py in the
# Python site-packages area
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
Requires:         python%{_pythonver}

%description extra-xlators
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides extra filesystem Translators, such as Glupy,
for GlusterFS.
%endif

%package fuse
Summary:          Fuse client
BuildRequires:    fuse-devel
Requires:         attr
Requires:         psmisc

Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}

Obsoletes:        %{name}-client < %{version}-%{release}
Provides:         %{name}-client = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description fuse
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for FUSE-based clients and includes the
glusterfs(d) binary.

%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
%package ganesha
Summary:          NFS-Ganesha configuration
Group:            Applications/File

Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         nfs-ganesha-selinux >= 2.7.3
Requires:         nfs-ganesha-gluster >= 2.7.3
Requires:         pcs, dbus
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
Requires:         cman, pacemaker, corosync
%endif

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 5 )
# we need the portblock resource-agent, which is available in 3.9.5 and later.
Requires:         resource-agents >= 3.9.5
Requires:         net-tools
%endif

%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
Requires: selinux-policy >= 3.13.1-160
Requires(post):   policycoreutils-python
Requires(postun): policycoreutils-python
%else
Requires(post):   policycoreutils-python-utils
Requires(postun): policycoreutils-python-utils
%endif
%endif

%description ganesha
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the configuration and related files for using
NFS-Ganesha as the NFS server with GlusterFS.
%endif

%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
Summary:          GlusterFS Geo-replication
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         python%{_pythonver}
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
Requires:         python-prettytable
%else
Requires:         python%{_pythonver}-prettytable
%endif
Requires:         python%{_pythonver}-gluster = %{version}-%{release}

Requires:         rsync
Requires:         util-linux
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
# required for setting selinux booleans
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
Requires(post):      policycoreutils-python-utils
Requires(postun):    policycoreutils-python-utils
Requires:            selinux-policy-targeted
Requires(post):      selinux-policy-targeted
BuildRequires:       selinux-policy-devel
%endif

%description geo-replication
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for geo-replication.
%endif

%package libs
Summary:          GlusterFS common libraries

%description libs
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the base GlusterFS libraries.

%package -n python%{_pythonver}-gluster
Summary:          GlusterFS python library
Requires:         python%{_pythonver}
%if ( ! %{_usepython3} )
%{?python_provide:%python_provide python-gluster}
Provides:         python-gluster = %{version}-%{release}
Obsoletes:        python-gluster < 3.10
%endif

%description -n python%{_pythonver}-gluster
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package contains the Python modules of GlusterFS and owns the gluster
namespace.

%if ( 0%{!?_without_rdma:1} )
%package rdma
Summary:          GlusterFS rdma support for ib-verbs
%if ( 0%{?fedora} && 0%{?fedora} > 26 )
BuildRequires:    rdma-core-devel
%else
BuildRequires:    libibverbs-devel
BuildRequires:    librdmacm-devel >= 1.0.15
%endif
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description rdma
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for the ib-verbs library.
%endif

%if ( 0%{!?_without_regression_tests:1} )
%package regression-tests
Summary:          Development Tools
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
## thin provisioning support
Requires:         lvm2 >= 2.02.89
Requires:         perl(App::Prove) perl(Test::Harness) gcc util-linux-ng
Requires:         python%{_pythonver}
Requires:         attr dbench file git libacl-devel net-tools
Requires:         nfs-utils xfsprogs yajl psmisc bc

%description regression-tests
The Gluster Test Framework is a suite of scripts used for
regression testing of Gluster.
%endif

%if ( 0%{!?_without_ocf:1} )
%package resource-agents
Summary:          OCF Resource Agents for GlusterFS
License:          GPLv3+
BuildArch:        noarch
# this Group handling comes from the Fedora resource-agents package
# for glusterd
Requires:         %{name}-server = %{version}-%{release}
# depending on the distribution, we need pacemaker or resource-agents
Requires:         %{_prefix}/lib/ocf/resource.d
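# (illustrative note, not a build change: a path dependency like the one
# above is satisfied by whichever installed package owns that directory;
# e.g. "rpm -q --whatprovides %%{_prefix}/lib/ocf/resource.d" would
# typically report resource-agents or pacemaker as the provider)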

%description resource-agents
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the resource agents which plug glusterd into
Open Cluster Framework (OCF) compliant cluster resource managers,
like Pacemaker.
%endif

%if ( 0%{!?_without_server:1} )
%package server
Summary:          Clustered file-system server
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-cli%{?_isa} = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
%if ( 0%{?fedora} && 0%{?fedora} >= 30  || ( 0%{?rhel} && 0%{?rhel} >= 8 ) )
Requires:         glusterfs-selinux >= 1.0-1
%endif
# some daemons (like quota) use a fuse-mount; glusterfsd is part of -fuse
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
# self-heal daemon, rebalance, nfs-server etc. are actually clients
Requires:         %{name}-api%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
# lvm2 for snapshot, and nfs-utils and rpcbind/portmap for the gnfs server
Requires:         lvm2
Requires:         nfs-utils
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%else
Requires(post):   /sbin/chkconfig
Requires(preun):  /sbin/service
Requires(preun):  /sbin/chkconfig
Requires(postun): /sbin/service
%endif
%if (0%{?_with_firewalld:1})
# we install firewalld rules, so we need to have the directory owned
%if ( 0%{!?rhel} )
# not on RHEL because firewalld-filesystem appeared in 7.3
# when EL7 rpm gets weak dependencies we can add a Suggests:
Requires:         firewalld-filesystem
%endif
%endif
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
Requires:         rpcbind
%else
Requires:         portmap
%endif
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
Requires:         python-argparse
%endif
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
Requires:         python%{_pythonver}-pyxattr
%else
Requires:         pyxattr
%endif
%if (0%{?_with_valgrind:1})
Requires:         valgrind
%endif

%description server
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the glusterfs server daemon.
%endif

%package client-xlators
Summary:          GlusterFS client-side translators
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description client-xlators
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the translators needed on any GlusterFS client.

%if ( 0%{!?_without_events:1} )
%package events
Summary:          GlusterFS Events
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         python%{_pythonver}
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
Requires:         python-requests
%else
Requires:         python%{_pythonver}-requests
%endif
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
Requires:         python-prettytable
Requires:         python-argparse
%else
Requires:         python%{_pythonver}-prettytable
%endif
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif

%description events
GlusterFS Events

%endif

%prep
%setup -q -n %{name}-%{version}%{?prereltag}

# sanitization scriptlet for patches with file renames
ls %{_topdir}/SOURCES/*.patch | sort | \
while read p
do
    # if the destination file exists, it is most probably stale
    # so we must remove it
    rename_to=( $(grep -i 'rename to' $p | cut -f 3 -d ' ') )
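    # (sketch of the input being parsed: a git-format patch records a rename
    # as two header lines, e.g.
    #     rename from xlators/features/old-name.c
    #     rename to xlators/features/new-name.c
    # so the third space-separated field of each 'rename to' line, captured
    # above, is the destination path; the example paths are hypothetical)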
    if [ ${#rename_to[*]} -gt 0 ]; then
        for f in ${rename_to[*]}
        do
            if [ -f $f ]; then
                rm -f $f
            elif [ -d $f ]; then
                rm -rf $f
            fi
        done
    fi

    SOURCE_FILES=( $(egrep '^\-\-\- a/' $p | cut -f 2- -d '/') )
    DEST_FILES=( $(egrep '^\+\+\+ b/' $p | cut -f 2- -d '/') )
    EXCLUDE_DOCS=()
    for idx in ${!SOURCE_FILES[@]}; do
        # skip the doc
        source_file=${SOURCE_FILES[$idx]}
        dest_file=${DEST_FILES[$idx]}
        if [[ "$dest_file" =~ ^doc/.+ ]]; then
            if [ "$source_file" != "dev/null" ] && [ ! -f "$dest_file" ]; then
                # if the patch is being applied to a doc file and if the doc file
                # hasn't been added so far then we need to exclude it
                EXCLUDE_DOCS=( ${EXCLUDE_DOCS[*]} "$dest_file" )
            fi
        fi
    done
    EXCLUDE_DOCS_OPT=""
    # iterate over every collected doc, not just the first array element
    for doc in ${EXCLUDE_DOCS[*]}; do
        EXCLUDE_DOCS_OPT="--exclude=$doc $EXCLUDE_DOCS_OPT"
    done

    # HACK to fix build
    bn=$(basename $p)
    if [ "$bn" == "0085-Revert-all-remove-code-which-is-not-being-considered.patch" ]; then
        (patch -p1 -u -F3 < $p || :)
        if [ -f libglusterfs/Makefile.am.rej ]; then
            sed -i -e 's/^SUBDIRS = src/SUBDIRS = src src\/gfdb/g;s/^CLEANFILES = /CLEANFILES =/g' libglusterfs/Makefile.am
        fi
    elif [ "$bn" == "0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0117-spec-Remove-thin-arbiter-package.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0023-hooks-remove-selinux-hooks.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0042-spec-client-server-Builds-are-failing-on-rhel-6.patch" ]; then
        (patch -p1 < $p || :)
    else
        # apply the patch with 'git apply'
        git apply -p1 --exclude=rfc.sh \
                      --exclude=.gitignore \
                      --exclude=.testignore \
                      --exclude=MAINTAINERS \
                      --exclude=extras/checkpatch.pl \
                      --exclude=build-aux/checkpatch.pl \
                      --exclude='tests/*' \
                      ${EXCLUDE_DOCS_OPT} \
                      $p
    fi

done

echo "fixing python shebangs..."
%if ( %{_usepython3} )
    for i in `find . -type f -exec bash -c "if file {} | grep 'Python script, ASCII text executable' >/dev/null; then echo {}; fi" ';'`; do
        sed -i -e 's|^#!/usr/bin/python.*|#!%{__python3}|' -e 's|^#!/usr/bin/env python.*|#!%{__python3}|' $i
    done
%else
    for f in api events extras geo-replication libglusterfs tools xlators; do
        find $f -type f -exec sed -i 's|/usr/bin/python3|/usr/bin/python2|' {} \;
    done
%endif
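# (example of the effect above, assuming a python3 build where %%{__python3}
# expands to /usr/bin/python3: a script beginning with "#!/usr/bin/env python"
# or "#!/usr/bin/python" is rewritten to start with "#!/usr/bin/python3")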

%build

# In RHEL7 a few hardening flags are enabled by default, but the default
# RELRO behaviour is partial; convert it to full
%if ( 0%{?rhel} && 0%{?rhel} >= 7 )
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
export LDFLAGS
%else
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
CFLAGS="$RPM_OPT_FLAGS -fPIE -DPIE"
LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro,-z,now"
%else
# It appears that with gcc-4.1.2 in RHEL5 there is an issue using both -fPIC
# and -fPIE that makes -z relro not work; -fPIE seems to undo what -fPIC does
CFLAGS="$CFLAGS $RPM_OPT_FLAGS"
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
%endif
export CFLAGS
export LDFLAGS
%endif

./autogen.sh && %configure \
        %{?_with_asan} \
        %{?_with_cmocka} \
        %{?_with_debug} \
        %{?_with_firewalld} \
        %{?_with_tmpfilesdir} \
        %{?_with_tsan} \
        %{?_with_valgrind} \
        %{?_without_epoll} \
        %{?_without_events} \
        %{?_without_fusermount} \
        %{?_without_georeplication} \
        %{?_without_ocf} \
        %{?_without_rdma} \
        %{?_without_server} \
        %{?_without_syslog} \
        %{?_without_tiering} \
        %{?_with_ipv6default} \
        %{?_without_libtirpc}
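
# (for illustration: each optional macro passed above is either empty or a
# single configure flag; e.g. when this rpm is built with "--without rdma",
# %%_without_rdma expands to the matching --disable flag defined near the
# top of this spec, while a default build passes nothing at all)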

# fix hardening and remove rpath in shlibs
%if ( 0%{?fedora} && 0%{?fedora} > 17 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
sed -i 's| \\\$compiler_flags |&\\\$LDFLAGS |' libtool
%endif
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|' libtool
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|' libtool

make %{?_smp_mflags}

%check
make check

%install
rm -rf %{buildroot}
make install DESTDIR=%{buildroot}
%if ( 0%{!?_without_server:1} )
%if ( 0%{_for_fedora_koji_builds} )
install -D -p -m 0644 %{SOURCE1} \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
install -D -p -m 0644 %{SOURCE2} \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterfsd
%else
install -D -p -m 0644 extras/glusterd-sysconfig \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
%endif
%endif

mkdir -p %{buildroot}%{_localstatedir}/log/glusterd
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfs
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfsd
mkdir -p %{buildroot}%{_rundir}/gluster

# Remove unwanted files from all the shared libraries
find %{buildroot}%{_libdir} -name '*.a' -delete
find %{buildroot}%{_libdir} -name '*.la' -delete

# Remove installed docs, the ones we want are included by %%doc, in
# /usr/share/doc/glusterfs or /usr/share/doc/glusterfs-x.y.z depending
# on the distribution
%if ( 0%{?fedora} && 0%{?fedora} > 19 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
rm -rf %{buildroot}%{_pkgdocdir}/*
%else
rm -rf %{buildroot}%{_defaultdocdir}/%{name}
mkdir -p %{buildroot}%{_pkgdocdir}
%endif
head -50 ChangeLog > ChangeLog.head && mv ChangeLog.head ChangeLog
cat << EOM >> ChangeLog

More commit messages for this ChangeLog can be found at
https://forge.gluster.org/glusterfs-core/glusterfs/commits/v%{version}%{?prereltag}
EOM

# Remove benchmarking and other unpackaged files
# make install always puts these in %%{_defaultdocdir}/%%{name} so don't
# use %%{_pkgdocdir}; that will be wrong on later Fedora distributions
rm -rf %{buildroot}%{_defaultdocdir}/%{name}/benchmarking
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs-mode.el
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs.vim

%if ( 0%{!?_without_server:1} )
# Create the working directory
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd

# Update the configuration file to use the /var/lib working directory
sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sharedstatedir}/glusterd|g' \
    %{buildroot}%{_sysconfdir}/glusterfs/glusterd.vol
%endif

# Install the glusterfsd .service or init.d file
%if ( 0%{!?_without_server:1} )
%if ( 0%{_for_fedora_koji_builds} )
%service_install glusterfsd %{glusterfsd_svcfile}
%endif
%endif

install -D -p -m 0644 extras/glusterfs-logrotate \
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs

# ganesha ghosts
%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
mkdir -p %{buildroot}%{_sysconfdir}/ganesha
touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf
mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/
touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
%endif

%if ( 0%{!?_without_georeplication:1} )
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
install -D -p -m 0644 extras/glusterfs-georep-logrotate \
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
%endif

%if ( 0%{!?_without_server:1} )
touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
touch %{buildroot}%{_sharedstatedir}/glusterd/options
subdirs=(add-brick create copy-file delete gsync-create remove-brick reset set start stop)
for dir in ${subdirs[@]}; do
    mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/"$dir"/{pre,post}
done
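# (the loop above creates, for example, both
#   %%{buildroot}%%{_sharedstatedir}/glusterd/hooks/1/add-brick/pre
#   %%{buildroot}%%{_sharedstatedir}/glusterd/hooks/1/add-brick/post
# and the matching pre/post pair for every other entry in $subdirs)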
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/glustershd
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/peers
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/vols
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/nfs/run
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/bitd
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/quotad
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/scrub
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/snaps
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/ss_brick
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/nfs-server.vol
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%endif

find ./tests ./run-tests.sh -type f | cpio -pd %{buildroot}%{_prefix}/share/glusterfs

## Install bash completion for cli
install -p -m 0744 -D extras/command-completion/gluster.bash \
    %{buildroot}%{_sysconfdir}/bash_completion.d/gluster

%if ( 0%{!?_without_server:1} )
echo "RHGS 3.5" > %{buildroot}%{_datadir}/glusterfs/release
%endif

%clean
rm -rf %{buildroot}

##-----------------------------------------------------------------------------
## All %%post should be placed here and keep them sorted
##
%post
/sbin/ldconfig
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%systemd_postun_with_restart rsyslog
%endif
%endif
exit 0

%post api
/sbin/ldconfig

%if ( 0%{!?_without_events:1} )
%post events
%service_enable glustereventsd
%endif

%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%post ganesha
semanage boolean -m ganesha_use_fusefs --on
exit 0
%endif
%endif

%if ( 0%{!?_without_georeplication:1} )
%post geo-replication
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
%selinux_set_booleans %{selinuxbooleans}
%endif
if [ $1 -ge 1 ]; then
    %systemd_postun_with_restart glusterd
fi
exit 0
%endif

%post libs
/sbin/ldconfig

%if ( 0%{!?_without_server:1} )
%post server
# Legacy server
%service_enable glusterd
%if ( 0%{_for_fedora_koji_builds} )
%service_enable glusterfsd
%endif
# ".cmd_log_history" was renamed to "cmd_history.log" in GlusterFS-3.7.
# While upgrading the glusterfs-server package from GlusterFS version <= 3.6 to
# GlusterFS version 3.7, ".cmd_log_history" should be renamed to
# "cmd_history.log" to retain the cli command history contents.
if [ -f %{_localstatedir}/log/glusterfs/.cmd_log_history ]; then
    mv %{_localstatedir}/log/glusterfs/.cmd_log_history \
       %{_localstatedir}/log/glusterfs/cmd_history.log
fi

# Genuine Fedora (and EPEL) builds never put gluster files in /etc; if
# there are any files in /etc from a prior gluster.org install, move them
# to /var/lib. (N.B. Starting with 3.3.0 all gluster files are in /var/lib
# in gluster.org RPMs.) Be careful to copy them on the off chance that
# /etc and /var/lib are on separate file systems
if [ -d /etc/glusterd -a ! -h %{_sharedstatedir}/glusterd ]; then
    mkdir -p %{_sharedstatedir}/glusterd
    cp -a /etc/glusterd %{_sharedstatedir}/glusterd
    rm -rf /etc/glusterd
    ln -sf %{_sharedstatedir}/glusterd /etc/glusterd
fi

# Rename old volfiles in an RPM-standard way.  These aren't actually
# considered package config files, so %%config doesn't work for them.
if [ -d %{_sharedstatedir}/glusterd/vols ]; then
    for file in $(find %{_sharedstatedir}/glusterd/vols -name '*.vol'); do
        newfile=${file}.rpmsave
        echo "warning: ${file} saved as ${newfile}"
        cp ${file} ${newfile}
    done
fi

# add the marker translator
# but first make certain that there are no old libs around to bite us
# BZ 834847
if [ -e /etc/ld.so.conf.d/glusterfs.conf ]; then
    rm -f /etc/ld.so.conf.d/glusterfs.conf
    /sbin/ldconfig
fi

%if (0%{?_with_firewalld:1})
    %firewalld_reload
%endif

%endif

##-----------------------------------------------------------------------------
## All %%pre should be placed here and keep them sorted
##
%pre
getent group gluster > /dev/null || groupadd -r gluster
getent passwd gluster > /dev/null || useradd -r -g gluster -d %{_rundir}/gluster -s /sbin/nologin -c "GlusterFS daemons" gluster
exit 0

##-----------------------------------------------------------------------------
## All %%preun should be placed here and keep them sorted
##
%if ( 0%{!?_without_events:1} )
%preun events
if [ $1 -eq 0 ]; then
    if [ -f %glustereventsd_svcfile ]; then
        %service_stop glustereventsd
        %systemd_preun glustereventsd
    fi
fi
exit 0
%endif

%if ( 0%{!?_without_server:1} )
%preun server
if [ $1 -eq 0 ]; then
    if [ -f %glusterfsd_svcfile ]; then
        %service_stop glusterfsd
    fi
    %service_stop glusterd
    if [ -f %glusterfsd_svcfile ]; then
        %systemd_preun glusterfsd
    fi
    %systemd_preun glusterd
fi
if [ $1 -ge 1 ]; then
    if [ -f %glusterfsd_svcfile ]; then
        %systemd_postun_with_restart glusterfsd
    fi
    %systemd_postun_with_restart glusterd
fi
exit 0
%endif

##-----------------------------------------------------------------------------
## All %%postun should be placed here and keep them sorted
##
%postun
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%systemd_postun_with_restart rsyslog
%endif
%endif

%if ( 0%{!?_without_server:1} )
%postun server
%if (0%{?_with_firewalld:1})
    %firewalld_reload
%endif
exit 0
%endif

%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%postun ganesha
semanage boolean -m ganesha_use_fusefs --off
exit 0
%endif
%endif

##-----------------------------------------------------------------------------
## All %%trigger should be placed here and keep them sorted
##
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%trigger ganesha -- selinux-policy-targeted
semanage boolean -m ganesha_use_fusefs --on
exit 0
%endif
%endif

##-----------------------------------------------------------------------------
## All %%triggerun should be placed here and keep them sorted
##
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%triggerun ganesha -- selinux-policy-targeted
semanage boolean -m ganesha_use_fusefs --off
exit 0
%endif
%endif

##-----------------------------------------------------------------------------
## All %%files should be placed here and keep them grouped
##
%files
%doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS COMMITMENT
%{_mandir}/man8/*gluster*.8*
%if ( 0%{!?_without_server:1} )
%exclude %{_mandir}/man8/gluster.8*
%endif
%dir %{_localstatedir}/log/glusterfs
%if ( 0%{!?_without_rdma:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif
%if 0%{?!_without_server:1}
%dir %{_datadir}/glusterfs
%dir %{_datadir}/glusterfs/scripts
     %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
     %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
%endif
%{_datadir}/glusterfs/scripts/identify-hangs.sh
%{_datadir}/glusterfs/scripts/collect-system-stats.sh
%{_datadir}/glusterfs/scripts/log_accounting.sh
# xlators that are needed on the client- and on the server-side
%dir %{_libdir}/glusterfs
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/auth
     %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/addr.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/login.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
     %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/socket.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/error-gen.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/delay-gen.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/io-stats.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/sink.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/trace.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/access-control.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/barrier.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cdc.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changelog.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/utime.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/gfid-access.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/namespace.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/read-only.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/shard.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-client.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/worm.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cloudsync.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/meta.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-cache.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-threads.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/md-cache.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/open-behind.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/quick-read.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/read-ahead.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/readdir-ahead.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/stat-prefetch.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/write-behind.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/nl-cache.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system/posix-acl.so
%dir %attr(0775,gluster,gluster) %{_rundir}/gluster
%if 0%{?_tmpfilesdir:1} && 0%{!?_without_server:1}
%{_tmpfilesdir}/gluster.conf
%endif
%if ( 0%{?_without_extra_xlators:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
%endif
%if ( 0%{?_without_regression_tests:1} )
%exclude %{_datadir}/glusterfs/run-tests.sh
%exclude %{_datadir}/glusterfs/tests
%endif
%if 0%{?_without_server:1}
%if ( 0%{?_with_systemd:1} )
%exclude %{_datadir}/glusterfs/scripts/control-cpu-load.sh
%exclude %{_datadir}/glusterfs/scripts/control-mem.sh
%endif
%endif

%if ( 0%{?_without_server:1} || 0%{?rhel} < 7 )
# exclude ganesha-related files for rhel 6 and client builds
%exclude %{_sysconfdir}/ganesha/ganesha-ha.conf.sample
%exclude %{_libexecdir}/ganesha/*
%exclude %{_prefix}/lib/ocf/resource.d/heartbeat/*
%if ( 0%{!?_without_server:1} )
%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
%endif
%endif

%exclude %{_datadir}/glusterfs/scripts/setup-thin-arbiter.sh

%if ( 0%{?_without_server:1} )
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
%endif

%files api
%exclude %{_libdir}/*.so
# libgfapi files
%{_libdir}/libgfapi.*
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so

%files api-devel
%{_libdir}/pkgconfig/glusterfs-api.pc
%{_libdir}/libgfapi.so
%dir %{_includedir}/glusterfs
%dir %{_includedir}/glusterfs/api
     %{_includedir}/glusterfs/api/*

%files cli
%{_sbindir}/gluster
%{_mandir}/man8/gluster.8*
%{_sysconfdir}/bash_completion.d/gluster

%files cloudsync-plugins
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins
     %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsyncs3.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsynccvlt.so

%files devel
%dir %{_includedir}/glusterfs
     %{_includedir}/glusterfs/*
%exclude %{_includedir}/glusterfs/api
%exclude %{_libdir}/libgfapi.so
%{_libdir}/*.so
%if ( 0%{?_without_server:1} )
%exclude %{_libdir}/pkgconfig/libgfchangelog.pc
%exclude %{_libdir}/libgfchangelog.so
%if ( 0%{!?_without_tiering:1} )
%exclude %{_libdir}/pkgconfig/libgfdb.pc
%endif
%else
%{_libdir}/pkgconfig/libgfchangelog.pc
%if ( 0%{!?_without_tiering:1} )
%{_libdir}/pkgconfig/libgfdb.pc
%endif
%endif

%files client-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/*.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so

%if ( 0%{!?_without_extra_xlators:1} )
%files extra-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
%endif

%files fuse
# glusterfs is a symlink to glusterfsd, -server depends on -fuse.
%{_sbindir}/glusterfs
%{_sbindir}/glusterfsd
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/fuse.so
/sbin/mount.glusterfs
%if ( 0%{!?_without_fusermount:1} )
%{_bindir}/fusermount-glusterfs
%endif

%if ( 0%{!?_without_georeplication:1} )
%files geo-replication
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep

%{_sbindir}/gfind_missing_files
%{_sbindir}/gluster-mountbroker
%dir %{_libexecdir}/glusterfs
%dir %{_libexecdir}/glusterfs/python
%dir %{_libexecdir}/glusterfs/python/syncdaemon
     %{_libexecdir}/glusterfs/gsyncd
     %{_libexecdir}/glusterfs/python/syncdaemon/*
     %{_libexecdir}/glusterfs/gverify.sh
     %{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
     %{_libexecdir}/glusterfs/peer_gsec_create
     %{_libexecdir}/glusterfs/peer_mountbroker
     %{_libexecdir}/glusterfs/peer_mountbroker.py*
     %{_libexecdir}/glusterfs/gfind_missing_files
     %{_libexecdir}/glusterfs/peer_georep-sshkey.py*
%{_sbindir}/gluster-georep-sshkey

       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/geo-replication
%ghost      %attr(0644,-,-) %{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/pre

%dir %{_datadir}/glusterfs
%dir %{_datadir}/glusterfs/scripts
     %{_datadir}/glusterfs/scripts/get-gfid.sh
     %{_datadir}/glusterfs/scripts/slave-upgrade.sh
     %{_datadir}/glusterfs/scripts/gsync-upgrade.sh
     %{_datadir}/glusterfs/scripts/generate-gfid-file.sh
     %{_datadir}/glusterfs/scripts/gsync-sync-gfid
     %{_datadir}/glusterfs/scripts/schedule_georep.py*
%endif

%files libs
%{_libdir}/*.so.*
%exclude %{_libdir}/libgfapi.*
%if ( 0%{!?_without_tiering:1} )
# libgfdb is only needed server-side
%exclude %{_libdir}/libgfdb.*
%endif

%files -n python%{_pythonver}-gluster
# introducing the gluster module in the site-packages area,
# so that all other gluster submodules can reside in the same namespace.
%if ( %{_usepython3} )
%dir %{python3_sitelib}/gluster
     %{python3_sitelib}/gluster/__init__.*
     %{python3_sitelib}/gluster/__pycache__
     %{python3_sitelib}/gluster/cliutils
%else
%dir %{python2_sitelib}/gluster
     %{python2_sitelib}/gluster/__init__.*
     %{python2_sitelib}/gluster/cliutils
%endif

%if ( 0%{!?_without_rdma:1} )
%files rdma
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
     %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif

%if ( 0%{!?_without_regression_tests:1} )
%files regression-tests
%dir %{_datadir}/glusterfs
     %{_datadir}/glusterfs/run-tests.sh
     %{_datadir}/glusterfs/tests
%exclude %{_datadir}/glusterfs/tests/vagrant
%endif

%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
%files ganesha
%dir %{_libexecdir}/ganesha
%{_sysconfdir}/ganesha/ganesha-ha.conf.sample
%{_libexecdir}/ganesha/*
%{_prefix}/lib/ocf/resource.d/heartbeat/*
%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
%ghost      %attr(0644,-,-) %config(noreplace) %{_sysconfdir}/ganesha/ganesha-ha.conf
%ghost %dir %attr(0755,-,-) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha
%ghost      %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
%ghost      %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
%endif

%if ( 0%{!?_without_ocf:1} )
%files resource-agents
# /usr/lib is the standard for OCF, also on x86_64
%{_prefix}/lib/ocf/resource.d/glusterfs
%endif

%if ( 0%{!?_without_server:1} )
%files server
%doc extras/clear_xattrs.sh
%{_datadir}/glusterfs/scripts/quota_fsck.py*
# sysconf
%config(noreplace) %{_sysconfdir}/glusterfs
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
%exclude %{_sysconfdir}/glusterfs/eventsconfig.json
%config(noreplace) %{_sysconfdir}/sysconfig/glusterd
%if ( 0%{_for_fedora_koji_builds} )
%config(noreplace) %{_sysconfdir}/sysconfig/glusterfsd
%endif

# init files
%glusterd_svcfile
%if ( 0%{_for_fedora_koji_builds} )
%glusterfsd_svcfile
%endif
%if ( 0%{?_with_systemd:1} )
%glusterfssharedstorage_svcfile
%endif

# binaries
%{_sbindir}/glusterd
%{_sbindir}/glfsheal
%{_sbindir}/gf_attach
%{_sbindir}/gluster-setgfid2path
# %%{_sbindir}/glusterfsd is the actual binary, but glusterfs (client) is a
# symlink. The binary itself (and the symlink) are part of the glusterfs-fuse
# package, because glusterfs-server depends on that anyway.

# Manpages
%{_mandir}/man8/gluster-setgfid2path.8*

# xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/sdfs.so
%if ( 0%{!?_without_tiering:1} )
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
     %{_libdir}/libgfdb.so.*
%endif
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/selinux.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage/posix.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so

# snap_scheduler
%{_sbindir}/snap_scheduler.py
%{_sbindir}/gcron.py
%{_sbindir}/conf.py

# /var/lib/glusterd, e.g. hookscripts, etc.
256ebe
%ghost      %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
256ebe
%ghost      %attr(0600,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/options
256ebe
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd
256ebe
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/bitd
256ebe
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/groups
256ebe
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/virt
256ebe
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/metadata-cache
256ebe
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/gluster-block
256ebe
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/nl-cache
256ebe
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/db-workload
256ebe
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/distributed-virt
256ebe
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/samba
256ebe
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind
256ebe
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind/.keys
256ebe
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glustershd
256ebe
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks
256ebe
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1
256ebe
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick
256ebe
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post
256ebe
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/disabled-quota-root-xattr-heal.sh
256ebe
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/S13create-subdir-mounts.sh
256ebe
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre
256ebe
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/post
                            %{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S30samba-set.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S32gluster_enable_shared_storage.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/post
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/peers
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/quotad
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/scrub
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/snaps
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/ss_brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/vols

# Extra utility script
%dir %{_libexecdir}/glusterfs
     %{_datadir}/glusterfs/release
%dir %{_datadir}/glusterfs/scripts
     %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
%if ( 0%{?_with_systemd:1} )
     %{_libexecdir}/glusterfs/mount-shared-storage.sh
     %{_datadir}/glusterfs/scripts/control-cpu-load.sh
     %{_datadir}/glusterfs/scripts/control-mem.sh
%endif

# Incrementalapi
     %{_libexecdir}/glusterfs/glusterfind
%{_bindir}/glusterfind
     %{_libexecdir}/glusterfs/peer_add_secret_pub

%if ( 0%{?_with_firewalld:1} )
%{_prefix}/lib/firewalld/services/glusterfs.xml
%endif
# end of server files
%endif

# Events
%if ( 0%{!?_without_events:1} )
%files events
%config(noreplace) %{_sysconfdir}/glusterfs/eventsconfig.json
%dir %{_sharedstatedir}/glusterd
%dir %{_sharedstatedir}/glusterd/events
%dir %{_libexecdir}/glusterfs
     %{_libexecdir}/glusterfs/gfevents
     %{_libexecdir}/glusterfs/peer_eventsapi.py*
%{_sbindir}/glustereventsd
%{_sbindir}/gluster-eventsapi
%{_datadir}/glusterfs/scripts/eventsdash.py*
%if ( 0%{?_with_systemd:1} )
%{_unitdir}/glustereventsd.service
%else
%{_sysconfdir}/init.d/glustereventsd
%endif
%endif

##-----------------------------------------------------------------------------
## All %pretrans should be placed here and keep them sorted
##
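# Each scriptlet below refuses to update while glusterfsd is running; started
# plain-distribute volumes (type=0 in the volume info file) additionally get
# an explicit in-service-upgrade error.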
%if ( 0%{!?_without_server:1} )
%pretrans -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.
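
-- Note: os.execute() hands the string below to /bin/sh -c, so the shebang
-- is effectively a comment; pushd/popd and &> still assume a bash-compatible
-- /bin/sh, which is what RHEL provides.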
script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       # type=0 is a plain distribute volume; status=1 means it is started
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped."
          echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!"
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime."
   echo "WARNING: Be sure to wait for the upgraded server to finish healing before proceeding."
   echo "WARNING: Refer to the upgrade section of the install guide for more details."
   echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;"
   exit 1;
fi
]]
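
-- Lua 5.1 returns a single numeric status from os.execute(); Lua 5.2+
-- returns (ok, how, val). 'rc = val or ok' copes with both conventions.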
ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans api -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans api-devel -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans cli -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans client-xlators -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans fuse -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%if ( 0%{!?_without_georeplication:1} )
%pretrans geo-replication -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end
%endif


%pretrans libs -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%if ( 0%{!?_without_rdma:1} )
%pretrans rdma -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end
%endif


%if ( 0%{!?_without_ocf:1} )
%pretrans resource-agents -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end
%endif


%pretrans server -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end
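
# After the new packages are installed: stop the old daemons, run glusterd
# once with *.upgrade=on (it regenerates the volfiles and exits), then start
# glusterd again if it was running before.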
%posttrans server
pidof -c -o %PPID -x glusterd &> /dev/null
if [ $? -eq 0 ]; then
    kill -9 `pgrep -f gsyncd.py` &> /dev/null

    killall --wait -SIGTERM glusterd &> /dev/null

    if [ "$?" != "0" ]; then
        echo "killall failed while killing glusterd"
    fi

    glusterd --xlator-option *.upgrade=on -N

    # Clean up the leftover glusterd socket file created by glusterd in the
    # rpm_script_t SELinux context.
    rm -rf /var/run/glusterd.socket

    # glusterd _was_ running; we killed it, and the *.upgrade=on run above
    # has exited, so start it again
    %service_start glusterd
else
    glusterd --xlator-option *.upgrade=on -N

    # Clean up the leftover glusterd socket file created by glusterd in the
    # rpm_script_t SELinux context.
    rm -rf /var/run/glusterd.socket
fi

%endif

%changelog
* Wed Oct 06 2021 CentOS Sources <bugs@centos.org> - 6.0-56.4.el8.centos
- remove vendor and/or packager lines

* Mon Aug 30 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.4
- Add gating.yaml, fixes bugs bz#1996984

* Tue Aug 24 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.3
- fixes bugs bz#1996984

* Thu May 06 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.2
- fixes bugs bz#1953901

* Thu Apr 22 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.1
- fixes bugs bz#1927235

* Wed Apr 14 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56
- fixes bugs bz#1948547

* Fri Mar 19 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-55
- fixes bugs bz#1939372

* Wed Mar 03 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-54
- fixes bugs bz#1832306 bz#1911292 bz#1924044

* Thu Feb 11 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-53
- fixes bugs bz#1224906 bz#1691320 bz#1719171 bz#1814744 bz#1865796

* Thu Jan 28 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-52
- fixes bugs bz#1600459 bz#1719171 bz#1830713 bz#1856574

* Mon Dec 28 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-51
- fixes bugs bz#1640148 bz#1856574 bz#1910119

* Tue Dec 15 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-50
- fixes bugs bz#1224906 bz#1412494 bz#1612973 bz#1663821 bz#1691320
  bz#1726673 bz#1749304 bz#1752739 bz#1779238 bz#1813866 bz#1814744 bz#1821599
  bz#1832306 bz#1835229 bz#1842449 bz#1865796 bz#1878077 bz#1882923 bz#1885966
  bz#1890506 bz#1896425 bz#1898776 bz#1898777 bz#1898778 bz#1898781 bz#1898784
  bz#1903468

* Wed Nov 25 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-49
- fixes bugs bz#1286171

* Tue Nov 10 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-48
- fixes bugs bz#1895301

* Thu Nov 05 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-47
- fixes bugs bz#1286171 bz#1821743 bz#1837926

* Wed Oct 21 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-46
- fixes bugs bz#1873469 bz#1881823

* Wed Sep 09 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-45
- fixes bugs bz#1785714

* Thu Sep 03 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-44
- fixes bugs bz#1460657

* Thu Sep 03 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-43
- fixes bugs bz#1460657

* Wed Sep 02 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-42
- fixes bugs bz#1785714

* Tue Aug 25 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-41
- fixes bugs bz#1785714 bz#1851424 bz#1851989 bz#1852736 bz#1853189 bz#1855966

* Tue Jul 21 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-40
- fixes bugs bz#1812789 bz#1844359 bz#1847081 bz#1854165

* Wed Jun 17 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-39
- fixes bugs bz#1844359 bz#1845064

* Wed Jun 10 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-38
- fixes bugs bz#1234220 bz#1286171 bz#1487177 bz#1524457 bz#1640573
  bz#1663557 bz#1667954 bz#1683602 bz#1686897 bz#1721355 bz#1748865 bz#1750211
  bz#1754391 bz#1759875 bz#1761531 bz#1761932 bz#1763124 bz#1763129 bz#1764091
  bz#1775637 bz#1776901 bz#1781550 bz#1781649 bz#1781710 bz#1783232 bz#1784211
  bz#1784415 bz#1786516 bz#1786681 bz#1787294 bz#1787310 bz#1787331 bz#1787994
  bz#1790336 bz#1792873 bz#1794663 bz#1796814 bz#1804164 bz#1810924 bz#1815434
  bz#1836099 bz#1837467 bz#1837926 bz#1838479 bz#1839137 bz#1844359

* Fri May 29 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-37
- fixes bugs bz#1840794

* Wed May 27 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-36
- fixes bugs bz#1812789 bz#1823423

* Fri May 22 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-35
- fixes bugs bz#1810516 bz#1830713 bz#1836233

* Sun May 17 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-34
- fixes bugs bz#1802013 bz#1823706 bz#1825177 bz#1830713 bz#1831403 bz#1833017

* Wed Apr 29 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-33
- fixes bugs bz#1812789 bz#1813917 bz#1823703 bz#1823706 bz#1825195

* Sat Apr 04 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-32
- fixes bugs bz#1781543 bz#1812789 bz#1812824 bz#1817369 bz#1819059

* Tue Mar 17 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-31
- fixes bugs bz#1802727

* Thu Feb 20 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-30.1
- fixes bugs bz#1800703

* Sat Feb 01 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-30
- fixes bugs bz#1775564 bz#1794153

* Thu Jan 23 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-29
- fixes bugs bz#1793035

* Tue Jan 14 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-28
- fixes bugs bz#1789447

* Mon Jan 13 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-27
- fixes bugs bz#1789447

* Fri Jan 10 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-26
- fixes bugs bz#1763208 bz#1788656

* Mon Dec 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-25
- fixes bugs bz#1686800 bz#1763208 bz#1779696 bz#1781444 bz#1782162

* Thu Nov 28 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-24
- fixes bugs bz#1768786

* Thu Nov 21 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-23
- fixes bugs bz#1344758 bz#1599802 bz#1685406 bz#1686800 bz#1724021
  bz#1726058 bz#1727755 bz#1731513 bz#1741193 bz#1758923 bz#1761326 bz#1761486
  bz#1762180 bz#1764095 bz#1766640

* Thu Nov 14 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-22
- fixes bugs bz#1771524 bz#1771614

* Fri Oct 25 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-21
- fixes bugs bz#1765555

* Wed Oct 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-20
- fixes bugs bz#1719171 bz#1763412 bz#1764202

* Thu Oct 17 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-19
- fixes bugs bz#1760939

* Wed Oct 16 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-18
- fixes bugs bz#1758432

* Fri Oct 11 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-17
- fixes bugs bz#1704562 bz#1758618 bz#1760261

* Wed Oct 09 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-16
- fixes bugs bz#1752713 bz#1756325

* Fri Sep 27 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-15
- fixes bugs bz#1726000 bz#1731826 bz#1754407 bz#1754790 bz#1755227

* Fri Sep 20 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-14
- fixes bugs bz#1719171 bz#1728673 bz#1731896 bz#1732443 bz#1733970
  bz#1745107 bz#1746027 bz#1748688 bz#1750241 bz#1572163

* Fri Aug 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-13
- fixes bugs bz#1729915 bz#1732376 bz#1743611 bz#1743627 bz#1743634 bz#1744518

* Fri Aug 09 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-12
- fixes bugs bz#1730914 bz#1731448 bz#1732770 bz#1732792 bz#1733531
  bz#1734305 bz#1734534 bz#1734734 bz#1735514 bz#1737705 bz#1732774
  bz#1732793

* Tue Aug 06 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-11
- fixes bugs bz#1733520 bz#1734423

* Fri Aug 02 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-10
- fixes bugs bz#1713890

* Tue Jul 23 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-9
- fixes bugs bz#1708064 bz#1708180 bz#1715422 bz#1720992 bz#1722757

* Tue Jul 16 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-8
- fixes bugs bz#1698435 bz#1712591 bz#1715447 bz#1720488 bz#1722209
  bz#1722512 bz#1724089 bz#1726991 bz#1727785 bz#1729108

* Fri Jun 28 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-7
- fixes bugs bz#1573077 bz#1600918 bz#1703423 bz#1704207 bz#1708064
  bz#1709301 bz#1713664 bz#1716760 bz#1717784 bz#1720163 bz#1720192
  bz#1720551 bz#1721351 bz#1721357 bz#1721477 bz#1722131 bz#1722331
  bz#1722509 bz#1722801 bz#1720248

* Fri Jun 14 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-6
- fixes bugs bz#1668001 bz#1708043 bz#1708183 bz#1710701
  bz#1719640 bz#1720079 bz#1720248 bz#1720318 bz#1720461

* Tue Jun 11 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-5
- fixes bugs bz#1573077 bz#1694595 bz#1703434 bz#1714536 bz#1714588
  bz#1715407 bz#1715438 bz#1705018

* Fri Jun 07 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-4
- fixes bugs bz#1480907 bz#1702298 bz#1703455 bz#1704181 bz#1707246
  bz#1708067 bz#1708116 bz#1708121 bz#1709087 bz#1711249 bz#1711296
  bz#1714078 bz#1714124 bz#1716385 bz#1716626 bz#1716821 bz#1716865 bz#1717927

* Tue May 14 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-3
- fixes bugs bz#1583585 bz#1671862 bz#1702686 bz#1703434 bz#1703753
  bz#1703897 bz#1704562 bz#1704769 bz#1704851 bz#1706683 bz#1706776 bz#1706893

* Thu Apr 25 2019 Milind Changire <mchangir@redhat.com> - 6.0-2
- fixes bugs bz#1471742 bz#1652461 bz#1671862 bz#1676495 bz#1691620
  bz#1696334 bz#1696903 bz#1697820 bz#1698436 bz#1698728 bz#1699709 bz#1699835
  bz#1702240

* Mon Apr 08 2019 Milind Changire <mchangir@redhat.com> - 6.0-1
- rebase to upstream glusterfs at v6.0
- fixes bugs bz#1493284 bz#1578703 bz#1600918 bz#1670415 bz#1691620
  bz#1693935 bz#1695057