e7a346
%global _hardened_build 1
e7a346
e7a346
%global _for_fedora_koji_builds 0
e7a346
e7a346
# uncomment and add '%' to use the prereltag for pre-releases
e7a346
# %%global prereltag qa3
e7a346
e7a346
##-----------------------------------------------------------------------------
e7a346
## All argument definitions should be placed here and keep them sorted
e7a346
##
e7a346
50dc83
# asan
50dc83
# if you wish to compile an rpm with address sanitizer...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --with asan
50dc83
%{?_with_asan:%global _with_asan --enable-asan}
e7a346
50dc83
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
50dc83
%global _with_asan %{nil}
50dc83
%endif
e7a346
50dc83
# bd
50dc83
# if you wish to compile an rpm without the BD map support...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --without bd
50dc83
%{?_without_bd:%global _without_bd --disable-bd-xlator}
50dc83
50dc83
%if ( 0%{?rhel} && 0%{?rhel} > 7 )
50dc83
%global _without_bd --without-bd
50dc83
%endif
50dc83
50dc83
# cmocka
e7a346
# if you wish to compile an rpm with cmocka unit testing...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --with cmocka
e7a346
%{?_with_cmocka:%global _with_cmocka --enable-cmocka}
e7a346
50dc83
# debug
50dc83
# if you wish to compile an rpm with debugging...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --with debug
50dc83
%{?_with_debug:%global _with_debug --enable-debug}
50dc83
50dc83
# epoll
50dc83
# if you wish to compile an rpm without epoll...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --without epoll
50dc83
%{?_without_epoll:%global _without_epoll --disable-epoll}
50dc83
50dc83
# fusermount
50dc83
# if you wish to compile an rpm without fusermount...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --without fusermount
50dc83
%{?_without_fusermount:%global _without_fusermount --disable-fusermount}
50dc83
50dc83
# geo-rep
50dc83
# if you wish to compile an rpm without geo-replication support, compile like this...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --without georeplication
50dc83
%{?_without_georeplication:%global _without_georeplication --disable-georeplication}
50dc83
50dc83
# ipv6default
50dc83
# if you wish to compile an rpm with IPv6 default...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --with ipv6default
50dc83
%{?_with_ipv6default:%global _with_ipv6default --with-ipv6-default}
e7a346
e7a346
# libtirpc
e7a346
# if you wish to compile an rpm without TIRPC (i.e. use legacy glibc rpc)
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --without libtirpc
e7a346
%{?_without_libtirpc:%global _without_libtirpc --without-libtirpc}
e7a346
e7a346
# Do not use libtirpc on EL6, it does not have xdr_uint64_t() and xdr_uint32_t()
e7a346
# Do not use libtirpc on EL7, it does not have xdr_sizeof()
e7a346
%if ( 0%{?rhel} && 0%{?rhel} <= 7 )
e7a346
%global _without_libtirpc --without-libtirpc
e7a346
%endif
e7a346
e7a346
50dc83
# ocf
50dc83
# if you wish to compile an rpm without the OCF resource agents...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --without ocf
50dc83
%{?_without_ocf:%global _without_ocf --without-ocf}
e7a346
50dc83
# rdma
50dc83
# if you wish to compile an rpm without rdma support, compile like this...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --without rdma
50dc83
%{?_without_rdma:%global _without_rdma --disable-ibverbs}
e7a346
50dc83
# No RDMA Support on 32-bit ARM
50dc83
%ifarch armv7hl
50dc83
%global _without_rdma --disable-ibverbs
50dc83
%endif
e7a346
50dc83
# server
50dc83
# if you wish to build rpms without server components, compile like this
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --without server
50dc83
%{?_without_server:%global _without_server --without-server}
50dc83
50dc83
# forcibly disable server components on RHEL unless building for an RHS/RHGS dist
50dc83
%if ( 0%{?rhel} )
50dc83
%if (!(( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" ) || ( "%{?dist}" == ".el8rhgs" )))
50dc83
%global _without_server --without-server
50dc83
%endif
e7a346
%endif
e7a346
50dc83
%global _without_extra_xlators 1
50dc83
%global _without_regression_tests 1
e7a346
50dc83
# syslog
e7a346
# if you wish to build rpms without syslog logging, compile like this
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --without syslog
e7a346
%{?_without_syslog:%global _without_syslog --disable-syslog}
e7a346
e7a346
# disable syslog forcefully as rhel <= 6 doesn't have rsyslog or rsyslog-mmcount
e7a346
# Fedora deprecated syslog, see
e7a346
#  https://fedoraproject.org/wiki/Changes/NoDefaultSyslog
e7a346
# (And what about RHEL7?)
e7a346
%if ( 0%{?fedora} && 0%{?fedora} >= 20 ) || ( 0%{?rhel} && 0%{?rhel} <= 6 )
e7a346
%global _without_syslog --disable-syslog
e7a346
%endif
e7a346
50dc83
# tsan
50dc83
# if you wish to compile an rpm with thread sanitizer...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --with tsan
50dc83
%{?_with_tsan:%global _with_tsan --enable-tsan}
e7a346
50dc83
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
50dc83
%global _with_tsan %{nil}
e7a346
%endif
e7a346
50dc83
# valgrind
50dc83
# if you wish to compile an rpm to run all processes under valgrind...
50dc83
# rpmbuild -ta glusterfs-6.0.tar.gz --with valgrind
50dc83
%{?_with_valgrind:%global _with_valgrind --enable-valgrind}
e7a346
e7a346
##-----------------------------------------------------------------------------
e7a346
## All %%global definitions should be placed here and keep them sorted
e7a346
##
e7a346
9f5ccc
# selinux booleans whose default value needs modification
9f5ccc
# these booleans will be consumed by "%%selinux_set_booleans" macro.
9f5ccc
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
9f5ccc
%global selinuxbooleans rsync_full_access=1 rsync_client=1
9f5ccc
%endif
9f5ccc
50dc83
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
e7a346
%global _with_systemd true
e7a346
%endif
e7a346
e7a346
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 7 )
e7a346
%global _with_firewalld --enable-firewalld
e7a346
%endif
e7a346
e7a346
%if 0%{?_tmpfilesdir:1}
e7a346
%global _with_tmpfilesdir --with-tmpfilesdir=%{_tmpfilesdir}
e7a346
%else
e7a346
%global _with_tmpfilesdir --without-tmpfilesdir
e7a346
%endif
e7a346
50dc83
# without server should also disable some server-only components
50dc83
%if 0%{?_without_server:1}
e7a346
%global _without_events --disable-events
50dc83
%global _without_georeplication --disable-georeplication
50dc83
%global _without_tiering --disable-tiering
50dc83
%global _without_ocf --without-ocf
50dc83
%endif
50dc83
50dc83
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
50dc83
%global _usepython3 1
50dc83
%global _pythonver 3
50dc83
%else
50dc83
%global _usepython3 0
50dc83
%global _pythonver 2
e7a346
%endif
e7a346
e7a346
# From https://fedoraproject.org/wiki/Packaging:Python#Macros
e7a346
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
e7a346
%{!?python2_sitelib: %global python2_sitelib %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
e7a346
%{!?python2_sitearch: %global python2_sitearch %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
e7a346
%global _rundir %{_localstatedir}/run
e7a346
%endif
e7a346
e7a346
%if ( 0%{?_with_systemd:1} )
50dc83
%global service_enable()   /bin/systemctl --quiet enable %1.service || : \
50dc83
%{nil}
50dc83
%global service_start()   /bin/systemctl --quiet start %1.service || : \
50dc83
%{nil}
50dc83
%global service_stop()    /bin/systemctl --quiet stop %1.service || :\
50dc83
%{nil}
50dc83
%global service_install() install -D -p -m 0644 %1.service %{buildroot}%2 \
50dc83
%{nil}
e7a346
# can't seem to make a generic macro that works
50dc83
%global glusterd_svcfile   %{_unitdir}/glusterd.service
50dc83
%global glusterfsd_svcfile %{_unitdir}/glusterfsd.service
50dc83
%global glusterta_svcfile %{_unitdir}/gluster-ta-volume.service
50dc83
%global glustereventsd_svcfile %{_unitdir}/glustereventsd.service
50dc83
%global glusterfssharedstorage_svcfile %{_unitdir}/glusterfssharedstorage.service
e7a346
%else
50dc83
%global service_enable()  /sbin/chkconfig --add %1 >/dev/null 2>&1 || : \
50dc83
%{nil}
50dc83
%global systemd_preun() /sbin/chkconfig --del %1 >/dev/null 2>&1 || : \
50dc83
%{nil}
50dc83
%global systemd_postun_with_restart() /sbin/service %1 condrestart >/dev/null 2>&1 || : \
50dc83
%{nil}
50dc83
%global service_start()   /sbin/service %1 start >/dev/null 2>&1 || : \
50dc83
%{nil}
50dc83
%global service_stop()    /sbin/service %1 stop >/dev/null 2>&1 || : \
50dc83
%{nil}
50dc83
%global service_install() install -D -p -m 0755 %1.init %{buildroot}%2 \
50dc83
%{nil}
e7a346
# can't seem to make a generic macro that works
50dc83
%global glusterd_svcfile   %{_sysconfdir}/init.d/glusterd
50dc83
%global glusterfsd_svcfile %{_sysconfdir}/init.d/glusterfsd
50dc83
%global glustereventsd_svcfile %{_sysconfdir}/init.d/glustereventsd
e7a346
%endif
e7a346
e7a346
%{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}}
e7a346
e7a346
# We do not want to generate useless provides and requires for xlator
e7a346
# .so files to be set for glusterfs packages.
e7a346
# Filter all generated:
e7a346
#
e7a346
# TODO: RHEL5 does not have a convenient solution
e7a346
%if ( 0%{?rhel} == 6 )
e7a346
# filter_setup exists in RHEL6 only
e7a346
%filter_provides_in %{_libdir}/glusterfs/%{version}/
e7a346
%global __filter_from_req %{?__filter_from_req} | grep -v -P '^(?!lib).*\.so.*$'
e7a346
%filter_setup
e7a346
%else
e7a346
# modern rpm and current Fedora do not generate requires when the
e7a346
# provides are filtered
e7a346
%global __provides_exclude_from ^%{_libdir}/glusterfs/%{version}/.*$
e7a346
%endif
e7a346
e7a346
e7a346
##-----------------------------------------------------------------------------
e7a346
## All package definitions should be placed here in alphabetical order
e7a346
##
e7a346
Summary:          Distributed File System
e7a346
%if ( 0%{_for_fedora_koji_builds} )
e7a346
Name:             glusterfs
e7a346
Version:          3.8.0
e7a346
Release:          0.1%{?prereltag:.%{prereltag}}%{?dist}
e7a346
%else
e7a346
Name:             glusterfs
50dc83
Version:          6.0
b7d4d7
Release:          56.4%{?dist}
50dc83
ExcludeArch:      i686
e7a346
%endif
e7a346
License:          GPLv2 or LGPLv3+
50dc83
URL:              http://docs.gluster.org/
e7a346
%if ( 0%{_for_fedora_koji_builds} )
e7a346
Source0:          http://bits.gluster.org/pub/gluster/glusterfs/src/glusterfs-%{version}%{?prereltag}.tar.gz
e7a346
Source1:          glusterd.sysconfig
e7a346
Source2:          glusterfsd.sysconfig
e7a346
Source7:          glusterfsd.service
e7a346
Source8:          glusterfsd.init
e7a346
%else
50dc83
Source0:          glusterfs-6.0.tar.gz
e7a346
%endif
e7a346
e7a346
Requires(pre):    shadow-utils
e7a346
%if ( 0%{?_with_systemd:1} )
e7a346
BuildRequires:    systemd
e7a346
%endif
e7a346
e7a346
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e7a346
%if ( 0%{?_with_systemd:1} )
e7a346
%{?systemd_requires}
e7a346
%endif
50dc83
%if 0%{?_with_asan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
50dc83
BuildRequires:    libasan
50dc83
%endif
50dc83
%if 0%{?_with_tsan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
50dc83
BuildRequires:    libtsan
50dc83
%endif
e7a346
BuildRequires:    git
e7a346
BuildRequires:    bison flex
e7a346
BuildRequires:    gcc make libtool
e7a346
BuildRequires:    ncurses-devel readline-devel
e7a346
BuildRequires:    libxml2-devel openssl-devel
e7a346
BuildRequires:    libaio-devel libacl-devel
50dc83
BuildRequires:    python%{_pythonver}-devel
50dc83
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
e7a346
BuildRequires:    python-ctypes
e7a346
%endif
50dc83
%if ( 0%{?_with_ipv6default:1} ) || ( 0%{!?_without_libtirpc:1} ) || ( 0%{?rhel} && ( 0%{?rhel} >= 8 ) )
e7a346
BuildRequires:    libtirpc-devel
e7a346
%endif
50dc83
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
50dc83
BuildRequires:    rpcgen
e7a346
%endif
e7a346
BuildRequires:    userspace-rcu-devel >= 0.7
e7a346
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
e7a346
BuildRequires:    automake
e7a346
%endif
e7a346
BuildRequires:    libuuid-devel
e7a346
%if ( 0%{?_with_cmocka:1} )
e7a346
BuildRequires:    libcmocka-devel >= 1.0.1
e7a346
%endif
e7a346
%if ( 0%{!?_without_tiering:1} )
e7a346
BuildRequires:    sqlite-devel
e7a346
%endif
e7a346
%if ( 0%{!?_without_georeplication:1} )
e7a346
BuildRequires:    libattr-devel
e7a346
%endif
e7a346
e7a346
%if (0%{?_with_firewalld:1})
e7a346
BuildRequires:    firewalld
e7a346
%endif
e7a346
e7a346
Obsoletes:        hekafs
e7a346
Obsoletes:        %{name}-common < %{version}-%{release}
e7a346
Obsoletes:        %{name}-core < %{version}-%{release}
e7a346
Obsoletes:        %{name}-ufo
50dc83
%if ( 0%{!?_with_gnfs:1} )
50dc83
Obsoletes:        %{name}-gnfs
50dc83
%endif
50dc83
# Obsolete the ganesha subpackage only on RHEL < 7; guard with 0%{?rhel} so
# the comparison is skipped entirely on non-RHEL (where %{?rhel} is empty and
# a bare "0 < 7" would wrongly evaluate true) — matches every other RHEL
# version check in this spec.
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
50dc83
Obsoletes:        %{name}-ganesha
50dc83
%endif
e7a346
Provides:         %{name}-common = %{version}-%{release}
e7a346
Provides:         %{name}-core = %{version}-%{release}
e7a346
50dc83
# Patch0001: 0001-Update-rfc.sh-to-rhgs-3.5.0.patch
e7a346
Patch0002: 0002-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch
50dc83
Patch0003: 0003-rpc-set-bind-insecure-to-off-by-default.patch
50dc83
Patch0004: 0004-glusterd-spec-fixing-autogen-issue.patch
50dc83
Patch0005: 0005-libglusterfs-glusterd-Fix-compilation-errors.patch
50dc83
Patch0006: 0006-build-remove-ghost-directory-entries.patch
50dc83
Patch0007: 0007-build-add-RHGS-specific-changes.patch
50dc83
Patch0008: 0008-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch
50dc83
Patch0009: 0009-build-introduce-security-hardening-flags-in-gluster.patch
50dc83
Patch0010: 0010-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch
50dc83
Patch0011: 0011-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch
50dc83
Patch0012: 0012-build-add-pretrans-check.patch
50dc83
Patch0013: 0013-glusterd-fix-info-file-checksum-mismatch-during-upgr.patch
50dc83
Patch0014: 0014-build-spec-file-conflict-resolution.patch
50dc83
Patch0015: 0015-build-randomize-temp-file-names-in-pretrans-scriptle.patch
50dc83
Patch0016: 0016-glusterd-parallel-readdir-Change-the-op-version-of-p.patch
50dc83
Patch0017: 0017-glusterd-Revert-op-version-for-cluster.max-brick-per.patch
50dc83
Patch0018: 0018-cli-Add-message-for-user-before-modifying-brick-mult.patch
50dc83
Patch0019: 0019-build-launch-glusterd-upgrade-after-all-new-bits-are.patch
50dc83
Patch0020: 0020-spec-unpackaged-files-found-for-RHEL-7-client-build.patch
50dc83
Patch0021: 0021-cli-glusterfsd-remove-copyright-information.patch
50dc83
Patch0022: 0022-cli-Remove-upstream-doc-reference.patch
50dc83
Patch0023: 0023-hooks-remove-selinux-hooks.patch
50dc83
Patch0024: 0024-glusterd-Make-localtime-logging-option-invisible-in-.patch
50dc83
Patch0025: 0025-build-make-RHGS-version-available-for-server.patch
50dc83
Patch0026: 0026-glusterd-Introduce-daemon-log-level-cluster-wide-opt.patch
50dc83
Patch0027: 0027-glusterd-change-op-version-of-fips-mode-rchecksum.patch
50dc83
Patch0028: 0028-glusterd-Reset-op-version-for-features.shard-deletio.patch
50dc83
Patch0029: 0029-glusterd-Reset-op-version-for-features.shard-lru-lim.patch
50dc83
Patch0030: 0030-selinux-glusterd-add-features.selinux-to-glusterd-vo.patch
50dc83
Patch0031: 0031-glusterd-turn-off-selinux-feature-in-downstream.patch
50dc83
Patch0032: 0032-glusterd-update-gd-op-version-to-3_7_0.patch
50dc83
Patch0033: 0033-build-add-missing-explicit-package-dependencies.patch
50dc83
Patch0034: 0034-glusterd-introduce-a-new-op-version-for-rhgs-3.4.3.patch
50dc83
Patch0035: 0035-glusterd-tag-rebalance-mgmt_v3-command-to-op-version.patch
50dc83
Patch0036: 0036-build-add-conditional-dependency-on-server-for-devel.patch
50dc83
Patch0037: 0037-cli-change-the-warning-message.patch
50dc83
Patch0038: 0038-spec-avoid-creation-of-temp-file-in-lua-script.patch
50dc83
Patch0039: 0039-cli-fix-query-to-user-during-brick-mux-selection.patch
50dc83
Patch0040: 0040-build-Remove-unsupported-test-cases-failing-consiste.patch
50dc83
Patch0041: 0041-tests-geo-rep-Build-failed-in-Jenkins-for-test-bug-1.patch
50dc83
Patch0042: 0042-spec-client-server-Builds-are-failing-on-rhel-6.patch
50dc83
Patch0043: 0043-inode-don-t-dump-the-whole-table-to-CLI.patch
50dc83
Patch0044: 0044-cluster-ec-Don-t-enqueue-an-entry-if-it-is-already-h.patch
50dc83
Patch0045: 0045-glusterd-fix-txn-id-mem-leak.patch
50dc83
Patch0046: 0046-protocol-client-Do-not-fallback-to-anon-fd-if-fd-is-.patch
50dc83
Patch0047: 0047-client-rpc-Fix-the-payload-being-sent-on-the-wire.patch
50dc83
Patch0048: 0048-gfapi-Unblock-epoll-thread-for-upcall-processing.patch
50dc83
Patch0049: 0049-transport-socket-log-shutdown-msg-occasionally.patch
50dc83
Patch0050: 0050-geo-rep-Fix-syncing-multiple-rename-of-symlink.patch
50dc83
Patch0051: 0051-spec-update-rpm-install-condition.patch
50dc83
Patch0052: 0052-geo-rep-IPv6-support.patch
50dc83
Patch0053: 0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch
50dc83
Patch0054: 0054-Revert-glusterd-storhaug-remove-ganesha.patch
50dc83
Patch0055: 0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch
50dc83
Patch0056: 0056-common-ha-fixes-for-Debian-based-systems.patch
50dc83
Patch0057: 0057-ganesha-scripts-Remove-export-entries-from-ganesha.c.patch
50dc83
Patch0058: 0058-glusterd-ganesha-During-volume-delete-remove-the-gan.patch
50dc83
Patch0059: 0059-glusterd-ganesha-throw-proper-error-for-gluster-nfs-.patch
50dc83
Patch0060: 0060-ganesha-scripts-Stop-ganesha-process-on-all-nodes-if.patch
50dc83
Patch0061: 0061-ganesha-allow-refresh-config-and-volume-export-unexp.patch
50dc83
Patch0062: 0062-glusterd-ganesha-perform-removal-of-ganesha.conf-on-.patch
50dc83
Patch0063: 0063-glusterd-ganesha-update-cache-invalidation-properly-.patch
50dc83
Patch0064: 0064-glusterd-ganesha-return-proper-value-in-pre_setup.patch
50dc83
Patch0065: 0065-ganesha-scripts-remove-dependency-over-export-config.patch
50dc83
Patch0066: 0066-glusterd-ganesha-add-proper-NULL-check-in-manage_exp.patch
50dc83
Patch0067: 0067-ganesha-minor-improvments-for-commit-e91cdf4-17081.patch
50dc83
Patch0068: 0068-common-ha-surviving-ganesha.nfsd-not-put-in-grace-on.patch
50dc83
Patch0069: 0069-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
50dc83
Patch0070: 0070-packaging-glusterfs-ganesha-update-sometimes-fails-s.patch
50dc83
Patch0071: 0071-common-ha-enable-and-disable-selinux-gluster_use_exe.patch
50dc83
Patch0072: 0072-ganesha-ha-don-t-set-SELinux-booleans-if-SELinux-is-.patch
50dc83
Patch0073: 0073-build-remove-ganesha-dependency-on-selinux-policy.patch
50dc83
Patch0074: 0074-common-ha-enable-pacemaker-at-end-of-setup.patch
50dc83
Patch0075: 0075-common-ha-Fix-an-incorrect-syntax-during-setup.patch
50dc83
Patch0076: 0076-glusterd-ganesha-change-voltype-for-ganesha.enable-i.patch
50dc83
Patch0077: 0077-glusterd-ganesha-create-remove-export-file-only-from.patch
50dc83
Patch0078: 0078-common-ha-scripts-pass-the-list-of-servers-properly-.patch
50dc83
Patch0079: 0079-common-ha-All-statd-related-files-need-to-be-owned-b.patch
50dc83
Patch0080: 0080-glusterd-ganesha-Skip-non-ganesha-nodes-properly-for.patch
50dc83
Patch0081: 0081-ganesha-ha-ensure-pacemaker-is-enabled-after-setup.patch
50dc83
Patch0082: 0082-build-Add-dependency-on-netstat-for-glusterfs-ganesh.patch
50dc83
Patch0083: 0083-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
50dc83
Patch0084: 0084-glusterd-Fix-duplicate-client_op_version-in-info-fil.patch
50dc83
Patch0085: 0085-Revert-all-remove-code-which-is-not-being-considered.patch
50dc83
Patch0086: 0086-Revert-tiering-remove-the-translator-from-build-and-.patch
50dc83
Patch0087: 0087-ganesha-fixing-minor-issues-after-the-backport-from-.patch
50dc83
Patch0088: 0088-tier-fix-failures-noticed-during-tier-start-and-tier.patch
50dc83
Patch0089: 0089-glusterd-gNFS-On-post-upgrade-to-3.2-disable-gNFS-fo.patch
50dc83
Patch0090: 0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch
50dc83
Patch0091: 0091-glusterd-gNFS-explicitly-set-nfs.disable-to-off-afte.patch
50dc83
Patch0092: 0092-logging-Fix-GF_LOG_OCCASSIONALLY-API.patch
50dc83
Patch0093: 0093-glusterd-Change-op-version-of-cache-invalidation-in-.patch
50dc83
Patch0094: 0094-glusterd-load-ctime-in-the-client-graph-only-if-it-s.patch
50dc83
Patch0095: 0095-cluster-afr-Remove-local-from-owners_list-on-failure.patch
50dc83
Patch0096: 0096-core-Brick-is-not-able-to-detach-successfully-in-bri.patch
50dc83
Patch0097: 0097-glusterd-tier-while-doing-an-attach-tier-the-self-he.patch
50dc83
Patch0098: 0098-mgmt-shd-Implement-multiplexing-in-self-heal-daemon.patch
50dc83
Patch0099: 0099-client-fini-return-fini-after-rpc-cleanup.patch
50dc83
Patch0100: 0100-clnt-rpc-ref-leak-during-disconnect.patch
50dc83
Patch0101: 0101-shd-mux-Fix-coverity-issues-introduced-by-shd-mux-pa.patch
50dc83
Patch0102: 0102-rpc-transport-Missing-a-ref-on-dict-while-creating-t.patch
50dc83
Patch0103: 0103-dht-NULL-check-before-setting-error-flag.patch
50dc83
Patch0104: 0104-afr-shd-Cleanup-self-heal-daemon-resources-during-af.patch
50dc83
Patch0105: 0105-core-Log-level-changes-do-not-effect-on-running-clie.patch
50dc83
Patch0106: 0106-libgfchangelog-use-find_library-to-locate-shared-lib.patch
50dc83
Patch0107: 0107-gfapi-add-function-to-set-client-pid.patch
50dc83
Patch0108: 0108-afr-add-client-pid-to-all-gf_event-calls.patch
50dc83
Patch0109: 0109-glusterd-Optimize-glusterd-handshaking-code-path.patch
50dc83
Patch0110: 0110-tier-shd-glusterd-with-shd-mux-the-shd-volfile-path-.patch
50dc83
Patch0111: 0111-glusterd-fix-loading-ctime-in-client-graph-logic.patch
50dc83
Patch0112: 0112-geo-rep-fix-incorrectly-formatted-authorized_keys.patch
50dc83
Patch0113: 0113-spec-Glusterd-did-not-start-by-default-after-node-re.patch
50dc83
Patch0114: 0114-core-fix-hang-issue-in-__gf_free.patch
50dc83
Patch0115: 0115-core-only-log-seek-errors-if-SEEK_HOLE-SEEK_DATA-is-.patch
50dc83
Patch0116: 0116-cluster-ec-fix-fd-reopen.patch
50dc83
Patch0117: 0117-spec-Remove-thin-arbiter-package.patch
50dc83
Patch0118: 0118-tests-mark-thin-arbiter-test-ta.t-as-bad.patch
50dc83
Patch0119: 0119-glusterd-provide-a-way-to-detach-failed-node.patch
50dc83
Patch0120: 0120-glusterd-shd-Keep-a-ref-on-volinfo-until-attach-rpc-.patch
50dc83
Patch0121: 0121-spec-glusterfs-devel-for-client-build-should-not-dep.patch
50dc83
Patch0122: 0122-posix-ctime-Fix-stat-time-attributes-inconsistency-d.patch
50dc83
Patch0123: 0123-ctime-Fix-log-repeated-logging-during-open.patch
50dc83
Patch0124: 0124-spec-remove-duplicate-references-to-files.patch
50dc83
Patch0125: 0125-glusterd-define-dumpops-in-the-xlator_api-of-gluster.patch
50dc83
Patch0126: 0126-cluster-dht-refactor-dht-lookup-functions.patch
50dc83
Patch0127: 0127-cluster-dht-Refactor-dht-lookup-functions.patch
50dc83
Patch0128: 0128-glusterd-Fix-bulkvoldict-thread-logic-in-brick-multi.patch
50dc83
Patch0129: 0129-core-handle-memory-accounting-correctly.patch
50dc83
Patch0130: 0130-tier-test-new-tier-cmds.t-fails-after-a-glusterd-res.patch
50dc83
Patch0131: 0131-tests-dht-Test-that-lookups-are-sent-post-brick-up.patch
50dc83
Patch0132: 0132-glusterd-remove-duplicate-occurrence-of-features.sel.patch
50dc83
Patch0133: 0133-glusterd-enable-fips-mode-rchecksum-for-new-volumes.patch
50dc83
Patch0134: 0134-performance-write-behind-remove-request-from-wip-lis.patch
50dc83
Patch0135: 0135-geo-rep-fix-incorrectly-formatted-authorized_keys.patch
50dc83
Patch0136: 0136-glusterd-fix-inconsistent-global-option-output-in-vo.patch
50dc83
Patch0137: 0137-shd-glusterd-Serialize-shd-manager-to-prevent-race-c.patch
50dc83
Patch0138: 0138-glusterd-Add-gluster-volume-stop-operation-to-gluste.patch
50dc83
Patch0139: 0139-ec-shd-Cleanup-self-heal-daemon-resources-during-ec-.patch
50dc83
Patch0140: 0140-cluster-ec-Reopen-shouldn-t-happen-with-O_TRUNC.patch
50dc83
Patch0141: 0141-socket-ssl-fix-crl-handling.patch
50dc83
Patch0142: 0142-lock-check-null-value-of-dict-to-avoid-log-flooding.patch
50dc83
Patch0143: 0143-packaging-Change-the-dependency-on-nfs-ganesha-to-2..patch
50dc83
Patch0144: 0144-cluster-ec-honor-contention-notifications-for-partia.patch
50dc83
Patch0145: 0145-core-Capture-process-memory-usage-at-the-time-of-cal.patch
50dc83
Patch0146: 0146-dht-Custom-xattrs-are-not-healed-in-case-of-add-bric.patch
50dc83
Patch0147: 0147-glusterd-bulkvoldict-thread-is-not-handling-all-volu.patch
50dc83
Patch0148: 0148-cluster-dht-Lookup-all-files-when-processing-directo.patch
50dc83
Patch0149: 0149-glusterd-Optimize-code-to-copy-dictionary-in-handsha.patch
50dc83
Patch0150: 0150-libglusterfs-define-macros-needed-for-cloudsync.patch
50dc83
Patch0151: 0151-mgmt-glusterd-Make-changes-related-to-cloudsync-xlat.patch
50dc83
Patch0152: 0152-storage-posix-changes-with-respect-to-cloudsync.patch
50dc83
Patch0153: 0153-features-cloudsync-Added-some-new-functions.patch
50dc83
Patch0154: 0154-cloudsync-cvlt-Cloudsync-plugin-for-commvault-store.patch
50dc83
Patch0155: 0155-cloudsync-Make-readdirp-return-stat-info-of-all-the-.patch
50dc83
Patch0156: 0156-cloudsync-Fix-bug-in-cloudsync-fops-c.py.patch
50dc83
Patch0157: 0157-afr-frame-Destroy-frame-after-afr_selfheal_entry_gra.patch
50dc83
Patch0158: 0158-glusterfsd-cleanup-Protect-graph-object-under-a-lock.patch
50dc83
Patch0159: 0159-glusterd-add-an-op-version-check.patch
50dc83
Patch0160: 0160-geo-rep-Geo-rep-help-text-issue.patch
50dc83
Patch0161: 0161-geo-rep-Fix-rename-with-existing-destination-with-sa.patch
50dc83
Patch0162: 0162-geo-rep-Fix-sync-method-config.patch
50dc83
Patch0163: 0163-geo-rep-Fix-sync-hang-with-tarssh.patch
50dc83
Patch0164: 0164-cluster-ec-Fix-handling-of-heal-info-cases-without-l.patch
50dc83
Patch0165: 0165-tests-shd-Add-test-coverage-for-shd-mux.patch
50dc83
Patch0166: 0166-glusterd-svc-glusterd_svcs_stop-should-call-individu.patch
50dc83
Patch0167: 0167-glusterd-shd-Optimize-the-glustershd-manager-to-send.patch
50dc83
Patch0168: 0168-cluster-dht-Fix-directory-perms-during-selfheal.patch
50dc83
Patch0169: 0169-Build-Fix-spec-to-enable-rhel8-client-build.patch
50dc83
Patch0170: 0170-geo-rep-Convert-gfid-conflict-resolutiong-logs-into-.patch
50dc83
Patch0171: 0171-posix-add-storage.reserve-size-option.patch
50dc83
Patch0172: 0172-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
50dc83
Patch0173: 0173-glusterd-store-fips-mode-rchecksum-option-in-the-inf.patch
50dc83
Patch0174: 0174-xlator-log-Add-more-logging-in-xlator_is_cleanup_sta.patch
50dc83
Patch0175: 0175-ec-fini-Fix-race-between-xlator-cleanup-and-on-going.patch
50dc83
Patch0176: 0176-features-shard-Fix-crash-during-background-shard-del.patch
50dc83
Patch0177: 0177-features-shard-Fix-extra-unref-when-inode-object-is-.patch
50dc83
Patch0178: 0178-Cluster-afr-Don-t-treat-all-bricks-having-metadata-p.patch
50dc83
Patch0179: 0179-tests-Fix-split-brain-favorite-child-policy.t-failur.patch
50dc83
Patch0180: 0180-ganesha-scripts-Make-generate-epoch.py-python3-compa.patch
50dc83
Patch0181: 0181-afr-log-before-attempting-data-self-heal.patch
50dc83
Patch0182: 0182-geo-rep-fix-mountbroker-setup.patch
50dc83
Patch0183: 0183-glusterd-svc-Stop-stale-process-using-the-glusterd_p.patch
50dc83
Patch0184: 0184-tests-Add-gating-configuration-file-for-rhel8.patch
50dc83
Patch0185: 0185-gfapi-provide-an-api-for-setting-statedump-path.patch
50dc83
Patch0186: 0186-cli-Remove-brick-warning-seems-unnecessary.patch
50dc83
Patch0187: 0187-gfapi-statedump_path-add-proper-version-number.patch
50dc83
Patch0188: 0188-features-shard-Fix-integer-overflow-in-block-count-a.patch
50dc83
Patch0189: 0189-features-shard-Fix-block-count-accounting-upon-trunc.patch
50dc83
Patch0190: 0190-Build-removing-the-hardcoded-usage-of-python3.patch
50dc83
Patch0191: 0191-Build-Update-python-shebangs-based-on-version.patch
50dc83
Patch0192: 0192-build-Ensure-gluster-cli-package-is-built-as-part-of.patch
50dc83
Patch0193: 0193-spec-fixed-python-dependency-for-rhel6.patch
50dc83
Patch0194: 0194-stack-Make-sure-to-have-unique-call-stacks-in-all-ca.patch
50dc83
Patch0195: 0195-build-package-glusterfs-ganesha-for-rhel7-and-above.patch
50dc83
Patch0196: 0196-posix-ctime-Fix-ctime-upgrade-issue.patch
50dc83
Patch0197: 0197-posix-fix-crash-in-posix_cs_set_state.patch
50dc83
Patch0198: 0198-cluster-ec-Prevent-double-pre-op-xattrops.patch
50dc83
Patch0199: 0199-upcall-Avoid-sending-notifications-for-invalid-inode.patch
50dc83
Patch0200: 0200-gfapi-fix-incorrect-initialization-of-upcall-syncop-.patch
50dc83
Patch0201: 0201-geo-rep-Fix-permissions-for-GEOREP_DIR-in-non-root-s.patch
50dc83
Patch0202: 0202-shd-mux-Fix-race-between-mux_proc-unlink-and-stop.patch
50dc83
Patch0203: 0203-glusterd-shd-Change-shd-logfile-to-a-unique-name.patch
50dc83
Patch0204: 0204-glusterd-conditionally-clear-txn_opinfo-in-stage-op.patch
50dc83
Patch0205: 0205-glusterd-Can-t-run-rebalance-due-to-long-unix-socket.patch
50dc83
Patch0206: 0206-glusterd-ignore-user.-options-from-compatibility-che.patch
50dc83
Patch0207: 0207-glusterd-fix-use-after-free-of-a-dict_t.patch
50dc83
Patch0208: 0208-mem-pool-remove-dead-code.patch
50dc83
Patch0209: 0209-core-avoid-dynamic-TLS-allocation-when-possible.patch
50dc83
Patch0210: 0210-mem-pool.-c-h-minor-changes.patch
50dc83
Patch0211: 0211-libglusterfs-Fix-compilation-when-disable-mempool-is.patch
50dc83
Patch0212: 0212-core-fix-memory-allocation-issues.patch
50dc83
Patch0213: 0213-cluster-dht-Strip-out-dht-xattrs.patch
50dc83
Patch0214: 0214-geo-rep-Upgrading-config-file-to-new-version.patch
50dc83
Patch0215: 0215-posix-modify-storage.reserve-option-to-take-size-and.patch
50dc83
Patch0216: 0216-Test-case-fixe-for-downstream-3.5.0.patch
50dc83
Patch0217: 0217-uss-Fix-tar-issue-with-ctime-and-uss-enabled.patch
50dc83
Patch0218: 0218-graph-shd-Use-glusterfs_graph_deactivate-to-free-the.patch
50dc83
Patch0219: 0219-posix-add-posix_set_ctime-in-posix_ftruncate.patch
50dc83
Patch0220: 0220-graph-shd-Use-top-down-approach-while-cleaning-xlato.patch
50dc83
Patch0221: 0221-protocol-client-propagte-GF_EVENT_CHILD_PING-only-fo.patch
50dc83
Patch0222: 0222-cluster-dht-Fixed-a-memleak-in-dht_rename_cbk.patch
50dc83
Patch0223: 0223-change-get_real_filename-implementation-to-use-ENOAT.patch
50dc83
Patch0224: 0224-core-replace-inet_addr-with-inet_pton.patch
50dc83
Patch0225: 0225-tests-utils-Fix-py2-py3-util-python-scripts.patch
50dc83
Patch0226: 0226-geo-rep-fix-gluster-command-path-for-non-root-sessio.patch
50dc83
Patch0227: 0227-glusterd-svc-update-pid-of-mux-volumes-from-the-shd-.patch
50dc83
Patch0228: 0228-locks-enable-notify-contention-by-default.patch
50dc83
Patch0229: 0229-glusterd-Show-the-correct-brick-status-in-get-state.patch
50dc83
Patch0230: 0230-Revert-glusterd-svc-update-pid-of-mux-volumes-from-t.patch
50dc83
Patch0231: 0231-Revert-graph-shd-Use-top-down-approach-while-cleanin.patch
50dc83
Patch0232: 0232-cluster-afr-Fix-incorrect-reporting-of-gfid-type-mis.patch
50dc83
Patch0233: 0233-Revert-graph-shd-Use-glusterfs_graph_deactivate-to-f.patch
50dc83
Patch0234: 0234-Revert-glusterd-shd-Change-shd-logfile-to-a-unique-n.patch
50dc83
Patch0235: 0235-Revert-glusterd-svc-Stop-stale-process-using-the-glu.patch
50dc83
Patch0236: 0236-Revert-shd-mux-Fix-race-between-mux_proc-unlink-and-.patch
50dc83
Patch0237: 0237-Revert-ec-fini-Fix-race-between-xlator-cleanup-and-o.patch
50dc83
Patch0238: 0238-Revert-xlator-log-Add-more-logging-in-xlator_is_clea.patch
50dc83
Patch0239: 0239-Revert-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
50dc83
Patch0240: 0240-Revert-glusterd-shd-Optimize-the-glustershd-manager-.patch
50dc83
Patch0241: 0241-Revert-glusterd-svc-glusterd_svcs_stop-should-call-i.patch
50dc83
Patch0242: 0242-Revert-tests-shd-Add-test-coverage-for-shd-mux.patch
50dc83
Patch0243: 0243-Revert-glusterfsd-cleanup-Protect-graph-object-under.patch
50dc83
Patch0244: 0244-Revert-ec-shd-Cleanup-self-heal-daemon-resources-dur.patch
50dc83
Patch0245: 0245-Revert-shd-glusterd-Serialize-shd-manager-to-prevent.patch
50dc83
Patch0246: 0246-Revert-glusterd-shd-Keep-a-ref-on-volinfo-until-atta.patch
50dc83
Patch0247: 0247-Revert-afr-shd-Cleanup-self-heal-daemon-resources-du.patch
50dc83
Patch0248: 0248-Revert-shd-mux-Fix-coverity-issues-introduced-by-shd.patch
50dc83
Patch0249: 0249-Revert-client-fini-return-fini-after-rpc-cleanup.patch
50dc83
Patch0250: 0250-Revert-mgmt-shd-Implement-multiplexing-in-self-heal-.patch
50dc83
Patch0251: 0251-tests-Fix-bug-1717819-metadata-split-brain-detection.patch
50dc83
Patch0252: 0252-glusterd-do-not-mark-skip_locking-as-true-for-geo-re.patch
50dc83
Patch0253: 0253-core-fix-deadlock-between-statedump-and-fd_anonymous.patch
50dc83
Patch0254: 0254-Detach-iot_worker-to-release-its-resources.patch
50dc83
Patch0255: 0255-Revert-tier-shd-glusterd-with-shd-mux-the-shd-volfil.patch
50dc83
Patch0256: 0256-features-snapview-server-use-the-same-volfile-server.patch
50dc83
Patch0257: 0257-geo-rep-Test-case-for-upgrading-config-file.patch
50dc83
Patch0258: 0258-geo-rep-Fix-mount-broker-setup-issue.patch
50dc83
Patch0259: 0259-gluster-block-tuning-perf-options.patch
50dc83
Patch0260: 0260-ctime-Set-mdata-xattr-on-legacy-files.patch
50dc83
Patch0261: 0261-features-utime-Fix-mem_put-crash.patch
50dc83
Patch0262: 0262-glusterd-ctime-Disable-ctime-by-default.patch
50dc83
Patch0263: 0263-tests-fix-ctime-related-tests.patch
50dc83
Patch0264: 0264-gfapi-Fix-deadlock-while-processing-upcall.patch
50dc83
Patch0265: 0265-fuse-add-missing-GF_FREE-to-fuse_interrupt.patch
50dc83
Patch0266: 0266-geo-rep-Fix-mount-broker-setup-issue.patch
50dc83
Patch0267: 0267-posix-ctime-Fix-race-during-lookup-ctime-xattr-heal.patch
50dc83
Patch0268: 0268-rpc-transport-have-default-listen-port.patch
50dc83
Patch0269: 0269-ec-fix-truncate-lock-to-cover-the-write-in-tuncate-c.patch
50dc83
Patch0270: 0270-cluster-ec-inherit-healing-from-lock-when-it-has-inf.patch
50dc83
Patch0271: 0271-cluster-ec-fix-EIO-error-for-concurrent-writes-on-sp.patch
50dc83
Patch0272: 0272-cluster-ec-Always-read-from-good-mask.patch
50dc83
Patch0273: 0273-cluster-ec-Fix-reopen-flags-to-avoid-misbehavior.patch
50dc83
Patch0274: 0274-cluster-ec-Update-lock-good_mask-on-parent-fop-failu.patch
50dc83
Patch0275: 0275-cluster-ec-Create-heal-task-with-heal-process-id.patch
50dc83
Patch0276: 0276-features-utime-always-update-ctime-at-setattr.patch
50dc83
Patch0277: 0277-geo-rep-Fix-Config-Get-Race.patch
50dc83
Patch0278: 0278-geo-rep-Fix-worker-connection-issue.patch
50dc83
Patch0279: 0279-posix-In-brick_mux-brick-is-crashed-while-start-stop.patch
50dc83
Patch0280: 0280-performance-md-cache-Do-not-skip-caching-of-null-cha.patch
50dc83
Patch0281: 0281-ctime-Fix-incorrect-realtime-passed-to-frame-root-ct.patch
50dc83
Patch0282: 0282-geo-rep-Fix-the-name-of-changelog-archive-file.patch
50dc83
Patch0283: 0283-ctime-Fix-ctime-issue-with-utime-family-of-syscalls.patch
50dc83
Patch0284: 0284-posix-log-aio_error-return-codes-in-posix_fs_health_.patch
50dc83
Patch0285: 0285-glusterd-glusterd-service-is-getting-timed-out-on-sc.patch
50dc83
Patch0286: 0286-glusterfs.spec.in-added-script-files-for-machine-com.patch
50dc83
Patch0287: 0287-cluster-ec-Fail-fsync-flush-for-files-on-update-size.patch
50dc83
Patch0288: 0288-cluster-ec-Fix-coverity-issues.patch
50dc83
Patch0289: 0289-cluster-ec-quorum-count-implementation.patch
50dc83
Patch0290: 0290-glusterd-tag-disperse.quorum-count-for-31306.patch
50dc83
Patch0291: 0291-cluster-ec-Mark-release-only-when-it-is-acquired.patch
50dc83
Patch0292: 0292-rpc-Update-address-family-if-it-is-not-provide-in-cm.patch
50dc83
Patch0293: 0293-glusterd-IPV6-hostname-address-is-not-parsed-correct.patch
50dc83
Patch0294: 0294-eventsapi-Set-IPv4-IPv6-family-based-on-input-IP.patch
50dc83
Patch0295: 0295-ctime-rebalance-Heal-ctime-xattr-on-directory-during.patch
50dc83
Patch0296: 0296-glusterfind-pre-command-failure-on-a-modify.patch
50dc83
Patch0297: 0297-rpmbuild-fixing-the-build-errors-with-2a905a8ae.patch
50dc83
Patch0298: 0298-geo-rep-fix-sub-command-during-worker-connection.patch
50dc83
Patch0299: 0299-geo-rep-performance-improvement-while-syncing-rename.patch
50dc83
Patch0300: 0300-cli-remove-the-warning-displayed-when-remove-brick-s.patch
50dc83
Patch0301: 0301-posix-Brick-is-going-down-unexpectedly.patch
50dc83
Patch0302: 0302-cluster-ec-prevent-filling-shd-log-with-table-not-fo.patch
9f5ccc
Patch0303: 0303-posix-heketidbstorage-bricks-go-down-during-PVC-crea.patch
9f5ccc
Patch0304: 0304-cluster-dht-Correct-fd-processing-loop.patch
9f5ccc
Patch0305: 0305-glusterd-rebalance-start-should-fail-when-quorum-is-.patch
9f5ccc
Patch0306: 0306-cli-fix-distCount-value.patch
9f5ccc
Patch0307: 0307-ssl-fix-RHEL8-regression-failure.patch
9f5ccc
Patch0308: 0308-dht-Rebalance-causing-IO-Error-File-descriptor-in-ba.patch
9f5ccc
Patch0309: 0309-geo-rep-Fix-config-upgrade-on-non-participating-node.patch
9f5ccc
Patch0310: 0310-tests-test-case-for-non-root-geo-rep-setup.patch
9f5ccc
Patch0311: 0311-geo-rep-Fix-Permission-denied-traceback-on-non-root-.patch
9f5ccc
Patch0312: 0312-Scripts-quota_fsck-script-KeyError-contri_size.patch
9f5ccc
Patch0313: 0313-extras-Cgroup-CPU-Mem-restriction-are-not-working-on.patch
9f5ccc
Patch0314: 0314-glusterd-tier-is_tier_enabled-inserted-causing-check.patch
9f5ccc
Patch0315: 0315-geo-rep-Fix-py2-py3-compatibility-in-repce.patch
9f5ccc
Patch0316: 0316-spec-fixed-python-prettytable-dependency-for-rhel6.patch
9f5ccc
Patch0317: 0317-Update-rfc.sh-to-rhgs-3.5.1.patch
9f5ccc
Patch0318: 0318-Update-rfc.sh-to-rhgs-3.5.1.patch
9f5ccc
Patch0319: 0319-features-snapview-server-obtain-the-list-of-snapshot.patch
9f5ccc
Patch0320: 0320-gf-event-Handle-unix-volfile-servers.patch
9f5ccc
Patch0321: 0321-Adding-white-spaces-to-description-of-set-group.patch
9f5ccc
Patch0322: 0322-glusterd-display-correct-rebalance-data-size-after-g.patch
9f5ccc
Patch0323: 0323-cli-display-detailed-rebalance-info.patch
9f5ccc
Patch0324: 0324-extras-hooks-Add-SELinux-label-on-new-bricks-during-.patch
9f5ccc
Patch0325: 0325-extras-hooks-Install-and-package-newly-added-post-ad.patch
9f5ccc
Patch0326: 0326-tests-subdir-mount.t-is-failing-for-brick_mux-regrss.patch
9f5ccc
Patch0327: 0327-glusterfind-integrate-with-gfid2path.patch
9f5ccc
Patch0328: 0328-glusterd-Add-warning-and-abort-in-case-of-failures-i.patch
9f5ccc
Patch0329: 0329-cluster-afr-Heal-entries-when-there-is-a-source-no-h.patch
9f5ccc
Patch0330: 0330-mount.glusterfs-change-the-error-message.patch
9f5ccc
Patch0331: 0331-features-locks-Do-special-handling-for-op-version-3..patch
9f5ccc
Patch0332: 0332-Removing-one-top-command-from-gluster-v-help.patch
9f5ccc
Patch0333: 0333-rpc-Synchronize-slot-allocation-code.patch
9f5ccc
Patch0334: 0334-dht-log-getxattr-failure-for-node-uuid-at-DEBUG.patch
9f5ccc
Patch0335: 0335-tests-RHEL8-test-failure-fixes-for-RHGS.patch
9f5ccc
Patch0336: 0336-spec-check-and-return-exit-code-in-rpm-scripts.patch
9f5ccc
Patch0337: 0337-fuse-Set-limit-on-invalidate-queue-size.patch
9f5ccc
Patch0338: 0338-glusterfs-fuse-Reduce-the-default-lru-limit-value.patch
9f5ccc
Patch0339: 0339-geo-rep-fix-integer-config-validation.patch
9f5ccc
Patch0340: 0340-rpc-event_slot_alloc-converted-infinite-loop-after-r.patch
9f5ccc
Patch0341: 0341-socket-fix-error-handling.patch
9f5ccc
Patch0342: 0342-Revert-hooks-remove-selinux-hooks.patch
9f5ccc
Patch0343: 0343-extras-hooks-syntactical-errors-in-SELinux-hooks-sci.patch
9f5ccc
Patch0344: 0344-Revert-all-fixes-to-include-SELinux-hook-scripts.patch
9f5ccc
Patch0345: 0345-read-ahead-io-cache-turn-off-by-default.patch
9f5ccc
Patch0346: 0346-fuse-degrade-logging-of-write-failure-to-fuse-device.patch
9f5ccc
Patch0347: 0347-tools-glusterfind-handle-offline-bricks.patch
9f5ccc
Patch0348: 0348-glusterfind-Fix-py2-py3-issues.patch
9f5ccc
Patch0349: 0349-glusterfind-python3-compatibility.patch
9f5ccc
Patch0350: 0350-tools-glusterfind-Remove-an-extra-argument.patch
9f5ccc
Patch0351: 0351-server-Mount-fails-after-reboot-1-3-gluster-nodes.patch
9f5ccc
Patch0352: 0352-spec-fixed-missing-dependencies-for-glusterfs-clouds.patch
9f5ccc
Patch0353: 0353-build-glusterfs-ganesha-pkg-requires-python3-policyc.patch
9f5ccc
Patch0354: 0354-core-fix-memory-pool-management-races.patch
9f5ccc
Patch0355: 0355-core-Prevent-crash-on-process-termination.patch
9f5ccc
Patch0356: 0356-Update-rfc.sh-to-rhgs-3.5.1-rhel-8.patch
9f5ccc
Patch0357: 0357-ganesha-ha-updates-for-pcs-0.10.x-i.e.-in-Fedora-29-.patch
9f5ccc
Patch0358: 0358-inode-fix-wrong-loop-count-in-__inode_ctx_free.patch
9f5ccc
Patch0359: 0359-dht-gf_defrag_process_dir-is-called-even-if-gf_defra.patch
9f5ccc
Patch0360: 0360-rpc-Make-ssl-log-more-useful.patch
9f5ccc
Patch0361: 0361-snap_scheduler-python3-compatibility-and-new-test-ca.patch
9f5ccc
Patch0362: 0362-write-behind-fix-data-corruption.patch
9f5ccc
Patch0363: 0363-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
9f5ccc
Patch0364: 0364-dht-fixing-rebalance-failures-for-files-with-holes.patch
9f5ccc
Patch0365: 0365-build-geo-rep-requires-relevant-selinux-permission-f.patch
9f5ccc
Patch0366: 0366-snapshot-fix-python3-issue-in-gcron.patch
9f5ccc
Patch0367: 0367-dht-Handle-setxattr-and-rm-race-for-directory-in-reb.patch
9f5ccc
Patch0368: 0368-Update-rfc.sh-to-rhgs-3.5.2.patch
9f5ccc
Patch0369: 0369-cluster-ec-Return-correct-error-code-and-log-message.patch
9f5ccc
Patch0370: 0370-dht-Do-opendir-selectively-in-gf_defrag_process_dir.patch
9f5ccc
Patch0371: 0371-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
9f5ccc
Patch0372: 0372-posix-fix-seek-functionality.patch
9f5ccc
Patch0373: 0373-build-geo-rep-sub-pkg-requires-policycoreutils-pytho.patch
9f5ccc
Patch0374: 0374-open-behind-fix-missing-fd-reference.patch
9f5ccc
Patch0375: 0375-features-shard-Send-correct-size-when-reads-are-sent.patch
9f5ccc
Patch0376: 0376-features-shard-Fix-crash-during-shards-cleanup-in-er.patch
9f5ccc
Patch0377: 0377-syncop-improve-scaling-and-implement-more-tools.patch
9f5ccc
Patch0378: 0378-Revert-open-behind-fix-missing-fd-reference.patch
9f5ccc
Patch0379: 0379-glusterd-add-missing-synccond_broadcast.patch
9f5ccc
Patch0380: 0380-features-shard-Aggregate-size-block-count-in-iatt-be.patch
9f5ccc
Patch0381: 0381-dht-add-null-check-in-gf_defrag_free_dir_dfmeta.patch
9f5ccc
Patch0382: 0382-features-shard-Aggregate-file-size-block-count-befor.patch
9f5ccc
Patch0383: 0383-common-ha-ganesha-ha.sh-bad-test-for-rhel-centos-for.patch
190130
Patch0384: 0384-Update-rfc.sh-to-rhgs-3.5.3.patch
190130
Patch0385: 0385-glusterd-start-glusterd-automatically-on-abnormal-sh.patch
190130
Patch0386: 0386-glusterd-increase-the-StartLimitBurst.patch
190130
Patch0387: 0387-To-fix-readdir-ahead-memory-leak.patch
2072c5
Patch0388: 0388-rpc-Cleanup-SSL-specific-data-at-the-time-of-freeing.patch
190130
Patch0389: 0389-posix-Avoid-diskpace-error-in-case-of-overwriting-th.patch
190130
Patch0390: 0390-glusterd-deafult-options-after-volume-reset.patch
190130
Patch0391: 0391-glusterd-unlink-the-file-after-killing-the-process.patch
190130
Patch0392: 0392-glusterd-Brick-process-fails-to-come-up-with-brickmu.patch
190130
Patch0393: 0393-afr-restore-timestamp-of-files-during-metadata-heal.patch
190130
Patch0394: 0394-man-gluster-Add-volume-top-command-to-gluster-man-pa.patch
190130
Patch0395: 0395-Cli-Removing-old-log-rotate-command.patch
190130
Patch0396: 0396-Updating-gluster-manual.patch
190130
Patch0397: 0397-mgmt-brick-mux-Avoid-sending-two-response-when-attac.patch
190130
Patch0398: 0398-ec-change-error-message-for-heal-commands-for-disper.patch
190130
Patch0399: 0399-glusterd-coverity-fixes.patch
190130
Patch0400: 0400-cli-throw-a-warning-if-replica-count-greater-than-3.patch
190130
Patch0401: 0401-cli-change-the-warning-message.patch
190130
Patch0402: 0402-afr-wake-up-index-healer-threads.patch
190130
Patch0403: 0403-Fix-spurious-failure-in-bug-1744548-heal-timeout.t.patch
190130
Patch0404: 0404-tests-Fix-spurious-failure.patch
190130
Patch0405: 0405-core-fix-return-of-local-in-__nlc_inode_ctx_get.patch
190130
Patch0406: 0406-afr-support-split-brain-CLI-for-replica-3.patch
190130
Patch0407: 0407-geo-rep-Improving-help-message-in-schedule_georep.py.patch
190130
Patch0408: 0408-geo-rep-Fix-ssh-port-validation.patch
190130
Patch0409: 0409-system-posix-acl-update-ctx-only-if-iatt-is-non-NULL.patch
190130
Patch0410: 0410-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch
190130
Patch0411: 0411-tools-glusterfind-validate-session-name.patch
190130
Patch0412: 0412-gluster-smb-add-smb-parameter-when-access-gluster-by.patch
190130
Patch0413: 0413-extras-hooks-Remove-smb.conf-parameter-allowing-gues.patch
190130
Patch0414: 0414-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch
190130
Patch0415: 0415-dht-Fix-stale-layout-and-create-issue.patch
190130
Patch0416: 0416-tests-fix-spurious-failure-of-bug-1402841.t-mt-dir-s.patch
190130
Patch0417: 0417-events-fix-IPv6-memory-corruption.patch
190130
Patch0418: 0418-md-cache-avoid-clearing-cache-when-not-necessary.patch
190130
Patch0419: 0419-cluster-afr-fix-race-when-bricks-come-up.patch
190130
Patch0420: 0420-scripts-quota_fsck-script-TypeError-d-format-not-dic.patch
190130
Patch0421: 0421-Improve-logging-in-EC-client-and-lock-translator.patch
190130
Patch0422: 0422-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch
190130
Patch0423: 0423-ctime-Fix-ctime-inconsisteny-with-utimensat.patch
190130
Patch0424: 0424-afr-make-heal-info-lockless.patch
190130
Patch0425: 0425-tests-Fix-spurious-self-heald.t-failure.patch
190130
Patch0426: 0426-geo-rep-Fix-for-Transport-End-Point-not-connected-is.patch
190130
Patch0427: 0427-storage-posix-Fixing-a-coverity-issue.patch
190130
Patch0428: 0428-glusterd-ganesha-fixing-resource-leak-in-tear_down_c.patch
190130
Patch0429: 0429-dht-rebalance-fixing-failure-occurace-due-to-rebalan.patch
190130
Patch0430: 0430-Fix-some-Null-pointer-dereference-coverity-issues.patch
190130
Patch0431: 0431-glusterd-check-for-same-node-while-adding-bricks-in-.patch
190130
Patch0432: 0432-glusterd-Fix-coverity-defects-put-coverity-annotatio.patch
190130
Patch0433: 0433-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch
190130
Patch0434: 0434-glusterd-ganesha-fix-Coverity-CID-1405785.patch
190130
Patch0435: 0435-glusterd-coverity-fix.patch
190130
Patch0436: 0436-glusterd-coverity-fixes.patch
190130
Patch0437: 0437-glusterd-prevent-use-after-free-in-glusterd_op_ac_se.patch
190130
Patch0438: 0438-dht-sparse-files-rebalance-enhancements.patch
190130
Patch0439: 0439-cluster-afr-Delay-post-op-for-fsync.patch
190130
Patch0440: 0440-glusterd-snapshot-Improve-log-message-during-snapsho.patch
190130
Patch0441: 0441-fuse-occasional-logging-for-fuse-device-weird-write-.patch
190130
Patch0442: 0442-fuse-correctly-handle-setxattr-values.patch
190130
Patch0443: 0443-fuse-fix-high-sev-coverity-issue.patch
190130
Patch0444: 0444-mount-fuse-Fixing-a-coverity-issue.patch
190130
Patch0445: 0445-feature-changelog-Avoid-thread-creation-if-xlator-is.patch
190130
Patch0446: 0446-bitrot-Make-number-of-signer-threads-configurable.patch
190130
Patch0447: 0447-core-brick_mux-brick-crashed-when-creating-and-delet.patch
190130
Patch0448: 0448-Posix-Use-simple-approach-to-close-fd.patch
190130
Patch0449: 0449-test-Test-case-brick-mux-validation-in-cluster.t-is-.patch
190130
Patch0450: 0450-tests-basic-ctime-enable-ctime-before-testing.patch
190130
Patch0451: 0451-extras-Modify-group-virt-to-include-network-related-.patch
190130
Patch0452: 0452-Tier-DHT-Handle-the-pause-case-missed-out.patch
190130
Patch0453: 0453-glusterd-add-brick-command-failure.patch
190130
Patch0454: 0454-features-locks-avoid-use-after-freed-of-frame-for-bl.patch
190130
Patch0455: 0455-locks-prevent-deletion-of-locked-entries.patch
190130
Patch0456: 0456-add-clean-local-after-grant-lock.patch
190130
Patch0457: 0457-cluster-ec-Improve-detection-of-new-heals.patch
190130
Patch0458: 0458-features-bit-rot-stub-clean-the-mutex-after-cancelli.patch
190130
Patch0459: 0459-features-bit-rot-Unconditionally-sign-the-files-duri.patch
190130
Patch0460: 0460-cluster-ec-Remove-stale-entries-from-indices-xattrop.patch
190130
Patch0461: 0461-geo-replication-Fix-IPv6-parsing.patch
190130
Patch0462: 0462-Issue-with-gf_fill_iatt_for_dirent.patch
190130
Patch0463: 0463-cluster-ec-Change-handling-of-heal-failure-to-avoid-.patch
190130
Patch0464: 0464-storage-posix-Remove-nr_files-usage.patch
190130
Patch0465: 0465-posix-Implement-a-janitor-thread-to-close-fd.patch
190130
Patch0466: 0466-cluster-ec-Change-stale-index-handling.patch
190130
Patch0467: 0467-build-Added-dependency-for-glusterfs-selinux.patch
190130
Patch0468: 0468-build-Update-the-glusterfs-selinux-version.patch
190130
Patch0469: 0469-cluster-ec-Don-t-trigger-heal-for-stale-index.patch
190130
Patch0470: 0470-extras-snap_scheduler-changes-in-gluster-shared-stor.patch
190130
Patch0471: 0471-nfs-ganesha-gluster_shared_storage-fails-to-automoun.patch
190130
Patch0472: 0472-geo-rep-gluster_shared_storage-fails-to-automount-on.patch
190130
Patch0473: 0473-glusterd-Fix-Add-brick-with-increasing-replica-count.patch
190130
Patch0474: 0474-features-locks-posixlk-clear-lock-should-set-error-a.patch
190130
Patch0475: 0475-fuse-lock-interrupt-fix-flock_interrupt.t.patch
190130
Patch0476: 0476-mount-fuse-use-cookies-to-get-fuse-interrupt-record-.patch
190130
Patch0477: 0477-glusterd-snapshot-Snapshot-prevalidation-failure-not.patch
190130
Patch0478: 0478-DHT-Fixing-rebalance-failure-on-issuing-stop-command.patch
190130
Patch0479: 0479-ganesha-ha-revised-regex-exprs-for-status.patch
190130
Patch0480: 0480-DHT-Rebalance-Ensure-Rebalance-reports-status-only-o.patch
b7d4d7
Patch0481: 0481-Update-rfc.sh-to-rhgs-3.5.4.patch
b7d4d7
Patch0482: 0482-logger-Always-print-errors-in-english.patch
b7d4d7
Patch0483: 0483-afr-more-quorum-checks-in-lookup-and-new-entry-marki.patch
b7d4d7
Patch0484: 0484-glusterd-rebalance-status-displays-stats-as-0-after-.patch
b7d4d7
Patch0485: 0485-cli-rpc-conditional-init-of-global-quota-rpc-1578.patch
b7d4d7
Patch0486: 0486-glusterd-brick-sock-file-deleted-log-error-1560.patch
b7d4d7
Patch0487: 0487-Events-Log-file-not-re-opened-after-logrotate.patch
b7d4d7
Patch0488: 0488-glusterd-afr-enable-granular-entry-heal-by-default.patch
b7d4d7
Patch0489: 0489-glusterd-fix-bug-in-enabling-granular-entry-heal.patch
b7d4d7
Patch0490: 0490-Segmentation-fault-occurs-during-truncate.patch
b7d4d7
Patch0491: 0491-glusterd-mount-directory-getting-truncated-on-mounti.patch
b7d4d7
Patch0492: 0492-afr-lookup-Pass-xattr_req-in-while-doing-a-selfheal-.patch
b7d4d7
Patch0493: 0493-geo-rep-Note-section-is-required-for-ignore_deletes.patch
b7d4d7
Patch0494: 0494-glusterd-start-the-brick-on-a-different-port.patch
b7d4d7
Patch0495: 0495-geo-rep-descriptive-message-when-worker-crashes-due-.patch
b7d4d7
Patch0496: 0496-posix-Use-MALLOC-instead-of-alloca-to-allocate-memor.patch
b7d4d7
Patch0497: 0497-socket-Use-AES128-cipher-in-SSL-if-AES-is-supported-.patch
b7d4d7
Patch0498: 0498-geo-rep-Fix-corner-case-in-rename-on-mkdir-during-hy.patch
b7d4d7
Patch0499: 0499-gfapi-give-appropriate-error-when-size-exceeds.patch
b7d4d7
Patch0500: 0500-features-shard-Convert-shard-block-indices-to-uint64.patch
b7d4d7
Patch0501: 0501-Cli-Removing-old-syntax-of-tier-cmds-from-help-menu.patch
b7d4d7
Patch0502: 0502-dht-fixing-a-permission-update-issue.patch
b7d4d7
Patch0503: 0503-gfapi-Suspend-synctasks-instead-of-blocking-them.patch
b7d4d7
Patch0504: 0504-io-stats-Configure-ios_sample_buf_size-based-on-samp.patch
b7d4d7
Patch0505: 0505-trash-Create-inode_table-only-while-feature-is-enabl.patch
b7d4d7
Patch0506: 0506-posix-Attach-a-posix_spawn_disk_thread-with-glusterf.patch
b7d4d7
Patch0507: 0507-inode-make-critical-section-smaller.patch
b7d4d7
Patch0508: 0508-fuse-fetch-arbitrary-number-of-groups-from-proc-pid-.patch
b7d4d7
Patch0509: 0509-core-configure-optimum-inode-table-hash_size-for-shd.patch
b7d4d7
Patch0510: 0510-glusterd-brick_mux-Optimize-friend-handshake-code-to.patch
b7d4d7
Patch0511: 0511-features-shard-Missing-format-specifier.patch
b7d4d7
Patch0512: 0512-glusterd-shared-storage-mount-fails-in-ipv6-environm.patch
b7d4d7
Patch0513: 0513-afr-mark-pending-xattrs-as-a-part-of-metadata-heal.patch
b7d4d7
Patch0514: 0514-afr-event-gen-changes.patch
b7d4d7
Patch0515: 0515-cluster-afr-Heal-directory-rename-without-rmdir-mkdi.patch
b7d4d7
Patch0516: 0516-afr-return-EIO-for-gfid-split-brains.patch
b7d4d7
Patch0517: 0517-gfapi-glfs_h_creat_open-new-API-to-create-handle-and.patch
b7d4d7
Patch0518: 0518-glusterd-Fix-for-shared-storage-in-ipv6-env.patch
b7d4d7
Patch0519: 0519-glusterfs-events-Fix-incorrect-attribute-access-2002.patch
b7d4d7
Patch0520: 0520-performance-open-behind-seek-fop-should-open_and_res.patch
b7d4d7
Patch0521: 0521-open-behind-fix-missing-fd-reference.patch
b7d4d7
Patch0522: 0522-lcov-improve-line-coverage.patch
b7d4d7
Patch0523: 0523-open-behind-rewrite-of-internal-logic.patch
b7d4d7
Patch0524: 0524-open-behind-fix-call_frame-leak.patch
b7d4d7
Patch0525: 0525-open-behind-implement-create-fop.patch
b7d4d7
Patch0526: 0526-Quota-quota_fsck.py-converting-byte-string-to-string.patch
b7d4d7
Patch0527: 0527-Events-Socket-creation-after-getaddrinfo-and-IPv4-an.patch
b7d4d7
Patch0528: 0528-Extras-Removing-xattr_analysis-script.patch
b7d4d7
Patch0529: 0529-geo-rep-prompt-should-work-for-ignore_deletes.patch
b7d4d7
Patch0530: 0530-gfapi-avoid-crash-while-logging-message.patch
b7d4d7
Patch0531: 0531-Glustereventsd-Default-port-change-2091.patch
b7d4d7
Patch0532: 0532-glusterd-fix-for-starting-brick-on-new-port.patch
b7d4d7
Patch0533: 0533-glusterd-Rebalance-cli-is-not-showing-correct-status.patch
b7d4d7
Patch0534: 0534-glusterd-Resolve-use-after-free-bug-2181.patch
b7d4d7
Patch0535: 0535-multiple-files-use-dict_allocate_and_serialize-where.patch
b7d4d7
Patch0536: 0536-dht-Ongoing-IO-is-failed-during-volume-shrink-operat.patch
b7d4d7
Patch0537: 0537-cluster-afr-Fix-race-in-lockinfo-f-getxattr.patch
b7d4d7
Patch0538: 0538-afr-fix-coverity-issue-introduced-by-90cefde.patch
b7d4d7
Patch0539: 0539-extras-disable-lookup-optimize-in-virt-and-block-gro.patch
b7d4d7
Patch0540: 0540-extras-Disable-write-behind-for-group-samba.patch
b7d4d7
Patch0541: 0541-glusterd-volgen-Add-functionality-to-accept-any-cust.patch
b7d4d7
Patch0542: 0542-xlaotrs-mgmt-Fixing-coverity-issue-1445996.patch
b7d4d7
Patch0543: 0543-glusterd-handle-custom-xlator-failure-cases.patch
b7d4d7
Patch0544: 0544-RHGS-3.5.4-rebuild-to-ship-with-RHEL-8.5.patch
e7a346
e7a346
%description
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package includes the glusterfs binary, the glusterfsd daemon and the
e7a346
libglusterfs and glusterfs translator modules common to both GlusterFS server
e7a346
and client framework.
e7a346
e7a346
%package api
e7a346
Summary:          GlusterFS api library
e7a346
Requires:         %{name}%{?_isa} = %{version}-%{release}
e7a346
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
e7a346
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e7a346
e7a346
%description api
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides the glusterfs libgfapi library.
e7a346
e7a346
%package api-devel
e7a346
Summary:          Development Libraries
e7a346
Requires:         %{name}%{?_isa} = %{version}-%{release}
e7a346
Requires:         %{name}-devel%{?_isa} = %{version}-%{release}
e7a346
Requires:         libacl-devel
e7a346
Requires:         %{name}-api%{?_isa} = %{version}-%{release}
e7a346
e7a346
%description api-devel
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides the api include files.
e7a346
e7a346
%package cli
e7a346
Summary:          GlusterFS CLI
e7a346
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e7a346
e7a346
%description cli
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides the GlusterFS CLI application and its man page
e7a346
50dc83
%package cloudsync-plugins
50dc83
Summary:          Cloudsync Plugins
50dc83
BuildRequires:    libcurl-devel
9f5ccc
Requires:         glusterfs-libs = %{version}-%{release}
50dc83
50dc83
%description cloudsync-plugins
50dc83
GlusterFS is a distributed file-system capable of scaling to several
50dc83
petabytes. It aggregates various storage bricks over Infiniband RDMA
50dc83
or TCP/IP interconnect into one large parallel network file
50dc83
system. GlusterFS is one of the most sophisticated file systems in
50dc83
terms of features and extensibility.  It borrows a powerful concept
50dc83
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
50dc83
is in user space and easily manageable.
50dc83
50dc83
This package provides cloudsync plugins for archival feature.
50dc83
e7a346
%package devel
e7a346
Summary:          Development Libraries
e7a346
Requires:         %{name}%{?_isa} = %{version}-%{release}
e7a346
# Needed for the Glupy examples to work
e7a346
%if ( 0%{!?_without_extra_xlators:1} )
50dc83
Requires:         %{name}-extra-xlators%{?_isa} = %{version}-%{release}
e7a346
%endif
e7a346
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
e7a346
%endif
e7a346
e7a346
%description devel
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides the development libraries and include files.
e7a346
e7a346
%if ( 0%{!?_without_extra_xlators:1} )
e7a346
%package extra-xlators
e7a346
Summary:          Extra Gluster filesystem Translators
e7a346
# We need python-gluster rpm for gluster module's __init__.py in Python
e7a346
# site-packages area
50dc83
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
50dc83
Requires:         python%{_pythonver}
e7a346
e7a346
%description extra-xlators
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides extra filesystem Translators, such as Glupy,
e7a346
for GlusterFS.
e7a346
%endif
e7a346
e7a346
%package fuse
e7a346
Summary:          Fuse client
e7a346
BuildRequires:    fuse-devel
e7a346
Requires:         attr
e7a346
Requires:         psmisc
e7a346
e7a346
Requires:         %{name}%{?_isa} = %{version}-%{release}
e7a346
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
e7a346
e7a346
Obsoletes:        %{name}-client < %{version}-%{release}
e7a346
Provides:         %{name}-client = %{version}-%{release}
e7a346
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e7a346
e7a346
%description fuse
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides support to FUSE based clients and includes the
e7a346
glusterfs(d) binary.
e7a346
50dc83
%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
e7a346
%package ganesha
e7a346
Summary:          NFS-Ganesha configuration
50dc83
Group:            Applications/File
e7a346
e7a346
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
9f5ccc
Requires:         nfs-ganesha-selinux >= 2.7.3
50dc83
Requires:         nfs-ganesha-gluster >= 2.7.3
e7a346
Requires:         pcs, dbus
e7a346
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
e7a346
Requires:         cman, pacemaker, corosync
e7a346
%endif
50dc83
50dc83
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 5 )
50dc83
# we need portblock resource-agent in 3.9.5 and later.
50dc83
Requires:         resource-agents >= 3.9.5
50dc83
Requires:         net-tools
50dc83
%endif
50dc83
50dc83
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
9f5ccc
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
e7a346
Requires: selinux-policy >= 3.13.1-160
e7a346
Requires(post):   policycoreutils-python
e7a346
Requires(postun): policycoreutils-python
e7a346
%else
e7a346
Requires(post):   policycoreutils-python-utils
e7a346
Requires(postun): policycoreutils-python-utils
e7a346
%endif
e7a346
%endif
e7a346
e7a346
%description ganesha
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides the configuration and related files for using
e7a346
NFS-Ganesha as the NFS server using GlusterFS
e7a346
%endif
e7a346
e7a346
%if ( 0%{!?_without_georeplication:1} )
e7a346
%package geo-replication
e7a346
Summary:          GlusterFS Geo-replication
e7a346
Requires:         %{name}%{?_isa} = %{version}-%{release}
e7a346
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
50dc83
Requires:         python%{_pythonver}
50dc83
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
e7a346
Requires:         python-prettytable
e7a346
%else
50dc83
Requires:         python%{_pythonver}-prettytable
e7a346
%endif
50dc83
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
50dc83
e7a346
Requires:         rsync
e7a346
Requires:         util-linux
e7a346
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
9f5ccc
# required for setting selinux bools
9f5ccc
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
9f5ccc
Requires(post):      policycoreutils-python-utils
9f5ccc
Requires(postun):    policycoreutils-python-utils
9f5ccc
Requires:            selinux-policy-targeted
9f5ccc
Requires(post):      selinux-policy-targeted
9f5ccc
BuildRequires:       selinux-policy-devel
9f5ccc
%endif
e7a346
e7a346
%description geo-replication
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides support to geo-replication.
e7a346
%endif
e7a346
e7a346
%package libs
e7a346
Summary:          GlusterFS common libraries
e7a346
e7a346
%description libs
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides the base GlusterFS libraries
e7a346
50dc83
%package -n python%{_pythonver}-gluster
e7a346
Summary:          GlusterFS python library
50dc83
Requires:         python%{_pythonver}
50dc83
%if ( ! %{_usepython3} )
50dc83
%{?python_provide:%python_provide python-gluster}
e7a346
Provides:         python-gluster = %{version}-%{release}
e7a346
Obsoletes:        python-gluster < 3.10
50dc83
%endif
e7a346
50dc83
%description -n python%{_pythonver}-gluster
50dc83
GlusterFS is a distributed file-system capable of scaling to several
50dc83
petabytes. It aggregates various storage bricks over Infiniband RDMA
50dc83
or TCP/IP interconnect into one large parallel network file
50dc83
system. GlusterFS is one of the most sophisticated file systems in
50dc83
terms of features and extensibility.  It borrows a powerful concept
50dc83
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
50dc83
is in user space and easily manageable.
50dc83
50dc83
This package contains the python modules of GlusterFS and its own gluster
50dc83
namespace.
e7a346
e7a346
%if ( 0%{!?_without_rdma:1} )
e7a346
%package rdma
e7a346
Summary:          GlusterFS rdma support for ib-verbs
e7a346
%if ( 0%{?fedora} && 0%{?fedora} > 26 )
e7a346
BuildRequires:    rdma-core-devel
e7a346
%else
e7a346
BuildRequires:    libibverbs-devel
e7a346
BuildRequires:    librdmacm-devel >= 1.0.15
e7a346
%endif
e7a346
Requires:         %{name}%{?_isa} = %{version}-%{release}
e7a346
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e7a346
e7a346
%description rdma
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides support to ib-verbs library.
e7a346
%endif
e7a346
e7a346
%if ( 0%{!?_without_regression_tests:1} )
e7a346
%package regression-tests
e7a346
Summary:          Development Tools
e7a346
Requires:         %{name}%{?_isa} = %{version}-%{release}
e7a346
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
e7a346
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
e7a346
## thin provisioning support
e7a346
Requires:         lvm2 >= 2.02.89
e7a346
Requires:         perl(App::Prove) perl(Test::Harness) gcc util-linux-ng
50dc83
Requires:         python%{_pythonver}
50dc83
Requires:         attr dbench file git libacl-devel net-tools
e7a346
Requires:         nfs-utils xfsprogs yajl psmisc bc
e7a346
e7a346
%description regression-tests
e7a346
The Gluster Test Framework is a suite of scripts used for
e7a346
regression testing of Gluster.
e7a346
%endif
e7a346
e7a346
%if ( 0%{!?_without_ocf:1} )
e7a346
%package resource-agents
e7a346
Summary:          OCF Resource Agents for GlusterFS
e7a346
License:          GPLv3+
e7a346
BuildArch:        noarch
50dc83
# this Group handling comes from the Fedora resource-agents package
e7a346
# for glusterd
e7a346
Requires:         %{name}-server = %{version}-%{release}
e7a346
# depending on the distribution, we need pacemaker or resource-agents
e7a346
Requires:         %{_prefix}/lib/ocf/resource.d
e7a346
e7a346
%description resource-agents
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides the resource agents which plug glusterd into
e7a346
Open Cluster Framework (OCF) compliant cluster resource managers,
e7a346
like Pacemaker.
e7a346
%endif
e7a346
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
%package server
e7a346
Summary:          Clustered file-system server
e7a346
Requires:         %{name}%{?_isa} = %{version}-%{release}
e7a346
Requires:         %{name}-cli%{?_isa} = %{version}-%{release}
e7a346
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
190130
%if ( 0%{?fedora} && 0%{?fedora} >= 30  || ( 0%{?rhel} && 0%{?rhel} >= 8 ) )
190130
Requires:         glusterfs-selinux >= 1.0-1
190130
%endif
e7a346
# some daemons (like quota) use a fuse-mount, glusterfsd is part of -fuse
e7a346
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
e7a346
# self-heal daemon, rebalance, nfs-server etc. are actually clients
e7a346
Requires:         %{name}-api%{?_isa} = %{version}-%{release}
e7a346
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
e7a346
# lvm2 for snapshot, and nfs-utils and rpcbind/portmap for gnfs server
e7a346
Requires:         lvm2
e7a346
Requires:         nfs-utils
e7a346
%if ( 0%{?_with_systemd:1} )
e7a346
%{?systemd_requires}
e7a346
%else
e7a346
Requires(post):   /sbin/chkconfig
e7a346
Requires(preun):  /sbin/service
e7a346
Requires(preun):  /sbin/chkconfig
e7a346
Requires(postun): /sbin/service
e7a346
%endif
e7a346
%if (0%{?_with_firewalld:1})
e7a346
# we install firewalld rules, so we need to have the directory owned
e7a346
%if ( 0%{!?rhel} )
e7a346
# not on RHEL because firewalld-filesystem appeared in 7.3
e7a346
# when EL7 rpm gets weak dependencies we can add a Suggests:
e7a346
Requires:         firewalld-filesystem
e7a346
%endif
e7a346
%endif
e7a346
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
e7a346
Requires:         rpcbind
e7a346
%else
e7a346
Requires:         portmap
e7a346
%endif
e7a346
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
e7a346
Requires:         python-argparse
e7a346
%endif
50dc83
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
50dc83
Requires:         python%{_pythonver}-pyxattr
50dc83
%else
e7a346
Requires:         pyxattr
50dc83
%endif
e7a346
%if (0%{?_with_valgrind:1})
e7a346
Requires:         valgrind
e7a346
%endif
e7a346
e7a346
%description server
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides the glusterfs server daemon.
e7a346
%endif
e7a346
e7a346
%package client-xlators
e7a346
Summary:          GlusterFS client-side translators
e7a346
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
e7a346
e7a346
%description client-xlators
e7a346
GlusterFS is a distributed file-system capable of scaling to several
e7a346
petabytes. It aggregates various storage bricks over Infiniband RDMA
e7a346
or TCP/IP interconnect into one large parallel network file
e7a346
system. GlusterFS is one of the most sophisticated file systems in
e7a346
terms of features and extensibility.  It borrows a powerful concept
e7a346
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
e7a346
is in user space and easily manageable.
e7a346
e7a346
This package provides the translators needed on any GlusterFS client.
e7a346
e7a346
%if ( 0%{!?_without_events:1} )
e7a346
%package events
e7a346
Summary:          GlusterFS Events
e7a346
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
9f5ccc
Requires:         python%{_pythonver}
50dc83
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
50dc83
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
e7a346
Requires:         python-requests
e7a346
%else
50dc83
Requires:         python%{_pythonver}-requests
e7a346
%endif
e7a346
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
9f5ccc
Requires:         python-prettytable
e7a346
Requires:         python-argparse
9f5ccc
%else
9f5ccc
Requires:         python%{_pythonver}-prettytable
e7a346
%endif
e7a346
%if ( 0%{?_with_systemd:1} )
e7a346
%{?systemd_requires}
e7a346
%endif
e7a346
e7a346
%description events
e7a346
GlusterFS Events
e7a346
e7a346
%endif
e7a346
e7a346
%prep
e7a346
%setup -q -n %{name}-%{version}%{?prereltag}
e7a346
e7a346
# sanitization scriptlet for patches with file renames
e7a346
ls %{_topdir}/SOURCES/*.patch | sort | \
e7a346
while read p
e7a346
do
e7a346
    # if the destination file exists, it's most probably stale
e7a346
    # so we must remove it
50dc83
    rename_to=( $(grep -i 'rename to' $p | cut -f 3 -d ' ') )
e7a346
    if [ ${#rename_to[*]} -gt 0 ]; then
e7a346
        for f in ${rename_to[*]}
e7a346
        do
e7a346
            if [ -f $f ]; then
e7a346
                rm -f $f
e7a346
            elif [ -d $f ]; then
e7a346
                rm -rf $f
e7a346
            fi
e7a346
        done
e7a346
    fi
e7a346
e7a346
    SOURCE_FILES=( $(egrep '^\-\-\- a/' $p | cut -f 2- -d '/') )
e7a346
    DEST_FILES=( $(egrep '^\+\+\+ b/' $p | cut -f 2- -d '/') )
e7a346
    EXCLUDE_DOCS=()
e7a346
    for idx in ${!SOURCE_FILES[@]}; do
2072c5
        # skip the doc
e7a346
        source_file=${SOURCE_FILES[$idx]}
e7a346
        dest_file=${DEST_FILES[$idx]}
e7a346
        if [[ "$dest_file" =~ ^doc/.+ ]]; then
e7a346
            if [ "$source_file" != "dev/null" ] && [ ! -f "$dest_file" ]; then
e7a346
                # if patch is being applied to a doc file and if the doc file
e7a346
                # hasn't been added so far then we need to exclude it
e7a346
                EXCLUDE_DOCS=( ${EXCLUDE_DOCS[*]} "$dest_file" )
e7a346
            fi
e7a346
        fi
e7a346
    done
e7a346
    EXCLUDE_DOCS_OPT=""
e7a346
    for doc in ${EXCLUDE_DOCS}; do
e7a346
        EXCLUDE_DOCS_OPT="--exclude=$doc $EXCLUDE_DOCS_OPT"
e7a346
    done
50dc83
50dc83
    # HACK to fix build
50dc83
    bn=$(basename $p)
50dc83
    if [ "$bn" == "0085-Revert-all-remove-code-which-is-not-being-considered.patch" ]; then
50dc83
        (patch -p1 -u -F3 < $p || :)
50dc83
        if [ -f libglusterfs/Makefile.am.rej ]; then
50dc83
            sed -i -e 's/^SUBDIRS = src/SUBDIRS = src src\/gfdb/g;s/^CLEANFILES = /CLEANFILES =/g' libglusterfs/Makefile.am
50dc83
        fi
50dc83
    elif [ "$bn" == "0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch" ]; then
50dc83
        (patch -p1 < $p || :)
50dc83
    elif [ "$bn" == "0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch" ]; then
50dc83
        (patch -p1 < $p || :)
50dc83
    elif [ "$bn" == "0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch" ]; then
50dc83
        (patch -p1 < $p || :)
50dc83
    elif [ "$bn" == "0117-spec-Remove-thin-arbiter-package.patch" ]; then
50dc83
        (patch -p1 < $p || :)
50dc83
    elif [ "$bn" == "0023-hooks-remove-selinux-hooks.patch" ]; then
50dc83
        (patch -p1 < $p || :)
50dc83
    elif [ "$bn" == "0042-spec-client-server-Builds-are-failing-on-rhel-6.patch" ]; then
50dc83
        (patch -p1 < $p || :)
50dc83
    else
50dc83
        # apply the patch with 'git apply'
50dc83
        git apply -p1 --exclude=rfc.sh \
50dc83
                      --exclude=.gitignore \
50dc83
                      --exclude=.testignore \
50dc83
                      --exclude=MAINTAINERS \
50dc83
                      --exclude=extras/checkpatch.pl \
50dc83
                      --exclude=build-aux/checkpatch.pl \
50dc83
                      --exclude='tests/*' \
50dc83
                      ${EXCLUDE_DOCS_OPT} \
50dc83
                      $p
50dc83
    fi
50dc83
e7a346
done
e7a346
50dc83
echo "fixing python shebangs..."
50dc83
%if ( %{_usepython3} )
50dc83
    for i in `find . -type f -exec bash -c "if file {} | grep 'Python script, ASCII text executable' >/dev/null; then echo {}; fi" ';'`; do
50dc83
        sed -i -e 's|^#!/usr/bin/python.*|#!%{__python3}|' -e 's|^#!/usr/bin/env python.*|#!%{__python3}|' $i
50dc83
    done
50dc83
%else
50dc83
    for f in api events extras geo-replication libglusterfs tools xlators; do
50dc83
        find $f -type f -exec sed -i 's|/usr/bin/python3|/usr/bin/python2|' {} \;
50dc83
    done
50dc83
%endif
e7a346
e7a346
%build
50dc83
e7a346
# In RHEL7 few hardening flags are available by default, however the RELRO
e7a346
# default behaviour is partial, convert to full
e7a346
%if ( 0%{?rhel} && 0%{?rhel} >= 7 )
e7a346
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
e7a346
export LDFLAGS
e7a346
%else
e7a346
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
e7a346
CFLAGS="$RPM_OPT_FLAGS -fPIE -DPIE"
e7a346
LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro,-z,now"
e7a346
%else
e7a346
# It appears that with gcc-4.1.2 in RHEL5 there is an issue using both -fPIC and
e7a346
# -fPIE that makes -z relro not work; -fPIE seems to undo what -fPIC does
e7a346
CFLAGS="$CFLAGS $RPM_OPT_FLAGS"
e7a346
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
e7a346
%endif
e7a346
export CFLAGS
e7a346
export LDFLAGS
e7a346
%endif
e7a346
e7a346
./autogen.sh && %configure \
50dc83
        %{?_with_asan} \
e7a346
        %{?_with_cmocka} \
e7a346
        %{?_with_debug} \
50dc83
        %{?_with_firewalld} \
e7a346
        %{?_with_tmpfilesdir} \
50dc83
        %{?_with_tsan} \
50dc83
        %{?_with_valgrind} \
e7a346
        %{?_without_epoll} \
50dc83
        %{?_without_events} \
e7a346
        %{?_without_fusermount} \
e7a346
        %{?_without_georeplication} \
e7a346
        %{?_without_ocf} \
e7a346
        %{?_without_rdma} \
50dc83
        %{?_without_server} \
e7a346
        %{?_without_syslog} \
e7a346
        %{?_without_tiering} \
50dc83
        %{?_with_ipv6default} \
e7a346
        %{?_without_libtirpc}
e7a346
e7a346
# fix hardening and remove rpath in shlibs
e7a346
%if ( 0%{?fedora} && 0%{?fedora} > 17 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
e7a346
sed -i 's| \\\$compiler_flags |&\\\$LDFLAGS |' libtool
e7a346
%endif
e7a346
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|' libtool
e7a346
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|' libtool
e7a346
e7a346
make %{?_smp_mflags}
e7a346
e7a346
%check
e7a346
make check
e7a346
e7a346
%install
e7a346
rm -rf %{buildroot}
e7a346
make install DESTDIR=%{buildroot}
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
%if ( 0%{_for_fedora_koji_builds} )
e7a346
install -D -p -m 0644 %{SOURCE1} \
e7a346
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
e7a346
install -D -p -m 0644 %{SOURCE2} \
e7a346
    %{buildroot}%{_sysconfdir}/sysconfig/glusterfsd
e7a346
%else
e7a346
install -D -p -m 0644 extras/glusterd-sysconfig \
e7a346
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
e7a346
%endif
e7a346
%endif
e7a346
e7a346
mkdir -p %{buildroot}%{_localstatedir}/log/glusterd
e7a346
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfs
e7a346
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfsd
e7a346
mkdir -p %{buildroot}%{_rundir}/gluster
e7a346
e7a346
# Remove unwanted files from all the shared libraries
e7a346
find %{buildroot}%{_libdir} -name '*.a' -delete
e7a346
find %{buildroot}%{_libdir} -name '*.la' -delete
e7a346
e7a346
# Remove installed docs, the ones we want are included by %%doc, in
e7a346
# /usr/share/doc/glusterfs or /usr/share/doc/glusterfs-x.y.z depending
e7a346
# on the distribution
e7a346
%if ( 0%{?fedora} && 0%{?fedora} > 19 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
e7a346
rm -rf %{buildroot}%{_pkgdocdir}/*
e7a346
%else
e7a346
rm -rf %{buildroot}%{_defaultdocdir}/%{name}
e7a346
mkdir -p %{buildroot}%{_pkgdocdir}
e7a346
%endif
e7a346
head -50 ChangeLog > ChangeLog.head && mv ChangeLog.head ChangeLog
e7a346
cat << EOM >> ChangeLog
e7a346
e7a346
More commit messages for this ChangeLog can be found at
e7a346
https://forge.gluster.org/glusterfs-core/glusterfs/commits/v%{version}%{?prereltag}
e7a346
EOM
e7a346
e7a346
# Remove benchmarking and other unpackaged files
e7a346
# make install always puts these in %%{_defaultdocdir}/%%{name} so don't
e7a346
# use %%{_pkgdocdir}; that will be wrong on later Fedora distributions
e7a346
rm -rf %{buildroot}%{_defaultdocdir}/%{name}/benchmarking
e7a346
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs-mode.el
e7a346
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs.vim
e7a346
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
# Create working directory
e7a346
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd
e7a346
e7a346
# Update configuration file to /var/lib working directory
e7a346
sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sharedstatedir}/glusterd|g' \
e7a346
    %{buildroot}%{_sysconfdir}/glusterfs/glusterd.vol
50dc83
%endif
e7a346
e7a346
# Install glusterfsd .service or init.d file
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
%if ( 0%{_for_fedora_koji_builds} )
50dc83
%service_install glusterfsd %{glusterfsd_svcfile}
50dc83
%endif
e7a346
%endif
e7a346
e7a346
install -D -p -m 0644 extras/glusterfs-logrotate \
e7a346
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs
e7a346
e7a346
# ganesha ghosts
50dc83
%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
e7a346
mkdir -p %{buildroot}%{_sysconfdir}/ganesha
e7a346
touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf
50dc83
mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/
e7a346
touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
e7a346
touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
e7a346
%endif
e7a346
e7a346
%if ( 0%{!?_without_georeplication:1} )
e7a346
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
e7a346
touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
e7a346
install -D -p -m 0644 extras/glusterfs-georep-logrotate \
e7a346
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
e7a346
%endif
e7a346
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
e7a346
touch %{buildroot}%{_sharedstatedir}/glusterd/options
e7a346
subdirs=(add-brick create copy-file delete gsync-create remove-brick reset set start stop)
e7a346
for dir in ${subdirs[@]}; do
e7a346
    mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/"$dir"/{pre,post}
e7a346
done
e7a346
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/glustershd
e7a346
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/peers
e7a346
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/vols
e7a346
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/nfs/run
e7a346
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/bitd
e7a346
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/quotad
e7a346
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/scrub
e7a346
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/snaps
e7a346
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/ss_brick
e7a346
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/nfs-server.vol
e7a346
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/run/nfs.pid
50dc83
%endif
e7a346
e7a346
find ./tests ./run-tests.sh -type f | cpio -pd %{buildroot}%{_prefix}/share/glusterfs
e7a346
e7a346
## Install bash completion for cli
e7a346
install -p -m 0744 -D extras/command-completion/gluster.bash \
e7a346
    %{buildroot}%{_sysconfdir}/bash_completion.d/gluster
e7a346
50dc83
%if ( 0%{!?_without_server:1} )
50dc83
echo "RHGS 3.5" > %{buildroot}%{_datadir}/glusterfs/release
e7a346
%endif
e7a346
50dc83
%clean
50dc83
rm -rf %{buildroot}
50dc83
e7a346
##-----------------------------------------------------------------------------
e7a346
## All %%post should be placed here and keep them sorted
e7a346
##
e7a346
%post
e7a346
/sbin/ldconfig
e7a346
%if ( 0%{!?_without_syslog:1} )
e7a346
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
50dc83
%systemd_postun_with_restart rsyslog
e7a346
%endif
e7a346
%endif
e7a346
exit 0
e7a346
e7a346
%post api
e7a346
/sbin/ldconfig
e7a346
e7a346
%if ( 0%{!?_without_events:1} )
e7a346
%post events
50dc83
%service_enable glustereventsd
e7a346
%endif
e7a346
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
e7a346
%post ganesha
e7a346
semanage boolean -m ganesha_use_fusefs --on
e7a346
exit 0
e7a346
%endif
e7a346
%endif
e7a346
e7a346
%if ( 0%{!?_without_georeplication:1} )
e7a346
%post geo-replication
9f5ccc
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
9f5ccc
%selinux_set_booleans %{selinuxbooleans}
9f5ccc
%endif
e7a346
if [ $1 -ge 1 ]; then
50dc83
    %systemd_postun_with_restart glusterd
e7a346
fi
e7a346
exit 0
e7a346
%endif
e7a346
e7a346
%post libs
e7a346
/sbin/ldconfig
e7a346
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
%post server
e7a346
# Legacy server
50dc83
%service_enable glusterd
e7a346
%if ( 0%{_for_fedora_koji_builds} )
50dc83
%service_enable glusterfsd
e7a346
%endif
e7a346
# ".cmd_log_history" is renamed to "cmd_history.log" in GlusterFS-3.7 .
e7a346
# While upgrading glusterfs-server package from GlusterFS version <= 3.6 to
e7a346
# GlusterFS version 3.7, ".cmd_log_history" should be renamed to
e7a346
# "cmd_history.log" to retain cli command history contents.
e7a346
if [ -f %{_localstatedir}/log/glusterfs/.cmd_log_history ]; then
e7a346
    mv %{_localstatedir}/log/glusterfs/.cmd_log_history \
e7a346
       %{_localstatedir}/log/glusterfs/cmd_history.log
e7a346
fi
e7a346
e7a346
# Genuine Fedora (and EPEL) builds never put gluster files in /etc; if
e7a346
# there are any files in /etc from a prior gluster.org install, move them
e7a346
# to /var/lib. (N.B. Starting with 3.3.0 all gluster files are in /var/lib
e7a346
# in gluster.org RPMs.) Be careful to copy them on the off chance that
e7a346
# /etc and /var/lib are on separate file systems
e7a346
if [ -d /etc/glusterd -a ! -h %{_sharedstatedir}/glusterd ]; then
e7a346
    mkdir -p %{_sharedstatedir}/glusterd
e7a346
    cp -a /etc/glusterd %{_sharedstatedir}/glusterd
e7a346
    rm -rf /etc/glusterd
e7a346
    ln -sf %{_sharedstatedir}/glusterd /etc/glusterd
e7a346
fi
e7a346
e7a346
# Rename old volfiles in an RPM-standard way.  These aren't actually
e7a346
# considered package config files, so %%config doesn't work for them.
e7a346
if [ -d %{_sharedstatedir}/glusterd/vols ]; then
e7a346
    for file in $(find %{_sharedstatedir}/glusterd/vols -name '*.vol'); do
e7a346
        newfile=${file}.rpmsave
e7a346
        echo "warning: ${file} saved as ${newfile}"
e7a346
        cp ${file} ${newfile}
e7a346
    done
e7a346
fi
e7a346
e7a346
# add marker translator
e7a346
# but first make certain that there are no old libs around to bite us
e7a346
# BZ 834847
e7a346
if [ -e /etc/ld.so.conf.d/glusterfs.conf ]; then
e7a346
    rm -f /etc/ld.so.conf.d/glusterfs.conf
e7a346
    /sbin/ldconfig
e7a346
fi
e7a346
e7a346
%if (0%{?_with_firewalld:1})
e7a346
    %firewalld_reload
e7a346
%endif
e7a346
e7a346
%endif
e7a346
e7a346
##-----------------------------------------------------------------------------
e7a346
## All %%pre should be placed here and keep them sorted
e7a346
##
e7a346
%pre
e7a346
getent group gluster > /dev/null || groupadd -r gluster
e7a346
getent passwd gluster > /dev/null || useradd -r -g gluster -d %{_rundir}/gluster -s /sbin/nologin -c "GlusterFS daemons" gluster
e7a346
exit 0
e7a346
e7a346
##-----------------------------------------------------------------------------
e7a346
## All %%preun should be placed here and keep them sorted
e7a346
##
e7a346
%if ( 0%{!?_without_events:1} )
e7a346
%preun events
e7a346
if [ $1 -eq 0 ]; then
50dc83
    if [ -f %glustereventsd_svcfile ]; then
50dc83
        %service_stop glustereventsd
50dc83
        %systemd_preun glustereventsd
e7a346
    fi
e7a346
fi
e7a346
exit 0
e7a346
%endif
e7a346
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
%preun server
e7a346
if [ $1 -eq 0 ]; then
50dc83
    if [ -f %glusterfsd_svcfile ]; then
50dc83
        %service_stop glusterfsd
e7a346
    fi
50dc83
    %service_stop glusterd
50dc83
    if [ -f %glusterfsd_svcfile ]; then
50dc83
        %systemd_preun glusterfsd
e7a346
    fi
50dc83
    %systemd_preun glusterd
e7a346
fi
e7a346
if [ $1 -ge 1 ]; then
50dc83
    if [ -f %glusterfsd_svcfile ]; then
50dc83
        %systemd_postun_with_restart glusterfsd
e7a346
    fi
50dc83
    %systemd_postun_with_restart glusterd
e7a346
fi
50dc83
exit 0
e7a346
%endif
e7a346
e7a346
##-----------------------------------------------------------------------------
e7a346
## All %%postun should be placed here and keep them sorted
e7a346
##
e7a346
%postun
e7a346
%if ( 0%{!?_without_syslog:1} )
e7a346
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
50dc83
%systemd_postun_with_restart rsyslog
e7a346
%endif
e7a346
%endif
e7a346
50dc83
%if ( 0%{!?_without_server:1} )
50dc83
%postun server
50dc83
%if (0%{?_with_firewalld:1})
50dc83
    %firewalld_reload
50dc83
%endif
50dc83
exit 0
50dc83
%endif
e7a346
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
e7a346
%postun ganesha
e7a346
semanage boolean -m ganesha_use_fusefs --off
e7a346
exit 0
e7a346
%endif
e7a346
%endif
e7a346
e7a346
##-----------------------------------------------------------------------------
e7a346
## All %%trigger should be placed here and keep them sorted
e7a346
##
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
e7a346
%trigger ganesha -- selinux-policy-targeted
e7a346
semanage boolean -m ganesha_use_fusefs --on
e7a346
exit 0
e7a346
%endif
e7a346
%endif
e7a346
e7a346
##-----------------------------------------------------------------------------
e7a346
## All %%triggerun should be placed here and keep them sorted
e7a346
##
50dc83
%if ( 0%{!?_without_server:1} )
50dc83
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
e7a346
%triggerun ganesha -- selinux-policy-targeted
e7a346
semanage boolean -m ganesha_use_fusefs --off
e7a346
exit 0
e7a346
%endif
e7a346
%endif
e7a346
e7a346
##-----------------------------------------------------------------------------
e7a346
## All %%files should be placed here and keep them grouped
e7a346
##
e7a346
%files
50dc83
%doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS COMMITMENT
e7a346
%{_mandir}/man8/*gluster*.8*
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
%exclude %{_mandir}/man8/gluster.8*
50dc83
%endif
e7a346
%dir %{_localstatedir}/log/glusterfs
e7a346
%if ( 0%{!?_without_rdma:1} )
e7a346
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
e7a346
%endif
50dc83
%if 0%{?!_without_server:1}
e7a346
%dir %{_datadir}/glusterfs
e7a346
%dir %{_datadir}/glusterfs/scripts
e7a346
     %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
e7a346
     %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
50dc83
%endif
50dc83
%{_datadir}/glusterfs/scripts/identify-hangs.sh
50dc83
%{_datadir}/glusterfs/scripts/collect-system-stats.sh
50dc83
%{_datadir}/glusterfs/scripts/log_accounting.sh
e7a346
# xlators that are needed on the client- and on the server-side
e7a346
%dir %{_libdir}/glusterfs
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/auth
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/addr.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/login.so
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/socket.so
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/error-gen.so
50dc83
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/delay-gen.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/io-stats.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/sink.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/trace.so
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/access-control.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/barrier.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cdc.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changelog.so
50dc83
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/utime.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/gfid-access.so
50dc83
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/namespace.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/read-only.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/shard.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-client.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/worm.so
50dc83
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cloudsync.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/meta.so
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-cache.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-threads.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/md-cache.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/open-behind.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/quick-read.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/read-ahead.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/readdir-ahead.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/stat-prefetch.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/write-behind.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/nl-cache.so
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system/posix-acl.so
e7a346
%dir %attr(0775,gluster,gluster) %{_rundir}/gluster
50dc83
%if 0%{?_tmpfilesdir:1} && 0%{!?_without_server:1}
e7a346
%{_tmpfilesdir}/gluster.conf
e7a346
%endif
50dc83
%if ( 0%{?_without_extra_xlators:1} )
50dc83
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
50dc83
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
50dc83
%endif
50dc83
%if ( 0%{?_without_regression_tests:1} )
50dc83
%exclude %{_datadir}/glusterfs/run-tests.sh
50dc83
%exclude %{_datadir}/glusterfs/tests
50dc83
%endif
50dc83
%if 0%{?_without_server:1}
e7a346
%if ( 0%{?_with_systemd:1} )
e7a346
%exclude %{_datadir}/glusterfs/scripts/control-cpu-load.sh
e7a346
%exclude %{_datadir}/glusterfs/scripts/control-mem.sh
e7a346
%endif
e7a346
%endif
e7a346
50dc83
%if ( 0%{?_without_server:1} || 0%{?rhel} < 7 )
50dc83
# exclude ganesha-related files for RHEL 6 and client-only (non-server) builds
50dc83
%exclude %{_sysconfdir}/ganesha/ganesha-ha.conf.sample
50dc83
%exclude %{_libexecdir}/ganesha/*
50dc83
%exclude %{_prefix}/lib/ocf/resource.d/heartbeat/*
50dc83
%if ( 0%{!?_without_server:1} )
50dc83
%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
50dc83
%endif
50dc83
%endif
50dc83
50dc83
%exclude %{_datadir}/glusterfs/scripts/setup-thin-arbiter.sh
50dc83
50dc83
%if ( 0%{?_without_server:1} )
50dc83
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
50dc83
%endif
50dc83
e7a346
%files api
e7a346
%exclude %{_libdir}/*.so
e7a346
# libgfapi files
e7a346
%{_libdir}/libgfapi.*
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so
e7a346
e7a346
%files api-devel
e7a346
%{_libdir}/pkgconfig/glusterfs-api.pc
e7a346
%{_libdir}/libgfapi.so
e7a346
%dir %{_includedir}/glusterfs
e7a346
%dir %{_includedir}/glusterfs/api
e7a346
     %{_includedir}/glusterfs/api/*
e7a346
e7a346
%files cli
e7a346
%{_sbindir}/gluster
e7a346
%{_mandir}/man8/gluster.8*
e7a346
%{_sysconfdir}/bash_completion.d/gluster
e7a346
50dc83
%files cloudsync-plugins
50dc83
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins
50dc83
     %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsyncs3.so
50dc83
     %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsynccvlt.so
50dc83
e7a346
%files devel
e7a346
%dir %{_includedir}/glusterfs
e7a346
     %{_includedir}/glusterfs/*
e7a346
%exclude %{_includedir}/glusterfs/api
e7a346
%exclude %{_libdir}/libgfapi.so
e7a346
%{_libdir}/*.so
50dc83
%if ( 0%{?_without_server:1} )
e7a346
%exclude %{_libdir}/pkgconfig/libgfchangelog.pc
50dc83
%exclude %{_libdir}/libgfchangelog.so
50dc83
%if ( 0%{!?_without_tiering:1} )
50dc83
%exclude %{_libdir}/pkgconfig/libgfdb.pc
e7a346
%endif
e7a346
%else
50dc83
%{_libdir}/pkgconfig/libgfchangelog.pc
50dc83
%if ( 0%{!?_without_tiering:1} )
50dc83
%{_libdir}/pkgconfig/libgfdb.pc
e7a346
%endif
e7a346
%endif
e7a346
e7a346
%files client-xlators
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/*.so
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so
e7a346
e7a346
%if ( 0%{!?_without_extra_xlators:1} )
e7a346
%files extra-xlators
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
50dc83
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground
50dc83
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
e7a346
%endif
e7a346
e7a346
%files fuse
e7a346
# glusterfs is a symlink to glusterfsd, -server depends on -fuse.
e7a346
%{_sbindir}/glusterfs
e7a346
%{_sbindir}/glusterfsd
e7a346
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/fuse.so
e7a346
/sbin/mount.glusterfs
e7a346
%if ( 0%{!?_without_fusermount:1} )
e7a346
%{_bindir}/fusermount-glusterfs
e7a346
%endif
e7a346
e7a346
%if ( 0%{!?_without_georeplication:1} )
e7a346
%files geo-replication
e7a346
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep
e7a346
e7a346
%{_sbindir}/gfind_missing_files
e7a346
%{_sbindir}/gluster-mountbroker
e7a346
%dir %{_libexecdir}/glusterfs
e7a346
%dir %{_libexecdir}/glusterfs/python
e7a346
%dir %{_libexecdir}/glusterfs/python/syncdaemon
e7a346
     %{_libexecdir}/glusterfs/gsyncd
e7a346
     %{_libexecdir}/glusterfs/python/syncdaemon/*
e7a346
     %{_libexecdir}/glusterfs/gverify.sh
e7a346
     %{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
e7a346
     %{_libexecdir}/glusterfs/peer_gsec_create
e7a346
     %{_libexecdir}/glusterfs/peer_mountbroker
e7a346
     %{_libexecdir}/glusterfs/peer_mountbroker.py*
e7a346
     %{_libexecdir}/glusterfs/gfind_missing_files
e7a346
     %{_libexecdir}/glusterfs/peer_georep-sshkey.py*
e7a346
%{_sbindir}/gluster-georep-sshkey
e7a346
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/geo-replication
e7a346
%ghost      %attr(0644,-,-) %{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post
e7a346
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/pre
e7a346
e7a346
%dir %{_datadir}/glusterfs
e7a346
%dir %{_datadir}/glusterfs/scripts
e7a346
     %{_datadir}/glusterfs/scripts/get-gfid.sh
e7a346
     %{_datadir}/glusterfs/scripts/slave-upgrade.sh
e7a346
     %{_datadir}/glusterfs/scripts/gsync-upgrade.sh
e7a346
     %{_datadir}/glusterfs/scripts/generate-gfid-file.sh
e7a346
     %{_datadir}/glusterfs/scripts/gsync-sync-gfid
e7a346
     %{_datadir}/glusterfs/scripts/schedule_georep.py*
e7a346
%endif
e7a346
e7a346
%files libs
e7a346
%{_libdir}/*.so.*
e7a346
%exclude %{_libdir}/libgfapi.*
e7a346
%if ( 0%{!?_without_tiering:1} )
e7a346
# libgfdb is only needed server-side
e7a346
%exclude %{_libdir}/libgfdb.*
e7a346
%endif
e7a346
50dc83
%files -n python%{_pythonver}-gluster
e7a346
# install the 'gluster' package directory in site-packages,
e7a346
# so that all other gluster submodules can reside in the same namespace.
50dc83
%if ( %{_usepython3} )
50dc83
%dir %{python3_sitelib}/gluster
50dc83
     %{python3_sitelib}/gluster/__init__.*
50dc83
     %{python3_sitelib}/gluster/__pycache__
50dc83
     %{python3_sitelib}/gluster/cliutils
50dc83
%else
e7a346
%dir %{python2_sitelib}/gluster
e7a346
     %{python2_sitelib}/gluster/__init__.*
e7a346
     %{python2_sitelib}/gluster/cliutils
50dc83
%endif
e7a346
e7a346
%if ( 0%{!?_without_rdma:1} )
e7a346
%files rdma
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
e7a346
%endif
e7a346
e7a346
%if ( 0%{!?_without_regression_tests:1} )
e7a346
%files regression-tests
e7a346
%dir %{_datadir}/glusterfs
e7a346
     %{_datadir}/glusterfs/run-tests.sh
e7a346
     %{_datadir}/glusterfs/tests
e7a346
%exclude %{_datadir}/glusterfs/tests/vagrant
e7a346
%endif
50dc83
50dc83
%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
50dc83
%files ganesha
50dc83
%dir %{_libexecdir}/ganesha
50dc83
%{_sysconfdir}/ganesha/ganesha-ha.conf.sample
50dc83
%{_libexecdir}/ganesha/*
50dc83
%{_prefix}/lib/ocf/resource.d/heartbeat/*
50dc83
%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
50dc83
%ghost      %attr(0644,-,-) %config(noreplace) %{_sysconfdir}/ganesha/ganesha-ha.conf
50dc83
%ghost %dir %attr(0755,-,-) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha
50dc83
%ghost      %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
50dc83
%ghost      %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
e7a346
%endif
e7a346
e7a346
%if ( 0%{!?_without_ocf:1} )
e7a346
%files resource-agents
e7a346
# /usr/lib is the standard for OCF, also on x86_64
e7a346
%{_prefix}/lib/ocf/resource.d/glusterfs
e7a346
%endif
e7a346
50dc83
%if ( 0%{!?_without_server:1} )
e7a346
%files server
e7a346
%doc extras/clear_xattrs.sh
50dc83
%{_datadir}/glusterfs/scripts/quota_fsck.py*
e7a346
# sysconf
e7a346
%config(noreplace) %{_sysconfdir}/glusterfs
50dc83
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
e7a346
%exclude %{_sysconfdir}/glusterfs/eventsconfig.json
e7a346
%config(noreplace) %{_sysconfdir}/sysconfig/glusterd
e7a346
%if ( 0%{_for_fedora_koji_builds} )
e7a346
%config(noreplace) %{_sysconfdir}/sysconfig/glusterfsd
e7a346
%endif
e7a346
e7a346
# init files
50dc83
%glusterd_svcfile
e7a346
%if ( 0%{_for_fedora_koji_builds} )
50dc83
%glusterfsd_svcfile
e7a346
%endif
e7a346
%if ( 0%{?_with_systemd:1} )
50dc83
%glusterfssharedstorage_svcfile
e7a346
%endif
e7a346
e7a346
# binaries
e7a346
%{_sbindir}/glusterd
e7a346
%{_sbindir}/glfsheal
e7a346
%{_sbindir}/gf_attach
e7a346
%{_sbindir}/gluster-setgfid2path
e7a346
# {_sbindir}/glusterfsd is the actual binary, but glusterfs (client) is a
e7a346
# symlink. The binary itself (and symlink) are part of the glusterfs-fuse
e7a346
# package, because glusterfs-server depends on that anyway.
e7a346
e7a346
# Manpages
e7a346
%{_mandir}/man8/gluster-setgfid2path.8*
e7a346
e7a346
# xlators
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
50dc83
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/sdfs.so
e7a346
%if ( 0%{!?_without_tiering:1} )
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
e7a346
     %{_libdir}/libgfdb.so.*
e7a346
%endif
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/selinux.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server.so
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage/posix.so
e7a346
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
e7a346
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
e7a346
e7a346
# snap_scheduler
e7a346
%{_sbindir}/snap_scheduler.py
e7a346
%{_sbindir}/gcron.py
e7a346
%{_sbindir}/conf.py
e7a346
e7a346
# /var/lib/glusterd, e.g. hookscripts, etc.
e7a346
%ghost      %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
50dc83
%ghost      %attr(0600,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/options
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/bitd
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/groups
e7a346
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/virt
e7a346
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/metadata-cache
e7a346
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/gluster-block
50dc83
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/nl-cache
e7a346
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/db-workload
e7a346
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/distributed-virt
e7a346
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/samba
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind/.keys
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glustershd
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post
e7a346
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/disabled-quota-root-xattr-heal.sh
e7a346
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/S13create-subdir-mounts.sh
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre
e7a346
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/pre
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/post
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/pre
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/post
e7a346
                            %{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/post
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/pre
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/post
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/pre
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post
e7a346
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S30samba-set.sh
e7a346
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S32gluster_enable_shared_storage.sh
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/pre
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post
e7a346
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
e7a346
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/pre
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/post
e7a346
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre
e7a346
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh
e7a346
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs
e7a346
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run
e7a346
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/peers
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/quotad
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/scrub
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/snaps
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/ss_brick
e7a346
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/vols
e7a346
e7a346
# Extra utility script
e7a346
%dir %{_libexecdir}/glusterfs
e7a346
     %{_datadir}/glusterfs/release
e7a346
%dir %{_datadir}/glusterfs/scripts
e7a346
     %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
e7a346
%if ( 0%{?_with_systemd:1} )
e7a346
     %{_libexecdir}/glusterfs/mount-shared-storage.sh
e7a346
     %{_datadir}/glusterfs/scripts/control-cpu-load.sh
e7a346
     %{_datadir}/glusterfs/scripts/control-mem.sh
e7a346
%endif
e7a346
e7a346
# Incremental API (glusterfind)
e7a346
     %{_libexecdir}/glusterfs/glusterfind
e7a346
%{_bindir}/glusterfind
e7a346
     %{_libexecdir}/glusterfs/peer_add_secret_pub
e7a346
e7a346
%if ( 0%{?_with_firewalld:1} )
e7a346
%{_prefix}/lib/firewalld/services/glusterfs.xml
e7a346
%endif
50dc83
# end of server files
e7a346
%endif
e7a346
50dc83
# Events
50dc83
%if ( 0%{!?_without_events:1} )
50dc83
%files events
50dc83
%config(noreplace) %{_sysconfdir}/glusterfs/eventsconfig.json
50dc83
%dir %{_sharedstatedir}/glusterd
50dc83
%dir %{_sharedstatedir}/glusterd/events
50dc83
%dir %{_libexecdir}/glusterfs
50dc83
     %{_libexecdir}/glusterfs/gfevents
50dc83
     %{_libexecdir}/glusterfs/peer_eventsapi.py*
50dc83
%{_sbindir}/glustereventsd
50dc83
%{_sbindir}/gluster-eventsapi
50dc83
%{_datadir}/glusterfs/scripts/eventsdash.py*
50dc83
%if ( 0%{?_with_systemd:1} )
50dc83
%{_unitdir}/glustereventsd.service
50dc83
%else
50dc83
%{_sysconfdir}/init.d/glustereventsd
50dc83
%endif
50dc83
%endif
e7a346
e7a346
##-----------------------------------------------------------------------------
e7a346
## All %%pretrans scriptlets should be placed here; keep them sorted
e7a346
##
50dc83
%if 0%{!?_without_server:1}
e7a346
%pretrans -p <lua>
e7a346
-- Abort the rpm transaction when glusterfsd is running and a started
-- plain-distribute volume (type=0, status=1) exists: the shell script
-- below exits non-zero in that case, and also (with warnings) whenever
-- glusterfsd is running at all, so the admin must stop the processes
-- before upgrading.
if not posix.access("/bin/bash", "x") then
e7a346
    -- initial installation, no shell, no running glusterfsd
e7a346
    return 0
e7a346
end
e7a346
e7a346
-- TODO: move this completely to a lua script
e7a346
-- For now, we write a temporary bash script and execute that.
e7a346
e7a346
script = [[#!/bin/sh
e7a346
pidof -c -o %PPID -x glusterfsd &>/dev/null
e7a346
e7a346
if [ $? -eq 0 ]; then
e7a346
   pushd . > /dev/null 2>&1
e7a346
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e7a346
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e7a346
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e7a346
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e7a346
          echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped."
e7a346
          echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!"
e7a346
          exit 1;
e7a346
       fi
e7a346
   done
e7a346
e7a346
   popd > /dev/null 2>&1
e7a346
   echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime."
e7a346
   echo "WARNING: Ensure to wait for the upgraded server to finish healing before proceeding."
e7a346
   echo "WARNING: Refer upgrade section of install guide for more details"
e7a346
   echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;"
e7a346
   exit 1;
e7a346
fi
e7a346
]]
e7a346
e7a346
-- os.execute returns a single status number on Lua 5.1 but the triple
-- (ok, how, val) on Lua 5.2+; 'rc = val or ok' yields the exit status
-- on either interpreter, so rpm built against either Lua works.
ok, how, val = os.execute(script)
9f5ccc
rc = val or ok
9f5ccc
if not (rc == 0) then
9f5ccc
   error("Detected running glusterfs processes", rc)
e7a346
end
e7a346
e7a346
e7a346
e7a346
%pretrans api -p <lua>
e7a346
if not posix.access("/bin/bash", "x") then
e7a346
    -- initial installation, no shell, no running glusterfsd
e7a346
    return 0
e7a346
end
e7a346
e7a346
-- TODO: move this completely to a lua script
e7a346
-- For now, we write a temporary bash script and execute that.
e7a346
e7a346
script = [[#!/bin/sh
e7a346
pidof -c -o %PPID -x glusterfsd &>/dev/null
e7a346
e7a346
if [ $? -eq 0 ]; then
e7a346
   pushd . > /dev/null 2>&1
e7a346
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e7a346
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e7a346
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e7a346
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e7a346
          exit 1;
e7a346
       fi
e7a346
   done
e7a346
e7a346
   popd > /dev/null 2>&1
e7a346
   exit 1;
e7a346
fi
e7a346
]]
e7a346
50dc83
ok, how, val = os.execute(script)
9f5ccc
rc = val or ok
9f5ccc
if not (rc == 0) then
9f5ccc
   error("Detected running glusterfs processes", rc)
e7a346
end
e7a346
e7a346
e7a346
e7a346
%pretrans api-devel -p <lua>
e7a346
if not posix.access("/bin/bash", "x") then
e7a346
    -- initial installation, no shell, no running glusterfsd
e7a346
    return 0
e7a346
end
e7a346
e7a346
-- TODO: move this completely to a lua script
e7a346
-- For now, we write a temporary bash script and execute that.
e7a346
e7a346
script = [[#!/bin/sh
e7a346
pidof -c -o %PPID -x glusterfsd &>/dev/null
e7a346
e7a346
if [ $? -eq 0 ]; then
e7a346
   pushd . > /dev/null 2>&1
e7a346
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e7a346
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e7a346
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e7a346
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e7a346
          exit 1;
e7a346
       fi
e7a346
   done
e7a346
e7a346
   popd > /dev/null 2>&1
e7a346
   exit 1;
e7a346
fi
e7a346
]]
e7a346
50dc83
ok, how, val = os.execute(script)
9f5ccc
rc = val or ok
9f5ccc
if not (rc == 0) then
9f5ccc
   error("Detected running glusterfs processes", rc)
e7a346
end
e7a346
e7a346
e7a346
e7a346
%pretrans cli -p <lua>
e7a346
if not posix.access("/bin/bash", "x") then
e7a346
    -- initial installation, no shell, no running glusterfsd
e7a346
    return 0
e7a346
end
e7a346
e7a346
-- TODO: move this completely to a lua script
e7a346
-- For now, we write a temporary bash script and execute that.
e7a346
e7a346
script = [[#!/bin/sh
e7a346
pidof -c -o %PPID -x glusterfsd &>/dev/null
e7a346
e7a346
if [ $? -eq 0 ]; then
e7a346
   pushd . > /dev/null 2>&1
e7a346
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e7a346
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e7a346
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e7a346
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e7a346
          exit 1;
e7a346
       fi
e7a346
   done
e7a346
e7a346
   popd > /dev/null 2>&1
e7a346
   exit 1;
e7a346
fi
e7a346
]]
e7a346
50dc83
ok, how, val = os.execute(script)
9f5ccc
rc = val or ok
9f5ccc
if not (rc == 0) then
9f5ccc
   error("Detected running glusterfs processes", rc)
e7a346
end
e7a346
e7a346
e7a346
%pretrans client-xlators -p <lua>
e7a346
if not posix.access("/bin/bash", "x") then
e7a346
    -- initial installation, no shell, no running glusterfsd
e7a346
    return 0
e7a346
end
e7a346
e7a346
-- TODO: move this completely to a lua script
e7a346
-- For now, we write a temporary bash script and execute that.
e7a346
e7a346
script = [[#!/bin/sh
e7a346
pidof -c -o %PPID -x glusterfsd &>/dev/null
e7a346
e7a346
if [ $? -eq 0 ]; then
e7a346
   pushd . > /dev/null 2>&1
e7a346
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e7a346
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e7a346
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e7a346
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e7a346
          exit 1;
e7a346
       fi
e7a346
   done
e7a346
e7a346
   popd > /dev/null 2>&1
e7a346
   exit 1;
e7a346
fi
e7a346
]]
e7a346
50dc83
ok, how, val = os.execute(script)
9f5ccc
rc = val or ok
9f5ccc
if not (rc == 0) then
9f5ccc
   error("Detected running glusterfs processes", rc)
e7a346
end
e7a346
e7a346
e7a346
%pretrans fuse -p <lua>
e7a346
if not posix.access("/bin/bash", "x") then
e7a346
    -- initial installation, no shell, no running glusterfsd
e7a346
    return 0
e7a346
end
e7a346
e7a346
-- TODO: move this completely to a lua script
e7a346
-- For now, we write a temporary bash script and execute that.
e7a346
e7a346
script = [[#!/bin/sh
e7a346
pidof -c -o %PPID -x glusterfsd &>/dev/null
e7a346
e7a346
if [ $? -eq 0 ]; then
e7a346
   pushd . > /dev/null 2>&1
e7a346
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e7a346
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e7a346
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e7a346
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e7a346
          exit 1;
e7a346
       fi
e7a346
   done
e7a346
e7a346
   popd > /dev/null 2>&1
e7a346
   exit 1;
e7a346
fi
e7a346
]]
e7a346
50dc83
ok, how, val = os.execute(script)
9f5ccc
rc = val or ok
9f5ccc
if not (rc == 0) then
9f5ccc
   error("Detected running glusterfs processes", rc)
e7a346
end
e7a346
e7a346
e7a346
e7a346
%if ( 0%{!?_without_georeplication:1} )
e7a346
%pretrans geo-replication -p <lua>
e7a346
if not posix.access("/bin/bash", "x") then
e7a346
    -- initial installation, no shell, no running glusterfsd
e7a346
    return 0
e7a346
end
e7a346
e7a346
-- TODO: move this completely to a lua script
e7a346
-- For now, we write a temporary bash script and execute that.
e7a346
e7a346
script = [[#!/bin/sh
e7a346
pidof -c -o %PPID -x glusterfsd &>/dev/null
e7a346
e7a346
if [ $? -eq 0 ]; then
e7a346
   pushd . > /dev/null 2>&1
e7a346
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e7a346
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e7a346
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e7a346
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e7a346
          exit 1;
e7a346
       fi
e7a346
   done
e7a346
e7a346
   popd > /dev/null 2>&1
e7a346
   exit 1;
e7a346
fi
e7a346
]]
e7a346
50dc83
ok, how, val = os.execute(script)
9f5ccc
rc = val or ok
9f5ccc
if not (rc == 0) then
9f5ccc
   error("Detected running glusterfs processes", rc)
e7a346
end
e7a346
%endif
e7a346
e7a346
e7a346
e7a346
%pretrans libs -p <lua>
e7a346
if not posix.access("/bin/bash", "x") then
e7a346
    -- initial installation, no shell, no running glusterfsd
e7a346
    return 0
e7a346
end
e7a346
e7a346
-- TODO: move this completely to a lua script
e7a346
-- For now, we write a temporary bash script and execute that.
e7a346
e7a346
script = [[#!/bin/sh
e7a346
pidof -c -o %PPID -x glusterfsd &>/dev/null
e7a346
e7a346
if [ $? -eq 0 ]; then
e7a346
   pushd . > /dev/null 2>&1
e7a346
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e7a346
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e7a346
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e7a346
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e7a346
          exit 1;
e7a346
       fi
e7a346
   done
e7a346
e7a346
   popd > /dev/null 2>&1
e7a346
   exit 1;
e7a346
fi
e7a346
]]
e7a346
50dc83
ok, how, val = os.execute(script)
9f5ccc
rc = val or ok
9f5ccc
if not (rc == 0) then
9f5ccc
   error("Detected running glusterfs processes", rc)
e7a346
end
e7a346
e7a346
e7a346
e7a346
%if ( 0%{!?_without_rdma:1} )
e7a346
%pretrans rdma -p <lua>
e7a346
if not posix.access("/bin/bash", "x") then
e7a346
    -- initial installation, no shell, no running glusterfsd
e7a346
    return 0
e7a346
end
e7a346
e7a346
-- TODO: move this completely to a lua script
e7a346
-- For now, we write a temporary bash script and execute that.
e7a346
e7a346
script = [[#!/bin/sh
e7a346
pidof -c -o %PPID -x glusterfsd &>/dev/null
e7a346
e7a346
if [ $? -eq 0 ]; then
e7a346
   pushd . > /dev/null 2>&1
e7a346
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e7a346
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e7a346
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e7a346
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e7a346
          exit 1;
e7a346
       fi
e7a346
   done
e7a346
e7a346
   popd > /dev/null 2>&1
e7a346
   exit 1;
e7a346
fi
e7a346
]]
e7a346
50dc83
ok, how, val = os.execute(script)
9f5ccc
rc = val or ok
9f5ccc
if not (rc == 0) then
9f5ccc
   error("Detected running glusterfs processes", rc)
e7a346
end
e7a346
%endif
e7a346
e7a346
e7a346
e7a346
%if ( 0%{!?_without_ocf:1} )
e7a346
%pretrans resource-agents -p <lua>
e7a346
if not posix.access("/bin/bash", "x") then
e7a346
    -- initial installation, no shell, no running glusterfsd
e7a346
    return 0
e7a346
end
e7a346
e7a346
-- TODO: move this completely to a lua script
e7a346
-- For now, we write a temporary bash script and execute that.
e7a346
e7a346
script = [[#!/bin/sh
e7a346
pidof -c -o %PPID -x glusterfsd &>/dev/null
e7a346
e7a346
if [ $? -eq 0 ]; then
e7a346
   pushd . > /dev/null 2>&1
e7a346
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e7a346
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e7a346
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e7a346
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e7a346
          exit 1;
e7a346
       fi
e7a346
   done
e7a346
e7a346
   popd > /dev/null 2>&1
e7a346
   exit 1;
e7a346
fi
e7a346
]]
e7a346
50dc83
ok, how, val = os.execute(script)
9f5ccc
rc = val or ok
9f5ccc
if not (rc == 0) then
9f5ccc
   error("Detected running glusterfs processes", rc)
e7a346
end
e7a346
%endif
e7a346
e7a346
e7a346
e7a346
%pretrans server -p <lua>
e7a346
if not posix.access("/bin/bash", "x") then
e7a346
    -- initial installation, no shell, no running glusterfsd
e7a346
    return 0
e7a346
end
e7a346
e7a346
-- TODO: move this completely to a lua script
e7a346
-- For now, we write a temporary bash script and execute that.
e7a346
e7a346
script = [[#!/bin/sh
e7a346
pidof -c -o %PPID -x glusterfsd &>/dev/null
e7a346
e7a346
if [ $? -eq 0 ]; then
e7a346
   pushd . > /dev/null 2>&1
e7a346
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
e7a346
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
e7a346
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
e7a346
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
e7a346
          exit 1;
e7a346
       fi
e7a346
   done
e7a346
e7a346
   popd > /dev/null 2>&1
e7a346
   exit 1;
e7a346
fi
e7a346
]]
e7a346
50dc83
ok, how, val = os.execute(script)
9f5ccc
rc = val or ok
9f5ccc
if not (rc == 0) then
9f5ccc
   error("Detected running glusterfs processes", rc)
e7a346
end
e7a346
e7a346
%posttrans server
e7a346
# After the transaction: if glusterd is running, stop geo-replication
# workers and glusterd itself, run the one-shot volfile upgrade pass,
# then restart glusterd.  If glusterd was not running, only run the
# upgrade pass and clean up the stale socket.
pidof -c -o %PPID -x glusterd &> /dev/null
e7a346
if [ $? -eq 0 ]; then
e7a346
    # terminate gsyncd.py (geo-replication) worker processes first
    kill -9 `pgrep -f gsyncd.py` &> /dev/null
e7a346
e7a346
    killall --wait -SIGTERM glusterd &> /dev/null
e7a346
e7a346
    if [ "$?" != "0" ]; then
e7a346
        echo "killall failed while killing glusterd"
e7a346
    fi
e7a346
e7a346
    # one-shot upgrade pass in the foreground (-N); glusterd exits when done
    glusterd --xlator-option *.upgrade=on -N
e7a346
e7a346
    #Cleaning leftover glusterd socket file which is created by glusterd in
e7a346
    #rpm_script_t context.
e7a346
    rm -rf /var/run/glusterd.socket
e7a346
e7a346
    # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
e7a346
    # so start it again
50dc83
    %service_start glusterd
e7a346
else
e7a346
    # glusterd was not running: upgrade volfiles without restarting it
    glusterd --xlator-option *.upgrade=on -N
e7a346
e7a346
    #Cleaning leftover glusterd socket file which is created by glusterd in
e7a346
    #rpm_script_t context.
e7a346
    rm -rf /var/run/glusterd.socket
e7a346
fi
e7a346
e7a346
%endif
e7a346
e7a346
%changelog
116ed3
* Tue Nov 09 2021 CentOS Sources <bugs@centos.org> - 6.0-56.4.el8.centos
116ed3
- remove vendor and/or packager lines
116ed3
b7d4d7
* Mon Aug 30 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.4
b7d4d7
- Add gating.yaml, fixes bugs bz#1996984
ae7ba5
b7d4d7
* Tue Aug 24 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.3
b7d4d7
- fixes bugs bz#1996984
b7d4d7
b7d4d7
* Thu May 06 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.2
b7d4d7
- fixes bugs bz#1953901
b7d4d7
b7d4d7
* Thu Apr 22 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56.1
b7d4d7
- fixes bugs bz#1927235
b7d4d7
b7d4d7
* Wed Apr 14 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-56
b7d4d7
- fixes bugs bz#1948547
b7d4d7
b7d4d7
* Fri Mar 19 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-55
b7d4d7
- fixes bugs bz#1939372
b7d4d7
b7d4d7
* Wed Mar 03 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-54
b7d4d7
- fixes bugs bz#1832306 bz#1911292 bz#1924044
b7d4d7
b7d4d7
* Thu Feb 11 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-53
b7d4d7
- fixes bugs bz#1224906 bz#1691320 bz#1719171 bz#1814744 bz#1865796
b7d4d7
b7d4d7
* Thu Jan 28 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-52
b7d4d7
- fixes bugs bz#1600459 bz#1719171 bz#1830713 bz#1856574
b7d4d7
b7d4d7
* Mon Dec 28 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-51
b7d4d7
- fixes bugs bz#1640148 bz#1856574 bz#1910119
b7d4d7
b7d4d7
* Tue Dec 15 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-50
b7d4d7
- fixes bugs bz#1224906 bz#1412494 bz#1612973 bz#1663821 bz#1691320 
b7d4d7
  bz#1726673 bz#1749304 bz#1752739 bz#1779238 bz#1813866 bz#1814744 bz#1821599 
b7d4d7
  bz#1832306 bz#1835229 bz#1842449 bz#1865796 bz#1878077 bz#1882923 bz#1885966 
b7d4d7
  bz#1890506 bz#1896425 bz#1898776 bz#1898777 bz#1898778 bz#1898781 bz#1898784 
b7d4d7
  bz#1903468
190130
190130
* Wed Nov 25 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-49
190130
- fixes bugs bz#1286171
190130
190130
* Tue Nov 10 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-48
190130
- fixes bugs bz#1895301
190130
190130
* Thu Nov 05 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-47
190130
- fixes bugs bz#1286171 bz#1821743 bz#1837926
190130
190130
* Wed Oct 21 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-46
190130
- fixes bugs bz#1873469 bz#1881823
190130
190130
* Wed Sep 09 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-45
190130
- fixes bugs bz#1785714
190130
190130
* Thu Sep 03 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-44
190130
- fixes bugs bz#1460657
190130
190130
* Thu Sep 03 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-43
190130
- fixes bugs bz#1460657
190130
190130
* Wed Sep 02 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-42
190130
- fixes bugs bz#1785714
190130
190130
* Tue Aug 25 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-41
190130
- fixes bugs bz#1785714 bz#1851424 bz#1851989 bz#1852736 bz#1853189 bz#1855966
190130
190130
* Tue Jul 21 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-40
190130
- fixes bugs bz#1812789 bz#1844359 bz#1847081 bz#1854165
fb9c19
190130
* Wed Jun 17 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-39
190130
- fixes bugs bz#1844359 bz#1845064
2072c5
190130
* Wed Jun 10 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-38
190130
- fixes bugs bz#1234220 bz#1286171 bz#1487177 bz#1524457 bz#1640573 
190130
  bz#1663557 bz#1667954 bz#1683602 bz#1686897 bz#1721355 bz#1748865 bz#1750211 
190130
  bz#1754391 bz#1759875 bz#1761531 bz#1761932 bz#1763124 bz#1763129 bz#1764091 
190130
  bz#1775637 bz#1776901 bz#1781550 bz#1781649 bz#1781710 bz#1783232 bz#1784211 
190130
  bz#1784415 bz#1786516 bz#1786681 bz#1787294 bz#1787310 bz#1787331 bz#1787994 
190130
  bz#1790336 bz#1792873 bz#1794663 bz#1796814 bz#1804164 bz#1810924 bz#1815434 
190130
  bz#1836099 bz#1837467 bz#1837926 bz#1838479 bz#1839137 bz#1844359
473014
9f5ccc
* Fri May 29 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-37
9f5ccc
- fixes bugs bz#1840794
9f5ccc
9f5ccc
* Wed May 27 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-36
9f5ccc
- fixes bugs bz#1812789 bz#1823423
9f5ccc
9f5ccc
* Fri May 22 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-35
9f5ccc
- fixes bugs bz#1810516 bz#1830713 bz#1836233
9f5ccc
9f5ccc
* Sun May 17 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-34
9f5ccc
- fixes bugs bz#1802013 bz#1823706 bz#1825177 bz#1830713 bz#1831403 bz#1833017
9f5ccc
9f5ccc
* Wed Apr 29 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-33
9f5ccc
- fixes bugs bz#1812789 bz#1813917 bz#1823703 bz#1823706 bz#1825195
9f5ccc
9f5ccc
* Sat Apr 04 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-32
9f5ccc
- fixes bugs bz#1781543 bz#1812789 bz#1812824 bz#1817369 bz#1819059
9f5ccc
9f5ccc
* Tue Mar 17 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-31
9f5ccc
- fixes bugs bz#1802727
9f5ccc
9f5ccc
* Thu Feb 20 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-30.1
9f5ccc
- fixes bugs bz#1800703
9f5ccc
9f5ccc
* Sat Feb 01 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-30
9f5ccc
- fixes bugs bz#1775564 bz#1794153
9f5ccc
9f5ccc
* Thu Jan 23 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-29
9f5ccc
- fixes bugs bz#1793035
9f5ccc
9f5ccc
* Tue Jan 14 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-28
9f5ccc
- fixes bugs bz#1789447
9f5ccc
9f5ccc
* Mon Jan 13 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-27
9f5ccc
- fixes bugs bz#1789447
9f5ccc
9f5ccc
* Fri Jan 10 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-26
9f5ccc
- fixes bugs bz#1763208 bz#1788656
9f5ccc
9f5ccc
* Mon Dec 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-25
9f5ccc
- fixes bugs bz#1686800 bz#1763208 bz#1779696 bz#1781444 bz#1782162
9f5ccc
9f5ccc
* Thu Nov 28 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-24
9f5ccc
- fixes bugs bz#1768786
9f5ccc
9f5ccc
* Thu Nov 21 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-23
9f5ccc
- fixes bugs bz#1344758 bz#1599802 bz#1685406 bz#1686800 bz#1724021 
9f5ccc
  bz#1726058 bz#1727755 bz#1731513 bz#1741193 bz#1758923 bz#1761326 bz#1761486 
9f5ccc
  bz#1762180 bz#1764095 bz#1766640
9f5ccc
9f5ccc
* Thu Nov 14 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-22
9f5ccc
- fixes bugs bz#1771524 bz#1771614
9f5ccc
9f5ccc
* Fri Oct 25 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-21
9f5ccc
- fixes bugs bz#1765555
9f5ccc
9f5ccc
* Wed Oct 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-20
9f5ccc
- fixes bugs bz#1719171 bz#1763412 bz#1764202
9f5ccc
9f5ccc
* Thu Oct 17 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-19
9f5ccc
- fixes bugs bz#1760939
9f5ccc
9f5ccc
* Wed Oct 16 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-18
9f5ccc
- fixes bugs bz#1758432
9f5ccc
9f5ccc
* Fri Oct 11 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-17
9f5ccc
- fixes bugs bz#1704562 bz#1758618 bz#1760261
9f5ccc
9f5ccc
* Wed Oct 09 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-16
9f5ccc
- fixes bugs bz#1752713 bz#1756325
d45bac
50dc83
* Fri Sep 27 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-15
50dc83
- fixes bugs bz#1726000 bz#1731826 bz#1754407 bz#1754790 bz#1755227
e7a346
50dc83
* Fri Sep 20 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-14
50dc83
- fixes bugs bz#1719171 bz#1728673 bz#1731896 bz#1732443 bz#1733970 
50dc83
  bz#1745107 bz#1746027 bz#1748688 bz#1750241 bz#1572163
e7a346
50dc83
* Fri Aug 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-13
50dc83
- fixes bugs bz#1729915 bz#1732376 bz#1743611 bz#1743627 bz#1743634 bz#1744518
e7a346
50dc83
* Fri Aug 09 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-12
50dc83
- fixes bugs bz#1730914 bz#1731448 bz#1732770 bz#1732792 bz#1733531 
50dc83
  bz#1734305 bz#1734534 bz#1734734 bz#1735514 bz#1737705 bz#1732774
50dc83
  bz#1732793
e7a346
50dc83
* Tue Aug 06 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-11
50dc83
- fixes bugs bz#1733520 bz#1734423
e7a346
50dc83
* Fri Aug 02 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-10
50dc83
- fixes bugs bz#1713890
e7a346
50dc83
* Tue Jul 23 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-9
50dc83
- fixes bugs bz#1708064 bz#1708180 bz#1715422 bz#1720992 bz#1722757
e7a346
50dc83
* Tue Jul 16 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-8
50dc83
- fixes bugs bz#1698435 bz#1712591 bz#1715447 bz#1720488 bz#1722209
50dc83
  bz#1722512 bz#1724089 bz#1726991 bz#1727785 bz#1729108
e7a346
50dc83
* Fri Jun 28 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-7
50dc83
- fixes bugs bz#1573077 bz#1600918 bz#1703423 bz#1704207 bz#1708064
50dc83
  bz#1709301 bz#1713664 bz#1716760 bz#1717784 bz#1720163 bz#1720192
50dc83
  bz#1720551 bz#1721351 bz#1721357 bz#1721477 bz#1722131 bz#1722331
50dc83
  bz#1722509 bz#1722801 bz#1720248
e7a346
50dc83
* Fri Jun 14 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-6
50dc83
- fixes bugs bz#1668001 bz#1708043 bz#1708183 bz#1710701 
50dc83
  bz#1719640 bz#1720079 bz#1720248 bz#1720318 bz#1720461
e7a346
50dc83
* Tue Jun 11 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-5
50dc83
- fixes bugs bz#1573077 bz#1694595 bz#1703434 bz#1714536 bz#1714588 
50dc83
  bz#1715407 bz#1715438 bz#1705018
e7a346
50dc83
* Fri Jun 07 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-4
50dc83
- fixes bugs bz#1480907 bz#1702298 bz#1703455 bz#1704181 bz#1707246
50dc83
  bz#1708067 bz#1708116 bz#1708121 bz#1709087 bz#1711249 bz#1711296 
50dc83
  bz#1714078 bz#1714124 bz#1716385 bz#1716626 bz#1716821 bz#1716865 bz#1717927
e7a346
50dc83
* Tue May 14 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-3
50dc83
- fixes bugs bz#1583585 bz#1671862 bz#1702686 bz#1703434 bz#1703753 
50dc83
  bz#1703897 bz#1704562 bz#1704769 bz#1704851 bz#1706683 bz#1706776 bz#1706893
e7a346
50dc83
* Thu Apr 25 2019 Milind Changire <mchangir@redhat.com> - 6.0-2
50dc83
- fixes bugs bz#1471742 bz#1652461 bz#1671862 bz#1676495 bz#1691620 
50dc83
  bz#1696334 bz#1696903 bz#1697820 bz#1698436 bz#1698728 bz#1699709 bz#1699835 
50dc83
  bz#1702240
e7a346
50dc83
* Mon Apr 08 2019 Milind Changire <mchangir@redhat.com> - 6.0-1
50dc83
- rebase to upstream glusterfs at v6.0
50dc83
- fixes bugs bz#1493284 bz#1578703 bz#1600918 bz#1670415 bz#1691620 
50dc83
  bz#1693935 bz#1695057
e7a346