%global _hardened_build 1

%global _for_fedora_koji_builds 0

# uncomment and add '%' to use the prereltag for pre-releases
# %%global prereltag qa3

##-----------------------------------------------------------------------------
## All argument definitions should be placed here and kept sorted
##

# asan
# if you wish to compile an rpm with address sanitizer...
# rpmbuild -ta glusterfs-6.0.tar.gz --with asan
%{?_with_asan:%global _with_asan --enable-asan}

%if ( 0%{?rhel} && 0%{?rhel} < 7 )
%global _with_asan %{nil}
%endif
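# Illustration (comment only, not consumed by the build): passing
# "--with asan" to rpmbuild predefines the macro _with_asan, and the
# conditional above then redefines it to the matching configure flag.
# The expansion can be checked from the shell, e.g.:
#
#   $ rpm --define '_with_asan 1' --eval '%%{?_with_asan:--enable-asan}'
#   --enable-asan
#
# Without the define, the same --eval prints nothing.  The other
# --with/--without stanzas below all follow this same idiom.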
# bd
# if you wish to compile an rpm without the BD map support...
# rpmbuild -ta glusterfs-6.0.tar.gz --without bd
%{?_without_bd:%global _without_bd --disable-bd-xlator}

%if ( 0%{?rhel} && 0%{?rhel} > 7 )
%global _without_bd --without-bd
%endif

# cmocka
# if you wish to compile an rpm with cmocka unit testing...
# rpmbuild -ta glusterfs-6.0.tar.gz --with cmocka
%{?_with_cmocka:%global _with_cmocka --enable-cmocka}

# debug
# if you wish to compile an rpm with debugging...
# rpmbuild -ta glusterfs-6.0.tar.gz --with debug
%{?_with_debug:%global _with_debug --enable-debug}

# epoll
# if you wish to compile an rpm without epoll...
# rpmbuild -ta glusterfs-6.0.tar.gz --without epoll
%{?_without_epoll:%global _without_epoll --disable-epoll}

# fusermount
# if you wish to compile an rpm without fusermount...
# rpmbuild -ta glusterfs-6.0.tar.gz --without fusermount
%{?_without_fusermount:%global _without_fusermount --disable-fusermount}

# geo-rep
# if you wish to compile an rpm without geo-replication support, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without georeplication
%{?_without_georeplication:%global _without_georeplication --disable-georeplication}

# ipv6default
# if you wish to compile an rpm with IPv6 as the default...
# rpmbuild -ta glusterfs-6.0.tar.gz --with ipv6default
%{?_with_ipv6default:%global _with_ipv6default --with-ipv6-default}

# libtirpc
# if you wish to compile an rpm without TIRPC (i.e. use the legacy glibc rpc)...
# rpmbuild -ta glusterfs-6.0.tar.gz --without libtirpc
%{?_without_libtirpc:%global _without_libtirpc --without-libtirpc}

# Do not use libtirpc on EL6, it does not have xdr_uint64_t() and xdr_uint32_t()
# Do not use libtirpc on EL7, it does not have xdr_sizeof()
%if ( 0%{?rhel} && 0%{?rhel} <= 7 )
%global _without_libtirpc --without-libtirpc
%endif


# ocf
# if you wish to compile an rpm without the OCF resource agents...
# rpmbuild -ta glusterfs-6.0.tar.gz --without ocf
%{?_without_ocf:%global _without_ocf --without-ocf}

# rdma
# if you wish to compile an rpm without rdma support, compile like this...
# rpmbuild -ta glusterfs-6.0.tar.gz --without rdma
%{?_without_rdma:%global _without_rdma --disable-ibverbs}

# server
# if you wish to build rpms without server components, compile like this
# rpmbuild -ta glusterfs-6.0.tar.gz --without server
%{?_without_server:%global _without_server --without-server}

# on RHEL, forcefully disable the server components unless building for a
# RHGS dist (.el6rhs, .el7rhs, .el7rhgs or .el8rhgs)
%if ( 0%{?rhel} )
%if (!(( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" ) || ( "%{?dist}" == ".el8rhgs" )))
%global _without_server --without-server
%endif
%endif
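# Illustration (comment only): the check above keys off the dist tag, which
# can be inspected on a build host with:
#
#   $ rpm --eval '%%{?dist}'
#   .el8
#
# A plain ".el8" matches none of the RHGS dist tags listed, so a non-RHGS
# RHEL build ends up with --without-server.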
%global _without_extra_xlators 1
%global _without_regression_tests 1

# syslog
# if you wish to build rpms without syslog logging, compile like this
# rpmbuild -ta glusterfs-6.0.tar.gz --without syslog
%{?_without_syslog:%global _without_syslog --disable-syslog}

# disable syslog forcefully as rhel <= 6 doesn't have rsyslog or rsyslog-mmcount
# Fedora deprecated syslog, see
#  https://fedoraproject.org/wiki/Changes/NoDefaultSyslog
# (And what about RHEL7?)
%if ( 0%{?fedora} && 0%{?fedora} >= 20 ) || ( 0%{?rhel} && 0%{?rhel} <= 6 )
%global _without_syslog --disable-syslog
%endif

# tsan
# if you wish to compile an rpm with thread sanitizer...
# rpmbuild -ta glusterfs-6.0.tar.gz --with tsan
%{?_with_tsan:%global _with_tsan --enable-tsan}

%if ( 0%{?rhel} && 0%{?rhel} < 7 )
%global _with_tsan %{nil}
%endif

# valgrind
# if you wish to compile an rpm to run all processes under valgrind...
# rpmbuild -ta glusterfs-6.0.tar.gz --with valgrind
%{?_with_valgrind:%global _with_valgrind --enable-valgrind}

##-----------------------------------------------------------------------------
## All %%global definitions should be placed here and kept sorted
##

# selinux booleans whose default values need modification
# these booleans will be consumed by the "%%selinux_set_booleans" macro.
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
%global selinuxbooleans rsync_full_access=1 rsync_client=1
%endif
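# Illustration (comment only, not consumed by the build): scriptlets hand
# this list to the selinux-policy macro, roughly as in this sketch:
#
#   %%post geo-replication
#   %%selinux_set_booleans %%{selinuxbooleans}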
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
%global _with_systemd true
%endif

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 7 )
%global _with_firewalld --enable-firewalld
%endif

%if 0%{?_tmpfilesdir:1}
%global _with_tmpfilesdir --with-tmpfilesdir=%{_tmpfilesdir}
%else
%global _with_tmpfilesdir --without-tmpfilesdir
%endif
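# Illustration (comment only): %%{_tmpfilesdir} is provided by systemd's rpm
# macros, so on a systemd host the flag above picks up the real path:
#
#   $ rpm --eval '%%{?_tmpfilesdir}'
#   /usr/lib/tmpfiles.d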
# building without the server should also disable some server-only components
%if 0%{?_without_server:1}
%global _without_events --disable-events
%global _without_georeplication --disable-georeplication
%global _without_tiering --disable-tiering
%global _without_ocf --without-ocf
%endif

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
%global _usepython3 1
%global _pythonver 3
%else
%global _usepython3 0
%global _pythonver 2
%endif
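# Illustration (comment only): %%{_pythonver} is substituted into package
# names below, so e.g. "BuildRequires: python%%{_pythonver}-devel" resolves
# to python3-devel on Fedora and RHEL 8, and to python2-devel on older RHEL.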
# From https://fedoraproject.org/wiki/Packaging:Python#Macros
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
%{!?python2_sitelib: %global python2_sitelib %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%{!?python2_sitearch: %global python2_sitearch %(python2 -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
%global _rundir %{_localstatedir}/run
%endif

%if ( 0%{?_with_systemd:1} )
%global service_enable()   /bin/systemctl --quiet enable %1.service || : \
%{nil}
%global service_start()   /bin/systemctl --quiet start %1.service || : \
%{nil}
%global service_stop()    /bin/systemctl --quiet stop %1.service || : \
%{nil}
%global service_install() install -D -p -m 0644 %1.service %{buildroot}%2 \
%{nil}
# can't seem to make a generic macro that works
%global glusterd_svcfile   %{_unitdir}/glusterd.service
%global glusterfsd_svcfile %{_unitdir}/glusterfsd.service
%global glusterta_svcfile %{_unitdir}/gluster-ta-volume.service
%global glustereventsd_svcfile %{_unitdir}/glustereventsd.service
%global glusterfssharedstorage_svcfile %{_unitdir}/glusterfssharedstorage.service
%else
%global service_enable()  /sbin/chkconfig --add %1 >/dev/null 2>&1 || : \
%{nil}
%global systemd_preun() /sbin/chkconfig --del %1 >/dev/null 2>&1 || : \
%{nil}
%global systemd_postun_with_restart() /sbin/service %1 condrestart >/dev/null 2>&1 || : \
%{nil}
%global service_start()   /sbin/service %1 start >/dev/null 2>&1 || : \
%{nil}
%global service_stop()    /sbin/service %1 stop >/dev/null 2>&1 || : \
%{nil}
%global service_install() install -D -p -m 0755 %1.init %{buildroot}%2 \
%{nil}
# can't seem to make a generic macro that works
%global glusterd_svcfile   %{_sysconfdir}/init.d/glusterd
%global glusterfsd_svcfile %{_sysconfdir}/init.d/glusterfsd
%global glustereventsd_svcfile %{_sysconfdir}/init.d/glustereventsd
%endif
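# Illustration (comment only): these parameterized macros take the service
# name as %%1, so a scriptlet invocation such as "%%service_enable glusterd"
# expands, on a systemd host, to:
#
#   /bin/systemctl --quiet enable glusterd.service || :
#
# and on a sysv host to the matching chkconfig invocation.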
%{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}}

# We do not want to generate useless provides and requires for the xlator
# .so files shipped in the glusterfs packages.
# Filter all generated ones:
#
# TODO: RHEL5 does not have a convenient solution
%if ( 0%{?rhel} == 6 )
# filter_setup exists in RHEL6 only
%filter_provides_in %{_libdir}/glusterfs/%{version}/
%global __filter_from_requires %{?__filter_from_requires} | grep -v -P '^(?!lib).*\.so.*$'
%filter_setup
%else
# modern rpm and current Fedora do not generate requires when the
# provides are filtered
%global __provides_exclude_from ^%{_libdir}/glusterfs/%{version}/.*$
%endif
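# Illustration (comment only): with the exclusion above, an xlator module
# such as %%{_libdir}/glusterfs/%%{version}/xlator/cluster/replicate.so no
# longer generates an automatic Provides entry, while shared libraries
# installed outside that directory are still scanned as usual.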
##-----------------------------------------------------------------------------
## All package definitions should be placed here in alphabetical order
##
Summary:          Distributed File System
%if ( 0%{_for_fedora_koji_builds} )
Name:             glusterfs
Version:          3.8.0
Release:          0.1%{?prereltag:.%{prereltag}}%{?dist}
%else
Name:             glusterfs
Version:          6.0
Release:          49.1%{?dist}
%endif
License:          GPLv2 or LGPLv3+
URL:              http://docs.gluster.org/
%if ( 0%{_for_fedora_koji_builds} )
Source0:          http://bits.gluster.org/pub/gluster/glusterfs/src/glusterfs-%{version}%{?prereltag}.tar.gz
Source1:          glusterd.sysconfig
Source2:          glusterfsd.sysconfig
Source7:          glusterfsd.service
Source8:          glusterfsd.init
%else
Source0:          glusterfs-6.0.tar.gz
%endif

Requires(pre):    shadow-utils
%if ( 0%{?_with_systemd:1} )
BuildRequires:    systemd
%endif

Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif
%if 0%{?_with_asan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
BuildRequires:    libasan
%endif
%if 0%{?_with_tsan:1} && !( 0%{?rhel} && 0%{?rhel} < 7 )
BuildRequires:    libtsan
%endif
BuildRequires:    git
BuildRequires:    bison flex
BuildRequires:    gcc make libtool
BuildRequires:    ncurses-devel readline-devel
BuildRequires:    libxml2-devel openssl-devel
BuildRequires:    libaio-devel libacl-devel
BuildRequires:    python%{_pythonver}-devel
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
BuildRequires:    python-ctypes
%endif
%if ( 0%{?_with_ipv6default:1} ) || ( 0%{!?_without_libtirpc:1} ) || ( 0%{?rhel} && ( 0%{?rhel} >= 8 ) )
BuildRequires:    libtirpc-devel
%endif
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
BuildRequires:    rpcgen
%endif
BuildRequires:    userspace-rcu-devel >= 0.7
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
BuildRequires:    automake
%endif
BuildRequires:    libuuid-devel
%if ( 0%{?_with_cmocka:1} )
BuildRequires:    libcmocka-devel >= 1.0.1
%endif
%if ( 0%{!?_without_tiering:1} )
BuildRequires:    sqlite-devel
%endif
%if ( 0%{!?_without_georeplication:1} )
BuildRequires:    libattr-devel
%endif

%if (0%{?_with_firewalld:1})
BuildRequires:    firewalld
%endif

Obsoletes:        hekafs
Obsoletes:        %{name}-common < %{version}-%{release}
Obsoletes:        %{name}-core < %{version}-%{release}
Obsoletes:        %{name}-ufo
%if ( 0%{!?_with_gnfs:1} )
Obsoletes:        %{name}-gnfs
%endif
%if ( 0%{?rhel} < 7 )
Obsoletes:        %{name}-ganesha
%endif
Provides:         %{name}-common = %{version}-%{release}
Provides:         %{name}-core = %{version}-%{release}
# Patch0001: 0001-Update-rfc.sh-to-rhgs-3.5.0.patch
Patch0002: 0002-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch
Patch0003: 0003-rpc-set-bind-insecure-to-off-by-default.patch
Patch0004: 0004-glusterd-spec-fixing-autogen-issue.patch
Patch0005: 0005-libglusterfs-glusterd-Fix-compilation-errors.patch
Patch0006: 0006-build-remove-ghost-directory-entries.patch
Patch0007: 0007-build-add-RHGS-specific-changes.patch
Patch0008: 0008-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch
Patch0009: 0009-build-introduce-security-hardening-flags-in-gluster.patch
Patch0010: 0010-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch
Patch0011: 0011-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch
Patch0012: 0012-build-add-pretrans-check.patch
Patch0013: 0013-glusterd-fix-info-file-checksum-mismatch-during-upgr.patch
Patch0014: 0014-build-spec-file-conflict-resolution.patch
Patch0015: 0015-build-randomize-temp-file-names-in-pretrans-scriptle.patch
Patch0016: 0016-glusterd-parallel-readdir-Change-the-op-version-of-p.patch
Patch0017: 0017-glusterd-Revert-op-version-for-cluster.max-brick-per.patch
Patch0018: 0018-cli-Add-message-for-user-before-modifying-brick-mult.patch
Patch0019: 0019-build-launch-glusterd-upgrade-after-all-new-bits-are.patch
Patch0020: 0020-spec-unpackaged-files-found-for-RHEL-7-client-build.patch
Patch0021: 0021-cli-glusterfsd-remove-copyright-information.patch
Patch0022: 0022-cli-Remove-upstream-doc-reference.patch
Patch0023: 0023-hooks-remove-selinux-hooks.patch
Patch0024: 0024-glusterd-Make-localtime-logging-option-invisible-in-.patch
Patch0025: 0025-build-make-RHGS-version-available-for-server.patch
Patch0026: 0026-glusterd-Introduce-daemon-log-level-cluster-wide-opt.patch
Patch0027: 0027-glusterd-change-op-version-of-fips-mode-rchecksum.patch
Patch0028: 0028-glusterd-Reset-op-version-for-features.shard-deletio.patch
Patch0029: 0029-glusterd-Reset-op-version-for-features.shard-lru-lim.patch
Patch0030: 0030-selinux-glusterd-add-features.selinux-to-glusterd-vo.patch
Patch0031: 0031-glusterd-turn-off-selinux-feature-in-downstream.patch
Patch0032: 0032-glusterd-update-gd-op-version-to-3_7_0.patch
Patch0033: 0033-build-add-missing-explicit-package-dependencies.patch
Patch0034: 0034-glusterd-introduce-a-new-op-version-for-rhgs-3.4.3.patch
Patch0035: 0035-glusterd-tag-rebalance-mgmt_v3-command-to-op-version.patch
Patch0036: 0036-build-add-conditional-dependency-on-server-for-devel.patch
Patch0037: 0037-cli-change-the-warning-message.patch
Patch0038: 0038-spec-avoid-creation-of-temp-file-in-lua-script.patch
Patch0039: 0039-cli-fix-query-to-user-during-brick-mux-selection.patch
Patch0040: 0040-build-Remove-unsupported-test-cases-failing-consiste.patch
Patch0041: 0041-tests-geo-rep-Build-failed-in-Jenkins-for-test-bug-1.patch
Patch0042: 0042-spec-client-server-Builds-are-failing-on-rhel-6.patch
Patch0043: 0043-inode-don-t-dump-the-whole-table-to-CLI.patch
Patch0044: 0044-cluster-ec-Don-t-enqueue-an-entry-if-it-is-already-h.patch
Patch0045: 0045-glusterd-fix-txn-id-mem-leak.patch
Patch0046: 0046-protocol-client-Do-not-fallback-to-anon-fd-if-fd-is-.patch
Patch0047: 0047-client-rpc-Fix-the-payload-being-sent-on-the-wire.patch
Patch0048: 0048-gfapi-Unblock-epoll-thread-for-upcall-processing.patch
Patch0049: 0049-transport-socket-log-shutdown-msg-occasionally.patch
Patch0050: 0050-geo-rep-Fix-syncing-multiple-rename-of-symlink.patch
Patch0051: 0051-spec-update-rpm-install-condition.patch
Patch0052: 0052-geo-rep-IPv6-support.patch
Patch0053: 0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch
Patch0054: 0054-Revert-glusterd-storhaug-remove-ganesha.patch
Patch0055: 0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch
Patch0056: 0056-common-ha-fixes-for-Debian-based-systems.patch
Patch0057: 0057-ganesha-scripts-Remove-export-entries-from-ganesha.c.patch
Patch0058: 0058-glusterd-ganesha-During-volume-delete-remove-the-gan.patch
Patch0059: 0059-glusterd-ganesha-throw-proper-error-for-gluster-nfs-.patch
Patch0060: 0060-ganesha-scripts-Stop-ganesha-process-on-all-nodes-if.patch
Patch0061: 0061-ganesha-allow-refresh-config-and-volume-export-unexp.patch
Patch0062: 0062-glusterd-ganesha-perform-removal-of-ganesha.conf-on-.patch
Patch0063: 0063-glusterd-ganesha-update-cache-invalidation-properly-.patch
Patch0064: 0064-glusterd-ganesha-return-proper-value-in-pre_setup.patch
Patch0065: 0065-ganesha-scripts-remove-dependency-over-export-config.patch
Patch0066: 0066-glusterd-ganesha-add-proper-NULL-check-in-manage_exp.patch
Patch0067: 0067-ganesha-minor-improvments-for-commit-e91cdf4-17081.patch
Patch0068: 0068-common-ha-surviving-ganesha.nfsd-not-put-in-grace-on.patch
Patch0069: 0069-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
Patch0070: 0070-packaging-glusterfs-ganesha-update-sometimes-fails-s.patch
Patch0071: 0071-common-ha-enable-and-disable-selinux-gluster_use_exe.patch
Patch0072: 0072-ganesha-ha-don-t-set-SELinux-booleans-if-SELinux-is-.patch
Patch0073: 0073-build-remove-ganesha-dependency-on-selinux-policy.patch
Patch0074: 0074-common-ha-enable-pacemaker-at-end-of-setup.patch
Patch0075: 0075-common-ha-Fix-an-incorrect-syntax-during-setup.patch
Patch0076: 0076-glusterd-ganesha-change-voltype-for-ganesha.enable-i.patch
Patch0077: 0077-glusterd-ganesha-create-remove-export-file-only-from.patch
Patch0078: 0078-common-ha-scripts-pass-the-list-of-servers-properly-.patch
Patch0079: 0079-common-ha-All-statd-related-files-need-to-be-owned-b.patch
Patch0080: 0080-glusterd-ganesha-Skip-non-ganesha-nodes-properly-for.patch
Patch0081: 0081-ganesha-ha-ensure-pacemaker-is-enabled-after-setup.patch
Patch0082: 0082-build-Add-dependency-on-netstat-for-glusterfs-ganesh.patch
Patch0083: 0083-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
Patch0084: 0084-glusterd-Fix-duplicate-client_op_version-in-info-fil.patch
Patch0085: 0085-Revert-all-remove-code-which-is-not-being-considered.patch
Patch0086: 0086-Revert-tiering-remove-the-translator-from-build-and-.patch
Patch0087: 0087-ganesha-fixing-minor-issues-after-the-backport-from-.patch
Patch0088: 0088-tier-fix-failures-noticed-during-tier-start-and-tier.patch
Patch0089: 0089-glusterd-gNFS-On-post-upgrade-to-3.2-disable-gNFS-fo.patch
Patch0090: 0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch
Patch0091: 0091-glusterd-gNFS-explicitly-set-nfs.disable-to-off-afte.patch
Patch0092: 0092-logging-Fix-GF_LOG_OCCASSIONALLY-API.patch
Patch0093: 0093-glusterd-Change-op-version-of-cache-invalidation-in-.patch
Patch0094: 0094-glusterd-load-ctime-in-the-client-graph-only-if-it-s.patch
Patch0095: 0095-cluster-afr-Remove-local-from-owners_list-on-failure.patch
Patch0096: 0096-core-Brick-is-not-able-to-detach-successfully-in-bri.patch
Patch0097: 0097-glusterd-tier-while-doing-an-attach-tier-the-self-he.patch
Patch0098: 0098-mgmt-shd-Implement-multiplexing-in-self-heal-daemon.patch
Patch0099: 0099-client-fini-return-fini-after-rpc-cleanup.patch
Patch0100: 0100-clnt-rpc-ref-leak-during-disconnect.patch
Patch0101: 0101-shd-mux-Fix-coverity-issues-introduced-by-shd-mux-pa.patch
Patch0102: 0102-rpc-transport-Missing-a-ref-on-dict-while-creating-t.patch
Patch0103: 0103-dht-NULL-check-before-setting-error-flag.patch
Patch0104: 0104-afr-shd-Cleanup-self-heal-daemon-resources-during-af.patch
Patch0105: 0105-core-Log-level-changes-do-not-effect-on-running-clie.patch
Patch0106: 0106-libgfchangelog-use-find_library-to-locate-shared-lib.patch
Patch0107: 0107-gfapi-add-function-to-set-client-pid.patch
Patch0108: 0108-afr-add-client-pid-to-all-gf_event-calls.patch
Patch0109: 0109-glusterd-Optimize-glusterd-handshaking-code-path.patch
Patch0110: 0110-tier-shd-glusterd-with-shd-mux-the-shd-volfile-path-.patch
Patch0111: 0111-glusterd-fix-loading-ctime-in-client-graph-logic.patch
Patch0112: 0112-geo-rep-fix-incorrectly-formatted-authorized_keys.patch
Patch0113: 0113-spec-Glusterd-did-not-start-by-default-after-node-re.patch
Patch0114: 0114-core-fix-hang-issue-in-__gf_free.patch
Patch0115: 0115-core-only-log-seek-errors-if-SEEK_HOLE-SEEK_DATA-is-.patch
Patch0116: 0116-cluster-ec-fix-fd-reopen.patch
Patch0117: 0117-spec-Remove-thin-arbiter-package.patch
Patch0118: 0118-tests-mark-thin-arbiter-test-ta.t-as-bad.patch
Patch0119: 0119-glusterd-provide-a-way-to-detach-failed-node.patch
Patch0120: 0120-glusterd-shd-Keep-a-ref-on-volinfo-until-attach-rpc-.patch
Patch0121: 0121-spec-glusterfs-devel-for-client-build-should-not-dep.patch
Patch0122: 0122-posix-ctime-Fix-stat-time-attributes-inconsistency-d.patch
Patch0123: 0123-ctime-Fix-log-repeated-logging-during-open.patch
Patch0124: 0124-spec-remove-duplicate-references-to-files.patch
Patch0125: 0125-glusterd-define-dumpops-in-the-xlator_api-of-gluster.patch
Patch0126: 0126-cluster-dht-refactor-dht-lookup-functions.patch
Patch0127: 0127-cluster-dht-Refactor-dht-lookup-functions.patch
Patch0128: 0128-glusterd-Fix-bulkvoldict-thread-logic-in-brick-multi.patch
Patch0129: 0129-core-handle-memory-accounting-correctly.patch
Patch0130: 0130-tier-test-new-tier-cmds.t-fails-after-a-glusterd-res.patch
Patch0131: 0131-tests-dht-Test-that-lookups-are-sent-post-brick-up.patch
Patch0132: 0132-glusterd-remove-duplicate-occurrence-of-features.sel.patch
Patch0133: 0133-glusterd-enable-fips-mode-rchecksum-for-new-volumes.patch
Patch0134: 0134-performance-write-behind-remove-request-from-wip-lis.patch
Patch0135: 0135-geo-rep-fix-incorrectly-formatted-authorized_keys.patch
Patch0136: 0136-glusterd-fix-inconsistent-global-option-output-in-vo.patch
Patch0137: 0137-shd-glusterd-Serialize-shd-manager-to-prevent-race-c.patch
Patch0138: 0138-glusterd-Add-gluster-volume-stop-operation-to-gluste.patch
Patch0139: 0139-ec-shd-Cleanup-self-heal-daemon-resources-during-ec-.patch
Patch0140: 0140-cluster-ec-Reopen-shouldn-t-happen-with-O_TRUNC.patch
Patch0141: 0141-socket-ssl-fix-crl-handling.patch
Patch0142: 0142-lock-check-null-value-of-dict-to-avoid-log-flooding.patch
Patch0143: 0143-packaging-Change-the-dependency-on-nfs-ganesha-to-2..patch
Patch0144: 0144-cluster-ec-honor-contention-notifications-for-partia.patch
Patch0145: 0145-core-Capture-process-memory-usage-at-the-time-of-cal.patch
Patch0146: 0146-dht-Custom-xattrs-are-not-healed-in-case-of-add-bric.patch
Patch0147: 0147-glusterd-bulkvoldict-thread-is-not-handling-all-volu.patch
Patch0148: 0148-cluster-dht-Lookup-all-files-when-processing-directo.patch
Patch0149: 0149-glusterd-Optimize-code-to-copy-dictionary-in-handsha.patch
Patch0150: 0150-libglusterfs-define-macros-needed-for-cloudsync.patch
Patch0151: 0151-mgmt-glusterd-Make-changes-related-to-cloudsync-xlat.patch
Patch0152: 0152-storage-posix-changes-with-respect-to-cloudsync.patch
Patch0153: 0153-features-cloudsync-Added-some-new-functions.patch
Patch0154: 0154-cloudsync-cvlt-Cloudsync-plugin-for-commvault-store.patch
Patch0155: 0155-cloudsync-Make-readdirp-return-stat-info-of-all-the-.patch
Patch0156: 0156-cloudsync-Fix-bug-in-cloudsync-fops-c.py.patch
Patch0157: 0157-afr-frame-Destroy-frame-after-afr_selfheal_entry_gra.patch
Patch0158: 0158-glusterfsd-cleanup-Protect-graph-object-under-a-lock.patch
Patch0159: 0159-glusterd-add-an-op-version-check.patch
Patch0160: 0160-geo-rep-Geo-rep-help-text-issue.patch
Patch0161: 0161-geo-rep-Fix-rename-with-existing-destination-with-sa.patch
Patch0162: 0162-geo-rep-Fix-sync-method-config.patch
Patch0163: 0163-geo-rep-Fix-sync-hang-with-tarssh.patch
Patch0164: 0164-cluster-ec-Fix-handling-of-heal-info-cases-without-l.patch
Patch0165: 0165-tests-shd-Add-test-coverage-for-shd-mux.patch
Patch0166: 0166-glusterd-svc-glusterd_svcs_stop-should-call-individu.patch
Patch0167: 0167-glusterd-shd-Optimize-the-glustershd-manager-to-send.patch
Patch0168: 0168-cluster-dht-Fix-directory-perms-during-selfheal.patch
Patch0169: 0169-Build-Fix-spec-to-enable-rhel8-client-build.patch
Patch0170: 0170-geo-rep-Convert-gfid-conflict-resolutiong-logs-into-.patch
Patch0171: 0171-posix-add-storage.reserve-size-option.patch
Patch0172: 0172-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
Patch0173: 0173-glusterd-store-fips-mode-rchecksum-option-in-the-inf.patch
Patch0174: 0174-xlator-log-Add-more-logging-in-xlator_is_cleanup_sta.patch
Patch0175: 0175-ec-fini-Fix-race-between-xlator-cleanup-and-on-going.patch
Patch0176: 0176-features-shard-Fix-crash-during-background-shard-del.patch
Patch0177: 0177-features-shard-Fix-extra-unref-when-inode-object-is-.patch
Patch0178: 0178-Cluster-afr-Don-t-treat-all-bricks-having-metadata-p.patch
Patch0179: 0179-tests-Fix-split-brain-favorite-child-policy.t-failur.patch
Patch0180: 0180-ganesha-scripts-Make-generate-epoch.py-python3-compa.patch
Patch0181: 0181-afr-log-before-attempting-data-self-heal.patch
Patch0182: 0182-geo-rep-fix-mountbroker-setup.patch
Patch0183: 0183-glusterd-svc-Stop-stale-process-using-the-glusterd_p.patch
Patch0184: 0184-tests-Add-gating-configuration-file-for-rhel8.patch
Patch0185: 0185-gfapi-provide-an-api-for-setting-statedump-path.patch
Patch0186: 0186-cli-Remove-brick-warning-seems-unnecessary.patch
Patch0187: 0187-gfapi-statedump_path-add-proper-version-number.patch
Patch0188: 0188-features-shard-Fix-integer-overflow-in-block-count-a.patch
Patch0189: 0189-features-shard-Fix-block-count-accounting-upon-trunc.patch
Patch0190: 0190-Build-removing-the-hardcoded-usage-of-python3.patch
Patch0191: 0191-Build-Update-python-shebangs-based-on-version.patch
Patch0192: 0192-build-Ensure-gluster-cli-package-is-built-as-part-of.patch
Patch0193: 0193-spec-fixed-python-dependency-for-rhel6.patch
Patch0194: 0194-stack-Make-sure-to-have-unique-call-stacks-in-all-ca.patch
Patch0195: 0195-build-package-glusterfs-ganesha-for-rhel7-and-above.patch
Patch0196: 0196-posix-ctime-Fix-ctime-upgrade-issue.patch
Patch0197: 0197-posix-fix-crash-in-posix_cs_set_state.patch
Patch0198: 0198-cluster-ec-Prevent-double-pre-op-xattrops.patch
Patch0199: 0199-upcall-Avoid-sending-notifications-for-invalid-inode.patch
Patch0200: 0200-gfapi-fix-incorrect-initialization-of-upcall-syncop-.patch
Patch0201: 0201-geo-rep-Fix-permissions-for-GEOREP_DIR-in-non-root-s.patch
Patch0202: 0202-shd-mux-Fix-race-between-mux_proc-unlink-and-stop.patch
Patch0203: 0203-glusterd-shd-Change-shd-logfile-to-a-unique-name.patch
Patch0204: 0204-glusterd-conditionally-clear-txn_opinfo-in-stage-op.patch
Patch0205: 0205-glusterd-Can-t-run-rebalance-due-to-long-unix-socket.patch
Patch0206: 0206-glusterd-ignore-user.-options-from-compatibility-che.patch
Patch0207: 0207-glusterd-fix-use-after-free-of-a-dict_t.patch
Patch0208: 0208-mem-pool-remove-dead-code.patch
Patch0209: 0209-core-avoid-dynamic-TLS-allocation-when-possible.patch
Patch0210: 0210-mem-pool.-c-h-minor-changes.patch
Patch0211: 0211-libglusterfs-Fix-compilation-when-disable-mempool-is.patch
Patch0212: 0212-core-fix-memory-allocation-issues.patch
Patch0213: 0213-cluster-dht-Strip-out-dht-xattrs.patch
Patch0214: 0214-geo-rep-Upgrading-config-file-to-new-version.patch
Patch0215: 0215-posix-modify-storage.reserve-option-to-take-size-and.patch
Patch0216: 0216-Test-case-fixe-for-downstream-3.5.0.patch
Patch0217: 0217-uss-Fix-tar-issue-with-ctime-and-uss-enabled.patch
Patch0218: 0218-graph-shd-Use-glusterfs_graph_deactivate-to-free-the.patch
Patch0219: 0219-posix-add-posix_set_ctime-in-posix_ftruncate.patch
Patch0220: 0220-graph-shd-Use-top-down-approach-while-cleaning-xlato.patch
Patch0221: 0221-protocol-client-propagte-GF_EVENT_CHILD_PING-only-fo.patch
Patch0222: 0222-cluster-dht-Fixed-a-memleak-in-dht_rename_cbk.patch
Patch0223: 0223-change-get_real_filename-implementation-to-use-ENOAT.patch
Patch0224: 0224-core-replace-inet_addr-with-inet_pton.patch
Patch0225: 0225-tests-utils-Fix-py2-py3-util-python-scripts.patch
Patch0226: 0226-geo-rep-fix-gluster-command-path-for-non-root-sessio.patch
Patch0227: 0227-glusterd-svc-update-pid-of-mux-volumes-from-the-shd-.patch
Patch0228: 0228-locks-enable-notify-contention-by-default.patch
Patch0229: 0229-glusterd-Show-the-correct-brick-status-in-get-state.patch
Patch0230: 0230-Revert-glusterd-svc-update-pid-of-mux-volumes-from-t.patch
Patch0231: 0231-Revert-graph-shd-Use-top-down-approach-while-cleanin.patch
Patch0232: 0232-cluster-afr-Fix-incorrect-reporting-of-gfid-type-mis.patch
Patch0233: 0233-Revert-graph-shd-Use-glusterfs_graph_deactivate-to-f.patch
Patch0234: 0234-Revert-glusterd-shd-Change-shd-logfile-to-a-unique-n.patch
Patch0235: 0235-Revert-glusterd-svc-Stop-stale-process-using-the-glu.patch
Patch0236: 0236-Revert-shd-mux-Fix-race-between-mux_proc-unlink-and-.patch
Patch0237: 0237-Revert-ec-fini-Fix-race-between-xlator-cleanup-and-o.patch
Patch0238: 0238-Revert-xlator-log-Add-more-logging-in-xlator_is_clea.patch
Patch0239: 0239-Revert-ec-fini-Fix-race-with-ec_fini-and-ec_notify.patch
Patch0240: 0240-Revert-glusterd-shd-Optimize-the-glustershd-manager-.patch
Patch0241: 0241-Revert-glusterd-svc-glusterd_svcs_stop-should-call-i.patch
Patch0242: 0242-Revert-tests-shd-Add-test-coverage-for-shd-mux.patch
Patch0243: 0243-Revert-glusterfsd-cleanup-Protect-graph-object-under.patch
Patch0244: 0244-Revert-ec-shd-Cleanup-self-heal-daemon-resources-dur.patch
Patch0245: 0245-Revert-shd-glusterd-Serialize-shd-manager-to-prevent.patch
Patch0246: 0246-Revert-glusterd-shd-Keep-a-ref-on-volinfo-until-atta.patch
Patch0247: 0247-Revert-afr-shd-Cleanup-self-heal-daemon-resources-du.patch
Patch0248: 0248-Revert-shd-mux-Fix-coverity-issues-introduced-by-shd.patch
Patch0249: 0249-Revert-client-fini-return-fini-after-rpc-cleanup.patch
Patch0250: 0250-Revert-mgmt-shd-Implement-multiplexing-in-self-heal-.patch
Patch0251: 0251-tests-Fix-bug-1717819-metadata-split-brain-detection.patch
Patch0252: 0252-glusterd-do-not-mark-skip_locking-as-true-for-geo-re.patch
Patch0253: 0253-core-fix-deadlock-between-statedump-and-fd_anonymous.patch
Patch0254: 0254-Detach-iot_worker-to-release-its-resources.patch
Patch0255: 0255-Revert-tier-shd-glusterd-with-shd-mux-the-shd-volfil.patch
Patch0256: 0256-features-snapview-server-use-the-same-volfile-server.patch
Patch0257: 0257-geo-rep-Test-case-for-upgrading-config-file.patch
Patch0258: 0258-geo-rep-Fix-mount-broker-setup-issue.patch
Patch0259: 0259-gluster-block-tuning-perf-options.patch
Patch0260: 0260-ctime-Set-mdata-xattr-on-legacy-files.patch
Patch0261: 0261-features-utime-Fix-mem_put-crash.patch
Patch0262: 0262-glusterd-ctime-Disable-ctime-by-default.patch
Patch0263: 0263-tests-fix-ctime-related-tests.patch
Patch0264: 0264-gfapi-Fix-deadlock-while-processing-upcall.patch
Patch0265: 0265-fuse-add-missing-GF_FREE-to-fuse_interrupt.patch
Patch0266: 0266-geo-rep-Fix-mount-broker-setup-issue.patch
Patch0267: 0267-posix-ctime-Fix-race-during-lookup-ctime-xattr-heal.patch
Patch0268: 0268-rpc-transport-have-default-listen-port.patch
Patch0269: 0269-ec-fix-truncate-lock-to-cover-the-write-in-tuncate-c.patch
Patch0270: 0270-cluster-ec-inherit-healing-from-lock-when-it-has-inf.patch
Patch0271: 0271-cluster-ec-fix-EIO-error-for-concurrent-writes-on-sp.patch
Patch0272: 0272-cluster-ec-Always-read-from-good-mask.patch
Patch0273: 0273-cluster-ec-Fix-reopen-flags-to-avoid-misbehavior.patch
Patch0274: 0274-cluster-ec-Update-lock-good_mask-on-parent-fop-failu.patch
Patch0275: 0275-cluster-ec-Create-heal-task-with-heal-process-id.patch
Patch0276: 0276-features-utime-always-update-ctime-at-setattr.patch
Patch0277: 0277-geo-rep-Fix-Config-Get-Race.patch
Patch0278: 0278-geo-rep-Fix-worker-connection-issue.patch
Patch0279: 0279-posix-In-brick_mux-brick-is-crashed-while-start-stop.patch
Patch0280: 0280-performance-md-cache-Do-not-skip-caching-of-null-cha.patch
Patch0281: 0281-ctime-Fix-incorrect-realtime-passed-to-frame-root-ct.patch
Patch0282: 0282-geo-rep-Fix-the-name-of-changelog-archive-file.patch
Patch0283: 0283-ctime-Fix-ctime-issue-with-utime-family-of-syscalls.patch
Patch0284: 0284-posix-log-aio_error-return-codes-in-posix_fs_health_.patch
Patch0285: 0285-glusterd-glusterd-service-is-getting-timed-out-on-sc.patch
Patch0286: 0286-glusterfs.spec.in-added-script-files-for-machine-com.patch
Patch0287: 0287-cluster-ec-Fail-fsync-flush-for-files-on-update-size.patch
Patch0288: 0288-cluster-ec-Fix-coverity-issues.patch
Patch0289: 0289-cluster-ec-quorum-count-implementation.patch
Patch0290: 0290-glusterd-tag-disperse.quorum-count-for-31306.patch
Patch0291: 0291-cluster-ec-Mark-release-only-when-it-is-acquired.patch
Patch0292: 0292-rpc-Update-address-family-if-it-is-not-provide-in-cm.patch
Patch0293: 0293-glusterd-IPV6-hostname-address-is-not-parsed-correct.patch
Patch0294: 0294-eventsapi-Set-IPv4-IPv6-family-based-on-input-IP.patch
Patch0295: 0295-ctime-rebalance-Heal-ctime-xattr-on-directory-during.patch
Patch0296: 0296-glusterfind-pre-command-failure-on-a-modify.patch
Patch0297: 0297-rpmbuild-fixing-the-build-errors-with-2a905a8ae.patch
Patch0298: 0298-geo-rep-fix-sub-command-during-worker-connection.patch
Patch0299: 0299-geo-rep-performance-improvement-while-syncing-rename.patch
Patch0300: 0300-cli-remove-the-warning-displayed-when-remove-brick-s.patch
Patch0301: 0301-posix-Brick-is-going-down-unexpectedly.patch
Patch0302: 0302-cluster-ec-prevent-filling-shd-log-with-table-not-fo.patch
Patch0303: 0303-posix-heketidbstorage-bricks-go-down-during-PVC-crea.patch
Patch0304: 0304-cluster-dht-Correct-fd-processing-loop.patch
Patch0305: 0305-glusterd-rebalance-start-should-fail-when-quorum-is-.patch
Patch0306: 0306-cli-fix-distCount-value.patch
Patch0307: 0307-ssl-fix-RHEL8-regression-failure.patch
Patch0308: 0308-dht-Rebalance-causing-IO-Error-File-descriptor-in-ba.patch
Patch0309: 0309-geo-rep-Fix-config-upgrade-on-non-participating-node.patch
Patch0310: 0310-tests-test-case-for-non-root-geo-rep-setup.patch
Patch0311: 0311-geo-rep-Fix-Permission-denied-traceback-on-non-root-.patch
Patch0312: 0312-Scripts-quota_fsck-script-KeyError-contri_size.patch
Patch0313: 0313-extras-Cgroup-CPU-Mem-restriction-are-not-working-on.patch
Patch0314: 0314-glusterd-tier-is_tier_enabled-inserted-causing-check.patch
Patch0315: 0315-geo-rep-Fix-py2-py3-compatibility-in-repce.patch
Patch0316: 0316-spec-fixed-python-prettytable-dependency-for-rhel6.patch
Patch0317: 0317-Update-rfc.sh-to-rhgs-3.5.1.patch
Patch0318: 0318-Update-rfc.sh-to-rhgs-3.5.1.patch
Patch0319: 0319-features-snapview-server-obtain-the-list-of-snapshot.patch
Patch0320: 0320-gf-event-Handle-unix-volfile-servers.patch
Patch0321: 0321-Adding-white-spaces-to-description-of-set-group.patch
Patch0322: 0322-glusterd-display-correct-rebalance-data-size-after-g.patch
Patch0323: 0323-cli-display-detailed-rebalance-info.patch
Patch0324: 0324-extras-hooks-Add-SELinux-label-on-new-bricks-during-.patch
Patch0325: 0325-extras-hooks-Install-and-package-newly-added-post-ad.patch
Patch0326: 0326-tests-subdir-mount.t-is-failing-for-brick_mux-regrss.patch
Patch0327: 0327-glusterfind-integrate-with-gfid2path.patch
Patch0328: 0328-glusterd-Add-warning-and-abort-in-case-of-failures-i.patch
Patch0329: 0329-cluster-afr-Heal-entries-when-there-is-a-source-no-h.patch
Patch0330: 0330-mount.glusterfs-change-the-error-message.patch
Patch0331: 0331-features-locks-Do-special-handling-for-op-version-3..patch
Patch0332: 0332-Removing-one-top-command-from-gluster-v-help.patch
Patch0333: 0333-rpc-Synchronize-slot-allocation-code.patch
Patch0334: 0334-dht-log-getxattr-failure-for-node-uuid-at-DEBUG.patch
Patch0335: 0335-tests-RHEL8-test-failure-fixes-for-RHGS.patch
Patch0336: 0336-spec-check-and-return-exit-code-in-rpm-scripts.patch
Patch0337: 0337-fuse-Set-limit-on-invalidate-queue-size.patch
Patch0338: 0338-glusterfs-fuse-Reduce-the-default-lru-limit-value.patch
Patch0339: 0339-geo-rep-fix-integer-config-validation.patch
Patch0340: 0340-rpc-event_slot_alloc-converted-infinite-loop-after-r.patch
Patch0341: 0341-socket-fix-error-handling.patch
Patch0342: 0342-Revert-hooks-remove-selinux-hooks.patch
Patch0343: 0343-extras-hooks-syntactical-errors-in-SELinux-hooks-sci.patch
Patch0344: 0344-Revert-all-fixes-to-include-SELinux-hook-scripts.patch
Patch0345: 0345-read-ahead-io-cache-turn-off-by-default.patch
Patch0346: 0346-fuse-degrade-logging-of-write-failure-to-fuse-device.patch
Patch0347: 0347-tools-glusterfind-handle-offline-bricks.patch
Patch0348: 0348-glusterfind-Fix-py2-py3-issues.patch
Patch0349: 0349-glusterfind-python3-compatibility.patch
Patch0350: 0350-tools-glusterfind-Remove-an-extra-argument.patch
Patch0351: 0351-server-Mount-fails-after-reboot-1-3-gluster-nodes.patch
Patch0352: 0352-spec-fixed-missing-dependencies-for-glusterfs-clouds.patch
Patch0353: 0353-build-glusterfs-ganesha-pkg-requires-python3-policyc.patch
Patch0354: 0354-core-fix-memory-pool-management-races.patch
Patch0355: 0355-core-Prevent-crash-on-process-termination.patch
Patch0356: 0356-Update-rfc.sh-to-rhgs-3.5.1-rhel-8.patch
Patch0357: 0357-ganesha-ha-updates-for-pcs-0.10.x-i.e.-in-Fedora-29-.patch
Patch0358: 0358-inode-fix-wrong-loop-count-in-__inode_ctx_free.patch
Patch0359: 0359-dht-gf_defrag_process_dir-is-called-even-if-gf_defra.patch
Patch0360: 0360-rpc-Make-ssl-log-more-useful.patch
Patch0361: 0361-snap_scheduler-python3-compatibility-and-new-test-ca.patch
Patch0362: 0362-write-behind-fix-data-corruption.patch
Patch0363: 0363-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
Patch0364: 0364-dht-fixing-rebalance-failures-for-files-with-holes.patch
Patch0365: 0365-build-geo-rep-requires-relevant-selinux-permission-f.patch
Patch0366: 0366-snapshot-fix-python3-issue-in-gcron.patch
Patch0367: 0367-dht-Handle-setxattr-and-rm-race-for-directory-in-reb.patch
Patch0368: 0368-Update-rfc.sh-to-rhgs-3.5.2.patch
Patch0369: 0369-cluster-ec-Return-correct-error-code-and-log-message.patch
Patch0370: 0370-dht-Do-opendir-selectively-in-gf_defrag_process_dir.patch
Patch0371: 0371-common-ha-cluster-status-shows-FAILOVER-when-actuall.patch
Patch0372: 0372-posix-fix-seek-functionality.patch
Patch0373: 0373-build-geo-rep-sub-pkg-requires-policycoreutils-pytho.patch
Patch0374: 0374-open-behind-fix-missing-fd-reference.patch
Patch0375: 0375-features-shard-Send-correct-size-when-reads-are-sent.patch
Patch0376: 0376-features-shard-Fix-crash-during-shards-cleanup-in-er.patch
Patch0377: 0377-syncop-improve-scaling-and-implement-more-tools.patch
Patch0378: 0378-Revert-open-behind-fix-missing-fd-reference.patch
Patch0379: 0379-glusterd-add-missing-synccond_broadcast.patch
Patch0380: 0380-features-shard-Aggregate-size-block-count-in-iatt-be.patch
Patch0381: 0381-dht-add-null-check-in-gf_defrag_free_dir_dfmeta.patch
Patch0382: 0382-features-shard-Aggregate-file-size-block-count-befor.patch
Patch0383: 0383-common-ha-ganesha-ha.sh-bad-test-for-rhel-centos-for.patch
Patch0384: 0384-Update-rfc.sh-to-rhgs-3.5.3.patch
Patch0385: 0385-glusterd-start-glusterd-automatically-on-abnormal-sh.patch
Patch0386: 0386-glusterd-increase-the-StartLimitBurst.patch
Patch0387: 0387-To-fix-readdir-ahead-memory-leak.patch
Patch0388: 0388-rpc-Cleanup-SSL-specific-data-at-the-time-of-freeing.patch
Patch0389: 0389-posix-Avoid-diskpace-error-in-case-of-overwriting-th.patch
Patch0390: 0390-glusterd-deafult-options-after-volume-reset.patch
Patch0391: 0391-glusterd-unlink-the-file-after-killing-the-process.patch
Patch0392: 0392-glusterd-Brick-process-fails-to-come-up-with-brickmu.patch
Patch0393: 0393-afr-restore-timestamp-of-files-during-metadata-heal.patch
Patch0394: 0394-man-gluster-Add-volume-top-command-to-gluster-man-pa.patch
Patch0395: 0395-Cli-Removing-old-log-rotate-command.patch
Patch0396: 0396-Updating-gluster-manual.patch
Patch0397: 0397-mgmt-brick-mux-Avoid-sending-two-response-when-attac.patch
Patch0398: 0398-ec-change-error-message-for-heal-commands-for-disper.patch
Patch0399: 0399-glusterd-coverity-fixes.patch
Patch0400: 0400-cli-throw-a-warning-if-replica-count-greater-than-3.patch
Patch0401: 0401-cli-change-the-warning-message.patch
Patch0402: 0402-afr-wake-up-index-healer-threads.patch
Patch0403: 0403-Fix-spurious-failure-in-bug-1744548-heal-timeout.t.patch
Patch0404: 0404-tests-Fix-spurious-failure.patch
Patch0405: 0405-core-fix-return-of-local-in-__nlc_inode_ctx_get.patch
Patch0406: 0406-afr-support-split-brain-CLI-for-replica-3.patch
Patch0407: 0407-geo-rep-Improving-help-message-in-schedule_georep.py.patch
Patch0408: 0408-geo-rep-Fix-ssh-port-validation.patch
Patch0409: 0409-system-posix-acl-update-ctx-only-if-iatt-is-non-NULL.patch
Patch0410: 0410-afr-prevent-spurious-entry-heals-leading-to-gfid-spl.patch
Patch0411: 0411-tools-glusterfind-validate-session-name.patch
Patch0412: 0412-gluster-smb-add-smb-parameter-when-access-gluster-by.patch
Patch0413: 0413-extras-hooks-Remove-smb.conf-parameter-allowing-gues.patch
Patch0414: 0414-cluster-syncop-avoid-duplicate-unlock-of-inodelk-ent.patch
Patch0415: 0415-dht-Fix-stale-layout-and-create-issue.patch
Patch0416: 0416-tests-fix-spurious-failure-of-bug-1402841.t-mt-dir-s.patch
Patch0417: 0417-events-fix-IPv6-memory-corruption.patch
Patch0418: 0418-md-cache-avoid-clearing-cache-when-not-necessary.patch
Patch0419: 0419-cluster-afr-fix-race-when-bricks-come-up.patch
Patch0420: 0420-scripts-quota_fsck-script-TypeError-d-format-not-dic.patch
Patch0421: 0421-Improve-logging-in-EC-client-and-lock-translator.patch
Patch0422: 0422-cluster-afr-Prioritize-ENOSPC-over-other-errors.patch
Patch0423: 0423-ctime-Fix-ctime-inconsisteny-with-utimensat.patch
Patch0424: 0424-afr-make-heal-info-lockless.patch
Patch0425: 0425-tests-Fix-spurious-self-heald.t-failure.patch
Patch0426: 0426-geo-rep-Fix-for-Transport-End-Point-not-connected-is.patch
Patch0427: 0427-storage-posix-Fixing-a-coverity-issue.patch
Patch0428: 0428-glusterd-ganesha-fixing-resource-leak-in-tear_down_c.patch
Patch0429: 0429-dht-rebalance-fixing-failure-occurace-due-to-rebalan.patch
Patch0430: 0430-Fix-some-Null-pointer-dereference-coverity-issues.patch
Patch0431: 0431-glusterd-check-for-same-node-while-adding-bricks-in-.patch
Patch0432: 0432-glusterd-Fix-coverity-defects-put-coverity-annotatio.patch
Patch0433: 0433-socket-Resolve-ssl_ctx-leak-for-a-brick-while-only-m.patch
Patch0434: 0434-glusterd-ganesha-fix-Coverity-CID-1405785.patch
Patch0435: 0435-glusterd-coverity-fix.patch
Patch0436: 0436-glusterd-coverity-fixes.patch
Patch0437: 0437-glusterd-prevent-use-after-free-in-glusterd_op_ac_se.patch
Patch0438: 0438-dht-sparse-files-rebalance-enhancements.patch
Patch0439: 0439-cluster-afr-Delay-post-op-for-fsync.patch
Patch0440: 0440-glusterd-snapshot-Improve-log-message-during-snapsho.patch
Patch0441: 0441-fuse-occasional-logging-for-fuse-device-weird-write-.patch
Patch0442: 0442-fuse-correctly-handle-setxattr-values.patch
Patch0443: 0443-fuse-fix-high-sev-coverity-issue.patch
Patch0444: 0444-mount-fuse-Fixing-a-coverity-issue.patch
Patch0445: 0445-feature-changelog-Avoid-thread-creation-if-xlator-is.patch
Patch0446: 0446-bitrot-Make-number-of-signer-threads-configurable.patch
Patch0447: 0447-core-brick_mux-brick-crashed-when-creating-and-delet.patch
Patch0448: 0448-Posix-Use-simple-approach-to-close-fd.patch
Patch0449: 0449-test-Test-case-brick-mux-validation-in-cluster.t-is-.patch
Patch0450: 0450-tests-basic-ctime-enable-ctime-before-testing.patch
Patch0451: 0451-extras-Modify-group-virt-to-include-network-related-.patch
Patch0452: 0452-Tier-DHT-Handle-the-pause-case-missed-out.patch
Patch0453: 0453-glusterd-add-brick-command-failure.patch
Patch0454: 0454-features-locks-avoid-use-after-freed-of-frame-for-bl.patch
Patch0455: 0455-locks-prevent-deletion-of-locked-entries.patch
Patch0456: 0456-add-clean-local-after-grant-lock.patch
Patch0457: 0457-cluster-ec-Improve-detection-of-new-heals.patch
Patch0458: 0458-features-bit-rot-stub-clean-the-mutex-after-cancelli.patch
Patch0459: 0459-features-bit-rot-Unconditionally-sign-the-files-duri.patch
Patch0460: 0460-cluster-ec-Remove-stale-entries-from-indices-xattrop.patch
Patch0461: 0461-geo-replication-Fix-IPv6-parsing.patch
Patch0462: 0462-Issue-with-gf_fill_iatt_for_dirent.patch
Patch0463: 0463-cluster-ec-Change-handling-of-heal-failure-to-avoid-.patch
Patch0464: 0464-storage-posix-Remove-nr_files-usage.patch
Patch0465: 0465-posix-Implement-a-janitor-thread-to-close-fd.patch
Patch0466: 0466-cluster-ec-Change-stale-index-handling.patch
Patch0467: 0467-build-Added-dependency-for-glusterfs-selinux.patch
Patch0468: 0468-build-Update-the-glusterfs-selinux-version.patch
Patch0469: 0469-cluster-ec-Don-t-trigger-heal-for-stale-index.patch
Patch0470: 0470-extras-snap_scheduler-changes-in-gluster-shared-stor.patch
Patch0471: 0471-nfs-ganesha-gluster_shared_storage-fails-to-automoun.patch
Patch0472: 0472-geo-rep-gluster_shared_storage-fails-to-automount-on.patch
Patch0473: 0473-glusterd-Fix-Add-brick-with-increasing-replica-count.patch
Patch0474: 0474-features-locks-posixlk-clear-lock-should-set-error-a.patch
Patch0475: 0475-fuse-lock-interrupt-fix-flock_interrupt.t.patch
Patch0476: 0476-mount-fuse-use-cookies-to-get-fuse-interrupt-record-.patch
Patch0477: 0477-glusterd-snapshot-Snapshot-prevalidation-failure-not.patch
Patch0478: 0478-DHT-Fixing-rebalance-failure-on-issuing-stop-command.patch
Patch0479: 0479-ganesha-ha-revised-regex-exprs-for-status.patch
Patch0480: 0480-DHT-Rebalance-Ensure-Rebalance-reports-status-only-o.patch
Patch0481: 0481-RHGS-3.5.3-rebuild-to-ship-with-RHEL.patch
%description
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package includes the glusterfs binary, the glusterfsd daemon, and the
libglusterfs and glusterfs translator modules common to both the GlusterFS
server and client framework.

%package api
Summary:          GlusterFS api library
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description api
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the glusterfs libgfapi library.

%package api-devel
Summary:          Development Libraries
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-devel%{?_isa} = %{version}-%{release}
Requires:         libacl-devel
Requires:         %{name}-api%{?_isa} = %{version}-%{release}

%description api-devel
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the api include files.

%package cli
Summary:          GlusterFS CLI
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description cli
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the GlusterFS CLI application and its man page.
%package cloudsync-plugins
Summary:          Cloudsync Plugins
BuildRequires:    libcurl-devel
Requires:         glusterfs-libs = %{version}-%{release}

%description cloudsync-plugins
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides cloudsync plugins for the archival feature.

%package devel
Summary:          Development Libraries
Requires:         %{name}%{?_isa} = %{version}-%{release}
# Needed for the Glupy examples to work
%if ( 0%{!?_without_extra_xlators:1} )
Requires:         %{name}-extra-xlators%{?_isa} = %{version}-%{release}
%endif
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
%if ( 0%{!?_without_server:1} )
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
%endif

%description devel
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the development libraries and include files.

%if ( 0%{!?_without_extra_xlators:1} )
%package extra-xlators
Summary:          Extra Gluster filesystem Translators
# We need the python-gluster rpm for the gluster module's __init__.py in the
# Python site-packages area
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
Requires:         python%{_pythonver}

%description extra-xlators
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides extra filesystem Translators, such as Glupy,
for GlusterFS.
%endif
%package fuse
Summary:          Fuse client
BuildRequires:    fuse-devel
Requires:         attr
Requires:         psmisc

Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}

Obsoletes:        %{name}-client < %{version}-%{release}
Provides:         %{name}-client = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description fuse
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for FUSE-based clients and includes the
glusterfs(d) binary.

%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
%package ganesha
Summary:          NFS-Ganesha configuration
Group:            Applications/File

Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         nfs-ganesha-selinux >= 2.7.3
Requires:         nfs-ganesha-gluster >= 2.7.3
Requires:         pcs, dbus
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
Requires:         cman, pacemaker, corosync
%endif

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 5 )
# we need the portblock resource agent from resource-agents 3.9.5 or later.
Requires:         resource-agents >= 3.9.5
Requires:         net-tools
%endif

%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
Requires:         selinux-policy >= 3.13.1-160
Requires(post):   policycoreutils-python
Requires(postun): policycoreutils-python
%else
Requires(post):   policycoreutils-python-utils
Requires(postun): policycoreutils-python-utils
%endif
%endif

%description ganesha
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over InfiniBand RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the configuration and related files for using
NFS-Ganesha as the NFS server with GlusterFS.
%endif

%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
Summary:          GlusterFS Geo-replication
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         python%{_pythonver}
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
Requires:         python-prettytable
%else
Requires:         python%{_pythonver}-prettytable
%endif
Requires:         python%{_pythonver}-gluster = %{version}-%{release}

Requires:         rsync
Requires:         util-linux
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
# required for setting selinux booleans
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
Requires(post):      policycoreutils-python-utils
Requires(postun):    policycoreutils-python-utils
Requires:            selinux-policy-targeted
Requires(post):      selinux-policy-targeted
BuildRequires:       selinux-policy-devel
%endif

%description geo-replication
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for geo-replication.
%endif

%package libs
Summary:          GlusterFS common libraries

%description libs
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the base GlusterFS libraries.

%package -n python%{_pythonver}-gluster
Summary:          GlusterFS python library
Requires:         python%{_pythonver}
%if ( ! %{_usepython3} )
%{?python_provide:%python_provide python-gluster}
Provides:         python-gluster = %{version}-%{release}
Obsoletes:        python-gluster < 3.10
%endif

%description -n python%{_pythonver}-gluster
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package contains the Python modules of GlusterFS and owns the gluster
namespace.

%if ( 0%{!?_without_rdma:1} )
%package rdma
Summary:          GlusterFS rdma support for ib-verbs
%if ( 0%{?fedora} && 0%{?fedora} > 26 )
BuildRequires:    rdma-core-devel
%else
BuildRequires:    libibverbs-devel
BuildRequires:    librdmacm-devel >= 1.0.15
%endif
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description rdma
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support for the ib-verbs library.
%endif

%if ( 0%{!?_without_regression_tests:1} )
%package regression-tests
Summary:          Development Tools
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
## thin provisioning support
Requires:         lvm2 >= 2.02.89
Requires:         perl(App::Prove) perl(Test::Harness) gcc util-linux-ng
Requires:         python%{_pythonver}
Requires:         attr dbench file git libacl-devel net-tools
Requires:         nfs-utils xfsprogs yajl psmisc bc

%description regression-tests
The Gluster Test Framework is a suite of scripts used for
regression testing of Gluster.
%endif

%if ( 0%{!?_without_ocf:1} )
%package resource-agents
Summary:          OCF Resource Agents for GlusterFS
License:          GPLv3+
BuildArch:        noarch
# this Group handling comes from the Fedora resource-agents package
# for glusterd
Requires:         %{name}-server = %{version}-%{release}
# depending on the distribution, we need pacemaker or resource-agents
Requires:         %{_prefix}/lib/ocf/resource.d

%description resource-agents
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the resource agents which plug glusterd into
Open Cluster Framework (OCF) compliant cluster resource managers,
like Pacemaker.
%endif

%if ( 0%{!?_without_server:1} )
%package server
Summary:          Clustered file-system server
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-cli%{?_isa} = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
%if ( 0%{?fedora} && 0%{?fedora} >= 30  || ( 0%{?rhel} && 0%{?rhel} >= 8 ) )
Requires:         glusterfs-selinux >= 1.0-1
%endif
# some daemons (like quota) use a fuse-mount, glusterfsd is part of -fuse
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
# self-heal daemon, rebalance, nfs-server etc. are actually clients
Requires:         %{name}-api%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
# lvm2 for snapshot, and nfs-utils and rpcbind/portmap for the gnfs server
Requires:         lvm2
Requires:         nfs-utils
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%else
Requires(post):   /sbin/chkconfig
Requires(preun):  /sbin/service
Requires(preun):  /sbin/chkconfig
Requires(postun): /sbin/service
%endif
%if (0%{?_with_firewalld:1})
# we install firewalld rules, so we need to have the directory owned
%if ( 0%{!?rhel} )
# not on RHEL because firewalld-filesystem appeared in 7.3
# when EL7 rpm gets weak dependencies we can add a Suggests:
Requires:         firewalld-filesystem
%endif
%endif
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
Requires:         rpcbind
%else
Requires:         portmap
%endif
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
Requires:         python-argparse
%endif
%if ( 0%{?fedora} && 0%{?fedora} > 27 ) || ( 0%{?rhel} && 0%{?rhel} > 7 )
Requires:         python%{_pythonver}-pyxattr
%else
Requires:         pyxattr
%endif
%if (0%{?_with_valgrind:1})
Requires:         valgrind
%endif

%description server
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the glusterfs server daemon.
%endif

%package client-xlators
Summary:          GlusterFS client-side translators
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description client-xlators
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from the GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the translators needed on any GlusterFS client.

%if ( 0%{!?_without_events:1} )
%package events
Summary:          GlusterFS Events
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         python%{_pythonver}
Requires:         python%{_pythonver}-gluster = %{version}-%{release}
%if ( 0%{?rhel} && 0%{?rhel} < 8 )
Requires:         python-requests
%else
Requires:         python%{_pythonver}-requests
%endif
%if ( 0%{?rhel} && 0%{?rhel} < 7 )
Requires:         python-prettytable
Requires:         python-argparse
%else
Requires:         python%{_pythonver}-prettytable
%endif
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif

%description events
GlusterFS Events

%endif

%prep
%setup -q -n %{name}-%{version}%{?prereltag}

# sanitization scriptlet for patches with file renames
ls %{_topdir}/SOURCES/*.patch | sort | \
while read p
do
    # if the destination file exists, it's most probably stale,
    # so we must remove it
    rename_to=( $(grep -i 'rename to' $p | cut -f 3 -d ' ') )
    if [ ${#rename_to[*]} -gt 0 ]; then
        for f in ${rename_to[*]}
        do
            if [ -f $f ]; then
                rm -f $f
            elif [ -d $f ]; then
                rm -rf $f
            fi
        done
    fi

    SOURCE_FILES=( $(egrep '^\-\-\- a/' $p | cut -f 2- -d '/') )
    DEST_FILES=( $(egrep '^\+\+\+ b/' $p | cut -f 2- -d '/') )
    EXCLUDE_DOCS=()
    for idx in ${!SOURCE_FILES[@]}; do
        # skip the doc
        source_file=${SOURCE_FILES[$idx]}
        dest_file=${DEST_FILES[$idx]}
        if [[ "$dest_file" =~ ^doc/.+ ]]; then
            if [ "$source_file" != "dev/null" ] && [ ! -f "$dest_file" ]; then
                # if the patch is being applied to a doc file and the doc file
                # hasn't been added so far, then we need to exclude it
                EXCLUDE_DOCS=( ${EXCLUDE_DOCS[*]} "$dest_file" )
            fi
        fi
    done
    EXCLUDE_DOCS_OPT=""
    # iterate over all collected docs, not just the first array element
    for doc in "${EXCLUDE_DOCS[@]}"; do
        EXCLUDE_DOCS_OPT="--exclude=$doc $EXCLUDE_DOCS_OPT"
    done

    # HACK to fix build
    bn=$(basename $p)
    if [ "$bn" == "0085-Revert-all-remove-code-which-is-not-being-considered.patch" ]; then
        (patch -p1 -u -F3 < $p || :)
        if [ -f libglusterfs/Makefile.am.rej ]; then
            sed -i -e 's/^SUBDIRS = src/SUBDIRS = src src\/gfdb/g;s/^CLEANFILES = /CLEANFILES =/g' libglusterfs/Makefile.am
        fi
    elif [ "$bn" == "0053-Revert-packaging-ganesha-remove-glusterfs-ganesha-su.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0055-Revert-storhaug-HA-first-step-remove-resource-agents.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0090-Revert-build-conditionally-build-legacy-gNFS-server-.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0117-spec-Remove-thin-arbiter-package.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0023-hooks-remove-selinux-hooks.patch" ]; then
        (patch -p1 < $p || :)
    elif [ "$bn" == "0042-spec-client-server-Builds-are-failing-on-rhel-6.patch" ]; then
        (patch -p1 < $p || :)
    else
        # apply the patch with 'git apply'
        git apply -p1 --exclude=rfc.sh \
                      --exclude=.gitignore \
                      --exclude=.testignore \
                      --exclude=MAINTAINERS \
                      --exclude=extras/checkpatch.pl \
                      --exclude=build-aux/checkpatch.pl \
                      --exclude='tests/*' \
                      ${EXCLUDE_DOCS_OPT} \
                      $p
    fi

done

echo "fixing python shebangs..."
%if ( %{_usepython3} )
    for i in `find . -type f -exec bash -c "if file {} | grep 'Python script, ASCII text executable' >/dev/null; then echo {}; fi" ';'`; do
        sed -i -e 's|^#!/usr/bin/python.*|#!%{__python3}|' -e 's|^#!/usr/bin/env python.*|#!%{__python3}|' $i
    done
%else
    for f in api events extras geo-replication libglusterfs tools xlators; do
        find $f -type f -exec sed -i 's|/usr/bin/python3|/usr/bin/python2|' {} \;
    done
%endif
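
# For illustration, the loop above rewrites shebangs such as
#   #!/usr/bin/env python   ->   #!%{__python3}
#   #!/usr/bin/python2      ->   #!%{__python3}
# The whole shebang line is matched, so any interpreter arguments are dropped.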


%build

# On RHEL7 a few hardening flags are enabled by default, but the default
# RELRO behaviour is partial; convert it to full
%if ( 0%{?rhel} && 0%{?rhel} >= 7 )
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
export LDFLAGS
%else
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
CFLAGS="$RPM_OPT_FLAGS -fPIE -DPIE"
LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro,-z,now"
%else
# It appears that with gcc-4.1.2 in RHEL5 there is an issue using both -fPIC
# and -fPIE that makes -z relro not work; -fPIE seems to undo what -fPIC does
CFLAGS="$CFLAGS $RPM_OPT_FLAGS"
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
%endif
export CFLAGS
export LDFLAGS
%endif

./autogen.sh && %configure \
        %{?_with_asan} \
        %{?_with_cmocka} \
        %{?_with_debug} \
        %{?_with_firewalld} \
        %{?_with_tmpfilesdir} \
        %{?_with_tsan} \
        %{?_with_valgrind} \
        %{?_without_epoll} \
        %{?_without_events} \
        %{?_without_fusermount} \
        %{?_without_georeplication} \
        %{?_without_ocf} \
        %{?_without_rdma} \
        %{?_without_server} \
        %{?_without_syslog} \
        %{?_without_tiering} \
        %{?_with_ipv6default} \
        %{?_without_libtirpc}

# fix hardening and remove rpath in shlibs
%if ( 0%{?fedora} && 0%{?fedora} > 17 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
sed -i 's| \\\$compiler_flags |&\\\$LDFLAGS |' libtool
%endif
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|' libtool
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|' libtool
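
# An illustrative post-build sanity check for the rpath removal (the .libs
# path below is just an example of where libtool keeps intermediate objects):
#   readelf -d libglusterfs/src/.libs/*.so | grep -iE 'rpath|runpath'
# should print nothing once the tweaks above have taken effect.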

make %{?_smp_mflags}

%check
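# 'make check' drives the tree's unit tests; these are cmocka-based, so the
# run is most meaningful when the rpm was built --with cmocka.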
make check

%install
rm -rf %{buildroot}
make install DESTDIR=%{buildroot}
%if ( 0%{!?_without_server:1} )
%if ( 0%{_for_fedora_koji_builds} )
install -D -p -m 0644 %{SOURCE1} \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
install -D -p -m 0644 %{SOURCE2} \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterfsd
%else
install -D -p -m 0644 extras/glusterd-sysconfig \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
%endif
%endif

mkdir -p %{buildroot}%{_localstatedir}/log/glusterd
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfs
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfsd
mkdir -p %{buildroot}%{_rundir}/gluster

# Remove unwanted files from all the shared libraries
find %{buildroot}%{_libdir} -name '*.a' -delete
find %{buildroot}%{_libdir} -name '*.la' -delete

# Remove installed docs, the ones we want are included by %%doc, in
# /usr/share/doc/glusterfs or /usr/share/doc/glusterfs-x.y.z depending
# on the distribution
%if ( 0%{?fedora} && 0%{?fedora} > 19 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
rm -rf %{buildroot}%{_pkgdocdir}/*
%else
rm -rf %{buildroot}%{_defaultdocdir}/%{name}
mkdir -p %{buildroot}%{_pkgdocdir}
%endif
head -50 ChangeLog > ChangeLog.head && mv ChangeLog.head ChangeLog
cat << EOM >> ChangeLog

More commit messages for this ChangeLog can be found at
https://forge.gluster.org/glusterfs-core/glusterfs/commits/v%{version}%{?prereltag}
EOM

# Remove benchmarking and other unpackaged files
# make install always puts these in %%{_defaultdocdir}/%%{name} so don't
# use %%{_pkgdocdir}; that will be wrong on later Fedora distributions
rm -rf %{buildroot}%{_defaultdocdir}/%{name}/benchmarking
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs-mode.el
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs.vim

%if ( 0%{!?_without_server:1} )
# Create working directory
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd

# Update configuration file to /var/lib working directory
sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sharedstatedir}/glusterd|g' \
    %{buildroot}%{_sysconfdir}/glusterfs/glusterd.vol
%endif

# Install glusterfsd .service or init.d file
%if ( 0%{!?_without_server:1} )
%if ( 0%{_for_fedora_koji_builds} )
%service_install glusterfsd %{glusterfsd_svcfile}
%endif
%endif

install -D -p -m 0644 extras/glusterfs-logrotate \
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs

# ganesha ghosts
%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
mkdir -p %{buildroot}%{_sysconfdir}/ganesha
touch %{buildroot}%{_sysconfdir}/ganesha/ganesha-ha.conf
mkdir -p %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/
touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
touch %{buildroot}%{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
%endif

%if ( 0%{!?_without_georeplication:1} )
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
install -D -p -m 0644 extras/glusterfs-georep-logrotate \
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
%endif

%if ( 0%{!?_without_server:1} )
touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
touch %{buildroot}%{_sharedstatedir}/glusterd/options
subdirs=(add-brick create copy-file delete gsync-create remove-brick reset set start stop)
for dir in ${subdirs[@]}; do
    mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/"$dir"/{pre,post}
done
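# e.g. for the add-brick event the loop creates
#   %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre
#   %{_sharedstatedir}/glusterd/hooks/1/add-brick/post
# glusterd later runs executable S*-prefixed scripts placed in these
# directories before/after the corresponding volume operation.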
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/glustershd
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/peers
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/vols
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/nfs/run
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/bitd
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/quotad
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/scrub
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/snaps
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/ss_brick
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/nfs-server.vol
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%endif

find ./tests ./run-tests.sh -type f | cpio -pd %{buildroot}%{_prefix}/share/glusterfs
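# 'find ... | cpio -pd' copies the tests tree verbatim: find emits relative
# paths and cpio's pass-through mode recreates them (creating directories
# as needed) under %{_prefix}/share/glusterfs.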

## Install bash completion for cli
install -p -m 0744 -D extras/command-completion/gluster.bash \
    %{buildroot}%{_sysconfdir}/bash_completion.d/gluster

%if ( 0%{!?_without_server:1} )
echo "RHGS 3.5" > %{buildroot}%{_datadir}/glusterfs/release
%endif

%clean
rm -rf %{buildroot}

##-----------------------------------------------------------------------------
## All %%post should be placed here and keep them sorted
##
%post
/sbin/ldconfig
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%systemd_postun_with_restart rsyslog
%endif
%endif
exit 0

%post api
/sbin/ldconfig

%if ( 0%{!?_without_events:1} )
%post events
%service_enable glustereventsd
%endif

%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25 || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%post ganesha
semanage boolean -m ganesha_use_fusefs --on
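# the new value can be verified with: getsebool ganesha_use_fusefs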
exit 0
%endif
%endif

%if ( 0%{!?_without_georeplication:1} )
%post geo-replication
%if ( 0%{?rhel} && 0%{?rhel} >= 8 )
%selinux_set_booleans %{selinuxbooleans}
%endif
if [ $1 -ge 1 ]; then
    %systemd_postun_with_restart glusterd
fi
exit 0
%endif

%post libs
/sbin/ldconfig

%if ( 0%{!?_without_server:1} )
%post server
# Legacy server
%service_enable glusterd
%if ( 0%{_for_fedora_koji_builds} )
%service_enable glusterfsd
%endif
# ".cmd_log_history" was renamed to "cmd_history.log" in GlusterFS 3.7.
# While upgrading the glusterfs-server package from GlusterFS version <= 3.6
# to GlusterFS version 3.7, ".cmd_log_history" should be renamed to
# "cmd_history.log" to retain the cli command history contents.
if [ -f %{_localstatedir}/log/glusterfs/.cmd_log_history ]; then
    mv %{_localstatedir}/log/glusterfs/.cmd_log_history \
       %{_localstatedir}/log/glusterfs/cmd_history.log
fi

# Genuine Fedora (and EPEL) builds never put gluster files in /etc; if
# there are any files in /etc from a prior gluster.org install, move them
# to /var/lib. (N.B. Starting with 3.3.0 all gluster files are in /var/lib
# in gluster.org RPMs.) Be careful to copy them on the off chance that
# /etc and /var/lib are on separate file systems
if [ -d /etc/glusterd -a ! -h %{_sharedstatedir}/glusterd ]; then
    mkdir -p %{_sharedstatedir}/glusterd
    cp -a /etc/glusterd %{_sharedstatedir}/glusterd
    rm -rf /etc/glusterd
    ln -sf %{_sharedstatedir}/glusterd /etc/glusterd
fi

# Rename old volfiles in an RPM-standard way.  These aren't actually
# considered package config files, so %%config doesn't work for them.
if [ -d %{_sharedstatedir}/glusterd/vols ]; then
    for file in $(find %{_sharedstatedir}/glusterd/vols -name '*.vol'); do
        newfile=${file}.rpmsave
        echo "warning: ${file} saved as ${newfile}"
        cp ${file} ${newfile}
    done
fi

# add marker translator
# but first make certain that there are no old libs around to bite us
# BZ 834847
if [ -e /etc/ld.so.conf.d/glusterfs.conf ]; then
    rm -f /etc/ld.so.conf.d/glusterfs.conf
    /sbin/ldconfig
fi

%if (0%{?_with_firewalld:1})
    %firewalld_reload
%endif

%endif

##-----------------------------------------------------------------------------
## All %%pre should be placed here and keep them sorted
##
%pre
getent group gluster > /dev/null || groupadd -r gluster
getent passwd gluster > /dev/null || useradd -r -g gluster -d %{_rundir}/gluster -s /sbin/nologin -c "GlusterFS daemons" gluster
exit 0

##-----------------------------------------------------------------------------
## All %%preun should be placed here and keep them sorted
##
%if ( 0%{!?_without_events:1} )
%preun events
if [ $1 -eq 0 ]; then
    if [ -f %glustereventsd_svcfile ]; then
        %service_stop glustereventsd
        %systemd_preun glustereventsd
    fi
fi
exit 0
%endif

%if ( 0%{!?_without_server:1} )
%preun server
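# $1 is the number of package instances that remain after this transaction:
# 0 on erase, >= 1 on upgrade.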
if [ $1 -eq 0 ]; then
    if [ -f %glusterfsd_svcfile ]; then
        %service_stop glusterfsd
    fi
    %service_stop glusterd
    if [ -f %glusterfsd_svcfile ]; then
        %systemd_preun glusterfsd
    fi
    %systemd_preun glusterd
fi
if [ $1 -ge 1 ]; then
    if [ -f %glusterfsd_svcfile ]; then
        %systemd_postun_with_restart glusterfsd
    fi
    %systemd_postun_with_restart glusterd
fi
exit 0
%endif

##-----------------------------------------------------------------------------
## All %%postun should be placed here and keep them sorted
##
%postun
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%systemd_postun_with_restart rsyslog
%endif
%endif

%if ( 0%{!?_without_server:1} )
%postun server
%if (0%{?_with_firewalld:1})
    %firewalld_reload
%endif
exit 0
%endif

%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%postun ganesha
semanage boolean -m ganesha_use_fusefs --off
exit 0
%endif
%endif

##-----------------------------------------------------------------------------
## All %%trigger should be placed here and keep them sorted
##
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%trigger ganesha -- selinux-policy-targeted
semanage boolean -m ganesha_use_fusefs --on
exit 0
%endif
%endif

##-----------------------------------------------------------------------------
## All %%triggerun should be placed here and keep them sorted
##
%if ( 0%{!?_without_server:1} )
%if ( 0%{?fedora} && 0%{?fedora} > 25  || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%triggerun ganesha -- selinux-policy-targeted
semanage boolean -m ganesha_use_fusefs --off
exit 0
%endif
%endif

##-----------------------------------------------------------------------------
## All %%files should be placed here and keep them grouped
##
%files
%doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS COMMITMENT
%{_mandir}/man8/*gluster*.8*
%if ( 0%{!?_without_server:1} )
%exclude %{_mandir}/man8/gluster.8*
%endif
%dir %{_localstatedir}/log/glusterfs
%if ( 0%{!?_without_rdma:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif
%if 0%{?!_without_server:1}
%dir %{_datadir}/glusterfs
%dir %{_datadir}/glusterfs/scripts
     %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
     %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
%endif
%{_datadir}/glusterfs/scripts/identify-hangs.sh
%{_datadir}/glusterfs/scripts/collect-system-stats.sh
%{_datadir}/glusterfs/scripts/log_accounting.sh
# xlators that are needed on the client- and on the server-side
%dir %{_libdir}/glusterfs
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/auth
     %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/addr.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/login.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
     %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/socket.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/error-gen.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/delay-gen.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/io-stats.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/sink.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/trace.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/access-control.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/barrier.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cdc.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changelog.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/utime.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/gfid-access.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/namespace.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/read-only.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/shard.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-client.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/worm.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cloudsync.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/meta.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-cache.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-threads.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/md-cache.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/open-behind.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/quick-read.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/read-ahead.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/readdir-ahead.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/stat-prefetch.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/write-behind.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/nl-cache.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system/posix-acl.so
%dir %attr(0775,gluster,gluster) %{_rundir}/gluster
%if 0%{?_tmpfilesdir:1} && 0%{!?_without_server:1}
%{_tmpfilesdir}/gluster.conf
%endif
%if ( 0%{?_without_extra_xlators:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
%endif
%if ( 0%{?_without_regression_tests:1} )
%exclude %{_datadir}/glusterfs/run-tests.sh
%exclude %{_datadir}/glusterfs/tests
%endif
%if 0%{?_without_server:1}
%if ( 0%{?_with_systemd:1} )
%exclude %{_datadir}/glusterfs/scripts/control-cpu-load.sh
%exclude %{_datadir}/glusterfs/scripts/control-mem.sh
%endif
%endif

%if ( 0%{?_without_server:1} || 0%{?rhel} < 7 )
# exclude ganesha related files for rhel 6 and client builds
%exclude %{_sysconfdir}/ganesha/ganesha-ha.conf.sample
%exclude %{_libexecdir}/ganesha/*
%exclude %{_prefix}/lib/ocf/resource.d/heartbeat/*
%if ( 0%{!?_without_server:1} )
%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
%endif
%endif

%exclude %{_datadir}/glusterfs/scripts/setup-thin-arbiter.sh

%if ( 0%{?_without_server:1} )
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
%endif

%files api
%exclude %{_libdir}/*.so
# libgfapi files
%{_libdir}/libgfapi.*
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so

%files api-devel
%{_libdir}/pkgconfig/glusterfs-api.pc
%{_libdir}/libgfapi.so
%dir %{_includedir}/glusterfs
%dir %{_includedir}/glusterfs/api
     %{_includedir}/glusterfs/api/*

%files cli
%{_sbindir}/gluster
%{_mandir}/man8/gluster.8*
%{_sysconfdir}/bash_completion.d/gluster

%files cloudsync-plugins
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins
     %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsyncs3.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/cloudsync-plugins/cloudsynccvlt.so

%files devel
%dir %{_includedir}/glusterfs
     %{_includedir}/glusterfs/*
%exclude %{_includedir}/glusterfs/api
%exclude %{_libdir}/libgfapi.so
%{_libdir}/*.so
%if ( 0%{?_without_server:1} )
%exclude %{_libdir}/pkgconfig/libgfchangelog.pc
%exclude %{_libdir}/libgfchangelog.so
%if ( 0%{!?_without_tiering:1} )
%exclude %{_libdir}/pkgconfig/libgfdb.pc
%endif
%else
%{_libdir}/pkgconfig/libgfchangelog.pc
%if ( 0%{!?_without_tiering:1} )
%{_libdir}/pkgconfig/libgfdb.pc
%endif
%endif

%files client-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/*.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so

%if ( 0%{!?_without_extra_xlators:1} )
%files extra-xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/playground/template.so
%endif

%files fuse
# glusterfs is a symlink to glusterfsd, -server depends on -fuse.
%{_sbindir}/glusterfs
%{_sbindir}/glusterfsd
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/fuse.so
/sbin/mount.glusterfs
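# mount.glusterfs makes the standard mount(8) syntax work for gluster
# volumes, e.g. (illustrative): mount -t glusterfs server1:/myvol /mnt/myvol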
%if ( 0%{!?_without_fusermount:1} )
%{_bindir}/fusermount-glusterfs
%endif

%if ( 0%{!?_without_georeplication:1} )
%files geo-replication
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep

%{_sbindir}/gfind_missing_files
%{_sbindir}/gluster-mountbroker
%dir %{_libexecdir}/glusterfs
%dir %{_libexecdir}/glusterfs/python
%dir %{_libexecdir}/glusterfs/python/syncdaemon
     %{_libexecdir}/glusterfs/gsyncd
     %{_libexecdir}/glusterfs/python/syncdaemon/*
     %{_libexecdir}/glusterfs/gverify.sh
     %{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
     %{_libexecdir}/glusterfs/peer_gsec_create
     %{_libexecdir}/glusterfs/peer_mountbroker
     %{_libexecdir}/glusterfs/peer_mountbroker.py*
     %{_libexecdir}/glusterfs/gfind_missing_files
     %{_libexecdir}/glusterfs/peer_georep-sshkey.py*
%{_sbindir}/gluster-georep-sshkey

       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/geo-replication
%ghost      %attr(0644,-,-) %{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/pre

%dir %{_datadir}/glusterfs
%dir %{_datadir}/glusterfs/scripts
     %{_datadir}/glusterfs/scripts/get-gfid.sh
     %{_datadir}/glusterfs/scripts/slave-upgrade.sh
     %{_datadir}/glusterfs/scripts/gsync-upgrade.sh
     %{_datadir}/glusterfs/scripts/generate-gfid-file.sh
     %{_datadir}/glusterfs/scripts/gsync-sync-gfid
     %{_datadir}/glusterfs/scripts/schedule_georep.py*
%endif

%files libs
%{_libdir}/*.so.*
%exclude %{_libdir}/libgfapi.*
%if ( 0%{!?_without_tiering:1} )
# libgfdb is only needed server-side
%exclude %{_libdir}/libgfdb.*
%endif

%files -n python%{_pythonver}-gluster
# introducing the glusterfs module in site packages,
# so that all other gluster submodules can reside in the same namespace.
%if ( %{_usepython3} )
%dir %{python3_sitelib}/gluster
     %{python3_sitelib}/gluster/__init__.*
     %{python3_sitelib}/gluster/__pycache__
     %{python3_sitelib}/gluster/cliutils
%else
%dir %{python2_sitelib}/gluster
     %{python2_sitelib}/gluster/__init__.*
     %{python2_sitelib}/gluster/cliutils
%endif

%if ( 0%{!?_without_rdma:1} )
%files rdma
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
     %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif

%if ( 0%{!?_without_regression_tests:1} )
%files regression-tests
%dir %{_datadir}/glusterfs
     %{_datadir}/glusterfs/run-tests.sh
     %{_datadir}/glusterfs/tests
%exclude %{_datadir}/glusterfs/tests/vagrant
%endif

%if ( 0%{!?_without_server:1} && 0%{?rhel} > 6 )
%files ganesha
%dir %{_libexecdir}/ganesha
%{_sysconfdir}/ganesha/ganesha-ha.conf.sample
%{_libexecdir}/ganesha/*
%{_prefix}/lib/ocf/resource.d/heartbeat/*
%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
%ghost      %attr(0644,-,-) %config(noreplace) %{_sysconfdir}/ganesha/ganesha-ha.conf
%ghost %dir %attr(0755,-,-) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha
%ghost      %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha.conf
%ghost      %attr(0644,-,-) %config(noreplace) %{_localstatedir}/run/gluster/shared_storage/nfs-ganesha/ganesha-ha.conf
%endif

%if ( 0%{!?_without_ocf:1} )
%files resource-agents
# /usr/lib is the standard for OCF, also on x86_64
%{_prefix}/lib/ocf/resource.d/glusterfs
%endif

%if ( 0%{!?_without_server:1} )
%files server
%doc extras/clear_xattrs.sh
%{_datadir}/glusterfs/scripts/xattr_analysis.py*
%{_datadir}/glusterfs/scripts/quota_fsck.py*
# sysconf
%config(noreplace) %{_sysconfdir}/glusterfs
%exclude %{_sysconfdir}/glusterfs/thin-arbiter.vol
%exclude %{_sysconfdir}/glusterfs/eventsconfig.json
%config(noreplace) %{_sysconfdir}/sysconfig/glusterd
%if ( 0%{_for_fedora_koji_builds} )
%config(noreplace) %{_sysconfdir}/sysconfig/glusterfsd
%endif

# init files
%glusterd_svcfile
%if ( 0%{_for_fedora_koji_builds} )
%glusterfsd_svcfile
%endif
%if ( 0%{?_with_systemd:1} )
%glusterfssharedstorage_svcfile
%endif

# binaries
%{_sbindir}/glusterd
%{_sbindir}/glfsheal
%{_sbindir}/gf_attach
%{_sbindir}/gluster-setgfid2path
# {_sbindir}/glusterfsd is the actual binary, but glusterfs (client) is a
# symlink. The binary itself (and symlink) are part of the glusterfs-fuse
# package, because glusterfs-server depends on that anyway.

# Manpages
%{_mandir}/man8/gluster-setgfid2path.8*

# xlators
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/sdfs.so
%if ( 0%{!?_without_tiering:1} )
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
     %{_libdir}/libgfdb.so.*
%endif
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/selinux.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt/glusterd.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage/posix.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance
     %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so

# snap_scheduler
%{_sbindir}/snap_scheduler.py
%{_sbindir}/gcron.py
%{_sbindir}/conf.py

# /var/lib/glusterd, e.g. hookscripts, etc.
%ghost      %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
%ghost      %attr(0600,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/options
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/bitd
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/groups
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/virt
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/metadata-cache
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/gluster-block
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/nl-cache
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/db-workload
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/distributed-virt
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/samba
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind/.keys
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glustershd
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/disabled-quota-root-xattr-heal.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/S13create-subdir-mounts.sh
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/post
                            %{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S30samba-set.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S32gluster_enable_shared_storage.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/post
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/peers
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/quotad
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/scrub
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/snaps
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/ss_brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/vols

# Extra utility script
%dir %{_libexecdir}/glusterfs
     %{_datadir}/glusterfs/release
%dir %{_datadir}/glusterfs/scripts
     %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
%if ( 0%{?_with_systemd:1} )
     %{_libexecdir}/glusterfs/mount-shared-storage.sh
     %{_datadir}/glusterfs/scripts/control-cpu-load.sh
     %{_datadir}/glusterfs/scripts/control-mem.sh
%endif

# Incrementalapi
     %{_libexecdir}/glusterfs/glusterfind
%{_bindir}/glusterfind
     %{_libexecdir}/glusterfs/peer_add_secret_pub

%if ( 0%{?_with_firewalld:1} )
%{_prefix}/lib/firewalld/services/glusterfs.xml
%endif
# end of server files
%endif

# Events
%if ( 0%{!?_without_events:1} )
%files events
%config(noreplace) %{_sysconfdir}/glusterfs/eventsconfig.json
%dir %{_sharedstatedir}/glusterd
%dir %{_sharedstatedir}/glusterd/events
%dir %{_libexecdir}/glusterfs
     %{_libexecdir}/glusterfs/gfevents
     %{_libexecdir}/glusterfs/peer_eventsapi.py*
%{_sbindir}/glustereventsd
%{_sbindir}/gluster-eventsapi
%{_datadir}/glusterfs/scripts/eventsdash.py*
%if ( 0%{?_with_systemd:1} )
%{_unitdir}/glustereventsd.service
%else
%{_sysconfdir}/init.d/glustereventsd
%endif
%endif

##-----------------------------------------------------------------------------
## All %%pretrans should be placed here and keep them sorted
##
%if 0%{!?_without_server:1}
%pretrans -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped."
          echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!"
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime."
   echo "WARNING: Ensure to wait for the upgraded server to finish healing before proceeding."
   echo "WARNING: Refer to the upgrade section of the install guide for more details."
   echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;"
   exit 1;
fi
]]
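
-- Lua 5.1's os.execute() returns a single numeric status, while Lua >= 5.2
-- returns (ok, how, val); the "rc = val or ok" below copes with either rpm.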
ok, how, val = os.execute(script)
74b1de
rc = val or ok
74b1de
if not (rc == 0) then
74b1de
   error("Detected running glusterfs processes", rc)
79e268
end
e1d1f9
473043
473043
473043
%pretrans api -p <lua>
79e268
if not posix.access("/bin/bash", "x") then
79e268
    -- initial installation, no shell, no running glusterfsd
79e268
    return 0
79e268
end
79e268
79e268
-- TODO: move this completely to a lua script
79e268
-- For now, we write a temporary bash script and execute that.
79e268
79e268
script = [[#!/bin/sh
e1d1f9
pidof -c -o %PPID -x glusterfsd &>/dev/null
79e268
e1d1f9
if [ $? -eq 0 ]; then
79e268
   pushd . > /dev/null 2>&1
79e268
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
79e268
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
79e268
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
79e268
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
79e268
          exit 1;
79e268
       fi
79e268
   done
79e268
79e268
   popd > /dev/null 2>&1
79e268
   exit 1;
e1d1f9
fi
79e268
]]
79e268
887953
ok, how, val = os.execute(script)
74b1de
rc = val or ok
74b1de
if not (rc == 0) then
74b1de
   error("Detected running glusterfs processes", rc)
79e268
end
e1d1f9
e1d1f9
e1d1f9
473043
%pretrans api-devel -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd "$volume";
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ "$vol_type" -eq 0 ] && [ "$volume_started" -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans cli -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd "$volume";
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ "$vol_type" -eq 0 ] && [ "$volume_started" -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans client-xlators -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd "$volume";
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ "$vol_type" -eq 0 ] && [ "$volume_started" -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%pretrans fuse -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd "$volume";
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ "$vol_type" -eq 0 ] && [ "$volume_started" -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%if ( 0%{!?_without_georeplication:1} )
%pretrans geo-replication -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd "$volume";
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ "$vol_type" -eq 0 ] && [ "$volume_started" -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end
%endif


%pretrans libs -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd "$volume";
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ "$vol_type" -eq 0 ] && [ "$volume_started" -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end


%if ( 0%{!?_without_rdma:1} )
%pretrans rdma -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd "$volume";
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ "$vol_type" -eq 0 ] && [ "$volume_started" -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end
%endif


%if ( 0%{!?_without_ocf:1} )
%pretrans resource-agents -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd "$volume";
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ "$vol_type" -eq 0 ] && [ "$volume_started" -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end
%endif


%pretrans server -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/bash
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd "$volume";
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ "$vol_type" -eq 0 ] && [ "$volume_started" -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

ok, how, val = os.execute(script)
rc = val or ok
if not (rc == 0) then
   error("Detected running glusterfs processes", rc)
end

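# On upgrade: stop gsyncd and glusterd, run glusterd once in volfile
# regeneration mode, remove the socket file it leaves behind under the
# SELinux rpm_script_t context, and restart it only if it was running before.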
%posttrans server
pidof -c -o %PPID -x glusterd &> /dev/null
if [ $? -eq 0 ]; then
    kill -9 `pgrep -f gsyncd.py` &> /dev/null

    killall --wait -SIGTERM glusterd &> /dev/null

    if [ "$?" != "0" ]; then
        echo "killall failed while killing glusterd"
    fi

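    # Run glusterd once in upgrade mode: '*.upgrade=on' regenerates the
    # volfiles, and -N keeps it in the foreground so the scriptlet waits
    # for it to finish.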
    glusterd --xlator-option '*.upgrade=on' -N

    # Clean up the leftover glusterd socket file, which glusterd creates in
    # the rpm_script_t context.
    rm -rf /var/run/glusterd.socket

    # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
    # so start it again
    %service_start glusterd
else
    glusterd --xlator-option '*.upgrade=on' -N

    # Clean up the leftover glusterd socket file, which glusterd creates in
    # the rpm_script_t context.
    rm -rf /var/run/glusterd.socket
fi

%endif

%changelog
* Tue Mar 16 2021 CentOS Sources <bugs@centos.org> - 6.0-49.1.el7.centos
- remove vendor and/or packager lines

* Fri Feb 19 2021 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-49.1
- fixes bugs bz#1930561

* Wed Nov 25 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-49
- fixes bugs bz#1286171

* Tue Nov 10 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-48
- fixes bugs bz#1895301

* Thu Nov 05 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-47
- fixes bugs bz#1286171 bz#1821743 bz#1837926

* Wed Oct 21 2020 Gluster Jenkins <dkhandel+glusterjenkins@redhat.com> - 6.0-46
- fixes bugs bz#1873469 bz#1881823

* Wed Sep 09 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-45
- fixes bugs bz#1785714

* Thu Sep 03 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-44
- fixes bugs bz#1460657

* Thu Sep 03 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-43
- fixes bugs bz#1460657

* Wed Sep 02 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-42
- fixes bugs bz#1785714

* Tue Aug 25 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-41
- fixes bugs bz#1785714 bz#1851424 bz#1851989 bz#1852736 bz#1853189 bz#1855966

* Tue Jul 21 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-40
- fixes bugs bz#1812789 bz#1844359 bz#1847081 bz#1854165

* Wed Jun 17 2020 Deepshikha Khandelwal <dkhandel@redhat.com> - 6.0-39
- fixes bugs bz#1844359 bz#1845064

* Wed Jun 10 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-38
- fixes bugs bz#1234220 bz#1286171 bz#1487177 bz#1524457 bz#1640573
  bz#1663557 bz#1667954 bz#1683602 bz#1686897 bz#1721355 bz#1748865 bz#1750211
  bz#1754391 bz#1759875 bz#1761531 bz#1761932 bz#1763124 bz#1763129 bz#1764091
  bz#1775637 bz#1776901 bz#1781550 bz#1781649 bz#1781710 bz#1783232 bz#1784211
  bz#1784415 bz#1786516 bz#1786681 bz#1787294 bz#1787310 bz#1787331 bz#1787994
  bz#1790336 bz#1792873 bz#1794663 bz#1796814 bz#1804164 bz#1810924 bz#1815434
  bz#1836099 bz#1837467 bz#1837926 bz#1838479 bz#1839137 bz#1844359

* Fri May 29 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-37
- fixes bugs bz#1840794

* Wed May 27 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-36
- fixes bugs bz#1812789 bz#1823423

* Fri May 22 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-35
- fixes bugs bz#1810516 bz#1830713 bz#1836233

* Sun May 17 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-34
- fixes bugs bz#1802013 bz#1823706 bz#1825177 bz#1830713 bz#1831403 bz#1833017

* Wed Apr 29 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-33
- fixes bugs bz#1812789 bz#1813917 bz#1823703 bz#1823706 bz#1825195

* Sat Apr 04 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-32
- fixes bugs bz#1781543 bz#1812789 bz#1812824 bz#1817369 bz#1819059

* Tue Mar 17 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-31
- fixes bugs bz#1802727

* Thu Feb 20 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-30.1
- fixes bugs bz#1800703

* Sat Feb 01 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-30
- fixes bugs bz#1775564 bz#1794153

* Thu Jan 23 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-29
- fixes bugs bz#1793035

* Tue Jan 14 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-28
- fixes bugs bz#1789447

* Mon Jan 13 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-27
- fixes bugs bz#1789447

* Fri Jan 10 2020 Rinku Kothiya <rkothiya@redhat.com> - 6.0-26
- fixes bugs bz#1763208 bz#1788656

* Mon Dec 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-25
- fixes bugs bz#1686800 bz#1763208 bz#1779696 bz#1781444 bz#1782162

* Thu Nov 28 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-24
- fixes bugs bz#1768786

* Thu Nov 21 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-23
- fixes bugs bz#1344758 bz#1599802 bz#1685406 bz#1686800 bz#1724021
  bz#1726058 bz#1727755 bz#1731513 bz#1741193 bz#1758923 bz#1761326 bz#1761486
  bz#1762180 bz#1764095 bz#1766640

* Thu Nov 14 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-22
- fixes bugs bz#1771524 bz#1771614

* Fri Oct 25 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-21
- fixes bugs bz#1765555

* Wed Oct 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-20
- fixes bugs bz#1719171 bz#1763412 bz#1764202

* Thu Oct 17 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-19
- fixes bugs bz#1760939

* Wed Oct 16 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-18
- fixes bugs bz#1758432

* Fri Oct 11 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-17
- fixes bugs bz#1704562 bz#1758618 bz#1760261

* Wed Oct 09 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-16
- fixes bugs bz#1752713 bz#1756325

* Fri Sep 27 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-15
- fixes bugs bz#1726000 bz#1731826 bz#1754407 bz#1754790 bz#1755227

* Fri Sep 20 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-14
- fixes bugs bz#1719171 bz#1728673 bz#1731896 bz#1732443 bz#1733970
  bz#1745107 bz#1746027 bz#1748688 bz#1750241 bz#1572163

* Fri Aug 23 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-13
- fixes bugs bz#1729915 bz#1732376 bz#1743611 bz#1743627 bz#1743634 bz#1744518

* Fri Aug 09 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-12
- fixes bugs bz#1730914 bz#1731448 bz#1732770 bz#1732792 bz#1733531
  bz#1734305 bz#1734534 bz#1734734 bz#1735514 bz#1737705 bz#1732774
  bz#1732793

* Tue Aug 06 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-11
- fixes bugs bz#1733520 bz#1734423

* Fri Aug 02 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-10
- fixes bugs bz#1713890

* Tue Jul 23 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-9
- fixes bugs bz#1708064 bz#1708180 bz#1715422 bz#1720992 bz#1722757

* Tue Jul 16 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-8
- fixes bugs bz#1698435 bz#1712591 bz#1715447 bz#1720488 bz#1722209
  bz#1722512 bz#1724089 bz#1726991 bz#1727785 bz#1729108

* Fri Jun 28 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-7
- fixes bugs bz#1573077 bz#1600918 bz#1703423 bz#1704207 bz#1708064
  bz#1709301 bz#1713664 bz#1716760 bz#1717784 bz#1720163 bz#1720192
  bz#1720551 bz#1721351 bz#1721357 bz#1721477 bz#1722131 bz#1722331
  bz#1722509 bz#1722801 bz#1720248

* Fri Jun 14 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-6
- fixes bugs bz#1668001 bz#1708043 bz#1708183 bz#1710701
  bz#1719640 bz#1720079 bz#1720248 bz#1720318 bz#1720461

* Tue Jun 11 2019 Sunil Kumar Acharya <sheggodu@redhat.com> - 6.0-5
- fixes bugs bz#1573077 bz#1694595 bz#1703434 bz#1714536 bz#1714588
  bz#1715407 bz#1715438 bz#1705018

* Fri Jun 07 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-4
- fixes bugs bz#1480907 bz#1702298 bz#1703455 bz#1704181 bz#1707246
  bz#1708067 bz#1708116 bz#1708121 bz#1709087 bz#1711249 bz#1711296
  bz#1714078 bz#1714124 bz#1716385 bz#1716626 bz#1716821 bz#1716865 bz#1717927

* Tue May 14 2019 Rinku Kothiya <rkothiya@redhat.com> - 6.0-3
- fixes bugs bz#1583585 bz#1671862 bz#1702686 bz#1703434 bz#1703753
  bz#1703897 bz#1704562 bz#1704769 bz#1704851 bz#1706683 bz#1706776 bz#1706893

* Thu Apr 25 2019 Milind Changire <mchangir@redhat.com> - 6.0-2
- fixes bugs bz#1471742 bz#1652461 bz#1671862 bz#1676495 bz#1691620
  bz#1696334 bz#1696903 bz#1697820 bz#1698436 bz#1698728 bz#1699709 bz#1699835
  bz#1702240

* Mon Apr 08 2019 Milind Changire <mchangir@redhat.com> - 6.0-1
- rebase to upstream glusterfs at v6.0
- fixes bugs bz#1493284 bz#1578703 bz#1600918 bz#1670415 bz#1691620
  bz#1693935 bz#1695057