Blob Blame History Raw
%global _hardened_build 1

%global _for_fedora_koji_builds 0

# uncomment and add '%' to use the prereltag for pre-releases
# %%global prereltag qa3

##-----------------------------------------------------------------------------
## All argument definitions should be placed here and keep them sorted
##

# if you wish to compile an rpm with debugging...
# rpmbuild -ta glusterfs-3.8.4.tar.gz --with debug
%{?_with_debug:%global _with_debug --enable-debug}

# if you wish to compile an rpm with cmocka unit testing...
# rpmbuild -ta glusterfs-3.8.4.tar.gz --with cmocka
%{?_with_cmocka:%global _with_cmocka --enable-cmocka}

# if you wish to compile an rpm without rdma support, compile like this...
# rpmbuild -ta glusterfs-3.8.4.tar.gz --without rdma
%{?_without_rdma:%global _without_rdma --disable-ibverbs}

# No RDMA Support on s390(x)
%ifarch s390 s390x
%global _without_rdma --disable-ibverbs
%endif

# if you wish to compile an rpm without epoll...
# rpmbuild -ta glusterfs-3.8.4.tar.gz --without epoll
%{?_without_epoll:%global _without_epoll --disable-epoll}

# if you wish to compile an rpm without fusermount...
# rpmbuild -ta glusterfs-3.8.4.tar.gz --without fusermount
%{?_without_fusermount:%global _without_fusermount --disable-fusermount}

# if you wish to compile an rpm without geo-replication support, compile like this...
# rpmbuild -ta glusterfs-3.8.4.tar.gz --without georeplication
%{?_without_georeplication:%global _without_georeplication --disable-georeplication}

# Disable geo-replication on EL5, as its default Python is too old
%if ( 0%{?rhel} && 0%{?rhel} < 6 )
%global _without_georeplication --disable-georeplication
%endif

# if you wish to compile an rpm without the OCF resource agents...
# rpmbuild -ta glusterfs-3.8.4.tar.gz --without ocf
%{?_without_ocf:%global _without_ocf --without-ocf}

# if you wish to build rpms without syslog logging, compile like this
# rpmbuild -ta glusterfs-3.8.4.tar.gz --without syslog
%{?_without_syslog:%global _without_syslog --disable-syslog}

# disable syslog forcefully as rhel <= 6 doesn't have rsyslog or rsyslog-mmcount
# Fedora deprecated syslog, see
#  https://fedoraproject.org/wiki/Changes/NoDefaultSyslog
# (And what about RHEL7?)
%if ( 0%{?fedora} && 0%{?fedora} >= 20 ) || ( 0%{?rhel} && 0%{?rhel} <= 6 )
%global _without_syslog --disable-syslog
%endif

# if you wish to compile an rpm without the BD map support...
# rpmbuild -ta glusterfs-3.8.4.tar.gz --without bd
%{?_without_bd:%global _without_bd --disable-bd-xlator}

%if ( 0%{?rhel} && 0%{?rhel} < 6 || 0%{?sles_version} )
%global _without_bd --disable-bd-xlator
%endif

# Disable data-tiering on EL5, sqlite is too old
%if ( 0%{?rhel} && 0%{?rhel} < 6 )
%global _without_tiering --disable-tiering
%endif

# if you wish not to build server rpms, compile like this.
# rpmbuild -ta glusterfs-3.8.4.tar.gz --without server

%global _build_server 1
%if "%{?_without_server}"
%global _build_server 0
%endif

%if ( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" )
%global _build_server 1
%else
%global _build_server 0
%global _without_georeplication --disable-georeplication
%endif

%global _without_extra_xlators 1
%global _without_regression_tests 1

##-----------------------------------------------------------------------------
## All %%global definitions should be placed here and keep them sorted
##

%if ( 0%{?fedora} && 0%{?fedora} > 16 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
%global _with_systemd true
%endif

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 7 )
%global _with_firewalld --enable-firewalld
%endif

%if 0%{?_tmpfilesdir:1}
%global _with_tmpfilesdir --with-tmpfilesdir=%{_tmpfilesdir}
%else
%global _with_tmpfilesdir --without-tmpfilesdir
%endif

# Eventing
%if 0%{?_build_server}
%if ( 0%{?rhel} && 0%{?rhel} < 6 )
%global _without_events --disable-events
%endif
%endif

# From https://fedoraproject.org/wiki/Packaging:Python#Macros
%if ( 0%{?rhel} && 0%{?rhel} <= 5 )
%{!?python_sitelib: %global python_sitelib %(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%{!?python_sitearch: %global python_sitearch %(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
%endif

%if ( 0%{?_with_systemd:1} )
%global _init_enable()  /bin/systemctl enable %1.service ;
%global _init_disable() /bin/systemctl disable %1.service ;
%global _init_restart() /bin/systemctl try-restart %1.service ;
%global _init_start()   /bin/systemctl start %1.service ;
%global _init_stop()    /bin/systemctl stop %1.service ;
%global _init_install() install -D -p -m 0644 %1 %{buildroot}%{_unitdir}/%2.service ;
# can't seem to make a generic macro that works
%global _init_glusterd   %{_unitdir}/glusterd.service
%global _init_glusterfsd %{_unitdir}/glusterfsd.service
%global _init_glustereventsd %{_unitdir}/glustereventsd.service
%global _init_glusterfssharedstorage %{_unitdir}/glusterfssharedstorage.service
%else
%global _init_enable()  /sbin/chkconfig --add %1 ;
%global _init_disable() /sbin/chkconfig --del %1 ;
%global _init_restart() /sbin/service %1 condrestart &>/dev/null ;
%global _init_start()   /sbin/service %1 start &>/dev/null ;
%global _init_stop()    /sbin/service %1 stop &>/dev/null ;
%global _init_install() install -D -p -m 0755 %1 %{buildroot}%{_sysconfdir}/init.d/%2 ;
# can't seem to make a generic macro that works
%global _init_glusterd   %{_sysconfdir}/init.d/glusterd
%global _init_glusterfsd %{_sysconfdir}/init.d/glusterfsd
%global _init_glustereventsd %{_sysconfdir}/init.d/glustereventsd
%endif

%if ( 0%{_for_fedora_koji_builds} )
%if ( 0%{?_with_systemd:1} )
%global glusterfsd_service glusterfsd.service
%else
%global glusterfsd_service glusterfsd.init
%endif
%endif

%{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}}

%if ( 0%{?rhel} && 0%{?rhel} < 6 )
# _sharedstatedir is not provided by RHEL5
%global _sharedstatedir /var/lib
%endif

# We do not want to generate useless provides and requires for xlator
# .so files to be set for glusterfs packages.
# Filter all generated:
#
# TODO: RHEL5 does not have a convenient solution
%if ( 0%{?rhel} == 6 )
# filter_setup exists in RHEL6 only
%filter_provides_in %{_libdir}/glusterfs/%{version}/
%global __filter_from_req %{?__filter_from_req} | grep -v -P '^(?!lib).*\.so.*$'
%filter_setup
%else
# modern rpm and current Fedora do not generate requires when the
# provides are filtered
%global __provides_exclude_from ^%{_libdir}/glusterfs/%{version}/.*$
%endif


##-----------------------------------------------------------------------------
## All package definitions should be placed here in alphabetical order
##
Summary:          Distributed File System
%if ( 0%{_for_fedora_koji_builds} )
Name:             glusterfs
Version:          3.8.0
Release:          0.1%{?prereltag:.%{prereltag}}%{?dist}
#Vendor removed
%else
Name:             glusterfs
Version:          3.8.4
Release:          53%{?dist}
%endif
License:          GPLv2 or LGPLv3+
Group:            System Environment/Base
URL:              http://www.gluster.org/docs/index.php/GlusterFS
%if ( 0%{_for_fedora_koji_builds} )
Source0:          http://bits.gluster.org/pub/gluster/glusterfs/src/glusterfs-%{version}%{?prereltag}.tar.gz
Source1:          glusterd.sysconfig
Source2:          glusterfsd.sysconfig
Source6:          rhel5-load-fuse-modules
Source7:          glusterfsd.service
Source8:          glusterfsd.init
%else
Source0:          glusterfs-3.8.4.tar.gz
%endif

BuildRoot:        %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)

Requires(pre):    shadow-utils
%if ( 0%{?rhel} && 0%{?rhel} <= 5 )
BuildRequires:    python-simplejson
%endif
%if ( 0%{?_with_systemd:1} )
BuildRequires:    systemd
%endif

Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif
BuildRequires:    git
BuildRequires:    bison flex
BuildRequires:    gcc make libtool
BuildRequires:    ncurses-devel readline-devel
BuildRequires:    libxml2-devel openssl-devel
BuildRequires:    libaio-devel libacl-devel
BuildRequires:    python-devel
BuildRequires:    python-ctypes
BuildRequires:    userspace-rcu-devel >= 0.7
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
BuildRequires:    automake
%endif
%if ( 0%{?rhel} && 0%{?rhel} <= 5 )
BuildRequires:    e2fsprogs-devel
%else
BuildRequires:    libuuid-devel
%endif
%if ( 0%{?_with_cmocka:1} )
BuildRequires:    libcmocka-devel >= 1.0.1
%endif
%if ( 0%{!?_without_tiering:1} )
BuildRequires:    sqlite-devel
%endif
%if ( 0%{!?_without_bd:1} )
BuildRequires:    lvm2-devel
%endif
%if ( 0%{!?_without_georeplication:1} )
BuildRequires:    libattr-devel
%endif

%if (0%{?_with_firewalld:1})
BuildRequires:    firewalld
%endif

Obsoletes:        hekafs
Obsoletes:        %{name}-common < %{version}-%{release}
Obsoletes:        %{name}-core < %{version}-%{release}
Obsoletes:        %{name}-ufo
Provides:         %{name}-common = %{version}-%{release}
Provides:         %{name}-core = %{version}-%{release}

# Patch0001: 0001-Update-rfc.sh-to-rhgs-3.2.0.patch
Patch0002: 0002-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch
Patch0003: 0003-tier-ctr-sql-Dafault-values-for-sql-cache-and-wal-si.patch
Patch0004: 0004-rpc-set-bind-insecure-to-off-by-default.patch
Patch0005: 0005-glusterd-spec-fixing-autogen-issue.patch
Patch0006: 0006-libglusterfs-glusterd-Fix-compilation-errors.patch
Patch0007: 0007-build-remove-ghost-directory-entries.patch
Patch0008: 0008-build-add-RHGS-specific-changes.patch
Patch0009: 0009-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch
Patch0010: 0010-build-packaging-corrections-for-RHEL-5.patch
Patch0011: 0011-build-introduce-security-hardening-flags-in-gluster.patch
Patch0012: 0012-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch
Patch0013: 0013-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch
Patch0014: 0014-build-add-pretrans-check.patch
Patch0015: 0015-build-exclude-libgfdb.pc-conditionally.patch
Patch0016: 0016-build-exclude-glusterfs.xml-on-rhel-7-client-build.patch
Patch0017: 0017-glusterd-fix-info-file-checksum-mismatch-during-upgr.patch
Patch0018: 0018-build-spec-file-conflict-resolution.patch
Patch0019: 0019-build-dependency-error-during-upgrade.patch
Patch0020: 0020-Revert-gfapi-upcall-Use-GF_CALLOC-while-allocating-v.patch
Patch0021: 0021-logging-Change-log-file-name-for-glusterd.patch
Patch0022: 0022-glusterd-search-for-free-port-from-base_port.patch
Patch0023: 0023-glusterd-clean-up-old-port-and-allocate-new-one-on-e.patch
Patch0024: 0024-glusterd-Improve-mountbroker-logs.patch
Patch0025: 0025-glusterd-Fix-msgid-in-mountbroker-logs.patch
Patch0026: 0026-mgmt-glusterd-Enable-client-io-threads-by-default.patch
Patch0027: 0027-feature-bitrot-Ondemand-scrub-option-for-bitrot.patch
Patch0028: 0028-glusterd-cli-cli-to-get-local-state-representation-f.patch
Patch0029: 0029-eventsapi-Gluster-Eventing-Feature-implementation.patch
Patch0030: 0030-eventsapi-Fix-make-install-issue-second-time.patch
Patch0031: 0031-eventsapi-Volume-Set-and-Reset-Events.patch
Patch0032: 0032-eventsapi-Auto-generate-header-files-during-make.patch
Patch0033: 0033-eventsapi-Geo-replication-User-driven-events.patch
Patch0034: 0034-eventsapi-Python-library-for-gf_event.patch
Patch0035: 0035-events-move-conditional-macro-check-USE_EVENTS-insid.patch
Patch0036: 0036-eventsapi-Fix-disable-events-issue.patch
Patch0037: 0037-extras-kill-processes-gracefully.patch
Patch0038: 0038-extras-kill-gsyncd-before-glusterfsd.patch
Patch0039: 0039-build-fix-eventtypes.h-generation.patch
Patch0040: 0040-packaging-eventsapi-Make-Python-site-packages-variab.patch
Patch0041: 0041-eventsapi-Add-support-for-Client-side-Events.patch
Patch0042: 0042-eventsapi-Fix-Volume-Stop-and-delete-prompt-issue.patch
Patch0043: 0043-features-ganesha-remove-ganesha-xlator-from-client-g.patch
Patch0044: 0044-eventsapi-Add-Init-scripts-for-different-distributio.patch
Patch0045: 0045-eventsapi-Add-conditional-import-for-requests-librar.patch
Patch0046: 0046-eventsapi-packaging-Fix-conflict-during-rpm-install.patch
Patch0047: 0047-eventsapi-Bitrot-events.patch
Patch0048: 0048-glusterd-Add-async-events.patch
Patch0049: 0049-glusterd-add-async-events-part-2.patch
Patch0050: 0050-quota-cli-add-user-driven-quota-events.patch
Patch0051: 0051-quota-add-quota-events.patch
Patch0052: 0052-eventsapi-declare-all-the-identified-events-at-one-g.patch
Patch0053: 0053-protocol-server-generate-events.patch
Patch0054: 0054-extras-cliutils-Utils-for-creating-CLI-tools-for-Glu.patch
Patch0055: 0055-glusterd-ganesha-Move-ganesha-ha.conf-and-ganesha.co.patch
Patch0056: 0056-ganesha-scripts-remove-HA_VOL_SERVER-from-the-code.patch
Patch0057: 0057-afr-add-replication-events.patch
Patch0058: 0058-glusterd-Introduce-reset-brick.patch
Patch0059: 0059-Revert-glusterd-ganesha-copy-ganesha-export-configur.patch
Patch0060: 0060-ganesha-scripts-Modifying-ganesha-ha.sh-for-share-st.patch
Patch0061: 0061-glusterd-ganesha-create-export-configuration-file-in.patch
Patch0062: 0062-event-fix-gf_event-messages-for-replace-reset-brick-.patch
Patch0063: 0063-cluster-ec-Add-events-for-EC-translator.patch
Patch0064: 0064-storage-posix-Integrate-important-events-with-gf_eve.patch
Patch0065: 0065-cluster-tier-add-tiering-events.patch
Patch0066: 0066-xlators-ganesha-Remove-the-ganesha-xlator-code-entir.patch
Patch0067: 0067-snapshot-eventsapi-Integrate-snapshot-events-with-ev.patch
Patch0068: 0068-ganesha-glusterd-Correct-the-path-for-ganesha-conf-d.patch
Patch0069: 0069-events-dht-dht-cli-events.patch
Patch0070: 0070-dht-events-Added-rebalance-events.patch
Patch0071: 0071-geo-rep-Use-configured-log_level-for-libgfchangelog-.patch
Patch0072: 0072-cluster-dht-heal-root-permission-post-add-brick.patch
Patch0073: 0073-geo-rep-add-geo-rep-events-for-server-side-changes.patch
Patch0074: 0074-cluster-dht-Skip-layout-overlap-maximization-on-weig.patch
Patch0075: 0075-geo-rep-Simplify-Non-root-user-mountbroker-Setup.patch
Patch0076: 0076-protocol-client-fix-coding-style-violations.patch
# Patch0077: 0077-infra-remove-anti-typedef-check.patch
Patch0078: 0078-compound-fops-Some-fixes-to-compound-fops-framework.patch
Patch0079: 0079-afr-Consume-compound-fops-in-afr-transaction.patch
Patch0080: 0080-geo-rep-Defunct-tar-process-after-sync.patch
Patch0081: 0081-geo-rep-Fix-Geo-rep-status-if-monitor.pid-file-not-e.patch
Patch0082: 0082-geo-rep-Fix-logging-sync-failures.patch
Patch0083: 0083-eventsapi-Fix-eventtypes.h-header-generation-with-Py.patch
Patch0084: 0084-build-linux-oom.h-fix-for-RHEL-5.patch
Patch0085: 0085-syscall-remove-preadv-and-pwritev-sys-wrappers.patch
Patch0086: 0086-build-ignore-sbindir-conf.py-for-RHEL-5.patch
Patch0087: 0087-socket-pollerr-event-shouldn-t-trigger-socket_connne.patch
Patch0088: 0088-afr-Modifications-to-afr-events.patch
Patch0089: 0089-build-add-systemd-dependency.patch
Patch0090: 0090-glusterfsd-explicitly-turn-on-encryption-for-volfile.patch
Patch0091: 0091-glusterd-fix-return-val-in-glusterd_op_volume_dict_u.patch
Patch0092: 0092-afr-Ignore-gluster-internal-virtual-xattrs-in-metada.patch
Patch0093: 0093-dht-udpate-stbuf-from-servers-those-have-layout.patch
Patch0094: 0094-eventsapi-geo-rep-Geo-rep-will-not-work-without-even.patch
Patch0095: 0095-gfapi-Fix-few-fd-ref-leaks.patch
Patch0096: 0096-socket-log-the-client-identifier-in-ssl-connect.patch
Patch0097: 0097-performance-open-behind-Pass-O_DIRECT-flags-for-anon.patch
Patch0098: 0098-cluster-ec-set-unset-dirty-flag-for-data-metadata-up.patch
Patch0099: 0099-cluster-ec-Implement-heal-info-with-lock.patch
Patch0100: 0100-cluster-ec-Use-locks-for-opendir.patch
Patch0101: 0101-system-posix-acl-Unwind-with-NULL-xdata-on-error.patch
Patch0102: 0102-afr-Take-full-locks-in-arbiter-only-for-data-transac.patch
Patch0103: 0103-rpc-socket.c-Modify-gf_log-message-in-socket_poller-.patch
Patch0104: 0104-build-randomize-temp-file-names-in-pretrans-scriptle.patch
Patch0105: 0105-cluster-afr-Prevent-dict_set-on-NULL-dict.patch
Patch0106: 0106-glusterd-enable-default-configurations-post-upgrade-.patch
Patch0107: 0107-cluster-ec-Do-multi-threaded-self-heal.patch
Patch0108: 0108-geo-rep-eventsapi-Additional-Events.patch
Patch0109: 0109-storage-posix-Fix-race-in-posix_pstat.patch
Patch0110: 0110-glusterd-set-the-brickinfo-port-before-spawning-the-.patch
Patch0111: 0111-upcall-Add-support-to-invalidate-xattrs.patch
Patch0112: 0112-upcall-pass-dict-with-xattrs-on-xattr-invalidation.patch
Patch0113: 0113-md-cache-Cache-gluster-samba-metadata.patch
Patch0114: 0114-upcall-Invalidation-for-xattr-should-also-carry-a-va.patch
Patch0115: 0115-md-cache-Enable-caching-of-stat-fetched-from-readdir.patch
Patch0116: 0116-md-cache-Add-cache-invalidation-support-for-metadata.patch
Patch0117: 0117-md-cache-fix-indention-to-silence-Coverity.patch
Patch0118: 0118-md-cache-upcall-In-case-of-mode-bit-change-invalidat.patch
Patch0119: 0119-md-cache-Add-logging-to-increase-debuggability.patch
Patch0120: 0120-md-cache-Add-cache-hit-and-miss-counters.patch
Patch0121: 0121-md-cache-Do-not-use-features.cache-invalidation-for-.patch
Patch0122: 0122-md-cache-Fix-wrong-cache-time-update-for-xattrs.patch
Patch0123: 0123-dht-define-GF_IPC_TARGET_UPCALL.patch
Patch0124: 0124-dht-Implement-ipc-fop.patch
Patch0125: 0125-io-stats-Add-ipc-fop-for-display-in-the-profile-info.patch
Patch0126: 0126-upcall-Add-permission-change-flag-to-iatt-flag.patch
Patch0127: 0127-md-cache-Register-the-list-of-xattrs-with-cache-inva.patch
Patch0128: 0128-dht-md-cache-upcall-Add-invalidation-of-IATT-when-th.patch
Patch0129: 0129-md-cache-Process-all-the-cache-invalidation-flags.patch
Patch0130: 0130-upcall-Mark-the-clients-as-accessed-on-readdirp-entr.patch
Patch0131: 0131-io-stats-Add-stats-for-upcall-notifications.patch
Patch0132: 0132-ec-Implement-ipc-fop.patch
Patch0133: 0133-afr-Implement-IPC-fop.patch
Patch0134: 0134-md-cache-afr-Reduce-the-window-of-stale-read.patch
Patch0135: 0135-cli-glusterd-Address-issues-in-get-state-cli-output.patch
Patch0136: 0136-rpc-socket-Close-pipe-on-disconnection.patch
Patch0137: 0137-ganesha-scripts-modify-start-hook-script-for-shared-.patch
Patch0138: 0138-performance-io-threads-Exit-all-threads-on-PARENT_DO.patch
Patch0139: 0139-glusterd-conditionally-pass-uuid-for-EVENT_PEER_CONN.patch
Patch0140: 0140-eventsapi-packaging-Fix-wrong-usage-of-post.patch
Patch0141: 0141-compound-fops-Fix-file-corruption-issue.patch
Patch0142: 0142-rpc-Fix-the-race-between-notification-and-reconnecti.patch
Patch0143: 0143-cluster-dht-Incorrect-volname-in-rebalance-events.patch
Patch0144: 0144-events-add-TIER_START-and-TIER_START_FORCE-events.patch
Patch0145: 0145-tools-glusterfind-kill-remote-processes-and-separate.patch
Patch0146: 0146-afr-ec-Heal-device-files-with-correct-major-minor-nu.patch
Patch0147: 0147-rpc-socket.c-Modify-socket_poller-code-in-case-of-EN.patch
Patch0148: 0148-md-cache-Invalidate-cache-entry-for-open-with-O_TRUN.patch
Patch0149: 0149-glusterd-shared-storage-Check-for-hook-script-at-sta.patch
Patch0150: 0150-gfapi-async-fops-should-unref-in-callbacks.patch
Patch0151: 0151-CLI-TIER-throw-warning-regarding-the-removal-of-the-.patch
Patch0152: 0152-bitrot-cli-Add-ondemand-scrub-event.patch
Patch0153: 0153-glusterd-use-GF_BRICK_STOPPING-as-intermediate-brick.patch
Patch0154: 0154-features-shard-Fill-loc.pargfid-too-for-named-lookup.patch
Patch0155: 0155-eventsapi-Fix-sending-event-during-volume-set-help.patch
Patch0156: 0156-glusterd-quota-upgrade-quota.conf-file-during-an-upg.patch
Patch0157: 0157-posix-acl-check-dictionary-before-using-it.patch
Patch0158: 0158-Revert-rpc-Fix-the-race-between-notification-and-rec.patch
Patch0159: 0159-performance-open-behind-Avoid-deadlock-in-statedump.patch
Patch0160: 0160-commn-HA-Add-portblock-RA-to-tickle-packets-post-fai.patch
Patch0161: 0161-build-incorrect-Requires-for-portblock-resource-agen.patch
Patch0162: 0162-build-Update-version-check-for-resource-agents-on-RH.patch
Patch0163: 0163-cluster-afr-When-failing-fop-due-to-lack-of-quorum-a.patch
Patch0164: 0164-glusterfsd-Continuous-errors-are-getting-in-mount-lo.patch
Patch0165: 0165-nfs-cli-add-warning-message-while-enabling-gluster-n.patch
Patch0166: 0166-nfs-revalidate-lookup-converted-to-fresh-lookup.patch
Patch0167: 0167-geo-rep-cli-Validate-Checkpoint-label.patch
Patch0168: 0168-performance-write-behind-fix-flush-stuck-by-former-f.patch
Patch0169: 0169-nfs-ganesha-common-ha-remove-etc-corosysnc-corosync..patch
Patch0170: 0170-protocol-client-Fix-iobref-and-iobuf-leaks-in-COMPOU.patch
Patch0171: 0171-nfs-ganesha-common-ha-grace-monitor-timed-out-unknow.patch
Patch0172: 0172-cli-rebalance-remove-brick-status-is-incorrect.patch
Patch0173: 0173-cluster-dht-Check-for-null-inode.patch
Patch0174: 0174-cli-Print-to-screen-frequently.patch
Patch0175: 0175-events-Add-FMT_WARN-for-gf_event.patch
Patch0176: 0176-upcall-Fix-a-log-level.patch
Patch0177: 0177-marker-Fix-inode-value-in-loc-in-setxattr-fop.patch
Patch0178: 0178-snapshot-scheduler-Removing-dependency-of-scheduler-.patch
Patch0179: 0179-glusterd-gNFS-On-post-upgrade-to-3.2-disable-gNFS-fo.patch
Patch0180: 0180-build-Add-dependency-on-netstat-for-glusterfs-ganesh.patch
Patch0181: 0181-afr-dht-ec-Replace-GF_EVENT_CHILD_MODIFIED-with-even.patch
Patch0182: 0182-cluster-dht-Set-layout-after-mkdir-as-root.patch
Patch0183: 0183-Undo-revert-gfapi-upcall-Use-GF_CALLOC-while-allocat.patch
Patch0184: 0184-libglusterfs-add-gf_get_mem_type.patch
Patch0185: 0185-gfapi-redesign-the-public-interface-for-upcall-consu.patch
Patch0186: 0186-gfapi-add-glfs_free-to-glfs.h.patch
Patch0187: 0187-gfapi-upcall-Fix-mismatch-in-few-upcall-API-SYMVER.patch
Patch0188: 0188-glusterd-dump-volinfo-dict-in-gluster-get-state.patch
Patch0189: 0189-libglusterfs-Now-mempool-is-added-to-ctx-pool-list-u.patch
Patch0190: 0190-glusterd-fix-few-events-generation.patch
Patch0191: 0191-performance-io-threads-Exit-threads-in-fini-as-well.patch
Patch0192: 0192-ganesha-scripts-use-export-id-for-dbus-signals.patch
Patch0193: 0193-cluster-afr-Fix-deadlock-due-to-compound-fops.patch
Patch0194: 0194-cluster-afr-Handle-rpc-errors-xdr-failures-etc-with-.patch
Patch0195: 0195-cluster-afr-Fix-bugs-in-f-inodelk-f-entrylk.patch
Patch0196: 0196-afr-allow-I-O-when-favorite-child-policy-is-enabled.patch
Patch0197: 0197-geo-rep-Upgrade-conf-file-only-if-it-is-session-conf.patch
Patch0198: 0198-geo-rep-Handle-EISDIR-error-during-Unlink.patch
Patch0199: 0199-geo-rep-Handle-ENOENT-during-unlink.patch
Patch0200: 0200-features-index-Delete-granular-entry-indices-of-alre.patch
Patch0201: 0201-cluster-afr-CLI-for-granular-entry-heal-enablement-d.patch
Patch0202: 0202-afr-Fix-the-EIO-that-can-occur-in-afr_inode_refresh-.patch
Patch0203: 0203-geo-rep-Fix-Last-synced-status-column-issue-during-H.patch
Patch0204: 0204-eventsapi-Auto-reload-Webhooks-data-when-modified.patch
Patch0205: 0205-libglusterfs-Fix-a-read-hang.patch
Patch0206: 0206-cluster-dht-A-hard-link-is-lost-during-rebalance-loo.patch
Patch0207: 0207-protocol-server-Fix-mem-leaks-in-compound-fops.patch
Patch0208: 0208-gfapi-glfs_subvol_done-should-NOT-wait-for-graph-mig.patch
Patch0209: 0209-uss-snapd-should-enable-SSL-if-SSL-is-enabled-on-vol.patch
Patch0210: 0210-common-HA-Increase-timeout-for-portblock-RA-of-actio.patch
Patch0211: 0211-glusterd-cli-Fix-volume-options-output-format-in-get.patch
Patch0212: 0212-performance-write-behind-remove-the-request-from-lia.patch
Patch0213: 0213-build-add-systemd-dependency-to-the-glusterfs-sub-pa.patch
Patch0214: 0214-cluster-tier-handle-fast-demotions.patch
Patch0215: 0215-selfheal-fix-memory-leak-on-client-side-healing-queu.patch
Patch0216: 0216-geo-rep-eventsapi-Add-Master-node-information-in-Geo.patch
Patch0217: 0217-eventsapi-JSON-output-and-different-error-codes.patch
Patch0218: 0218-eventsapi-Push-Messages-to-Webhooks-in-parallel.patch
Patch0219: 0219-cluster-ec-Check-xdata-to-avoid-memory-leak.patch
Patch0220: 0220-dht-rename-Incase-of-failure-remove-linkto-file-prop.patch
Patch0221: 0221-ganesha-scripts-avoid-incrementing-Export-Id-value-f.patch
Patch0222: 0222-common-ha-IPaddr-RA-is-not-stopped-when-pacemaker-qu.patch
Patch0223: 0223-common-ha-add-cluster-HA-status-to-status-output-for.patch
Patch0224: 0224-dht-md-cache-Filter-invalidate-if-the-file-is-made-a.patch
Patch0225: 0225-extras-Include-shard-and-full-data-heal-in-virt-grou.patch
Patch0226: 0226-afr-client-More-mem-leak-fixes-in-COMPOUND-fop-cbk.patch
Patch0227: 0227-rpc-fix-for-race-between-rpc-and-protocol-client.patch
Patch0228: 0228-afr-fix-bug-in-passing-child-index-in-afr_inode_writ.patch
Patch0229: 0229-cluster-afr-Serialize-conflicting-locks-on-all-subvo.patch
Patch0230: 0230-glusterd-ganesha-handle-volume-reset-properly-for-ga.patch
Patch0231: 0231-glusterfsd-fix-null-pointer-dereference-in-glusterfs.patch
Patch0232: 0232-cluster-afr-Remove-backward-compatibility-for-locks-.patch
Patch0233: 0233-glusterd-geo-rep-Fix-glusterd-crash.patch
Patch0234: 0234-cluster-dht-Fix-memory-corruption-while-accessing-re.patch
Patch0235: 0235-ganesha-scripts-find-export-id-for-already-exported-.patch
Patch0236: 0236-syncop-fix-conditional-wait-bug-in-parallel-dir-scan.patch
Patch0237: 0237-cluster-afr-Fix-per-txn-optimistic-changelog-initial.patch
Patch0238: 0238-snapshot-ganesha-Copy-export.conf-only-if-ganesha.en.patch
Patch0239: 0239-glusterd-Handle-volinfo-refcnt-properly-during-volum.patch
Patch0240: 0240-common-ha-Create-portblock-RA-as-part-of-add-delete-.patch
Patch0241: 0241-cluster-ec-Fix-lk-owner-set-race-in-ec_unlock.patch
Patch0242: 0242-common-ha-explicitly-set-udpu-transport-for-corosync.patch
Patch0243: 0243-glfsheal-Explicitly-enable-self-heal-xlator-options.patch
Patch0244: 0244-common-ha-add-node-create-new-node-dirs-in-shared-st.patch
Patch0245: 0245-glusterd-gNFS-explicitly-set-nfs.disable-to-off-afte.patch
Patch0246: 0246-protocol-client-fix-op_errno-handling-was-unused-var.patch
Patch0247: 0247-snapshot-Fix-restore-rollback-to-reassign-snap-volum.patch
Patch0248: 0248-client-server-Free-xdr-allocated-compound-req-rsp-ar.patch
Patch0249: 0249-socket-socket-disconnect-should-wait-for-poller-thre.patch
Patch0250: 0250-afr-Ignore-event_generation-checks-post-inode-refres.patch
Patch0251: 0251-storage-posix-Do-not-create-a-directory-which-alread.patch
Patch0252: 0252-Fixes-GlusterFS-process-crashes-on-deep-directory-hi.patch
Patch0253: 0253-ganesha-scripts-Prevent-removal-of-entries-in-ganesh.patch
Patch0254: 0254-common-ha-Correct-the-VIP-assigned-to-the-new-node-a.patch
Patch0255: 0255-performance-readdir-ahead-limit-cache-size.patch
Patch0256: 0256-dht-rebalance-reverify-lookup-failures.patch
Patch0257: 0257-performance-readdir-ahead-fix-compiler-error.patch
Patch0258: 0258-afr-use-accused-matrix-instead-of-readable-matrix-fo.patch
Patch0259: 0259-cluster-afr-Fix-missing-name-indices-due-to-EEXIST-e.patch
Patch0260: 0260-performance-readdir-ahead-mark-two-options-as-NO_DOC.patch
Patch0261: 0261-glusterd-spawn-nfs-daemon-in-op-version-bump-if-nfs..patch
Patch0262: 0262-eventsapi-Use-getaddrinfo-instead-of-gethostbyname.patch
Patch0263: 0263-ec-Invalidations-in-disperse-volume-should-not-updat.patch
Patch0264: 0264-cluster-dht-Do-rename-cleanup-as-root.patch
Patch0265: 0265-cluster-ec-Do-lookup-on-an-existing-file-in-link.patch
Patch0266: 0266-glusterd-Fail-add-brick-on-replica-count-change-if-b.patch
Patch0267: 0267-dht-rebalance-remove-errno-check-for-failure-detecti.patch
Patch0268: 0268-cluster-dht-Incorrect-migration-checks-in-fsync.patch
Patch0269: 0269-afr-Avoid-resetting-event_gen-when-brick-is-always-d.patch
Patch0270: 0270-cluster-afr-Do-not-log-of-split-brain-when-there-isn.patch
Patch0271: 0271-upcall-Resolve-dict-leak-from-up_-f-remove-set-xattr.patch
Patch0272: 0272-Fixes-upgrade-issue-in-quota.conf.patch
Patch0273: 0273-features-changelog-Fix-htime-xattr-during-brick-cras.patch
Patch0274: 0274-cluster-ec-Do-not-start-heal-on-good-file-while-IO-i.patch
Patch0275: 0275-cluster-disperse-Do-not-log-fop-failed-for-lockless-.patch
Patch0276: 0276-common-ha-All-statd-related-files-need-to-be-owned-b.patch
Patch0277: 0277-cluster-ec-Fix-cthon-failures-observed-with-ec-volum.patch
Patch0278: 0278-tools-glusterfind-delete-temporary-folder.patch
Patch0279: 0279-glusterd-regenerate-volfiles-on-op-version-bump-up.patch
Patch0280: 0280-cluster-ec-Change-level-of-messages-to-DEBUG.patch
Patch0281: 0281-rpc-socket.c-Bonnie-hangs-during-rewrites-in-ganesha.patch
Patch0282: 0282-performance-write-behind-access-stub-only-if-availab.patch
Patch0283: 0283-glusterd-do-not-load-io-threads-in-client-graph-for-.patch
Patch0284: 0284-extras-Provide-group-set-for-md-cache-and-invalidati.patch
Patch0285: 0285-socket-GF_REF_PUT-should-be-called-outside-lock.patch
Patch0286: 0286-afr-all-children-of-AFR-must-be-up-to-resolve-s-brai.patch
Patch0287: 0287-glusterd-ignore-return-code-of-glusterd_restart_bric.patch
Patch0288: 0288-protocol-client-Fix-double-free-of-client-fdctx-dest.patch
Patch0289: 0289-gNFS-Keep-the-mountdict-as-long-as-the-service-is-ac.patch
Patch0290: 0290-geo-rep-Fix-xsync-crawl.patch
Patch0291: 0291-rpcsvc-Add-rpchdr-and-proghdr-to-iobref-before-submi.patch
Patch0292: 0292-cluster-afr-Perform-new-entry-mark-before-creating-n.patch
Patch0293: 0293-extras-Add-more-options-to-group-virt.patch
Patch0294: 0294-marker-Fix-inode-value-in-loc-in-setxattr-fop.patch
Patch0295: 0295-common-ha-unable-to-start-HA-Connection-Error.patch
Patch0296: 0296-features-shard-Put-onus-of-choosing-the-inode-to-res.patch
Patch0297: 0297-features-shard-Fix-EIO-error-on-add-brick.patch
Patch0298: 0298-ganesha-scripts-restart-pcs-cluster-during-add-node.patch
Patch0299: 0299-cluster-ec-Don-t-trigger-data-metadata-heal-on-Looku.patch
Patch0300: 0300-common-ha-setup-after-teardown-often-fails.patch
Patch0301: 0301-rpc-clnt-remove-locks-while-notifying-CONNECT-DISCON.patch
Patch0302: 0302-cluster-ec-Introduce-optimistic-changelog-in-EC.patch
Patch0303: 0303-cluster-ec-Don-t-mark-dirty-on-entry-meta-ops-in-que.patch
# Patch0304: 0304-Update-rfc.sh-to-rhgs-3.3.0.patch
Patch0305: 0305-Snapshot-xml-xml-output-for-snapshot-clone.patch
Patch0306: 0306-dht-replica.split-brain-status-attribute-value-is-no.patch
Patch0307: 0307-geo-rep-Do-not-restart-workers-when-log-rsync-perfor.patch
Patch0308: 0308-geo-rep-Fix-log-rsync-performance-config-issue.patch
Patch0309: 0309-glusterd-Add-info-on-op-version-for-clients-in-vol-s.patch
Patch0310: 0310-cli-geo-rep-Fix-geo-rep-status-detail.patch
Patch0311: 0311-geo-rep-Handle-directory-sync-failure-as-hard-error.patch
Patch0312: 0312-geo-rep-Separate-slave-mount-logs-for-each-connectio.patch
Patch0313: 0313-gfapi-add-API-to-trigger-events-for-debugging-and-tr.patch
Patch0314: 0314-dht-rebalance-Estimate-time-to-complete-rebalance.patch
Patch0315: 0315-glusterd-add-a-cli-command-to-trigger-a-statedump-on.patch
Patch0316: 0316-gfapi-create-statedump-when-glusterd-requests-it.patch
Patch0317: 0317-glusterd-daemon-restart-logic-should-adhere-server-s.patch
Patch0318: 0318-glusterd-cli-Get-global-options-through-volume-get-f.patch
Patch0319: 0319-core-run-many-bricks-within-one-glusterfsd-process.patch
Patch0320: 0320-libglusterfs-transport-io-threads-fix-256KB-stack-ab.patch
Patch0321: 0321-glusterd-double-check-whether-brick-is-alive-for-sta.patch
Patch0322: 0322-libglusterfs-make-memory-pools-more-thread-friendly.patch
Patch0323: 0323-socket-retry-connect-immediately-if-it-fails.patch
Patch0324: 0324-glusterd-double-check-brick-liveness-for-remove-bric.patch
Patch0325: 0325-cluster-dht-Don-t-update-layout-in-rebalance_task_co.patch
Patch0326: 0326-Fixes-quota-list-when-stale-gfid-exist-in-quota.conf.patch
Patch0327: 0327-trash-fix-problem-with-trash-feature-under-multiplex.patch
Patch0328: 0328-libglusterfs-fix-serious-leak-of-xlator_t-structures.patch
Patch0329: 0329-glusterd-put-null-check-for-mod_dict-in-build_shd_gr.patch
Patch0330: 0330-glusterd-set-default-GLUSTERD_QUORUM_RATIO_KEY-value.patch
Patch0331: 0331-cli-add-integer-check-for-timeout-option.patch
Patch0332: 0332-glusterd-keep-snapshot-bricks-separate-from-regular-.patch
Patch0333: 0333-glusterd-Fix-for-error-mesage-while-detaching-peers.patch
Patch0334: 0334-build-git-ignore-generated-executable-script-gf_atta.patch
Patch0335: 0335-cluster-dht-Fix-error-assignment-in-dht_-xattr2-func.patch
Patch0336: 0336-glusterd-take-conn-lock-around-operations-on-conn-re.patch
Patch0337: 0337-glusterd-unref-brickinfo-object-on-volume-stop.patch
Patch0338: 0338-snapshot-scheduler-Set-sebool-cron_system_cronjob_us.patch
Patch0339: 0339-gfapi-OBS-build-fails-in-post-build-analysis.patch
Patch0340: 0340-glusterd-add-portmap-details-in-glusterd-statedump.patch
Patch0341: 0341-core-Clean-up-pmap-registry-up-correctly-on-volume-b.patch
Patch0342: 0342-socket-Avoid-flooding-of-SSL-messages-in-case-of-fai.patch
Patch0343: 0343-ganesha-glusterd-create-ganesha.conf-symlink-for-nod.patch
Patch0344: 0344-geo-rep-Logging-improvements.patch
Patch0345: 0345-snapshot-Fix-the-failure-to-recreate-clones-with-sam.patch
Patch0346: 0346-cluster-ec-Healing-should-not-start-if-only-data-bri.patch
Patch0347: 0347-common-ha-Use-UpdateExports-dbus-msg-for-refresh-con.patch
Patch0348: 0348-rpc-log-more-about-socket-disconnects.patch
Patch0349: 0349-rpc-avoid-logging-success-on-failure.patch
Patch0350: 0350-glusterfsd-don-t-quit-in-client-on-connection-failur.patch
Patch0351: 0351-glusterd-don-t-queue-attach-reqs-before-connecting.patch
Patch0352: 0352-glusterfsd-libglusterfs-add-null-checks-during-attac.patch
Patch0353: 0353-rpc-bump-up-conn-cleanup_gen-in-rpc_clnt_reconnect_c.patch
Patch0354: 0354-geo-rep-Fix-ESTALE-EINVAL-issue-during-set_-xtime-st.patch
Patch0355: 0355-readdir-ahead-Enhance-EOD-detection-logic.patch
Patch0356: 0356-dht-At-places-needed-use-STACK_WIND_COOKIE.patch
Patch0357: 0357-readdir-ahead-Perform-STACK_UNWIND-outside-of-mutex-.patch
Patch0358: 0358-glusterd-Change-the-volfile-to-have-readdir-ahead-as.patch
Patch0359: 0359-glusterd-rda-If-parallel-readdir-is-enabled-split-th.patch
Patch0360: 0360-Readdir-ahead-Honor-readdir-optimise-option-of-dht.patch
Patch0361: 0361-glusterd-readdir-ahead-Fix-backward-incompatibility.patch
Patch0362: 0362-features-trash-Fix-bad-backport.patch
Patch0363: 0363-glusterd-glusterd-is-crashed-at-the-time-of-stop-vol.patch
Patch0364: 0364-protocol-fix-auth-allow-regression.patch
Patch0365: 0365-glusterd-reset-pid-to-1-if-brick-is-not-online.patch
Patch0366: 0366-glusterd-hold-off-volume-deletes-while-still-restart.patch
Patch0367: 0367-io-stats-fix-translator-names.patch
Patch0368: 0368-TIER-watermark-check-during-low-watermark-reset.patch
Patch0369: 0369-cluster-afr-Undo-pending-xattrs-only-on-the-up-brick.patch
Patch0370: 0370-reddir-ahead-Fix-EOD-propagation-problem.patch
Patch0371: 0371-protocol-client-Initialize-the-list_head-before-usin.patch
Patch0372: 0372-readdir-ahead-Remove-unnecessary-logging.patch
Patch0373: 0373-dht-The-xattrs-sent-in-readdirp-should-be-sent-in-op.patch
Patch0374: 0374-features-shard-Pass-the-correct-iatt-for-cache-inval.patch
Patch0375: 0375-geo-rep-Fix-cherry-pick-issues.patch
Patch0376: 0376-nfs-make-subdir-mounting-work-for-Solaris-10-clients.patch
Patch0377: 0377-cluster-dht-Modify-local-loc.gfid-in-thread-safe-man.patch
Patch0378: 0378-geo-rep-Retry-on-EBUSY.patch
Patch0379: 0379-geo-rep-Fix-EBUSY-traceback.patch
Patch0380: 0380-glusterd-fix-glusterd_wait_for_blockers-to-go-in-inf.patch
Patch0381: 0381-dht-Add-missing-braces-in-dht_opendir.patch
Patch0382: 0382-glusterd-Add-validation-for-options-rda-cache-limit-.patch
Patch0383: 0383-dht-Add-readdir-ahead-in-rebalance-graph-if-parallel.patch
Patch0384: 0384-glusterd-Fix-snapshot-failure-in-non-root-geo-rep-se.patch
Patch0385: 0385-eventsapi-Fix-webhook-test-when-no-schema-specified-.patch
Patch0386: 0386-features-bit-rot-stub-bring-in-optional-versioning.patch
Patch0387: 0387-glusterd-Fix-build-failure.patch
Patch0388: 0388-afr-don-t-do-a-post-op-on-a-brick-if-op-failed.patch
Patch0389: 0389-cluster-ec-Add-Modify-description-for-eager-lock-opt.patch
Patch0390: 0390-cluster-ec-Fixing-log-message.patch
Patch0391: 0391-dht-rebalance-Increase-maximum-read-block-size-from-.patch
Patch0392: 0392-Implement-negative-lookup-cache.patch
Patch0393: 0393-libglusterfs-provide-standardized-atomic-operations.patch
Patch0394: 0394-cluster-dht-Make-rebalance-honor-min-free-disk.patch
Patch0395: 0395-cluster-dht-Skip-file-migration-if-the-subvol-that-m.patch
Patch0396: 0396-glusterd-set-conn-reconnect-to-null-on-timer-cancell.patch
Patch0397: 0397-geo-rep-filter-out-xtime-attribute-during-getxattr.patch
Patch0398: 0398-glusterd-cluster.brick-multiplex-validation-is-missi.patch
Patch0399: 0399-ganesha-allow-refresh-config-and-volume-export-unexp.patch
Patch0400: 0400-glusterd-ganesha-During-volume-delete-remove-the-gan.patch
Patch0401: 0401-ganesha-scripts-Remove-export-entries-from-ganesha.c.patch
Patch0402: 0402-common-ha-cluster-names-must-be-15-characters-or-les.patch
Patch0403: 0403-glusterd-ganesha-throw-proper-error-for-gluster-nfs-.patch
Patch0404: 0404-glusterd-populate-volinfo-rebal.time_left.patch
Patch0405: 0405-glusterd-parallel-readdir-Change-the-op-version-of-p.patch
Patch0406: 0406-glusterd-geo-rep-Fix-snapshot-create-in-geo-rep-setu.patch
Patch0407: 0407-cluster-dht-rm-rf-fails-if-dir-has-stale-linkto-file.patch
Patch0408: 0408-cluster-dht-Remove-redundant-logs-in-dht-rmdir.patch
Patch0409: 0409-glusterd-ganesha-update-cache-invalidation-properly-.patch
Patch0410: 0410-glusterd-ganesha-add-proper-NULL-check-in-manage_exp.patch
Patch0411: 0411-packaging-var-run-gluster-owner-gluster-gluster-0775.patch
Patch0412: 0412-features-shard-Fix-vm-corruption-upon-fix-layout.patch
Patch0413: 0413-features-shard-Initialize-local-fop-in-readv.patch
Patch0414: 0414-cluster-dht-Use-int8-instead-of-string-to-pass-DHT_I.patch
Patch0415: 0415-cluster-dht-Pass-the-req-dict-instead-of-NULL-in-dht.patch
Patch0416: 0416-cluster-dht-Pass-the-correct-xdata-in-fremovexattr-f.patch
Patch0417: 0417-Fixes-quota-aux-mount-failure.patch
Patch0418: 0418-snapview-server-Refresh-the-snapshot-list-during-eac.patch
Patch0419: 0419-dht-send-lookup-on-old-name-inside-rename-with-bname.patch
Patch0420: 0420-cluster-dht-Fix-ret-check.patch
Patch0421: 0421-glusterd-cleanup-pidfile-on-pmap-signout.patch
Patch0422: 0422-glusterd-socketfile-pidfile-related-fixes-for-brick-.patch
Patch0423: 0423-Tier-Watermark-check-for-hi-and-low-value-being-equa.patch
Patch0424: 0424-rpc-fix-transport-add-remove-race-on-port-probing.patch
Patch0425: 0425-libglusterfs-stop-special-casing-cache-size-in-size_.patch
Patch0426: 0426-glusterd-ganesha-perform-removal-of-ganesha.conf-on-.patch
Patch0427: 0427-features-shard-Set-size-in-inode-ctx-before-size-upd.patch
Patch0428: 0428-Added-the-missing-FOPs-in-the-io-stats-xlator.patch
Patch0429: 0429-glusterfsd-send-PARENT_UP-on-brick-attach.patch
Patch0430: 0430-glusterd-Make-reset-brick-work-correctly-if-brick-mu.patch
Patch0431: 0431-cluster-dht-rebalance-perf-enhancement.patch
Patch0432: 0432-cluster-dht-Make-rebalance-throttle-option-tuned-by-.patch
Patch0433: 0433-brick-mux-Detach-brick-on-posix-health-check-failure.patch
Patch0434: 0434-core-make-the-per-glusterfs_ctx_t-timer-wheel-refcou.patch
Patch0435: 0435-nl-cache-free-nlc_conf_t-in-fini.patch
Patch0436: 0436-nl-cache-Fix-the-issue-in-refd_inode-counting-and-pr.patch
Patch0437: 0437-afr-propagate-correct-errno-for-fop-failures-in-arbi.patch
Patch0438: 0438-afr-send-the-correct-iatt-values-in-fsync-cbk.patch
Patch0439: 0439-nfs-nlm-unref-rpc-client-after-nlm4svc_send_granted.patch
Patch0440: 0440-nfs-nlm-ignore-notify-when-there-is-no-matching-rpc-.patch
Patch0441: 0441-nfs-nlm-log-the-caller_name-if-nlm_client_t-can-be-f.patch
Patch0442: 0442-nfs-nlm-free-the-nlm_client-upon-RPC_DISCONNECT.patch
Patch0443: 0443-nfs-nlm-remove-lock-request-from-the-list-after-canc.patch
Patch0444: 0444-build-Remove-throttle-related-files-from-make-fille.patch
Patch0445: 0445-refcount-return-pointer-to-the-structure-instead-of-.patch
Patch0446: 0446-refcount-typecast-function-for-calling-on-free.patch
Patch0447: 0447-features-changelog-Add-xattr-as-special-type-in-chan.patch
Patch0448: 0448-gfapi-fix-handling-of-dot-and-double-dot-in-path.patch
Patch0449: 0449-tests-gfapi-Adding-testcase-to-check-handling-of-.-a.patch
Patch0450: 0450-common-ha-adding-a-node-to-existing-cluster-failed-t.patch
Patch0451: 0451-cluster-dht-Fix-crash-in-dht-rmdir.patch
Patch0452: 0452-nl-cache-Remove-the-max-limit-for-nl-cache-limit-and.patch
Patch0453: 0453-socket-Avoid-flooding-of-error-message-in-case-of-SS.patch
Patch0454: 0454-cluster-dht-initialize-throttle-option-normal-to-sam.patch
Patch0455: 0455-dht-rebalance-Crawler-performance-improvement.patch
Patch0456: 0456-geo-rep-Improve-worker-log-messages.patch
Patch0457: 0457-extras-Provide-group-set-for-gluster-block-workloads.patch
Patch0458: 0458-cluster-dht-Rebalance-on-all-nodes-should-migrate-fi.patch
Patch0459: 0459-cluster-afr-Return-the-list-of-node_uuids-for-the-su.patch
Patch0460: 0460-cluster-ec-return-all-node-uuids-from-all-subvolumes.patch
Patch0461: 0461-glusterd-Don-t-spawn-new-glusterfsds-on-node-reboot-.patch
Patch0462: 0462-cluster-dht-Fix-crash-in-dht_selfheal_dir_setattr.patch
Patch0463: 0463-cluster-dht-correct-space-check-for-rebalance.patch
Patch0464: 0464-rda-glusterd-Change-the-max-of-rda-cache-limit-to-IN.patch
Patch0465: 0465-nl-cache-In-case-of-nameless-operations-do-not-cache.patch
Patch0466: 0466-glusterd-volume-profile-command-on-one-of-the-node-c.patch
Patch0467: 0467-cluster-ec-Implement-FALLOCATE-FOP-for-EC.patch
Patch0468: 0468-storage-posix-Execute-syscalls-in-xattrop-under-diff.patch
Patch0469: 0469-storage-posix-Set-ret-value-correctly-before-exiting.patch
Patch0470: 0470-storage-posix-Use-more-granular-mutex-locks-for-atom.patch
Patch0471: 0471-storage-posix-Use-granular-mutex-locks-for-pgfid-upd.patch
Patch0472: 0472-protocol-server-capture-offset-in-seek.patch
Patch0473: 0473-glusterd-Eliminate-race-in-brick-compatibility-check.patch
Patch0474: 0474-features-bitrot-Fix-glusterfsd-crash.patch
Patch0475: 0475-libglusterfs-extract-some-functionality-to-functions.patch
Patch0476: 0476-fuse-implement-oauto_unmount.patch
Patch0477: 0477-cluster-dht-fix-on-demand-migration-files-from-clien.patch
Patch0478: 0478-glusterd-do-not-load-io-threads-for-replica-volume-b.patch
Patch0479: 0479-event-epoll-Add-back-socket-for-polling-of-events-im.patch
Patch0480: 0480-features-shard-Handle-offset-in-appending-writes.patch
Patch0481: 0481-glusterfsd-process-attach-and-detach-request-inside-.patch
Patch0482: 0482-perf-ioc-Fix-race-causing-crash-when-accessing-freed.patch
Patch0483: 0483-terfs-Not-able-to-mount-running-volume-after-enable-.patch
Patch0484: 0484-afr-do-not-mention-split-brain-in-log-message-in-rea.patch
Patch0485: 0485-afr-add-errno-to-afr_inode_refresh_done.patch
Patch0486: 0486-posix-use-the-correct-op_errno.patch
Patch0487: 0487-cli-add-all-option-in-gluster-v-get-cli-usage.patch
Patch0488: 0488-glusterd-Fix-regression-wrt-add-brick-on-replica-cou.patch
Patch0489: 0489-glusterd-fix-brick-start-race.patch
Patch0490: 0490-glusterd-fix-glusterd-crash-from-glusterd_op_ac_rcvd.patch
Patch0491: 0491-cluster-ec-Update-xattr-and-heal-size-properly.patch
Patch0492: 0492-glusterfsd-Sometime-brick-process-is-crashed-after-e.patch
Patch0493: 0493-nl-cache-Remove-null-check-validation-for-frame-loca.patch
Patch0494: 0494-common-ha-enable-pacemaker-at-end-of-setup.patch
Patch0495: 0495-glusterfsd-Deletion-of-brick-dir-throw-emerg-msgs-af.patch
Patch0496: 0496-tools-glusterfind-add-field-separator-option.patch
Patch0497: 0497-tools-glusterfind-add-end-time-option.patch
Patch0498: 0498-geo-rep-Fix-ConfigInterface-Template-issue.patch
Patch0499: 0499-geo-rep-Fix-string-format-issue-caused-due-to-17489.patch
Patch0500: 0500-socket-reconfigure-reconfigure-should-be-done-on-new.patch
Patch0501: 0501-gfapi-change-root-lookup-from-nameless-to-named-look.patch
Patch0502: 0502-upcall-Update-the-access-time-in-missing-cases.patch
Patch0503: 0503-readdir-ahead-Fix-duplicate-listing-and-cache-size-c.patch
Patch0504: 0504-nl-cache-add-group-volume-set-option-for-ease-of-use.patch
Patch0505: 0505-nl-cache-Fix-a-possible-crash-and-stale-cache.patch
Patch0506: 0506-cluster-dht-Include-dirs-in-rebalance-estimates.patch
Patch0507: 0507-protocol-server-make-listen-backlog-value-as-configu.patch
Patch0508: 0508-glusterd-log-stale-rpc-disconnects-occasionally.patch
Patch0509: 0509-posix-acl-Whitelist-virtual-ACL-xattrs.patch
Patch0510: 0510-common-ha-Fix-an-incorrect-syntax-during-setup.patch
Patch0511: 0511-op-version-Change-max-op-version-to-GD_OP_VERSION_3_.patch
Patch0512: 0512-glusterd-fix-crash-on-statedump-when-no-volumes-are-.patch
Patch0513: 0513-common-ha-surviving-ganesha.nfsd-not-put-in-grace-on.patch
Patch0514: 0514-cluster-ec-lk-shouldn-t-be-a-transaction.patch
Patch0515: 0515-index-Do-not-proceed-with-init-if-brick-is-not-mount.patch
Patch0516: 0516-posix-Revert-modifying-op_errno-in-__posix_fd_ctx_ge.patch
Patch0517: 0517-geo-rep-Fix-worker-stuck-in-loop.patch
Patch0518: 0518-feature-bitrot-Fix-ondemand-scrub.patch
Patch0519: 0519-cluster-dht-Fix-dict_leak-in-migration-check-tasks.patch
Patch0520: 0520-cluster-afr-Implement-quorum-for-lk-fop.patch
Patch0521: 0521-tools-glusterfind-initialize-variable-end.patch
Patch0522: 0522-gfapi-Resolve-.-and-.-only-for-named-lookups.patch
Patch0523: 0523-scripts-shared_storage-systemd-helper-scripts-to-mou.patch
Patch0524: 0524-feature-changelog-Fix-buffer-overflow-crash.patch
Patch0525: 0525-build-exclude-glusterfssharedstorage.service-and-mou.patch
Patch0526: 0526-cluster-dht-Additional-checks-for-rebalance-estimate.patch
Patch0527: 0527-dht-hardlink-Remove-stale-linkto-file-incase-of-fail.patch
Patch0528: 0528-common-ha-enable-and-disable-selinux-ganesha_use_fus.patch
Patch0529: 0529-cluster-dht-rebalance-gets-file-count-periodically.patch
Patch0530: 0530-build-remove-ganesha-dependency-on-selinux-policy.patch
Patch0531: 0531-glusterd-brick-process-fails-to-restart-after-gluste.patch
Patch0532: 0532-ec-Increase-notification-in-all-the-cases.patch
Patch0533: 0533-glusterd-mark-brickinfo-to-started-on-successful-att.patch
Patch0534: 0534-features-shard-Remove-ctx-from-LRU-in-shard_forget.patch
Patch0535: 0535-cluster-afr-Returning-single-and-list-of-node-uuids-.patch
Patch0536: 0536-libglusterfs-Fix-crash-in-glusterd-while-peer-probin.patch
Patch0537: 0537-cluster-ec-Node-uuid-xattr-support-update-for-EC.patch
Patch0538: 0538-cluster-rebalance-Use-GF_XATTR_LIST_NODE_UUIDS_KEY-t.patch
Patch0539: 0539-tools-glusterfind-unquote-DELETE-path-before-further.patch
Patch0540: 0540-cluster-dht-Fix-crash-in-dht_rename_lock_cbk.patch
Patch0541: 0541-build-make-gf_attach-available-in-glusterfs-server.patch
Patch0542: 0542-cli-xml-fix-return-handling.patch
Patch0543: 0543-cluster-ec-Don-t-try-to-heal-when-no-sink-is-UP.patch
Patch0544: 0544-geo-rep-Fix-entry-failure-because-parent-dir-doesn-t.patch
Patch0545: 0545-groups-don-t-allocate-auxiliary-gid-list-on-stack.patch
Patch0546: 0546-nfs-make-nfs3_call_state_t-refcounted.patch
Patch0547: 0547-nfs-nlm-unref-fds-in-nlm_client_free.patch
Patch0548: 0548-nfs-nlm-handle-reconnect-for-non-NLM4_LOCK-requests.patch
Patch0549: 0549-nfs-nlm-use-refcounting-for-nfs3_call_state_t.patch
Patch0550: 0550-nfs-nlm-keep-track-of-the-call-state-and-frame-for-n.patch
Patch0551: 0551-nfs-add-permission-checking-for-mounting-over-WebNFS.patch
Patch0552: 0552-glusterd-Introduce-option-to-limit-no.-of-muxed-bric.patch
Patch0553: 0553-glusterd-Revert-op-version-for-cluster.max-brick-per.patch
Patch0554: 0554-svs-implement-CHILD-UP-notify-in-snapview-server.patch
Patch0555: 0555-svc-send-revalidate-lookup-on-special-dir.patch
Patch0556: 0556-uss-svc-fix-double-free-on-xdata-dictionary.patch
Patch0557: 0557-mgtm-core-use-sha-hash-function-for-volfile-check.patch
Patch0558: 0558-cluster-dht-Use-size-to-calculate-estimates.patch
Patch0559: 0559-cluster-dht-Check-if-fd-is-opened-on-dst-subvol.patch
Patch0560: 0560-cluster-dht-Fix-fd-check-race.patch
Patch0561: 0561-extras-Enable-stat-prefetch-in-virt-profile.patch
Patch0562: 0562-cluster-dht-Clear-clean_dst-flag-on-target-change.patch
Patch0563: 0563-posix-brick-process-crash-after-stop-the-volume-whil.patch
Patch0564: 0564-cluster-ec-Non-disruptive-upgrade-on-EC-volume-fails.patch
Patch0565: 0565-cluster-rebalance-Fix-hardlink-migration-failures.patch
Patch0566: 0566-systemd-glusterfssharedstorage-remove-dependency-for.patch
Patch0567: 0567-geo-rep-Fix-worker-crash-during-RMDIR.patch
Patch0568: 0568-glusterd-Add-description-field-to-global-options-for.patch
Patch0569: 0569-glusterd-Set-default-value-for-cluster.max-bricks-pe.patch
Patch0570: 0570-glusterd-fix-brick-start-race.patch
Patch0571: 0571-cluster-dht-Fixed-crash-in-dht_rmdir_is_subvol_empty.patch
Patch0572: 0572-cluster-dht-Correct-iterator-for-decommissioned-bric.patch
Patch0573: 0573-cluster-dht-Update-size-processed-for-non-migrated-f.patch
Patch0574: 0574-cluster-dht-Fix-negative-rebalance-estimates.patch
Patch0575: 0575-performance-io-cache-update-inode-contexts-of-each-e.patch
Patch0576: 0576-cluster-dht-change-log-level-to-debug-for-thread-act.patch
Patch0577: 0577-changetimerecorder-Brick-process-crashed-at-ctr-xlat.patch
Patch0578: 0578-geo-rep-Fix-syncing-of-self-healed-hardlinks.patch
Patch0579: 0579-geo-rep-Fix-worker-crash-during-rmdir.patch
Patch0580: 0580-ec-cluster-Update-failure-of-fop-on-a-brick-properly.patch
Patch0581: 0581-cluster-dht-rebalance-min-free-disk-fix.patch
Patch0582: 0582-cluster-dht-Handle-wrong-rebalance-status-reporting.patch
Patch0583: 0583-libglusterfs-the-global_xlator-should-have-valid-cbk.patch
Patch0584: 0584-nfs-use-as-subdir-for-volume-mounts.patch
Patch0585: 0585-extras-Disable-remote-dio-in-gluster-block-profile.patch
Patch0586: 0586-group-gluster-block-Set-default-shard-block-size-to-.patch
Patch0587: 0587-extras-Turn-eager-lock-off-for-gluster-block.patch
Patch0588: 0588-common-ha-enable-and-disable-selinux-gluster_use_exe.patch
Patch0589: 0589-libglusterfs-add-mem_pools_fini.patch
Patch0590: 0590-gfapi-add-mem_pools_init-and-mem_pools_fini-calls.patch
Patch0591: 0591-gfapi-libglusterfs-fix-mem_pools_fini-without-mem_po.patch
Patch0592: 0592-nfs-improve-error-handling-for-WebNFS-mount-permissi.patch
Patch0593: 0593-packaging-var-lib-glusterd-options-should-be-config-.patch
Patch0594: 0594-cluster-dht-Check-for-open-fd-only-on-EBADF.patch
Patch0595: 0595-afr-Prevent-null-gfids-in-self-heal-entry-re-creatio.patch
Patch0596: 0596-posix-add-null-gfid-checks.patch
Patch0597: 0597-nfs-add-NULL-check-for-call-state-in-nfs3_call_state.patch
Patch0598: 0598-cluster-dht-EBADF-handling-for-fremovexattr-and-fset.patch
Patch0599: 0599-gfapi-Duplicate-the-buffer-sent-in-setxattr-calls.patch
Patch0600: 0600-glusterd-Block-brick-attach-request-till-the-brick-s.patch
Patch0601: 0601-cli-Add-message-for-user-before-modifying-brick-mult.patch
Patch0602: 0602-glusterd-Gluster-should-keep-PID-file-in-correct-loc.patch
Patch0603: 0603-uss-svc-Send-the-first-lookup-to-the-normal-graph.patch
Patch0604: 0604-api-memory-leak-in-glfs_h_acl_get-missing-dict-unref.patch
Patch0605: 0605-glusterd-disable-rpc_clnt_t-after-relalance-process-.patch
Patch0606: 0606-gluster-block-strict-o-direct-should-be-on.patch
# Patch0607: 0607-Update-rfc.sh-to-rhgs-3.3.1.patch
Patch0608: 0608-features-shard-Increment-counts-in-locks.patch
Patch0609: 0609-glusterd-highlight-arbiter-brick-in-get-state.patch
Patch0610: 0610-glusterd-add-rebal-estimates-time-in-get-state.patch
Patch0611: 0611-features-shard-Return-aggregated-size-in-stbuf-of-LI.patch
Patch0612: 0612-glusterd-Add-client-details-to-get-state-output.patch
Patch0613: 0613-glusterd-Add-brick-capacity-details-to-get-state-CLI.patch
Patch0614: 0614-glusterd-Add-option-to-get-all-volume-options-throug.patch
Patch0615: 0615-io-stats-use-gf_atomic_t-instead-of-partial-atomic-v.patch
Patch0616: 0616-glusterd-Add-geo-replication-session-details-to-get-.patch
Patch0617: 0617-features-shard-Change-default-shard-block-size-to-64.patch
Patch0618: 0618-events-Add-brick-list-in-EVENT_VOLUME_CREATE.patch
Patch0619: 0619-features-locks-Fix-leak-of-posix_lock_t-s-client_uid.patch
Patch0620: 0620-features-locks-Maintain-separation-of-lock-client_pi.patch
Patch0621: 0621-glusterfsd-allow-subdir-mount.patch
Patch0622: 0622-mount-fuse-Include-sub-directory-in-source-argument-.patch
Patch0623: 0623-mount.glusterfs-fix-the-syntax-error.patch
Patch0624: 0624-build-launch-glusterd-upgrade-after-all-new-bits-are.patch
Patch0625: 0625-mount-fuse-Fix-parsing-of-vol_id-for-snapshot-volume.patch
Patch0626: 0626-protocol-auth-use-the-proper-validation-method.patch
Patch0627: 0627-protocol-server-fix-the-comparision-logic-in-case-of.patch
Patch0628: 0628-protocol-client-handle-the-subdir-handshake-properly.patch
Patch0629: 0629-fuse-fix-the-read-only-mount-flag-issue.patch
Patch0630: 0630-glusterd-delete-source-brick-only-once-in-reset-bric.patch
Patch0631: 0631-glusterd-persist-brickinfo-s-port-change-into-gluste.patch
Patch0632: 0632-build-remove-pretrans-script-for-python-gluster.patch

%description
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package includes the glusterfs binary, the glusterfsd daemon and the
libglusterfs and glusterfs translator modules common to both GlusterFS server
and client framework.

%package api
Summary:          GlusterFS api library
Group:            System Environment/Daemons
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}

%description api
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the glusterfs libgfapi library.

%package api-devel
Summary:          Development Libraries
Group:            Development/Libraries
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-devel%{?_isa} = %{version}-%{release}
Requires:         libacl-devel

%description api-devel
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the api include files.

%package cli
Summary:          GlusterFS CLI
Group:            Applications/File
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}

%description cli
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the GlusterFS CLI application and its man page

%package devel
Summary:          Development Libraries
Group:            Development/Libraries
Requires:         %{name}%{?_isa} = %{version}-%{release}
# Needed for the Glupy examples to work
%if ( 0%{!?_without_extra_xlators:1} )
Requires:         %{name}-extra-xlators = %{version}-%{release}
%endif

%description devel
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the development libraries and include files.

%if ( 0%{!?_without_extra_xlators:1} )
%package extra-xlators
Summary:          Extra Gluster filesystem Translators
Group:            Applications/File
# We need python-gluster rpm for gluster module's __init__.py in Python
# site-packages area
Requires:         python-gluster = %{version}-%{release}
Requires:         python python-ctypes

%description extra-xlators
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides extra filesystem Translators, such as Glupy,
for GlusterFS.
%endif

%package fuse
Summary:          Fuse client
Group:            Applications/File
BuildRequires:    fuse-devel
Requires:         attr
Requires:         psmisc

Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}

Obsoletes:        %{name}-client < %{version}-%{release}
Provides:         %{name}-client = %{version}-%{release}

%description fuse
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support to FUSE based clients and inlcudes the
glusterfs(d) binary.

%if ( 0%{?_build_server} )
%package ganesha
Summary:          NFS-Ganesha configuration
Group:            Applications/File

Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         nfs-ganesha-gluster >= 2.4.0
Requires:         pcs, dbus
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
Requires:         cman, pacemaker, corosync
%endif
%if ( ( 0%{?fedora} && 0%{?fedora} > 25 ) || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%if ( 0%{?rhel} )
Requires(post):   policycoreutils-python
Requires(postun): policycoreutils-python
%else
Requires(post):   policycoreutils-python-utils
Requires(postun): policycoreutils-python-utils
%endif
%endif
%if ( ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 5 ) )
# we need portblock resource-agent in 3.9.5 and later.
Requires:         resource-agents >= 3.9.5
Requires:         net-tools
%endif

%description ganesha
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the configuration and related files for using
NFS-Ganesha as the NFS server using GlusterFS
%endif

%if ( 0%{?_build_server} )
%if ( 0%{!?_without_georeplication:1} )
%package geo-replication
Summary:          GlusterFS Geo-replication
Group:            Applications/File
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         python python-ctypes
Requires:         rsync

%description geo-replication
GlusterFS is a distributed file-system capable of scaling to several
peta-bytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file system in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in userspace and easily manageable.

This package provides support to geo-replication.
%endif
%endif

%package libs
Summary:          GlusterFS common libraries
Group:            Applications/File

%description libs
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the base GlusterFS libraries

%package -n python-gluster
Summary:          GlusterFS python library
Group:            Development/Tools
%if ( ! ( 0%{?rhel} && 0%{?rhel} < 6 || 0%{?sles_version} ) )
# EL5 does not support noarch sub-packages
BuildArch:        noarch
%endif
Requires:         python

%description -n python-gluster
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package contains the python modules of GlusterFS and own gluster
namespace.


%if ( 0%{!?_without_rdma:1} )
%package rdma
Summary:          GlusterFS rdma support for ib-verbs
Group:            Applications/File
BuildRequires:    libibverbs-devel
BuildRequires:    librdmacm-devel >= 1.0.15
Requires:         %{name}%{?_isa} = %{version}-%{release}

%description rdma
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides support to ib-verbs library.
%endif

%if ( 0%{?_build_server} )
%if ( 0%{!?_without_regression_tests:1} )
%package regression-tests
Summary:          Development Tools
Group:            Development/Tools
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
## thin provisioning support
Requires:         lvm2 >= 2.02.89
Requires:         perl(App::Prove) perl(Test::Harness) gcc util-linux-ng
Requires:         python attr dbench file git libacl-devel net-tools
Requires:         nfs-utils xfsprogs yajl

%description regression-tests
The Gluster Test Framework, is a suite of scripts used for
regression testing of Gluster.
%endif
%endif

%if ( 0%{?_build_server} )
%if ( 0%{!?_without_ocf:1} )
%package resource-agents
Summary:          OCF Resource Agents for GlusterFS
License:          GPLv3+
%if ( ! ( 0%{?rhel} && 0%{?rhel} < 6 || 0%{?sles_version} ) )
# EL5 does not support noarch sub-packages
BuildArch:        noarch
%endif
# this Group handling comes from the Fedora resource-agents package
%if ( 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} )
Group:            System Environment/Base
%else
Group:            Productivity/Clustering/HA
%endif
# for glusterd
Requires:         %{name}-server = %{version}-%{release}
# depending on the distribution, we need pacemaker or resource-agents
Requires:         %{_prefix}/lib/ocf/resource.d

%description resource-agents
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the resource agents which plug glusterd into
Open Cluster Framework (OCF) compliant cluster resource managers,
like Pacemaker.
%endif
%endif

%if ( 0%{?_build_server} )
%package server
Summary:          Clustered file-system server
Group:            System Environment/Daemons
Requires:         %{name}%{?_isa} = %{version}-%{release}
Requires:         %{name}-cli%{?_isa} = %{version}-%{release}
Requires:         %{name}-libs%{?_isa} = %{version}-%{release}
# some daemons (like quota) use a fuse-mount, glusterfsd is part of -fuse
Requires:         %{name}-fuse%{?_isa} = %{version}-%{release}
# self-heal daemon, rebalance, nfs-server etc. are actually clients
Requires:         %{name}-api%{?_isa} = %{version}-%{release}
Requires:         %{name}-client-xlators%{?_isa} = %{version}-%{release}
# psmisc for killall, lvm2 for snapshot, and nfs-utils and
# rpcbind/portmap for gnfs server
Requires:         psmisc
Requires:         lvm2
Requires:         nfs-utils
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%else
Requires(post):   /sbin/chkconfig
Requires(preun):  /sbin/service
Requires(preun):  /sbin/chkconfig
Requires(postun): /sbin/service
%endif
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
Requires:         rpcbind
%else
Requires:         portmap
%endif
%if ( 0%{?rhel} && 0%{?rhel} < 6 )
Obsoletes:        %{name}-geo-replication = %{version}-%{release}
%endif
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
Requires:         python-argparse
%endif
Requires:         pyxattr

%description server
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the glusterfs server daemon.
%endif

%package client-xlators
Summary:          GlusterFS client-side translators
Group:            Applications/File

%description client-xlators
GlusterFS is a distributed file-system capable of scaling to several
petabytes. It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility.  It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the translators needed on any GlusterFS client.

%if 0%{?_build_server}
%if ( 0%{!?_without_events:1} )
%package events
Summary:          GlusterFS Events
Group:            Applications/File
Requires:         %{name}-server%{?_isa} = %{version}-%{release}
Requires:         python python-requests python-prettytable
Requires:         python-gluster = %{version}-%{release}
%if ( 0%{?rhel} && 0%{?rhel} <= 6 )
Requires:         python-argparse
%endif
%if ( 0%{?_with_systemd:1} )
%{?systemd_requires}
%endif

%description events
GlusterFS Events

%endif
%endif

%prep
%setup -q -n %{name}-%{version}%{?prereltag}

# sanitization scriptlet for patches with file renames
ls %{_topdir}/SOURCES/*.patch | sort | \
while read p
do
    # if the destination file exists, its most probably stale
    # so we must remove it
    rename_to=( $(grep -i 'rename to' $p | cut -f 3 -d ' ') )
    if [ ${#rename_to[*]} -gt 0 ]; then
        for f in ${rename_to[*]}
        do
            if [ -f $f ]; then
                rm -f $f
            elif [ -d $f ]; then
                rm -rf $f
            fi
        done
    fi

    SOURCE_FILES=( $(egrep '^\-\-\- a/' $p | cut -f 2- -d '/') )
    DEST_FILES=( $(egrep '^\+\+\+ b/' $p | cut -f 2- -d '/') )
    EXCLUDE_DOCS=()
    for idx in ${!SOURCE_FILES[@]}; do
        # skip the doc 
        source_file=${SOURCE_FILES[$idx]}
        dest_file=${DEST_FILES[$idx]}
        if [[ "$dest_file" =~ ^doc/.+ ]]; then
            if [ "$source_file" != "dev/null" ] && [ ! -f "$dest_file" ]; then
                # if patch is being applied to a doc file and if the doc file
                # hasn't been added so far then we need to exclude it
                EXCLUDE_DOCS=( ${EXCLUDE_DOCS[*]} "$dest_file" )
            fi
        fi
    done
    EXCLUDE_DOCS_OPT=""
    for doc in ${EXCLUDE_DOCS}; do
        EXCLUDE_DOCS_OPT="--exclude=$doc $EXCLUDE_DOCS_OPT"
    done
    # apply the patch with 'git apply'
    git apply -p1 --exclude=rfc.sh \
                  --exclude=.gitignore \
                  --exclude=MAINTAINERS \
                  --exclude=extras/checkpatch.pl \
                  --exclude=build-aux/checkpatch.pl \
                  --exclude='tests/*' \
                  ${EXCLUDE_DOCS_OPT} \
                  $p
done


%build
%if ( 0%{?rhel} && 0%{?rhel} < 6 )
CFLAGS=-DUSE_INSECURE_OPENSSL
export CFLAGS
%endif
# In RHEL7 few hardening flags are available by default, however the RELRO
# default behaviour is partial, convert to full
%if ( 0%{?rhel} && 0%{?rhel} >= 7 )
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
export LDFLAGS
%else
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
CFLAGS="$RPM_OPT_FLAGS -fPIE -DPIE"
LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro,-z,now"
%else
#It appears that with gcc-4.1.2 in RHEL5 there is an issue using both -fPIC and
 # -fPIE that makes -z relro not work; -fPIE seems to undo what -fPIC does
CFLAGS="$CFLAGS $RPM_OPT_FLAGS"
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
%endif
export CFLAGS
export LDFLAGS
%endif

./autogen.sh && %configure \
        %{?_with_cmocka} \
        %{?_with_debug} \
        %{?_with_tmpfilesdir} \
        %{?_without_bd} \
        %{?_without_epoll} \
        %{?_without_fusermount} \
        %{?_without_georeplication} \
        %{?_with_firewalld} \
        %{?_without_ocf} \
        %{?_without_rdma} \
        %{?_without_syslog} \
        %{?_without_tiering} \
        %{?_without_events}

# fix hardening and remove rpath in shlibs
%if ( 0%{?fedora} && 0%{?fedora} > 17 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
sed -i 's| \\\$compiler_flags |&\\\$LDFLAGS |' libtool
%endif
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|' libtool
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|' libtool

make %{?_smp_mflags}

%check
make check

%install
rm -rf %{buildroot}
make install DESTDIR=%{buildroot}
# Install include directory
install -p -m 0644 contrib/uuid/*.h \
    %{buildroot}%{_includedir}/glusterfs/
%if ( 0%{_for_fedora_koji_builds} )
install -D -p -m 0644 %{SOURCE1} \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
install -D -p -m 0644 %{SOURCE2} \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterfsd
%else
install -D -p -m 0644 extras/glusterd-sysconfig \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
%endif

%if ( 0%{_for_fedora_koji_builds} )
%if ( 0%{?rhel} && 0%{?rhel} <= 5 )
install -D -p -m 0755 %{SOURCE6} \
    %{buildroot}%{_sysconfdir}/sysconfig/modules/glusterfs-fuse.modules
%endif
%endif

mkdir -p %{buildroot}%{_localstatedir}/log/glusterd
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfs
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfsd
mkdir -p %{buildroot}%{_localstatedir}/run/gluster

# Remove unwanted files from all the shared libraries
find %{buildroot}%{_libdir} -name '*.a' -delete
find %{buildroot}%{_libdir} -name '*.la' -delete

# Remove installed docs, the ones we want are included by %%doc, in
# /usr/share/doc/glusterfs or /usr/share/doc/glusterfs-x.y.z depending
# on the distribution
%if ( 0%{?fedora} && 0%{?fedora} > 19 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
rm -rf %{buildroot}%{_pkgdocdir}/*
%else
rm -rf %{buildroot}%{_defaultdocdir}/%{name}
mkdir -p %{buildroot}%{_pkgdocdir}
%endif
head -50 ChangeLog > ChangeLog.head && mv ChangeLog.head ChangeLog
cat << EOM >> ChangeLog

More commit messages for this ChangeLog can be found at
https://forge.gluster.org/glusterfs-core/glusterfs/commits/v%{version}%{?prereltag}
EOM

# Remove benchmarking and other unpackaged files
%if ( 0%{?rhel} && 0%{?rhel} < 6 )
rm -rf %{buildroot}/benchmarking
rm -f %{buildroot}/glusterfs-mode.el
rm -f %{buildroot}/glusterfs.vim
%else
# make install always puts these in %%{_defaultdocdir}/%%{name} so don't
# use %%{_pkgdocdir}; that will be wrong on later Fedora distributions
rm -rf %{buildroot}%{_defaultdocdir}/%{name}/benchmarking
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs-mode.el
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs.vim
%endif

# Create working directory
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd

# Update configuration file to /var/lib working directory
sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sharedstatedir}/glusterd|g' \
    %{buildroot}%{_sysconfdir}/glusterfs/glusterd.vol

# Install glusterfsd .service or init.d file
%if ( 0%{_for_fedora_koji_builds} )
%_init_install %{glusterfsd_service} glusterfsd
%endif

install -D -p -m 0644 extras/glusterfs-logrotate \
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs

%if ( 0%{!?_without_georeplication:1} )
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
install -D -p -m 0644 extras/glusterfs-georep-logrotate \
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
%endif

touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
touch %{buildroot}%{_sharedstatedir}/glusterd/options
subdirs=(add-brick create copy-file delete gsync-create remove-brick reset set start stop)
for dir in ${subdirs[@]}; do
    mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/"$dir"/{pre,post}
done
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/glustershd
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/peers
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/vols
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/nfs/run
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/bitd
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/quotad
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/scrub
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/snaps
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/ss_brick
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/nfs-server.vol
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/run/nfs.pid

find ./tests ./run-tests.sh -type f | cpio -pd %{buildroot}%{_prefix}/share/glusterfs

## Install bash completion for cli
install -p -m 0744 -D extras/command-completion/gluster.bash \
    %{buildroot}%{_sysconfdir}/bash_completion.d/gluster

%clean
rm -rf %{buildroot}

##-----------------------------------------------------------------------------
## All %%post should be placed here and keep them sorted
##
%post
/sbin/ldconfig
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%_init_restart rsyslog
%endif
%endif
exit 0

%post api
/sbin/ldconfig

%if ( 0%{?_build_server} )
%if ( 0%{!?_without_events:1} )
%post events
%_init_restart glustereventsd
%endif
%endif

%if ( 0%{?rhel} == 5 )
%post fuse
modprobe fuse
exit 0
%endif

%if ( 0%{?_build_server} )
%if ( ( 0%{?fedora} && 0%{?fedora} > 25 ) || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%post ganesha
semanage boolean -m ganesha_use_fusefs --on
exit 0
%endif
%endif

%if ( 0%{?_build_server} )
%if ( 0%{!?_without_georeplication:1} )
%post geo-replication
if [ $1 -ge 1 ]; then
    %_init_restart glusterd
fi
exit 0
%endif
%endif

%post libs
/sbin/ldconfig

%if ( 0%{?_build_server} )
%post server
# Legacy server
%_init_enable glusterd
%if ( 0%{_for_fedora_koji_builds} )
%_init_enable glusterfsd
%endif
# ".cmd_log_history" is renamed to "cmd_history.log" in GlusterFS-3.7 .
# While upgrading glusterfs-server package form GlusterFS version <= 3.6 to
# GlusterFS version 3.7, ".cmd_log_history" should be renamed to
# "cmd_history.log" to retain cli command history contents.
if [ -f %{_localstatedir}/log/glusterfs/.cmd_log_history ]; then
    mv %{_localstatedir}/log/glusterfs/.cmd_log_history \
       %{_localstatedir}/log/glusterfs/cmd_history.log
fi

# Genuine Fedora (and EPEL) builds never put gluster files in /etc; if
# there are any files in /etc from a prior gluster.org install, move them
# to /var/lib. (N.B. Starting with 3.3.0 all gluster files are in /var/lib
# in gluster.org RPMs.) Be careful to copy them on the off chance that
# /etc and /var/lib are on separate file systems
if [ -d /etc/glusterd -a ! -h %{_sharedstatedir}/glusterd ]; then
    mkdir -p %{_sharedstatedir}/glusterd
    cp -a /etc/glusterd %{_sharedstatedir}/glusterd
    rm -rf /etc/glusterd
    ln -sf %{_sharedstatedir}/glusterd /etc/glusterd
fi

# Rename old volfiles in an RPM-standard way.  These aren't actually
# considered package config files, so %%config doesn't work for them.
if [ -d %{_sharedstatedir}/glusterd/vols ]; then
    for file in $(find %{_sharedstatedir}/glusterd/vols -name '*.vol'); do
        newfile=${file}.rpmsave
        echo "warning: ${file} saved as ${newfile}"
        cp ${file} ${newfile}
    done
fi

# add marker translator
# but first make certain that there are no old libs around to bite us
# BZ 834847
if [ -e /etc/ld.so.conf.d/glusterfs.conf ]; then
    rm -f /etc/ld.so.conf.d/glusterfs.conf
    /sbin/ldconfig
fi

%if (0%{?_with_firewalld:1})
#reload service files if firewalld running
if $(systemctl is-active firewalld 1>/dev/null 2>&1); then
  #firewalld-filesystem is not available for rhel7, so command used for reload.
  firewall-cmd  --reload 1>/dev/null 2>&1
fi
%endif

%endif

##-----------------------------------------------------------------------------
## All %%pre should be placed here and keep them sorted
##
%pre
getent group gluster > /dev/null || groupadd -r gluster
getent passwd gluster > /dev/null || useradd -r -g gluster -d /var/run/gluster -s /sbin/nologin -c "GlusterFS daemons" gluster
exit 0


##-----------------------------------------------------------------------------
## All %%preun should be placed here and keep them sorted
##
%if ( 0%{?_build_server} )
%if ( 0%{!?_without_events:1} )
%preun events
if [ $1 -eq 0 ]; then
    if [ -f %_init_glustereventsd ]; then
        %_init_stop glustereventsd
        %_init_disable glustereventsd
    fi
fi
exit 0
%endif
%endif

%if ( 0%{?_build_server} )
%preun server
if [ $1 -eq 0 ]; then
    if [ -f %_init_glusterfsd ]; then
        %_init_stop glusterfsd
    fi
    %_init_stop glusterd
    if [ -f %_init_glusterfsd ]; then
        %_init_disable glusterfsd
    fi
    %_init_disable glusterd
fi
if [ $1 -ge 1 ]; then
    if [ -f %_init_glusterfsd ]; then
        %_init_restart glusterfsd
    fi
    %_init_restart glusterd
fi
%endif

##-----------------------------------------------------------------------------
## All %%postun should be placed here and keep them sorted
##
%postun
/sbin/ldconfig
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%_init_restart rsyslog
%endif
%endif

%postun api
/sbin/ldconfig

%if ( 0%{?_build_server} )
%if ( ( 0%{?fedora} && 0%{?fedora} > 25 ) || ( 0%{?rhel} && 0%{?rhel} > 6 ) )
%postun ganesha
semanage boolean -m ganesha_use_fusefs --off
exit 0
%endif
%endif

%postun libs
/sbin/ldconfig

%if ( 0%{?_build_server} )
%postun server
/sbin/ldconfig
%if (0%{?_with_firewalld:1})
#reload service files if firewalld running
if $(systemctl is-active firewalld 1>/dev/null 2>&1); then
    firewall-cmd  --reload
fi
%endif
exit 0
%endif

##-----------------------------------------------------------------------------
## All %%files should be placed here and keep them grouped
##
%files
# exclude extra-xlators files
%if ( ! 0%{!?_without_extra_xlators:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/mac-compat.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_client.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_dht.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_server.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/features/template.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
%exclude %{python_sitelib}/*
%endif
# exclude regression-tests files
%if ( ! 0%{!?_without_regression_tests:1} )
%exclude %{_prefix}/share/glusterfs/run-tests.sh
%exclude %{_prefix}/share/glusterfs/tests/*
%endif
%if ( ! 0%{?_build_server} )
# exclude ganesha files
%exclude %{_sysconfdir}/ganesha/*
%exclude %{_libexecdir}/ganesha/*
%exclude %{_prefix}/lib/ocf/*
# exclude incrementalapi
%exclude %{_libexecdir}/glusterfs/*
%exclude %{_sbindir}/gfind_missing_files
%exclude %{_libexecdir}/glusterfs/glusterfind
%exclude %{_bindir}/glusterfind
%exclude %{_libexecdir}/glusterfs/peer_add_secret_pub
# exclude eventsapi files
%exclude %{_sysconfdir}/glusterfs/eventsconfig.json
%exclude %{_sharedstatedir}/glusterd/events
%exclude %{_libexecdir}/glusterfs/events
%exclude %{_libexecdir}/glusterfs/peer_eventsapi.py*
%exclude %{_sbindir}/glustereventsd
%exclude %{_sbindir}/gluster-eventsapi
%exclude %{_datadir}/glusterfs/scripts/eventsdash.py*
%if ( 0%{?_with_systemd:1} )
%exclude %{_unitdir}/glustereventsd.service
%exclude %_init_glusterfssharedstorage
%else
%exclude %{_sysconfdir}/init.d/glustereventsd
%endif
# exclude server files
%exclude %{_sharedstatedir}/glusterd/*
%exclude %{_sysconfdir}/glusterfs
%exclude %{_sysconfdir}/glusterfs/glusterd.vol
%exclude %{_sysconfdir}/glusterfs/glusterfs-georep-logrotate
%exclude %{_sysconfdir}/glusterfs/glusterfs-logrotate
%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-5.8.conf
%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-7.2.conf
%exclude %{_sysconfdir}/glusterfs/group-virt.example
%exclude %{_sysconfdir}/glusterfs/group-metadata-cache
%exclude %{_sysconfdir}/glusterfs/group-nl-cache
%exclude %{_sysconfdir}/glusterfs/group-gluster-block
%exclude %{_sysconfdir}/glusterfs/logger.conf.example
%exclude %_init_glusterd
%exclude %{_sysconfdir}/sysconfig/glusterd
%exclude %{_bindir}/glusterfind
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/pump.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
%if ( 0%{!?_without_tiering:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
%endif
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt*
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server*
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage*
%if ( 0%{!?_without_tiering:1} )
%exclude %{_libdir}/libgfdb.so.*
%endif
%exclude %{_sbindir}/gcron.py
%exclude %{_sbindir}/glfsheal
%exclude %{_sbindir}/glusterd
%exclude %{_sbindir}/gf_attach
%exclude %{_sbindir}/snap_scheduler.py
%exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
%if ( 0%{?_with_systemd:1} )
%exclude %{_libexecdir}/glusterfs/mount-shared-storage.sh
%endif
%exclude %{_sbindir}/conf.py*
%if 0%{?_tmpfilesdir:1}
%exclude %{_tmpfilesdir}/gluster.conf
%endif
%if ( 0%{?_with_firewalld:1} )
%exclude /usr/lib/firewalld/services/glusterfs.xml
%endif
%endif
%doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS
%{_mandir}/man8/*gluster*.8*
%exclude %{_mandir}/man8/gluster.8*
%dir %{_localstatedir}/log/glusterfs
%if ( 0%{!?_without_rdma:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif
%dir %{_datadir}/glusterfs/scripts
%{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh
%{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh
# xlators that are needed on the client- and on the server-side
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/auth
%{_libdir}/glusterfs/%{version}%{?prereltag}/auth/addr.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/auth/login.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport
%{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/socket.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/error-gen.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/io-stats.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/trace.so
%if ( ! ( 0%{?rhel} && 0%{?rhel} < 6 ) )
# RHEL-5 based distributions have a too old openssl
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/crypt.so
%endif
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/access-control.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/barrier.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cdc.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changelog.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/gfid-access.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/read-only.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/shard.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-client.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/worm.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/meta.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-cache.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-threads.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/md-cache.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/open-behind.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/quick-read.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/read-ahead.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/readdir-ahead.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/stat-prefetch.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/write-behind.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/nl-cache.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system/posix-acl.so
%dir %attr(0775,gluster,gluster) %{_localstatedir}/run/gluster
%if 0%{?_tmpfilesdir:1}
%{_tmpfilesdir}/gluster.conf
%endif

%files api
%exclude %{_libdir}/*.so
# libgfapi files
%{_libdir}/libgfapi.*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so

%files api-devel
%{_libdir}/pkgconfig/glusterfs-api.pc
%{_libdir}/libgfapi.so
%{_includedir}/glusterfs/api/*

%files cli
%{_sbindir}/gluster
%{_mandir}/man8/gluster.8*
%{_sysconfdir}/bash_completion.d/gluster

%files devel
%dir %{_includedir}/glusterfs
%{_includedir}/glusterfs/*
%exclude %{_includedir}/glusterfs/api
%exclude %{_libdir}/libgfapi.so
%if ( ! 0%{?_build_server} )
%exclude %{_libdir}/libgfchangelog.so
%endif
%if ( 0%{!?_without_tiering:1} && ! 0%{?_build_server})
%exclude %{_libdir}/libgfdb.so
%endif
%{_libdir}/*.so
# Glupy Translator examples
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy/debug-trace.*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy/helloworld.*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy/negative.*
%if ( 0%{?_build_server} )
%{_libdir}/pkgconfig/libgfchangelog.pc
%else
%exclude %{_libdir}/pkgconfig/libgfchangelog.pc
%endif
%if ( 0%{!?_without_tiering:1} && ! 0%{?_build_server})
%exclude %{_libdir}/libgfdb.so
%endif
%if ( 0%{!?_without_tiering:1} && 0%{?_build_server})
%{_libdir}/pkgconfig/libgfdb.pc
%else
%if ( 0%{?rhel} && 0%{?rhel} >= 6 )
%exclude %{_libdir}/pkgconfig/libgfdb.pc
%endif
%endif

%files client-xlators
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/*.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/pump.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so

%if ( 0%{!?_without_extra_xlators:1} )
%files extra-xlators
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/mac-compat.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_client.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_dht.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_server.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/features/template.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
# Glupy Python files
%{python_sitelib}/gluster/glupy/*
# Don't expect a .egg-info file on EL5
%if ( ! ( 0%{?rhel} && 0%{?rhel} < 6 ) )
%{python_sitelib}/glusterfs_glupy*.egg-info
%endif
%endif

%files fuse
# glusterfs is a symlink to glusterfsd, -server depends on -fuse.
%{_sbindir}/glusterfs
%{_sbindir}/glusterfsd
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/fuse.so
/sbin/mount.glusterfs
%if ( 0%{!?_without_fusermount:1} )
%{_bindir}/fusermount-glusterfs
%endif
%if ( 0%{_for_fedora_koji_builds} )
%if ( 0%{?rhel} && 0%{?rhel} <= 5 )
%{_sysconfdir}/sysconfig/modules/glusterfs-fuse.modules
%endif
%endif

%if ( 0%{?_build_server} )
%files ganesha
%{_sysconfdir}/ganesha/*
%{_libexecdir}/ganesha/*
%{_prefix}/lib/ocf/resource.d/heartbeat/*
%{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
%endif

%if ( 0%{?_build_server} )
%if ( 0%{!?_without_georeplication:1} )
%files geo-replication
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep

%{_sbindir}/gfind_missing_files
%{_sbindir}/gluster-mountbroker
%{_libexecdir}/glusterfs/gsyncd
%{_libexecdir}/glusterfs/python/syncdaemon/*
%{_libexecdir}/glusterfs/gverify.sh
%{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
%{_libexecdir}/glusterfs/peer_gsec_create
%{_libexecdir}/glusterfs/peer_mountbroker
%{_libexecdir}/glusterfs/peer_mountbroker.py*
%{_libexecdir}/glusterfs/gfind_missing_files

       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/geo-replication
%ghost      %attr(0644,-,-) %{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/gsync-create/pre

%{_datadir}/glusterfs/scripts/get-gfid.sh
%{_datadir}/glusterfs/scripts/slave-upgrade.sh
%{_datadir}/glusterfs/scripts/gsync-upgrade.sh
%{_datadir}/glusterfs/scripts/generate-gfid-file.sh
%{_datadir}/glusterfs/scripts/gsync-sync-gfid
%{_datadir}/glusterfs/scripts/schedule_georep.py*
%endif
%endif

%files libs
%{_libdir}/*.so.*
%exclude %{_libdir}/libgfapi.*
%if ( 0%{!?_without_tiering:1} )
# libgfdb is only needed server-side
%exclude %{_libdir}/libgfdb.*
%endif

%files -n python-gluster
# introducing glusterfs module in site packages.
# so that all other gluster submodules can reside in the same namespace.
%{python_sitelib}/gluster/__init__.*
%{python_sitelib}/gluster/cliutils

%if ( 0%{!?_without_rdma:1} )
%files rdma
%{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif

%if ( 0%{?_build_server} )
%if ( 0%{!?_without_regression_tests:1} )
%files regression-tests
%{_prefix}/share/glusterfs/run-tests.sh
%{_prefix}/share/glusterfs/tests
%exclude %{_prefix}/share/glusterfs/tests/basic/rpm.t
%exclude %{_prefix}/share/glusterfs/tests/vagrant
%endif
%endif

%if ( 0%{?_build_server} )
%if ( 0%{!?_without_ocf:1} )
%files resource-agents
# /usr/lib is the standard for OCF, also on x86_64
%{_prefix}/lib/ocf/resource.d/glusterfs
%endif
%endif

%if ( 0%{?_build_server} )
%files server
%exclude %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
%doc extras/clear_xattrs.sh
# sysconf
%config(noreplace) %{_sysconfdir}/glusterfs
%exclude %{_sysconfdir}/glusterfs/eventsconfig.json
%config(noreplace) %{_sysconfdir}/sysconfig/glusterd
%if ( 0%{_for_fedora_koji_builds} )
%config(noreplace) %{_sysconfdir}/sysconfig/glusterfsd
%endif

# init files
%_init_glusterd
%if ( 0%{_for_fedora_koji_builds} )
%_init_glusterfsd
%endif
%if ( 0%{?_with_systemd:1} )
%_init_glusterfssharedstorage
%endif

# binaries
%{_sbindir}/glusterd
%{_sbindir}/glfsheal
%{_sbindir}/gf_attach
# {_sbindir}/glusterfsd is the actual binary, but glusterfs (client) is a
# symlink. The binary itself (and symlink) are part of the glusterfs-fuse
# package, because glusterfs-server depends on that anyway.
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/pump.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
%if ( 0%{!?_without_tiering:1} )
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
%endif
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/leases.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/decompounder.so
%if ( 0%{!?_without_tiering:1} )
%{_libdir}/libgfdb.so.*
%endif

# snap_scheduler
%{_sbindir}/snap_scheduler.py
%{_sbindir}/gcron.py
%{_sbindir}/conf.py

# /var/lib/glusterd, e.g. hookscripts, etc.
%ghost      %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/bitd
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/groups
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/virt
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/metadata-cache
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/nl-cache
            %attr(0644,-,-) %{_sharedstatedir}/glusterd/groups/gluster-block
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glusterfind/.keys
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/glustershd
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/disabled-quota-root-xattr-heal.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/create/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/copy-file/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/post
                            %{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/delete/pre
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/remove-brick/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/post
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/reset/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S30samba-set.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post/S32gluster_enable_shared_storage.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/pre
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/post
       %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh
            %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/nfs/run
%ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%config(noreplace) %ghost      %attr(0600,-,-) %{_sharedstatedir}/glusterd/options
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/peers
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/quotad
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/scrub
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/snaps
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/ss_brick
%ghost %dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/vols

# Extra utility script
%{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
%if ( 0%{?_with_systemd:1} )
%{_libexecdir}/glusterfs/mount-shared-storage.sh
%endif

# Incrementalapi
%{_libexecdir}/glusterfs/glusterfind
%{_bindir}/glusterfind
%{_libexecdir}/glusterfs/peer_add_secret_pub

%if ( 0%{?_with_firewalld:1} )
%{_prefix}/lib/firewalld/services/glusterfs.xml
%endif
%endif

# Events
%if 0%{?_build_server}
%if ( 0%{!?_without_events:1} )
%files events
%config(noreplace) %{_sysconfdir}/glusterfs/eventsconfig.json
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/events
%{_libexecdir}/glusterfs/events
%{_libexecdir}/glusterfs/peer_eventsapi.py*
%{_sbindir}/glustereventsd
%{_sbindir}/gluster-eventsapi
%{_datadir}/glusterfs/scripts/eventsdash.py*
%if ( 0%{?_with_systemd:1} )
%{_unitdir}/glustereventsd.service
%else
%{_sysconfdir}/init.d/glustereventsd
%endif
%endif
%endif

##-----------------------------------------------------------------------------
## All %pretrans should be placed here and keep them sorted
##
%if 0%{?_build_server}
%pretrans -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped."
          echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!"
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime."
   echo "WARNING: Ensure to wait for the upgraded server to finish healing before proceeding."
   echo "WARNING: Refer upgrade section of install guide for more details"
   echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;"
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end



%pretrans api -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end



%pretrans api-devel -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end



%pretrans cli -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end



%pretrans client-xlators -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end



%pretrans devel -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end



%pretrans fuse -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end



%pretrans ganesha -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end



%if ( 0%{!?_without_georeplication:1} )
%pretrans geo-replication -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end
%endif



%pretrans libs -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end



%if ( 0%{!?_without_rdma:1} )
%pretrans rdma -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end
%endif



%if ( 0%{!?_without_ocf:1} )
%pretrans resource-agents -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end
%endif



%pretrans server -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- Since we run pretrans scripts only for RPMs built for a server build,
-- we can now use os.tmpname() since it is available on RHEL6 and later
-- platforms which are server platforms.
tmpname = os.tmpname()
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end

%posttrans server
pidof -c -o %PPID -x glusterd &> /dev/null
if [ $? -eq 0 ]; then
    kill -9 `pgrep -f gsyncd.py` &> /dev/null

    killall --wait -SIGTERM glusterd &> /dev/null

    if [ "$?" != "0" ]; then
        echo "killall failed while killing glusterd"
    fi

    glusterd --xlator-option *.upgrade=on -N

    #Cleaning leftover glusterd socket file which is created by glusterd in
    #rpm_script_t context.
    rm -rf /var/run/glusterd.socket

    # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
    # so start it again
    %_init_start glusterd
else
    glusterd --xlator-option *.upgrade=on -N

    #Cleaning leftover glusterd socket file which is created by glusterd in
    #rpm_script_t context.
    rm -rf /var/run/glusterd.socket
fi

%endif

%changelog
* Tue Apr 10 2018 CentOS Sources <bugs@centos.org> - 3.8.4-53.el7.centos
- remove vendor and/or packager lines

* Fri Dec 01 2017 Yaakov Selkowitz <yselkowi@redhat.com> - 3.8.4-53
- Rebuilt for multi-arch enablement bz#1493586

* Wed Nov 08 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-52
- fixes bugs bz#1257520

* Tue Oct 31 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-51
- fixes bugs bz#1491756 bz#1503413 bz#1505433 bz#1505473 bz#1507172

* Mon Oct 16 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-50
- fixes bugs bz#1500720 bz#1500816

* Wed Oct 11 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-49
- fixes bugs bz#1499251

* Thu Oct 05 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-48
- fixes bugs bz#1498862

* Thu Oct 05 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-47
- fixes bugs bz#1017362

* Wed Sep 27 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-46
- fixes bugs bz#1411338 bz#1468969 bz#1492077

* Wed Sep 06 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-45
- fixes bugs bz#1475326 bz#1475331 bz#1475333 bz#1475334 bz#1475721 
  bz#1482994 bz#1486115 bz#1488018 bz#1488020 bz#1488860

* Thu Aug 24 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-42
- fixes bugs bz#1483956

* Wed Aug 16 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-41
- fixes bugs bz#1478716 bz#1481392

* Fri Aug 11 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-40
- fixes bugs bz#1480423

* Wed Aug 09 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-39
- fixes bugs bz#1457713 bz#1474178 bz#1475136 bz#1476871 bz#1477024 bz#1479710

* Fri Aug 04 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-38
- fixes bugs bz#1466144 bz#1476867 bz#1477668 bz#1478136

* Thu Aug 03 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-37
- fixes bugs bz#1472129 bz#1472273 bz#1472764 bz#1472773 bz#1474380 
  bz#1474812 bz#1476556

* Thu Jul 27 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-36
- fixes bugs bz#1454602 bz#1460936 bz#1471918 bz#1473229 bz#1473259 
  bz#1473327 bz#1474284

* Thu Jul 20 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-35
- fixes bugs bz#1444790 bz#1472289 bz#1472604

* Mon Jul 17 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-34
- fixes bugs bz#1335090 bz#1460936 bz#1463907 bz#1465289 bz#1468484 
  bz#1468514 bz#1469041 bz#1469971

* Mon Jul 10 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-33
- fixes bugs bz#1191480 bz#1411344 bz#1451224 bz#1452513 bz#1457936 
  bz#1464336 bz#1466321 bz#1467621 bz#1467807 bz#1468186 bz#1468950

* Fri Jun 30 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-32
- fixes bugs bz#1462693 bz#1463108 bz#1463221 bz#1464453 bz#1465011 
  bz#1465638 bz#1466608

* Fri Jun 23 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-31
- fixes bugs bz#1461098

* Fri Jun 23 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-30
- fixes bugs bz#1452083 bz#1454602 bz#1457731

* Wed Jun 21 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-29
- fixes bugs bz#1335090 bz#1441055 bz#1444515 bz#1453145 bz#1454596 
  bz#1454689 bz#1455022 bz#1457179 bz#1457183 bz#1461649 bz#1462066 bz#1462687 
  bz#1462753 bz#1462773 bz#1463104

* Wed Jun 14 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-28
- fixes bugs bz#1427159 bz#1437960 bz#1441055 bz#1448386 bz#1450080 
  bz#1450722 bz#1450830 bz#1451602 bz#1451756 bz#1452205 bz#1453145 bz#1454602 
  bz#1458569 bz#1458585 bz#1459400 bz#1459756 bz#1459900 bz#1459972 bz#1460098

* Mon Jun 05 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-27
- fixes bugs bz#1420796 bz#1424680 bz#1428936 bz#1435357 bz#1448833 
  bz#1451280 bz#1451598 bz#1454313 bz#1454689 bz#1456402

* Sat May 27 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-26
- fixes bugs bz#1315781 bz#1381142 bz#1415178 bz#1426952 bz#1427958 
  bz#1435587 bz#1438245 bz#1441280 bz#1441783 bz#1443843 bz#1443941 bz#1443980 
  bz#1447559 bz#1447959 bz#1449226 bz#1449593 bz#1449684 bz#1450330 bz#1450336 
  bz#1450341 bz#1450806 bz#1450889 bz#1450904 bz#1451086 bz#1452205 bz#1452528 
  bz#1453049 bz#1454558

* Wed May 10 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-25
- fixes bugs bz#1165648 bz#1297743 bz#1400816 bz#1411352 bz#1414758 
  bz#1434653 bz#1442535 bz#1443972 bz#1443990 bz#1445195 bz#1445570
  bz#1446165 bz#1446645 bz#1447929

* Wed Apr 26 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-24
- fixes bugs bz#1327045 bz#1360317 bz#1380598 bz#1395989 bz#1416024 
  bz#1419816 bz#1426326 bz#1426523 bz#1427452 bz#1441992 bz#1443123
  bz#1443950

* Thu Apr 20 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-23
- fixes bugs bz#1298258 bz#1372283 bz#1394118 bz#1415038 bz#1427096 
  bz#1427099 bz#1427870 bz#1435592 bz#1435656 bz#1438245 bz#1438972
  bz#1441942 bz#1441946 bz#1442026 bz#1442943

* Mon Apr 10 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-22
- fixes bugs bz#1427096 bz#1438706 bz#1439708

* Wed Apr 05 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-21
- fixes bugs bz#1315583 bz#1437332 bz#1437773 bz#1437957 bz#1438051 
  bz#1438052 bz#1438378

* Fri Mar 31 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-20
- fixes bugs bz#1437782

* Thu Mar 30 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-19
- first build for RHGS 3.3.0 with following cherry picks
- fixes bugs bz#843838 bz#1167252 bz#1247056 bz#1260779 bz#1309209 bz#1323928 
  bz#1326183 bz#1351185 bz#1359613 bz#1370027 bz#1378085 bz#1380598 bz#1381158 
  bz#1381825 bz#1383979 bz#1387328 bz#1396010 bz#1400816 bz#1409474 bz#1412930 
  bz#1414750 bz#1417815 bz#1418227 bz#1425684 bz#1425690 bz#1425695 bz#1425697 
  bz#1426034 bz#1426950 bz#1427096 bz#1433276 bz#1433751

* Wed Mar 08 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-18
- fixes bugs bz#1408655

* Sat Mar 04 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-17
- fixes bugs bz#1408655 bz#1425740

* Thu Mar 02 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-16
- fixes bugs bz#1412554 bz#1424944 bz#1425748 bz#1426324 bz#1426559

* Mon Feb 20 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-15
- fixes bugs bz#1205162 bz#1315544 bz#1403180 bz#1409135 bz#1415101 
  bz#1417177 bz#1418901 bz#1420635 bz#1422431

* Mon Feb 06 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-14
- fixes bugs bz#1408576 bz#1408705 bz#1409563 bz#1411270 bz#1413513 
  bz#1414247 bz#1414663 bz#1415583 bz#1417955 bz#1418011

* Tue Jan 24 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-13
- fixes bugs bz#1406322 bz#1408639 bz#1411329 bz#1412883 bz#1412955

* Mon Jan 16 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-12
- fixes bugs bz#1410025 bz#1410406 bz#1411617

* Fri Jan 06 2017 Milind Changire <mchangir@redhat.com> - 3.8.4-11
- fixes bugs bz#1393316 bz#1404989 bz#1405000 bz#1408112 bz#1408413 
  bz#1408426 bz#1408836 bz#1409472 bz#1409782 bz#1409808

* Thu Dec 22 2016 Milind Changire <mchangir@redhat.com> - 3.8.4-10
- fixes bugs bz#1393316 bz#1398311 bz#1398798 bz#1403672 bz#1404110 
  bz#1404633 bz#1404982 bz#1405000 bz#1406025 bz#1406401

* Fri Dec 16 2016 Milind Changire <mchangir@redhat.com> - 3.8.4-9
- fixes bugs bz#1400057 bz#1400068 bz#1400093 bz#1400599 bz#1402774 
  bz#1403120 bz#1403587 bz#1403770 bz#1403840 bz#1404996 bz#1405299

* Fri Dec 09 2016 Milind Changire <mchangir@redhat.com> - 3.8.4-8
- fixes bugs bz#1393694 bz#1397450 bz#1398188 bz#1399100 bz#1400365 
  bz#1401380 bz#1401806 bz#1401817

* Mon Dec 05 2016 Milind Changire <mchangir@redhat.com> - 3.8.4-7
- fixes bugs bz#1361759 bz#1376464 bz#1377062 bz#1378131 bz#1381452 
  bz#1384070 bz#1385605 bz#1388755 bz#1395539 bz#1395603 bz#1395613 bz#1396449 
  bz#1397257 bz#1398257 bz#1398261 bz#1398315 bz#1399105 bz#1399598 bz#1399757 
  bz#1400395

* Tue Nov 29 2016 Milind Changire <mchangir@redhat.com> - 3.8.4-6
- fixes bugs bz#1337565 bz#1376694 bz#1376695 bz#1380122 bz#1380419 
  bz#1380655 bz#1382686 bz#1383898 bz#1384316 bz#1384993 bz#1385474 bz#1385589 
  bz#1385606 bz#1386127 bz#1386635 bz#1387204 bz#1387501 bz#1387558 bz#1389422 
  bz#1389661 bz#1390843 bz#1391808 bz#1392299 bz#1392837 bz#1392895 bz#1393526 
  bz#1393694 bz#1393709 bz#1393758 bz#1394219 bz#1394752 bz#1395574 bz#1396166 
  bz#1396361 bz#1397257 bz#1397286 bz#1397430 bz#1398331

* Fri Nov 11 2016 Milind Changire <mchangir@redhat.com> - 3.8.4-5
- fixes bugs bz#1278336 bz#1392899

* Thu Nov 10 2016 Milind Changire <mchangir@redhat.com> - 3.8.4-4
- fixes bugs bz#1367472 bz#1370350 bz#1379790 bz#1380742 bz#1384311 
  bz#1385525 bz#1385605 bz#1386185 bz#1386366 bz#1386472 bz#1387152 bz#1387544 
  bz#1388464 bz#1388734 bz#1389168 bz#1391093

* Mon Oct 24 2016 Milind Changire <mchangir@redhat.com> - 3.8.4-3
- fixes bugs bz#1284873 bz#1333885 bz#1336377 bz#1339765 bz#1347257 
  bz#1361513 bz#1361519 bz#1362044 bz#1374568 bz#1375465 bz#1378677 bz#1379919 
  bz#1379924 bz#1379966 bz#1380257 bz#1380276 bz#1380605 bz#1380619 bz#1380638 
  bz#1380710 bz#1381822 bz#1382277 bz#1386172 bz#1386538

* Thu Sep 29 2016 Milind Changire <mchangir@redhat.com> - 3.8.4-2
- fixes bugs bz#1294035 bz#1377062 bz#1377387 bz#1378030 bz#1378300 
  bz#1378484 bz#1378528 bz#1378867

* Tue Sep 20 2016 Milind Changire <mchangir@redhat.com> - 3.8.4-1
- rebase to upstream glusterfs at v3.8.4
- fixes bugs bz#1256524 bz#1257182 bz#1294035 bz#1294754 bz#1306120 
  bz#1340756 bz#1344675 bz#1348949 bz#1348954 bz#1348962 bz#1351589 bz#1353427 
  bz#1356058 bz#1359180 bz#1359588 bz#1359605 bz#1359607 bz#1360807 bz#1360978 
  bz#1361066 bz#1361068 bz#1361078 bz#1361082 bz#1361084 bz#1361086 bz#1361118 
  bz#1361170 bz#1361184 bz#1363729 bz#1367382 bz#1369384 bz#1373976