%global _hardened_build 1

%global _for_fedora_koji_builds 0

# uncomment and add '%' to use the prereltag for pre-releases
# %%global prereltag qa3

##-----------------------------------------------------------------------------
## All argument definitions should be placed here and keep them sorted
##

# if you wish to compile an rpm with cmocka unit testing...
# rpmbuild -ta glusterfs-3.7.1.tar.gz --with cmocka
%{?_with_cmocka:%global _with_cmocka --enable-cmocka}

# if you wish to compile an rpm without rdma support, compile like this...
# rpmbuild -ta glusterfs-3.7.1.tar.gz --without rdma
%{?_without_rdma:%global _without_rdma --disable-ibverbs}

# No RDMA Support on s390(x)
%ifarch s390 s390x
%global _without_rdma --disable-ibverbs
%endif

# if you wish to compile an rpm without epoll...
# rpmbuild -ta glusterfs-3.7.1.tar.gz --without epoll
%{?_without_epoll:%global _without_epoll --disable-epoll}

# if you wish to compile an rpm without fusermount...
# rpmbuild -ta glusterfs-3.7.1.tar.gz --without fusermount
%{?_without_fusermount:%global _without_fusermount --disable-fusermount}

# if you wish to compile an rpm without geo-replication support, compile like this...
# rpmbuild -ta glusterfs-3.7.1.tar.gz --without georeplication
%{?_without_georeplication:%global _without_georeplication --disable-georeplication}

# Disable geo-replication on EL5, as its default Python is too old
%if ( 0%{?rhel} && 0%{?rhel} < 6 )
%global _without_georeplication --disable-georeplication
%endif

# if you wish to compile an rpm without the OCF resource agents...
# rpmbuild -ta glusterfs-3.7.1.tar.gz --without ocf
%{?_without_ocf:%global _without_ocf --without-ocf}

# if you wish to build rpms without syslog logging, compile like this
# rpmbuild -ta glusterfs-3.7.1.tar.gz --without syslog
%{?_without_syslog:%global _without_syslog --disable-syslog}

# disable syslog forcefully as rhel <= 6 doesn't have rsyslog or rsyslog-mmcount
# Fedora deprecated syslog, see
# https://fedoraproject.org/wiki/Changes/NoDefaultSyslog
# (And what about RHEL7?)
%if ( 0%{?fedora} && 0%{?fedora} >= 20 ) || ( 0%{?rhel} && 0%{?rhel} <= 6 )
%global _without_syslog --disable-syslog
%endif

# if you wish to compile an rpm without the BD map support...
# rpmbuild -ta glusterfs-3.7.1.tar.gz --without bd
%{?_without_bd:%global _without_bd --disable-bd-xlator}

%if ( 0%{?rhel} && 0%{?rhel} < 6 || 0%{?sles_version} )
%define _without_bd --disable-bd-xlator
%endif

# if you wish to compile an rpm without the qemu-block support...
# rpmbuild -ta glusterfs-3.7.1.tar.gz --without qemu-block
%{?_without_qemu_block:%global _without_qemu_block --disable-qemu-block}

%if ( 0%{?rhel} && 0%{?rhel} < 6 )
# xlators/features/qemu-block fails to build on RHEL5, disable it
%define _without_qemu_block --disable-qemu-block
%endif

# if you wish not to build server rpms, compile like this.
# rpmbuild -ta glusterfs-3.7.1.tar.gz --without server
%global _build_server 1
%if "%{?_without_server}"
%global _build_server 0
%endif

# Server bits ship only on RHGS dists; everywhere else force a client-only
# build and drop geo-replication (a server-side feature).
%if ( "%{?dist}" == ".el6rhs" ) || ( "%{?dist}" == ".el7rhs" ) || ( "%{?dist}" == ".el7rhgs" )
# Keep the value computed above so that '--without server' is still honoured
# on RHGS dists.  (Previously both branches reassigned _build_server here,
# which silently clobbered the '--without server' result and made that
# rpmbuild knob a no-op.)
%else
%global _build_server 0
%global _without_georeplication --disable-georeplication
%endif

%global _without_extra_xlators 1
%global _without_regression_tests 1

# Disable data-tiering on EL5, sqlite is too old
%if ( 0%{?rhel} && 0%{?rhel} < 6 )
%global _without_tiering --disable-tiering
%endif

##-----------------------------------------------------------------------------
## All %global definitions should be placed here and keep them sorted
##
%if ( 0%{?fedora} && 0%{?fedora} > 16 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
%global _with_systemd true
%endif

%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 7 )
%global _with_firewalld --enable-firewalld
%endif

%if 0%{?_tmpfilesdir:1}
%define _with_tmpfilesdir --with-tmpfilesdir=%{_tmpfilesdir}
%else
%define _with_tmpfilesdir --without-tmpfilesdir
%endif

# there is no systemtap support!
# Perhaps some day there will be
%global _without_systemtap --enable-systemtap=no

# From https://fedoraproject.org/wiki/Packaging:Python#Macros
%if ( 0%{?rhel} && 0%{?rhel} <= 5 )
%{!?python_sitelib: %global python_sitelib %(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%{!?python_sitearch: %global python_sitearch %(python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
%endif

# _init_* macros abstract over systemd vs. SysV init so scriptlets can stay
# identical on both init systems.
%if ( 0%{?_with_systemd:1} )
%define _init_enable() /bin/systemctl enable %1.service ;
%define _init_disable() /bin/systemctl disable %1.service ;
%define _init_restart() /bin/systemctl try-restart %1.service ;
%define _init_start() /bin/systemctl start %1.service ;
%define _init_stop() /bin/systemctl stop %1.service ;
%define _init_install() install -D -p -m 0644 %1 %{buildroot}%{_unitdir}/%2.service ;
# can't seem to make a generic macro that works
%define _init_glusterd %{_unitdir}/glusterd.service
%define _init_glusterfsd %{_unitdir}/glusterfsd.service
%else
%define _init_enable() /sbin/chkconfig --add %1 ;
%define _init_disable() /sbin/chkconfig --del %1 ;
%define _init_restart() /sbin/service %1 condrestart &>/dev/null ;
%define _init_start() /sbin/service %1 start &>/dev/null ;
%define _init_stop() /sbin/service %1 stop &>/dev/null ;
%define _init_install() install -D -p -m 0755 %1 %{buildroot}%{_sysconfdir}/init.d/%2 ;
# can't seem to make a generic macro that works
%define _init_glusterd %{_sysconfdir}/init.d/glusterd
%define _init_glusterfsd %{_sysconfdir}/init.d/glusterfsd
%endif

%if ( 0%{_for_fedora_koji_builds} )
%if ( 0%{?_with_systemd:1} )
%global glusterfsd_service glusterfsd.service
%else
%global glusterfsd_service glusterfsd.init
%endif
%endif

%{!?_pkgdocdir: %global _pkgdocdir %{_docdir}/%{name}-%{version}}

%if ( 0%{?rhel} && 0%{?rhel} < 6 )
# _sharedstatedir is not provided by RHEL5
%define _sharedstatedir /var/lib
%endif

# We do not want to generate useless provides and requires for xlator
# .so files
# to be set for glusterfs packages.
# Filter all generated:
#
# TODO: RHEL5 does not have a convenient solution
%if ( 0%{?rhel} == 6 )
# filter_setup exists in RHEL6 only
%filter_provides_in %{_libdir}/glusterfs/%{version}/
%global __filter_from_req %{?__filter_from_req} | grep -v -P '^(?!lib).*\.so.*$'
%filter_setup
%else
# modern rpm and current Fedora do not generate requires when the
# provides are filtered
%global __provides_exclude_from ^%{_libdir}/glusterfs/%{version}/.*$
%endif

##-----------------------------------------------------------------------------
## All package definitions should be placed here and keep them sorted
##
Summary:          Distributed File System
%if ( 0%{_for_fedora_koji_builds} )
Name:             glusterfs
Version:          3.5.0
Release:          0.1%{?prereltag:.%{prereltag}}%{?dist}
#Vendor removed
%else
Name:             glusterfs
Version:          3.7.1
Release:          16%{?dist}
ExclusiveArch:    x86_64 aarch64
%endif
License:          GPLv2 or LGPLv3+
Group:            System Environment/Base
URL:              http://www.gluster.org/docs/index.php/GlusterFS
%if ( 0%{_for_fedora_koji_builds} )
Source0:          http://bits.gluster.org/pub/gluster/glusterfs/src/glusterfs-%{version}%{?prereltag}.tar.gz
Source1:          glusterd.sysconfig
Source2:          glusterfsd.sysconfig
Source6:          rhel5-load-fuse-modules
Source7:          glusterfsd.service
Source8:          glusterfsd.init
%else
Source0:          glusterfs-3.7.1.tar.gz
%endif
BuildRoot:        %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
%if ( 0%{?rhel} && 0%{?rhel} <= 5 )
BuildRequires:    python-simplejson
%endif
%if ( 0%{?_with_systemd:1} )
BuildRequires:    systemd-units
%endif
Requires:         %{name}-libs = %{version}-%{release}
BuildRequires:    bison flex
BuildRequires:    gcc make automake libtool
BuildRequires:    ncurses-devel readline-devel
BuildRequires:    libxml2-devel openssl-devel
BuildRequires:    libaio-devel libacl-devel
BuildRequires:    python-devel
BuildRequires:    python-ctypes
BuildRequires:    userspace-rcu-devel >= 0.7
%if ( 0%{?rhel} && 0%{?rhel} <= 5 )
BuildRequires:    e2fsprogs-devel
%else
BuildRequires:    libuuid-devel
%endif
# Feature-conditional BuildRequires, Obsoletes/Provides of the main package,
# and the RHGS backport patch series Patch0002-Patch0331 (Patch0001 is
# commented out).  Patch declarations are split across the collapsed lines
# below; NOTE(review): lines left byte-identical, comments added only at
# boundaries that do not fall inside a PatchNNNN: declaration.
%if ( 0%{?_with_cmocka:1} ) BuildRequires: libcmocka-devel >= 1.0.1 %endif %if ( 0%{!?_without_tiering:1} ) BuildRequires: sqlite-devel %endif %if ( 0%{!?_without_systemtap:1} ) BuildRequires: systemtap-sdt-devel %endif %if ( 0%{!?_without_bd:1} ) BuildRequires: lvm2-devel %endif %if ( 0%{!?_without_qemu_block:1} ) BuildRequires: glib2-devel %endif %if ( 0%{!?_without_georeplication:1} ) BuildRequires: libattr-devel %endif %if (0%{?_with_firewalld:1}) BuildRequires: firewalld %endif Obsoletes: hekafs Obsoletes: %{name}-common < %{version}-%{release} Obsoletes: %{name}-core < %{version}-%{release} Obsoletes: %{name}-ufo Provides: %{name}-common = %{version}-%{release} Provides: %{name}-core = %{version}-%{release} # Patch0001: 0001-Update-rfc.sh-to-rhgs-3.1.patch Patch0002: 0002-features-quota-Do-unwind-if-postbuf-is-NULL.patch Patch0003: 0003-features-quota-Make-quota-deem-statfs-option-on-by-d.patch Patch0004: 0004-features-quota-prevent-statfs-frame-loss-when-an-err.patch Patch0005: 0005-quota-retry-connecting-to-quotad-on-ENOTCONN-error.patch Patch0006: 0006-snapshot-scheduler-Return-proper-error-code-in-case-.patch Patch0007: 0007-glusterd-do-not-show-pid-of-brick-in-volume-status-i.patch Patch0008: 0008-glusterd-Buffer-overflow-causing-crash-for-glusterd.patch Patch0009: 0009-glusterd-shared_storage-Provide-a-volume-set-option-.patch Patch0010: 0010-dht-rebalance-Fixed-rebalance-failure.patch Patch0011: 0011-cluster-dht-Fix-dht_setxattr-to-follow-files-under-m.patch Patch0012: 0012-cluster-dht-Don-t-rely-on-linkto-xattr-to-find-desti.patch Patch0013: 0013-afr-honour-selfheal-enable-disable-volume-set-option.patch Patch0014: 0014-cluster-ec-EC_XATTR_DIRTY-doesn-t-come-in-response.patch Patch0015: 0015-glusterd-Bump-op-version-and-max-op-version-for-3.7..patch Patch0016: 0016-build-remove-ghost-directory-entries.patch Patch0017: 0017-build-add-RHGS-specific-changes.patch Patch0018: 0018-secalert-remove-setuid-bit-for-fusermount-glusterfs.patch Patch0019: 
0019-dht-Add-lookup-optimize-configuration-option-for-DHT.patch Patch0020: 0020-features-quota-Fix-ref-leak.patch Patch0021: 0021-worm-Let-lock-zero-xattrop-calls-succeed.patch Patch0022: 0022-glusterd-snapshot-Return-correct-errno-in-events-of-.patch Patch0023: 0023-snapshot-Fix-finding-brick-mount-path-logic.patch Patch0024: 0024-bitrot-glusterd-scrub-option-should-be-disabled-once.patch Patch0025: 0025-glusterd-fix-repeated-connection-to-nfssvc-failed-ms.patch Patch0026: 0026-rpc-call-transport_unref-only-on-non-NULL-transport.patch Patch0027: 0027-socket-throttle-only-connected-transport.patch Patch0028: 0028-cluster-dht-pass-a-destination-subvol-to-fop2-varian.patch Patch0029: 0029-cluster-ec-Prevent-double-unwind.patch Patch0030: 0030-Upcall-cache-invalidation-Ignore-fops-with-frame-roo.patch Patch0031: 0031-cli-Fix-incorrect-parse-logic-for-volume-heal-comman.patch Patch0032: 0032-nfs-allocate-and-return-the-hashkey-for-the-auth_cac.patch Patch0033: 0033-features-marker-Cleanup-loc-in-case-of-errors.patch Patch0034: 0034-ec-Changing-log-level-to-DEBUG-in-case-of-ENOENT.patch Patch0035: 0035-protocol-server-Changing-log-level-from-Warning-to-D.patch Patch0036: 0036-glusterd-Stop-tcp-ip-listeners-during-glusterd-exit.patch Patch0037: 0037-libglusterfs-Enabling-the-fini-in-cleanup_and_exit.patch Patch0038: 0038-glusterd-disable-ping-timer.patch Patch0039: 0039-cluster-ec-Wind-unlock-fops-at-all-cost.patch Patch0040: 0040-build-fix-compiling-on-older-distributions.patch Patch0041: 0041-rpm-correct-date-and-order-of-entries-in-the-changel.patch Patch0042: 0042-nfs-ganesha-HA-fix-race-between-setting-grace-and-vi.patch Patch0043: 0043-logging-log-Stale-filehandle-on-the-client-as-Debug.patch Patch0044: 0044-snapshot-scheduler-Handle-OSError-in-os.-callbacks.patch Patch0045: 0045-snapshot-scheduler-Check-if-GCRON_TASKS-exists-befor.patch Patch0046: 0046-storage-posix-Handle-MAKE_INODE_HANDLE-failures.patch Patch0047: 
0047-snapshot-scheduler-Reload-etc-cron.d-glusterfs_snap_.patch Patch0048: 0048-build-packaging-corrections-for-RHEL-5.patch Patch0049: 0049-features-marker-Pass-along-xdata-to-lower-translator.patch Patch0050: 0050-cluster-dht-fix-incorrect-dst-subvol-info-in-inode_c.patch Patch0051: 0051-build-introduce-security-hardening-flags-in-gluster.patch Patch0052: 0052-glusterd-Display-status-of-Self-Heal-Daemon-for-disp.patch Patch0053: 0053-cluster-dht-maintain-start-state-of-rebalance-daemon.patch Patch0054: 0054-cluster-tier-account-for-reordered-layouts.patch Patch0055: 0055-cluster-tier-make-attach-detach-work-with-new-rebala.patch Patch0056: 0056-tier-dht-Fixing-non-atomic-promotion-demotion-w.r.t-.patch Patch0057: 0057-cluster-ec-Prevent-Null-dereference-in-dht-rename.patch Patch0058: 0058-features-changelog-Avoid-setattr-fop-logging-during-.patch Patch0059: 0059-tools-glusterfind-verifying-volume-presence.patch Patch0060: 0060-geo-rep-ignore-symlink-and-harlink-errors-in-geo-rep.patch Patch0061: 0061-spec-fix-add-pre-transaction-scripts-for-geo-rep-and.patch Patch0062: 0062-tools-glusterfind-Fix-GFID-to-Path-conversion-for-di.patch Patch0063: 0063-tools-glusterfind-Cleanup-glusterfind-dir-after-a-vo.patch Patch0064: 0064-geo-rep-Validate-use_meta_volume-option.patch Patch0065: 0065-tools-glusterfind-Cleanup-session-dir-after-delete.patch Patch0066: 0066-tools-glusterfind-verifying-volume-is-online.patch Patch0067: 0067-features-changelog-Do-htime-setxattr-without-XATTR_R.patch Patch0068: 0068-snapshot-scheduler-Modified-main-function-to-take-ar.patch Patch0069: 0069-glusterd-fix-op-versions-for-RHS-backwards-compatabi.patch Patch0070: 0070-tools-glusterfind-ignoring-deleted-files.patch Patch0071: 0071-spec-geo-rep-Add-rsync-as-dependency-for-georeplicat.patch Patch0072: 0072-tools-glusterfind-print-message-for-good-cases.patch Patch0073: 0073-common-ha-handle-long-node-names-and-node-names-with.patch Patch0074: 0074-common-ha-Clean-up-persistent-cib-state.patch 
# Patch series continues (0075 onward).
Patch0075: 0075-glusterd-subvol_count-value-for-replicate-volume-sho.patch Patch0076: 0076-upcall-prevent-busy-loop-in-reaper-thread.patch Patch0077: 0077-common-ha-Clean-up-cib-state-completely.patch Patch0078: 0078-NFS-Ganesha-Return-corect-return-value.patch Patch0079: 0079-contrib-timer-wheel-fix-deadlock-in-del_timer.patch Patch0080: 0080-libglusterfs-update-glfs-messages-header-for-reserve.patch Patch0081: 0081-protocol-client-porting-log-messages-to-new-framewor.patch Patch0082: 0082-protocol-server-port-log-messages-to-new-framework.patch Patch0083: 0083-common-ha-cluster-HA-setup-sometimes-fails.patch Patch0084: 0084-ec-Display-correct-message-after-successful-heal-sta.patch Patch0085: 0085-cluster-ec-Prevent-races-in-ec_unlock-for-rename.patch Patch0086: 0086-glusterd-Porting-messages-to-new-logging-framework.patch Patch0087: 0087-sm-glusterd-Porting-messages-to-new-logging-framewor.patch Patch0088: 0088-rebalance-store-glusterd-glusterd-porting-to-new-log.patch Patch0089: 0089-handler-messages-glusterd-Porting-to-new-logging-fra.patch Patch0090: 0090-handshake-locks-mountbroker-syncop-glusterd-New-logg.patch Patch0091: 0091-utils-glusterd-Porting-to-new-logging-framwork.patch Patch0092: 0092-quota-glusterd-porting-to-new-logging-framework.patch Patch0093: 0093-snapshot-Fix-terminating-slash-in-brick-mount-path.patch Patch0094: 0094-rpm-glusterfs-devel-for-client-builds-should-not-dep.patch Patch0095: 0095-snapshot-man-page-modifications.patch Patch0096: 0096-ops-glusterd-Porting-messages-to-new-logging-framewo.patch Patch0097: 0097-glusterd-uss-snapshot-Intialise-snapdsvc-after-volfi.patch Patch0098: 0098-build-add-pretrans-check.patch Patch0099: 0099-glusterd-geo-rep-porting-log-messages-to-a-new-frame.patch Patch0100: 0100-glusterd-Fixing-the-compilation-failures.patch Patch0101: 0101-cluster-afr-Do-not-attempt-entry-self-heal-if-the-la.patch Patch0102: 0102-common-ha-cluster-HA-setup-sometimes-fails.patch Patch0103: 
0103-glusterd-Fix-snapshot-of-a-volume-with-geo-rep.patch Patch0104: 0104-quota-fix-double-accounting-with-rename-operation.patch Patch0105: 0105-gluster-shared_storage-Add-Remove-shared-storage-fro.patch Patch0106: 0106-glusterd-mgmt_v3-Print-the-node-info-with-failure-me.patch Patch0107: 0107-build-exclude-libgfdb.pc-conditionally.patch Patch0108: 0108-libgfchangelog-Fix-crash-in-gf_changelog_process.patch Patch0109: 0109-Porting-new-log-messages-for-posix.patch Patch0110: 0110-rdma-porting-rdma-to-a-new-message-id-logging-format.patch Patch0111: 0111-glusterd-afr-set-afr-pending-xattrs-on-replace-brick.patch Patch0112: 0112-libglusterfs-allocate-a-segment-for-quota-logging.patch Patch0113: 0113-libgfapi-port-gfapi-to-new-logging-framework.patch Patch0114: 0114-cluster-afr-set-pending-xattrs-for-replaced-brick.patch Patch0115: 0115-common-ha-cluster-setup-issues-on-RHEL7.patch Patch0116: 0116-NFS-Ganesha-Automatically-export-vol-that-was-export.patch Patch0117: 0117-features-bitrot-tuanble-object-signing-waiting-time-.patch Patch0118: 0118-features-bitrot-fix-fd-leak-in-truncate-stub.patch Patch0119: 0119-tier-volume-set-Validate-volume-set-option-for-tier.patch Patch0120: 0120-glusterd-tier-glusterd-crashed-with-detach-tier-comm.patch Patch0121: 0121-glusterd-shared_storage-Added-help-description-for-e.patch Patch0122: 0122-cluster-dht-Prevent-use-after-free-bug.patch Patch0123: 0123-afr-complete-conservative-merge-even-in-case-of-gfid.patch Patch0124: 0124-dht-Error-value-check-before-performing-rebalance-co.patch Patch0125: 0125-common-ha-Fixing-add-node-operation.patch Patch0126: 0126-quota-allow-writes-when-with-ENOENT-ESTALE-on-active.patch Patch0127: 0127-tier-glusterd-Add-description-for-volume-set-options.patch Patch0128: 0128-cluster-ec-Avoid-parallel-executions-of-the-same-sta.patch Patch0129: 0129-nfs.c-nfs3.c-port-log-messages-to-a-new-framework.patch Patch0130: 0130-features-quota-port-QUOTA-messages-to-new-logging-fr.patch Patch0131: 
0131-NFS-Ganesha-Implement-refresh-config.patch Patch0132: 0132-glusterd-use-mkdir_p-for-creating-rundir.patch Patch0133: 0133-glusterd-Store-peerinfo-after-updating-hostnames.patch Patch0134: 0134-tools-glusterfind-Fail-glusterfind-creation-if-volum.patch Patch0135: 0135-geo-rep-Fix-geo-rep-fanout-setup-with-meta-volume.patch Patch0136: 0136-geo-rep-Fix-toggling-of-use_meta_volume-config.patch Patch0137: 0137-geo-rep-Fix-glusterd-working-directory.patch Patch0138: 0138-geo-rep-Fix-ssh-issue-in-geo-rep.patch Patch0139: 0139-cluster-ec-wind-fops-on-good-subvols-for-access-read.patch Patch0140: 0140-cluster-afr-Pick-gfid-from-poststat-during-fresh-loo.patch Patch0141: 0141-protocol-client-removing-duplicate-printing-in-gf_ms.patch Patch0142: 0142-EC-While-Healing-a-file-set-the-config-xattr.patch Patch0143: 0143-libglusterfs-Introducing-new-logging-message.patch Patch0144: 0144-core-add-gf_ref_t-for-common-refcounting-structures.patch Patch0145: 0145-quota-marker-fix-mem-leak-in-marker.patch Patch0146: 0146-cluster-afr-set-pending-xattrs-for-replaced-brick.patch Patch0147: 0147-common-utils-libglusterfs-Porting-to-a-new-logging-f.patch Patch0148: 0148-afr-Block-fops-when-file-is-in-split-brain.patch Patch0149: 0149-quota-marker-accounting-goes-bad-with-rename-while-w.patch Patch0150: 0150-dht-Adding-log-messages-to-the-new-logging-framework.patch Patch0151: 0151-gfdb-libglusterfs-Porting-to-a-new-logging-framework.patch Patch0152: 0152-changetimerecorder-Porting-to-new-logging-framework.patch Patch0153: 0153-features-bitrot-log-scrub-frequency-throttle-values.patch Patch0154: 0154-tests-bitrot-Scrub-state-change-tests.patch Patch0155: 0155-common-ha-fix-delete-node.patch Patch0156: 0156-features-bit-rot-check-for-both-inmemory-and-ondisk-.patch Patch0157: 0157-bit-rot-New-logging-framework-for-bit-rot-log-messag.patch Patch0158: 0158-features-bitrot-cleanup-v1.patch Patch0159: 0159-features-bitrot-cleanup-v2.patch Patch0160: 
0160-features-bitrot-handle-scrub-states-via-state-machin.patch Patch0161: 0161-geo-rep-Fix-add-user-in-mountbroker-user-management.patch Patch0162: 0162-features-bitrot-convert-pending-gf_log-to-gf_msg.patch Patch0163: 0163-geo-rep-ignore-ESTALE-as-ENOENT.patch Patch0164: 0164-fd-inode-libglusterfs-porting-to-a-new-logging-frame.patch Patch0165: 0165-call-stub-circ-buff-client_t-compat-dict-libglusterf.patch Patch0166: 0166-graph-libglusterfs-porting-to-a-new-logging-framewor.patch Patch0167: 0167-defaults-globals-iobuf-latency-logging-options-xlato.patch Patch0168: 0168-event-parse-utils-quota-common-utils-rbthash-libglus.patch Patch0169: 0169-cluster-afr-truncate-all-sinks-files.patch Patch0170: 0170-features-changelog-Always-log-directory-rename-opera.patch Patch0171: 0171-cluster-ec-Fix-incorrect-check-for-iatt-differences.patch Patch0172: 0172-ec-Porting-messages-to-new-logging-framework.patch Patch0173: 0173-glusterd-Porting-left-out-log-messages-to-new-framew.patch Patch0174: 0174-ganesha-volinfo-is-not-persisted-after-modifying-opt.patch Patch0175: 0175-common-utils-libglusterfs-removing-strerror-to-avoid.patch Patch0176: 0176-NFS-Ganesha-Automatically-export-volume-after-volume.patch Patch0177: 0177-storage-posix-Check-xdata-for-NULL-before-dict_get.patch Patch0178: 0178-protocol-server-Include-a-check-to-validate-xprt-cli.patch Patch0179: 0179-Upcall-Fix-an-issue-with-invalidating-parent-entries.patch Patch0180: 0180-mem-pool-stack-store-syncop-timer-libglusterfs-Porti.patch Patch0181: 0181-quota-Fix-statfs-values-in-EC-when-quota_deem_statfs.patch Patch0182: 0182-Logging-Porting-the-performance-translator.patch Patch0183: 0183-afr-Porting-messages-to-new-logging-framework.patch Patch0184: 0184-features-performace-Updating-the-glfs-components.patch Patch0185: 0185-features-bit-rot-stub-deny-access-to-bad-objects.patch Patch0186: 0186-features-bit-rot-stub-do-not-allow-setxattr-and-remo.patch Patch0187: 
0187-tier-ctr-Ignore-creation-of-T-file-and-Ctr-Lookup-he.patch Patch0188: 0188-glusterd-Correction-in-Error-message-for-disperse-vo.patch Patch0189: 0189-gfapi-symlink-resolution-for-glfs_object.patch Patch0190: 0190-feature-performace-Fix-broken-build.patch Patch0191: 0191-rdma-removing-duplicate-printing-of-error-string-in-.patch Patch0192: 0192-NFS-Ganesha-Exporting-volume-fails.patch Patch0193: 0193-common-ha-Fix-var-lib-nfs-statd-state-path-creation.patch Patch0194: 0194-cluster-ec-Add-throttling-in-background-healing.patch Patch0195: 0195-cluster-ec-Remove-dead-code.patch Patch0196: 0196-protocol-server-Correctly-reconfigure-auth.ssl-allow.patch Patch0197: 0197-cluster-dht-use-refcount-to-manage-memory-used-to-st.patch Patch0198: 0198-protocol-server-fail-setvolume-if-any-of-xlators-is-.patch Patch0199: 0199-NFS-Ganesha-Unexport-fails-after-S31ganesha-start-sc.patch Patch0200: 0200-cluster-ec-Make-background-healing-optional-behavior.patch Patch0201: 0201-access-control-validating-context-of-access-control-.patch Patch0202: 0202-tools-glusterfind-RENAME-and-MODIFY-issues.patch Patch0203: 0203-cluster-afr-expunge-first-impunge-next-in-entry-self.patch Patch0204: 0204-glusterd-shared_storage-Use-var-lib-glusterd-ss_bric.patch Patch0205: 0205-cluster-ec-Don-t-read-from-bad-subvols.patch Patch0206: 0206-cluster-ec-Remove-failed-subvols-from-source-sink-co.patch Patch0207: 0207-protocol-server-Add-null-check-to-gf_client_put.patch Patch0208: 0208-quota-fix-mem-leak-in-quota-enforcer.patch Patch0209: 0209-posix-fix-mem-leak-in-posix_get_ancestry-error-path.patch Patch0210: 0210-quota-marker-fix-mem-leak-in-marker.patch Patch0211: 0211-glusterd-Removing-sync-lock-and-unlock-inside-rcu-re.patch Patch0212: 0212-cluster-ec-Fix-use-after-free-bug.patch Patch0213: 0213-glusterd-geo-rep-Fix-failure-of-geo-rep-pause.patch Patch0214: 0214-glusterd-Get-the-local-txn_info-based-on-trans_id-in.patch Patch0215: 0215-snapshot-scheduler-Use-var-run-gluster-shared_storag.patch 
# Patch series continues (0216 onward); the base-package %%description and
# the api subpackage follow the last patch (0331).
Patch0216: 0216-glusterd-correct-replace-brick-flow.patch Patch0217: 0217-glusterd-snapd-Stop-snapd-daemon-when-glusterd-is-re.patch Patch0218: 0218-quotad-create-sock-listener-only-after-graph-init-is.patch Patch0219: 0219-common-ha-ganesha-ha.sh-status-tries-to-read-ganesha.patch Patch0220: 0220-quota-marker-fix-mem-leak-in-marker.patch Patch0221: 0221-quota-marker-use-smaller-stacksize-in-synctask-for-m.patch Patch0222: 0222-quota-marker-set-lk_owner-when-taking-lock-on-parent.patch Patch0223: 0223-quota-marker-fix-spurious-failure-afr-quota-xattr-md.patch Patch0224: 0224-gfapi-Update-loc-inode-accordingly-in-glfs_loc_link.patch Patch0225: 0225-glusterd-Fix-management-encryption-issues-with-Glust.patch Patch0226: 0226-NFS-Ganesha-Export-fails-on-RHEL-7.1.patch Patch0227: 0227-libgfdb-sql-Fixing-broken-query-of-find_unchanged.patch Patch0228: 0228-glusterd-use-a-real-host-name-instead-of-numeric-whe.patch Patch0229: 0229-glusterd-Send-friend-update-even-for-EVENT_RCVD_ACC.patch Patch0230: 0230-logging-Fixed-incorrect-buffer-size.patch Patch0231: 0231-quota-marker-inspect-file-dir-invoked-without-having.patch Patch0232: 0232-NFS-Ganesha-Add-node-does-not-copy-exports-directory.patch Patch0233: 0233-access_control-avoid-double-unrefing-of-acl-variable.patch Patch0234: 0234-features-posix-Avoid-double-free-of-a-variable-in-po.patch Patch0235: 0235-cluster-ec-Propogate-correct-errno-in-case-of-failur.patch Patch0236: 0236-cluster-ec-Prevent-data-corruptions.patch Patch0237: 0237-rpc-transport-socket_poller-fixes-for-proper-working.patch Patch0238: 0238-glusterd-Fix-failure-in-replace-brick-when-src-brick.patch Patch0239: 0239-quota-marker-fix-mem-leak-in-marker.patch Patch0240: 0240-features-bitrot-move-inode-state-just-at-the-last-mo.patch Patch0241: 0241-features-bitrot-throttle-signer.patch Patch0242: 0242-extras-hooks-Fix-parsing-of-args-in-S30samba-set.sh.patch Patch0243: 0243-timer-fix-race-between-gf_timer_call_cancel-and-gf_t.patch Patch0244: 
0244-cluster-ec-Handle-race-between-unlock-timer-new-lock.patch Patch0245: 0245-Revert-timer-fix-race-between-gf_timer_call_cancel-a.patch Patch0246: 0246-glusterd-Porting-the-left-out-gf_log_callingfns-to-n.patch Patch0247: 0247-glusterd-Pass-NULL-in-glusterd_svc_manager-in-gluste.patch Patch0248: 0248-glusterd-Do-not-log-failure-if-glusterd_get_txn_opin.patch Patch0249: 0249-cluster-afr-Fix-incorrect-logging-in-read-transactio.patch Patch0250: 0250-heal-Do-not-invoke-glfs_fini-for-glfs-heal-commands.patch Patch0251: 0251-dht-send-lookup-even-for-fd-based-operations-during-.patch Patch0252: 0252-geo-rep-Fix-fd-referenced-before-assignment.patch Patch0253: 0253-geo-rep-Fix-history-failure.patch Patch0254: 0254-geo-rep-Do-not-crash-worker-on-ESTALE.patch Patch0255: 0255-glusterd-getting-txn_id-from-frame-cookie-in-op_sm-c.patch Patch0256: 0256-client-rpc-make-ping-timeout-configurable-for-gluste.patch Patch0257: 0257-quota-marker-contribution-with-list_del-can-cause-me.patch Patch0258: 0258-quota-Fix-crash-in-quota-enforcer.patch Patch0259: 0259-quota-don-t-log-error-when-disk-quota-exceeded.patch Patch0260: 0260-posix-posix_make_ancestryfromgfid-shouldn-t-log-ENOE.patch Patch0261: 0261-quota-validating-soft-limit-percentage.patch Patch0262: 0262-quota-marker-set-log-level-to-debug-for-ESTALE-ENOEN.patch Patch0263: 0263-quota-fix-parents-caching-during-build-ancestry.patch Patch0264: 0264-posix-fix-mem-leak-in-posix-xattrop.patch Patch0265: 0265-rpc-server-glusterd-Init-transport-list-for-accepted.patch Patch0266: 0266-uss-Take-ref-on-root-inode.patch Patch0267: 0267-glusterd-initialize-the-daemon-services-on-demand.patch Patch0268: 0268-glusterd-Stop-restart-notify-to-daemons-svcs-during-.patch Patch0269: 0269-snapview-client-Allocate-memory-using-GF_CALLOC.patch Patch0270: 0270-rpc-add-owner-xlator-argument-to-rpc_clnt_new.patch Patch0271: 0271-rebalance-glusterd-Refactor-rebalance-volfile.patch Patch0272: 
0272-glusterd-log-improvement-in-glusterd_peer_rpc_notify.patch Patch0273: 0273-glusterd-rebalance-trusted-rebalance-volfile.patch Patch0274: 0274-quota-checking-for-absolute-path-in-quota-command.patch Patch0275: 0275-quota-volume-reset-shouldn-t-remove-quota-deem-statf.patch Patch0276: 0276-libglusterfs-write-error-handling-when-filesystem-ha.patch Patch0277: 0277-xml-output-Fix-non-uniform-opErrstr-xml-output.patch Patch0278: 0278-Set-nfs.disable-to-on-when-global-NFS-Ganesha-key-is.patch Patch0279: 0279-logging-Stop-using-global-xlator-for-log_buf-allocat.patch Patch0280: 0280-quota-marker-fix-inode-quota-with-rename.patch Patch0281: 0281-bitrot-glusterd-gluster-volume-set-command-for-bitro.patch Patch0282: 0282-features-bit-rot-stub-handle-REOPEN_WAIT-on-forgotte.patch Patch0283: 0283-features-bit-rot-stub-fail-the-fop-if-inode-context-.patch Patch0284: 0284-bitrot-Scrubber-log-should-mark-bad-file-as-a-ALERT-.patch Patch0285: 0285-protocol-server-use-different-dict-for-resolving.patch Patch0286: 0286-tests-set-inode-lru-limit-to-1-and-check-if-bit-rot-.patch Patch0287: 0287-protocol-server-forget-the-inodes-which-got-ENOENT-i.patch Patch0288: 0288-afr-launch-index-heal-on-local-subvols-up-on-a-child.patch Patch0289: 0289-snapshot-scheduler-Output-correction-of-initialisati.patch Patch0290: 0290-snapshot-Log-deletion-of-snapshot-during-auto-delete.patch Patch0291: 0291-snapshot-scheduler-Check-if-volume-exists-before-add.patch Patch0292: 0292-dht-block-handle-create-op-falling-to-decommissioned.patch Patch0293: 0293-cluster-dht-avoid-mknod-on-decommissioned-brick.patch Patch0294: 0294-afr-modify-afr_txn_nothing_failed.patch Patch0295: 0295-heal-Add-check-for-healing-directories-in-split-brai.patch Patch0296: 0296-cluster-afr-Examine-data-metadata-readable-for-read-.patch Patch0297: 0297-common-ha-concise-output-for-HA-status.patch Patch0298: 0298-snapshot-Make-fops-static-for-correct-resolution-of-.patch Patch0299: 
0299-glusterd-stop-all-the-daemons-services-on-peer-detac.patch Patch0300: 0300-glusterd-Don-t-allow-remove-brick-start-commit-if-gl.patch Patch0301: 0301-libgfapi-Gracefully-exit-when-glfd-is-invalid.patch Patch0302: 0302-snapshot-cleanup-snaps-during-unprobe.patch Patch0303: 0303-libgfapi-adding-follow-flag-to-glfs_h_lookupat.patch Patch0304: 0304-libgfapi-non-default-symbol-version-macros-are-incor.patch Patch0305: 0305-tools-glusterfind-Do-not-show-session-corrupted-if-n.patch Patch0306: 0306-tools-glusterfind-Prepend-prefix-in-case-of-delete.patch Patch0307: 0307-tools-glusterfind-password-prompts-for-peer-nodes-on.patch Patch0308: 0308-marker-fix-log-when-loc.parent-and-inode-gfid-is-NUL.patch Patch0309: 0309-posix-xattrop-GF_XATTROP_GET_AND_SET-implementation.patch Patch0310: 0310-cli-on-error-invoke-cli_cmd_broadcast_response-funct.patch Patch0311: 0311-dht-lock-on-subvols-to-prevent-lookup-vs-rmdir-race.patch Patch0312: 0312-glusterd-use-2-epoll-worker-threads-by-default.patch Patch0313: 0313-glusterd-Re-enable-ping-timer-between-glusterds.patch Patch0314: 0314-event-epoll-Use-pollers-to-check-if-event_pool_dispa.patch Patch0315: 0315-ec-trusted.ec.version-xattr-of-all-root-directories-.patch Patch0316: 0316-gluster-cli-snapshot-delete-all-does-not-work-with-x.patch Patch0317: 0317-marker-preserve-previous-dirty-flag-during-update-tx.patch Patch0318: 0318-cluster-ec-Fix-write-size-in-self-heal.patch Patch0319: 0319-cluster-dht-Don-t-set-posix-acls-on-linkto-files.patch Patch0320: 0320-dht-lock-on-all-subvols-to-prevent-rmdir-vs-lookup-s.patch Patch0321: 0321-CommonHA-Fix-the-path-of-systemctl-cmd.patch Patch0322: 0322-uss-handle-buf-variable-properly-in-svs_glfs_readdir.patch Patch0323: 0323-features-snap-cleanup-the-root-loc-in-statfs.patch Patch0324: 0324-dht-NULL-dereferencing-causes-crash.patch Patch0325: 0325-dht-remove-brick-Avoid-data-loss-for-hard-link-migra.patch Patch0326: 0326-firewall-spec-Create-glusterfs-firewall-service-if-f.patch Patch0327: 
0327-glusterd-disable-ping-timer-b-w-glusterd-and-make-ep.patch Patch0328: 0328-dht-cluster-Avoid-crash-if-local-is-NULL.patch Patch0329: 0329-afr-get-split-brain-status-in-a-synctask.patch Patch0330: 0330-afr-perform-replace-brick-in-a-synctask.patch Patch0331: 0331-dht-cluster-Avoid-double-unlock-in-dht_refresh_layou.patch %description GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package includes the glusterfs binary, the glusterfsd daemon and the libglusterfs and glusterfs translator modules common to both GlusterFS server and client framework. %package api Summary: GlusterFS api library Group: System Environment/Daemons Requires: %{name} = %{version}-%{release} Requires: %{name}-client-xlators = %{version}-%{release} # we provide the Python package/namespace 'gluster' #Provides: python-gluster = %{version}-%{release} %description api GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package provides the glusterfs libgfapi library. 
%package api-devel Summary: Development Libraries Group: Development/Libraries Requires: %{name} = %{version}-%{release} Requires: %{name}-devel = %{version}-%{release} Requires: libacl-devel %description api-devel GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package provides the api include files. %package cli Summary: GlusterFS CLI Group: Applications/File Requires: %{name}-libs = %{version}-%{release} %description cli GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package provides the GlusterFS CLI application and its man page %package devel Summary: Development Libraries Group: Development/Libraries Requires: %{name} = %{version}-%{release} # Needed for the Glupy examples to work %if ( 0%{!?_without_extra_xlators:1} ) Requires: %{name}-extra-xlators = %{version}-%{release} %endif %description devel GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. 
Much of the code in GlusterFS is in user space and easily manageable. This package provides the development libraries and include files. %if ( 0%{!?_without_extra_xlators:1} ) %package extra-xlators Summary: Extra Gluster filesystem Translators Group: Applications/File # We need python-gluster rpm for gluster module's __init__.py in Python # site-packages area Requires: python-gluster = %{version}-%{release} Requires: python python-ctypes %description extra-xlators GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package provides extra filesystem Translators, such as Glupy, for GlusterFS. %endif %package fuse Summary: Fuse client Group: Applications/File BuildRequires: fuse-devel Requires: attr Requires: %{name} = %{version}-%{release} Requires: %{name}-client-xlators = %{version}-%{release} Obsoletes: %{name}-client < %{version}-%{release} Provides: %{name}-client = %{version}-%{release} %description fuse GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package provides support to FUSE based clients and inlcudes the glusterfs(d) binary. 
%if ( 0%{?_build_server} ) %package ganesha Summary: NFS-Ganesha configuration Group: Applications/File Requires: %{name}-server = %{version}-%{release} Requires: nfs-ganesha-gluster Requires: pcs %description ganesha GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package provides the configuration and related files for using NFS-Ganesha as the NFS server using GlusterFS %endif %if ( 0%{?_build_server} ) %if ( 0%{!?_without_georeplication:1} ) %package geo-replication Summary: GlusterFS Geo-replication Group: Applications/File Requires: %{name} = %{version}-%{release} Requires: %{name}-server = %{version}-%{release} Requires: python python-ctypes Requires: rsync %description geo-replication GlusterFS is a distributed file-system capable of scaling to several peta-bytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file system in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in userspace and easily manageable. This package provides support to geo-replication. %endif %endif %package libs Summary: GlusterFS common libraries Group: Applications/File %if ( 0%{!?_without_syslog:1} ) %if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 ) Requires: rsyslog-mmjsonparse %endif %if ( 0%{?rhel} && 0%{?rhel} == 6 ) Requires: rsyslog-mmcount %endif %endif %description libs GlusterFS is a distributed file-system capable of scaling to several petabytes. 
It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package provides the base GlusterFS libraries %package -n python-gluster Summary: GlusterFS python library Group: Development/Tools Requires: python %description -n python-gluster GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package contains the python modules of GlusterFS and own gluster namespace. %if ( 0%{!?_without_rdma:1} ) %package rdma Summary: GlusterFS rdma support for ib-verbs Group: Applications/File BuildRequires: libibverbs-devel BuildRequires: librdmacm-devel >= 1.0.15 Requires: %{name} = %{version}-%{release} %description rdma GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package provides support to ib-verbs library. 
%endif %if ( 0%{?_build_server} ) %if ( 0%{!?_without_regression_tests:1} ) %package regression-tests Summary: Development Tools Group: Development/Tools Requires: %{name} = %{version}-%{release} Requires: %{name}-fuse = %{version}-%{release} Requires: %{name}-server = %{version}-%{release} ## thin provisioning support Requires: lvm2 >= 2.02.89 Requires: perl(App::Prove) perl(Test::Harness) gcc util-linux-ng Requires: python attr dbench file git libacl-devel net-tools Requires: nfs-utils xfsprogs yajl %description regression-tests The Gluster Test Framework, is a suite of scripts used for regression testing of Gluster. %endif %endif %if ( 0%{?_build_server} ) %if ( 0%{!?_without_ocf:1} ) %package resource-agents Summary: OCF Resource Agents for GlusterFS License: GPLv3+ %if ( ! ( 0%{?rhel} && 0%{?rhel} < 6 || 0%{?sles_version} ) ) # EL5 does not support noarch sub-packages BuildArch: noarch %endif # this Group handling comes from the Fedora resource-agents package %if ( 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} ) Group: System Environment/Base %else Group: Productivity/Clustering/HA %endif # for glusterd Requires: %{name}-server # depending on the distribution, we need pacemaker or resource-agents Requires: %{_prefix}/lib/ocf/resource.d %description resource-agents GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package provides the resource agents which plug glusterd into Open Cluster Framework (OCF) compliant cluster resource managers, like Pacemaker. 
%endif %endif %if ( 0%{?_build_server} ) %package server Summary: Clustered file-system server Group: System Environment/Daemons Requires: %{name} = %{version}-%{release} Requires: %{name}-libs = %{version}-%{release} Requires: %{name}-cli = %{version}-%{release} # some daemons (like quota) use a fuse-mount, glusterfsd is part of -fuse Requires: %{name}-fuse = %{version}-%{release} # self-heal daemon, rebalance, nfs-server etc. are actually clients Requires: %{name}-client-xlators = %{version}-%{release} # psmisc for killall, lvm2 for snapshot, and nfs-utils and # rpcbind/portmap for gnfs server Requires: psmisc Requires: lvm2 Requires: nfs-utils %if ( 0%{?_with_systemd:1} ) Requires(post): systemd-units Requires(preun): systemd-units Requires(postun): systemd-units %else Requires(post): /sbin/chkconfig Requires(preun): /sbin/service Requires(preun): /sbin/chkconfig Requires(postun): /sbin/service %endif %if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 ) Requires: rpcbind %else Requires: portmap %endif %if ( 0%{?rhel} && 0%{?rhel} < 6 ) Obsoletes: %{name}-geo-replication = %{version}-%{release} %endif %if ( 0%{?rhel} && 0%{?rhel} <= 6 ) Requires: python-argparse %endif Requires: pyxattr %description server GlusterFS is a distributed file-system capable of scaling to several petabytes. It aggregates various storage bricks over Infiniband RDMA or TCP/IP interconnect into one large parallel network file system. GlusterFS is one of the most sophisticated file systems in terms of features and extensibility. It borrows a powerful concept called Translators from GNU Hurd kernel. Much of the code in GlusterFS is in user space and easily manageable. This package provides the glusterfs server daemon. %endif %package client-xlators Summary: GlusterFS client-side translators Group: Applications/File %description client-xlators GlusterFS is a distributed file-system capable of scaling to several petabytes. 
It aggregates various storage bricks over Infiniband RDMA
or TCP/IP interconnect into one large parallel network file
system. GlusterFS is one of the most sophisticated file systems in
terms of features and extensibility. It borrows a powerful concept
called Translators from GNU Hurd kernel. Much of the code in GlusterFS
is in user space and easily manageable.

This package provides the translators needed on any GlusterFS client.

%prep
%setup -q -n %{name}-%{version}%{?prereltag}
# NOTE(review): patch 0001 below is left commented out as found; patches
# 0002 onwards are applied strictly in numeric order.
# %patch0001 -p1
%patch0002 -p1
%patch0003 -p1
%patch0004 -p1
%patch0005 -p1
%patch0006 -p1
%patch0007 -p1
%patch0008 -p1
%patch0009 -p1
%patch0010 -p1
%patch0011 -p1
%patch0012 -p1
%patch0013 -p1
%patch0014 -p1
%patch0015 -p1
%patch0016 -p1
%patch0017 -p1
%patch0018 -p1
%patch0019 -p1
%patch0020 -p1
%patch0021 -p1
%patch0022 -p1
%patch0023 -p1
%patch0024 -p1
%patch0025 -p1
%patch0026 -p1
%patch0027 -p1
%patch0028 -p1
%patch0029 -p1
%patch0030 -p1
%patch0031 -p1
%patch0032 -p1
%patch0033 -p1
%patch0034 -p1
%patch0035 -p1
%patch0036 -p1
%patch0037 -p1
%patch0038 -p1
%patch0039 -p1
%patch0040 -p1
%patch0041 -p1
%patch0042 -p1
%patch0043 -p1
%patch0044 -p1
%patch0045 -p1
%patch0046 -p1
%patch0047 -p1
%patch0048 -p1
%patch0049 -p1
%patch0050 -p1
%patch0051 -p1
%patch0052 -p1
%patch0053 -p1
%patch0054 -p1
%patch0055 -p1
%patch0056 -p1
%patch0057 -p1
%patch0058 -p1
%patch0059 -p1
%patch0060 -p1
%patch0061 -p1
%patch0062 -p1
%patch0063 -p1
%patch0064 -p1
%patch0065 -p1
%patch0066 -p1
%patch0067 -p1
%patch0068 -p1
%patch0069 -p1
%patch0070 -p1
%patch0071 -p1
%patch0072 -p1
%patch0073 -p1
%patch0074 -p1
%patch0075 -p1
%patch0076 -p1
%patch0077 -p1
%patch0078 -p1
%patch0079 -p1
%patch0080 -p1
%patch0081 -p1
%patch0082 -p1
%patch0083 -p1
%patch0084 -p1
%patch0085 -p1
%patch0086 -p1
%patch0087 -p1
%patch0088 -p1
%patch0089 -p1
%patch0090 -p1
%patch0091 -p1
%patch0092 -p1
%patch0093 -p1
%patch0094 -p1
%patch0095 -p1
%patch0096 -p1
%patch0097 -p1
%patch0098 -p1
%patch0099 -p1
%patch0100 -p1
%patch0101 -p1
%patch0102 -p1
%patch0103 -p1
%patch0104 -p1
%patch0105 -p1
%patch0106 -p1
%patch0107 -p1
%patch0108 -p1
%patch0109 -p1
%patch0110 -p1
%patch0111 -p1
%patch0112 -p1
%patch0113 -p1
%patch0114 -p1
%patch0115 -p1
%patch0116 -p1
%patch0117 -p1
%patch0118 -p1
%patch0119 -p1
%patch0120 -p1
%patch0121 -p1
%patch0122 -p1
%patch0123 -p1
%patch0124 -p1
%patch0125 -p1
%patch0126 -p1
%patch0127 -p1
%patch0128 -p1
%patch0129 -p1
%patch0130 -p1
%patch0131 -p1
%patch0132 -p1
%patch0133 -p1
%patch0134 -p1
%patch0135 -p1
%patch0136 -p1
%patch0137 -p1
%patch0138 -p1
%patch0139 -p1
%patch0140 -p1
%patch0141 -p1
%patch0142 -p1
%patch0143 -p1
%patch0144 -p1
%patch0145 -p1
%patch0146 -p1
%patch0147 -p1
%patch0148 -p1
%patch0149 -p1
%patch0150 -p1
%patch0151 -p1
%patch0152 -p1
%patch0153 -p1
%patch0154 -p1
%patch0155 -p1
%patch0156 -p1
%patch0157 -p1
%patch0158 -p1
%patch0159 -p1
%patch0160 -p1
%patch0161 -p1
%patch0162 -p1
%patch0163 -p1
%patch0164 -p1
%patch0165 -p1
%patch0166 -p1
%patch0167 -p1
%patch0168 -p1
%patch0169 -p1
%patch0170 -p1
%patch0171 -p1
%patch0172 -p1
%patch0173 -p1
%patch0174 -p1
%patch0175 -p1
%patch0176 -p1
%patch0177 -p1
%patch0178 -p1
%patch0179 -p1
%patch0180 -p1
%patch0181 -p1
%patch0182 -p1
%patch0183 -p1
%patch0184 -p1
%patch0185 -p1
%patch0186 -p1
%patch0187 -p1
%patch0188 -p1
%patch0189 -p1
%patch0190 -p1
%patch0191 -p1
%patch0192 -p1
%patch0193 -p1
%patch0194 -p1
%patch0195 -p1
%patch0196 -p1
%patch0197 -p1
%patch0198 -p1
%patch0199 -p1
%patch0200 -p1
%patch0201 -p1
%patch0202 -p1
%patch0203 -p1
%patch0204 -p1
%patch0205 -p1
%patch0206 -p1
%patch0207 -p1
%patch0208 -p1
%patch0209 -p1
%patch0210 -p1
%patch0211 -p1
%patch0212 -p1
%patch0213 -p1
%patch0214 -p1
%patch0215 -p1
%patch0216 -p1
%patch0217 -p1
%patch0218 -p1
%patch0219 -p1
%patch0220 -p1
%patch0221 -p1
%patch0222 -p1
%patch0223 -p1
%patch0224 -p1
%patch0225 -p1
%patch0226 -p1
%patch0227 -p1
%patch0228 -p1
%patch0229 -p1
%patch0230 -p1
%patch0231 -p1
%patch0232 -p1
%patch0233 -p1
%patch0234 -p1
%patch0235 -p1 %patch0236 -p1 %patch0237 -p1 %patch0238 -p1 %patch0239 -p1 %patch0240 -p1 %patch0241 -p1 %patch0242 -p1 %patch0243 -p1 %patch0244 -p1 %patch0245 -p1 %patch0246 -p1 %patch0247 -p1 %patch0248 -p1 %patch0249 -p1 %patch0250 -p1 %patch0251 -p1 %patch0252 -p1 %patch0253 -p1 %patch0254 -p1 %patch0255 -p1 %patch0256 -p1 %patch0257 -p1 %patch0258 -p1 %patch0259 -p1 %patch0260 -p1 %patch0261 -p1 %patch0262 -p1 %patch0263 -p1 %patch0264 -p1 %patch0265 -p1 %patch0266 -p1 %patch0267 -p1 %patch0268 -p1 %patch0269 -p1 %patch0270 -p1 %patch0271 -p1 %patch0272 -p1 %patch0273 -p1 %patch0274 -p1 %patch0275 -p1 %patch0276 -p1 %patch0277 -p1 %patch0278 -p1 %patch0279 -p1 %patch0280 -p1 %patch0281 -p1 %patch0282 -p1 %patch0283 -p1 %patch0284 -p1 %patch0285 -p1 %patch0286 -p1 %patch0287 -p1 %patch0288 -p1 %patch0289 -p1 %patch0290 -p1 %patch0291 -p1 %patch0292 -p1 %patch0293 -p1 %patch0294 -p1 %patch0295 -p1 %patch0296 -p1 %patch0297 -p1 %patch0298 -p1 %patch0299 -p1 %patch0300 -p1 %patch0301 -p1 %patch0302 -p1 %patch0303 -p1 %patch0304 -p1 %patch0305 -p1 %patch0306 -p1 %patch0307 -p1 %patch0308 -p1 %patch0309 -p1 %patch0310 -p1 %patch0311 -p1 %patch0312 -p1 %patch0313 -p1 %patch0314 -p1 %patch0315 -p1 %patch0316 -p1 %patch0317 -p1 %patch0318 -p1 %patch0319 -p1 %patch0320 -p1 %patch0321 -p1 %patch0322 -p1 %patch0323 -p1 %patch0324 -p1 %patch0325 -p1 %patch0326 -p1 %patch0327 -p1 %patch0328 -p1 %patch0329 -p1 %patch0330 -p1 %patch0331 -p1 %build # In RHEL7 few hardening flags are available by default, however the RELRO # default behaviour is partial, convert to full %if ( 0%{?rhel} && 0%{?rhel} >= 7 ) LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now" export LDFLAGS %else %if ( 0%{?rhel} && 0%{?rhel} == 6 ) CFLAGS="$RPM_OPT_FLAGS -fPIE -DPIE" LDFLAGS="$RPM_LD_FLAGS -pie -Wl,-z,relro,-z,now" %else #It appears that with gcc-4.1.2 in RHEL5 there is an issue using both -fPIC and # -fPIE that makes -z relro not work; -fPIE seems to undo what -fPIC does CFLAGS="$RPM_OPT_FLAGS" 
# NOTE(review): this branch covers rhel5-era toolchains (no PIE, see above).
LDFLAGS="$RPM_LD_FLAGS -Wl,-z,relro,-z,now"
%endif
export CFLAGS
export LDFLAGS
%endif

./autogen.sh && %configure \
        %{?_with_cmocka} \
        %{?_with_tmpfilesdir} \
        %{?_without_bd} \
        %{?_without_epoll} \
        %{?_without_fusermount} \
        %{?_without_georeplication} \
        %{?_with_firewalld} \
        %{?_without_ocf} \
        %{?_without_qemu_block} \
        %{?_without_rdma} \
        %{?_without_syslog} \
        %{?_without_systemtap} \
        %{?_without_tiering}

# fix hardening and remove rpath in shlibs
%if ( 0%{?fedora} && 0%{?fedora} > 17 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
sed -i 's| \\\$compiler_flags |&\\\$LDFLAGS |' libtool
%endif
sed -i 's|^hardcode_libdir_flag_spec=.*|hardcode_libdir_flag_spec=""|' libtool
sed -i 's|^runpath_var=LD_RUN_PATH|runpath_var=DIE_RPATH_DIE|' libtool

make %{?_smp_mflags}

# Build Glupy
pushd xlators/features/glupy/src
FLAGS="$RPM_OPT_FLAGS" python setup.py build
popd

%check
make check

%install
rm -rf %{buildroot}
make install DESTDIR=%{buildroot}
# install the Glupy Python library in /usr/lib/python*/site-packages
pushd xlators/features/glupy/src
python setup.py install --skip-build --verbose --root %{buildroot}
popd
# Install include directory
mkdir -p %{buildroot}%{_includedir}/glusterfs
install -p -m 0644 libglusterfs/src/*.h \
    %{buildroot}%{_includedir}/glusterfs/
install -p -m 0644 contrib/uuid/*.h \
    %{buildroot}%{_includedir}/glusterfs/
# Following needed by hekafs multi-tenant translator
mkdir -p %{buildroot}%{_includedir}/glusterfs/rpc
install -p -m 0644 rpc/rpc-lib/src/*.h \
    %{buildroot}%{_includedir}/glusterfs/rpc/
install -p -m 0644 rpc/xdr/src/*.h \
    %{buildroot}%{_includedir}/glusterfs/rpc/
mkdir -p %{buildroot}%{_includedir}/glusterfs/server
install -p -m 0644 xlators/protocol/server/src/*.h \
    %{buildroot}%{_includedir}/glusterfs/server/
%if ( 0%{_for_fedora_koji_builds} )
install -D -p -m 0644 %{SOURCE1} \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
install -D -p -m 0644 %{SOURCE2} \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterfsd
%else
install -D -p -m 0644 extras/glusterd-sysconfig \
    %{buildroot}%{_sysconfdir}/sysconfig/glusterd
%endif
%if ( 0%{_for_fedora_koji_builds} )
%if ( 0%{?rhel} && 0%{?rhel} <= 5 )
install -D -p -m 0755 %{SOURCE6} \
    %{buildroot}%{_sysconfdir}/sysconfig/modules/glusterfs-fuse.modules
%endif
%endif
mkdir -p %{buildroot}%{_localstatedir}/log/glusterd
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfs
mkdir -p %{buildroot}%{_localstatedir}/log/glusterfsd
mkdir -p %{buildroot}%{_localstatedir}/run/gluster
touch %{buildroot}%{python_sitelib}/gluster/__init__.py
# Remove unwanted files from all the shared libraries
find %{buildroot}%{_libdir} -name '*.a' -delete
find %{buildroot}%{_libdir} -name '*.la' -delete
# Remove installed docs, the ones we want are included by %%doc, in
# /usr/share/doc/glusterfs or /usr/share/doc/glusterfs-x.y.z depending
# on the distribution
%if ( 0%{?fedora} && 0%{?fedora} > 19 ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
rm -rf %{buildroot}%{_pkgdocdir}/*
%else
rm -rf %{buildroot}%{_defaultdocdir}/%{name}
mkdir -p %{buildroot}%{_pkgdocdir}
%endif
head -50 ChangeLog > ChangeLog.head && mv ChangeLog.head ChangeLog
cat << EOM >> ChangeLog
More commit messages for this ChangeLog can be found at
https://forge.gluster.org/glusterfs-core/glusterfs/commits/v%{version}%{?prereltag}
EOM
# Remove benchmarking and other unpackaged files
%if ( 0%{?rhel} && 0%{?rhel} < 6 )
rm -rf %{buildroot}/benchmarking
rm -f %{buildroot}/glusterfs-mode.el
rm -f %{buildroot}/glusterfs.vim
%else
# make install always puts these in %%{_defaultdocdir}/%%{name} so don't
# use %%{_pkgdocdir}; that will be wrong on later Fedora distributions
rm -rf %{buildroot}%{_defaultdocdir}/%{name}/benchmarking
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs-mode.el
rm -f %{buildroot}%{_defaultdocdir}/%{name}/glusterfs.vim
%endif
# Create working directory
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd
# Update configuration file to /var/lib working directory
sed -i 's|option working-directory /etc/glusterd|option working-directory %{_sharedstatedir}/glusterd|g' \
    %{buildroot}%{_sysconfdir}/glusterfs/glusterd.vol
# Install glusterfsd .service or init.d file
%if ( 0%{_for_fedora_koji_builds} )
%_init_install %{glusterfsd_service} glusterfsd
%endif
install -D -p -m 0644 extras/glusterfs-logrotate \
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs
%if ( 0%{!?_without_georeplication:1} )
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/geo-replication
touch %{buildroot}%{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
install -D -p -m 0644 extras/glusterfs-georep-logrotate \
    %{buildroot}%{_sysconfdir}/logrotate.d/glusterfs-georep
%endif
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} > 6 )
install -D -p -m 0644 extras/gluster-rsyslog-7.2.conf \
    %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example
%endif
%if ( 0%{?rhel} && 0%{?rhel} == 6 )
install -D -p -m 0644 extras/gluster-rsyslog-5.8.conf \
    %{buildroot}%{_sysconfdir}/rsyslog.d/gluster.conf.example
%endif
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
install -D -p -m 0644 extras/logger.conf.example \
    %{buildroot}%{_sysconfdir}/glusterfs/logger.conf.example
%endif
%endif
touch %{buildroot}%{_sharedstatedir}/glusterd/glusterd.info
touch %{buildroot}%{_sharedstatedir}/glusterd/options
subdirs=("add-brick" "create" "copy-file" "delete" "gsync-create" "remove-brick" "reset" "set" "start" "stop")
for dir in ${subdirs[@]}
do
    mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/"$dir"/{pre,post}
done
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/glustershd
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/peers
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/vols
mkdir -p %{buildroot}%{_sharedstatedir}/glusterd/nfs/run
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/nfs-server.vol
touch %{buildroot}%{_sharedstatedir}/glusterd/nfs/run/nfs.pid
%{__install} -p -m 0744 extras/hook-scripts/start/post/*.sh \
    %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/start/post
%{__install} -p -m 0744 extras/hook-scripts/stop/pre/*.sh \ %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/stop/pre %{__install} -p -m 0744 extras/hook-scripts/set/post/*.sh \ %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/set/post %{__install} -p -m 0744 extras/hook-scripts/add-brick/post/*.sh \ %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/add-brick/post %{__install} -p -m 0744 extras/hook-scripts/add-brick/pre/*.sh \ %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/add-brick/pre %{__install} -p -m 0744 extras/hook-scripts/reset/post/*.sh \ %{buildroot}%{_sharedstatedir}/glusterd/hooks/1/reset/post find ./tests ./run-tests.sh -type f | cpio -pd %{buildroot}%{_prefix}/share/glusterfs ## Install bash completion for cli install -p -m 0744 -D extras/command-completion/gluster.bash \ %{buildroot}%{_sysconfdir}/bash_completion.d/gluster %clean rm -rf %{buildroot} ##----------------------------------------------------------------------------- ## All %post should be placed here and keep them sorted ## %post %if ( 0%{!?_without_syslog:1} ) %if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 ) %_init_restart rsyslog %endif %endif %post api /sbin/ldconfig %post fuse %if ( 0%{?rhel} == 5 ) modprobe fuse %endif %if ( 0%{?_build_server} ) %if ( 0%{!?_without_georeplication:1} ) %post geo-replication #restart glusterd. if [ $1 -ge 1 ]; then %_init_restart glusterd fi %endif %endif %post libs /sbin/ldconfig %if ( 0%{?_build_server} ) %post server # Legacy server %_init_enable glusterd # fix bz#1110715 if [ -f %_init_glusterfsd ]; then %_init_enable glusterfsd fi # ".cmd_log_history" is renamed to "cmd_history.log" in GlusterFS-3.7 . # While upgrading glusterfs-server package form GlusterFS version <= 3.6 to # GlusterFS version 3.7, ".cmd_log_history" should be renamed to # "cmd_history.log" to retain cli command history contents. 
if [ -f %{_localstatedir}/log/glusterfs/.cmd_log_history ]; then mv %{_localstatedir}/log/glusterfs/.cmd_log_history \ %{_localstatedir}/log/glusterfs/cmd_history.log fi # Genuine Fedora (and EPEL) builds never put gluster files in /etc; if # there are any files in /etc from a prior gluster.org install, move them # to /var/lib. (N.B. Starting with 3.3.0 all gluster files are in /var/lib # in gluster.org RPMs.) Be careful to copy them on the off chance that # /etc and /var/lib are on separate file systems if [ -d /etc/glusterd -a ! -h %{_sharedstatedir}/glusterd ]; then mkdir -p %{_sharedstatedir}/glusterd cp -a /etc/glusterd %{_sharedstatedir}/glusterd rm -rf /etc/glusterd ln -sf %{_sharedstatedir}/glusterd /etc/glusterd fi # Rename old volfiles in an RPM-standard way. These aren't actually # considered package config files, so %%config doesn't work for them. if [ -d %{_sharedstatedir}/glusterd/vols ]; then for file in $(find %{_sharedstatedir}/glusterd/vols -name '*.vol'); do newfile=${file}.rpmsave echo "warning: ${file} saved as ${newfile}" cp ${file} ${newfile} done fi # add marker translator # but first make certain that there are no old libs around to bite us # BZ 834847 if [ -e /etc/ld.so.conf.d/glusterfs.conf ]; then rm -f /etc/ld.so.conf.d/glusterfs.conf /sbin/ldconfig fi %if (0%{?_with_firewalld:1}) #reload service files if firewalld running if $(systemctl is-active firewalld 1>/dev/null 2>&1); then #firewalld-filesystem is not available for rhel7, so command used for reload. firewall-cmd --reload fi %endif pidof -c -o %PPID -x glusterd &> /dev/null if [ $? -eq 0 ]; then kill -9 `pgrep -f gsyncd.py` &> /dev/null killall --wait glusterd &> /dev/null glusterd --xlator-option *.upgrade=on -N #Cleaning leftover glusterd socket file which is created by glusterd in #rpm_script_t context. 
rm -rf /var/run/glusterd.socket
    # glusterd _was_ running, we killed it, it exited after *.upgrade=on,
    # so start it again
    %_init_start glusterd
else
    glusterd --xlator-option *.upgrade=on -N
    #Cleaning leftover glusterd socket file which is created by glusterd in
    #rpm_script_t context.
    rm -rf /var/run/glusterd.socket
fi
%endif

##-----------------------------------------------------------------------------
## All %preun should be placed here and keep them sorted
##
%if ( 0%{?_build_server} )
%preun server
if [ $1 -eq 0 ]; then
    if [ -f %_init_glusterfsd ]; then
        %_init_stop glusterfsd
    fi
    %_init_stop glusterd
    if [ -f %_init_glusterfsd ]; then
        %_init_disable glusterfsd
    fi
    %_init_disable glusterd
fi
if [ $1 -ge 1 ]; then
    if [ -f %_init_glusterfsd ]; then
        %_init_restart glusterfsd
    fi
    %_init_restart glusterd
fi
%endif

##-----------------------------------------------------------------------------
## All %postun should be placed here and keep them sorted
##
%postun
/sbin/ldconfig
%if ( 0%{!?_without_syslog:1} )
%if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 )
%_init_restart rsyslog
%endif
%endif

%postun api
/sbin/ldconfig

%if ( 0%{?_build_server} )
%postun server
%if (0%{?_with_firewalld:1})
#reload service files if firewalld running
if $(systemctl is-active firewalld 1>/dev/null 2>&1); then
    firewall-cmd --reload
fi
%endif
%endif

%postun libs
/sbin/ldconfig

##-----------------------------------------------------------------------------
## All files should be placed here and keep them grouped
##
%files
# exclude extra-xlators files
%if ( ! 0%{!?_without_extra_xlators:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/mac-compat.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_client.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_dht.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_server.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/features/template.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
%exclude %{python_sitelib}/*
%endif
# exclude regression-tests files
%if ( ! 0%{!?_without_regression_tests:1} )
%exclude %{_prefix}/share/glusterfs/run-tests.sh
%exclude %{_prefix}/share/glusterfs/tests/*
%endif
%if ( ! 0%{?_build_server} )
# exclude ganesha files
%exclude %{_sysconfdir}/ganesha/*
%exclude %{_libexecdir}/ganesha/*
%exclude %{_prefix}/lib/ocf/*
# exclude incrementalapi
%exclude %{_libexecdir}/glusterfs/*
%exclude %{_sbindir}/gfind_missing_files
%exclude %{_libexecdir}/glusterfs/glusterfind
%exclude %{_bindir}/glusterfind
%exclude %{_libexecdir}/glusterfs/peer_add_secret_pub
# NOTE(review): restored the missing exclude directive on the .py hook below;
# a bare path here would package the glusterfind delete-post hook into the
# main rpm and conflict with the server package that owns it.
%exclude %{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post.py
%exclude %{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post.pyc
%exclude %{_sharedstatedir}/glusterd/hooks/1/delete/post/S57glusterfind-delete-post.pyo
# exclude server files
%exclude %{_sharedstatedir}/glusterd/*
%exclude %{_sysconfdir}/glusterfs
%exclude %{_sysconfdir}/glusterfs/glusterd.vol
%exclude %{_sysconfdir}/glusterfs/glusterfs-georep-logrotate
%exclude %{_sysconfdir}/glusterfs/glusterfs-logrotate
%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-5.8.conf
%exclude %{_sysconfdir}/glusterfs/gluster-rsyslog-7.2.conf
%exclude %{_sysconfdir}/glusterfs/group-virt.example
%exclude %{_sysconfdir}/glusterfs/logger.conf.example
%exclude %_init_glusterd
%exclude %{_sysconfdir}/sysconfig/glusterd
%exclude %{_bindir}/glusterfind
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/pump.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
%if ( 0%{!?_without_tiering:1} )
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
%endif
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota* %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt* %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs* %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server* %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage* %if ( 0%{!?_without_tiering:1} ) %exclude %{_libdir}/libgfdb.so.* %endif %exclude %{_sbindir}/gcron.py %exclude %{_sbindir}/glfsheal %exclude %{_sbindir}/glusterd %exclude %{_sbindir}/snap_scheduler.py %exclude %{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh #/usr/share/doc/glusterfs-server-3.7.0beta2/clear_xattrs.sh %exclude %{_localstatedir}/run/gluster %if 0%{?_tmpfilesdir:1} %exclude %{_tmpfilesdir}/gluster.conf %endif %endif %doc ChangeLog COPYING-GPLV2 COPYING-LGPLV3 INSTALL README.md THANKS %if ( 0%{!?_without_syslog:1} ) %if ( 0%{?fedora} ) || ( 0%{?rhel} && 0%{?rhel} >= 6 ) %{_sysconfdir}/rsyslog.d/gluster.conf.example %endif %endif %{_mandir}/man8/*gluster*.8* %exclude %{_mandir}/man8/gluster.8* %dir %{_localstatedir}/log/glusterfs %if ( 0%{!?_without_rdma:1} ) %exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma* %endif %dir %{_datadir}/glusterfs/scripts %{_datadir}/glusterfs/scripts/post-upgrade-script-for-quota.sh %{_datadir}/glusterfs/scripts/pre-upgrade-script-for-quota.sh # xlators that are needed on the client- and on the server-side %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/auth %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/addr.so %{_libdir}/glusterfs/%{version}%{?prereltag}/auth/login.so %dir %{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport 
%{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/socket.so
%dir %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/error-gen.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/io-stats.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/debug/trace.so
%if ( ! ( 0%{?rhel} && 0%{?rhel} < 6 ) )
# RHEL-5 based distributions have a too old openssl
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/crypt.so
%endif
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/access-control.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/barrier.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/cdc.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changelog.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/gfid-access.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/read-only.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/shard.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-client.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/worm.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/meta.so
# performance xlators shipped in the main package
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-cache.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/io-threads.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/md-cache.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/open-behind.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/quick-read.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/read-ahead.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/readdir-ahead.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/stat-prefetch.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/performance/write-behind.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/system/posix-acl.so

%files api
%exclude %{_libdir}/*.so
# libgfapi files
%{_libdir}/libgfapi.*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/api.so

%files api-devel
%{_libdir}/pkgconfig/glusterfs-api.pc
%{_libdir}/libgfapi.so
%{_includedir}/glusterfs/api/*

%files cli
%{_sbindir}/gluster
%{_mandir}/man8/gluster.8*
%{_sysconfdir}/bash_completion.d/gluster

%files devel
%{_includedir}/glusterfs
%exclude %{_includedir}/glusterfs/y.tab.h
# api headers belong to -api-devel
%exclude %{_includedir}/glusterfs/api
%exclude %{_libdir}/libgfapi.so
%if ( ! 0%{?_build_server} )
%exclude %{_libdir}/libgfchangelog.so
%endif
%if ( 0%{!?_without_tiering:1} && ! 0%{?_build_server})
%exclude %{_libdir}/libgfdb.so
%endif
%{_libdir}/*.so
# Glupy Translator examples
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy/debug-trace.*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy/helloworld.*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy/negative.*
%if ( 0%{?_build_server} )
%{_libdir}/pkgconfig/libgfchangelog.pc
%else
%exclude %{_libdir}/pkgconfig/libgfchangelog.pc
%endif
%if ( 0%{!?_without_tiering:1} && 0%{?_build_server})
%{_libdir}/pkgconfig/libgfdb.pc
%else
%if ( 0%{?rhel} && 0%{?rhel} >= 6 )
%exclude %{_libdir}/pkgconfig/libgfdb.pc
%endif
%endif

%files client-xlators
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/*.so
# pump is server-side (shipped by glusterfs-server)
%exclude %{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/pump.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/ganesha.so
%if ( 0%{!?_without_qemu_block:1} )
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/qemu-block.so
%endif
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/client.so

%if ( 0%{!?_without_extra_xlators:1} )
%files extra-xlators
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/encryption/rot-13.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/glupy.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/mac-compat.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_client.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_dht.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/prot_server.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quiesce.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/features/template.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/testing/performance/symlink-cache.so
# Glupy Python files
%{python_sitelib}/gluster/glupy/*
# Don't expect a .egg-info file on EL5
%if ( ! ( 0%{?rhel} && 0%{?rhel} < 6 ) )
%{python_sitelib}/glusterfs_glupy*.egg-info
%endif
%endif

%files fuse
# glusterfs is a symlink to glusterfsd, -server depends on -fuse.
%{_sbindir}/glusterfs
%{_sbindir}/glusterfsd
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mount/fuse.so
/sbin/mount.glusterfs
%if ( 0%{!?_without_fusermount:1} )
%{_bindir}/fusermount-glusterfs
%endif
%if ( 0%{_for_fedora_koji_builds} )
%if ( 0%{?rhel} && 0%{?rhel} <= 5 )
%{_sysconfdir}/sysconfig/modules/glusterfs-fuse.modules
%endif
%endif

%if ( 0%{?_build_server} )
%files ganesha
%{_sysconfdir}/ganesha/*
%attr(0755,-,-) %{_libexecdir}/ganesha/*
%attr(0755,-,-) %{_prefix}/lib/ocf/resource.d/heartbeat/*
%endif

%if ( 0%{?_build_server} )
%if ( 0%{!?_without_georeplication:1} )
%files geo-replication
%config(noreplace) %{_sysconfdir}/logrotate.d/glusterfs-georep
%{_libexecdir}/glusterfs/gsyncd
%{_libexecdir}/glusterfs/python/syncdaemon/*
%{_libexecdir}/glusterfs/gverify.sh
%{_libexecdir}/glusterfs/set_geo_rep_pem_keys.sh
%{_libexecdir}/glusterfs/peer_gsec_create
%{_libexecdir}/glusterfs/peer_mountbroker
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/geo-replication
%{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
%{_datadir}/glusterfs/scripts/get-gfid.sh
%{_datadir}/glusterfs/scripts/slave-upgrade.sh
%{_datadir}/glusterfs/scripts/gsync-upgrade.sh
%{_datadir}/glusterfs/scripts/generate-gfid-file.sh
%{_datadir}/glusterfs/scripts/gsync-sync-gfid
# created at runtime by gsyncd; owned but not installed
%ghost %attr(0644,-,-) %{_sharedstatedir}/glusterd/geo-replication/gsyncd_template.conf
%endif
%{_libexecdir}/glusterfs/gfind_missing_files
%{_sbindir}/gfind_missing_files
%endif

%files libs
%{_libdir}/*.so.*
# libgfapi runtime is packaged by the -api subpackage
%exclude %{_libdir}/libgfapi.*
%if ( 0%{!?_without_tiering:1} )
# libgfdb is only needed server-side
%exclude %{_libdir}/libgfdb.*
%endif

%files -n python-gluster
# introducing glusterfs module in site packages.
# so that all other gluster submodules can reside in the same namespace.
%{python_sitelib}/gluster/__init__.*

%if ( 0%{!?_without_rdma:1} )
%files rdma
%{_libdir}/glusterfs/%{version}%{?prereltag}/rpc-transport/rdma*
%endif

%if ( 0%{?_build_server} )
%if ( 0%{!?_without_regression_tests:1} )
%files regression-tests
%{_prefix}/share/glusterfs/run-tests.sh
%{_prefix}/share/glusterfs/tests
%exclude %{_prefix}/share/glusterfs/tests/basic/rpm.t
%endif
%endif

%if ( 0%{?_build_server} )
%if ( 0%{!?_without_ocf:1} )
%files resource-agents
# /usr/lib is the standard for OCF, also on x86_64
%{_prefix}/lib/ocf/resource.d/glusterfs
%endif
%endif

%if ( 0%{?_build_server} )
%files server
# this hook is owned by the geo-replication subpackage
%exclude %{_sharedstatedir}/glusterd/hooks/1/gsync-create/post/S56glusterd-geo-rep-create-post.sh
%doc extras/clear_xattrs.sh
%config(noreplace) %{_sysconfdir}/sysconfig/glusterd
%config(noreplace) %{_sysconfdir}/glusterfs
%dir %{_localstatedir}/run/gluster
%if 0%{?_tmpfilesdir:1}
%{_tmpfilesdir}/gluster.conf
%endif
%dir %{_sharedstatedir}/glusterd
%{_sharedstatedir}/glusterd/*
%dir %{_sharedstatedir}/glusterd/groups
%config(noreplace) %{_sharedstatedir}/glusterd/groups/virt
# Legacy configs
%if ( 0%{_for_fedora_koji_builds} )
%config(noreplace) %{_sysconfdir}/sysconfig/glusterfsd
%endif
%config %{_sharedstatedir}/glusterd/hooks/1/add-brick/post/disabled-quota-root-xattr-heal.sh
%config %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre/S28Quota-enable-root-xattr-heal.sh
%config %{_sharedstatedir}/glusterd/hooks/1/set/post/S30samba-set.sh
%config %{_sharedstatedir}/glusterd/hooks/1/set/post/S32gluster_enable_shared_storage.sh
%config %{_sharedstatedir}/glusterd/hooks/1/start/post/S29CTDBsetup.sh
%config %{_sharedstatedir}/glusterd/hooks/1/start/post/S30samba-start.sh
%config %{_sharedstatedir}/glusterd/hooks/1/start/post/S31ganesha-start.sh
%config %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S30samba-stop.sh
%config %{_sharedstatedir}/glusterd/hooks/1/stop/pre/S29CTDB-teardown.sh
%config %{_sharedstatedir}/glusterd/hooks/1/reset/post/S31ganesha-reset.sh
# init files
%_init_glusterd
%if ( 0%{_for_fedora_koji_builds} )
%_init_glusterfsd
%endif
# binaries
%{_sbindir}/glusterd
%{_sbindir}/glfsheal
# {_sbindir}/glusterfsd is the actual binary, but glusterfs (client) is a
# symlink. The binary itself (and symlink) are part of the glusterfs-fuse
# package, because glusterfs-server depends on that anyway.
# server-side xlators, shipped only by glusterfs-server
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/cluster/pump.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/arbiter.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bit-rot.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/bitrot-stub.so
%if ( 0%{!?_without_tiering:1} )
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/changetimerecorder.so
%endif
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/index.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/locks.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/posix*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/snapview-server.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/marker.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/quota*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/trash.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/features/upcall.so
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/mgmt*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/nfs*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/protocol/server*
%{_libdir}/glusterfs/%{version}%{?prereltag}/xlator/storage*
%if ( 0%{!?_without_tiering:1} )
%{_libdir}/libgfdb.so.*
%endif
#snap_scheduler
%{_sbindir}/snap_scheduler.py
%{_sbindir}/gcron.py
#hookscripts
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/post
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/add-brick/pre
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/set/post
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/start/post
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop
%dir %attr(0755,-,-) %{_sharedstatedir}/glusterd/hooks/1/stop/pre
# runtime state created by glusterd; owned but not installed
%ghost %attr(0644,-,-) %config(noreplace) %{_sharedstatedir}/glusterd/glusterd.info
%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/options
%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/nfs-server.vol
%ghost %attr(0600,-,-) %{_sharedstatedir}/glusterd/nfs/run/nfs.pid
# Extra utility script
%{_datadir}/glusterfs/scripts/stop-all-gluster-processes.sh
# Incrementalapi
%{_libexecdir}/glusterfs/glusterfind
%{_bindir}/glusterfind
%{_libexecdir}/glusterfs/peer_add_secret_pub
%endif

%if ( 0%{?_with_firewalld:1} )
/usr/lib/firewalld/services/glusterfs.xml
%endif

##-----------------------------------------------------------------------------
## All %pretrans should be placed here and keep them sorted
##
%if 0%{?_build_server}
# NOTE(fix): these scriptlets are embedded Lua, so each %%pretrans needs the
# explicit "-p <lua>" interpreter argument; the bare "-p" that was here would
# not select rpm's built-in Lua interpreter.
%pretrans -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          echo "ERROR: Distribute volumes detected. In-service rolling upgrade requires distribute volume(s) to be stopped."
          echo "ERROR: Please stop distribute volume(s) before proceeding... exiting!"
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   echo "WARNING: Updating glusterfs requires its processes to be killed. This action does NOT incur downtime."
   echo "WARNING: Ensure to wait for the upgraded server to finish healing before proceeding."
   echo "WARNING: Refer upgrade section of install guide for more details"
   echo "Please run # service glusterd stop; pkill glusterfs; pkill glusterfsd; pkill gsyncd.py;"
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end

%pretrans api -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-api_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end

%pretrans api-devel -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-api-devel_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end

%pretrans cli -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-cli_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end

%pretrans client-xlators -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.
script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-client-xlators_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end

%pretrans devel -p <lua>
-- NOTE(fix): restored the explicit "-p <lua>" interpreter argument on these
-- scriptlets (it was a bare "-p" before).
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-devel_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end

%pretrans fuse -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-fuse_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end

%pretrans ganesha -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-ganesha_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end

%if ( 0%{!?_without_georeplication:1} )
%pretrans geo-replication -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-geo-replication_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end
%endif

%pretrans libs -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-libs_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end

%pretrans -n python-gluster -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/python-gluster_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end

%if ( 0%{!?_without_rdma:1} )
%pretrans rdma -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.
script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-rdma_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end
%endif

%if ( 0%{!?_without_ocf:1} )
%pretrans resource-agents -p <lua>
-- NOTE(fix): restored the explicit "-p <lua>" interpreter argument on these
-- scriptlets (it was a bare "-p" before).
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-resource-agents_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end
%endif

%pretrans server -p <lua>
if not posix.access("/bin/bash", "x") then
    -- initial installation, no shell, no running glusterfsd
    return 0
end

-- TODO: move this completely to a lua script
-- For now, we write a temporary bash script and execute that.

script = [[#!/bin/sh
pidof -c -o %PPID -x glusterfsd &>/dev/null

if [ $? -eq 0 ]; then
   pushd . > /dev/null 2>&1
   for volume in /var/lib/glusterd/vols/*; do cd $volume;
       vol_type=`grep '^type=' info | awk -F'=' '{print $2}'`
       volume_started=`grep '^status=' info | awk -F'=' '{print $2}'`
       if [ $vol_type -eq 0 ] && [ $volume_started -eq 1 ] ; then
          exit 1;
       fi
   done

   popd > /dev/null 2>&1
   exit 1;
fi
]]

-- rpm in RHEL5 does not have os.tmpname()
-- io.tmpfile() can not be resolved to a filename to pass to bash :-/
tmpname = "/tmp/glusterfs-server_pretrans_" .. os.date("%s")
tmpfile = io.open(tmpname, "w")
tmpfile:write(script)
tmpfile:close()
ok, how, val = os.execute("/bin/bash " .. tmpname)
os.remove(tmpname)
if not (ok == 0) then
   error("Detected running glusterfs processes", ok)
end
%endif

%changelog
* Thu Nov 19 2015 CentOS Sources - 3.7.1-16.el7.centos
- remove vendor and/or packager lines

* Mon Sep 21 2015 Bala.FA - 3.7.1-16
- fixes bugs bz#1263653

* Tue Sep 15 2015 Bala.FA - 3.7.1-15
- fixes bugs bz#1253774 bz#1257509 bz#1259221 bz#1259750 bz#1260086
  bz#1260512 bz#1262236 bz#1262291

* Tue Sep 01 2015 Bala.FA - 3.7.1-14
- fixes bugs bz#1115367 bz#1232569 bz#1234213 bz#1234610 bz#1241336
  bz#1241385 bz#1241862 bz#1242803 bz#1245077 bz#1251457

* Thu Aug 27 2015 Bala.FA - 3.7.1-13
- fixes bugs bz#1224184 bz#1225452 bz#1227759 bz#1228135 bz#1231080
  bz#1234399 bz#1235971 bz#1236038 bz#1238070 bz#1238111 bz#1238171
  bz#1238398 bz#1238977 bz#1239021 bz#1240918 bz#1245165 bz#1245636
  bz#1245924 bz#1251409 bz#1255471 bz#1257007 bz#1247349

* Wed Aug 19 2015 Bala.FA - 3.7.1-12
- fixes bugs bz#1027723 bz#1064265 bz#1065651 bz#1134288 bz#1213893
  bz#1226665 bz#1226817 bz#1229606 bz#1229621 bz#1230532 bz#1232216
  bz#1235571 bz#1236546 bz#1236672 bz#1236990 bz#1238049 bz#1238071
  bz#1239075 bz#1240657 bz#1241649 bz#1241761 bz#1243542 bz#1243722
  bz#1243886 bz#1244527 bz#1245448 bz#1245536 bz#1245542 bz#1245897
  bz#1245915 bz#1245919 bz#1246946 bz#1252359 bz#1253549

* Sun Jul 19 2015 Bala.FA - 3.7.1-11
- fixes bugs bz#1244187

* Wed Jul 15 2015 Bala.FA - 3.7.1-10
- fixes bugs bz#1224177 bz#1230513 bz#1240617 bz#1242367 bz#1242423
  bz#1242543 bz#1242585 bz#1242767

* Mon Jul 13 2015 Bala.FA - 3.7.1-9
- fixes bugs bz#1224177 bz#1224610 bz#1230525 bz#1231647 bz#1234725
  bz#1235121 bz#1239057 bz#1239108 bz#1239317 bz#1240196 bz#1240338
  bz#1240925 bz#1241048 bz#1241150 bz#1241366 bz#1241830 bz#1241839
  bz#1241904 bz#1242046

* Wed Jul 08 2015 Bala.FA - 3.7.1-8
- fixes bugs bz#1223205 bz#1224177 bz#1228247 bz#1229567 bz#1230513
  bz#1231635 bz#1231732 bz#1239280 bz#1240168 bz#1240245

* Fri Jul 03 2015 Bala.FA - 3.7.1-7
- fixes bugs bz#1227197 bz#1228127 bz#1230612 bz#1231782
bz#1235735 bz#1236556 bz#1237053 bz#1237165 bz#1238167 * Sun Jun 28 2015 Bala.FA - 3.7.1-6 - fixes bugs bz#1140649 bz#1222856 bz#1223225 bz#1223677 bz#1223738 bz#1223757 bz#1224177 bz#1224199 bz#1224227 bz#1224619 bz#1225747 bz#1226149 bz#1226863 bz#1227869 bz#1228525 bz#1228598 bz#1231771 bz#1231773 bz#1231775 bz#1231779 bz#1231784 bz#1231792 bz#1231796 bz#1231797 bz#1232307 bz#1232309 bz#1232625 bz#1233033 bz#1233575 bz#1233694 bz#1234419 bz#1234720 bz#1234725 bz#1234869 bz#1235147 bz#1235182 bz#1235225 bz#1235236 bz#1235244 bz#1235540 bz#1235544 bz#1235628 bz#1235940 * Wed Jun 24 2015 Bala.FA - 3.7.1-5 - fixes bugs bz#1121560 bz#1140649 bz#1178130 bz#1224115 bz#1226863 bz#1227311 bz#1227900 bz#1228626 bz#1229260 bz#1229601 bz#1230513 bz#1230517 bz#1230646 bz#1231223 bz#1231778 bz#1231780 bz#1231782 bz#1231788 bz#1231792 bz#1232609 bz#1232624 bz#1233046 bz#1233486 * Thu Jun 18 2015 Bala.FA - 3.7.1-4 - fixes bugs bz#1130998 bz#1224137 bz#1225010 bz#1227029 bz#1227311 bz#1227709 bz#1230101 bz#1230635 bz#1231026 bz#1231166 bz#1231651 bz#1231771 bz#1231775 bz#1231776 bz#1231792 bz#1232428 bz#1232641 bz#1233144 * Mon Jun 15 2015 Bala.FA - 3.7.1-3 - fixes bugs bz#1211839 bz#1224046 bz#1224236 bz#1224662 bz#1224880 bz#1225507 bz#1226889 bz#1227618 bz#1228017 bz#1228495 bz#1228597 bz#1229623 bz#1229674 bz#1230607 bz#1230764 bz#1231078 * Fri Jun 12 2015 Bala.FA - 3.7.1-2 - fixes bugs bz#1140506 bz#1186216 bz#1200815 bz#1203901 bz#1222053 bz#1222785 bz#1223205 bz#1223746 bz#1224123 bz#1224136 bz#1224160 bz#1224161 bz#1224175 bz#1224177 bz#1224183 bz#1224195 bz#1224215 bz#1224249 bz#1224618 bz#1227172 bz#1227179 bz#1227469 bz#1227918 bz#1228164 bz#1228173 bz#1228496 bz#1228529 bz#1228643 bz#1229268 bz#1229612 bz#1230114 bz#1230186 bz#1230195 bz#1230522 * Tue Jun 09 2015 Bala FA - 3.7.1-1 - rebase to 3.7.1 - fixes bugs bz#1027710 bz#1039674 bz#1047481 bz#1140506 bz#1223201 bz#1223206 bz#1223715 bz#1224225 bz#1226162 bz#1227262 bz#1227317 bz#1227649 bz#1228266 bz#1228518 
bz#1228541 * Tue Jun 02 2015 Bala FA - 3.7.0-3.1 - fix glusterfs server build in rhel-7 due to change in dist tag * Mon Jun 01 2015 Bala FA - 3.7.0-3 - Resolves bz#1134690 bz#1110715 * Fri May 15 2015 Bala FA - 3.7.0-2 - Resolves bz#1221743: glusterd not starting after a fresh install of 3.7.0-1.el6rhs build * Thu May 14 2015 Bala FA - 3.7.0-1 - rebased to 3.7.0