diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ccd53d5
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+SOURCES/db-5.3.21.tar.gz
+SOURCES/db.1.85.tar.gz
+SOURCES/libdb-5.3.21-manpages.tar.gz
diff --git a/.libdb.metadata b/.libdb.metadata
new file mode 100644
index 0000000..1505856
--- /dev/null
+++ b/.libdb.metadata
@@ -0,0 +1,3 @@
+32e43c4898c8996750c958a90c174bd116fcba83 SOURCES/db-5.3.21.tar.gz
+ccb057b07761d1b2b34626e748c7392c749d5e6d SOURCES/db.1.85.tar.gz
+36992944b90f89ccdba8c0f188e39f5fba9d6ba9 SOURCES/libdb-5.3.21-manpages.tar.gz
diff --git a/SOURCES/007-mt19937db.c_license.patch b/SOURCES/007-mt19937db.c_license.patch
new file mode 100644
index 0000000..eab05d7
--- /dev/null
+++ b/SOURCES/007-mt19937db.c_license.patch
@@ -0,0 +1,49 @@
+Description: mt19937db.c license should include the GPL
+ This file is distributed from upstream Berkeley DB under the Artistic
+ License (no version specified), although it was later released by the
+ original author under both GPL2+ and BSD.
+ .
+ References:
+ http://web.archive.org/web/20010806225716/http://www.math.keio.ac.jp/matumoto/mt19937int.c
+ http://web.archive.org/web/20130127064020/http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/CODES/mt19937ar.c
+ https://bugzilla.redhat.com/show_bug.cgi?format=multiple&id=886838
+ https://lists.nongnu.org/archive/html/gnu-linux-libre/2010-05/msg00000.html
+ .
+Author: Ruben Rodriguez <ruben@trisquel.info>
+
+
+--- db-5.3.21/src/crypto/mersenne/mt19937db.c.licensefix
++++ db-5.3.21/src/crypto/mersenne/mt19937db.c
+@@ -16,16 +16,27 @@
+ /*   Coded by Takuji Nishimura, considering the suggestions by    */
+ /* Topher Cooper and Marc Rieffel in July-Aug. 1997.              */
+ 
+-/* This library is free software under the Artistic license:       */
+-/* see the file COPYING distributed together with this code.       */
+-/* For the verification of the code, its output sequence file      */
+-/* mt19937int.out is attached (2001/4/2)                           */
+-
+ /* Copyright (C) 1997, 1999 Makoto Matsumoto and Takuji Nishimura. */
+ /* Any feedback is very welcome. For any question, comments,       */
+ /* see http://www.math.keio.ac.jp/matumoto/emt.html or email       */
+ /* matumoto@math.keio.ac.jp                                        */
+ 
++/* This library is free software; you can redistribute it and/or   */
++/* modify it under the terms of the GNU Library General Public     */
++/* License as published by the Free Software Foundation; either    */
++/* version 2 of the License, or (at your option) any later         */
++/* version.                                                        */
++/* This library is distributed in the hope that it will be useful, */
++/* but WITHOUT ANY WARRANTY; without even the implied warranty of  */
++/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.            */
++/* See the GNU Library General Public License for more details.    */
++/* You should have received a copy of the GNU Library General      */
++/* Public License along with this library; if not, write to the    */
++/* Free Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA   */ 
++/* 02111-1307  USA                                                 */
++
++/* This library is free software under the Artistic license:       */
++
+ /* REFERENCE                                                       */
+ /* M. Matsumoto and T. Nishimura,                                  */
+ /* "Mersenne Twister: A 623-Dimensionally Equidistributed Uniform  */
diff --git a/SOURCES/checkpoint-opd-deadlock.patch b/SOURCES/checkpoint-opd-deadlock.patch
new file mode 100644
index 0000000..7aab6e1
--- /dev/null
+++ b/SOURCES/checkpoint-opd-deadlock.patch
@@ -0,0 +1,14 @@
+--- db-5.3.21/src/db/db_cam.c.opd_deadlock	2017-10-31 12:20:54.118979690 +0100
++++ db-5.3.21/src/db/db_cam.c	2017-10-31 12:21:06.828739341 +0100
+@@ -868,6 +868,11 @@
+ 	    flags == DB_PREV || flags == DB_PREV_DUP)) {
+ 		if (tmp_rmw && (ret = dbc->am_writelock(dbc)) != 0)
+ 			goto err;
++        /* Latch the primary tree page here in order to not deadlock later. */
++		if (cp->page == NULL &&
++		    (ret = __memp_fget(mpf, &cp->pgno,
++			 dbc->thread_info, dbc->txn, 0, &cp->page)) != 0)
++			goto err;
+ 		if (F_ISSET(dbc, DBC_TRANSIENT))
+ 			opd = cp->opd;
+ 		else if ((ret = __dbc_idup(cp->opd, &opd, DB_POSITION)) != 0)
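
The comment added above ("Latch the primary tree page here in order to not
deadlock later") is an instance of the usual lock-ordering discipline: when
two latches may be needed together, take them in one fixed order up front so
that no two threads can each hold one and wait for the other.  A generic,
hedged sketch of that rule in plain pthreads (not Berkeley DB code; the names
are made up):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t primary = PTHREAD_MUTEX_INITIALIZER;   /* e.g. primary page */
static pthread_mutex_t duplicate = PTHREAD_MUTEX_INITIALIZER; /* e.g. off-page duplicates */

static void
work(const char *who)
{
	/* Always primary first, then duplicate -- never the reverse. */
	pthread_mutex_lock(&primary);
	pthread_mutex_lock(&duplicate);
	printf("%s: holding both latches\n", who);
	pthread_mutex_unlock(&duplicate);
	pthread_mutex_unlock(&primary);
}

int
main(void)	/* build with: cc -pthread example.c */
{
	work("cursor");
	return (0);
}
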
diff --git a/SOURCES/db-1.85-errno.patch b/SOURCES/db-1.85-errno.patch
new file mode 100644
index 0000000..ccc9ddf
--- /dev/null
+++ b/SOURCES/db-1.85-errno.patch
@@ -0,0 +1,89 @@
+glibc doesn't like errno as the name of a field.
+--- db.1.85/hash/hash.h	Mon Feb 18 19:12:14 2002
++++ db.1.85/hash/hash.h	Mon Feb 18 19:12:20 2002
+@@ -103,7 +103,7 @@
+ 	BUFHEAD 	*cpage;		/* Current page */
+ 	int		cbucket;	/* Current bucket */
+ 	int		cndx;		/* Index of next item on cpage */
+-	int		errno;		/* Error Number -- for DBM 
++	int		err;		/* Error Number -- for DBM 
+ 					 * compatability */
+ 	int		new_file;	/* Indicates if fd is backing store 
+ 					 * or no */
+--- db.1.85/hash/hash.c	Mon Feb 18 19:12:24 2002
++++ db.1.85/hash/hash.c	Mon Feb 18 19:12:44 2002
+@@ -505,7 +505,7 @@
+ 	else
+ 		if (wsize != sizeof(HASHHDR)) {
+ 			errno = EFTYPE;
+-			hashp->errno = errno;
++			hashp->err = errno;
+ 			return (-1);
+ 		}
+ 	for (i = 0; i < NCACHED; i++)
+@@ -536,7 +536,7 @@
+ 
+ 	hashp = (HTAB *)dbp->internal;
+ 	if (flag) {
+-		hashp->errno = errno = EINVAL;
++		hashp->err = errno = EINVAL;
+ 		return (ERROR);
+ 	}
+ 	return (hash_access(hashp, HASH_GET, (DBT *)key, data));
+@@ -553,11 +553,11 @@
+ 
+ 	hashp = (HTAB *)dbp->internal;
+ 	if (flag && flag != R_NOOVERWRITE) {
+-		hashp->errno = errno = EINVAL;
++		hashp->err = errno = EINVAL;
+ 		return (ERROR);
+ 	}
+ 	if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
+-		hashp->errno = errno = EPERM;
++		hashp->err = errno = EPERM;
+ 		return (ERROR);
+ 	}
+ 	return (hash_access(hashp, flag == R_NOOVERWRITE ?
+@@ -574,11 +574,11 @@
+ 
+ 	hashp = (HTAB *)dbp->internal;
+ 	if (flag && flag != R_CURSOR) {
+-		hashp->errno = errno = EINVAL;
++		hashp->err = errno = EINVAL;
+ 		return (ERROR);
+ 	}
+ 	if ((hashp->flags & O_ACCMODE) == O_RDONLY) {
+-		hashp->errno = errno = EPERM;
++		hashp->err = errno = EPERM;
+ 		return (ERROR);
+ 	}
+ 	return (hash_access(hashp, HASH_DELETE, (DBT *)key, NULL));
+@@ -729,7 +729,7 @@
+ 
+ 	hashp = (HTAB *)dbp->internal;
+ 	if (flag && flag != R_FIRST && flag != R_NEXT) {
+-		hashp->errno = errno = EINVAL;
++		hashp->err = errno = EINVAL;
+ 		return (ERROR);
+ 	}
+ #ifdef HASH_STATISTICS
+--- db.1.85/hash/ndbm.c	Mon Feb 18 19:12:58 2002
++++ db.1.85/hash/ndbm.c	Mon Feb 18 19:13:05 2002
+@@ -180,7 +180,7 @@
+ 	HTAB *hp;
+ 
+ 	hp = (HTAB *)db->internal;
+-	return (hp->errno);
++	return (hp->err);
+ }
+ 
+ extern int
+@@ -190,7 +190,7 @@
+ 	HTAB *hp;
+ 
+ 	hp = (HTAB *)db->internal;
+-	hp->errno = 0;
++	hp->err = 0;
+ 	return (0);
+ }
+ 
diff --git a/SOURCES/db-4.5.20-jni-include-dir.patch b/SOURCES/db-4.5.20-jni-include-dir.patch
new file mode 100644
index 0000000..b0881c3
--- /dev/null
+++ b/SOURCES/db-4.5.20-jni-include-dir.patch
@@ -0,0 +1,12 @@
+diff -up db-4.7.25/dist/configure.ac.jni db-4.7.25/dist/configure.ac
+--- db-4.7.25/dist/configure.ac.jni	2008-08-20 14:22:59.000000000 +0200
++++ db-4.7.25/dist/configure.ac	2008-08-20 14:23:39.000000000 +0200
+@@ -418,7 +418,7 @@ if test "$db_cv_java" = "yes"; then
+ 	AC_PROG_JAVAC
+ 	AC_PROG_JAR
+ 	AC_PROG_JAVA
+-	AC_JNI_INCLUDE_DIR
++	JNI_INCLUDE_DIRS="/usr/lib/jvm/java/include /usr/lib/jvm/java/include/linux"
+ 
+ 	AC_MSG_CHECKING(java version)
+         case "$JAVA" in
diff --git a/SOURCES/db-4.6.21-1.85-compat.patch b/SOURCES/db-4.6.21-1.85-compat.patch
new file mode 100644
index 0000000..62ae950
--- /dev/null
+++ b/SOURCES/db-4.6.21-1.85-compat.patch
@@ -0,0 +1,14 @@
+diff -up db-5.1.19/dist/Makefile.in.185compat db-5.1.19/dist/Makefile.in
+--- db-5.1.19/dist/Makefile.in.185compat	2010-08-27 17:08:03.000000000 +0200
++++ db-5.1.19/dist/Makefile.in	2010-09-10 10:02:32.974640425 +0200
+@@ -193,8 +193,8 @@ libtso_major=	$(libtcl_base)-$(LIBMAJOR)
+ # local libraries, for example.  Do that by adding -I options to the DB185INC
+ # line, and -l options to the DB185LIB line.
+ ##################################################
+-DB185INC=	-c @CFLAGS@ -I$(topdir) @CPPFLAGS@
+-DB185LIB=
++DB185INC=	-c @CFLAGS@ -I$(srcdir) -I$(srcdir)/db.1.85/PORT/linux/include @CPPFLAGS@
++DB185LIB=	${srcdir}/db.1.85/PORT/linux/libdb.a
+ 
+ ##################################################
+ # Performance Event Monitoring definitions
diff --git a/SOURCES/libdb-5.3.21-mutex_leak.patch b/SOURCES/libdb-5.3.21-mutex_leak.patch
new file mode 100644
index 0000000..0738dab
--- /dev/null
+++ b/SOURCES/libdb-5.3.21-mutex_leak.patch
@@ -0,0 +1,718 @@
+diff -U 5 -r db-5.3.21.old/src/dbinc_auto/int_def.in db-5.3.21/src/dbinc_auto/int_def.in
+--- db-5.3.21.old/src/dbinc_auto/int_def.in	2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/dbinc_auto/int_def.in	2016-10-25 22:40:58.000000000 +0800
+@@ -1371,10 +1371,11 @@
+ #define	__memp_failchk __memp_failchk@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_bhwrite __memp_bhwrite@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_pgread __memp_pgread@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_pg __memp_pg@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_bhfree __memp_bhfree@DB_VERSION_UNIQUE_NAME@
++#define	__memp_bh_clear_dirty __memp_bh_clear_dirty@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_fget_pp __memp_fget_pp@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_fget __memp_fget@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_fcreate_pp __memp_fcreate_pp@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_fcreate __memp_fcreate@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_set_clear_len __memp_set_clear_len@DB_VERSION_UNIQUE_NAME@
+@@ -1395,10 +1396,11 @@
+ #define	__memp_fopen __memp_fopen@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_fclose_pp __memp_fclose_pp@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_fclose __memp_fclose@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_mf_discard __memp_mf_discard@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_inmemlist __memp_inmemlist@DB_VERSION_UNIQUE_NAME@
++#define	__memp_mf_mark_dead __memp_mf_mark_dead@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_fput_pp __memp_fput_pp@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_fput __memp_fput@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_unpin_buffers __memp_unpin_buffers@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_dirty __memp_dirty@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_shared __memp_shared@DB_VERSION_UNIQUE_NAME@
+@@ -1453,10 +1455,11 @@
+ #define	__memp_fsync_pp __memp_fsync_pp@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_fsync __memp_fsync@DB_VERSION_UNIQUE_NAME@
+ #define	__mp_xxx_fh __mp_xxx_fh@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_sync_int __memp_sync_int@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_mf_sync __memp_mf_sync@DB_VERSION_UNIQUE_NAME@
++#define	__memp_purge_dead_files __memp_purge_dead_files@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_trickle_pp __memp_trickle_pp@DB_VERSION_UNIQUE_NAME@
+ #define	__mutex_alloc __mutex_alloc@DB_VERSION_UNIQUE_NAME@
+ #define	__mutex_alloc_int __mutex_alloc_int@DB_VERSION_UNIQUE_NAME@
+ #define	__mutex_free __mutex_free@DB_VERSION_UNIQUE_NAME@
+ #define	__mutex_free_int __mutex_free_int@DB_VERSION_UNIQUE_NAME@
+diff -U 5 -r db-5.3.21.old/src/dbinc_auto/mp_ext.h db-5.3.21/src/dbinc_auto/mp_ext.h
+--- db-5.3.21.old/src/dbinc_auto/mp_ext.h	2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/dbinc_auto/mp_ext.h	2016-10-25 22:40:58.000000000 +0800
+@@ -14,10 +14,11 @@
+ int __memp_failchk __P((ENV *));
+ int __memp_bhwrite __P((DB_MPOOL *, DB_MPOOL_HASH *, MPOOLFILE *, BH *, int));
+ int __memp_pgread __P((DB_MPOOLFILE *, BH *, int));
+ int __memp_pg __P((DB_MPOOLFILE *, db_pgno_t, void *, int));
+ int __memp_bhfree __P((DB_MPOOL *, REGINFO *, MPOOLFILE *, DB_MPOOL_HASH *, BH *, u_int32_t));
++void __memp_bh_clear_dirty __P((ENV*, DB_MPOOL_HASH *, BH *));
+ int __memp_fget_pp __P((DB_MPOOLFILE *, db_pgno_t *, DB_TXN *, u_int32_t, void *));
+ int __memp_fget __P((DB_MPOOLFILE *, db_pgno_t *, DB_THREAD_INFO *, DB_TXN *, u_int32_t, void *));
+ int __memp_fcreate_pp __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
+ int __memp_fcreate __P((ENV *, DB_MPOOLFILE **));
+ int __memp_set_clear_len __P((DB_MPOOLFILE *, u_int32_t));
+@@ -38,10 +39,11 @@
+ int __memp_fopen __P((DB_MPOOLFILE *, MPOOLFILE *, const char *, const char **, u_int32_t, int, size_t));
+ int __memp_fclose_pp __P((DB_MPOOLFILE *, u_int32_t));
+ int __memp_fclose __P((DB_MPOOLFILE *, u_int32_t));
+ int __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *, int));
+ int __memp_inmemlist __P((ENV *, char ***, int *));
++void __memp_mf_mark_dead __P((DB_MPOOL *, MPOOLFILE *, int*));
+ int __memp_fput_pp __P((DB_MPOOLFILE *, void *, DB_CACHE_PRIORITY, u_int32_t));
+ int __memp_fput __P((DB_MPOOLFILE *, DB_THREAD_INFO *, void *, DB_CACHE_PRIORITY));
+ int __memp_unpin_buffers __P((ENV *, DB_THREAD_INFO *));
+ int __memp_dirty __P((DB_MPOOLFILE *, void *, DB_THREAD_INFO *, DB_TXN *, DB_CACHE_PRIORITY, u_int32_t));
+ int __memp_shared __P((DB_MPOOLFILE *, void *));
+@@ -96,10 +98,11 @@
+ int __memp_fsync_pp __P((DB_MPOOLFILE *));
+ int __memp_fsync __P((DB_MPOOLFILE *));
+ int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_FH **));
+ int __memp_sync_int __P((ENV *, DB_MPOOLFILE *, u_int32_t, u_int32_t, u_int32_t *, int *));
+ int __memp_mf_sync __P((DB_MPOOL *, MPOOLFILE *, int));
++int __memp_purge_dead_files __P((ENV *));
+ int __memp_trickle_pp __P((DB_ENV *, int, int *));
+ 
+ #if defined(__cplusplus)
+ }
+ #endif
+diff -U 5 -r db-5.3.21.old/src/mp/mp_bh.c db-5.3.21/src/mp/mp_bh.c
+--- db-5.3.21.old/src/mp/mp_bh.c	2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_bh.c	2016-10-25 17:09:35.000000000 +0800
+@@ -472,15 +472,12 @@
+ 	 * a shared latch.
+ 	 */
+ 	if (F_ISSET(bhp, BH_DIRTY | BH_TRASH)) {
+ 		MUTEX_LOCK(env, hp->mtx_hash);
+ 		DB_ASSERT(env, !SH_CHAIN_HASNEXT(bhp, vc));
+-		if (ret == 0 && F_ISSET(bhp, BH_DIRTY)) {
+-			F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
+-			DB_ASSERT(env, atomic_read(&hp->hash_page_dirty) > 0);
+-			atomic_dec(env, &hp->hash_page_dirty);
+-		}
++		if (ret == 0)
++			__memp_bh_clear_dirty(env, hp, bhp);
+ 
+ 		/* put the page back if necessary. */
+ 		if ((ret != 0 || BH_REFCOUNT(bhp) > 1) &&
+ 		    F_ISSET(bhp, BH_TRASH)) {
+ 			ret = __memp_pg(dbmfp, bhp->pgno, bhp->buf, 1);
+@@ -686,5 +683,31 @@
+ 	} else
+ 		MUTEX_UNLOCK(env, mfp->mutex);
+ 
+ 	return (ret);
+ }
++
++/*
++ * __memp_bh_clear_dirty --
++ *	Clear the dirty flag of a buffer. Calls on the same buffer must be
++ *	serialized to get the accounting correct. This can be achieved by
++ *	acquiring an exclusive lock on the buffer, a shared lock on the
++ *	buffer plus an exclusive lock on the hash bucket, or some other
++ *	mechanism that guarantees single-thread access to the entire region
++ *	(e.g. during __memp_region_bhfree()).
++ *
++ * PUBLIC: void __memp_bh_clear_dirty __P((ENV*, DB_MPOOL_HASH *, BH *));
++ */
++void
++__memp_bh_clear_dirty(env, hp, bhp)
++	ENV *env;
++	DB_MPOOL_HASH *hp;
++	BH *bhp;
++{
++	COMPQUIET(env, env);
++	if (F_ISSET(bhp, BH_DIRTY)) {
++		F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
++		DB_ASSERT(env, atomic_read(&hp->hash_page_dirty) > 0);
++		(void)atomic_dec(env, &hp->hash_page_dirty);
++	}
++}
++
+diff -U 5 -r db-5.3.21.old/src/mp/mp_fget.c db-5.3.21/src/mp/mp_fget.c
+--- db-5.3.21.old/src/mp/mp_fget.c	2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_fget.c	2016-10-25 17:11:08.000000000 +0800
+@@ -437,16 +437,11 @@
+ 		 * complain and get out.
+ 		 */
+ 		if (flags == DB_MPOOL_FREE) {
+ freebuf:		MUTEX_LOCK(env, hp->mtx_hash);
+ 			h_locked = 1;
+-			if (F_ISSET(bhp, BH_DIRTY)) {
+-				F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
+-				DB_ASSERT(env,
+-				   atomic_read(&hp->hash_page_dirty) > 0);
+-				atomic_dec(env, &hp->hash_page_dirty);
+-			}
++			__memp_bh_clear_dirty(env, hp, bhp);
+ 
+ 			/*
+ 			 * If the buffer we found is already freed, we're done.
+ 			 * If the ref count is not 1 then someone may be
+ 			 * peeking at the buffer.  We cannot free it until they
+diff -U 5 -r db-5.3.21.old/src/mp/mp_fopen.c db-5.3.21/src/mp/mp_fopen.c
+--- db-5.3.21.old/src/mp/mp_fopen.c	2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_fopen.c	2016-10-25 22:31:05.000000000 +0800
+@@ -12,10 +12,11 @@
+ #include "dbinc/log.h"
+ #include "dbinc/mp.h"
+ #include "dbinc/db_page.h"
+ #include "dbinc/hash.h"
+ 
++static int __memp_count_dead_mutex __P((DB_MPOOL *, u_int32_t *));
+ static int __memp_mpf_alloc __P((DB_MPOOL *,
+     DB_MPOOLFILE *, const char *, u_int32_t, u_int32_t, MPOOLFILE **));
+ static int __memp_mpf_find __P((ENV *,
+     DB_MPOOLFILE *, DB_MPOOL_HASH *, const char *, u_int32_t, MPOOLFILE **));
+ 
+@@ -709,11 +710,15 @@
+ 		 * We should be able to set mfp to NULL and break out of the
+ 		 * loop, but I like the idea of checking all the entries.
+ 		 */
+ 		if (LF_ISSET(DB_TRUNCATE)) {
+ 			MUTEX_LOCK(env, mfp->mutex);
+-			mfp->deadfile = 1;
++			/*
++			 * We cannot purge dead files here, because the caller
++			 * is holding the mutex of the hash bucket of mfp.
++			 */
++			__memp_mf_mark_dead(dbmp, mfp, NULL);
+ 			MUTEX_UNLOCK(env, mfp->mutex);
+ 			continue;
+ 		}
+ 
+ 		/*
+@@ -907,14 +912,15 @@
+ 	DB_MPOOL *dbmp;
+ 	ENV *env;
+ 	MPOOLFILE *mfp;
+ 	char *rpath;
+ 	u_int32_t ref;
+-	int deleted, ret, t_ret;
++	int deleted, purge_dead, ret, t_ret;
+ 
+ 	env = dbmfp->env;
+ 	dbmp = env->mp_handle;
++	purge_dead = 0;
+ 	ret = 0;
+ 
+ 	/*
+ 	 * Remove the DB_MPOOLFILE from the process' list.
+ 	 *
+@@ -1004,11 +1010,11 @@
+ 	}
+ 	DB_ASSERT(env, mfp->neutral_cnt < mfp->mpf_cnt);
+ 	if (--mfp->mpf_cnt == 0 || LF_ISSET(DB_MPOOL_DISCARD)) {
+ 		if (LF_ISSET(DB_MPOOL_DISCARD) ||
+ 		    F_ISSET(mfp, MP_TEMP) || mfp->unlink_on_close) {
+-			mfp->deadfile = 1;
++			__memp_mf_mark_dead(dbmp, mfp, &purge_dead);
+ 		}
+ 		if (mfp->unlink_on_close) {
+ 			if ((t_ret = __db_appname(dbmp->env, DB_APP_DATA,
+ 			    R_ADDR(dbmp->reginfo, mfp->path_off), NULL,
+ 			    &rpath)) != 0 && ret == 0)
+@@ -1037,10 +1043,12 @@
+ 			deleted = 1;
+ 		}
+ 	}
+ 	if (!deleted && !LF_ISSET(DB_MPOOL_NOLOCK))
+ 		MUTEX_UNLOCK(env, mfp->mutex);
++	if (purge_dead)
++		(void)__memp_purge_dead_files(env);
+ 
+ done:	/* Discard the DB_MPOOLFILE structure. */
+ 	if (dbmfp->pgcookie != NULL) {
+ 		__os_free(env, dbmfp->pgcookie->data);
+ 		__os_free(env, dbmfp->pgcookie);
+@@ -1091,11 +1099,11 @@
+ 	/*
+ 	 * We have to release the MPOOLFILE mutex before acquiring the region
+ 	 * mutex so we don't deadlock.  Make sure nobody ever looks at this
+ 	 * structure again.
+ 	 */
+-	mfp->deadfile = 1;
++	__memp_mf_mark_dead(dbmp, mfp, NULL);
+ 
+ 	/* Discard the mutex we're holding and return it too the pool. */
+ 	MUTEX_UNLOCK(env, mfp->mutex);
+ 	if ((t_ret = __mutex_free(env, &mfp->mutex)) != 0 && ret == 0)
+ 		ret = t_ret;
+@@ -1216,5 +1224,106 @@
+ 	/* Make sure we don't return any garbage. */
+ 	*cntp = 0;
+ 	*namesp = NULL;
+ 	return (ret);
+ }
++
++/*
++ * __memp_mf_mark_dead --
++ *	Mark an MPOOLFILE as dead because its contents are no longer necessary.
++ *	This happens when removing, truncation, or closing an unnamed in-memory
++ *	This happens when removing, truncating, or closing an unnamed in-memory
++ *	call __memp_purge_dead_files() after the lock on mfp is released. The
++ *	caller must hold an exclusive lock on the mfp handle.
++ *
++ * PUBLIC: void __memp_mf_mark_dead __P((DB_MPOOL *, MPOOLFILE *, int*));
++ */
++void
++__memp_mf_mark_dead(dbmp, mfp, purgep)
++	DB_MPOOL *dbmp;	
++	MPOOLFILE *mfp;
++	int *purgep;
++{
++	ENV *env;
++#ifdef HAVE_MUTEX_SUPPORT
++	REGINFO *infop;
++	DB_MUTEXREGION *mtxregion;
++	u_int32_t mutex_max, mutex_inuse, dead_mutex;
++#endif
++
++	if (purgep != NULL)
++		*purgep = 0;
++
++	env = dbmp->env;
++
++#ifdef HAVE_MUTEX_SUPPORT
++	MUTEX_REQUIRED(env, mfp->mutex);
++
++	if (MUTEX_ON(env) && mfp->deadfile == 0) {
++		infop = &env->mutex_handle->reginfo;
++		mtxregion = infop->primary;
++
++		mutex_inuse = mtxregion->stat.st_mutex_inuse;
++		if ((mutex_max = env->dbenv->mutex_max) == 0)
++			mutex_max = infop->rp->max / mtxregion->mutex_size;
++
++		/*
++		 * Purging dead pages requires a full scan of the entire cache
++		 * buffer, so it is a slow operation. We only want to do it
++		 * when it is necessary and provides enough benefits. Below is
++		 * a simple heuristic that determines when to purge all dead
++		 * pages.
++		 */
++		if (purgep != NULL && mutex_inuse > mutex_max - 200) {
++			/*
++			 * If the mutex region is almost full and there are
++			 * many mutexes held by dead files, purge dead files.
++			 */
++			(void)__memp_count_dead_mutex(dbmp, &dead_mutex);
++			dead_mutex += mfp->block_cnt + 1;
++
++			if (dead_mutex > mutex_inuse / 20)
++				*purgep = 1;
++		}
++	}
++#endif
++
++	mfp->deadfile = 1;
++}
++
++/*
++ * __memp_count_dead_mutex --
++ *	Estimate the number of mutexes held by dead files.
++ */
++static int
++__memp_count_dead_mutex(dbmp, dead_mutex)
++	DB_MPOOL *dbmp;
++	u_int32_t *dead_mutex;
++{
++	ENV *env;
++	DB_MPOOL_HASH *hp;
++	MPOOL *mp;
++	MPOOLFILE *mfp;
++	u_int32_t mutex_per_file;
++	int busy, i;
++
++	env = dbmp->env;
++	*dead_mutex = 0;
++	mutex_per_file = 1;
++#ifndef HAVE_ATOMICFILEREAD
++	mutex_per_file = 2;
++#endif
++	mp = dbmp->reginfo[0].primary;
++	hp = R_ADDR(dbmp->reginfo, mp->ftab);
++	for (i = 0; i < MPOOL_FILE_BUCKETS; i++, hp++) {
++		busy = MUTEX_TRYLOCK(env, hp->mtx_hash);
++		if (busy)
++			continue;
++		SH_TAILQ_FOREACH(mfp, &hp->hash_bucket, q, __mpoolfile) {
++			if (mfp->deadfile)
++				*dead_mutex += mfp->block_cnt + mutex_per_file;
++		}
++		MUTEX_UNLOCK(env, hp->mtx_hash);
++	}
++
++	return (0);
++}
+diff -U 5 -r db-5.3.21.old/src/mp/mp_method.c db-5.3.21/src/mp/mp_method.c
+--- db-5.3.21.old/src/mp/mp_method.c	2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_method.c	2016-10-25 17:22:23.000000000 +0800
+@@ -638,11 +638,11 @@
+ 	DB_MPOOL_HASH *hp, *nhp;
+ 	MPOOL *mp;
+ 	MPOOLFILE *mfp;
+ 	roff_t newname_off;
+ 	u_int32_t bucket;
+-	int locked, ret;
++	int locked, purge_dead, ret;
+ 	size_t nlen;
+ 	void *p;
+ 
+ #undef	op_is_remove
+ #define	op_is_remove	(newname == NULL)
+@@ -655,10 +655,11 @@
+ 	dbmp = NULL;
+ 	mfp = NULL;
+ 	nhp = NULL;
+ 	p = NULL;
+ 	locked = ret = 0;
++	purge_dead = 0;
+ 
+ 	if (!MPOOL_ON(env))
+ 		goto fsop;
+ 
+ 	dbmp = env->mp_handle;
+@@ -747,11 +748,11 @@
+ 		 * they do not get reclaimed as long as they exist.  Since we
+ 		 * are now deleting the database, we need to dec that count.
+ 		 */
+ 		if (mfp->no_backing_file)
+ 			mfp->mpf_cnt--;
+-		mfp->deadfile = 1;
++		__memp_mf_mark_dead(dbmp, mfp, &purge_dead);
+ 		MUTEX_UNLOCK(env, mfp->mutex);
+ 	} else {
+ 		/*
+ 		 * Else, it's a rename.  We've allocated memory for the new
+ 		 * name.  Swap it with the old one.  If it's in memory we
+@@ -806,10 +807,16 @@
+ 	if (locked == 1) {
+ 		MUTEX_UNLOCK(env, hp->mtx_hash);
+ 		if (nhp != NULL && nhp != hp)
+ 			MUTEX_UNLOCK(env, nhp->mtx_hash);
+ 	}
++	/* 
++	 * __memp_purge_dead_files() must be called when the hash bucket is
++	 * unlocked.
++	 */
++	if (purge_dead)
++		(void)__memp_purge_dead_files(env);
+ 	return (ret);
+ }
+ 
+ /*
+  * __memp_ftruncate __
+diff -U 5 -r db-5.3.21.old/src/mp/mp_sync.c db-5.3.21/src/mp/mp_sync.c
+--- db-5.3.21.old/src/mp/mp_sync.c	2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_sync.c	2016-10-25 17:26:58.000000000 +0800
+@@ -24,10 +24,11 @@
+ static int __bhcmp __P((const void *, const void *));
+ static int __memp_close_flush_files __P((ENV *, int));
+ static int __memp_sync_files __P((ENV *));
+ static int __memp_sync_file __P((ENV *,
+ 		MPOOLFILE *, void *, u_int32_t *, u_int32_t));
++static inline void __update_err_ret(int, int*);
+ 
+ /*
+  * __memp_walk_files --
+  * PUBLIC: int __memp_walk_files __P((ENV *, MPOOL *,
+  * PUBLIC:	int (*) __P((ENV *, MPOOLFILE *, void *,
+@@ -961,5 +962,125 @@
+ 		return (-1);
+ 	if (bhp1->track_pgno > bhp2->track_pgno)
+ 		return (1);
+ 	return (0);
+ }
++
++/*
++ * __memp_purge_dead_files --
++ *	Remove all dead files and their buffers from the mpool. The caller
++ *	cannot hold any lock on the dead MPOOLFILE handles, their buffers
++ *	or their hash buckets.
++ *
++ * PUBLIC: int __memp_purge_dead_files __P((ENV *));
++ */
++int
++__memp_purge_dead_files(env)
++	ENV *env;
++{
++	BH *bhp;
++	DB_MPOOL *dbmp;
++	DB_MPOOL_HASH *hp, *hp_end;
++	REGINFO *infop;
++	MPOOL *c_mp, *mp;
++	MPOOLFILE *mfp;
++	u_int32_t i_cache;
++	int ret, t_ret, h_lock;
++
++	if (!MPOOL_ON(env))
++		return (0);
++
++	dbmp = env->mp_handle;
++	mp = dbmp->reginfo[0].primary;
++	ret = t_ret = h_lock = 0;
++
++	/*
++	 * Walk each cache's list of buffers and free all buffers whose
++	 * MPOOLFILE is marked as dead.
++	 */
++	for (i_cache = 0; i_cache < mp->nreg; i_cache++) {
++		infop = &dbmp->reginfo[i_cache]; 
++		c_mp = infop->primary;
++
++		hp = R_ADDR(infop, c_mp->htab);
++		hp_end = &hp[c_mp->htab_buckets];
++		for (; hp < hp_end; hp++) {
++			/* Skip empty buckets. */
++			if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
++				continue;
++
++			/* 
++			 * Search for a dead buffer. Other places that call
++			 * __memp_bhfree() acquire the buffer lock before the
++			 * hash bucket lock. Even though we acquire the two
++			 * locks in reverse order, we cannot deadlock here
++			 * because we don't block waiting for the locks.
++			 */
++			t_ret = MUTEX_TRYLOCK(env, hp->mtx_hash);
++			if (t_ret != 0) {
++				__update_err_ret(t_ret, &ret);
++				continue;
++			}
++			h_lock = 1;
++			SH_TAILQ_FOREACH(bhp, &hp->hash_bucket, hq, __bh) {
++				/* Skip buffers that are being used. */
++				if (BH_REFCOUNT(bhp) > 0)
++					continue;
++
++				mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
++				if (!mfp->deadfile)
++					continue;
++
++				/* Found a dead buffer. Prepare to free it. */
++				t_ret = MUTEX_TRYLOCK(env, bhp->mtx_buf);
++				if (t_ret != 0) {
++					__update_err_ret(t_ret, &ret);
++					continue;
++				}
++
++				DB_ASSERT(env, (!F_ISSET(bhp, BH_EXCLUSIVE) &&
++				    BH_REFCOUNT(bhp) == 0));
++				F_SET(bhp, BH_EXCLUSIVE);
++				(void)atomic_inc(env, &bhp->ref);
++
++				__memp_bh_clear_dirty(env, hp, bhp);
++
++				/*
++				 * Free the buffer. The buffer and hash bucket
++				 * are unlocked by __memp_bhfree.
++				 */
++				if ((t_ret = __memp_bhfree(dbmp, infop, mfp,
++				    hp, bhp, BH_FREE_FREEMEM)) == 0)
++					/*
++					 * Decrement hp, so the next turn will
++					 * search the same bucket again.
++					 */
++					hp--;
++				else
++					__update_err_ret(t_ret, &ret);
++
++				/*
++				 * The hash bucket is unlocked, we need to
++				 * start over again.
++				 */
++				h_lock = 0;
++				break;
++			}
++
++			if (h_lock) {
++				MUTEX_UNLOCK(env, hp->mtx_hash);
++				h_lock = 0;
++			}
++		}
++	}
++
++	return (ret);
++}
++
++static inline void
++__update_err_ret(t_ret, retp)
++	int t_ret;
++	int *retp;
++{
++	if (t_ret != 0 && t_ret != DB_LOCK_NOTGRANTED && *retp == 0)
++		*retp = t_ret;
++}
+diff -U 5 -r db-5.3.21.old/src/mp/mp_trickle.c db-5.3.21/src/mp/mp_trickle.c
+--- db-5.3.21.old/src/mp/mp_trickle.c	2012-05-12 01:57:53.000000000 +0800
++++ db-5.3.21/src/mp/mp_trickle.c	2016-10-25 17:27:57.000000000 +0800
+@@ -65,10 +65,14 @@
+ 	    "DB_ENV->memp_trickle: %d: percent must be between 1 and 100",
+ 		    "%d"), pct);
+ 		return (EINVAL);
+ 	}
+ 
++	/* First we purge all dead files and their buffers. */
++	if ((ret = __memp_purge_dead_files(env)) != 0)
++		return (ret);
++
+ 	/*
+ 	 * Loop through the caches counting total/dirty buffers.
+ 	 *
+ 	 * XXX
+ 	 * Using hash_page_dirty is our only choice at the moment, but it's not
+diff -U 5 -r db-5.3.21.old/src/mutex/mut_region.c db-5.3.21/src/mutex/mut_region.c
+--- db-5.3.21.old/src/mutex/mut_region.c	2012-05-12 01:57:54.000000000 +0800
++++ db-5.3.21/src/mutex/mut_region.c	2016-10-25 17:34:22.000000000 +0800
+@@ -15,11 +15,11 @@
+ #include "dbinc/txn.h"
+ 
+ static db_size_t __mutex_align_size __P((ENV *));
+ static int __mutex_region_init __P((ENV *, DB_MUTEXMGR *));
+ static size_t __mutex_region_size __P((ENV *));
+-static size_t __mutex_region_max __P((ENV *));
++static size_t __mutex_region_max __P((ENV *, u_int32_t));
+ 
+ /*
+  * __mutex_open --
+  *	Open a mutex region.
+  *
+@@ -32,11 +32,11 @@
+ {
+ 	DB_ENV *dbenv;
+ 	DB_MUTEXMGR *mtxmgr;
+ 	DB_MUTEXREGION *mtxregion;
+ 	size_t size;
+-	u_int32_t cpu_count;
++	u_int32_t cpu_count, mutex_needed;
+ 	int ret;
+ #ifndef HAVE_ATOMIC_SUPPORT
+ 	u_int i;
+ #endif
+ 
+@@ -59,23 +59,24 @@
+ 		    cpu_count : cpu_count * MUTEX_SPINS_PER_PROCESSOR)) != 0)
+ 			return (ret);
+ 	}
+ 
+ 	/*
+-	 * If the user didn't set an absolute value on the number of mutexes
+-	 * we'll need, figure it out.  We're conservative in our allocation,
+-	 * we need mutexes for DB handles, group-commit queues and other things
+-	 * applications allocate at run-time.  The application may have kicked
+-	 * up our count to allocate its own mutexes, add that in.
++	 * Figure out the number of mutexes we'll need.  We're conservative in
++	 * our allocation, we need mutexes for DB handles, group-commit queues
++	 * and other things applications allocate at run-time.  The application
++	 * may have kicked up our count to allocate its own mutexes, add that
++	 * in.
+ 	 */
++	mutex_needed =
++	    __lock_region_mutex_count(env) +
++	    __log_region_mutex_count(env) +
++	    __memp_region_mutex_count(env) +
++	    __txn_region_mutex_count(env);
+ 	if (dbenv->mutex_cnt == 0 &&
+ 	    F_ISSET(env, ENV_PRIVATE | ENV_THREAD) != ENV_PRIVATE)
+-		dbenv->mutex_cnt =
+-		    __lock_region_mutex_count(env) +
+-		    __log_region_mutex_count(env) +
+-		    __memp_region_mutex_count(env) +
+-		    __txn_region_mutex_count(env);
++		dbenv->mutex_cnt = mutex_needed;
+ 
+ 	if (dbenv->mutex_max != 0 && dbenv->mutex_cnt > dbenv->mutex_max)
+ 		dbenv->mutex_cnt = dbenv->mutex_max;
+ 
+ 	/* Create/initialize the mutex manager structure. */
+@@ -88,12 +89,12 @@
+ 	mtxmgr->reginfo.id = INVALID_REGION_ID;
+ 	mtxmgr->reginfo.flags = REGION_JOIN_OK;
+ 	size = __mutex_region_size(env);
+ 	if (create_ok)
+ 		F_SET(&mtxmgr->reginfo, REGION_CREATE_OK);
+-	if ((ret = __env_region_attach(env,
+-	    &mtxmgr->reginfo, size, size + __mutex_region_max(env))) != 0)
++	if ((ret = __env_region_attach(env, &mtxmgr->reginfo,
++	    size, size + __mutex_region_max(env, mutex_needed))) != 0)
+ 		goto err;
+ 
+ 	/* If we created the region, initialize it. */
+ 	if (F_ISSET(&mtxmgr->reginfo, REGION_CREATE))
+ 		if ((ret = __mutex_region_init(env, mtxmgr)) != 0)
+@@ -350,44 +351,62 @@
+ 
+ 	dbenv = env->dbenv;
+ 
+ 	s = sizeof(DB_MUTEXMGR) + 1024;
+ 
+-	/* We discard one mutex for the OOB slot. */
++	/* 
++	 * We discard one mutex for the OOB slot. Make sure mutex_cnt doesn't
++	 * overflow.
++	 */
+ 	s += __env_alloc_size(
+-	    (dbenv->mutex_cnt + 1) *__mutex_align_size(env));
++	    (dbenv->mutex_cnt + (dbenv->mutex_cnt == UINT32_MAX ? 0 : 1)) *
++	    __mutex_align_size(env));
+ 
+ 	return (s);
+ }
+ 
+ /*
+  * __mutex_region_max --
+  *	 Return the amount of space needed to reach the maximum size.
+  */
+ static size_t
+-__mutex_region_max(env)
++__mutex_region_max(env, mutex_needed)
+ 	ENV *env;
++	u_int32_t mutex_needed;
+ {
+ 	DB_ENV *dbenv;
+-	u_int32_t max;
++	u_int32_t max, mutex_cnt;
+ 
+ 	dbenv = env->dbenv;
++	mutex_cnt = dbenv->mutex_cnt;
+ 
+-	if ((max = dbenv->mutex_max) == 0) {
++	/*
++	 * We want to limit the region size to accommodate at most UINT32_MAX
++	 * mutexes. If mutex_cnt is UINT32_MAX, no more space is allowed.
++	 */
++	if ((max = dbenv->mutex_max) == 0 && mutex_cnt != UINT32_MAX)
+ 		if (F_ISSET(env, ENV_PRIVATE | ENV_THREAD) == ENV_PRIVATE)
+-			max = dbenv->mutex_inc + 1;
+-		else
++			if (dbenv->mutex_inc + 1 < UINT32_MAX - mutex_cnt)
++				max = dbenv->mutex_inc + 1 + mutex_cnt;
++			else
++				max = UINT32_MAX;
++		else {
+ 			max = __lock_region_mutex_max(env) +
+ 			    __txn_region_mutex_max(env) +
+ 			    __log_region_mutex_max(env) +
+ 			    dbenv->mutex_inc + 100;
+-	} else if (max <= dbenv->mutex_cnt)
++			if (max < UINT32_MAX - mutex_needed)
++				max += mutex_needed;
++			else
++				max = UINT32_MAX;
++		}
++
++	if (max <= mutex_cnt)
+ 		return (0);
+ 	else
+-		max -= dbenv->mutex_cnt;
+-
+-	return ( __env_alloc_size(max * __mutex_align_size(env)));
++		return (__env_alloc_size(
++		    (max - mutex_cnt) * __mutex_align_size(env)));
+ }
+ 
+ #ifdef	HAVE_MUTEX_SYSTEM_RESOURCES
+ /*
+  * __mutex_resource_return
+
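
The heuristic added in __memp_mf_mark_dead() above only asks for a purge when
the mutex region is nearly exhausted (fewer than 200 free slots) and dead
files account for more than roughly 5% of the mutexes in use.  A standalone
sketch of that arithmetic with made-up numbers (this is not the library code):

#include <stdio.h>

static int
should_purge(unsigned mutex_inuse, unsigned mutex_max, unsigned dead_mutex)
{
	/* Mirrors the two numeric conditions in __memp_mf_mark_dead(). */
	return (mutex_inuse > mutex_max - 200 && dead_mutex > mutex_inuse / 20);
}

int
main(void)
{
	/* A 10,000-slot region, 9,900 in use, 600 held by dead files. */
	printf("purge: %d\n", should_purge(9900, 10000, 600));	/* prints 1 */
	return (0);
}
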
diff --git a/SOURCES/libdb-5.3.21-region-size-check.patch b/SOURCES/libdb-5.3.21-region-size-check.patch
new file mode 100644
index 0000000..77be25e
--- /dev/null
+++ b/SOURCES/libdb-5.3.21-region-size-check.patch
@@ -0,0 +1,39 @@
+diff -up db-5.3.21/src/env/env_region.c.zero-region db-5.3.21/src/env/env_region.c
+--- db-5.3.21/src/env/env_region.c.zero-region	2012-05-11 19:57:53.000000000 +0200
++++ db-5.3.21/src/env/env_region.c	2017-09-06 08:50:45.310276385 +0200
+@@ -1060,6 +1060,7 @@ __env_region_attach(env, infop, init, ma
+ 	REGION *rp;
+ 	int ret;
+ 	char buf[sizeof(DB_REGION_FMT) + 20];
++	struct stat st;
+ 
+ 	/*
+ 	 * Find or create a REGION structure for this region.  If we create
+@@ -1090,6 +1091,14 @@ __env_region_attach(env, infop, init, ma
+ 	if ((ret = __env_sys_attach(env, infop, rp)) != 0)
+ 		goto err;
+ 
++        /* Check the size of the underlying file */
++        if (infop->fhp != NULL && F_ISSET(infop->fhp, DB_FH_OPENED) &&
++            infop->fhp->fd != -1 && fstat(infop->fhp->fd, &st) != -1 &&
++            st.st_size == 0) {
++            ret = DB_RUNRECOVERY;
++            goto err;
++        }
++
+ 	/*
+ 	 * Fault the pages into memory.  Note, do this BEFORE we initialize
+ 	 * anything because we're writing pages in created regions, not just
+diff -up db-5.3.21/src/os/os_map.c.zero-region db-5.3.21/src/os/os_map.c
+--- db-5.3.21/src/os/os_map.c.zero-region	2012-05-11 19:57:54.000000000 +0200
++++ db-5.3.21/src/os/os_map.c	2017-09-06 08:49:39.144546552 +0200
+@@ -265,6 +265,9 @@ __os_detach(env, infop, destroy)
+ 	dbenv = env->dbenv;
+ 
+ 	rp = infop->rp;
++	/* Do not touch the region information if it no longer exists */
++	if (rp == NULL)
++	    return EINVAL;
+ 
+ 	/* If the user replaced the unmap call, call through their interface. */
+ 	if (DB_GLOBAL(j_region_unmap) != NULL)
diff --git a/SOURCES/libdb-5.3.21-trickle_cpu.patch b/SOURCES/libdb-5.3.21-trickle_cpu.patch
new file mode 100644
index 0000000..20b0e88
--- /dev/null
+++ b/SOURCES/libdb-5.3.21-trickle_cpu.patch
@@ -0,0 +1,150 @@
+diff -up db-5.3.21/src/dbinc_auto/int_def.in.trickle db-5.3.21/src/dbinc_auto/int_def.in
+--- db-5.3.21/src/dbinc_auto/int_def.in.trickle	2018-08-21 10:54:06.066757392 +0200
++++ db-5.3.21/src/dbinc_auto/int_def.in	2018-08-21 10:54:06.111756561 +0200
+@@ -1458,6 +1458,7 @@
+ #define	__memp_sync_int __memp_sync_int@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_mf_sync __memp_mf_sync@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_purge_dead_files __memp_purge_dead_files@DB_VERSION_UNIQUE_NAME@
++#define	__memp_purge_dead_and_count __memp_purge_dead_and_count@DB_VERSION_UNIQUE_NAME@
+ #define	__memp_trickle_pp __memp_trickle_pp@DB_VERSION_UNIQUE_NAME@
+ #define	__mutex_alloc __mutex_alloc@DB_VERSION_UNIQUE_NAME@
+ #define	__mutex_alloc_int __mutex_alloc_int@DB_VERSION_UNIQUE_NAME@
+diff -up db-5.3.21/src/dbinc_auto/mp_ext.h.trickle db-5.3.21/src/dbinc_auto/mp_ext.h
+--- db-5.3.21/src/dbinc_auto/mp_ext.h.trickle	2018-08-21 10:54:06.103756709 +0200
++++ db-5.3.21/src/dbinc_auto/mp_ext.h	2018-08-21 10:54:06.112756543 +0200
+@@ -101,6 +101,7 @@ int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_
+ int __memp_sync_int __P((ENV *, DB_MPOOLFILE *, u_int32_t, u_int32_t, u_int32_t *, int *));
+ int __memp_mf_sync __P((DB_MPOOL *, MPOOLFILE *, int));
+ int __memp_purge_dead_files __P((ENV *));
++int __memp_purge_dead_and_count __P((ENV *, u_int32_t *, u_int32_t *));
+ int __memp_trickle_pp __P((DB_ENV *, int, int *));
+ 
+ #if defined(__cplusplus)
+diff -up db-5.3.21/src/mp/mp_sync.c.trickle db-5.3.21/src/mp/mp_sync.c
+--- db-5.3.21/src/mp/mp_sync.c.trickle	2018-08-21 10:54:06.105756672 +0200
++++ db-5.3.21/src/mp/mp_sync.c	2018-09-04 09:43:57.502992291 +0200
+@@ -965,17 +965,34 @@ __bhcmp(p1, p2)
+ 	return (0);
+ }
+ 
++
+ /*
+  * __memp_purge_dead_files --
++ *  Thin wrapper over __memp_purge_dead_and_count. Does not return any
++ *  information about the number of total/dirty buffers.
++ *
++ * PUBLIC: int __memp_purge_dead_files __P((ENV *));
++ */
++int
++__memp_purge_dead_files(env)
++    ENV *env;
++{
++    return __memp_purge_dead_and_count(env, NULL, NULL);
++}
++
++/*
++ * __memp_purge_dead_and_count --
+  *	Remove all dead files and their buffers from the mpool. The caller
+  *	cannot hold any lock on the dead MPOOLFILE handles, their buffers
+  *	or their hash buckets.
+  *
+- * PUBLIC: int __memp_purge_dead_files __P((ENV *));
++ * PUBLIC: int __memp_purge_dead_and_count __P((ENV *, u_int32_t *, u_int32_t *));
+  */
+ int
+-__memp_purge_dead_files(env)
++__memp_purge_dead_and_count(env, totalp, dirtyp)
+ 	ENV *env;
++    u_int32_t *totalp;
++    u_int32_t *dirtyp;
+ {
+ 	BH *bhp;
+ 	DB_MPOOL *dbmp;
+@@ -983,7 +1000,7 @@ __memp_purge_dead_files(env)
+ 	REGINFO *infop;
+ 	MPOOL *c_mp, *mp;
+ 	MPOOLFILE *mfp;
+-	u_int32_t i_cache;
++	u_int32_t i_cache, dirty, total;
+ 	int ret, t_ret, h_lock;
+ 
+ 	if (!MPOOL_ON(env))
+@@ -992,6 +1009,7 @@ __memp_purge_dead_files(env)
+ 	dbmp = env->mp_handle;
+ 	mp = dbmp->reginfo[0].primary;
+ 	ret = t_ret = h_lock = 0;
++    dirty = total = 0;
+ 
+ 	/*
+ 	 * Walk each cache's list of buffers and free all buffers whose
+@@ -1000,6 +1018,7 @@ __memp_purge_dead_files(env)
+ 	for (i_cache = 0; i_cache < mp->nreg; i_cache++) {
+ 		infop = &dbmp->reginfo[i_cache]; 
+ 		c_mp = infop->primary;
++        total += c_mp->pages;
+ 
+ 		hp = R_ADDR(infop, c_mp->htab);
+ 		hp_end = &hp[c_mp->htab_buckets];
+@@ -1008,6 +1027,9 @@ __memp_purge_dead_files(env)
+ 			if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
+ 				continue;
+ 
++            /* Count dirty pages first as we do not wait on mutex locks */
++            dirty += (u_int32_t)atomic_read(&hp->hash_page_dirty);
++
+ 			/* 
+ 			 * Search for a dead buffer. Other places that call
+ 			 * __memp_bhfree() acquire the buffer lock before the
+@@ -1073,6 +1095,11 @@ __memp_purge_dead_files(env)
+ 		}
+ 	}
+ 
++    if (dirtyp != NULL)
++		*dirtyp = dirty;
++    if (totalp != NULL)
++		*totalp = total;
++
+ 	return (ret);
+ }
+ 
+diff -up db-5.3.21/src/mp/mp_trickle.c.trickle db-5.3.21/src/mp/mp_trickle.c
+--- db-5.3.21/src/mp/mp_trickle.c.trickle	2018-08-21 10:54:06.105756672 +0200
++++ db-5.3.21/src/mp/mp_trickle.c	2018-08-21 10:54:06.112756543 +0200
+@@ -56,6 +56,7 @@ __memp_trickle(env, pct, nwrotep)
+ 
+ 	dbmp = env->mp_handle;
+ 	mp = dbmp->reginfo[0].primary;
++    dirty = total = 0;
+ 
+ 	if (nwrotep != NULL)
+ 		*nwrotep = 0;
+@@ -67,12 +68,8 @@ __memp_trickle(env, pct, nwrotep)
+ 		return (EINVAL);
+ 	}
+ 
+-	/* First we purge all dead files and their buffers. */
+-	if ((ret = __memp_purge_dead_files(env)) != 0)
+-		return (ret);
+-
+-	/*
+-	 * Loop through the caches counting total/dirty buffers.
++	/* First we purge all dead files and their buffers and
++	 * loop through the caches counting total/dirty buffers.
+ 	 *
+ 	 * XXX
+ 	 * Using hash_page_dirty is our only choice at the moment, but it's not
+@@ -80,12 +77,8 @@ __memp_trickle(env, pct, nwrotep)
+ 	 * than one page size, as a free 512B buffer may not be equivalent to
+ 	 * having a free 8KB buffer.
+ 	 */
+-	for (ret = 0, i = dirty = total = 0; i < mp->nreg; ++i) {
+-		c_mp = dbmp->reginfo[i].primary;
+-		total += c_mp->pages;
+-		__memp_stat_hash(&dbmp->reginfo[i], c_mp, &dtmp);
+-		dirty += dtmp;
+-	}
++	if ((ret = __memp_purge_dead_and_count(env, &total, &dirty)) != 0)
++		return (ret);
+ 
+ 	/*
+ 	 * If there are sufficient clean buffers, no buffers or no dirty
diff --git a/SOURCES/libdb-cbd-race.patch b/SOURCES/libdb-cbd-race.patch
new file mode 100644
index 0000000..ad56d06
--- /dev/null
+++ b/SOURCES/libdb-cbd-race.patch
@@ -0,0 +1,110 @@
+From 4ae2eb88fadc256ddf9862b2e72ed216ddbb919d Mon Sep 17 00:00:00 2001
+From: michael brey <michael.brey@oracle.com>
+Date: Tue, 20 May 2014 14:49:44 +0200
+Subject: [PATCH] Fix a CDB race
+
+Report and reproducer here:
+https://community.oracle.com/thread/3514381
+
+From: michael brey <michael.brey@oracle.com>
+To: Lubomir Rintel <lkundrak@v3.sk>
+Subject: Re: BDB crash
+Date: Tue, 13 May 2014 09:07:45 -0600 (05/13/2014 05:07:45 PM)
+Message-id: <53723541.7040203@oracle.com>
+
+  attached are patches for each release.  the 5.3.28 patch will apply on
+top of 5.3.21.
+
+thanks
+mike
+
+RHBZ: #1099509
+---
+ src/env/env_failchk.c | 24 ++++++++++++++++++++++++
+ src/mutex/mut_tas.c   | 18 +++++++++++++++++-
+ 2 files changed, 41 insertions(+), 1 deletion(-)
+
+diff --git a/src/env/env_failchk.c b/src/env/env_failchk.c
+index 05752f0..b09df96 100644
+--- a/src/env/env_failchk.c
++++ b/src/env/env_failchk.c
+@@ -312,6 +312,7 @@ __env_in_api(env)
+ 	REGINFO *infop;
+ 	THREAD_INFO *thread;
+ 	u_int32_t i;
++	pid_t pid;
+ 	int unpin, ret;
+ 
+ 	if ((htab = env->thr_hashtab) == NULL)
+@@ -325,6 +326,7 @@ __env_in_api(env)
+ 
+ 	for (i = 0; i < env->thr_nbucket; i++)
+ 		SH_TAILQ_FOREACH(ip, &htab[i], dbth_links, __db_thread_info) {
++			pid = ip->dbth_pid;
+ 			if (ip->dbth_state == THREAD_SLOT_NOT_IN_USE ||
+ 			    (ip->dbth_state == THREAD_OUT &&
+ 			    thread->thr_count <  thread->thr_max))
+@@ -341,6 +343,28 @@ __env_in_api(env)
+ 				ip->dbth_state = THREAD_SLOT_NOT_IN_USE;
+ 				continue;
+ 			}
++			/*
++			 * The above tests are not atomic, so it is possible that
++			 * the process pointed to by ip has changed while they ran.
++			 * In particular, if the process that ip pointed to when
++			 * is_alive was executed has since terminated normally, a
++			 * new process may reuse the same ip structure and change
++			 * its dbth_state before the next two tests are performed.
++			 * Therefore, we verify here that all four tests above were
++			 * done on the same process.  If the process pointed to by
++			 * ip changed, the tests are invalid and can be ignored.
++			 * Similarly, it's also possible for two processes racing to
++			 * change the dbth_state of the same ip structure. For example,
++			 * both process A and B reach the above test for the same
++			 * terminated process C where C's dbth_state is THREAD_OUT.
++			 * If A goes into the 'if' block and changes C's dbth_state to
++			 * THREAD_SLOT_NOT_IN_USE before B checks the condition, B
++			 * would incorrectly fail the test and run into this line.
++			 * Therefore, we need to check C's dbth_state again and fail
++			 * the db only if C's dbth_state is indeed THREAD_ACTIVE.
++			 */
++			if (ip->dbth_state != THREAD_ACTIVE || ip->dbth_pid != pid)
++				continue;
+ 			return (__db_failed(env, DB_STR("1507",
+ 			    "Thread died in Berkeley DB library"),
+ 			    ip->dbth_pid, ip->dbth_tid));
+diff --git a/src/mutex/mut_tas.c b/src/mutex/mut_tas.c
+index 0899d23..db95030 100644
+--- a/src/mutex/mut_tas.c
++++ b/src/mutex/mut_tas.c
+@@ -151,10 +151,26 @@ loop:	/* Attempt to acquire the resource for N spins. */
+ 			if (F_ISSET(dbenv, DB_ENV_FAILCHK) &&
+ 			    ip == NULL && dbenv->is_alive(dbenv,
+ 			    mutexp->pid, mutexp->tid, 0) == 0) {
++				/*
++				 * The process owning the mutex is "dead" now, but it may
++				 * have already released the mutex, so go back to the top
++				 * of the loop and check again whether the mutex is still
++				 * held by the "dead" process. We yield 10 us to increase
++				 * the likelihood of the mutexp fields being up-to-date.
++				 * Set nspins so that we spin only one more time; a dead
++				 * process is not going to release the mutex while we spin.
++				 */
++				if (nspins > 1) {
++					nspins = 2;
++					__os_yield(env, 0, 10);
++					continue;
++				}
+ 				ret = __env_set_state(env, &ip, THREAD_VERIFY);
+ 				if (ret != 0 ||
+-				    ip->dbth_state == THREAD_FAILCHK)
++				    ip->dbth_state == THREAD_FAILCHK) {
++					printf("mut_tas:172, pid: %d, flag: %d\n", mutexp->pid, mutexp->flags);
+ 					return (DB_RUNRECOVERY);
++				}
+ 			}
+ 			if (nowait)
+ 				return (DB_LOCK_NOTGRANTED);
+-- 
+1.8.3.1
+
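
The long comment added to __env_in_api() above boils down to a re-validation
pattern: remember the pid seen at the start of the checks, and only report a
failure if the slot still shows that same pid in the THREAD_ACTIVE state once
the non-atomic tests are done.  A minimal standalone sketch of that guard
(hypothetical names, not Berkeley DB code):

#include <stdio.h>
#include <sys/types.h>

enum slot_state { SLOT_NOT_IN_USE, SLOT_OUT, SLOT_ACTIVE };

struct slot {
	pid_t pid;
	enum slot_state state;
};

/* Report a failure only if the slot still describes the process we examined. */
static int
still_failed(const struct slot *ip, pid_t pid_seen)
{
	if (ip->state != SLOT_ACTIVE || ip->pid != pid_seen)
		return (0);	/* slot was released or reused: ignore it */
	return (1);
}

int
main(void)
{
	struct slot s = { 1234, SLOT_ACTIVE };
	pid_t seen = s.pid;

	s.pid = 5678;	/* another process reused the slot meanwhile */

	printf("report failure: %d\n", still_failed(&s, seen));	/* prints 0 */
	return (0);
}
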
diff --git a/SOURCES/libdb-db_hotbackup-manpages.patch b/SOURCES/libdb-db_hotbackup-manpages.patch
new file mode 100644
index 0000000..021eeea
--- /dev/null
+++ b/SOURCES/libdb-db_hotbackup-manpages.patch
@@ -0,0 +1,17 @@
+--- db-5.3.21/man/db_hotbackup.1.different_user	2017-12-04 13:33:45.608125447 +0100
++++ db-5.3.21/man/db_hotbackup.1	2017-12-04 13:46:37.631766387 +0100
+@@ -55,6 +55,14 @@ Before performing the snapshot, checkpoi
+ and remove any log files that are no longer required in that environment.
+ \fBTo avoid making catastrophic failure impossible, log file removal
+ must be integrated with log file archival.\fR
++.RS
++.PP
++Care should be taken with the \fB\-c\fR option: \fBdb_hotbackup\fR
++MUST be run by the same user that owns the database. If \fBdb_hotbackup\fR
++is run by a user other than the owner of the database, a new log file
++owned by that user might be created, making the database unusable for
++its owner.
++.RE
+ .TP
+ \fB\-D\fR
+ Use the data and log directories listed in a \fBDB_CONFIG\fR configuration file
diff --git a/SOURCES/libdb-limit-cpu.patch b/SOURCES/libdb-limit-cpu.patch
new file mode 100644
index 0000000..475aa50
--- /dev/null
+++ b/SOURCES/libdb-limit-cpu.patch
@@ -0,0 +1,12 @@
+diff -Naurp db_old/src/os/os_cpu.c db_new/src/os/os_cpu.c
+--- db_old/src/os/os_cpu.c	2012-05-11 12:57:54.000000000 -0500
++++ db_new/src/os/os_cpu.c	2015-08-12 14:00:37.232498880 -0500
+@@ -40,6 +40,8 @@ __os_cpu_count()
+ 	long nproc;
+ 
+ 	nproc = sysconf(_SC_NPROCESSORS_ONLN);
++	if (nproc > 1024)
++		nproc = 1024;
+ 	return ((u_int32_t)(nproc > 1 ? nproc : 1));
+ #else
+ 	return (1);
diff --git a/SOURCES/libdb-multiarch.patch b/SOURCES/libdb-multiarch.patch
new file mode 100644
index 0000000..f0e6ff6
--- /dev/null
+++ b/SOURCES/libdb-multiarch.patch
@@ -0,0 +1,12 @@
+diff -up db-5.3.15/dist/aclocal/sequence.m4.multiarch db-5.3.15/dist/aclocal/sequence.m4
+--- db-5.3.15/dist/aclocal/sequence.m4.multiarch	2010-06-25 17:50:36.000000000 +0200
++++ db-5.3.15/dist/aclocal/sequence.m4	2011-12-20 02:00:49.000000000 +0100
+@@ -78,7 +78,7 @@ AC_DEFUN(AM_SEQUENCE_CONFIGURE, [
+ 	fi
+ 	if test "$db_cv_build_sequence" = "yes"; then
+ 		AC_SUBST(db_seq_decl)
+-		db_seq_decl="typedef $db_cv_seq_type db_seq_t;";
++		db_seq_decl="typedef int64_t db_seq_t;";
+ 
+ 		AC_DEFINE(HAVE_64BIT_TYPES)
+ 		AH_TEMPLATE(HAVE_64BIT_TYPES,
diff --git a/SOURCES/patch.1.1 b/SOURCES/patch.1.1
new file mode 100644
index 0000000..fe3177d
--- /dev/null
+++ b/SOURCES/patch.1.1
@@ -0,0 +1,20 @@
+*** Makefile.orig	Wed Jul 13 21:43:16 1994
+--- Makefile	Wed Dec 31 19:00:00 1969
+***************
+*** 15,22 ****
+  
+  ${LIBDB}: ${OBJ1} ${OBJ2} ${OBJ3} ${OBJ4} ${OBJ5} ${MISC}
+  	rm -f $@
+! 	ar cq $@ \
+! 	    `lorder ${OBJ1} ${OBJ2} ${OBJ3} ${OBJ4} ${OBJ5} ${MISC} | tsort`
+  	ranlib $@
+  
+  clean:
+--- 15,21 ----
+  
+  ${LIBDB}: ${OBJ1} ${OBJ2} ${OBJ3} ${OBJ4} ${OBJ5} ${MISC}
+  	rm -f $@
+! 	ar cq $@ ${OBJ1} ${OBJ2} ${OBJ3} ${OBJ4} ${OBJ5} ${MISC}
+  	ranlib $@
+  
+  clean:
diff --git a/SOURCES/patch.1.2 b/SOURCES/patch.1.2
new file mode 100644
index 0000000..a343909
--- /dev/null
+++ b/SOURCES/patch.1.2
@@ -0,0 +1,19 @@
+*** btree/bt_split.c	Tue Jul 26 14:22:02 1994
+--- btree/bt_split.c	Sat Jan  4 14:38:55 1997
+***************
+*** 673,679 ****
+  		 * where we decide to try and copy too much onto the left page.
+  		 * Make sure that doesn't happen.
+  		 */
+! 		if (skip <= off && used + nbytes >= full) {
+  			--off;
+  			break;
+  		}
+--- 673,679 ----
+  		 * where we decide to try and copy too much onto the left page.
+  		 * Make sure that doesn't happen.
+  		 */
+! 		if (skip <= off && used + nbytes >= full || nxt == top - 1) {
+  			--off;
+  			break;
+  		}
diff --git a/SOURCES/patch.1.3 b/SOURCES/patch.1.3
new file mode 100644
index 0000000..5ca03df
--- /dev/null
+++ b/SOURCES/patch.1.3
@@ -0,0 +1,37 @@
+*** btree/bt_split.c.orig	Sat Feb  8 10:14:10 1997
+--- btree/bt_split.c	Sat Feb  8 10:14:51 1997
+***************
+*** 673,679 ****
+  		 * where we decide to try and copy too much onto the left page.
+  		 * Make sure that doesn't happen.
+  		 */
+! 		if (skip <= off && used + nbytes >= full || nxt == top - 1) {
+  			--off;
+  			break;
+  		}
+--- 673,680 ----
+  		 * where we decide to try and copy too much onto the left page.
+  		 * Make sure that doesn't happen.
+  		 */
+! 		if (skip <= off &&
+! 		    used + nbytes + sizeof(indx_t) >= full || nxt == top - 1) {
+  			--off;
+  			break;
+  		}
+***************
+*** 686,692 ****
+  			memmove((char *)l + l->upper, src, nbytes);
+  		}
+  
+! 		used += nbytes;
+  		if (used >= half) {
+  			if (!isbigkey || bigkeycnt == 3)
+  				break;
+--- 687,693 ----
+  			memmove((char *)l + l->upper, src, nbytes);
+  		}
+  
+! 		used += nbytes + sizeof(indx_t);
+  		if (used >= half) {
+  			if (!isbigkey || bigkeycnt == 3)
+  				break;
diff --git a/SOURCES/patch.1.4 b/SOURCES/patch.1.4
new file mode 100644
index 0000000..cec5fbc
--- /dev/null
+++ b/SOURCES/patch.1.4
@@ -0,0 +1,22 @@
+*** btree/bt_page.c.orig	Wed Jul 13 21:29:02 1994
+--- btree/bt_page.c	Wed Jun 11 20:14:43 1997
+***************
+*** 65,70 ****
+--- 65,71 ----
+  	h->prevpg = P_INVALID;
+  	h->nextpg = t->bt_free;
+  	t->bt_free = h->pgno;
++ 	F_SET(t, B_METADIRTY);
+  
+  	/* Make sure the page gets written back. */
+  	return (mpool_put(t->bt_mp, h, MPOOL_DIRTY));
+***************
+*** 92,97 ****
+--- 93,99 ----
+  	    (h = mpool_get(t->bt_mp, t->bt_free, 0)) != NULL) {
+  		*npg = t->bt_free;
+  		t->bt_free = h->nextpg;
++ 		F_SET(t, B_METADIRTY);
+  		return (h);
+  	}
+  	return (mpool_new(t->bt_mp, npg));
diff --git a/SOURCES/signed-overflow.patch b/SOURCES/signed-overflow.patch
new file mode 100644
index 0000000..1c204ba
--- /dev/null
+++ b/SOURCES/signed-overflow.patch
@@ -0,0 +1,25 @@
+diff -up db-5.3.21/lang/sql/generated/sqlite3.c.broken db-5.3.21/lang/sql/generated/sqlite3.c
+--- db-5.3.21/lang/sql/generated/sqlite3.c.broken	2013-11-05 15:47:30.503756426 +0100
++++ db-5.3.21/lang/sql/generated/sqlite3.c	2013-11-05 15:48:11.105888357 +0100
+@@ -8230,7 +8230,7 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp4(Vdb
+ SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(Vdbe*,int,int,int,int,int);
+ SQLITE_PRIVATE int sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp);
+ SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, int addr, int P1);
+-SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, int addr, int P2);
++SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, u32 addr, int P2);
+ SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, int addr, int P3);
+ SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u8 P5);
+ SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr);
+@@ -48686,10 +48686,9 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP1(
+ ** Change the value of the P2 operand for a specific instruction.
+ ** This routine is useful for setting a jump destination.
+ */
+-SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, int addr, int val){
++SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, u32 addr, int val){
+   assert( p!=0 );
+-  assert( addr>=0 );
+-  if( p->nOp>addr ){
++  if( ((u32)p->nOp)>addr ){
+     p->aOp[addr].p2 = val;
+   }
+ }
diff --git a/SPECS/libdb.spec b/SPECS/libdb.spec
new file mode 100644
index 0000000..409bab2
--- /dev/null
+++ b/SPECS/libdb.spec
@@ -0,0 +1,616 @@
+%define __soversion_major 5
+%define __soversion %{__soversion_major}.3
+
+Summary: The Berkeley DB database library for C
+Name: libdb
+Version: 5.3.21
+Release: 25%{?dist}
+Source0: http://download.oracle.com/berkeley-db/db-%{version}.tar.gz
+Source1: http://download.oracle.com/berkeley-db/db.1.85.tar.gz
+# libdb man pages generated from the 5.3.21 documentation
+Source2: libdb-5.3.21-manpages.tar.gz
+Patch0: libdb-multiarch.patch
+# db-1.85 upstream patches
+Patch10: http://www.oracle.com/technology/products/berkeley-db/db/update/1.85/patch.1.1
+Patch11: http://www.oracle.com/technology/products/berkeley-db/db/update/1.85/patch.1.2
+Patch12: http://www.oracle.com/technology/products/berkeley-db/db/update/1.85/patch.1.3
+Patch13: http://www.oracle.com/technology/products/berkeley-db/db/update/1.85/patch.1.4
+# other patches
+Patch20: db-1.85-errno.patch
+Patch22: db-4.6.21-1.85-compat.patch
+Patch24: db-4.5.20-jni-include-dir.patch
+# License clarification patch
+# http://devel.trisquel.info/gitweb/?p=package-helpers.git;a=blob;f=helpers/DATA/db4.8/007-mt19937db.c_license.patch;h=1036db4d337ce4c60984380b89afcaa63b2ef88f;hb=df48d40d3544088338759e8bea2e7f832a564d48
+Patch25: 007-mt19937db.c_license.patch
+# sqlite3 overflow fix backport
+Patch26: signed-overflow.patch
+# CDB race (rhbz #1099509)
+Patch27: libdb-cbd-race.patch
+# Limit concurrency to max 1024 CPUs
+Patch28: libdb-limit-cpu.patch
+Patch29: libdb-5.3.21-mutex_leak.patch
+# Upstream acknowledged and agreed to use it
+Patch30: libdb-5.3.21-region-size-check.patch
+# Patch sent upstream
+Patch31: checkpoint-opd-deadlock.patch
+
+Patch32: libdb-db_hotbackup-manpages.patch
+# rhbz#1608749 Patch sent upstream
+# Expects libdb-5.3.21-mutex_leak.patch applied
+Patch33: libdb-5.3.21-trickle_cpu.patch
+
+URL: http://www.oracle.com/database/berkeley-db/
+License: BSD and LGPLv2 and Sleepycat
+Group: System Environment/Libraries
+BuildRequires: perl libtool
+BuildRequires: tcl-devel >= 8.5.2-3
+BuildRequires: java-devel >= 1:1.6.0
+BuildRequires: chrpath
+Conflicts: filesystem < 3
+Obsoletes: db4 < 5
+
+%description
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. The Berkeley DB includes B+tree, Extended
+Linear Hashing, Fixed and Variable-length record access methods,
+transactions, locking, logging, shared memory caching, and database
+recovery. The Berkeley DB supports C, C++, Java, and Perl APIs. It is
+used by many applications, including Python and Perl, so this should
+be installed on all systems.
+
+%package utils
+Summary: Command line tools for managing Berkeley DB databases
+Group: Applications/Databases
+Requires: %{name}%{?_isa} = %{version}-%{release}
+Obsoletes: db4-utils < 5
+
+%description utils
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. Berkeley DB includes B+tree, Extended
+Linear Hashing, Fixed and Variable-length record access methods,
+transactions, locking, logging, shared memory caching, and database
+recovery. DB supports C, C++, Java and Perl APIs.
+
+%package devel
+Summary: C development files for the Berkeley DB library
+Group: Development/Libraries
+Requires: %{name}%{?_isa} = %{version}-%{release}
+Obsoletes: db4-devel < 5
+
+%description devel
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. This package contains the header files,
+libraries, and documentation for building programs which use the
+Berkeley DB.
+
+%package devel-doc
+Summary: C development documentation files for the Berkeley DB library
+Group: Documentation
+Requires: %{name} = %{version}-%{release}
+Requires: %{name}-devel = %{version}-%{release}
+BuildArch: noarch
+Obsoletes: db4-devel-doc < 5
+
+%description devel-doc
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. This package contains the header files,
+libraries, and documentation for building programs which use the
+Berkeley DB.
+
+%package devel-static
+Summary: Berkeley DB static libraries
+Group: Development/Libraries
+Requires: %{name}-devel%{?_isa} = %{version}-%{release}
+Obsoletes: db4-devel-static < 5
+
+%description devel-static
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. This package contains static libraries
+needed for applications that require static linking of
+Berkeley DB.
+
+%package cxx
+Summary: The Berkeley DB database library for C++
+Group: System Environment/Libraries
+Requires: %{name}%{?_isa} = %{version}-%{release}
+Obsoletes: db4-cxx < 5
+
+%description cxx
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. The Berkeley DB includes B+tree, Extended
+Linear Hashing, Fixed and Variable-length record access methods,
+transactions, locking, logging, shared memory caching, and database
+recovery. The Berkeley DB supports C, C++, Java, and Perl APIs. It is
+used by many applications, including Python and Perl, so this should
+be installed on all systems.
+
+%package cxx-devel
+Summary: C++ development files for the Berkeley DB library
+Group: System Environment/Libraries
+Requires: %{name}-cxx%{?_isa} = %{version}-%{release}
+Requires: %{name}-devel%{?_isa} = %{version}-%{release}
+Obsoletes: db4-cxx-devel < 5
+
+%description cxx-devel
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. The Berkeley DB includes B+tree, Extended
+Linear Hashing, Fixed and Variable-length record access methods,
+transactions, locking, logging, shared memory caching, and database
+recovery. This package contains the header files and libraries
+needed for building C++ programs which use the Berkeley DB.
+
+%package tcl
+Summary: Development files for using the Berkeley DB with Tcl
+Group: Development/Libraries
+Requires: %{name}%{?_isa} = %{version}-%{release}
+Obsoletes: db4-tcl < 5
+
+%description tcl
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. This package contains the libraries
+for building programs which use the Berkeley DB in Tcl.
+
+%package tcl-devel
+Summary: Development files for using the Berkeley DB with Tcl
+Group: Development/Libraries
+Requires: %{name}-tcl%{?_isa} = %{version}-%{release}
+Obsoletes: db4-tcl-devel < 5
+
+%description tcl-devel
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. This package contains the libraries
+for building programs which use the Berkeley DB in Tcl.
+
+%package sql
+Summary: Development files for using the Berkeley DB with SQL
+Group: Development/Libraries
+Requires: %{name}%{?_isa} = %{version}-%{release}
+Obsoletes: db4-sql < 5
+
+%description sql
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. This package contains the libraries
+for building programs which use the Berkeley DB in SQL.
+
+%package sql-devel
+Summary: Development files for using the Berkeley DB with SQL
+Group: Development/Libraries
+Requires: %{name}-sql%{?_isa} = %{version}-%{release}
+Obsoletes: db4-sql-devel < 5
+
+%description sql-devel
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. This package contains the libraries
+for building programs which use the Berkeley DB in SQL.
+
+%package java
+Summary: Development files for using the Berkeley DB with Java
+Group: Development/Libraries
+Requires: %{name}%{?_isa} = %{version}-%{release}
+Obsoletes: db4-java < 5
+
+%description java
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. This package contains the libraries
+for building programs which use the Berkeley DB in Java.
+
+%package java-devel
+Summary: Development files for using the Berkeley DB with Java
+Group: Development/Libraries
+Requires: %{name}-java%{?_isa} = %{version}-%{release}
+Obsoletes: db4-java-devel < 5
+
+%description java-devel
+The Berkeley Database (Berkeley DB) is a programmatic toolkit that
+provides embedded database support for both traditional and
+client/server applications. This package contains the libraries
+for building programs which use the Berkeley DB in Java.
+
+%prep
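+# -a 1 additionally unpacks Source1 (the old db.1.85 sources) inside the
+# db-%{version} tree; SOURCE2 is (presumably) the separate manpages tarball,
+# extracted by hand below.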
+%setup -q -n db-%{version} -a 1
+tar -xf %{SOURCE2}
+
+%patch0 -p1
+pushd db.1.85/PORT/linux
+%patch10 -p0
+popd
+pushd db.1.85
+%patch11 -p0
+%patch12 -p0
+%patch13 -p0
+%patch20 -p1
+popd
+
+%patch22 -p1
+%patch24 -p1
+%patch25 -p1
+
+%patch26 -p1
+%patch27 -p1
+%patch28 -p1
+%patch29 -p1
+%patch30 -p1
+%patch31 -p1
+%patch32 -p1
+%patch33 -p1
+
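+# s_config is Berkeley DB's own helper script; re-run it so the generated
+# configure machinery in dist/ reflects the patched sources.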
+cd dist
+./s_config
+
+%build
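+# The SQL API is a bundled SQLite-compatible layer; build it with feature
+# defines matching those used for the system sqlite (see the #788496 entry
+# in the changelog).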
+CFLAGS="$RPM_OPT_FLAGS -fno-strict-aliasing"
+CFLAGS="$CFLAGS -DSQLITE_ENABLE_COLUMN_METADATA=1 -DSQLITE_DISABLE_DIRSYNC=1 -DSQLITE_ENABLE_FTS3=3 -DSQLITE_ENABLE_RTREE=1 -DSQLITE_SECURE_DELETE=1 -DSQLITE_ENABLE_UNLOCK_NOTIFY=1 -I../../../lang/sql/sqlite/ext/fts3/"
+export CFLAGS
+
+# Build the old db-185 libraries.
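+# (OORG carries the optimization flags in the old db-1.85 PORT Makefiles.)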
+make -C db.1.85/PORT/%{_os} OORG="$CFLAGS"
+
+test -d dist/dist-tls || mkdir dist/dist-tls
+# Static link db_dump185 with old db-185 libraries.
+/bin/sh libtool --tag=CC --mode=compile	%{__cc} $RPM_OPT_FLAGS -Idb.1.85/PORT/%{_os}/include -D_REENTRANT -c util/db_dump185.c -o dist/dist-tls/db_dump185.lo
+/bin/sh libtool --tag=LD --mode=link %{__cc} -o dist/dist-tls/db_dump185 %{__global_ldflags} dist/dist-tls/db_dump185.lo db.1.85/PORT/%{_os}/libdb.a
+
+# update gnu-config files for aarch64
+cp /usr/lib/rpm/redhat/config.guess dist
+cp /usr/lib/rpm/redhat/config.sub   dist
+cp /usr/lib/rpm/redhat/config.guess lang/sql/sqlite
+cp /usr/lib/rpm/redhat/config.sub   lang/sql/sqlite
+cp /usr/lib/rpm/redhat/config.guess lang/sql/jdbc
+cp /usr/lib/rpm/redhat/config.sub   lang/sql/jdbc
+cp /usr/lib/rpm/redhat/config.guess lang/sql/odbc
+cp /usr/lib/rpm/redhat/config.sub   lang/sql/odbc
+
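+# Configure and build out of tree in dist/dist-tls, pointing a symlink back
+# at the real configure script.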
+pushd dist/dist-tls
+ln -sf ../configure .
+%configure -C \
+	--enable-compat185 --enable-dump185 \
+	--enable-shared --enable-static \
+	--enable-tcl --with-tcl=%{_libdir} \
+	--enable-cxx --enable-sql \
+	--enable-java \
+	--enable-test \
+	--disable-rpath \
+	--with-tcl=%{_libdir}/tcl8.5
+
+# Remove libtool predep_objects and postdep_objects wonkiness so that
+# building without -nostdlib doesn't include them twice.  Because we
+# already link with g++, weird stuff happens if you don't let the
+# compiler handle this.
+perl -pi -e 's/^predep_objects=".*$/predep_objects=""/' libtool
+perl -pi -e 's/^postdep_objects=".*$/postdep_objects=""/' libtool
+perl -pi -e 's/-shared -nostdlib/-shared/' libtool
+
+make %{?_smp_mflags}
+
+# XXX hack around libtool not creating ./.libs/libdb_java-X.Y.lai
+LDBJ=./.libs/libdb_java-%{__soversion}.la
+if test -f ${LDBJ} -a ! -f ${LDBJ}i; then
+	sed -e 's,^installed=no,installed=yes,' < ${LDBJ} > ${LDBJ}i
+fi
+popd
+
+%install
+rm -rf ${RPM_BUILD_ROOT}
+mkdir -p ${RPM_BUILD_ROOT}%{_includedir}
+mkdir -p ${RPM_BUILD_ROOT}%{_libdir}
+mkdir -p ${RPM_BUILD_ROOT}%{_mandir}/man1
+
+# Force off stripping of installed binaries
+%makeinstall STRIP=/bin/true -C dist/dist-tls
+
+# XXX Nuke non-versioned archives and symlinks
+rm -f ${RPM_BUILD_ROOT}%{_libdir}/{libdb.a,libdb_cxx.a,libdb_tcl.a,libdb_sql.a}
+
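+# Ensure the shared libraries carry the executable bit so debuginfo
+# extraction and stripping handle them as expected.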
+chmod +x ${RPM_BUILD_ROOT}%{_libdir}/*.so*
+
+# Move the header files to a subdirectory, in case we're deploying on a
+# system with multiple versions of DB installed.
+mkdir -p ${RPM_BUILD_ROOT}%{_includedir}/%{name}
+mv ${RPM_BUILD_ROOT}%{_includedir}/*.h ${RPM_BUILD_ROOT}%{_includedir}/%{name}/
+
+# Create symlinks to includes so that "use <db.h> and link with -ldb" works.
+for i in db.h db_cxx.h db_185.h; do
+	ln -s %{name}/$i ${RPM_BUILD_ROOT}%{_includedir}
+done
+
+# Move java jar file to the correct place
+mkdir -p ${RPM_BUILD_ROOT}%{_datadir}/java
+mv ${RPM_BUILD_ROOT}%{_libdir}/*.jar ${RPM_BUILD_ROOT}%{_datadir}/java
+
+# Remove the documentation installed into the buildroot; docs are packaged
+# from the source tree via %doc instead
+rm -rf ${RPM_BUILD_ROOT}%{_prefix}/docs
+
+# XXX Avoid "Permission denied" errors from strip when building as non-root.
+chmod u+w ${RPM_BUILD_ROOT}%{_bindir} ${RPM_BUILD_ROOT}%{_bindir}/*
+
+# remove unneeded .la files (#225675)
+rm -f ${RPM_BUILD_ROOT}%{_libdir}/*.la
+
+# remove RPATHs
+chrpath -d ${RPM_BUILD_ROOT}%{_libdir}/*.so ${RPM_BUILD_ROOT}%{_bindir}/*
+
+# Unify documentation and examples, drop content we do not ship, and stage the man pages
+rm -rf docs/csharp
+rm -rf examples/csharp
+rm -rf docs/installation
+mv examples docs
+mv man/* ${RPM_BUILD_ROOT}%{_mandir}/man1
+
+%clean
+rm -rf ${RPM_BUILD_ROOT}
+
+%post -p /sbin/ldconfig
+
+%postun -p /sbin/ldconfig
+
+%post -p /sbin/ldconfig cxx
+
+%postun -p /sbin/ldconfig cxx
+
+%post -p /sbin/ldconfig sql
+
+%postun -p /sbin/ldconfig sql
+
+%post -p /sbin/ldconfig tcl
+
+%postun -p /sbin/ldconfig tcl
+
+%post -p /sbin/ldconfig java
+
+%postun -p /sbin/ldconfig java
+
+%files
+%defattr(-,root,root,-)
+%doc LICENSE README
+%{_libdir}/libdb-%{__soversion}.so
+%{_libdir}/libdb-%{__soversion_major}.so
+
+%files devel
+%defattr(-,root,root,-)
+%{_libdir}/libdb.so
+%dir %{_includedir}/%{name}
+%{_includedir}/%{name}/db.h
+%{_includedir}/%{name}/db_185.h
+%{_includedir}/db.h
+%{_includedir}/db_185.h
+
+%files devel-doc
+%defattr(-,root,root,-)
+%doc	docs/*
+
+%files devel-static
+%defattr(-,root,root,-)
+%{_libdir}/libdb-%{__soversion}.a
+%{_libdir}/libdb_cxx-%{__soversion}.a
+%{_libdir}/libdb_tcl-%{__soversion}.a
+%{_libdir}/libdb_sql-%{__soversion}.a
+%{_libdir}/libdb_java-%{__soversion}.a
+
+%files utils
+%defattr(-,root,root,-)
+%{_bindir}/db*_archive
+%{_bindir}/db*_checkpoint
+%{_bindir}/db*_deadlock
+%{_bindir}/db*_dump*
+%{_bindir}/db*_hotbackup
+%{_bindir}/db*_load
+%{_bindir}/db*_printlog
+%{_bindir}/db*_recover
+%{_bindir}/db*_replicate
+%{_bindir}/db*_stat
+%{_bindir}/db*_upgrade
+%{_bindir}/db*_verify
+%{_bindir}/db*_tuner
+%{_mandir}/man1/db_*
+
+%files cxx
+%defattr(-,root,root,-)
+%{_libdir}/libdb_cxx-%{__soversion}.so
+%{_libdir}/libdb_cxx-%{__soversion_major}.so
+
+%files cxx-devel
+%defattr(-,root,root,-)
+%{_includedir}/%{name}/db_cxx.h
+%{_includedir}/db_cxx.h
+%{_libdir}/libdb_cxx.so
+
+%files tcl
+%defattr(-,root,root,-)
+%{_libdir}/libdb_tcl-%{__soversion}.so
+%{_libdir}/libdb_tcl-%{__soversion_major}.so
+
+%files tcl-devel
+%defattr(-,root,root,-)
+%{_libdir}/libdb_tcl.so
+
+%files sql
+%defattr(-,root,root,-)
+%{_libdir}/libdb_sql-%{__soversion}.so
+%{_libdir}/libdb_sql-%{__soversion_major}.so
+
+%files sql-devel
+%defattr(-,root,root,-)
+%{_bindir}/dbsql
+%{_libdir}/libdb_sql.so
+%{_includedir}/%{name}/dbsql.h
+
+%files java
+%defattr(-,root,root,-)
+%{_libdir}/libdb_java-%{__soversion_major}*.so
+%{_datadir}/java/*.jar
+
+%files java-devel
+%defattr(-,root,root,-)
+%{_libdir}/libdb_java.so
+
+%changelog
+* Wed Jan 30 2019 Petr Kubat <pkubat@redhat.com> 5.3.21-25
+- Optimize trickle thread CPU usage (#1608749)
+
+* Thu Jan 11 2018 Matej Mužila <mmuzila@redhat.com> - 5.3.21-24
+- Link db_dump185 with %%{__global_ldflags}. Resolves: rhbz#1460077
+
+* Tue Dec 19 2017 Matej Mužila <mmuzila@redhat.com> - 5.3.21-23
+- Mention in the man page that care should be taken when running db_hotbackup
+  with the -c option. Resolves: rhbz#1460077
+
+* Tue Oct 31 2017 Petr Kubat <pkubat@redhat.com> 5.3.21-22
+- Fix deadlocks when reading/writing off-page duplicate tree (#1349779)
+
+* Thu Sep 07 2017 Petr Kubat <pkubat@redhat.com> 5.3.21-21
+- Fail properly when encountering removed or 0-byte regions (#1471011)
+
+* Mon Mar 20 2017 Petr Kubat <pkubat@redhat.com> 5.3.21-20
+- Add man pages for libdb-utils (#1395665)
+
+* Wed Dec 14 2016 Petr Kubat <pkubat@redhat.com> - 5.3.21-20
+- Fix mutexes not being released properly (#1277887)
+
+* Thu Sep 03 2015 Jan Stanek <jstanek@redhat.com> - 5.3.21-19
+- Add patch to work around issues on large systems (>1024 CPUs)
+  Resolves: #1245410
+
+* Thu Jul 24 2014 Honza Horak <hhorak@redhat.com> - 5.3.21-18
+- Fix concurrent access problems caused by a race in CDB
+  Resolves: #1099509
+
+* Fri Jan 24 2014 Daniel Mach <dmach@redhat.com> - 5.3.21-17
+- Mass rebuild 2014-01-24
+
+* Thu Jan 16 2014 Jan Stanek <jstanek@redhat.com> - 5.3.21-16
+- Added Obsoletes in order to override unsupported db4 versions
+
+* Fri Dec 27 2013 Daniel Mach <dmach@redhat.com> - 5.3.21-15
+- Mass rebuild 2013-12-27
+
+* Tue Nov 05 2013 Jan Stanek <jstanek@redhat.com> - 5.3.21-14
+- Backported fix for possible signed overflow (#1026878)
+
+* Tue Nov 05 2013 Jan Stanek <jstanek@redhat.com> - 5.3.21-13
+- Updated config files for aarch64 (#1023795)
+
+* Wed Oct 23 2013 Jan Stanek <jstanek@redhat.com> - 5.3.21-12
+- Added Sleepycat to the license list
+
+* Thu May 16 2013 Jan Stanek <jstanek@redhat.com> - 5.3.21-11
+- Fix missing debuginfo issue for utils subpackage
+
+* Thu May  9 2013 Tom Callaway <spot@fedoraproject.org> - 5.3.21-10
+- add license clarification fix
+
+* Wed Apr 03 2013 Jan Stanek <jstanek@redhat.com> 5.3.21-9
+- Added sqlite compatibility CFLAGS (#788496)
+
+* Wed Mar 27 2013 Jan Stanek <jstanek@redhat.com> 5.3.21-8
+- Cleaned the specfile - removed the gcc-java dependency another way
+
+* Wed Mar 27 2013 Jan Stanek <jstanek@redhat.com> 5.3.21-7
+- Removed dependency on obsolete gcc-java package (#927742)
+
+* Thu Mar  7 2013 Jindrich Novy <jnovy@redhat.com> 5.3.21-6
+- add LGPLv2+ and remove Sleepycat in license tag (#886838)
+
+* Thu Feb 14 2013 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 5.3.21-5
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_19_Mass_Rebuild
+
+* Tue Nov 27 2012 Tom Callaway <spot@fedoraproject.org> - 5.3.21-4
+- fix license tag
+
+* Thu Jul 19 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 5.3.21-3
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild
+
+* Sat Jul 14 2012 Peter Robinson <pbrobinson@fedoraproject.org> - 5.3.21-2
+- Specify tag for libtool (fixes FTBFS #838334)
+
+* Thu Jul  5 2012 Jindrich Novy <jnovy@redhat.com> 5.3.21-1
+- update to 5.3.21
+  http://download.oracle.com/otndocs/products/berkeleydb/html/changelog_5_3.html
+
+* Tue Jul  3 2012 Jindrich Novy <jnovy@redhat.com> 5.3.15-5
+- move C++ header files to cxx-devel
+
+* Tue Jul  3 2012 Jindrich Novy <jnovy@redhat.com> 5.3.15-4
+- fix -devel packages dependencies yet more (#832225)
+
+* Sun May  6 2012 Jindrich Novy <jnovy@redhat.com> 5.3.15-3
+- package -devel packages correctly
+
+* Sat Apr 21 2012 Jindrich Novy <jnovy@redhat.com> 5.3.15-2
+- fix multiarch conflict in libdb-devel (#812901)
+- remove unneeded dos2unix BR
+
+* Thu Mar 15 2012 Jindrich Novy <jnovy@redhat.com> 5.3.15-1
+- update to 5.3.15
+  http://download.oracle.com/otndocs/products/berkeleydb/html/changelog_5_3.html
+
+* Fri Feb 17 2012 Deepak Bhole <dbhole@redhat.com> 5.2.36-5
+- Resolves: rhbz#794472
+- Patch from Omair Majid <omajid@redhat.com> to remove the explicit Java 6 requirement
+
+* Wed Jan 25 2012 Harald Hoyer <harald@redhat.com> 5.2.36-4
+- add filesystem guard
+
+* Wed Jan 25 2012 Harald Hoyer <harald@redhat.com> 5.2.36-3
+- install everything in /usr
+  https://fedoraproject.org/wiki/Features/UsrMove
+
+* Fri Jan 13 2012 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 5.2.36-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_17_Mass_Rebuild
+
+* Wed Jun 15 2011 Jindrich Novy <jnovy@redhat.com> 5.2.36-1
+- update to 5.2.36,
+  http://download.oracle.com/otndocs/products/berkeleydb/html/changelog_5_2.html#id3647664
+
+* Wed Jun 15 2011 Jindrich Novy <jnovy@redhat.com> 5.2.28-2
+- move development documentation to devel-doc subpackage (#705386)
+
+* Tue Jun 14 2011 Jindrich Novy <jnovy@redhat.com> 5.2.28-1
+- update to 5.2.28
+
+* Mon Feb 07 2011 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 5.1.25-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_15_Mass_Rebuild
+
+* Thu Feb  3 2011 Jindrich Novy <jnovy@redhat.com> 5.1.25-1
+- update to 5.1.25
+
+* Wed Sep 29 2010 jkeating - 5.1.19-2
+- Rebuilt for gcc bug 634757
+
+* Fri Sep 10 2010 Jindrich Novy <jnovy@redhat.com> 5.1.19-1
+- update to 5.1.19
+- rename -devel-static to -static subpackage (#617800)
+- build java on all arches
+
+* Wed Jul  7 2010 Jindrich Novy <jnovy@redhat.com> 5.0.26-1
+- update to 5.0.26
+- drop BR: ed
+
+* Thu Jun 17 2010 Jindrich Novy <jnovy@redhat.com> 5.0.21-2
+- add Requires: libdb-cxx to libdb-devel
+
+* Wed Apr 21 2010 Jindrich Novy <jnovy@redhat.com> 5.0.21-1
+- initial build
+
+* Thu Apr 15 2010 Jindrich Novy <jnovy@redhat.com> 5.0.21-0.2
+- remove C# documentation
+- disable/remove rpath
+- fix description
+- tighten dependencies
+- run ldconfig for cxx and sql subpackages
+
+* Fri Apr  9 2010 Jindrich Novy <jnovy@redhat.com> 5.0.21-0.1
+- enable sql
+- package 5.0.21