diff -U 5 -r db-5.3.21.old/src/dbinc_auto/int_def.in db-5.3.21/src/dbinc_auto/int_def.in
--- db-5.3.21.old/src/dbinc_auto/int_def.in 2012-05-12 01:57:53.000000000 +0800
+++ db-5.3.21/src/dbinc_auto/int_def.in 2016-10-25 22:40:58.000000000 +0800
@@ -1371,10 +1371,11 @@
#define __memp_failchk __memp_failchk@DB_VERSION_UNIQUE_NAME@
#define __memp_bhwrite __memp_bhwrite@DB_VERSION_UNIQUE_NAME@
#define __memp_pgread __memp_pgread@DB_VERSION_UNIQUE_NAME@
#define __memp_pg __memp_pg@DB_VERSION_UNIQUE_NAME@
#define __memp_bhfree __memp_bhfree@DB_VERSION_UNIQUE_NAME@
+#define __memp_bh_clear_dirty __memp_bh_clear_dirty@DB_VERSION_UNIQUE_NAME@
#define __memp_fget_pp __memp_fget_pp@DB_VERSION_UNIQUE_NAME@
#define __memp_fget __memp_fget@DB_VERSION_UNIQUE_NAME@
#define __memp_fcreate_pp __memp_fcreate_pp@DB_VERSION_UNIQUE_NAME@
#define __memp_fcreate __memp_fcreate@DB_VERSION_UNIQUE_NAME@
#define __memp_set_clear_len __memp_set_clear_len@DB_VERSION_UNIQUE_NAME@
@@ -1395,10 +1396,11 @@
#define __memp_fopen __memp_fopen@DB_VERSION_UNIQUE_NAME@
#define __memp_fclose_pp __memp_fclose_pp@DB_VERSION_UNIQUE_NAME@
#define __memp_fclose __memp_fclose@DB_VERSION_UNIQUE_NAME@
#define __memp_mf_discard __memp_mf_discard@DB_VERSION_UNIQUE_NAME@
#define __memp_inmemlist __memp_inmemlist@DB_VERSION_UNIQUE_NAME@
+#define __memp_mf_mark_dead __memp_mf_mark_dead@DB_VERSION_UNIQUE_NAME@
#define __memp_fput_pp __memp_fput_pp@DB_VERSION_UNIQUE_NAME@
#define __memp_fput __memp_fput@DB_VERSION_UNIQUE_NAME@
#define __memp_unpin_buffers __memp_unpin_buffers@DB_VERSION_UNIQUE_NAME@
#define __memp_dirty __memp_dirty@DB_VERSION_UNIQUE_NAME@
#define __memp_shared __memp_shared@DB_VERSION_UNIQUE_NAME@
@@ -1453,10 +1455,11 @@
#define __memp_fsync_pp __memp_fsync_pp@DB_VERSION_UNIQUE_NAME@
#define __memp_fsync __memp_fsync@DB_VERSION_UNIQUE_NAME@
#define __mp_xxx_fh __mp_xxx_fh@DB_VERSION_UNIQUE_NAME@
#define __memp_sync_int __memp_sync_int@DB_VERSION_UNIQUE_NAME@
#define __memp_mf_sync __memp_mf_sync@DB_VERSION_UNIQUE_NAME@
+#define __memp_purge_dead_files __memp_purge_dead_files@DB_VERSION_UNIQUE_NAME@
#define __memp_trickle_pp __memp_trickle_pp@DB_VERSION_UNIQUE_NAME@
#define __mutex_alloc __mutex_alloc@DB_VERSION_UNIQUE_NAME@
#define __mutex_alloc_int __mutex_alloc_int@DB_VERSION_UNIQUE_NAME@
#define __mutex_free __mutex_free@DB_VERSION_UNIQUE_NAME@
#define __mutex_free_int __mutex_free_int@DB_VERSION_UNIQUE_NAME@
diff -U 5 -r db-5.3.21.old/src/dbinc_auto/mp_ext.h db-5.3.21/src/dbinc_auto/mp_ext.h
--- db-5.3.21.old/src/dbinc_auto/mp_ext.h 2012-05-12 01:57:53.000000000 +0800
+++ db-5.3.21/src/dbinc_auto/mp_ext.h 2016-10-25 22:40:58.000000000 +0800
@@ -14,10 +14,11 @@
int __memp_failchk __P((ENV *));
int __memp_bhwrite __P((DB_MPOOL *, DB_MPOOL_HASH *, MPOOLFILE *, BH *, int));
int __memp_pgread __P((DB_MPOOLFILE *, BH *, int));
int __memp_pg __P((DB_MPOOLFILE *, db_pgno_t, void *, int));
int __memp_bhfree __P((DB_MPOOL *, REGINFO *, MPOOLFILE *, DB_MPOOL_HASH *, BH *, u_int32_t));
+void __memp_bh_clear_dirty __P((ENV*, DB_MPOOL_HASH *, BH *));
int __memp_fget_pp __P((DB_MPOOLFILE *, db_pgno_t *, DB_TXN *, u_int32_t, void *));
int __memp_fget __P((DB_MPOOLFILE *, db_pgno_t *, DB_THREAD_INFO *, DB_TXN *, u_int32_t, void *));
int __memp_fcreate_pp __P((DB_ENV *, DB_MPOOLFILE **, u_int32_t));
int __memp_fcreate __P((ENV *, DB_MPOOLFILE **));
int __memp_set_clear_len __P((DB_MPOOLFILE *, u_int32_t));
@@ -38,10 +39,11 @@
int __memp_fopen __P((DB_MPOOLFILE *, MPOOLFILE *, const char *, const char **, u_int32_t, int, size_t));
int __memp_fclose_pp __P((DB_MPOOLFILE *, u_int32_t));
int __memp_fclose __P((DB_MPOOLFILE *, u_int32_t));
int __memp_mf_discard __P((DB_MPOOL *, MPOOLFILE *, int));
int __memp_inmemlist __P((ENV *, char ***, int *));
+void __memp_mf_mark_dead __P((DB_MPOOL *, MPOOLFILE *, int*));
int __memp_fput_pp __P((DB_MPOOLFILE *, void *, DB_CACHE_PRIORITY, u_int32_t));
int __memp_fput __P((DB_MPOOLFILE *, DB_THREAD_INFO *, void *, DB_CACHE_PRIORITY));
int __memp_unpin_buffers __P((ENV *, DB_THREAD_INFO *));
int __memp_dirty __P((DB_MPOOLFILE *, void *, DB_THREAD_INFO *, DB_TXN *, DB_CACHE_PRIORITY, u_int32_t));
int __memp_shared __P((DB_MPOOLFILE *, void *));
@@ -96,10 +98,11 @@
int __memp_fsync_pp __P((DB_MPOOLFILE *));
int __memp_fsync __P((DB_MPOOLFILE *));
int __mp_xxx_fh __P((DB_MPOOLFILE *, DB_FH **));
int __memp_sync_int __P((ENV *, DB_MPOOLFILE *, u_int32_t, u_int32_t, u_int32_t *, int *));
int __memp_mf_sync __P((DB_MPOOL *, MPOOLFILE *, int));
+int __memp_purge_dead_files __P((ENV *));
int __memp_trickle_pp __P((DB_ENV *, int, int *));

#if defined(__cplusplus)
}
#endif
diff -U 5 -r db-5.3.21.old/src/mp/mp_bh.c db-5.3.21/src/mp/mp_bh.c
--- db-5.3.21.old/src/mp/mp_bh.c 2012-05-12 01:57:53.000000000 +0800
+++ db-5.3.21/src/mp/mp_bh.c 2016-10-25 17:09:35.000000000 +0800
@@ -472,15 +472,12 @@
* a shared latch.
*/
if (F_ISSET(bhp, BH_DIRTY | BH_TRASH)) {
MUTEX_LOCK(env, hp->mtx_hash);
DB_ASSERT(env, !SH_CHAIN_HASNEXT(bhp, vc));
- if (ret == 0 && F_ISSET(bhp, BH_DIRTY)) {
- F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
- DB_ASSERT(env, atomic_read(&hp->hash_page_dirty) > 0);
- atomic_dec(env, &hp->hash_page_dirty);
- }
+ if (ret == 0)
+ __memp_bh_clear_dirty(env, hp, bhp);

/* put the page back if necessary. */
if ((ret != 0 || BH_REFCOUNT(bhp) > 1) &&
F_ISSET(bhp, BH_TRASH)) {
ret = __memp_pg(dbmfp, bhp->pgno, bhp->buf, 1);
@@ -686,5 +683,31 @@
} else
MUTEX_UNLOCK(env, mfp->mutex);

return (ret);
}
+
+/*
+ * __memp_bh_clear_dirty --
+ * Clear the dirty flag of a buffer. Calls on the same buffer must be
+ * serialized to get the accounting correct. This can be achieved by
+ * acquiring an exclusive lock on the buffer, a shared lock on the
+ * buffer plus an exclusive lock on the hash bucket, or some other
+ * mechanism that guarantees single-thread access to the entire region
+ * (e.g. during __memp_region_bhfree()).
+ *
+ * PUBLIC: void __memp_bh_clear_dirty __P((ENV*, DB_MPOOL_HASH *, BH *));
+ */
+void
+__memp_bh_clear_dirty(env, hp, bhp)
+ ENV *env;
+ DB_MPOOL_HASH *hp;
+ BH *bhp;
+{
+ COMPQUIET(env, env);
+ if (F_ISSET(bhp, BH_DIRTY)) {
+ F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
+ DB_ASSERT(env, atomic_read(&hp->hash_page_dirty) > 0);
+ (void)atomic_dec(env, &hp->hash_page_dirty);
+ }
+}
+
diff -U 5 -r db-5.3.21.old/src/mp/mp_fget.c db-5.3.21/src/mp/mp_fget.c
--- db-5.3.21.old/src/mp/mp_fget.c 2012-05-12 01:57:53.000000000 +0800
+++ db-5.3.21/src/mp/mp_fget.c 2016-10-25 17:11:08.000000000 +0800
@@ -437,16 +437,11 @@
* complain and get out.
*/
if (flags == DB_MPOOL_FREE) {
freebuf: MUTEX_LOCK(env, hp->mtx_hash);
h_locked = 1;
- if (F_ISSET(bhp, BH_DIRTY)) {
- F_CLR(bhp, BH_DIRTY | BH_DIRTY_CREATE);
- DB_ASSERT(env,
- atomic_read(&hp->hash_page_dirty) > 0);
- atomic_dec(env, &hp->hash_page_dirty);
- }
+ __memp_bh_clear_dirty(env, hp, bhp);

/*
* If the buffer we found is already freed, we're done.
* If the ref count is not 1 then someone may be
* peeking at the buffer. We cannot free it until they
diff -U 5 -r db-5.3.21.old/src/mp/mp_fopen.c db-5.3.21/src/mp/mp_fopen.c
--- db-5.3.21.old/src/mp/mp_fopen.c 2012-05-12 01:57:53.000000000 +0800
+++ db-5.3.21/src/mp/mp_fopen.c 2016-10-25 22:31:05.000000000 +0800
@@ -12,10 +12,11 @@
#include "dbinc/log.h"
#include "dbinc/mp.h"
#include "dbinc/db_page.h"
#include "dbinc/hash.h"

+static int __memp_count_dead_mutex __P((DB_MPOOL *, u_int32_t *));
static int __memp_mpf_alloc __P((DB_MPOOL *,
DB_MPOOLFILE *, const char *, u_int32_t, u_int32_t, MPOOLFILE **));
static int __memp_mpf_find __P((ENV *,
DB_MPOOLFILE *, DB_MPOOL_HASH *, const char *, u_int32_t, MPOOLFILE **));

@@ -709,11 +710,15 @@
* We should be able to set mfp to NULL and break out of the
* loop, but I like the idea of checking all the entries.
*/
if (LF_ISSET(DB_TRUNCATE)) {
MUTEX_LOCK(env, mfp->mutex);
- mfp->deadfile = 1;
+ /*
+ * We cannot purge dead files here, because the caller
+ * is holding the mutex of the hash bucket of mfp.
+ */
+ __memp_mf_mark_dead(dbmp, mfp, NULL);
MUTEX_UNLOCK(env, mfp->mutex);
continue;
}

/*
@@ -907,14 +912,15 @@
DB_MPOOL *dbmp;
ENV *env;
MPOOLFILE *mfp;
char *rpath;
u_int32_t ref;
- int deleted, ret, t_ret;
+ int deleted, purge_dead, ret, t_ret;

env = dbmfp->env;
dbmp = env->mp_handle;
+ purge_dead = 0;
ret = 0;

/*
* Remove the DB_MPOOLFILE from the process' list.
*
@@ -1004,11 +1010,11 @@
}
DB_ASSERT(env, mfp->neutral_cnt < mfp->mpf_cnt);
if (--mfp->mpf_cnt == 0 || LF_ISSET(DB_MPOOL_DISCARD)) {
if (LF_ISSET(DB_MPOOL_DISCARD) ||
F_ISSET(mfp, MP_TEMP) || mfp->unlink_on_close) {
- mfp->deadfile = 1;
+ __memp_mf_mark_dead(dbmp, mfp, &purge_dead);
}
if (mfp->unlink_on_close) {
if ((t_ret = __db_appname(dbmp->env, DB_APP_DATA,
R_ADDR(dbmp->reginfo, mfp->path_off), NULL,
&rpath)) != 0 && ret == 0)
@@ -1037,10 +1043,12 @@
deleted = 1;
}
}
if (!deleted && !LF_ISSET(DB_MPOOL_NOLOCK))
MUTEX_UNLOCK(env, mfp->mutex);
+ if (purge_dead)
+ (void)__memp_purge_dead_files(env);

done: /* Discard the DB_MPOOLFILE structure. */
if (dbmfp->pgcookie != NULL) {
__os_free(env, dbmfp->pgcookie->data);
__os_free(env, dbmfp->pgcookie);
@@ -1091,11 +1099,11 @@
/*
* We have to release the MPOOLFILE mutex before acquiring the region
* mutex so we don't deadlock. Make sure nobody ever looks at this
* structure again.
*/
- mfp->deadfile = 1;
+ __memp_mf_mark_dead(dbmp, mfp, NULL);

/* Discard the mutex we're holding and return it too the pool. */
MUTEX_UNLOCK(env, mfp->mutex);
if ((t_ret = __mutex_free(env, &mfp->mutex)) != 0 && ret == 0)
ret = t_ret;
@@ -1216,5 +1224,106 @@
/* Make sure we don't return any garbage. */
*cntp = 0;
*namesp = NULL;
return (ret);
}
+
+/*
+ * __memp_mf_mark_dead --
+ * Mark an MPOOLFILE as dead because its contents are no longer necessary.
+ * This happens when removing, truncating, or closing an unnamed in-memory
+ * database. Return, in the purgep parameter, whether the caller should
+ * call __memp_purge_dead_files() after the lock on mfp is released. The
+ * caller must hold an exclusive lock on the mfp handle.
+ *
+ * PUBLIC: void __memp_mf_mark_dead __P((DB_MPOOL *, MPOOLFILE *, int*));
+ */
+void
+__memp_mf_mark_dead(dbmp, mfp, purgep)
+ DB_MPOOL *dbmp;
+ MPOOLFILE *mfp;
+ int *purgep;
+{
+ ENV *env;
+#ifdef HAVE_MUTEX_SUPPORT
+ REGINFO *infop;
+ DB_MUTEXREGION *mtxregion;
+ u_int32_t mutex_max, mutex_inuse, dead_mutex;
+#endif
+
+ if (purgep != NULL)
+ *purgep = 0;
+
+ env = dbmp->env;
+
+#ifdef HAVE_MUTEX_SUPPORT
+ MUTEX_REQUIRED(env, mfp->mutex);
+
+ if (MUTEX_ON(env) && mfp->deadfile == 0) {
+ infop = &env->mutex_handle->reginfo;
+ mtxregion = infop->primary;
+
+ mutex_inuse = mtxregion->stat.st_mutex_inuse;
+ if ((mutex_max = env->dbenv->mutex_max) == 0)
+ mutex_max = infop->rp->max / mtxregion->mutex_size;
+
+ /*
+ * Purging dead pages requires a full scan of the entire cache
+ * buffer, so it is a slow operation. We only want to do it
+ * when it is necessary and provides enough benefits. Below is
+ * a simple heuristic that determines when to purge all dead
+ * pages.
+ */
+ if (purgep != NULL && mutex_inuse > mutex_max - 200) {
+ /*
+ * If the mutex region is almost full and there are
+ * many mutexes held by dead files, purge dead files.
+ */
+ (void)__memp_count_dead_mutex(dbmp, &dead_mutex);
+ dead_mutex += mfp->block_cnt + 1;
+
+ if (dead_mutex > mutex_inuse / 20)
+ *purgep = 1;
+ }
+ }
+#endif
+
+ mfp->deadfile = 1;
+}
+
+/*
+ * __memp_count_dead_mutex --
+ * Estimate the number of mutexes held by dead files.
+ */
+static int
+__memp_count_dead_mutex(dbmp, dead_mutex)
+ DB_MPOOL *dbmp;
+ u_int32_t *dead_mutex;
+{
+ ENV *env;
+ DB_MPOOL_HASH *hp;
+ MPOOL *mp;
+ MPOOLFILE *mfp;
+ u_int32_t mutex_per_file;
+ int busy, i;
+
+ env = dbmp->env;
+ *dead_mutex = 0;
+ mutex_per_file = 1;
+#ifndef HAVE_ATOMICFILEREAD
+ mutex_per_file = 2;
+#endif
+ mp = dbmp->reginfo[0].primary;
+ hp = R_ADDR(dbmp->reginfo, mp->ftab);
+ for (i = 0; i < MPOOL_FILE_BUCKETS; i++, hp++) {
+ busy = MUTEX_TRYLOCK(env, hp->mtx_hash);
+ if (busy)
+ continue;
+ SH_TAILQ_FOREACH(mfp, &hp->hash_bucket, q, __mpoolfile) {
+ if (mfp->deadfile)
+ *dead_mutex += mfp->block_cnt + mutex_per_file;
+ }
+ MUTEX_UNLOCK(env, hp->mtx_hash);
+ }
+
+ return (0);
+}
diff -U 5 -r db-5.3.21.old/src/mp/mp_method.c db-5.3.21/src/mp/mp_method.c
--- db-5.3.21.old/src/mp/mp_method.c 2012-05-12 01:57:53.000000000 +0800
+++ db-5.3.21/src/mp/mp_method.c 2016-10-25 17:22:23.000000000 +0800
@@ -638,11 +638,11 @@
DB_MPOOL_HASH *hp, *nhp;
MPOOL *mp;
MPOOLFILE *mfp;
roff_t newname_off;
u_int32_t bucket;
- int locked, ret;
+ int locked, purge_dead, ret;
size_t nlen;
void *p;

#undef op_is_remove
#define op_is_remove (newname == NULL)
@@ -655,10 +655,11 @@
dbmp = NULL;
mfp = NULL;
nhp = NULL;
p = NULL;
locked = ret = 0;
+ purge_dead = 0;

if (!MPOOL_ON(env))
goto fsop;

dbmp = env->mp_handle;
@@ -747,11 +748,11 @@
* they do not get reclaimed as long as they exist. Since we
* are now deleting the database, we need to dec that count.
*/
if (mfp->no_backing_file)
mfp->mpf_cnt--;
- mfp->deadfile = 1;
+ __memp_mf_mark_dead(dbmp, mfp, &purge_dead);
MUTEX_UNLOCK(env, mfp->mutex);
} else {
/*
* Else, it's a rename. We've allocated memory for the new
* name. Swap it with the old one. If it's in memory we
@@ -806,10 +807,16 @@
if (locked == 1) {
MUTEX_UNLOCK(env, hp->mtx_hash);
if (nhp != NULL && nhp != hp)
MUTEX_UNLOCK(env, nhp->mtx_hash);
}
+ /*
+ * __memp_purge_dead_files() must be called when the hash bucket is
+ * unlocked.
+ */
+ if (purge_dead)
+ (void)__memp_purge_dead_files(env);
return (ret);
}

/*
* __memp_ftruncate __
diff -U 5 -r db-5.3.21.old/src/mp/mp_sync.c db-5.3.21/src/mp/mp_sync.c
--- db-5.3.21.old/src/mp/mp_sync.c 2012-05-12 01:57:53.000000000 +0800
+++ db-5.3.21/src/mp/mp_sync.c 2016-10-25 17:26:58.000000000 +0800
@@ -24,10 +24,11 @@
static int __bhcmp __P((const void *, const void *));
static int __memp_close_flush_files __P((ENV *, int));
static int __memp_sync_files __P((ENV *));
static int __memp_sync_file __P((ENV *,
MPOOLFILE *, void *, u_int32_t *, u_int32_t));
+static inline void __update_err_ret(int, int*);

/*
* __memp_walk_files --
* PUBLIC: int __memp_walk_files __P((ENV *, MPOOL *,
* PUBLIC: int (*) __P((ENV *, MPOOLFILE *, void *,
@@ -961,5 +962,125 @@
return (-1);
if (bhp1->track_pgno > bhp2->track_pgno)
return (1);
return (0);
}
+
+/*
+ * __memp_purge_dead_files --
+ * Remove all dead files and their buffers from the mpool. The caller
+ * cannot hold any lock on the dead MPOOLFILE handles, their buffers
+ * or their hash buckets.
+ *
+ * PUBLIC: int __memp_purge_dead_files __P((ENV *));
+ */
+int
+__memp_purge_dead_files(env)
+ ENV *env;
+{
+ BH *bhp;
+ DB_MPOOL *dbmp;
+ DB_MPOOL_HASH *hp, *hp_end;
+ REGINFO *infop;
+ MPOOL *c_mp, *mp;
+ MPOOLFILE *mfp;
+ u_int32_t i_cache;
+ int ret, t_ret, h_lock;
+
+ if (!MPOOL_ON(env))
+ return (0);
+
+ dbmp = env->mp_handle;
+ mp = dbmp->reginfo[0].primary;
+ ret = t_ret = h_lock = 0;
+
+ /*
+ * Walk each cache's list of buffers and free all buffers whose
+ * MPOOLFILE is marked as dead.
+ */
+ for (i_cache = 0; i_cache < mp->nreg; i_cache++) {
+ infop = &dbmp->reginfo[i_cache];
+ c_mp = infop->primary;
+
+ hp = R_ADDR(infop, c_mp->htab);
+ hp_end = &hp[c_mp->htab_buckets];
+ for (; hp < hp_end; hp++) {
+ /* Skip empty buckets. */
+ if (SH_TAILQ_FIRST(&hp->hash_bucket, __bh) == NULL)
+ continue;
+
+ /*
+ * Search for a dead buffer. Other places that call
+ * __memp_bhfree() acquire the buffer lock before the
+ * hash bucket lock. Even though we acquire the two
+ * locks in reverse order, we cannot deadlock here
+ * because we don't block waiting for the locks.
+ */
+ t_ret = MUTEX_TRYLOCK(env, hp->mtx_hash);
+ if (t_ret != 0) {
+ __update_err_ret(t_ret, &ret);
+ continue;
+ }
+ h_lock = 1;
+ SH_TAILQ_FOREACH(bhp, &hp->hash_bucket, hq, __bh) {
+ /* Skip buffers that are being used. */
+ if (BH_REFCOUNT(bhp) > 0)
+ continue;
+
+ mfp = R_ADDR(dbmp->reginfo, bhp->mf_offset);
+ if (!mfp->deadfile)
+ continue;
+
+ /* Found a dead buffer. Prepare to free it. */
+ t_ret = MUTEX_TRYLOCK(env, bhp->mtx_buf);
+ if (t_ret != 0) {
+ __update_err_ret(t_ret, &ret);
+ continue;
+ }
+
+ DB_ASSERT(env, (!F_ISSET(bhp, BH_EXCLUSIVE) &&
+ BH_REFCOUNT(bhp) == 0));
+ F_SET(bhp, BH_EXCLUSIVE);
+ (void)atomic_inc(env, &bhp->ref);
+
+ __memp_bh_clear_dirty(env, hp, bhp);
+
+ /*
+ * Free the buffer. The buffer and hash bucket
+ * are unlocked by __memp_bhfree.
+ */
+ if ((t_ret = __memp_bhfree(dbmp, infop, mfp,
+ hp, bhp, BH_FREE_FREEMEM)) == 0)
+ /*
+ * Decrement hp, so the next turn will
+ * search the same bucket again.
+ */
+ hp--;
+ else
+ __update_err_ret(t_ret, &ret);
+
+ /*
+ * The hash bucket is unlocked, we need to
+ * start over again.
+ */
+ h_lock = 0;
+ break;
+ }
+
+ if (h_lock) {
+ MUTEX_UNLOCK(env, hp->mtx_hash);
+ h_lock = 0;
+ }
+ }
+ }
+
+ return (ret);
+}
+
+static inline void
+__update_err_ret(t_ret, retp)
+ int t_ret;
+ int *retp;
+{
+ if (t_ret != 0 && t_ret != DB_LOCK_NOTGRANTED && *retp == 0)
+ *retp = t_ret;
+}
diff -U 5 -r db-5.3.21.old/src/mp/mp_trickle.c db-5.3.21/src/mp/mp_trickle.c
--- db-5.3.21.old/src/mp/mp_trickle.c 2012-05-12 01:57:53.000000000 +0800
+++ db-5.3.21/src/mp/mp_trickle.c 2016-10-25 17:27:57.000000000 +0800
@@ -65,10 +65,14 @@
"DB_ENV->memp_trickle: %d: percent must be between 1 and 100",
"%d"), pct);
return (EINVAL);
}

+ /* First we purge all dead files and their buffers. */
+ if ((ret = __memp_purge_dead_files(env)) != 0)
+ return (ret);
+
/*
* Loop through the caches counting total/dirty buffers.
*
* XXX
* Using hash_page_dirty is our only choice at the moment, but it's not
diff -U 5 -r db-5.3.21.old/src/mutex/mut_region.c db-5.3.21/src/mutex/mut_region.c
--- db-5.3.21.old/src/mutex/mut_region.c 2012-05-12 01:57:54.000000000 +0800
+++ db-5.3.21/src/mutex/mut_region.c 2016-10-25 17:34:22.000000000 +0800
@@ -15,11 +15,11 @@
#include "dbinc/txn.h"

static db_size_t __mutex_align_size __P((ENV *));
static int __mutex_region_init __P((ENV *, DB_MUTEXMGR *));
static size_t __mutex_region_size __P((ENV *));
-static size_t __mutex_region_max __P((ENV *));
+static size_t __mutex_region_max __P((ENV *, u_int32_t));

/*
* __mutex_open --
* Open a mutex region.
*
@@ -32,11 +32,11 @@
{
DB_ENV *dbenv;
DB_MUTEXMGR *mtxmgr;
DB_MUTEXREGION *mtxregion;
size_t size;
- u_int32_t cpu_count;
+ u_int32_t cpu_count, mutex_needed;
int ret;
#ifndef HAVE_ATOMIC_SUPPORT
u_int i;
#endif

@@ -59,23 +59,24 @@
cpu_count : cpu_count * MUTEX_SPINS_PER_PROCESSOR)) != 0)
return (ret);
}

/*
- * If the user didn't set an absolute value on the number of mutexes
- * we'll need, figure it out. We're conservative in our allocation,
- * we need mutexes for DB handles, group-commit queues and other things
- * applications allocate at run-time. The application may have kicked
- * up our count to allocate its own mutexes, add that in.
+ * Figure out the number of mutexes we'll need. We're conservative in
+ * our allocation, we need mutexes for DB handles, group-commit queues
+ * and other things applications allocate at run-time. The application
+ * may have kicked up our count to allocate its own mutexes, add that
+ * in.
*/
+ mutex_needed =
+ __lock_region_mutex_count(env) +
+ __log_region_mutex_count(env) +
+ __memp_region_mutex_count(env) +
+ __txn_region_mutex_count(env);
if (dbenv->mutex_cnt == 0 &&
F_ISSET(env, ENV_PRIVATE | ENV_THREAD) != ENV_PRIVATE)
- dbenv->mutex_cnt =
- __lock_region_mutex_count(env) +
- __log_region_mutex_count(env) +
- __memp_region_mutex_count(env) +
- __txn_region_mutex_count(env);
+ dbenv->mutex_cnt = mutex_needed;

if (dbenv->mutex_max != 0 && dbenv->mutex_cnt > dbenv->mutex_max)
dbenv->mutex_cnt = dbenv->mutex_max;

/* Create/initialize the mutex manager structure. */
@@ -88,12 +89,12 @@
mtxmgr->reginfo.id = INVALID_REGION_ID;
mtxmgr->reginfo.flags = REGION_JOIN_OK;
size = __mutex_region_size(env);
if (create_ok)
F_SET(&mtxmgr->reginfo, REGION_CREATE_OK);
- if ((ret = __env_region_attach(env,
- &mtxmgr->reginfo, size, size + __mutex_region_max(env))) != 0)
+ if ((ret = __env_region_attach(env, &mtxmgr->reginfo,
+ size, size + __mutex_region_max(env, mutex_needed))) != 0)
goto err;

/* If we created the region, initialize it. */
if (F_ISSET(&mtxmgr->reginfo, REGION_CREATE))
if ((ret = __mutex_region_init(env, mtxmgr)) != 0)
@@ -350,44 +351,62 @@

dbenv = env->dbenv;

s = sizeof(DB_MUTEXMGR) + 1024;

- /* We discard one mutex for the OOB slot. */
+ /*
+ * We discard one mutex for the OOB slot. Make sure mutex_cnt doesn't
+ * overflow.
+ */
s += __env_alloc_size(
- (dbenv->mutex_cnt + 1) *__mutex_align_size(env));
+ (dbenv->mutex_cnt + (dbenv->mutex_cnt == UINT32_MAX ? 0 : 1)) *
+ __mutex_align_size(env));

return (s);
}

/*
* __mutex_region_max --
* Return the amount of space needed to reach the maximum size.
*/
static size_t
-__mutex_region_max(env)
+__mutex_region_max(env, mutex_needed)
ENV *env;
+ u_int32_t mutex_needed;
{
DB_ENV *dbenv;
- u_int32_t max;
+ u_int32_t max, mutex_cnt;

dbenv = env->dbenv;
+ mutex_cnt = dbenv->mutex_cnt;

- if ((max = dbenv->mutex_max) == 0) {
+ /*
+ * We want to limit the region size to accommodate at most UINT32_MAX
+ * mutexes. If mutex_cnt is UINT32_MAX, no more space is allowed.
+ */
+ if ((max = dbenv->mutex_max) == 0 && mutex_cnt != UINT32_MAX)
if (F_ISSET(env, ENV_PRIVATE | ENV_THREAD) == ENV_PRIVATE)
- max = dbenv->mutex_inc + 1;
- else
+ if (dbenv->mutex_inc + 1 < UINT32_MAX - mutex_cnt)
+ max = dbenv->mutex_inc + 1 + mutex_cnt;
+ else
+ max = UINT32_MAX;
+ else {
max = __lock_region_mutex_max(env) +
__txn_region_mutex_max(env) +
__log_region_mutex_max(env) +
dbenv->mutex_inc + 100;
- } else if (max <= dbenv->mutex_cnt)
+ if (max < UINT32_MAX - mutex_needed)
+ max += mutex_needed;
+ else
+ max = UINT32_MAX;
+ }
+
+ if (max <= mutex_cnt)
return (0);
else
- max -= dbenv->mutex_cnt;
-
- return ( __env_alloc_size(max * __mutex_align_size(env)));
+ return (__env_alloc_size(
+ (max - mutex_cnt) * __mutex_align_size(env)));
}

#ifdef HAVE_MUTEX_SYSTEM_RESOURCES
/*
* __mutex_resource_return
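
Note (not part of the patch itself): the sketch below is a minimal, hypothetical test program showing how the new purge path can be reached once the patch is applied. It assumes a build against the patched libdb-5.3 headers and library; the "/tmp/purge-env" home directory and the iteration count are placeholders. Each loop iteration opens a fully unnamed (temporary) database, writes one record and closes it, so __memp_fclose() marks the MPOOLFILE dead and may purge it, and the final memp_trickle() call runs __memp_purge_dead_files() up front per the mp_trickle.c hunk.

#include <string.h>
#include <stdlib.h>
#include <db.h>

int
main(void)
{
	DB_ENV *dbenv;
	DB *dbp;
	DBT key, data;
	int i, nwrote, ret, t_ret;

	if ((ret = db_env_create(&dbenv, 0)) != 0)
		return (EXIT_FAILURE);
	/* Placeholder home directory; it must exist before running. */
	if ((ret = dbenv->open(dbenv, "/tmp/purge-env",
	    DB_CREATE | DB_INIT_MPOOL | DB_PRIVATE, 0)) != 0)
		goto err;

	for (i = 0; i < 10000; i++) {
		/* A database with no file and no name is temporary (MP_TEMP). */
		if ((ret = db_create(&dbp, dbenv, 0)) != 0)
			goto err;
		if ((ret = dbp->open(dbp, NULL,
		    NULL, NULL, DB_BTREE, DB_CREATE, 0)) == 0) {
			memset(&key, 0, sizeof(key));
			memset(&data, 0, sizeof(data));
			key.data = data.data = &i;
			key.size = data.size = sizeof(i);
			ret = dbp->put(dbp, NULL, &key, &data, 0);
		}
		/*
		 * Closing the handle marks its MPOOLFILE dead; with the
		 * patch, __memp_fclose() may purge the dead file's buffers
		 * and mutexes instead of leaving them in the cache.
		 */
		if ((t_ret = dbp->close(dbp, 0)) != 0 && ret == 0)
			ret = t_ret;
		if (ret != 0)
			goto err;
	}

	/* The patched trickle call purges all remaining dead files first. */
	ret = dbenv->memp_trickle(dbenv, 10, &nwrote);

err:	(void)dbenv->close(dbenv, 0);
	return (ret == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}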