From 2d9a810729f9c209cab7aa6bf50ec5f749ebff82 Mon Sep 17 00:00:00 2001
From: Lennart Poettering <lennart@poettering.net>
Date: Wed, 27 Nov 2013 01:44:52 +0100
Subject: [PATCH] journal: simplify pre-allocation logic

Let's just do a single fallocate() as far as possible, and don't
distinguish between allocated space and file size.

This way we can save a syscall for each append, which is quite
beneficial.
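
For illustration, a rough standalone sketch of that allocation strategy
might look like the following. This is not the code touched by this
patch; the 8 MiB increment, the struct and all names are made up for
the example:

    /* Round every grow request up to a fixed increment, allocate that
     * much with a single posix_fallocate(), and remember the resulting
     * file size so that later appends which still fit need no syscall. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <sys/stat.h>

    #define INCREASE (8ULL * 1024ULL * 1024ULL)  /* illustrative block size */

    struct sketch_file {
            int fd;
            uint64_t allocated;  /* file size as of the last allocation */
            uint64_t max_size;   /* 0 means "no limit" */
    };

    static int sketch_allocate(struct sketch_file *f, uint64_t offset, uint64_t size) {
            uint64_t new_size = offset + size;
            struct stat st;
            int r;

            /* Already covered by an earlier allocation: no syscall at all. */
            if (new_size <= f->allocated)
                    return 0;

            /* Grow in larger blocks at once, but never past the limit. */
            new_size = (new_size + INCREASE - 1) / INCREASE * INCREASE;
            if (f->max_size > 0 && new_size > f->max_size)
                    new_size = f->max_size;
            if (new_size < offset + size)
                    return -E2BIG;

            /* One allocation covering the whole file, i.e. allocated
             * space and file size are the same thing from now on. */
            r = posix_fallocate(f->fd, 0, (off_t) new_size);
            if (r != 0)
                    return -r;

            if (fstat(f->fd, &st) < 0)
                    return -errno;

            f->allocated = (uint64_t) st.st_size;
            return 0;
    }

In this sketch the common case of an append that still fits in the
already allocated area returns before any syscall, which mirrors the
per-append saving described above.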
---
 src/journal/journal-file.c | 19 ++++++++-----------
 1 file changed, 8 insertions(+), 11 deletions(-)

diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c
index 94e3921..962cdd6 100644
--- a/src/journal/journal-file.c
+++ b/src/journal/journal-file.c
@@ -315,7 +315,7 @@ static int journal_file_verify_header(JournalFile *f) {
 }
 
 static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
-        uint64_t old_size, new_size, file_size;
+        uint64_t old_size, new_size;
         int r;
 
         assert(f);
@@ -356,6 +356,11 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size)
                 }
         }
 
+        /* Increase by larger blocks at once */
+        new_size = ((new_size+FILE_SIZE_INCREASE-1) / FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
+        if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
+                new_size = f->metrics.max_size;
+
         /* Note that the glibc fallocate() fallback is very
            inefficient, hence we try to minimize the allocation area
            as we can. */
@@ -363,16 +368,8 @@ static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size)
         if (r != 0)
                 return -r;
 
-        /* Increase the file size a bit further than this, so that we
-         * we can create larger memory maps to cache */
-        file_size = ((new_size+FILE_SIZE_INCREASE-1) / FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
-        if (file_size > (uint64_t) f->last_stat.st_size) {
-                if (file_size > new_size)
-                        ftruncate(f->fd, file_size);
-
-                if (fstat(f->fd, &f->last_stat) < 0)
-                        return -errno;
-        }
+        if (fstat(f->fd, &f->last_stat) < 0)
+                return -errno;
 
         f->header->arena_size = htole64(new_size - le64toh(f->header->header_size));
 