From 15ffae8c3b615b07a046b0b78a66a887d43fc7c5 Mon Sep 17 00:00:00 2001
From: Adam Dickmeiss
Date: Wed, 5 Mar 2008 09:18:51 +0000
Subject: [PATCH] Avoid record data merge when compression is disabled.

Changed record data handling so that data is not combined from multiple
records when compression is disabled. This should speed up record fetches
considerably for the non-compression case.
---
 index/records.c |   10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/index/records.c b/index/records.c
index c39ac40..ad11f22 100644
--- a/index/records.c
+++ b/index/records.c
@@ -1,5 +1,5 @@
-/* $Id: records.c,v 1.3 2007-11-28 11:16:32 adam Exp $
-   Copyright (C) 1995-2007
+/* $Id: records.c,v 1.4 2008-03-05 09:18:51 adam Exp $
+   Copyright (C) 1995-2008
    Index Data ApS
 
 This file is part of the Zebra server.
@@ -69,6 +69,8 @@ struct records_info {
     int cache_cur;
     int cache_max;
 
+    int compression_chunk_size;
+
     Zebra_mutex mutex;
 
     struct records_head {
@@ -289,6 +291,7 @@ Records rec_open(BFiles bfs, int rw, int compression_method)
     p->rw = rw;
     p->tmp_size = 1024;
     p->tmp_buf = (char *) xmalloc(p->tmp_size);
+    p->compression_chunk_size = 0;
     p->recindex = recindex_open(bfs, rw, 0 /* 1=isamb for recindex */);
     r = recindex_read_head(p->recindex, p->tmp_buf);
     switch (r)
@@ -336,6 +339,7 @@ Records rec_open(BFiles bfs, int rw, int compression_method)
                     recindex_get_fname(p->recindex), version, REC_VERSION);
             ret = ZEBRA_FAIL;
         }
+        p->compression_chunk_size = 90000; /* good for BZIP2 */
         break;
     }
     for (i = 0; isize[j];
     }
-        if (used > 90000)
+        if (used > p->compression_chunk_size)
             ret = rec_cache_flush(p, 1);
     }
     assert(p->cache_cur < p->cache_max);
-- 
1.7.10.4
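
A rough idea of what the new compression_chunk_size field buys can be shown
with a small stand-alone C program. The sketch below is illustrative only:
struct toy_cache, toy_insert() and toy_flush() are made-up stand-ins for the
record cache, rec_cache_insert() and rec_cache_flush() in records.c, and only
the flush test touched by the patch is reproduced. With the chunk size at 0
(compression disabled) any already-buffered data exceeds the threshold, so
every record is written out on its own; with the 90000-byte BZIP2 chunk size
many records accumulate and are merged into one block before a flush.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the per-register cache bookkeeping
 * in records.c; names and fields here are illustrative, not the real Zebra
 * structures. */
struct toy_cache {
    int cache_cur;               /* number of records currently buffered */
    int bytes_used;              /* total size of the buffered record data */
    int compression_chunk_size;  /* 0 = no compression, 90000 = BZIP2 chunk */
    int flushes;                 /* how many times the cache was written out */
};

/* Stand-in for rec_cache_flush(): write out everything buffered so far. */
static void toy_flush(struct toy_cache *c)
{
    c->flushes++;
    c->cache_cur = 0;
    c->bytes_used = 0;
}

/* Mirrors the patched test: flush once the data already buffered exceeds the
 * configured chunk size.  With compression disabled the chunk size is 0, so
 * any buffered data forces a flush and records are never merged. */
static void toy_insert(struct toy_cache *c, int rec_size)
{
    if (c->cache_cur > 0 && c->bytes_used > c->compression_chunk_size)
        toy_flush(c);
    c->cache_cur++;
    c->bytes_used += rec_size;
}

int main(void)
{
    struct toy_cache no_comp = {0, 0, 0, 0};     /* compression disabled */
    struct toy_cache bzip2   = {0, 0, 90000, 0}; /* BZIP2-sized chunks */
    int i;

    for (i = 0; i < 10; i++)
    {
        toy_insert(&no_comp, 1000);  /* ten 1 kB records */
        toy_insert(&bzip2, 1000);
    }
    toy_flush(&no_comp);
    toy_flush(&bzip2);

    /* Without compression every record lands in its own flush;
     * with a 90000-byte chunk all ten fit into a single flush. */
    printf("no compression: %d flushes\n", no_comp.flushes);  /* 10 */
    printf("bzip2 chunks:   %d flushes\n", bzip2.flushes);    /* 1 */
    return 0;
}

That is the effect the commit message describes: with compression disabled
nothing is merged, so fetching one record no longer has to pull in data that
belongs to other records.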