/* $Id: physical.c,v 1.18 2002-08-02 19:26:56 adam Exp $
   Copyright (C) 1995,1996,1997,1998,1999,2000,2001,2002
   Index Data Aps

This file is part of the Zebra server.

Zebra is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

Zebra is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with Zebra; see the file LICENSE.zebra. If not, write to the
Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA
02111-1307, USA.
*/

/*
 * This module handles the representation of tables in the bfiles.
 */

#include <assert.h>
#include <string.h>
#include <stdlib.h>

#include <log.h>
#include <isam.h>
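/*
 * On-disk block layout, as written by is_p_sync and read back by
 * is_p_read_partial below (a summary of the code, for orientation):
 *
 *   every block:  num_records (int), nextpos (int), keys...
 *   first block:  num_records (int), nextpos (int), total records (int),
 *                 keys...
 *
 * nextpos holds the block number of the next block in the chain, or 0
 * for the last block; keys are fixed-size records of is_keysize(is)
 * bytes each.
 */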
static int is_freestore_alloc(ISAM is, int type)
{
    int tmp;

    if (is->types[type].freelist >= 0)
    {
        tmp = is->types[type].freelist;
        if (bf_read(is->types[type].bf, tmp, 0, sizeof(tmp),
            &is->types[type].freelist) <= 0)
        {
            logf (LOG_FATAL, "Failed to allocate block");
            exit(1);
        }
    }
    else
        tmp = is->types[type].top++;

    logf (LOG_DEBUG, "Allocating block #%d", tmp);
    return tmp;
}
static void is_freestore_free(ISAM is, int type, int block)
{
    int tmp;

    logf (LOG_DEBUG, "Releasing block #%d", block);
    tmp = is->types[type].freelist;
    is->types[type].freelist = block;
    if (bf_write(is->types[type].bf, block, 0, sizeof(tmp), &tmp) < 0)
    {
        logf (LOG_FATAL, "Failed to deallocate block.");
        exit(1);
    }
}
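/*
 * Free-list bookkeeping, in outline: types[type].freelist holds the
 * number of the first free block, and each free block stores the number
 * of the next free one in its first sizeof(int) bytes (written by
 * is_freestore_free above, read back by is_freestore_alloc). When the
 * list is empty (freelist < 0), fresh blocks are taken from the top
 * counter instead.
 */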
/* this code must be modified to handle an index */
int is_p_read_partial(is_mtable *tab, is_mblock *block)
{
    int toread;
    is_mbuf *buf;

    assert(block->state == IS_MBSTATE_UNREAD);
    block->data = buf = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
    toread = tab->is->types[tab->pos_type].blocksize;
    if (toread > is_mbuf_size[buf->type])
    {
        toread = is_mbuf_size[buf->type];
        block->state = IS_MBSTATE_PARTIAL;
    }
    else
        block->state = IS_MBSTATE_CLEAN;
    if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos, 0, toread,
        buf->data) < 0)
    {
        logf (LOG_FATAL, "bfread failed.");
        return -1;
    }
    /* extract header info */
    buf->offset = 0;
    memcpy(&block->num_records, buf->data, sizeof(block->num_records));
    assert(block->num_records > 0);
    buf->offset += sizeof(block->num_records);
    memcpy(&block->nextpos, buf->data + buf->offset,
        sizeof(block->nextpos));
    buf->offset += sizeof(block->nextpos);
    if (block == tab->data) /* first block */
    {
        memcpy(&tab->num_records, buf->data + buf->offset,
            sizeof(tab->num_records));
        buf->offset += sizeof(tab->num_records);
    }
    logf(LOG_DEBUG, "R: Block #%d: num %d nextpos %d total %d",
        block->diskpos, block->num_records, block->nextpos,
        block == tab->data ? tab->num_records : -1);
    buf->num = (toread - buf->offset) / is_keysize(tab->is);
    if (buf->num >= block->num_records)
    {
        buf->num = block->num_records;
        block->state = IS_MBSTATE_CLEAN;
    }
    else
        block->bread = buf->offset + buf->num * is_keysize(tab->is);
    return 0;
}
int is_p_read_full(is_mtable *tab, is_mblock *block)
{
    is_mbuf *buf;
    int dread, toread;

    if (block->state == IS_MBSTATE_UNREAD && is_p_read_partial(tab, block) < 0)
    {
        logf (LOG_FATAL, "partial read failed.");
        return -1;
    }
    if (block->state == IS_MBSTATE_PARTIAL)
    {
        buf = block->data;
        dread = block->data->num;
        while (dread < block->num_records)
        {
            buf->next = xmalloc_mbuf(IS_MBUF_TYPE_LARGE);
            buf = buf->next;
            toread = is_mbuf_size[buf->type] / is_keysize(tab->is);
            if (toread > block->num_records - dread)
                toread = block->num_records - dread;
            if (bf_read(tab->is->types[tab->pos_type].bf, block->diskpos,
                block->bread, toread * is_keysize(tab->is), buf->data) < 0)
            {
                logf (LOG_FATAL, "bfread failed.");
                return -1;
            }
            buf->offset = 0;
            buf->num = toread;
            dread += toread;
            block->bread += toread * is_keysize(tab->is);
        }
        block->state = IS_MBSTATE_CLEAN;
    }
    logf (LOG_DEBUG, "R: Block #%d contains %d records.", block->diskpos,
        block->num_records);
    return 0;
}
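/*
 * A minimal usage sketch (illustrative, not part of this module): pull
 * every block of a table into memory, mirroring the collection loop in
 * is_p_remap below.
 *
 *   is_mblock *p;
 *   for (p = tab->data; p; p = p->next)
 *       if (p->state < IS_MBSTATE_CLEAN && is_p_read_full(tab, p) < 0)
 *           return -1;
 */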
/*
 * Write dirty blocks to the bfile.
 * Allocate blocks as necessary.
 */
void is_p_sync(is_mtable *tab)
{
    is_mblock *p;
    is_mbuf *b;
    int sum, v;
    isam_blocktype *type;

    type = &tab->is->types[tab->pos_type];
    for (p = tab->data; p; p = p->next)
    {
        if (p->state < IS_MBSTATE_DIRTY)
            continue;
        /* make sure that blocks are allocated. */
        if (p->diskpos < 0)
            p->diskpos = is_freestore_alloc(tab->is, tab->pos_type);
        if (p->next)
        {
            if (p->next->diskpos < 0)
                p->nextpos = p->next->diskpos = is_freestore_alloc(tab->is,
                    tab->pos_type);
            else
                p->nextpos = p->next->diskpos;
        }
        else
            p->nextpos = 0;
        sum = 0;
        memcpy(type->dbuf, &p->num_records, sizeof(p->num_records));
        sum += sizeof(p->num_records);
        memcpy(type->dbuf + sum, &p->nextpos, sizeof(p->nextpos));
        sum += sizeof(p->nextpos);
        if (p == tab->data) /* first block */
        {
            memcpy(type->dbuf + sum, &tab->num_records,
                sizeof(tab->num_records));
            sum += sizeof(tab->num_records);
        }
        logf (LOG_DEBUG, "W: Block #%d contains %d records.", p->diskpos,
            p->num_records);
        assert(p->num_records > 0);
        for (b = p->data; b; b = b->next)
        {
            logf(LOG_DEBUG, "  buf: offset %d, keys %d, type %d, ref %d",
                b->offset, b->num, b->type, b->refcount);
            if ((v = b->num * is_keysize(tab->is)) > 0)
                memcpy(type->dbuf + sum, b->data + b->offset, v);
            sum += v;
            assert(sum <= type->blocksize);
        }
        if (bf_write(type->bf, p->diskpos, 0, sum, type->dbuf) < 0)
        {
            logf (LOG_FATAL, "Failed to write block.");
            exit(1);
        }
    }
}
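/*
 * A plausible flush sequence, in outline (a sketch only; the actual
 * call order is up to the calling layer): reorganize the in-memory
 * chain so no block exceeds its maximum, then write the dirty blocks
 * back.
 *
 *   is_p_align(tab);      (or is_p_remap(tab))
 *   is_p_sync(tab);
 */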
/*
 * Free all disk blocks associated with the table.
 */
void is_p_unmap(is_mtable *tab)
{
    is_mblock *p;

    for (p = tab->data; p; p = p->next)
    {
        if (p->diskpos >= 0)
        {
            is_freestore_free(tab->is, tab->pos_type, p->diskpos);
            p->diskpos = -1;
        }
    }
}
static is_mbuf *mbuf_takehead(is_mbuf **mb, int *num, int keysize)
{
    is_mbuf *p = 0, **pp = &p, *inew;
    int toget = *num;

    if (!toget)
        return 0;
    while (*mb && toget >= (*mb)->num)
    {
        toget -= (*mb)->num;
        *pp = *mb;
        *mb = (*mb)->next;
        (*pp)->next = 0;
        pp = &(*pp)->next;
    }
    if (toget > 0 && *mb)
    {
        inew = xmalloc_mbuf(IS_MBUF_TYPE_SMALL);
        inew->next = (*mb)->next;
        (*mb)->next = 0;
        inew->data = (*mb)->data;
        (*mb)->refcount++;
        inew->offset = (*mb)->offset + toget * keysize;
        inew->num = (*mb)->num - toget;
        (*mb)->num = toget;
        *pp = *mb;
        *mb = inew;
        toget = 0;
    }
    *num -= toget;
    return p;
}
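/*
 * Worked example of the split above: with a chain of two mbufs of 10
 * keys each and *num == 15, the first mbuf moves to the result whole
 * and the second is split 5/5; a small mbuf sharing the same data
 * pointer (refcount is bumped) takes over the remaining 5 keys via an
 * adjusted offset, and *mb is left pointing at it.
 */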
/*
 * Split up individual blocks which have grown too large.
 * is_p_align and is_p_remap are alternative functions which trade off
 * speed in updating versus optimum usage of disk blocks.
 */
void is_p_align(is_mtable *tab)
{
    is_mblock *mblock, *inew, *last = 0, *next;
    is_mbuf *mbufs, *mbp;
    int blocks, recsblock;

    logf (LOG_DEBUG, "Realigning table.");
    for (mblock = tab->data; mblock; mblock = next)
    {
        next = mblock->next;
        if (mblock->state == IS_MBSTATE_DIRTY && mblock->num_records == 0)
        {
            if (last)
            {
                last->next = mblock->next;
                last->state = IS_MBSTATE_DIRTY;
            }
            else
            {
                next = tab->data->next;
                if (next)
                {
                    if (next->state < IS_MBSTATE_CLEAN)
                    {
                        if (is_p_read_full(tab, next) < 0)
                        {
                            logf(LOG_FATAL, "Error during re-alignment");
                            exit(1);
                        }
                        if (next->nextpos && !next->next)
                        {
                            next->next = xmalloc_mblock();
                            next->next->diskpos = next->nextpos;
                            next->next->state = IS_MBSTATE_UNREAD;
                            next->next->data = 0;
                        }
                    }
                    next->state = IS_MBSTATE_DIRTY; /* force re-process */
                    tab->data = next;
                }
            }
            if (mblock->diskpos >= 0)
                is_freestore_free(tab->is, tab->pos_type, mblock->diskpos);
            xrelease_mblock(mblock);
        }
        else if (mblock->state == IS_MBSTATE_DIRTY && mblock->num_records >
            (mblock == tab->data ?
            tab->is->types[tab->pos_type].max_keys_block0 :
            tab->is->types[tab->pos_type].max_keys_block))
        {
            blocks = tab->num_records /
                tab->is->types[tab->pos_type].nice_keys_block;
            if (tab->num_records %
                tab->is->types[tab->pos_type].nice_keys_block)
                blocks++;
            recsblock = tab->num_records / blocks;
            if (recsblock < 1)
                recsblock = 1;
            mbufs = mblock->data;
            while ((mbp = mbuf_takehead(&mbufs, &recsblock,
                is_keysize(tab->is))) && recsblock)
            {
                if (mbufs)
                {
                    inew = xmalloc_mblock();
                    inew->diskpos = -1;
                    inew->state = IS_MBSTATE_DIRTY;
                    inew->next = mblock->next;
                    mblock->next = inew;
                }
                mblock->data = mbp;
                mblock->num_records = recsblock;
                last = mblock;
                mblock = mblock->next;
            }
            next = mblock;
        }
        else
            last = mblock;
    }
}
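/*
 * Worked example of the split above: with tab->num_records == 250 and
 * nice_keys_block == 80, blocks becomes 4 and recsblock 62, so an
 * oversized block is carved into chunks of 62 keys (plus a final
 * remainder chunk), each chunk getting its own dirty mblock.
 */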
/*
 * Reorganize data in blocks for minimum block usage and quick access.
 * Free surplus blocks.
 * is_p_align and is_p_remap are alternative functions which trade off
 * speed in updating versus optimum usage of disk blocks.
 */
void is_p_remap(is_mtable *tab)
{
    is_mbuf *mbufs, **bufpp, *mbp;
    is_mblock *blockp, **blockpp;
    int recsblock, blocks;

    logf (LOG_DEBUG, "Remapping table.");
    /* collect all data */
    bufpp = &mbufs;
    for (blockp = tab->data; blockp; blockp = blockp->next)
    {
        if (blockp->state < IS_MBSTATE_CLEAN && is_m_read_full(tab, blockp) < 0)
        {
            logf (LOG_FATAL, "Read-full failed in remap.");
            exit(1);
        }
        *bufpp = blockp->data;
        while (*bufpp)
            bufpp = &(*bufpp)->next;
        blockp->data = 0;
    }
    blocks = tab->num_records / tab->is->types[tab->pos_type].nice_keys_block;
    if (tab->num_records % tab->is->types[tab->pos_type].nice_keys_block)
        blocks++;
    if (blocks == 0)
        blocks = 1;
    recsblock = tab->num_records / blocks + 1;
    if (recsblock > tab->is->types[tab->pos_type].nice_keys_block)
        recsblock--;
    blockpp = &tab->data;
    while ((mbp = mbuf_takehead(&mbufs, &recsblock, is_keysize(tab->is))) &&
        recsblock)
    {
        if (!*blockpp)
        {
            *blockpp = xmalloc_mblock();
            (*blockpp)->diskpos = -1;
        }
        (*blockpp)->data = mbp;
        (*blockpp)->num_records = recsblock;
        (*blockpp)->state = IS_MBSTATE_DIRTY;
        blockpp = &(*blockpp)->next;
    }
    if (*blockpp)
    {
        for (blockp = *blockpp; blockp; blockp = blockp->next)
            if (blockp->diskpos >= 0)
                is_freestore_free(tab->is, tab->pos_type, blockp->diskpos);
        xfree_mblocks(*blockpp);
        *blockpp = 0;
    }
}
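/*
 * Worked example of the sizing above: with tab->num_records == 250 and
 * nice_keys_block == 100, blocks becomes 3 and recsblock 84, so the
 * table is rewritten as three blocks of 84, 84 and 82 keys; any mblocks
 * left over after the last key is placed are released to the freestore.
 */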