* Sebastian Hammer, Adam Dickmeiss
*
* $Log: cfile.c,v $
- * Revision 1.10 1996-02-07 14:03:46 adam
+ * Revision 1.14 1996-04-12 07:01:55 adam
+ * Yet another bug fix (next_block was initialized to 0; now set to 1).
+ *
+ * Revision 1.13 1996/04/09 14:48:49 adam
+ * Bug fix: offset calculation when using flat files was completely broken.
+ *
+ * Revision 1.12 1996/04/09 06:47:28 adam
+ * Function scan_areadef no longer uses sscanf (the %n specifier fails on this Linux system).
+ *
+ * Revision 1.11 1996/03/26 15:59:05 adam
+ * The directory of the shadow table file can be specified by the new
+ * bf_lockDir call.
+ *
+ * Revision 1.10 1996/02/07 14:03:46 adam
* Work on flat indexed shadow files.
*
* Revision 1.9 1996/02/07 10:08:43 adam
int hash_bytes;
cf->rmf = mf;
+ logf (LOG_LOG, "cf_open %s", cf->rmf->name);
sprintf (path, "%s-b", fname);
if (!(cf->block_mf = mf_open (area, path, block_size, wflag)))
{
static int cf_lookup_flat (CFile cf, int no)
{
int hno = (no*sizeof(int))/HASH_BSIZE;
- int off = (no*sizeof(int)) - hno*sizeof(HASH_BSIZE);
+ int off = (no*sizeof(int)) - hno*HASH_BSIZE;
int vno = 0;
mf_read (cf->hash_mf, hno+cf->head.next_bucket, off, sizeof(int), &vno);
/* Write one entry in the flat shadow table: records that logical block
 * `no` maps to physical shadow block `vno`. The table is a flat array of
 * int entries packed into HASH_BSIZE-byte buckets, so the entry's bucket
 * index is (no*sizeof(int))/HASH_BSIZE and its byte offset is the
 * remainder within that bucket.
 * NOTE(review): the diff hunk below fixes the offset calculation — the
 * old code multiplied by sizeof(HASH_BSIZE) (i.e. sizeof of an int
 * constant, typically 4) instead of HASH_BSIZE itself, which broke
 * offsets for flat files (see Revision 1.13 in the log above). */
static void cf_write_flat (CFile cf, int no, int vno)
{
int hno = (no*sizeof(int))/HASH_BSIZE;
- int off = (no*sizeof(int)) - hno*sizeof(HASH_BSIZE);
+ int off = (no*sizeof(int)) - hno*HASH_BSIZE;
/* Flat buckets live after head.next_bucket in the hash metafile;
 * presumably next_bucket marks where the hashed area ends — TODO
 * confirm against the CFile header definition. */
hno += cf->head.next_bucket;
/* Grow the recorded high-water mark so the whole flat area is known. */
if (hno >= cf->head.flat_bucket)
cf->head.flat_bucket = hno+1;
/* Header changed (flat_bucket may have grown) — mark it for flushing. */
+ cf->dirty = 1;
mf_write (cf->hash_mf, hno, off, sizeof(int), &vno);
}
struct CFile_hash_bucket *p;
int i, j;
- logf (LOG_LOG, "Moving to flat shadow.");
+ logf (LOG_LOG, "Moving to flat shadow: %s", cf->rmf->name);
logf (LOG_LOG, "hits=%d miss=%d bucket_in_memory=%d total=%d",
cf->no_hits, cf->no_miss, cf->bucket_in_memory,
cf->head.next_bucket - cf->head.first_bucket);
int cf_close (CFile cf)
{
- logf (LOG_LOG, "cf_close");
+ logf (LOG_LOG, "cf_close %s", cf->rmf->name);
logf (LOG_LOG, "hits=%d miss=%d bucket_in_memory=%d total=%d",
cf->no_hits, cf->no_miss, cf->bucket_in_memory,
cf->head.next_bucket - cf->head.first_bucket);