+ p->head.total_bytes -= entry.size;
+}
+
+/*
+ * Delete one record: release its data blocks back to the per-type free
+ * chains, then push its index slot onto the head of the index free list.
+ *
+ * p    records handle whose header free list is updated
+ * rec  record to delete; only rec->sysno is consulted here
+ *
+ * A free index slot is marked by size == 0, with entry.next linking to
+ * the previously free slot (head.index_free chain).
+ */
+static void rec_delete_single(Records p, Record rec)
+{
+ struct record_index_entry entry;
+
+ rec_release_blocks(p, rec_sysno_to_int(rec->sysno));
+
+ /* link this slot at the head of the free-slot chain */
+ entry.next = p->head.index_free;
+ entry.size = 0; /* size 0 marks the index slot as free */
+ p->head.index_free = rec_sysno_to_int(rec->sysno);
+ write_indx(p, rec_sysno_to_int(rec->sysno), &entry, sizeof(entry));
+}
+
+/*
+ * Flush the record data held in p->tmp_buf to the data file of one block
+ * type, as a singly linked chain of fixed-size blocks, and make the index
+ * entries of all listed system numbers point at the head of that chain.
+ *
+ * p       records handle; p->tmp_buf holds the bytes to store
+ * size    number of bytes of tmp_buf to store.
+ *         NOTE(review): the write pattern below implies size includes a
+ *         leading sizeof(zint) reservation at tmp_buf[0] that becomes the
+ *         first block's link field -- confirm against the caller's layout.
+ * sysnos  list of system numbers, terminated by a value <= 0, whose index
+ *         entries are all pointed at this chain
+ *
+ * On-disk layout: each block begins with a zint holding the number of the
+ * next block in the chain (0 terminates); the remainder is payload, so
+ * each block carries block_size - sizeof(zint) payload bytes.  Each link
+ * is patched into tmp_buf immediately before the previous block is
+ * written; the patched bytes were already flushed as part of that
+ * previous block, so no record data is lost.
+ */
+static void rec_write_tmp_buf(Records p, int size, SYSNO *sysnos)
+{
+ struct record_index_entry entry;
+ int no_written = 0;
+ char *cptr = p->tmp_buf;
+ zint block_prev = -1, block_free;
+ int dst_type = 0;
+ int i;
+
+ /* pick the largest block type whose move threshold fits this size */
+ for (i = 1; i<REC_BLOCK_TYPES; i++)
+ if (size >= p->head.block_move[i])
+ dst_type = i;
+ while (no_written < size)
+ {
+ /* grab a block: reuse the head of the free chain if present... */
+ block_free = p->head.block_free[dst_type];
+ if (block_free)
+ {
+ /* a free block stores the next free block number in its
+ first zint; read it to advance the free chain head */
+ if (bf_read(p->data_BFile[dst_type],
+ block_free, 0, sizeof(*p->head.block_free),
+ &p->head.block_free[dst_type]) != 1)
+ {
+ yaz_log(YLOG_FATAL|YLOG_ERRNO, "read in %s at free block "
+ ZINT_FORMAT,
+ p->data_fname[dst_type], block_free);
+ exit(1);
+ }
+ }
+ else
+ block_free = p->head.block_last[dst_type]++; /* ...else extend the file */
+ if (block_prev == -1)
+ {
+ /* first block of the chain: record its location in the index.
+ entry.next packs block number and block type into one zint
+ (low bits hold dst_type; presumably REC_BLOCK_TYPES <= 8) */
+ entry.next = block_free*8 + dst_type;
+ entry.size = size;
+ p->head.total_bytes += size;
+ while (*sysnos > 0)
+ {
+ write_indx(p, *sysnos, &entry, sizeof(entry));
+ sysnos++;
+ }
+ }
+ else
+ {
+ /* patch the link to this block into the previous block's
+ region, then flush the previous block to disk */
+ memcpy(cptr, &block_free, sizeof(block_free));
+ bf_write(p->data_BFile[dst_type], block_prev, 0, 0, cptr);
+ cptr = p->tmp_buf + no_written;
+ }
+ block_prev = block_free;
+ /* each block carries block_size minus the zint link as payload */
+ no_written += (int)(p->head.block_size[dst_type]) - sizeof(zint);
+ p->head.block_used[dst_type]++;
+ }
+ assert(block_prev != -1);
+ /* terminate the chain with a 0 link and write the final, possibly
+ partial, block (only the bytes actually used) */
+ block_free = 0;
+ memcpy(cptr, &block_free, sizeof(block_free));
+ bf_write(p->data_BFile[dst_type], block_prev, 0,
+ sizeof(block_free) + (p->tmp_buf+size) - cptr, cptr);
+}
+
+Records rec_open(BFiles bfs, int rw, int compression_method)
+{
+ Records p;
+ int i, r;
+ int version;
+
+ p = (Records) xmalloc(sizeof(*p));
+ p->compression_method = compression_method;