-/* $Id: logic.c,v 1.44 2007-06-15 06:45:39 adam Exp $
+/* $Id: logic.c,v 1.52 2007-07-16 09:00:22 adam Exp $
Copyright (c) 2006-2007, Index Data.
This file is part of Pazpar2.
return 0;
}
}
+ else if (rec->which == Z_External_OPAC)
+ {
+ if (!sdb->yaz_marc)
+ {
+ yaz_log(YLOG_WARN, "MARC decoding not configured");
+ return 0;
+ }
+ else
+ {
+ /* OPAC gets converted to XML too */
+ WRBUF wrbuf_opac = wrbuf_alloc();
+ /* MARCXML inside the OPAC XML. Charset is in effect because we
+ use the yaz_marc handle */
+ yaz_marc_xml(sdb->yaz_marc, YAZ_MARC_MARCXML);
+ yaz_opac_decode_wrbuf(sdb->yaz_marc, rec->u.opac, wrbuf_opac);
+
+ rdoc = xmlParseMemory((char*) wrbuf_buf(wrbuf_opac),
+ wrbuf_len(wrbuf_opac));
+ if (!rdoc)
+ yaz_log(YLOG_WARN, "Unable to parse OPAC XML");
+ wrbuf_destroy(wrbuf_opac);
+ }
+ }
else if (oid && yaz_oid_is_iso2709(oid))
{
/* ISO2709 gets converted to MARCXML */
if (!sdb->yaz_marc)
{
- yaz_log(YLOG_FATAL, "Unable to handle ISO2709 record");
+ yaz_log(YLOG_WARN, "MARC decoding not configured");
return 0;
}
else
return 0;
}
- if (global_parameters.dump_records){
- fprintf(stderr,
- "Input Record (normalized) from %s\n----------------\n",
- db->url);
+ if (global_parameters.dump_records)
+ {
+ FILE *lf = yaz_log_file();
+ if (lf)
+ {
+ yaz_log(YLOG_LOG, "Normalized record from %s", db->url);
#if LIBXML_VERSION >= 20600
- xmlDocFormatDump(stderr, rdoc, 1);
+ xmlDocFormatDump(lf, rdoc, 1);
#else
- xmlDocDump(stderr, rdoc);
+ xmlDocDump(lf, rdoc);
#endif
+ fprintf(lf, "\n");
+ }
}
return rdoc;
}
(*m)->next = 0;
if (!((*m)->stylesheet = conf_load_stylesheet(stylesheets[i])))
{
- yaz_log(YLOG_FATAL, "Unable to load stylesheet: %s",
+ yaz_log(YLOG_FATAL|YLOG_ERRNO, "Unable to load stylesheet: %s",
stylesheets[i]);
return -1;
}
return 0;
}
+// Observer callback invoked when the http_channel that owns a watch is
+// destroyed before the watch fires: clears the watch entry so
+// session_alert_watch() will not call into a dead channel.
+static void session_watch_cancel(void *data, struct http_channel *c)
+{
+    struct session_watchentry *ent = data;
+
+    ent->fun = 0;
+    ent->data = 0;
+    ent->obs = 0;   // observer is owned by the channel being destroyed
+}
-void session_set_watch(struct session *s, int what,
-                       session_watchfun fun, void *data)
+// Register a one-shot watch callback for event type 'what'.
+// The watch is tied to the HTTP channel 'chan': if the channel is destroyed
+// before the watch fires, session_watch_cancel clears the entry.
+// Returns 0 on success, -1 if a watch of this type is already registered.
+int session_set_watch(struct session *s, int what,
+                      session_watchfun fun, void *data,
+                      struct http_channel *chan)
 {
+    // Only one watcher per event type is allowed
+    if (s->watchlist[what].fun)
+        return -1;
     s->watchlist[what].fun = fun;
     s->watchlist[what].data = data;
+    // Observer links the watch to the channel's lifetime
+    s->watchlist[what].obs = http_add_observer(chan, &s->watchlist[what],
+                                               session_watch_cancel);
+    return 0;
 }
+// Fire the watch registered for event type 'what', if any, then clear it
+// (watches are one-shot).
 void session_alert_watch(struct session *s, int what)
 {
     if (!s->watchlist[what].fun)
         return;
+    // Detach the channel observer before invoking the callback, so the
+    // watch cannot be cancelled concurrently with (or after) firing.
+    http_remove_observer(s->watchlist[what].obs);
     (*s->watchlist[what].fun)(s->watchlist[what].data);
+    // NOTE(review): the entry is cleared AFTER the callback runs; if a
+    // callback re-registers a watch of the same type, these stores would
+    // erase it — confirm callbacks never do that.
     s->watchlist[what].fun = 0;
     s->watchlist[what].data = 0;
+    s->watchlist[what].obs = 0;
 }
//callback for grep_databases
const char **addinfo)
{
int live_channels = 0;
+ int no_working = 0;
+ int no_failed = 0;
struct client *cl;
struct database_criterion *criteria;
*addinfo = client_get_database(cl)->database->url;
return PAZPAR2_CONFIG_TARGET;
}
- // Query must parse for all targets
+ // Parse query for target
if (client_parse_query(cl, query) < 0)
+ no_failed++;
+ else
{
- *addinfo = "query";
- return PAZPAR2_MALFORMED_PARAMETER_VALUE;
+ no_working++;
+ client_prep_connection(cl);
}
}
- for (cl = se->clients; cl; cl = client_next_in_session(cl))
- client_prep_connection(cl);
-
+ // If no queries could be mapped, we signal an error
+ if (no_working == 0)
+ {
+ *addinfo = "query";
+ return PAZPAR2_MALFORMED_PARAMETER_VALUE;
+ }
return PAZPAR2_NO_ERROR;
}
+// Reset the session's database list and repopulate it.
 void session_init_databases(struct session *se)
 {
     se->databases = 0;
-    grep_databases(se, 0, session_init_databases_fun);
+    // presumably only pre-defined (configured) databases are scanned now,
+    // rather than all known databases — confirm against database.c
+    predef_grep_databases(se, 0, session_init_databases_fun);
 }
// Probably session_init_databases_fun should be refactored instead of
session->session_nmem = nmem;
session->nmem = nmem_create();
session->wrbuf = wrbuf_alloc();
- session_init_databases(session);
+ session->databases = 0;
for (i = 0; i <= SESSION_WATCH_MAX; i++)
{
session->watchlist[i].data = 0;
event_loop(&channel_list);
}
+// Build a record_metadata node from a raw string value, normalized
+// according to the metadata type.  All memory is taken from 'nmem'.
+// Returns 0 if the value cannot be parsed or the type is unsupported
+// (the partially-built node stays in the nmem arena; no leak to free).
+static struct record_metadata *record_metadata_init(
+    NMEM nmem, char *value, enum conf_metadata_type type)
+{
+    struct record_metadata *rec_md = record_metadata_create(nmem);
+    if (type == Metadata_type_generic)
+    {
+        // normalize7bit_generic trims/cleans the string in place using the
+        // given delimiter set — assumed non-destructive beyond 'value' itself
+        char * p = value;
+        p = normalize7bit_generic(p, " ,/.:([");
+
+        rec_md->data.text = nmem_strdup(nmem, p);
+    }
+    else if (type == Metadata_type_year)
+    {
+        // Extract a year (or presumably a year range) into min/max
+        int first, last;
+        if (extract7bit_years((char *) value, &first, &last) < 0)
+            return 0;   // unparsable year value
+        rec_md->data.number.min = first;
+        rec_md->data.number.max = last;
+    }
+    else
+        return 0;       // unknown metadata type
+    return rec_md;
+}
+
struct record *ingest_record(struct client *cl, Z_External *rec,
int record_no)
{
struct record_metadata *rec_md = 0;
int md_field_id = -1;
int sk_field_id = -1;
- int first, last;
type = xmlGetProp(n, (xmlChar *) "type");
value = xmlNodeListGetString(xdoc, n->children, 1);
ser_sk = &service->sortkeys[sk_field_id];
}
- // Find out where we are putting it - based on merge or not
- if (ser_md->merge == Metadata_merge_no)
- wheretoput = &record->metadata[md_field_id];
- else
- wheretoput = &cluster->metadata[md_field_id];
-
- // create new record_metadata
- rec_md = record_metadata_create(se->nmem);
-
- // and polulate with data:
- // type based charmapping decisions follow here
- if (ser_md->type == Metadata_type_generic)
+ // non-merged metadata
+ rec_md = record_metadata_init(se->nmem, (char *) value,
+ ser_md->type);
+ if (!rec_md)
{
-
- char * p = (char *) value;
- p = normalize7bit_generic(p, " ,/.:([");
-
- rec_md->data.text = nmem_strdup(se->nmem, p);
-
- }
- else if (ser_md->type == Metadata_type_year)
- {
- if (extract7bit_years((char *) value, &first, &last) < 0)
- continue;
- }
- else
- {
- yaz_log(YLOG_WARN,
- "Unknown type in metadata element %s", type);
+ yaz_log(YLOG_WARN, "bad metadata data '%s' for element '%s'",
+ value, type);
continue;
}
+ rec_md->next = record->metadata[md_field_id];
+ record->metadata[md_field_id] = rec_md;
+
+ // merged metadata
+ rec_md = record_metadata_init(se->nmem, (char *) value,
+ ser_md->type);
+ wheretoput = &cluster->metadata[md_field_id];
// and polulate with data:
// assign cluster or record based on merge action
}
}
}
- else if (ser_md->merge == Metadata_merge_all
- || ser_md->merge == Metadata_merge_no)
+ else if (ser_md->merge == Metadata_merge_all)
{
rec_md->next = *wheretoput;
*wheretoput = rec_md;
if (!*wheretoput)
{
*wheretoput = rec_md;
- (*wheretoput)->data.number.min = first;
- (*wheretoput)->data.number.max = last;
if (ser_sk)
cluster->sortkeys[sk_field_id]
= &rec_md->data;
}
else
{
- if (first < (*wheretoput)->data.number.min)
- (*wheretoput)->data.number.min = first;
- if (last > (*wheretoput)->data.number.max)
- (*wheretoput)->data.number.max = last;
+ int this_min = rec_md->data.number.min;
+ int this_max = rec_md->data.number.max;
+ if (this_min < (*wheretoput)->data.number.min)
+ (*wheretoput)->data.number.min = this_min;
+ if (this_max > (*wheretoput)->data.number.max)
+ (*wheretoput)->data.number.max = this_max;
}
#ifdef GAGA
if (ser_sk)
if (ser_md->type == Metadata_type_year)
{
char year[64];
- sprintf(year, "%d", last);
+ sprintf(year, "%d", rec_md->data.number.max);
add_facet(se, (char *) type, year);
- if (first != last)
+ if (rec_md->data.number.max != rec_md->data.number.min)
{
- sprintf(year, "%d", first);
+ sprintf(year, "%d", rec_md->data.number.min);
add_facet(se, (char *) type, year);
}
}