X-Git-Url: http://git.indexdata.com/?a=blobdiff_plain;f=src%2Frelevance.c;h=08527aec06810451acf631fd1210ed81c791833d;hb=f4b58b73e53b5f3727530fed355ca7c7dc046c22;hp=7603a7a928c2422941e9f5340fe89b55d266cf51;hpb=beda9709478c02182ceadb5f0526c32d8986c039;p=pazpar2-moved-to-github.git

diff --git a/src/relevance.c b/src/relevance.c
index 7603a7a..08527ae 100644
--- a/src/relevance.c
+++ b/src/relevance.c
@@ -1,5 +1,5 @@
 /* This file is part of Pazpar2.
-   Copyright (C) 2006-2009 Index Data
+   Copyright (C) 2006-2013 Index Data
 
 Pazpar2 is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -21,119 +21,325 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 #include <config.h>
 #endif
 
+#include <assert.h>
 #include <math.h>
 #include <stdlib.h>
 
 #include "relevance.h"
-#include "pazpar2.h"
+#include "session.h"
+
+#ifdef WIN32
+#define log2(x) (log(x)/log(2))
+#endif
 
 struct relevance {
     int *doc_frequency_vec;
+    int *term_frequency_vec_tmp;
+    int *term_pos;
     int vec_len;
     struct word_entry *entries;
-    pp2_charset_t pct;
+    pp2_charset_token_t prt;
+    int rank_cluster;
+    double follow_factor;
+    double lead_decay;
+    int length_divide;
     NMEM nmem;
 };
 
-
 struct word_entry {
     const char *norm_str;
+    const char *display_str;
     int termno;
+    char *ccl_field;
     struct word_entry *next;
 };
 
-static void add_word_entry(NMEM nmem,
-                           struct word_entry **entries,
-                           const char *norm_str,
-                           int term_no)
+static struct word_entry *word_entry_match(struct relevance *r,
+                                           const char *norm_str,
+                                           const char *rank, int *weight)
 {
-    struct word_entry *ne = nmem_malloc(nmem, sizeof(*ne));
-    ne->norm_str = nmem_strdup(nmem, norm_str);
-    ne->termno = term_no;
-
-    ne->next = *entries;
-    *entries = ne;
+    int i = 1;
+    struct word_entry *entries = r->entries;
+    for (; entries; entries = entries->next, i++)
+    {
+        if (*norm_str && !strcmp(norm_str, entries->norm_str))
+        {
+            const char *cp = 0;
+            int no_read = 0;
+            sscanf(rank, "%d%n", weight, &no_read);
+            rank += no_read;
+            while (*rank == ' ')
+                rank++;
+            if (no_read > 0 && (cp = strchr(rank, ' ')))
+            {
+                if ((cp - rank) == strlen(entries->ccl_field) &&
+                    memcmp(entries->ccl_field, rank, cp - rank) == 0)
+                    *weight = atoi(cp + 1);
+            }
+            return entries;
+        }
+    }
+    return 0;
 }
 
-
-int word_entry_match(struct word_entry *entries, const char *norm_str)
+int relevance_snippet(struct relevance *r,
+                      const char *words, const char *name,
+                      WRBUF w_snippet)
 {
-    for (; entries; entries = entries->next)
+    int no = 0;
+    const char *norm_str;
+    int highlight = 0;
+
+    pp2_charset_token_first(r->prt, words, 0);
+    while ((norm_str = pp2_charset_token_next(r->prt)))
     {
-        if (!strcmp(norm_str, entries->norm_str))
-            return entries->termno;
+        size_t org_start, org_len;
+        struct word_entry *entries = r->entries;
+        int i;
+
+        pp2_get_org(r->prt, &org_start, &org_len);
+        for (; entries; entries = entries->next, i++)
+        {
+            if (*norm_str && !strcmp(norm_str, entries->norm_str))
+                break;
+        }
+        if (entries)
+        {
+            if (!highlight)
+            {
+                highlight = 1;
+                wrbuf_puts(w_snippet, "<match>");
+                no++;
+            }
+        }
+        else
+        {
+            if (highlight)
+            {
+                highlight = 0;
+                wrbuf_puts(w_snippet, "</match>");
+            }
+        }
+        wrbuf_xmlputs_n(w_snippet, words + org_start, org_len);
     }
-    return 0;
+    if (highlight)
+        wrbuf_puts(w_snippet, "</match>");
+    if (no)
+    {
+        yaz_log(YLOG_DEBUG, "SNIPPET match: %s", wrbuf_cstr(w_snippet));
+    }
+    return no;
 }
 
-static struct word_entry *build_word_entries(pp2_charset_t pct, NMEM nmem,
-                                             const char **terms)
+void relevance_countwords(struct relevance *r, struct record_cluster *cluster,
+                          const char *words, const char *rank,
+                          const char *name)
 {
-    int termno = 1; /* >0 signals THERE is an entry */
-    struct word_entry *entries = 0;
-    const char **p = terms;
+    int *w = r->term_frequency_vec_tmp;
+    const char *norm_str;
+    int i, length = 0;
+    double lead_decay = r->lead_decay;
+    struct word_entry *e;
+    WRBUF wr = cluster->relevance_explain1;
+    int printed_about_field = 0;
 
-    for (; *p; p++)
+    pp2_charset_token_first(r->prt, words, 0);
+    for (e = r->entries, i = 1; i < r->vec_len; i++, e = e->next)
     {
-        pp2_relevance_token_t prt = pp2_relevance_tokenize(pct, *p);
-        const char *norm_str;
+        w[i] = 0;
+        r->term_pos[i] = 0;
+    }
 
-        while ((norm_str = pp2_relevance_token_next(prt)))
-            add_word_entry(nmem, &entries, norm_str, termno);
+    assert(rank);
+    while ((norm_str = pp2_charset_token_next(r->prt)))
+    {
+        int local_weight = 0;
+        e = word_entry_match(r, norm_str, rank, &local_weight);
+        if (e)
+        {
+            int res = e->termno;
+            int j;
 
-        pp2_relevance_token_destroy(prt);
+            if (!printed_about_field)
+            {
+                printed_about_field = 1;
+                wrbuf_printf(wr, "field=%s content=", name);
+                if (strlen(words) > 50)
+                {
+                    wrbuf_xmlputs_n(wr, words, 49);
+                    wrbuf_puts(wr, " ...");
+                }
+                else
+                    wrbuf_xmlputs(wr, words);
+                wrbuf_puts(wr, ";\n");
+            }
+            assert(res < r->vec_len);
+            w[res] += local_weight / (1 + log2(1 + lead_decay * length));
+            wrbuf_printf(wr, "%s: w[%d] += w(%d) / "
+                         "(1+log2(1+lead_decay(%f) * length(%d)));\n",
+                         e->display_str, res, local_weight, lead_decay, length);
+            j = res - 1;
+            if (j > 0 && r->term_pos[j])
+            {
+                int d = length + 1 - r->term_pos[j];
+                wrbuf_printf(wr, "%s: w[%d] += w[%d](%d) * follow(%f) / "
+                             "(1+log2(d(%d));\n",
+                             e->display_str, res, res, w[res],
+                             r->follow_factor, d);
+                w[res] += w[res] * r->follow_factor / (1 + log2(d));
+            }
+            for (j = 0; j < r->vec_len; j++)
+                r->term_pos[j] = j < res ? 0 : length + 1;
+        }
+        length++;
+    }
 
-        termno++;
+    for (e = r->entries, i = 1; i < r->vec_len; i++, e = e->next)
+    {
+        if (length == 0 || w[i] == 0)
+            continue;
+        wrbuf_printf(wr, "%s: tf[%d] += w[%d](%d)", e->display_str, i, i, w[i]);
+        switch (r->length_divide)
+        {
+        case 0:
+            cluster->term_frequency_vecf[i] += (double) w[i];
+            break;
+        case 1:
+            wrbuf_printf(wr, " / log2(1+length(%d))", length);
+            cluster->term_frequency_vecf[i] +=
+                (double) w[i] / log2(1 + length);
+            break;
+        case 2:
+            wrbuf_printf(wr, " / length(%d)", length);
+            cluster->term_frequency_vecf[i] += (double) w[i] / length;
+        }
+        cluster->term_frequency_vec[i] += w[i];
+        wrbuf_printf(wr, " (%f);\n", cluster->term_frequency_vecf[i]);
     }
-    return entries;
+
+    cluster->term_frequency_vec[0] += length;
 }
 
-void relevance_countwords(struct relevance *r, struct record_cluster *cluster,
-                          const char *words, int multiplier)
+static void pull_terms(struct relevance *res, struct ccl_rpn_node *n)
 {
-    pp2_relevance_token_t prt = pp2_relevance_tokenize(r->pct, words);
-
-    const char *norm_str;
-
-    while ((norm_str = pp2_relevance_token_next(prt)))
+    char **words;
+    int numwords;
+    char *ccl_field;
+    int i;
+
+    switch (n->kind)
     {
-        int res = word_entry_match(r->entries, norm_str);
-        if (res)
-            cluster->term_frequency_vec[res] += multiplier;
-        cluster->term_frequency_vec[0]++;
+    case CCL_RPN_AND:
+    case CCL_RPN_OR:
+    case CCL_RPN_NOT:
+    case CCL_RPN_PROX:
+        pull_terms(res, n->u.p[0]);
+        pull_terms(res, n->u.p[1]);
+        break;
+    case CCL_RPN_TERM:
+        nmem_strsplit(res->nmem, " ", n->u.t.term, &words, &numwords);
+        for (i = 0; i < numwords; i++)
+        {
+            const char *norm_str;
+
+            ccl_field = nmem_strdup_null(res->nmem, n->u.t.qual);
+
+            pp2_charset_token_first(res->prt, words[i], 0);
+            while ((norm_str = pp2_charset_token_next(res->prt)))
+            {
+                struct word_entry **e = &res->entries;
+                while (*e)
+                    e = &(*e)->next;
+                *e = nmem_malloc(res->nmem, sizeof(**e));
+                (*e)->norm_str = nmem_strdup(res->nmem, norm_str);
+                (*e)->ccl_field = ccl_field;
+                (*e)->termno = res->vec_len++;
+                (*e)->display_str = nmem_strdup(res->nmem, words[i]);
+                (*e)->next = 0;
+            }
+        }
+        break;
+    default:
+        break;
+    }
+}
+void relevance_clear(struct relevance *r)
+{
+    if (r)
+    {
+        int i;
+        for (i = 0; i < r->vec_len; i++)
+            r->doc_frequency_vec[i] = 0;
     }
-    pp2_relevance_token_destroy(prt);
 }
 
-struct relevance *relevance_create(pp2_charset_t pct,
-                                   NMEM nmem, const char **terms)
+struct relevance *relevance_create_ccl(pp2_charset_fact_t pft,
+                                       struct ccl_rpn_node *query,
+                                       int rank_cluster,
+                                       double follow_factor, double lead_decay,
+                                       int length_divide)
 {
-    struct relevance *res = nmem_malloc(nmem, sizeof(struct relevance));
-    const char **p;
-    int i;
+    NMEM nmem = nmem_create();
+    struct relevance *res = nmem_malloc(nmem, sizeof(*res));
 
-    for (p = terms, i = 0; *p; p++, i++)
-        ;
-    res->vec_len = ++i;
-    res->doc_frequency_vec = nmem_malloc(nmem, res->vec_len * sizeof(int));
-    memset(res->doc_frequency_vec, 0, res->vec_len * sizeof(int));
     res->nmem = nmem;
-    res->entries = build_word_entries(pct, nmem, terms);
-    res->pct = pct;
+    res->entries = 0;
+    res->vec_len = 1;
+    res->rank_cluster = rank_cluster;
+    res->follow_factor = follow_factor;
+    res->lead_decay = lead_decay;
+    res->length_divide = length_divide;
+    res->prt = pp2_charset_token_create(pft, "relevance");
+
+    pull_terms(res, query);
+
+    res->doc_frequency_vec = nmem_malloc(nmem, res->vec_len * sizeof(int));
+
+    // worker array
+    res->term_frequency_vec_tmp =
+        nmem_malloc(res->nmem,
+                    res->vec_len * sizeof(*res->term_frequency_vec_tmp));
+
+    res->term_pos =
+        nmem_malloc(res->nmem, res->vec_len * sizeof(*res->term_pos));
+
+    relevance_clear(res);
     return res;
 }
 
+void relevance_destroy(struct relevance **rp)
+{
+    if (*rp)
+    {
+        pp2_charset_token_destroy((*rp)->prt);
+        nmem_destroy((*rp)->nmem);
+        *rp = 0;
+    }
+}
+
 void relevance_newrec(struct relevance *r, struct record_cluster *rec)
 {
     if (!rec->term_frequency_vec)
     {
-        rec->term_frequency_vec = nmem_malloc(r->nmem, r->vec_len * sizeof(int));
-        memset(rec->term_frequency_vec, 0, r->vec_len * sizeof(int));
+        int i;
+
+        // term frequency [1,..] . [0] is total length of all fields
+        rec->term_frequency_vec =
+            nmem_malloc(r->nmem,
+                        r->vec_len * sizeof(*rec->term_frequency_vec));
+        for (i = 0; i < r->vec_len; i++)
+            rec->term_frequency_vec[i] = 0;
+
+        // term frequency divided by length of field [1,...]
+        rec->term_frequency_vecf =
+            nmem_malloc(r->nmem,
+                        r->vec_len * sizeof(*rec->term_frequency_vecf));
+        for (i = 0; i < r->vec_len; i++)
+            rec->term_frequency_vecf[i] = 0.0;
     }
 }
 
-
 void relevance_donerecord(struct relevance *r, struct record_cluster *cluster)
 {
     int i;
@@ -151,7 +357,7 @@ void relevance_prepare_read(struct relevance *rel, struct reclist *reclist)
     int i;
     float *idfvec = xmalloc(rel->vec_len * sizeof(float));
 
-    reclist_rewind(reclist);
+    reclist_enter(reclist);
     // Calculate document frequency vector for each term.
     for (i = 1; i < rel->vec_len; i++)
     {
@@ -159,37 +365,56 @@ void relevance_prepare_read(struct relevance *rel, struct reclist *reclist)
             idfvec[i] = 0;
         else
         {
-            // This conditional may be terribly wrong
-            // It was there to address the situation where vec[0] == vec[i]
-            // which leads to idfvec[i] == 0... not sure about this
-            // Traditional TF-IDF may assume that a word that occurs in every
-            // record is irrelevant, but this is actually something we will
-            // see a lot
-            if ((idfvec[i] = log((float) rel->doc_frequency_vec[0] /
-                                 rel->doc_frequency_vec[i])) < 0.0000001)
-                idfvec[i] = 1;
+            /* add one to nominator idf(t,D) to ensure a value > 0 */
+            idfvec[i] = log((float) (1 + rel->doc_frequency_vec[0]) /
+                            rel->doc_frequency_vec[i]);
         }
     }
     // Calculate relevance for each document
-
     while (1)
     {
-        int t;
         int relevance = 0;
+        WRBUF w;
+        struct word_entry *e = rel->entries;
        struct record_cluster *rec = reclist_read_record(reclist);
        if (!rec)
            break;
-        for (t = 1; t < rel->vec_len; t++)
+        w = rec->relevance_explain2;
+        wrbuf_rewind(w);
+        wrbuf_puts(w, "relevance = 0;\n");
+        for (i = 1; i < rel->vec_len; i++)
        {
-            float termfreq;
-            if (!rec->term_frequency_vec[0])
-                break;
-            termfreq = (float) rec->term_frequency_vec[t] / rec->term_frequency_vec[0];
-            relevance += 100000 * (termfreq * idfvec[t] + 0.0000005);
+            float termfreq = (float) rec->term_frequency_vecf[i];
+            int add = 100000 * termfreq * idfvec[i];
+
+            wrbuf_printf(w, "idf[%d] = log(((1 + total(%d))/termoccur(%d));\n",
+                         i, rel->doc_frequency_vec[0],
+                         rel->doc_frequency_vec[i]);
+            wrbuf_printf(w, "%s: relevance += 100000 * tf[%d](%f) * "
+                         "idf[%d](%f) (%d);\n",
+                         e->display_str, i, termfreq, i, idfvec[i], add);
+            relevance += add;
+            e = e->next;
+        }
+        if (!rel->rank_cluster)
+        {
+            struct record *record;
+            int cluster_size = 0;
+
+            for (record = rec->records; record; record = record->next)
+                cluster_size++;
+
+            wrbuf_printf(w, "score = relevance(%d)/cluster_size(%d);\n",
+                         relevance, cluster_size);
+            relevance /= cluster_size;
+        }
+        else
+        {
+            wrbuf_printf(w, "score = relevance(%d);\n", relevance);
        }
-        rec->relevance = relevance;
+        rec->relevance_score = relevance;
    }
-    reclist_rewind(reclist);
+    reclist_leave(reclist);
    xfree(idfvec);
 }
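
For orientation, the scoring introduced in relevance_prepare_read() above reduces to a smoothed TF*IDF sum, optionally averaged over the records of a cluster when rank_cluster is off. The sketch below is illustrative only and is not part of the commit: the function name score_cluster and its parameter layout are invented here, with tf standing in for term_frequency_vecf and df for doc_frequency_vec (df[0] holds the total length over all counted fields).

    #include <math.h>

    /* Illustrative restatement of the score computed per cluster:
       the idf numerator gets +1 so it stays positive, each term's
       contribution is scaled by 100000, and the sum is divided by the
       cluster size unless per-cluster ranking (rank_cluster) is set. */
    static int score_cluster(const double *tf, const int *df, int vec_len,
                             int cluster_size, int rank_cluster)
    {
        int relevance = 0, i;
        for (i = 1; i < vec_len; i++)
        {
            double idf = df[i] ? log((1.0 + df[0]) / df[i]) : 0.0;
            relevance += (int) (100000 * tf[i] * idf);
        }
        if (!rank_cluster && cluster_size > 0)
            relevance /= cluster_size;
        return relevance;
    }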