X-Git-Url: http://git.indexdata.com/?a=blobdiff_plain;f=src%2Frelevance.c;h=933ca201dda35ce5b735e20e33f2a50d7892a284;hb=23baa3263f8e150101a6a3a483ab5d186311619a;hp=072a894a88dc8e59c0a28c8c23898b0124201ac3;hpb=556f48eff358140d9b68549bf88988133fba786d;p=pazpar2-moved-to-github.git

diff --git a/src/relevance.c b/src/relevance.c
index 072a894..933ca20 100644
--- a/src/relevance.c
+++ b/src/relevance.c
@@ -1,5 +1,5 @@
 /* This file is part of Pazpar2.
-   Copyright (C) 2006-2009 Index Data
+   Copyright (C) 2006-2011 Index Data
 
 Pazpar2 is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free
@@ -26,14 +26,14 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 #include
 
 #include "relevance.h"
-#include "pazpar2.h"
+#include "session.h"
 
 struct relevance
 {
     int *doc_frequency_vec;
     int vec_len;
     struct word_entry *entries;
-    pp2_charset_t pct;
+    pp2_charset_token_t prt;
     NMEM nmem;
 };
 
@@ -68,7 +68,8 @@ int word_entry_match(struct word_entry *entries, const char *norm_str)
     return 0;
 }
 
-static struct word_entry *build_word_entries(pp2_charset_t pct, NMEM nmem,
+static struct word_entry *build_word_entries(pp2_charset_token_t prt,
+                                             NMEM nmem,
                                              const char **terms)
 {
     int termno = 1; /* >0 signals THERE is an entry */
@@ -77,14 +78,11 @@ static struct word_entry *build_word_entries(pp2_charset_t pct, NMEM nmem,
 
     for (; *p; p++)
     {
-        pp2_relevance_token_t prt = pp2_relevance_tokenize(pct, *p);
         const char *norm_str;
 
-        while ((norm_str = pp2_relevance_token_next(prt)))
+        pp2_charset_token_first(prt, *p, 0);
+        while ((norm_str = pp2_charset_token_next(prt)))
             add_word_entry(nmem, &entries, norm_str, termno);
-
-        pp2_relevance_token_destroy(prt);
-
        termno++;
     }
     return entries;
@@ -93,15 +91,15 @@ static struct word_entry *build_word_entries(pp2_charset_t pct, NMEM nmem,
 void relevance_countwords(struct relevance *r, struct record_cluster *cluster,
                           const char *words, int multiplier, const char *name)
 {
-    pp2_relevance_token_t prt = pp2_relevance_tokenize(r->pct, words);
     int *mult = cluster->term_frequency_vec_tmp;
     const char *norm_str;
     int i, length = 0;
 
+    pp2_charset_token_first(r->prt, words, 0);
     for (i = 1; i < r->vec_len; i++)
         mult[i] = 0;
 
-    while ((norm_str = pp2_relevance_token_next(prt)))
+    while ((norm_str = pp2_charset_token_next(r->prt)))
     {
         int res = word_entry_match(r->entries, norm_str);
         if (res)
@@ -114,16 +112,16 @@ void relevance_countwords(struct relevance *r, struct record_cluster *cluster,
 
     for (i = 1; i < r->vec_len; i++)
     {
-        cluster->term_frequency_vecf[i] += (double) mult[i] / length;
+        if (length > 0) /* only add if non-empty */
+            cluster->term_frequency_vecf[i] += (double) mult[i] / length;
         cluster->term_frequency_vec[i] += mult[i];
     }
 
     cluster->term_frequency_vec[0] += length;
-    pp2_relevance_token_destroy(prt);
 }
 
-struct relevance *relevance_create(pp2_charset_t pct,
-                                   NMEM nmem, const char **terms)
+static struct relevance *relevance_create(pp2_charset_fact_t pft,
+                                          NMEM nmem, const char **terms)
 {
     struct relevance *res = nmem_malloc(nmem, sizeof(struct relevance));
     const char **p;
@@ -135,11 +133,61 @@ struct relevance *relevance_create(pp2_charset_t pct,
     res->doc_frequency_vec = nmem_malloc(nmem, res->vec_len * sizeof(int));
     memset(res->doc_frequency_vec, 0, res->vec_len * sizeof(int));
     res->nmem = nmem;
-    res->entries = build_word_entries(pct, nmem, terms);
-    res->pct = pct;
+    res->prt = pp2_charset_token_create(pft, "relevance");
+    res->entries = build_word_entries(res->prt, nmem, terms);
     return res;
 }
 
+// Recursively traverse query structure to extract terms.
+static void pull_terms(NMEM nmem, struct ccl_rpn_node *n,
+                       char **termlist, int *num, int max_terms)
+{
+    char **words;
+    int numwords;
+    int i;
+
+    switch (n->kind)
+    {
+    case CCL_RPN_AND:
+    case CCL_RPN_OR:
+    case CCL_RPN_NOT:
+    case CCL_RPN_PROX:
+        pull_terms(nmem, n->u.p[0], termlist, num, max_terms);
+        pull_terms(nmem, n->u.p[1], termlist, num, max_terms);
+        break;
+    case CCL_RPN_TERM:
+        nmem_strsplit(nmem, " ", n->u.t.term, &words, &numwords);
+        for (i = 0; i < numwords; i++)
+        {
+            if (*num < max_terms)
+                termlist[(*num)++] = words[i];
+        }
+        break;
+    default: // NOOP
+        break;
+    }
+}
+
+struct relevance *relevance_create_ccl(pp2_charset_fact_t pft,
+                                       NMEM nmem, struct ccl_rpn_node *query)
+{
+    char *termlist[512];
+    int num = 0;
+
+    pull_terms(nmem, query, termlist, &num, sizeof(termlist)/sizeof(*termlist));
+    termlist[num] = 0;
+    return relevance_create(pft, nmem, (const char **) termlist);
+}
+
+void relevance_destroy(struct relevance **rp)
+{
+    if (*rp)
+    {
+        pp2_charset_token_destroy((*rp)->prt);
+        *rp = 0;
+    }
+}
+
 void relevance_newrec(struct relevance *r, struct record_cluster *rec)
 {
     if (!rec->term_frequency_vec)
@@ -185,7 +233,7 @@ void relevance_prepare_read(struct relevance *rel, struct reclist *reclist)
     int i;
     float *idfvec = xmalloc(rel->vec_len * sizeof(float));
 
-    reclist_rewind(reclist);
+    reclist_enter(reclist);
     // Calculate document frequency vector for each term.
     for (i = 1; i < rel->vec_len; i++)
     {
@@ -229,9 +277,9 @@ void relevance_prepare_read(struct relevance *rel, struct reclist *reclist)
 #endif
             relevance += 100000 * (termfreq * idfvec[t] + 0.0000005);
         }
-        rec->relevance = relevance;
+        rec->relevance_score = relevance;
     }
 
-    reclist_rewind(reclist);
+    reclist_leave(reclist);
     xfree(idfvec);
 }
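
For reference, a minimal sketch of how a caller might drive the API touched by this patch. The relevance_* and reclist_* names and signatures are taken from the diff above; the wrapper function, the "title" field, the weight of 1, and the assumption that relevance.h pulls in the needed pazpar2/YAZ types are illustrative guesses, not code from the repository.

/* Sketch only: intended call order around the new
 * relevance_create_ccl()/relevance_destroy() pair.  The driver function,
 * field name and weight are assumptions for illustration. */
#include "relevance.h"

static void rank_clusters_sketch(pp2_charset_fact_t pft, NMEM nmem,
                                 struct ccl_rpn_node *query,
                                 struct record_cluster *cluster,
                                 const char *title,
                                 struct reclist *reclist)
{
    /* The tokenizer now lives inside struct relevance (charset "relevance"),
       created once here instead of once per relevance_countwords() call. */
    struct relevance *rel = relevance_create_ccl(pft, nmem, query);

    relevance_newrec(rel, cluster);              /* set up per-cluster vectors */
    relevance_countwords(rel, cluster, title,    /* count query-term matches   */
                         1, "title");            /* in one metadata field      */

    relevance_prepare_read(rel, reclist);        /* fills rec->relevance_score */

    relevance_destroy(&rel);                     /* frees tokenizer, rel = 0   */
}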