/* This file is part of Pazpar2.
   Copyright (C) 2006-2013 Index Data

Pazpar2 is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

Pazpar2 is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA

*/

#if HAVE_CONFIG_H
#include <config.h>
#endif

#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "relevance.h"
#include "session.h"
#include "client.h"
#include "settings.h"

#ifdef WIN32
#define log2(x) (log(x)/log(2))
#endif

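// Ranking state for one search: the query terms (entries) pulled from the
// CCL query tree, the shared document frequency vector, temporary work
// arrays used while counting, the ranking parameters given to
// relevance_create_ccl, and a list of per-client score normalization data.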
struct relevance
{
    int *doc_frequency_vec;
    int *term_frequency_vec_tmp;
    int *term_pos;
    int vec_len;
    struct word_entry *entries;
    pp2_charset_token_t prt;
    int rank_cluster;
    double follow_factor;
    double lead_decay;
    int length_divide;
    NMEM nmem;
    struct norm_client *norm;
};

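// One entry per normalized query term, in query order; termno is the term's
// index into the frequency vectors, ccl_field the CCL qualifier it came from.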
struct word_entry {
    const char *norm_str;
    const char *display_str;
    int termno;
    char *ccl_field;
    struct word_entry *next;
};

// Per-client data for normalizing the native scores coming from that client
struct norm_client
{
    int num; // number of the client
    float max;
    float min;
    int count;
    const char *native_score;
    int scorefield;
    float a,b; // Rn = a*R + b
    struct client *client;
    struct norm_client *next;
    struct norm_record *records;
};

const int scorefield_none = -1;  // Do not normalize anything, use tf/idf as is
  // This is the old behavior, and the default
const int scorefield_internal = -2;  // use our tf/idf, but normalize it
const int scorefield_position = -3;  // fake a score based on the position

// A structure for each (sub)record. There is one list for each client
struct norm_record
{
    struct record *record;
    float score;
    struct record_cluster *clust;
    struct norm_record *next;
};

// Find the norm_client entry for this client, or create one if not there
struct norm_client *findnorm( struct relevance *rel, struct client* client)
{
    struct norm_client *n = rel->norm;
    struct session_database *sdb;
    while (n) {
        if (n->client == client )
            return n;
        n = n->next;
    }
    n = nmem_malloc(rel->nmem, sizeof(struct norm_client) );
    if ( rel->norm )
        n->num = rel->norm->num +1;
    else
        n->num = 1;
    n->count = 0;
    n->max = 0.0;
    n->min = 0.0;
    n->client = client;
    n->next = rel->norm;
    rel->norm = n;
    sdb = client_get_database(client);
    n->native_score = session_setting_oneval(sdb, PZ_NATIVE_SCORE);
    n->records = 0;
    n->scorefield = scorefield_none;
    yaz_log(YLOG_LOG,"Normalizing: Client %d uses '%s'", n->num, n->native_score );
    if ( ! n->native_score  || ! *n->native_score )  // not specified
        n->scorefield = scorefield_none;
    else if ( strcmp(n->native_score,"position") == 0 )
        n->scorefield = scorefield_position;
    else if ( strcmp(n->native_score,"internal") == 0 )
        n->scorefield = scorefield_internal;
    else
    { // Get the field index for the score
        struct session *se = client_get_session(client);
        n->scorefield = conf_service_metadata_field_id(se->service, n->native_score);
    }
    yaz_log(YLOG_LOG,"Normalizing: Client %d uses '%s' = %d",
            n->num, n->native_score, n->scorefield );
    return n;
}


// Add a record in the list for that client, for normalizing later
static void setup_norm_record( struct relevance *rel,  struct record_cluster *clust)
{
    struct record *record;
    for (record = clust->records; record; record = record->next)
    {
        struct norm_client *norm = findnorm(rel, record->client);
        struct norm_record *rp;
        if ( norm->scorefield == scorefield_none)
            break;  // not interested in normalizing this client
        rp = nmem_malloc(rel->nmem, sizeof(struct norm_record) );
        norm->count ++;
        rp->next = norm->records;
        norm->records = rp;
        rp->clust = clust;
        rp->record = record;
        if ( norm->scorefield == scorefield_position )
            rp->score = 1.0 / record->position;
        else if ( norm->scorefield == scorefield_internal )
            rp->score = clust->relevance_score; // the tf/idf for the whole cluster
              // TODO - Get them for each record, merge later!
        else
        {
            struct record_metadata *md = record->metadata[norm->scorefield];
            rp->score = md->data.fnumber;
        }
        yaz_log(YLOG_LOG,"Got score for %d/%d : %f ",
                norm->num, record->position, rp->score );
        if ( norm->count == 1 )
        {
            norm->max = rp->score;
            norm->min = rp->score;
        } else {
            if ( rp->score > norm->max )
                norm->max = rp->score;
            if ( rp->score < norm->min && fabs(rp->score) > 1e-6 )
                norm->min = rp->score;  // skip zeroes
        }
    }
}

// Calculate the sum of squared residuals, that is, the differences between
// the normalized values and the target curve 1/position
static double squaresum( struct norm_record *rp, double a, double b)
{
    double sum = 0.0;
    for ( ; rp; rp = rp->next )
    {
        double target = 1.0 / rp->record->position;
        double normscore = rp->score * a + b;
        double diff = target - normscore;
        sum += diff * diff;
    }
    return sum;
}

// For each client, normalize scores
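// The mapping Rn = a*R + b is fitted with a simple hill-climbing search over
// a and b that minimizes squaresum() against the 1/position target; step
// sizes grow while a move improves the fit and shrink when none does.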
static void normalize_scores(struct relevance *rel)
{
    const int maxiterations = 1000;
    const double enough = 1000.0;  // sets the number of decimals we are happy with
    const double stepchange = 0.5; // reduction of the step size when finding middle
      // 0.5 seems to be magical, much better than 0.4 or 0.6
    struct norm_client *norm;
    for ( norm = rel->norm; norm; norm = norm->next )
    {
        yaz_log(YLOG_LOG,"Normalizing client %d: scorefield=%d count=%d range=%f %f",
                norm->num, norm->scorefield, norm->count, norm->min, norm->max);
        norm->a = 1.0; // default normalizing factors, no change
        norm->b = 0.0;
        if ( norm->scorefield != scorefield_none &&
             norm->scorefield != scorefield_position )
        { // have something to normalize
            double range = norm->max - norm->min;
            int it = 0;
            double a,b;  // params to optimize
            double as,bs; // step sizes
            double chi;
            const char *branch = "?";
            // initial guesses for the parameters
            if ( range < 1e-6 ) // practically zero
                range = norm->max;
            a = 1.0 / range;
            b = fabs(norm->min);
            as = a / 10;
            bs = b / 10;
            chi = squaresum( norm->records, a,b);
            while (it++ < maxiterations)  // safeguard against things not converging
            {
                double aplus = squaresum(norm->records, a+as, b);
                double aminus= squaresum(norm->records, a-as, b);
                double bplus = squaresum(norm->records, a, b+bs);
                double bminus= squaresum(norm->records, a, b-bs);
                double prevchi = chi;
                if ( aplus < chi && aplus < aminus && aplus < bplus && aplus < bminus)
                {
                    a = a + as;
                    chi = aplus;
                    as = as * (1.0 + stepchange);
                    branch = "aplus ";
                }
                else if ( aminus < chi && aminus < aplus && aminus < bplus && aminus < bminus)
                {
                    a = a - as;
                    chi = aminus;
                    as = as * (1.0 + stepchange);
                    branch = "aminus";
                }
                else if ( bplus < chi && bplus < aplus && bplus < aminus && bplus < bminus)
                {
                    b = b + bs;
                    chi = bplus;
                    bs = bs * (1.0 + stepchange);
                    branch = "bplus ";
                }
                else if ( bminus < chi && bminus < aplus && bminus < bplus && bminus < aminus)
                {
                    b = b - bs;
                    chi = bminus;
                    branch = "bminus";
                    bs = bs * (1.0+stepchange);
                }
                else
                { // a,b is the best so far; adjust one step size.
                  // Which one? The one with the greatest effect on chi,
                  // i.e. the one whose average of plus and minus lies furthest from chi
                    double adif = 0.5 * ( aplus + aminus ) - prevchi;
                    double bdif = 0.5 * ( bplus + bminus ) - prevchi;
                    if ( fabs(adif) > fabs(bdif) )
                    {
                        as = as * ( 1.0 - stepchange);
                        branch = "step a";
                    }
                    else
                    {
                        bs = bs * ( 1.0 - stepchange);
                        branch = "step b";
                    }
                }
                yaz_log(YLOG_LOG,"Fitting %s it=%d: a=%f %f  b=%f %f  chi=%f ap=%f am=%f, bp=%f bm=%f p=%f",
                    branch, it, a, as, b, bs, chi,
                    aplus, aminus, bplus, bminus, prevchi );
                norm->a = a;
                norm->b = b;
                if ( fabs(as) * enough < fabs(a) &&
                     fabs(bs) * enough < fabs(b) ) {
                    break;  // not changing much any more
                }
            }
            yaz_log(YLOG_LOG,"Fitting done: it=%d: a=%f / %f  b=%f / %f  chi = %f",
                        it-1, a, as, b, bs, chi );
            yaz_log(YLOG_LOG,"  a: %f < %f %d",
                    fabs(as)*enough, fabs(a), (fabs(as) * enough < fabs(a)) );
            yaz_log(YLOG_LOG,"  b: %f < %f %d",
                    fabs(bs)*enough, fabs(b), (fabs(bs) * enough < fabs(b)) );
        }

        if ( norm->scorefield != scorefield_none )
        { // distribute the normalized scores to the records
            struct norm_record *nr = norm->records;
            for ( ; nr ; nr = nr->next ) {
                double r = nr->score;
                r = norm->a * r + norm->b;
                nr->clust->relevance_score = 10000 * r;
                yaz_log(YLOG_LOG,"Normalized %f * %f + %f = %f",
                        nr->score, norm->a, norm->b, r );
                // TODO - This keeps overwriting the cluster score in random order!
                // Need to merge results better
            }

        }

    } // client loop
}


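// Look up a normalized token among the query terms. On a match, parse the
// term weight from the "rank" specification (a field-specific weight for the
// entry's ccl_field overrides the leading default weight) and return the entry.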
static struct word_entry *word_entry_match(struct relevance *r,
                                           const char *norm_str,
                                           const char *rank, int *weight)
{
    int i = 1;
    struct word_entry *entries = r->entries;
    for (; entries; entries = entries->next, i++)
    {
        if (*norm_str && !strcmp(norm_str, entries->norm_str))
        {
            const char *cp = 0;
            int no_read = 0;
            sscanf(rank, "%d%n", weight, &no_read);
            rank += no_read;
            while (*rank == ' ')
                rank++;
            if (no_read > 0 && (cp = strchr(rank, ' ')))
            {
                if ((cp - rank) == strlen(entries->ccl_field) &&
                    memcmp(entries->ccl_field, rank, cp - rank) == 0)
                    *weight = atoi(cp + 1);
            }
            return entries;
        }
    }
    return 0;
}

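// Tokenize a field value and write a snippet to w_snippet, wrapping runs of
// tokens that match query terms in <match>...</match>. Returns the number
// of highlighted runs.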
int relevance_snippet(struct relevance *r,
                      const char *words, const char *name,
                      WRBUF w_snippet)
{
    int no = 0;
    const char *norm_str;
    int highlight = 0;

    pp2_charset_token_first(r->prt, words, 0);
    while ((norm_str = pp2_charset_token_next(r->prt)))
    {
        size_t org_start, org_len;
        struct word_entry *entries = r->entries;

        pp2_get_org(r->prt, &org_start, &org_len);
        for (; entries; entries = entries->next)
        {
            if (*norm_str && !strcmp(norm_str, entries->norm_str))
                break;
        }
        if (entries)
        {
            if (!highlight)
            {
                highlight = 1;
                wrbuf_puts(w_snippet, "<match>");
                no++;
            }
        }
        else
        {
            if (highlight)
            {
                highlight = 0;
                wrbuf_puts(w_snippet, "</match>");
            }
        }
        wrbuf_xmlputs_n(w_snippet, words + org_start, org_len);
    }
    if (highlight)
        wrbuf_puts(w_snippet, "</match>");
    if (no)
    {
        yaz_log(YLOG_DEBUG, "SNIPPET match: %s", wrbuf_cstr(w_snippet));
    }
    return no;
}

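// Tokenize one field of one record and accumulate weighted term counts into
// the cluster's term frequency vectors. Matching terms are weighted by the
// rank spec, dampened by lead_decay according to their distance from the
// start of the field, boosted by follow_factor when consecutive query terms
// appear close to each other, and optionally divided by the field length
// (length_divide). Each step is logged to the cluster's relevance_explain1.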
void relevance_countwords(struct relevance *r, struct record_cluster *cluster,
                          const char *words, const char *rank,
                          const char *name)
{
    int *w = r->term_frequency_vec_tmp;
    const char *norm_str;
    int i, length = 0;
    double lead_decay = r->lead_decay;
    struct word_entry *e;
    WRBUF wr = cluster->relevance_explain1;
    int printed_about_field = 0;

    pp2_charset_token_first(r->prt, words, 0);
    for (e = r->entries, i = 1; i < r->vec_len; i++, e = e->next)
    {
        w[i] = 0;
        r->term_pos[i] = 0;
    }

    assert(rank);
    while ((norm_str = pp2_charset_token_next(r->prt)))
    {
        int local_weight = 0;
        e = word_entry_match(r, norm_str, rank, &local_weight);
        if (e)
        {
            int res = e->termno;
            int j;

            if (!printed_about_field)
            {
                printed_about_field = 1;
                wrbuf_printf(wr, "field=%s content=", name);
                if (strlen(words) > 50)
                {
                    wrbuf_xmlputs_n(wr, words, 49);
                    wrbuf_puts(wr, " ...");
                }
                else
                    wrbuf_xmlputs(wr, words);
                wrbuf_puts(wr, ";\n");
            }
            assert(res < r->vec_len);
            w[res] += local_weight / (1 + log2(1 + lead_decay * length));
            wrbuf_printf(wr, "%s: w[%d] += w(%d) / "
                         "(1+log2(1+lead_decay(%f) * length(%d)));\n",
                         e->display_str, res, local_weight, lead_decay, length);
            j = res - 1;
            if (j > 0 && r->term_pos[j])
            {
                int d = length + 1 - r->term_pos[j];
                wrbuf_printf(wr, "%s: w[%d] += w[%d](%d) * follow(%f) / "
                             "(1+log2(d(%d)));\n",
                             e->display_str, res, res, w[res],
                             r->follow_factor, d);
                w[res] += w[res] * r->follow_factor / (1 + log2(d));
            }
            for (j = 0; j < r->vec_len; j++)
                r->term_pos[j] = j < res ? 0 : length + 1;
        }
        length++;
    }

    for (e = r->entries, i = 1; i < r->vec_len; i++, e = e->next)
    {
        if (length == 0 || w[i] == 0)
            continue;
        wrbuf_printf(wr, "%s: tf[%d] += w[%d](%d)", e->display_str, i, i, w[i]);
        switch (r->length_divide)
        {
        case 0:
            cluster->term_frequency_vecf[i] += (double) w[i];
            break;
        case 1:
            wrbuf_printf(wr, " / log2(1+length(%d))", length);
            cluster->term_frequency_vecf[i] +=
                (double) w[i] / log2(1 + length);
            break;
        case 2:
            wrbuf_printf(wr, " / length(%d)", length);
            cluster->term_frequency_vecf[i] += (double) w[i] / length;
        }
        cluster->term_frequency_vec[i] += w[i];
        wrbuf_printf(wr, " (%f);\n", cluster->term_frequency_vecf[i]);
    }

    cluster->term_frequency_vec[0] += length;
}

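// Walk the CCL query tree and append a word_entry to res->entries for every
// normalized token of every term, assigning consecutive term numbers and
// growing res->vec_len accordingly.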
static void pull_terms(struct relevance *res, struct ccl_rpn_node *n)
{
    char **words;
    int numwords;
    char *ccl_field;
    int i;

    switch (n->kind)
    {
    case CCL_RPN_AND:
    case CCL_RPN_OR:
    case CCL_RPN_NOT:
    case CCL_RPN_PROX:
        pull_terms(res, n->u.p[0]);
        pull_terms(res, n->u.p[1]);
        break;
    case CCL_RPN_TERM:
        nmem_strsplit(res->nmem, " ", n->u.t.term, &words, &numwords);
        for (i = 0; i < numwords; i++)
        {
            const char *norm_str;

            ccl_field = nmem_strdup_null(res->nmem, n->u.t.qual);

            pp2_charset_token_first(res->prt, words[i], 0);
            while ((norm_str = pp2_charset_token_next(res->prt)))
            {
                struct word_entry **e = &res->entries;
                while (*e)
                    e = &(*e)->next;
                *e = nmem_malloc(res->nmem, sizeof(**e));
                (*e)->norm_str = nmem_strdup(res->nmem, norm_str);
                (*e)->ccl_field = ccl_field;
                (*e)->termno = res->vec_len++;
                (*e)->display_str = nmem_strdup(res->nmem, words[i]);
                (*e)->next = 0;
            }
        }
        break;
    default:
        break;
    }
}
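
// Zero the document frequency counters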
void relevance_clear(struct relevance *r)
{
    if (r)
    {
        int i;
        for (i = 0; i < r->vec_len; i++)
            r->doc_frequency_vec[i] = 0;
    }
}

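// Create the relevance machinery for one query: collect the query terms from
// the CCL tree, allocate the frequency vectors on a private NMEM, and store
// the ranking parameters.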
struct relevance *relevance_create_ccl(pp2_charset_fact_t pft,
                                       struct ccl_rpn_node *query,
                                       int rank_cluster,
                                       double follow_factor, double lead_decay,
                                       int length_divide)
{
    NMEM nmem = nmem_create();
    struct relevance *res = nmem_malloc(nmem, sizeof(*res));

    res->nmem = nmem;
    res->entries = 0;
    res->vec_len = 1;
    res->rank_cluster = rank_cluster;
    res->follow_factor = follow_factor;
    res->lead_decay = lead_decay;
    res->length_divide = length_divide;
    res->norm = 0;
    res->prt = pp2_charset_token_create(pft, "relevance");

    pull_terms(res, query);

    res->doc_frequency_vec = nmem_malloc(nmem, res->vec_len * sizeof(int));

    // worker array
    res->term_frequency_vec_tmp =
        nmem_malloc(res->nmem,
                    res->vec_len * sizeof(*res->term_frequency_vec_tmp));

    res->term_pos =
        nmem_malloc(res->nmem, res->vec_len * sizeof(*res->term_pos));

    relevance_clear(res);
    return res;
}

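// Free the relevance structure and everything allocated on its NMEM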
void relevance_destroy(struct relevance **rp)
{
    if (*rp)
    {
        pp2_charset_token_destroy((*rp)->prt);
        nmem_destroy((*rp)->nmem);
        *rp = 0;
    }
}

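// Add the term frequency counts of the src cluster into dst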
void relevance_mergerec(struct relevance *r, struct record_cluster *dst,
                        const struct record_cluster *src)
{
    int i;

    for (i = 0; i < r->vec_len; i++)
        dst->term_frequency_vec[i] += src->term_frequency_vec[i];

    for (i = 0; i < r->vec_len; i++)
        dst->term_frequency_vecf[i] += src->term_frequency_vecf[i];
}

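// Allocate and zero the term frequency vectors for a new cluster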
void relevance_newrec(struct relevance *r, struct record_cluster *rec)
{
    int i;

    // term frequency [1..]; [0] holds the total length of all fields
    rec->term_frequency_vec =
        nmem_malloc(r->nmem,
                    r->vec_len * sizeof(*rec->term_frequency_vec));
    for (i = 0; i < r->vec_len; i++)
        rec->term_frequency_vec[i] = 0;

    // term frequency divided by length of field [1..]
    rec->term_frequency_vecf =
        nmem_malloc(r->nmem,
                    r->vec_len * sizeof(*rec->term_frequency_vecf));
    for (i = 0; i < r->vec_len; i++)
        rec->term_frequency_vecf[i] = 0.0;
}

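// After a cluster's term counts are complete, bump the document frequency
// for every term that occurred, and the total document count in [0]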
void relevance_donerecord(struct relevance *r, struct record_cluster *cluster)
{
    int i;

    for (i = 1; i < r->vec_len; i++)
        if (cluster->term_frequency_vec[i] > 0)
            r->doc_frequency_vec[i]++;

    r->doc_frequency_vec[0]++;
}


// Prepare for a relevance-sorted read
void relevance_prepare_read(struct relevance *rel, struct reclist *reclist)
{
    int i;
    float *idfvec = xmalloc(rel->vec_len * sizeof(float));

    reclist_enter(reclist);

    // Calculate the inverse document frequency (idf) for each term.
    for (i = 1; i < rel->vec_len; i++)
    {
        if (!rel->doc_frequency_vec[i])
            idfvec[i] = 0;
        else
        {
            /* add one to the numerator of idf(t,D) to ensure a value > 0 */
            idfvec[i] = log((float) (1 + rel->doc_frequency_vec[0]) /
                            rel->doc_frequency_vec[i]);
        }
    }
    // Calculate relevance for each document
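    // relevance = sum over the query terms of 100000 * tf[i] * idf[i]; if
    // rank_cluster is off, the sum is divided by the number of records in
    // the cluster.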
    while (1)
    {
        int relevance = 0;
        WRBUF w;
        struct word_entry *e = rel->entries;
        struct record_cluster *rec = reclist_read_record(reclist);
        if (!rec)
            break;
        w = rec->relevance_explain2;
        wrbuf_rewind(w);
        wrbuf_puts(w, "relevance = 0;\n");
        for (i = 1; i < rel->vec_len; i++)
        {
            float termfreq = (float) rec->term_frequency_vecf[i];
            int add = 100000 * termfreq * idfvec[i];

            wrbuf_printf(w, "idf[%d] = log((1 + total(%d))/termoccur(%d));\n",
                         i, rel->doc_frequency_vec[0],
                         rel->doc_frequency_vec[i]);
            wrbuf_printf(w, "%s: relevance += 100000 * tf[%d](%f) * "
                         "idf[%d](%f) (%d);\n",
                         e->display_str, i, termfreq, i, idfvec[i], add);
            relevance += add;
            e = e->next;
        }
        if (!rel->rank_cluster)
        {
            struct record *record;
            int cluster_size = 0;

            for (record = rec->records; record; record = record->next)
                cluster_size++;

            wrbuf_printf(w, "score = relevance(%d)/cluster_size(%d);\n",
                         relevance, cluster_size);
            relevance /= cluster_size;
        }
        else
        {
            wrbuf_printf(w, "score = relevance(%d);\n", relevance);
        }
        rec->relevance_score = relevance;

        // Build the normalizing structures
        // List of (sub)records for each target
        setup_norm_record( rel, rec );

        // TODO - Loop again, merge individual record scores into clusters
        // Can I reset the reclist, or can I leave and enter without race conditions?

    } // cluster loop

    normalize_scores(rel);

    reclist_leave(reclist);
    xfree(idfvec);

}

/*
 * Local variables:
 * c-basic-offset: 4
 * c-file-style: "Stroustrup"
 * indent-tabs-mode: nil
 * End:
 * vim: shiftwidth=4 tabstop=8 expandtab
 */