source.dussan.org Git - rspamd.git/commitdiff
[CritFix] Fix processing of learned tokens count for redis backend
author: Vsevolod Stakhov <vsevolod@highsecure.ru>
Tue, 11 Oct 2016 15:43:34 +0000 (16:43 +0100)
committer: Vsevolod Stakhov <vsevolod@highsecure.ru>
Tue, 11 Oct 2016 15:43:34 +0000 (16:43 +0100)
src/libstat/backends/redis_backend.c
src/libstat/stat_process.c

index 4ff9b08b106def4c1d10e0cb972e9b49c89860e6..72b3a349152ac01de59c984db6585a674e3bd4cf 100644 (file)
@@ -749,7 +749,7 @@ rspamd_redis_connected (redisAsyncContext *c, gpointer r, gpointer priv)
 
        if (c->err == 0) {
                if (r != NULL) {
-                       if (G_LIKELY (reply->type == REDIS_REPLY_INTEGER)) {
+                       if (G_UNLIKELY (reply->type == REDIS_REPLY_INTEGER)) {
                                val = reply->integer;
                        }
                        else if (reply->type == REDIS_REPLY_STRING) {
@@ -807,7 +807,7 @@ rspamd_redis_processed (redisAsyncContext *c, gpointer r, gpointer priv)
                                                tok = g_ptr_array_index (task->tokens, i);
                                                elt = reply->element[i];
 
-                                               if (G_LIKELY (elt->type == REDIS_REPLY_INTEGER)) {
+                                               if (G_UNLIKELY (elt->type == REDIS_REPLY_INTEGER)) {
                                                        tok->values[rt->id] = elt->integer;
                                                        found ++;
                                                }
index ea0fcd601f2f2913eb820f4c9bc4004811ad55c5..ed24499ba4f4653af95364ace64ff498ebd8a17c 100644 (file)
@@ -323,17 +323,6 @@ rspamd_stat_backends_process (struct rspamd_stat_ctx *st_ctx,
 
                if (bk_run != NULL) {
                        st->backend->process_tokens (task, task->tokens, i, bk_run);
-
-                       if (st->stcf->is_spam) {
-                               cl->spam_learns = st->backend->total_learns (task,
-                                               bk_run,
-                                               st_ctx);
-                       }
-                       else {
-                               cl->ham_learns = st->backend->total_learns (task,
-                                               bk_run,
-                                               st_ctx);
-                       }
                }
        }
 }
@@ -372,6 +361,8 @@ rspamd_stat_classifiers_process (struct rspamd_stat_ctx *st_ctx,
 {
        guint i;
        struct rspamd_classifier *cl;
+       struct rspamd_statfile *st;
+       gpointer bk_run;
 
        if (st_ctx->classifiers->len == 0) {
                return;
@@ -391,8 +382,34 @@ rspamd_stat_classifiers_process (struct rspamd_stat_ctx *st_ctx,
                return;
        }
 
+       for (i = 0; i < st_ctx->statfiles->len; i++) {
+               st = g_ptr_array_index (st_ctx->statfiles, i);
+               cl = st->classifier;
+
+               if (cl->cfg->flags & RSPAMD_FLAG_CLASSIFIER_NO_BACKEND) {
+                       continue;
+               }
+
+               bk_run = g_ptr_array_index (task->stat_runtimes, i);
+               g_assert (st != NULL);
+
+               if (bk_run != NULL) {
+                       if (st->stcf->is_spam) {
+                               cl->spam_learns += st->backend->total_learns (task,
+                                               bk_run,
+                                               st_ctx);
+                       }
+                       else {
+                               cl->ham_learns += st->backend->total_learns (task,
+                                               bk_run,
+                                               st_ctx);
+                       }
+               }
+       }
+
        for (i = 0; i < st_ctx->classifiers->len; i++) {
                cl = g_ptr_array_index (st_ctx->classifiers, i);
+
                g_assert (cl != NULL);
 
                if (cl->cfg->min_tokens > 0 && task->tokens->len < cl->cfg->min_tokens) {