author | Vsevolod Stakhov <vsevolod@rambler-co.ru> | 2011-06-03 20:23:13 +0400
---|---|---
committer | Vsevolod Stakhov <vsevolod@rambler-co.ru> | 2011-06-03 20:23:13 +0400
commit | 92de380c2c5e8ce7073ce979df4e5c7868e52bb6 (patch)
tree | 27be3202d27f129f3d94d90298a4d1e0ecf2c281 /src/tokenizers
parent | 83a9452974ec2f9c7be262a77e54a1ea2557c795 (diff)
download | rspamd-92de380c2c5e8ce7073ce979df4e5c7868e52bb6.tar.gz rspamd-92de380c2c5e8ce7073ce979df4e5c7868e52bb6.zip
* Skip short utf words in statistics
Diffstat (limited to 'src/tokenizers')
-rw-r--r-- | src/tokenizers/osb.c | 13
-rw-r--r-- | src/tokenizers/tokenizers.c | 4
-rw-r--r-- | src/tokenizers/tokenizers.h | 4
3 files changed, 14 insertions, 7 deletions
diff --git a/src/tokenizers/osb.c b/src/tokenizers/osb.c
index 1a04f3464..5f5dfcdcd 100644
--- a/src/tokenizers/osb.c
+++ b/src/tokenizers/osb.c
@@ -36,7 +36,7 @@ extern const int primes[];

 int
 osb_tokenize_text (struct tokenizer *tokenizer, memory_pool_t * pool, f_str_t * input, GTree ** tree,
-		gboolean save_token)
+		gboolean save_token, gboolean is_utf)
 {
 	token_node_t *new = NULL;
 	f_str_t token = { NULL, 0, 0 }, *res;
@@ -55,8 +55,15 @@ osb_tokenize_text (struct tokenizer *tokenizer, memory_pool_t * pool, f_str_t *

 	while ((res = tokenizer->get_next_word (input, &token)) != NULL) {
 		/* Skip small words */
-		if (token.len < MIN_LEN) {
-			continue;
+		if (is_utf) {
+			if (g_utf8_strlen (token.begin, token.len) < MIN_LEN) {
+				continue;
+			}
+		}
+		else {
+			if (token.len < MIN_LEN) {
+				continue;
+			}
 		}
 		/* Shift hashpipe */
 		for (i = FEATURE_WINDOW_SIZE - 1; i > 0; i--) {
diff --git a/src/tokenizers/tokenizers.c b/src/tokenizers/tokenizers.c
index 5af3fe6d5..9e41a9101 100644
--- a/src/tokenizers/tokenizers.c
+++ b/src/tokenizers/tokenizers.c
@@ -239,13 +239,13 @@ tokenize_subject (struct worker_task *task, GTree ** tree)
 		new = memory_pool_alloc (task->task_pool, sizeof (token_node_t));
 		subject.begin = task->subject;
 		subject.len = strlen (task->subject);
-		osb_tokenizer->tokenize_func (osb_tokenizer, task->task_pool, &subject, tree, FALSE);
+		osb_tokenizer->tokenize_func (osb_tokenizer, task->task_pool, &subject, tree, FALSE, TRUE);
 	}
 	if ((sub = g_mime_message_get_subject (task->message)) != NULL) {
 		new = memory_pool_alloc (task->task_pool, sizeof (token_node_t));
 		subject.begin = (gchar *)sub;
 		subject.len = strlen (sub);
-		osb_tokenizer->tokenize_func (osb_tokenizer, task->task_pool, &subject, tree, FALSE);
+		osb_tokenizer->tokenize_func (osb_tokenizer, task->task_pool, &subject, tree, FALSE, TRUE);
 	}
 }

diff --git a/src/tokenizers/tokenizers.h b/src/tokenizers/tokenizers.h
index 741753328..df5481a1f 100644
--- a/src/tokenizers/tokenizers.h
+++ b/src/tokenizers/tokenizers.h
@@ -24,7 +24,7 @@ typedef struct token_node_s {
 /* Common tokenizer structure */
 struct tokenizer {
 	char *name;
-	int (*tokenize_func)(struct tokenizer *tokenizer, memory_pool_t *pool, f_str_t *input, GTree **cur, gboolean save_token);
+	int (*tokenize_func)(struct tokenizer *tokenizer, memory_pool_t *pool, f_str_t *input, GTree **cur, gboolean save_token, gboolean is_utf);
 	f_str_t* (*get_next_word)(f_str_t *buf, f_str_t *token);
 };

@@ -35,7 +35,7 @@ struct tokenizer* get_tokenizer (char *name);
 /* Get next word from specified f_str_t buf */
 f_str_t *get_next_word (f_str_t *buf, f_str_t *token);
 /* OSB tokenize function */
-int osb_tokenize_text (struct tokenizer *tokenizer, memory_pool_t *pool, f_str_t *input, GTree **cur, gboolean save_token);
+int osb_tokenize_text (struct tokenizer *tokenizer, memory_pool_t *pool, f_str_t *input, GTree **cur, gboolean save_token, gboolean is_utf);
 /* Common tokenizer for headers */
 int tokenize_headers (memory_pool_t *pool, struct worker_task *task, GTree **cur);
 /* Make tokens for a subject */
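For context, the patch replaces the byte-length check `token.len < MIN_LEN` with a character-length check via GLib's `g_utf8_strlen()` whenever the new `is_utf` flag is set, so short multibyte words (e.g. three Cyrillic letters occupying six bytes) are skipped instead of being counted as long enough. The sketch below is not part of the commit; it only illustrates the difference between the two checks, and the `MIN_LEN` value of 4 is an assumption for the example (the real constant is defined in osb.c).

```c
/*
 * Minimal sketch (not from the commit) contrasting the old byte-length
 * check with the new g_utf8_strlen() character-length check.
 */
#include <glib.h>
#include <stdio.h>
#include <string.h>

#define MIN_LEN 4               /* illustrative value, not taken from rspamd */

int
main (void)
{
	/* "мир" is three Cyrillic characters but six bytes in UTF-8 */
	const gchar *word = "мир";
	gsize byte_len = strlen (word);
	glong char_len = g_utf8_strlen (word, byte_len);

	/* Old check: byte length, so the six-byte word is NOT skipped */
	printf ("byte check skips word: %s\n", byte_len < MIN_LEN ? "yes" : "no");

	/* New check for UTF input: character length, so the word IS skipped */
	printf ("utf check skips word:  %s\n", char_len < MIN_LEN ? "yes" : "no");

	return 0;
}
```

Compiled against GLib (e.g. `gcc demo.c $(pkg-config --cflags --libs glib-2.0)`), the byte-length check treats the word as long enough while the UTF-8-aware check skips it; the `is_utf` flag threads exactly this decision down from the call sites, and the subject call sites changed in this commit pass `TRUE`.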