source.dussan.org Git - rspamd.git/commitdiff
[Minor] Some adjustments to neural module
author Vsevolod Stakhov <vsevolod@highsecure.ru>
Thu, 8 Mar 2018 16:02:45 +0000 (16:02 +0000)
committer Vsevolod Stakhov <vsevolod@highsecure.ru>
Thu, 8 Mar 2018 16:02:45 +0000 (16:02 +0000)
conf/modules.d/neural.conf
src/plugins/neural.lua

diff --git a/conf/modules.d/neural.conf b/conf/modules.d/neural.conf
index 296ee2f2a55d786d909fa7c15f64208ec75904a8..1c27403bf0ead21552a9bd62b4491bb92ae086ac 100644
--- a/conf/modules.d/neural.conf
+++ b/conf/modules.d/neural.conf
@@ -21,6 +21,8 @@ neural {
     max_usages = 20; # Number of learn iterations while ANN data is valid
     spam_score = 8; # Score to learn spam
     ham_score = -2; # Score to learn ham
+    learning_rate = 0.01; # Rate of learning (Torch only)
+    max_iterations = 25; # Maximum iterations of learning (Torch only)
   }
 
   timeout = 20; # Increase redis timeout
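
In a typical rspamd setup these stock defaults would be left alone and overridden from local.d/neural.conf instead; a minimal sketch of such an override, assuming the local.d file is merged into the neural {} section as for other modules (the values here are illustrative, not recommendations):

  train {
    learning_rate = 0.005;  # Rate of learning (Torch only)
    max_iterations = 50;    # Maximum iterations of learning (Torch only)
  }
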
diff --git a/src/plugins/neural.lua b/src/plugins/neural.lua
index b2c7adcfa797d2aeaa47026e229305e88ec112c2..e0bab70f378df04c1f946c9d35b874ee243f9f5f 100644
--- a/src/plugins/neural.lua
+++ b/src/plugins/neural.lua
@@ -48,6 +48,7 @@ local default_options = {
     autotrain = true,
     train_prob = 1.0,
     learn_threads = 1,
+    learning_rate = 0.01,
   },
   use_settings = false,
   per_user = false,
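
The new default only takes effect when a rule does not set its own value; a small self-contained Lua sketch (not the plugin's actual merge code, whose helper may differ) of how a per-rule train table could be layered over default_options:

  -- Illustrative sketch of layering user options over defaults
  local function merge_defaults(defaults, user)
    local res = {}
    for k, v in pairs(defaults) do res[k] = v end
    for k, v in pairs(user or {}) do
      if type(v) == 'table' and type(res[k]) == 'table' then
        res[k] = merge_defaults(res[k], v)
      else
        res[k] = v
      end
    end
    return res
  end

  local default_options = { train = { learning_rate = 0.01, max_iterations = 25 } }
  local rule = merge_defaults(default_options, { train = { learning_rate = 0.005 } })
  print(rule.train.learning_rate, rule.train.max_iterations) -- 0.005   25
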
@@ -92,7 +93,7 @@ local redis_lua_script_can_train = [[
   lim = lim + lim * 0.1
 
   local exists = redis.call('SISMEMBER', KEYS[1], KEYS[2])
-  if not exists or exists == 0 then
+  if not exists or tonumber(exists) == 0 then
     redis.call('SADD', KEYS[1], KEYS[2])
   end
 
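Integer replies from redis.call() normally arrive in Lua as numbers, so the tonumber() here is defensive: Lua's == never coerces between strings and numbers, and a reply of "0" would have slipped past the old check. A standalone Lua sketch (no Redis needed) of the difference:

  -- Compare the old and new membership checks for a few possible reply shapes
  for _, exists in ipairs({ 0, 1, '0', '1' }) do
    local old_check = (not exists) or (exists == 0)
    local new_check = (not exists) or (tonumber(exists) == 0)
    print(string.format('reply=%-3s old=%-5s new=%s',
      tostring(exists), tostring(old_check), tostring(new_check)))
  end
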
@@ -669,7 +670,7 @@ local function train_ann(rule, _, ev_base, elt, worker)
             local criterion = nn.MSECriterion()
             local trainer = nn.StochasticGradient(anns[elt].ann_train,
               criterion)
-            trainer.learning_rate = 0.01
+            trainer.learning_rate = rule.train.learning_rate
             trainer.verbose = false
             trainer.maxIteration = rule.train.max_iterations
             trainer.hookIteration = function(self, iteration, currentError)
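
For context, a standalone sketch of Torch's nn.StochasticGradient trainer with a configurable learning rate and iteration cap, loosely mirroring the code above; the network shape and the dataset are made up for illustration, and the trainer fields (learningRate, maxIteration) are the ones documented for Torch's nn package:

  -- Standalone Torch sketch (requires the Torch 'nn' package); sizes and data are illustrative
  local torch = require 'torch'
  local nn = require 'nn'

  local net = nn.Sequential()
  net:add(nn.Linear(4, 3))
  net:add(nn.Tanh())
  net:add(nn.Linear(3, 1))

  local criterion = nn.MSECriterion()

  -- nn.StochasticGradient expects dataset:size() and dataset[i] = {input, target}
  local dataset = {}
  function dataset:size() return 16 end
  for i = 1, dataset:size() do
    dataset[i] = { torch.randn(4), torch.Tensor(1):fill(i % 2 == 0 and 1 or -1) }
  end

  local trainer = nn.StochasticGradient(net, criterion)
  trainer.learningRate = 0.01 -- plays the role of rule.train.learning_rate above
  trainer.maxIteration = 25   -- plays the role of rule.train.max_iterations
  trainer.verbose = false
  trainer:train(dataset)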