author    Vsevolod Stakhov <vsevolod@rambler-co.ru>  2011-11-08 18:47:37 +0300
committer Vsevolod Stakhov <vsevolod@rambler-co.ru>  2011-11-08 18:47:37 +0300
commit    cf21ad184448908536c32495db26f97bffd3f584 (patch)
tree      e8f4bf0efbcbbd166f79f26bc6bbc3ff6658028f /src/kvstorage_bdb.c
parent    d8f9f8f6c64001dab5f0357b1c6af93bf6c3eea5 (diff)
Implement lazy backend writing using sync_ops = 0.
Always wait for the kvstorage worker to prevent data corruption.
Diffstat (limited to 'src/kvstorage_bdb.c')
-rw-r--r--  src/kvstorage_bdb.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/kvstorage_bdb.c b/src/kvstorage_bdb.c
index 280cb5ce9..07fcfd8d8 100644
--- a/src/kvstorage_bdb.c
+++ b/src/kvstorage_bdb.c
@@ -199,7 +199,7 @@ rspamd_bdb_insert (struct rspamd_kv_backend *backend, gpointer key, struct rspam
g_queue_push_head (db->ops_queue, op);
g_hash_table_insert (db->ops_hash, ELT_KEY (elt), op);
- if (g_queue_get_length (db->ops_queue) >= db->sync_ops) {
+ if (db->sync_ops > 0 && g_queue_get_length (db->ops_queue) >= db->sync_ops) {
return bdb_process_queue (backend);
}
@@ -224,7 +224,7 @@ rspamd_bdb_replace (struct rspamd_kv_backend *backend, gpointer key, struct rspa
g_queue_push_head (db->ops_queue, op);
g_hash_table_insert (db->ops_hash, ELT_KEY (elt), op);
- if (g_queue_get_length (db->ops_queue) >= db->sync_ops) {
+ if (db->sync_ops > 0 && g_queue_get_length (db->ops_queue) >= db->sync_ops) {
return bdb_process_queue (backend);
}
@@ -293,7 +293,7 @@ rspamd_bdb_delete (struct rspamd_kv_backend *backend, gpointer key)
g_queue_push_head (db->ops_queue, op);
g_hash_table_insert (db->ops_hash, ELT_KEY(elt), op);
- if (g_queue_get_length (db->ops_queue) >= db->sync_ops) {
+ if (db->sync_ops > 0 && g_queue_get_length (db->ops_queue) >= db->sync_ops) {
bdb_process_queue (backend);
}
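
For context, below is a minimal sketch (not the actual rspamd code) of the guarded flush pattern this commit adds to rspamd_bdb_insert, rspamd_bdb_replace and rspamd_bdb_delete. Only sync_ops, ops_queue and the bdb_process_queue idea come from the diff; the struct and helper names are assumptions for illustration. With sync_ops = 0 the inline flush condition is never true, so writes stay queued (lazy) until the backend's periodic sync drains the queue.

#include <glib.h>

/* Assumed, simplified backend state; the real structure in
 * src/kvstorage_bdb.c also holds the BDB handle, ops_hash, etc. */
struct bdb_backend_sketch {
	GQueue *ops_queue;  /* pending write operations */
	guint sync_ops;     /* flush threshold; 0 = lazy (never flush inline) */
};

/* Stub standing in for bdb_process_queue (), which drains ops_queue
 * into Berkeley DB in the real backend. */
static gboolean
process_queue_sketch (struct bdb_backend_sketch *db)
{
	while (g_queue_get_length (db->ops_queue) > 0) {
		g_queue_pop_tail (db->ops_queue);  /* write + free the op in rspamd */
	}
	return TRUE;
}

/* Hypothetical helper showing the guard added by this commit: the queue
 * is flushed inline only when sync_ops is non-zero and the threshold is
 * reached; with sync_ops = 0 writes stay queued for the periodic sync. */
static gboolean
queue_op_sketch (struct bdb_backend_sketch *db, gpointer op)
{
	g_queue_push_head (db->ops_queue, op);

	if (db->sync_ops > 0 && g_queue_get_length (db->ops_queue) >= db->sync_ops) {
		return process_queue_sketch (db);
	}

	return TRUE;
}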