author     Cedric Schieli <cschieli@gmail.com>    2010-04-21 08:05:41 +0000
committer  Cedric Schieli <cschieli@gmail.com>    2010-04-21 08:07:00 +0000
commit     f6060f09c7ce9ee321b65b86f583251a94b39eeb (patch)
tree       692c5e737e4ca785408560e9bafce96ad1ceaafd /main/linux-pae/0017-flow-delayed-deletion-of-flow-cache-entries.patch
parent     93d401987e9e376a7e402aeb32141114f05f4af7 (diff)
main/linux-pae: sync with main/linux-grsec
Diffstat (limited to 'main/linux-pae/0017-flow-delayed-deletion-of-flow-cache-entries.patch')
-rw-r--r--  main/linux-pae/0017-flow-delayed-deletion-of-flow-cache-entries.patch  231
1 file changed, 231 insertions(+), 0 deletions(-)
diff --git a/main/linux-pae/0017-flow-delayed-deletion-of-flow-cache-entries.patch b/main/linux-pae/0017-flow-delayed-deletion-of-flow-cache-entries.patch
new file mode 100644
index 000000000..7d17d41ae
--- /dev/null
+++ b/main/linux-pae/0017-flow-delayed-deletion-of-flow-cache-entries.patch
@@ -0,0 +1,231 @@
+From fede05e99e2d860e97bc877b8b77fb9e63f55cc8 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Timo=20Ter=C3=A4s?= <timo.teras@iki.fi>
+Date: Wed, 7 Apr 2010 00:30:07 +0000
+Subject: [PATCH 17/18] flow: delayed deletion of flow cache entries
+
+Speed up lookups by freeing flow cache entries later. After
+virtualizing flow cache entry operations, the flow cache may now
+end up calling policy or bundle destructor which can be slowish.
+
+As the gc_list is more effective with a doubly linked list, the flow cache
+is converted to use the common hlist and list macros where appropriate.
+
+Signed-off-by: Timo Teras <timo.teras@iki.fi>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+(cherry picked from commit 8e4795605d1e1b39113818ad7c147b8a867a1f6a)
+---
+ net/core/flow.c | 100 ++++++++++++++++++++++++++++++++++++++-----------------
+ 1 files changed, 69 insertions(+), 31 deletions(-)
+
+diff --git a/net/core/flow.c b/net/core/flow.c
+index 521df52..1619006 100644
+--- a/net/core/flow.c
++++ b/net/core/flow.c
+@@ -26,7 +26,10 @@
+ #include <linux/security.h>
+
+ struct flow_cache_entry {
+- struct flow_cache_entry *next;
++ union {
++ struct hlist_node hlist;
++ struct list_head gc_list;
++ } u;
+ u16 family;
+ u8 dir;
+ u32 genid;
+@@ -35,7 +38,7 @@ struct flow_cache_entry {
+ };
+
+ struct flow_cache_percpu {
+- struct flow_cache_entry **hash_table;
++ struct hlist_head *hash_table;
+ int hash_count;
+ u32 hash_rnd;
+ int hash_rnd_recalc;
+@@ -62,6 +65,9 @@ atomic_t flow_cache_genid = ATOMIC_INIT(0);
+ static struct flow_cache flow_cache_global;
+ static struct kmem_cache *flow_cachep;
+
++static DEFINE_SPINLOCK(flow_cache_gc_lock);
++static LIST_HEAD(flow_cache_gc_list);
++
+ #define flow_cache_hash_size(cache) (1 << (cache)->hash_shift)
+ #define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
+
+@@ -86,38 +92,66 @@ static int flow_entry_valid(struct flow_cache_entry *fle)
+ return 1;
+ }
+
+-static void flow_entry_kill(struct flow_cache *fc,
+- struct flow_cache_percpu *fcp,
+- struct flow_cache_entry *fle)
++static void flow_entry_kill(struct flow_cache_entry *fle)
+ {
+ if (fle->object)
+ fle->object->ops->delete(fle->object);
+ kmem_cache_free(flow_cachep, fle);
+- fcp->hash_count--;
++}
++
++static void flow_cache_gc_task(struct work_struct *work)
++{
++ struct list_head gc_list;
++ struct flow_cache_entry *fce, *n;
++
++ INIT_LIST_HEAD(&gc_list);
++ spin_lock_bh(&flow_cache_gc_lock);
++ list_splice_tail_init(&flow_cache_gc_list, &gc_list);
++ spin_unlock_bh(&flow_cache_gc_lock);
++
++ list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
++ flow_entry_kill(fce);
++}
++static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
++
++static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
++ int deleted, struct list_head *gc_list)
++{
++ if (deleted) {
++ fcp->hash_count -= deleted;
++ spin_lock_bh(&flow_cache_gc_lock);
++ list_splice_tail(gc_list, &flow_cache_gc_list);
++ spin_unlock_bh(&flow_cache_gc_lock);
++ schedule_work(&flow_cache_gc_work);
++ }
+ }
+
+ static void __flow_cache_shrink(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp,
+ int shrink_to)
+ {
+- struct flow_cache_entry *fle, **flp;
+- int i;
++ struct flow_cache_entry *fle;
++ struct hlist_node *entry, *tmp;
++ LIST_HEAD(gc_list);
++ int i, deleted = 0;
+
+ for (i = 0; i < flow_cache_hash_size(fc); i++) {
+ int saved = 0;
+
+- flp = &fcp->hash_table[i];
+- while ((fle = *flp) != NULL) {
++ hlist_for_each_entry_safe(fle, entry, tmp,
++ &fcp->hash_table[i], u.hlist) {
+ if (saved < shrink_to &&
+ flow_entry_valid(fle)) {
+ saved++;
+- flp = &fle->next;
+ } else {
+- *flp = fle->next;
+- flow_entry_kill(fc, fcp, fle);
++ deleted++;
++ hlist_del(&fle->u.hlist);
++ list_add_tail(&fle->u.gc_list, &gc_list);
+ }
+ }
+ }
++
++ flow_cache_queue_garbage(fcp, deleted, &gc_list);
+ }
+
+ static void flow_cache_shrink(struct flow_cache *fc,
+@@ -182,7 +216,8 @@ flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
+ {
+ struct flow_cache *fc = &flow_cache_global;
+ struct flow_cache_percpu *fcp;
+- struct flow_cache_entry *fle, **head;
++ struct flow_cache_entry *fle, *tfle;
++ struct hlist_node *entry;
+ struct flow_cache_object *flo;
+ unsigned int hash;
+
+@@ -200,12 +235,13 @@ flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
+ flow_new_hash_rnd(fc, fcp);
+
+ hash = flow_hash_code(fc, fcp, key);
+- head = &fcp->hash_table[hash];
+- for (fle = *head; fle; fle = fle->next) {
+- if (fle->family == family &&
+- fle->dir == dir &&
+- flow_key_compare(key, &fle->key) == 0)
++ hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
++ if (tfle->family == family &&
++ tfle->dir == dir &&
++ flow_key_compare(key, &tfle->key) == 0) {
++ fle = tfle;
+ break;
++ }
+ }
+
+ if (unlikely(!fle)) {
+@@ -214,12 +250,11 @@ flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
+
+ fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
+ if (fle) {
+- fle->next = *head;
+- *head = fle;
+ fle->family = family;
+ fle->dir = dir;
+ memcpy(&fle->key, key, sizeof(*key));
+ fle->object = NULL;
++ hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
+ fcp->hash_count++;
+ }
+ } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
+@@ -262,23 +297,26 @@ static void flow_cache_flush_tasklet(unsigned long data)
+ struct flow_flush_info *info = (void *)data;
+ struct flow_cache *fc = info->cache;
+ struct flow_cache_percpu *fcp;
+- int i;
++ struct flow_cache_entry *fle;
++ struct hlist_node *entry, *tmp;
++ LIST_HEAD(gc_list);
++ int i, deleted = 0;
+
+ fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
+ for (i = 0; i < flow_cache_hash_size(fc); i++) {
+- struct flow_cache_entry *fle;
+-
+- fle = fcp->hash_table[i];
+- for (; fle; fle = fle->next) {
++ hlist_for_each_entry_safe(fle, entry, tmp,
++ &fcp->hash_table[i], u.hlist) {
+ if (flow_entry_valid(fle))
+ continue;
+
+- if (fle->object)
+- fle->object->ops->delete(fle->object);
+- fle->object = NULL;
++ deleted++;
++ hlist_del(&fle->u.hlist);
++ list_add_tail(&fle->u.gc_list, &gc_list);
+ }
+ }
+
++ flow_cache_queue_garbage(fcp, deleted, &gc_list);
++
+ if (atomic_dec_and_test(&info->cpuleft))
+ complete(&info->completion);
+ }
+@@ -320,7 +358,7 @@ void flow_cache_flush(void)
+ static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp)
+ {
+- fcp->hash_table = (struct flow_cache_entry **)
++ fcp->hash_table = (struct hlist_head *)
+ __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
+ if (!fcp->hash_table)
+ panic("NET: failed to allocate flow cache order %lu\n", fc->order);
+@@ -354,7 +392,7 @@ static int flow_cache_init(struct flow_cache *fc)
+
+ for (order = 0;
+ (PAGE_SIZE << order) <
+- (sizeof(struct flow_cache_entry *)*flow_cache_hash_size(fc));
++ (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
+ order++)
+ /* NOTHING */;
+ fc->order = order;
+--
+1.7.0.2
+
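For readers skimming the patch above, here is a minimal userspace sketch of the delayed-deletion pattern it introduces: the fast path only unlinks an entry and queues it on a garbage list under a short lock, and a deferred task later splices the whole list off and runs the potentially slow destructors outside that lock. All names below are illustrative and not the kernel's; a plain pthread mutex stands in for the kernel spinlock and a direct function call stands in for schedule_work().

/*
 * Userspace sketch of the delayed-deletion pattern used by this patch.
 * Hot-path code unlinks entries and queues them on a private gc list;
 * a separate "gc task" later splices that list and frees the entries.
 * Illustrative only; this is not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct entry {
    int key;
    struct entry *next;          /* link in the gc list */
};

static struct entry *gc_list;    /* entries waiting to be destroyed */
static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Fast path: detach an entry and queue it for later destruction. */
static void queue_garbage(struct entry *e)
{
    pthread_mutex_lock(&gc_lock);
    e->next = gc_list;
    gc_list = e;
    pthread_mutex_unlock(&gc_lock);
    /* In the kernel this is where schedule_work() would be called. */
}

/* Deferred work: grab the whole list at once, then free outside the lock. */
static void gc_task(void)
{
    struct entry *list, *n;

    pthread_mutex_lock(&gc_lock);
    list = gc_list;
    gc_list = NULL;
    pthread_mutex_unlock(&gc_lock);

    while (list) {
        n = list->next;
        printf("freeing entry %d\n", list->key);
        free(list);              /* stands in for the slow destructor */
        list = n;
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct entry *e = malloc(sizeof(*e));
        e->key = i;
        queue_garbage(e);        /* cheap: unlink and enqueue only */
    }
    gc_task();                   /* the slow frees happen here, later */
    return 0;
}

The point of splicing the whole list off inside the lock and freeing outside it is that the per-entry destructors, however slow, never extend the critical section that the lookup path contends on, which is the speedup the commit message describes.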