author     Natanael Copa <ncopa@alpinelinux.org>  2012-03-12 12:38:13 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>  2012-03-12 15:33:54 +0000
commit     ce55fee0bbc4ab5a81b930d244d716a3c4273983 (patch)
tree       6e563f0f3e2d6ea7ce8d5348108092d3f08dae32 /main/linux-grsec/inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch
parent     4fc2408b8f327b1313c349b63f9eec60d8cfe13d (diff)
main/linux-grsec: upgrade to 3.2.9, update mtu fix patch
The mtu regression seems to have been fixed upstream. We backport the fix.
Diffstat (limited to 'main/linux-grsec/inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch')
-rw-r--r--  main/linux-grsec/inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch | 174
1 file changed, 174 insertions(+), 0 deletions(-)
diff --git a/main/linux-grsec/inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch b/main/linux-grsec/inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch
new file mode 100644
index 0000000000..0f26cf40ae
--- /dev/null
+++ b/main/linux-grsec/inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch
@@ -0,0 +1,174 @@
+From: Steffen Klassert <steffen.klassert@secunet.com>
+Date: Tue, 6 Mar 2012 21:20:26 +0000 (+0000)
+Subject: inetpeer: Invalidate the inetpeer tree along with the routing cache
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdavem%2Fnet.git;a=commitdiff_plain;h=5faa5df1fa2024bd750089ff21dcc4191798263d
+
+inetpeer: Invalidate the inetpeer tree along with the routing cache
+
+We initialize the routing metrics with the values cached on the
+inetpeer in rt_init_metrics(). So if we have the metrics cached on the
+inetpeer, we ignore the user configured fib_metrics.
+
+To fix this issue, we replace the old tree with a fresh initialized
+inet_peer_base. The old tree is removed later with a delayed work queue.
+
+Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
+index 06b795d..ff04a33 100644
+--- a/include/net/inetpeer.h
++++ b/include/net/inetpeer.h
+@@ -41,6 +41,7 @@ struct inet_peer {
+ u32 pmtu_orig;
+ u32 pmtu_learned;
+ struct inetpeer_addr_base redirect_learned;
++ struct list_head gc_list;
+ /*
+ * Once inet_peer is queued for deletion (refcnt == -1), following fields
+ * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
+@@ -96,6 +97,8 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
+ extern void inet_putpeer(struct inet_peer *p);
+ extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+
++extern void inetpeer_invalidate_tree(int family);
++
+ /*
+ * temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
+ * tcp_ts_stamp if no refcount is taken on inet_peer
+diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
+index bf4a9c4..deea2e9 100644
+--- a/net/ipv4/inetpeer.c
++++ b/net/ipv4/inetpeer.c
+@@ -17,6 +17,7 @@
+ #include <linux/kernel.h>
+ #include <linux/mm.h>
+ #include <linux/net.h>
++#include <linux/workqueue.h>
+ #include <net/ip.h>
+ #include <net/inetpeer.h>
+ #include <net/secure_seq.h>
+@@ -66,6 +67,11 @@
+
+ static struct kmem_cache *peer_cachep __read_mostly;
+
++static LIST_HEAD(gc_list);
++static const int gc_delay = 60 * HZ;
++static struct delayed_work gc_work;
++static DEFINE_SPINLOCK(gc_lock);
++
+ #define node_height(x) x->avl_height
+
+ #define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
+@@ -102,6 +108,50 @@ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries m
+ int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
+ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
+
++static void inetpeer_gc_worker(struct work_struct *work)
++{
++ struct inet_peer *p, *n;
++ LIST_HEAD(list);
++
++ spin_lock_bh(&gc_lock);
++ list_replace_init(&gc_list, &list);
++ spin_unlock_bh(&gc_lock);
++
++ if (list_empty(&list))
++ return;
++
++ list_for_each_entry_safe(p, n, &list, gc_list) {
++
++ if(need_resched())
++ cond_resched();
++
++ if (p->avl_left != peer_avl_empty) {
++ list_add_tail(&p->avl_left->gc_list, &list);
++ p->avl_left = peer_avl_empty;
++ }
++
++ if (p->avl_right != peer_avl_empty) {
++ list_add_tail(&p->avl_right->gc_list, &list);
++ p->avl_right = peer_avl_empty;
++ }
++
++ n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
++
++ if (!atomic_read(&p->refcnt)) {
++ list_del(&p->gc_list);
++ kmem_cache_free(peer_cachep, p);
++ }
++ }
++
++ if (list_empty(&list))
++ return;
++
++ spin_lock_bh(&gc_lock);
++ list_splice(&list, &gc_list);
++ spin_unlock_bh(&gc_lock);
++
++ schedule_delayed_work(&gc_work, gc_delay);
++}
+
+ /* Called from ip_output.c:ip_init */
+ void __init inet_initpeers(void)
+@@ -126,6 +176,7 @@ void __init inet_initpeers(void)
+ 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
+ NULL);
+
++ INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
+ }
+
+ static int addr_compare(const struct inetpeer_addr *a,
+@@ -449,7 +500,7 @@ relookup:
+ p->pmtu_orig = 0;
+ p->redirect_genid = 0;
+ memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
+-
++ INIT_LIST_HEAD(&p->gc_list);
+
+ /* Link the node. */
+ link_to_pool(p, base);
+@@ -509,3 +560,30 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
+ return rc;
+ }
+ EXPORT_SYMBOL(inet_peer_xrlim_allow);
++
++void inetpeer_invalidate_tree(int family)
++{
++ struct inet_peer *old, *new, *prev;
++ struct inet_peer_base *base = family_to_base(family);
++
++ write_seqlock_bh(&base->lock);
++
++ old = base->root;
++ if (old == peer_avl_empty_rcu)
++ goto out;
++
++ new = peer_avl_empty_rcu;
++
++ prev = cmpxchg(&base->root, old, new);
++ if (prev == old) {
++ base->total = 0;
++ spin_lock(&gc_lock);
++ list_add_tail(&prev->gc_list, &gc_list);
++ spin_unlock(&gc_lock);
++ schedule_delayed_work(&gc_work, gc_delay);
++ }
++
++out:
++ write_sequnlock_bh(&base->lock);
++}
++EXPORT_SYMBOL(inetpeer_invalidate_tree);
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index bcacf54..23ce0c1 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -938,6 +938,7 @@ static void rt_cache_invalidate(struct net *net)
+ get_random_bytes(&shuffle, sizeof(shuffle));
+ atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+ redirect_genid++;
++ inetpeer_invalidate_tree(AF_INET);
+ }
+
+ /*