Commit ce55fee0 authored by Natanael Copa

main/linux-grsec: upgrade to 3.2.9, update mtu fix patches

The mtu regression seems to have been fixed upstream, so we backport the upstream fixes.
parent 4fc2408b
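Both backported fixes are plain commitdiff exports from upstream net.git; the X-Git-Url lines in the patch headers below give the exact commits. One way to fetch them into the names used in source= (a sketch; any HTTP client works):

  wget -O inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch \
    'http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdavem%2Fnet.git;a=commitdiff_plain;h=5faa5df1fa2024bd750089ff21dcc4191798263d'
  wget -O route-remove-redirect-genid.patch \
    'http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdavem%2Fnet.git;a=commitdiff_plain;h=ac3f48de09d8f4b73397047e413fadff7f65cfa7'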
@@ -2,9 +2,9 @@
_flavor=grsec
pkgname=linux-${_flavor}
-pkgver=3.2.7
+pkgver=3.2.9
_kernver=3.2
-pkgrel=1
+pkgrel=0
pkgdesc="Linux kernel with grsecurity"
url=http://grsecurity.net
depends="mkinitfs linux-firmware"
@@ -14,12 +14,13 @@ _config=${config:-kernelconfig.${CARCH}}
install=
source="http://ftp.kernel.org/pub/linux/kernel/v3.0/linux-$_kernver.tar.bz2
	http://ftp.kernel.org/pub/linux/kernel/v3.0/patch-$pkgver.bz2
-	grsecurity-2.9-3.2.7-201202261954.patch
+	grsecurity-2.9-3.2.9-201203112136.patch
	0004-arp-flush-arp-cache-on-device-change.patch
	x86-centaur-enable-cx8-for-via-eden-too.patch
-	linux-3.0.x-regression-with-ipv4-routes-having-mtu.patch
+	inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch
+	route-remove-redirect-genid.patch
	kernelconfig.x86
	kernelconfig.x86_64
@@ -140,10 +141,11 @@ dev() {
}
md5sums="7ceb61f87c097fc17509844b71268935  linux-3.2.tar.bz2
-899624bffed6a19578613b672cc9483f  patch-3.2.7.bz2
-27f2e8898e796ff0301f3193e2ba76b3  grsecurity-2.9-3.2.7-201202261954.patch
+4610f3e62a5446422d1e81a90ab3cd06  patch-3.2.9.bz2
+349de864a65ad6714e20bf8a14dd8756  grsecurity-2.9-3.2.9-201203112136.patch
776adeeb5272093574f8836c5037dd7d  0004-arp-flush-arp-cache-on-device-change.patch
f3eda7112ef074a4121ec6de943c63ee  x86-centaur-enable-cx8-for-via-eden-too.patch
-62cc7d7b5ba7ef05b72ff91c0411c189  linux-3.0.x-regression-with-ipv4-routes-having-mtu.patch
+0e57daa3b43acadd82ae66fa9e3f7da1  inetpeer-invalidate-the-inetpeer-tree-along-with-the-routing-cache.patch
+06061e5de624849e082c3c8dbe37c908  route-remove-redirect-genid.patch
a4e7d46b18ca1495a1605c8520d74ee3  kernelconfig.x86
147306257d376f27799e9e72a303c80c  kernelconfig.x86_64"
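For anyone reproducing this bump: the md5sums= block is not edited by hand. A sketch of the usual workflow, assuming a checked-out aports tree and the abuild tooling:

  cd aports/main/linux-grsec
  # bump pkgver, reset pkgrel, swap the patch names in source=, then:
  abuild checksum   # downloads the sources and rewrites the md5sums= block
  abuild -r         # test build to confirm the new patches still apply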
From: Steffen Klassert <steffen.klassert@secunet.com>
Date: Tue, 6 Mar 2012 21:20:26 +0000 (+0000)
Subject: inetpeer: Invalidate the inetpeer tree along with the routing cache
X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdavem%2Fnet.git;a=commitdiff_plain;h=5faa5df1fa2024bd750089ff21dcc4191798263d
inetpeer: Invalidate the inetpeer tree along with the routing cache
We initialize the routing metrics with the values cached on the
inetpeer in rt_init_metrics(). So if we have the metrics cached on the
inetpeer, we ignore the user-configured fib_metrics.
To fix this issue, we replace the old tree with a freshly initialized
inet_peer_base. The old tree is removed later with a delayed work queue.
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 06b795d..ff04a33 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -41,6 +41,7 @@ struct inet_peer {
u32 pmtu_orig;
u32 pmtu_learned;
struct inetpeer_addr_base redirect_learned;
+ struct list_head gc_list;
/*
* Once inet_peer is queued for deletion (refcnt == -1), following fields
* are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
@@ -96,6 +97,8 @@ static inline struct inet_peer *inet_getpeer_v6(const struct in6_addr *v6daddr,
extern void inet_putpeer(struct inet_peer *p);
extern bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
+extern void inetpeer_invalidate_tree(int family);
+
/*
* temporary check to make sure we dont access rid, ip_id_count, tcp_ts,
* tcp_ts_stamp if no refcount is taken on inet_peer
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index bf4a9c4..deea2e9 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -17,6 +17,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
+#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>
@@ -66,6 +67,11 @@
static struct kmem_cache *peer_cachep __read_mostly;
+static LIST_HEAD(gc_list);
+static const int gc_delay = 60 * HZ;
+static struct delayed_work gc_work;
+static DEFINE_SPINLOCK(gc_lock);
+
#define node_height(x) x->avl_height
#define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
@@ -102,6 +108,50 @@ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries m
int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */
+static void inetpeer_gc_worker(struct work_struct *work)
+{
+ struct inet_peer *p, *n;
+ LIST_HEAD(list);
+
+ spin_lock_bh(&gc_lock);
+ list_replace_init(&gc_list, &list);
+ spin_unlock_bh(&gc_lock);
+
+ if (list_empty(&list))
+ return;
+
+ list_for_each_entry_safe(p, n, &list, gc_list) {
+
+ if(need_resched())
+ cond_resched();
+
+ if (p->avl_left != peer_avl_empty) {
+ list_add_tail(&p->avl_left->gc_list, &list);
+ p->avl_left = peer_avl_empty;
+ }
+
+ if (p->avl_right != peer_avl_empty) {
+ list_add_tail(&p->avl_right->gc_list, &list);
+ p->avl_right = peer_avl_empty;
+ }
+
+ n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
+
+ if (!atomic_read(&p->refcnt)) {
+ list_del(&p->gc_list);
+ kmem_cache_free(peer_cachep, p);
+ }
+ }
+
+ if (list_empty(&list))
+ return;
+
+ spin_lock_bh(&gc_lock);
+ list_splice(&list, &gc_list);
+ spin_unlock_bh(&gc_lock);
+
+ schedule_delayed_work(&gc_work, gc_delay);
+}
/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
@@ -126,6 +176,7 @@ void __init inet_initpeers(void)
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
NULL);
+ INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
}
static int addr_compare(const struct inetpeer_addr *a,
@@ -449,7 +500,7 @@ relookup:
p->pmtu_orig = 0;
p->redirect_genid = 0;
memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
-
+ INIT_LIST_HEAD(&p->gc_list);
/* Link the node. */
link_to_pool(p, base);
@@ -509,3 +560,30 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
+
+void inetpeer_invalidate_tree(int family)
+{
+ struct inet_peer *old, *new, *prev;
+ struct inet_peer_base *base = family_to_base(family);
+
+ write_seqlock_bh(&base->lock);
+
+ old = base->root;
+ if (old == peer_avl_empty_rcu)
+ goto out;
+
+ new = peer_avl_empty_rcu;
+
+ prev = cmpxchg(&base->root, old, new);
+ if (prev == old) {
+ base->total = 0;
+ spin_lock(&gc_lock);
+ list_add_tail(&prev->gc_list, &gc_list);
+ spin_unlock(&gc_lock);
+ schedule_delayed_work(&gc_work, gc_delay);
+ }
+
+out:
+ write_sequnlock_bh(&base->lock);
+}
+EXPORT_SYMBOL(inetpeer_invalidate_tree);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index bcacf54..23ce0c1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -938,6 +938,7 @@ static void rt_cache_invalidate(struct net *net)
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
redirect_genid++;
+ inetpeer_invalidate_tree(AF_INET);
}
/*
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index f30112f..26a6249 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1841,6 +1841,22 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
return mtu;
}
+static void __rt_init_metrics(struct rtable *rt, struct fib_info *fi,
+ struct inet_peer *peer)
+{
+ if (peer && fi->fib_metrics == (u32 *) dst_default_metrics) {
+ dst_init_metrics(&rt->dst, peer->metrics, false);
+ return;
+ }
+
+ if (fi->fib_metrics != (u32 *) dst_default_metrics) {
+ rt->fi = fi;
+ atomic_inc(&fi->fib_clntref);
+ }
+
+ dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+}
+
static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
struct fib_info *fi)
{
@@ -1859,7 +1875,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
if (inet_metrics_new(peer))
memcpy(peer->metrics, fi->fib_metrics,
sizeof(u32) * RTAX_MAX);
- dst_init_metrics(&rt->dst, peer->metrics, false);
+
+ __rt_init_metrics(rt, fi, peer);
check_peer_pmtu(&rt->dst, peer);
if (peer->redirect_genid != redirect_genid)
@@ -1869,13 +1886,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
rt->rt_gateway = peer->redirect_learned.a4;
rt->rt_flags |= RTCF_REDIRECTED;
}
- } else {
- if (fi->fib_metrics != (u32 *) dst_default_metrics) {
- rt->fi = fi;
- atomic_inc(&fi->fib_clntref);
- }
- dst_init_metrics(&rt->dst, fi->fib_metrics, true);
- }
+ } else
+ __rt_init_metrics(rt, fi, NULL);
}
static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
One hunk was removed from this patch.
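The user-visible symptom this first patch addresses: metrics cached on an inetpeer could silently override a route's user-configured metrics such as mtu. A rough way to probe for it (hypothetical TEST-NET addresses; whether the stale value shows up depends on what the inetpeer has already cached for that destination):

  ip route add 192.0.2.0/24 via 198.51.100.1 mtu 1400
  ip route get 192.0.2.5   # affected kernels may report a cached mtu
                           # instead of the configured 1400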
From: Steffen Klassert <steffen.klassert@secunet.com>
Date: Tue, 6 Mar 2012 21:21:10 +0000 (+0000)
Subject: route: Remove redirect_genid
X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Fdavem%2Fnet.git;a=commitdiff_plain;h=ac3f48de09d8f4b73397047e413fadff7f65cfa7
route: Remove redirect_genid
As we invalidate the inetpeer tree along with the routing cache now,
we don't need a genid to reset the redirect handling when the routing
cache is flushed.
Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index ff04a33..b94765e 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -35,7 +35,6 @@ struct inet_peer {
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
- int redirect_genid;
unsigned long rate_last;
unsigned long pmtu_expires;
u32 pmtu_orig;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 23ce0c1..0197747 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -132,7 +132,6 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
-static int redirect_genid;
static struct delayed_work expires_work;
static unsigned long expires_ljiffies;
@@ -937,7 +936,6 @@ static void rt_cache_invalidate(struct net *net)
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
- redirect_genid++;
inetpeer_invalidate_tree(AF_INET);
}
@@ -1486,10 +1484,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
peer = rt->peer;
if (peer) {
- if (peer->redirect_learned.a4 != new_gw ||
- peer->redirect_genid != redirect_genid) {
+ if (peer->redirect_learned.a4 != new_gw) {
peer->redirect_learned.a4 = new_gw;
- peer->redirect_genid = redirect_genid;
atomic_inc(&__rt_peer_genid);
}
check_peer_redir(&rt->dst, peer);
@@ -1794,8 +1790,6 @@ static void ipv4_validate_peer(struct rtable *rt)
if (peer) {
check_peer_pmtu(&rt->dst, peer);
- if (peer->redirect_genid != redirect_genid)
- peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway)
check_peer_redir(&rt->dst, peer);
@@ -1959,8 +1953,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
dst_init_metrics(&rt->dst, peer->metrics, false);
check_peer_pmtu(&rt->dst, peer);
- if (peer->redirect_genid != redirect_genid)
- peer->redirect_learned.a4 = 0;
+
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
rt->rt_gateway = peer->redirect_learned.a4;
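With both patches applied, flushing the IPv4 routing cache also schedules the old inetpeer tree for garbage collection, so learned redirects and cached metrics are discarded together; that is what makes redirect_genid redundant. A quick post-upgrade sanity check (the exact version string is Alpine's kernel naming and may differ):

  uname -r                # expect a 3.2.9 grsec kernel
  ip route flush cache    # now also invalidates the inetpeer tree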