Commit be4d4c98 authored by Natanael Copa

main/linux-grsec: grsec 2.1.14-2.6.32.9-201003112025 + fixes for ipsec and gre

parent ae99c382
@@ -4,7 +4,7 @@ _flavor=grsec
pkgname=linux-${_flavor}
pkgver=2.6.32.9
_kernver=2.6.32
-pkgrel=1
+pkgrel=2
pkgdesc="Linux kernel with grsecurity"
url=http://grsecurity.net
depends="mkinitfs linux-firmware"
@@ -14,8 +14,11 @@ _config=${config:-kernelconfig.${CARCH:-x86}}
install=
source="ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-$_kernver.tar.bz2
ftp://ftp.kernel.org/pub/linux/kernel/v2.6/patch-$pkgver.bz2
-grsecurity-2.1.14-2.6.32.9-201002231820.patch
+grsecurity-2.1.14-2.6.32.9-201003112025.patch
+net-2.6.git-87c1e12b5eeb7b30b4b41291bef8e0b41fc3dde9.patch
+xfrm-flow-cache-grsec.patch
ip_gre.patch
+ip_gre2.patch
arp.patch
kernelconfig.x86
"
@@ -122,7 +125,10 @@ firmware() {
md5sums="260551284ac224c3a43c4adac7df4879 linux-2.6.32.tar.bz2
7f615dd3b4a3b19fb86e479996a2deb5 patch-2.6.32.9.bz2
-7da77829d4d994498218c412caed1db8 grsecurity-2.1.14-2.6.32.9-201002231820.patch
+98721ae28fe928f970ce92d8fc99d3a0 grsecurity-2.1.14-2.6.32.9-201003112025.patch
+b60772a7fe2a6161e34514adcbddc191 net-2.6.git-87c1e12b5eeb7b30b4b41291bef8e0b41fc3dde9.patch
+a30b7b40203f8063abd1afc57d98e559 xfrm-flow-cache-grsec.patch
3ef822f3a2723b9a80c3f12954457225 ip_gre.patch
+13ca9e91700e459da269c957062bbea7 ip_gre2.patch
4c39a161d918e7f274292ecfd168b891 arp.patch
-782074af6a1f1b1b1c9a33f5ac1b42bf kernelconfig.x86"
+7f442049b29ab749180e54ff8f20f1d0 kernelconfig.x86"
(diff of grsecurity-2.1.14-2.6.32.9-201003112025.patch not shown: too large to display)
--- linux-2.6.32/net/ipv4/ip_gre.c.orig
+++ linux-2.6.32/net/ipv4/ip_gre.c
@@ -803,11 +803,13 @@
tunnel->err_count = 0;
}
- max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen;
+ max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + rt->u.dst.header_len;
if (skb_headroom(skb) < max_headroom || skb_shared(skb)||
(skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+ if (max_headroom > dev->needed_headroom)
+ dev->needed_headroom = max_headroom;
if (!new_skb) {
ip_rt_put(rt);
stats->tx_dropped++;
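The hunk above (presumably the new ip_gre2.patch, judging from the source list) adds rt->u.dst.header_len, which is nonzero when an IPsec transform is bound to the tunnel route, into the GRE headroom calculation, and caches the enlarged value in dev->needed_headroom. A standalone sketch of the arithmetic, with invented numbers, to show what the extra term changes:

/* Illustration only: invented numbers showing the effect of including
 * rt->u.dst.header_len in max_headroom. Not kernel code. */
#include <stdio.h>

int main(void)
{
	int ll_reserved = 16;	/* LL_RESERVED_SPACE(tdev), hypothetical */
	int gre_hlen    = 24;	/* outer IP header + GRE header */
	int xfrm_hlen   = 32;	/* rt->u.dst.header_len, hypothetical */

	int old_max = ll_reserved + gre_hlen;             /* 40 */
	int new_max = ll_reserved + gre_hlen + xfrm_hlen; /* 72 */

	/* With old_max, an skb arriving with 48 bytes of headroom passes
	 * the check even though the xfrm output path will need 72, so it
	 * has to be expanded again further down for every packet. With
	 * new_max the skb is reallocated once with enough room, and the
	 * dev->needed_headroom update lets later skbs be sized correctly
	 * from the start. */
	printf("old max_headroom=%d, new max_headroom=%d\n", old_max, new_max);
	return 0;
}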
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.32.9
-# Wed Mar 3 11:01:05 2010
+# Mon Mar 15 14:11:23 2010
#
# CONFIG_64BIT is not set
CONFIG_X86_32=y
@@ -4352,6 +4352,7 @@ CONFIG_GRKERNSEC_CHROOT_CAPS=y
# CONFIG_GRKERNSEC_EXECLOG is not set
CONFIG_GRKERNSEC_RESLOG=y
# CONFIG_GRKERNSEC_CHROOT_EXECLOG is not set
+# CONFIG_GRKERNSEC_AUDIT_PTRACE is not set
# CONFIG_GRKERNSEC_AUDIT_CHDIR is not set
# CONFIG_GRKERNSEC_AUDIT_MOUNT is not set
CONFIG_GRKERNSEC_SIGNAL=y
From 87c1e12b5eeb7b30b4b41291bef8e0b41fc3dde9 Mon Sep 17 00:00:00 2001
From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Tue, 2 Mar 2010 02:51:56 +0000
Subject: [PATCH] ipsec: Fix bogus bundle flowi
When I merged the bundle creation code, I introduced a bogus
flowi value in the bundle. Instead of getting from the caller,
it was instead set to the flow in the route object, which is
totally different.
The end result is that the bundles we created never match, and
we instead end up with an ever growing bundle list.
Thanks to Jamal for finding this problem.
Reported-by: Jamal Hadi Salim <hadi@cyberus.ca>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Acked-by: Steffen Klassert <steffen.klassert@secunet.com>
Acked-by: Jamal Hadi Salim <hadi@cyberus.ca>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
include/net/xfrm.h | 3 ++-
net/ipv4/xfrm4_policy.c | 5 +++--
net/ipv6/xfrm6_policy.c | 3 ++-
net/xfrm/xfrm_policy.c | 7 ++++---
4 files changed, 11 insertions(+), 7 deletions(-)
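In code terms, the bogus key came from copying the route's own flowi into the bundle inside the per-family fill_dst callback; the fix threads the caller's flowi down to that callback, as the hunks below show. A toy model of the before/after shape, with simplified, hypothetical types rather than the kernel's structures:

/* Toy model of the fix; all types and names here are illustrative. */
struct flowi  { unsigned int saddr, daddr; };
struct route  { struct flowi fl; };	/* the route's own flow, not the lookup key */
struct bundle { struct flowi fl; struct route *route; };

/* Before: keyed by the route's flowi, which differs from the flow the
 * caller looked up, so cached bundles never match and keep piling up. */
static void fill_dst_old(struct bundle *b)
{
	b->fl = b->route->fl;
}

/* After: the caller passes its flow key down and the bundle stores it,
 * so subsequent lookups for the same flow hit the cache. */
static void fill_dst_new(struct bundle *b, const struct flowi *fl)
{
	b->fl = *fl;
}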
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index a7df327..d74e080 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -275,7 +275,8 @@ struct xfrm_policy_afinfo {
struct dst_entry *dst,
int nfheader_len);
int (*fill_dst)(struct xfrm_dst *xdst,
- struct net_device *dev);
+ struct net_device *dev,
+ struct flowi *fl);
};
extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 67107d6..e4a1483 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -91,11 +91,12 @@ static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
return 0;
}
-static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ struct flowi *fl)
{
struct rtable *rt = (struct rtable *)xdst->route;
- xdst->u.rt.fl = rt->fl;
+ xdst->u.rt.fl = *fl;
xdst->u.dst.dev = dev;
dev_hold(dev);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index dbdc696..ae18165 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -116,7 +116,8 @@ static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst,
return 0;
}
-static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ struct flowi *fl)
{
struct rt6_info *rt = (struct rt6_info*)xdst->route;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 34a5ef8..843e066 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1372,7 +1372,8 @@ static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst,
return err;
}
-static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
+static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
+ struct flowi *fl)
{
struct xfrm_policy_afinfo *afinfo =
xfrm_policy_get_afinfo(xdst->u.dst.ops->family);
@@ -1381,7 +1382,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev)
if (!afinfo)
return -EINVAL;
- err = afinfo->fill_dst(xdst, dev);
+ err = afinfo->fill_dst(xdst, dev, fl);
xfrm_policy_put_afinfo(afinfo);
@@ -1486,7 +1487,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) {
struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev;
- err = xfrm_fill_dst(xdst, dev);
+ err = xfrm_fill_dst(xdst, dev, fl);
if (err)
goto free_dst;
--
1.7.0.2
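The second patch, below, replaces the global flow cache in net/core/flow.c with an embeddable struct flow_cache: callers own a cache instance, entries are caller-sized via entry_size, and resolution moves out of the cache (the old resolver callback is gone; flow_cache_lookup simply returns the entry). A hedged usage sketch against the new flow.h API; the embedding structure and its payload are invented for illustration, not part of the patch:

/* Sketch only, against the API the patch adds to include/net/flow.h.
 * struct my_entry and its payload are hypothetical. */
#include <net/flow.h>

struct my_entry {
	struct flow_cache_entry fce;	/* must be first: the cache
					 * allocates entry_size bytes and
					 * hands back the embedded entry */
	void *object;			/* caller-specific payload,
					 * resolved on a miss */
};

static struct flow_cache my_cache;

static int my_cache_start(void)
{
	/* flow_cache_init() BUG_ONs if entry_size is smaller than
	 * struct flow_cache_entry */
	return flow_cache_init(&my_cache, sizeof(struct my_entry));
}

static void *my_cache_get(struct flowi *key, u16 family, u8 dir)
{
	struct flow_cache_entry *fce;
	void *obj = NULL;

	/* flow_cache_lookup() returns with BHs disabled on success;
	 * NULL means the per-cpu table is not ready or allocation failed */
	fce = flow_cache_lookup(&my_cache, key, family, dir);
	if (fce) {
		obj = ((struct my_entry *) fce)->object;
		flow_cache_entry_put(fce);	/* re-enables BHs */
	}
	return obj;
}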
From 3519d7c86a6e87584d25f3292b53d3ce865a659e Mon Sep 17 00:00:00 2001
From: Natanael Copa <ncopa@alpinelinux.org>
Date: Mon, 15 Mar 2010 15:31:37 +0000
Subject: [PATCH] xfrm: flow cache2
---
include/net/flow.h | 39 ++++-
include/net/netns/xfrm.h | 4 +
include/net/xfrm.h | 1 +
net/core/flow.c | 342 ++++++++++++++++++--------------------
net/ipv6/inet6_connection_sock.c | 6 +-
net/xfrm/xfrm_policy.c | 271 +++++++++++++++++++++---------
6 files changed, 394 insertions(+), 269 deletions(-)
diff --git a/include/net/flow.h b/include/net/flow.h
index 809970b..814a9d2 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -8,6 +8,9 @@
#define _NET_FLOW_H
#include <linux/in6.h>
+#include <linux/notifier.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
#include <asm/atomic.h>
struct flowi {
@@ -86,13 +89,37 @@ struct flowi {
struct net;
struct sock;
-typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
- u8 dir, void **objp, atomic_t **obj_refp);
-extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
- u8 dir, flow_resolve_t resolver);
-extern void flow_cache_flush(void);
-extern atomic_t flow_cache_genid;
+struct flow_cache_percpu;
+struct flow_cache_entry;
+
+struct flow_cache {
+ u32 hash_shift;
+ u32 order;
+ struct flow_cache_percpu * percpu;
+ struct notifier_block hotcpu_notifier;
+ int low_watermark;
+ int high_watermark;
+ struct timer_list rnd_timer;
+ struct kmem_cache * flow_cachep;
+};
+
+struct flow_cache_entry {
+ struct flow_cache_entry *next;
+ struct flowi key;
+ u16 family;
+ u8 dir;
+};
+
+extern struct flow_cache_entry *flow_cache_lookup(
+ struct flow_cache *cache, struct flowi *key,
+ u16 family, u8 dir);
+extern void flow_cache_entry_put(struct flow_cache_entry *fce);
+
+void flow_cache_flush(struct flow_cache *fc,
+ void (*flush)(struct flow_cache *fc, struct flow_cache_entry *fce));
+extern int flow_cache_init(struct flow_cache *cache, size_t entry_size);
+extern void flow_cache_fini(struct flow_cache *cache);
static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
{
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 1ba9127..4bb72c4 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -41,6 +41,10 @@ struct netns_xfrm {
struct xfrm_policy_hash policy_bydst[XFRM_POLICY_MAX * 2];
unsigned int policy_count[XFRM_POLICY_MAX * 2];
struct work_struct policy_hash_work;
+ atomic_t policy_genid;
+ struct hlist_head policy_gc_list;
+ struct work_struct policy_gc_work;
+ struct flow_cache flow_cache;
struct sock *nlsk;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 223e90a..5cd4e29 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -487,6 +487,7 @@ struct xfrm_policy
struct xfrm_lifetime_cfg lft;
struct xfrm_lifetime_cur curlft;
struct dst_entry *bundles;
+ atomic_t bundles_genid;
struct xfrm_policy_walk_entry walk;
u8 type;
u8 action;
diff --git a/net/core/flow.c b/net/core/flow.c
index 5b27992..e3782c2 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -25,114 +25,85 @@
#include <asm/atomic.h>
#include <linux/security.h>
-struct flow_cache_entry {
- struct flow_cache_entry *next;
- u16 family;
- u8 dir;
- u32 genid;
- struct flowi key;
- void *object;
- atomic_t *object_ref;
-};
-
-atomic_t flow_cache_genid = ATOMIC_INIT(0);
-
-static u32 flow_hash_shift;
-#define flow_hash_size (1 << flow_hash_shift)
-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
-
-#define flow_table(cpu) (per_cpu(flow_tables, cpu))
-
-static struct kmem_cache *flow_cachep __read_mostly;
-static int flow_lwm, flow_hwm;
-
-struct flow_percpu_info {
- int hash_rnd_recalc;
- u32 hash_rnd;
- int count;
+struct flow_cache_percpu {
+ struct flow_cache_entry ** hash_table;
+ int hash_count;
+ u32 hash_rnd;
+ int hash_rnd_recalc;
+ struct tasklet_struct flush_tasklet;
};
-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
-
-#define flow_hash_rnd_recalc(cpu) \
- (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
-#define flow_hash_rnd(cpu) \
- (per_cpu(flow_hash_info, cpu).hash_rnd)
-#define flow_count(cpu) \
- (per_cpu(flow_hash_info, cpu).count)
-
-static struct timer_list flow_hash_rnd_timer;
-
-#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
struct flow_flush_info {
- atomic_t cpuleft;
- struct completion completion;
+ void (*flush)(struct flow_cache *fc, struct flow_cache_entry *fce);
+ struct flow_cache * cache;
+ atomic_t cpuleft;
+ struct completion completion;
};
-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
-#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
+#define flow_cache_hash_size(cache) (1 << (cache)->hash_shift)
+#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
static void flow_cache_new_hashrnd(unsigned long arg)
{
+ struct flow_cache *fc = (struct flow_cache *) arg;
int i;
for_each_possible_cpu(i)
- flow_hash_rnd_recalc(i) = 1;
+ per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;
- flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
- add_timer(&flow_hash_rnd_timer);
+ fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+ add_timer(&fc->rnd_timer);
}
-static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
-{
- if (fle->object)
- atomic_dec(fle->object_ref);
- kmem_cache_free(flow_cachep, fle);
- flow_count(cpu)--;
-}
-
-static void __flow_cache_shrink(int cpu, int shrink_to)
+static void __flow_cache_shrink(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp,
+ int shrink_to)
{
struct flow_cache_entry *fle, **flp;
int i;
- for (i = 0; i < flow_hash_size; i++) {
+ for (i = 0; i < flow_cache_hash_size(fc); i++) {
int k = 0;
- flp = &flow_table(cpu)[i];
+ flp = &fcp->hash_table[i];
while ((fle = *flp) != NULL && k < shrink_to) {
k++;
flp = &fle->next;
}
while ((fle = *flp) != NULL) {
*flp = fle->next;
- flow_entry_kill(cpu, fle);
+
+ kmem_cache_free(fc->flow_cachep, fle);
+ fcp->hash_count--;
}
}
}
-static void flow_cache_shrink(int cpu)
+static void flow_cache_shrink(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp)
{
- int shrink_to = flow_lwm / flow_hash_size;
+ int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);
- __flow_cache_shrink(cpu, shrink_to);
+ __flow_cache_shrink(fc, fcp, shrink_to);
}
-static void flow_new_hash_rnd(int cpu)
+static void flow_new_hash_rnd(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp)
{
- get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32));
- flow_hash_rnd_recalc(cpu) = 0;
-
- __flow_cache_shrink(cpu, 0);
+ get_random_bytes(&fcp->hash_rnd, sizeof(u32));
+ fcp->hash_rnd_recalc = 0;
+ __flow_cache_shrink(fc, fcp, 0);
}
-static u32 flow_hash_code(struct flowi *key, int cpu)
+static u32 flow_hash_code(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp,
+ struct flowi *key)
{
u32 *k = (u32 *) key;
- return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) &
- (flow_hash_size - 1));
+ return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
+ & (flow_cache_hash_size(fc) - 1));
}
#if (BITS_PER_LONG == 64)
@@ -165,128 +136,100 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
return 0;
}
-void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
- flow_resolve_t resolver)
+struct flow_cache_entry *flow_cache_lookup(struct flow_cache *fc,
+ struct flowi *key,
+ u16 family, u8 dir)
{
struct flow_cache_entry *fle, **head;
+ struct flow_cache_percpu *fcp;
unsigned int hash;
- int cpu;
local_bh_disable();
- cpu = smp_processor_id();
+ fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
fle = NULL;
/* Packet really early in init? Making flow_cache_init a
* pre-smp initcall would solve this. --RR */
- if (!flow_table(cpu))
+ if (!fcp->hash_table)
goto nocache;
- if (flow_hash_rnd_recalc(cpu))
- flow_new_hash_rnd(cpu);
- hash = flow_hash_code(key, cpu);
+ if (fcp->hash_rnd_recalc)
+ flow_new_hash_rnd(fc, fcp);
+
+ hash = flow_hash_code(fc, fcp, key);
- head = &flow_table(cpu)[hash];
+ head = &fcp->hash_table[hash];
for (fle = *head; fle; fle = fle->next) {
if (fle->family == family &&
fle->dir == dir &&
flow_key_compare(key, &fle->key) == 0) {
- if (fle->genid == atomic_read(&flow_cache_genid)) {
- void *ret = fle->object;
-
- if (ret)
- atomic_inc(fle->object_ref);
- local_bh_enable();
-
- return ret;
- }
- break;
- }
- }
-
- if (!fle) {
- if (flow_count(cpu) > flow_hwm)
- flow_cache_shrink(cpu);
-
- fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
- if (fle) {
- fle->next = *head;
- *head = fle;
- fle->family = family;
- fle->dir = dir;
- memcpy(&fle->key, key, sizeof(*key));
- fle->object = NULL;
- flow_count(cpu)++;
+ return fle;
}
}
-nocache:
- {
- int err;
- void *obj;
- atomic_t *obj_ref;
-
- err = resolver(net, key, family, dir, &obj, &obj_ref);
+ if (fcp->hash_count > fc->high_watermark)
+ flow_cache_shrink(fc, fcp);
- if (fle && !err) {
- fle->genid = atomic_read(&flow_cache_genid);
+ fle = kmem_cache_zalloc(fc->flow_cachep, GFP_ATOMIC);
+ if (!fle)
+ goto nocache;
- if (fle->object)
- atomic_dec(fle->object_ref);
+ fle->next = *head;
+ *head = fle;
+ fle->family = family;
+ fle->dir = dir;
+ memcpy(&fle->key, key, sizeof(*key));
+ fcp->hash_count++;
+ return fle;
- fle->object = obj;
- fle->object_ref = obj_ref;
- if (obj)
- atomic_inc(fle->object_ref);
- }
- local_bh_enable();
+nocache:
+ local_bh_enable();
+ return NULL;
+}
- if (err)
- obj = ERR_PTR(err);
- return obj;
- }
+void flow_cache_entry_put(struct flow_cache_entry *fce)
+{
+ local_bh_enable();
}
static void flow_cache_flush_tasklet(unsigned long data)
{
- struct flow_flush_info *info = (void *)data;
+ struct flow_flush_info *info = (void *) data;
+ struct flow_cache *fc = (void *) info->cache;
+ struct flow_cache_percpu *fcp;
int i;
- int cpu;
- cpu = smp_processor_id();
- for (i = 0; i < flow_hash_size; i++) {
- struct flow_cache_entry *fle;
+ if (info->flush == NULL)
+ goto done;
- fle = flow_table(cpu)[i];
- for (; fle; fle = fle->next) {
- unsigned genid = atomic_read(&flow_cache_genid);
-
- if (!fle->object || fle->genid == genid)
- continue;
+ fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
+ for (i = 0; i < flow_cache_hash_size(fc); i++) {
+ struct flow_cache_entry *fle;
- fle->object = NULL;
- atomic_dec(fle->object_ref);
- }
+ fle = fcp->hash_table[i];
+ for (; fle; fle = fle->next)
+ info->flush(fc, fle);
}
+done:
if (atomic_dec_and_test(&info->cpuleft))
complete(&info->completion);
}
-static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
static void flow_cache_flush_per_cpu(void *data)
{
struct flow_flush_info *info = data;
- int cpu;
struct tasklet_struct *tasklet;
+ int cpu;
cpu = smp_processor_id();
-
- tasklet = flow_flush_tasklet(cpu);
- tasklet->data = (unsigned long)info;
+ tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
+ tasklet->data = (unsigned long) data;
tasklet_schedule(tasklet);
}
-void flow_cache_flush(void)
+void flow_cache_flush(struct flow_cache *fc,
+ void (*flush)(struct flow_cache *fc, struct flow_cache_entry *fce))
{
struct flow_flush_info info;
static DEFINE_MUTEX(flow_flush_sem);
@@ -294,6 +237,8 @@ void flow_cache_flush(void)
/* Don't want cpus going down or up during this. */
get_online_cpus();
mutex_lock(&flow_flush_sem);
+ info.cache = fc;
+ info.flush = flush;
atomic_set(&info.cpuleft, num_online_cpus());
init_completion(&info.completion);
@@ -307,62 +252,99 @@ void flow_cache_flush(void)
put_online_cpus();
}
-static void __init flow_cache_cpu_prepare(int cpu)
+static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
+ struct flow_cache_percpu *fcp)
+{
+ fcp->hash_table = (struct flow_cache_entry **)
+ __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
+ fcp->hash_rnd_recalc = 1;
+ fcp->hash_count = 0;
+
+ tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
+}
+
+static int __cpuinit flow_cache_cpu(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
+ int cpu = (unsigned long) hcpu;
+ struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ flow_cache_cpu_prepare(fc, fcp);
+ if (!fcp->hash_table)
+ return NOTIFY_BAD;
+ break;
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ if (fcp->hash_table) {
+ __flow_cache_shrink(fc, fcp, 0);
+ free_pages((unsigned long) fcp->hash_table, fc->order);
+ fcp->hash_table = NULL;
+ }
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+int flow_cache_init(struct flow_cache *fc, size_t entry_size)
{
- struct tasklet_struct *tasklet;
unsigned long order;
+ int i, r;
+
+ BUG_ON(entry_size < sizeof(struct flow_cache_entry));
+ fc->flow_cachep = kmem_cache_create("flow_cache",
+ entry_size,
+ 0, SLAB_PANIC,
+ NULL);
+ fc->hash_shift = 10;
+ fc->low_watermark = 2 * flow_cache_hash_size(fc);
+ fc->high_watermark = 4 * flow_cache_hash_size(fc);
+ fc->percpu = alloc_percpu(struct flow_cache_percpu);
for (order = 0;
(PAGE_SIZE << order) <
- (sizeof(struct flow_cache_entry *)*flow_hash_size);
+ (sizeof(struct flow_cache_entry *) * flow_cache_hash_size(fc));
order++)
/* NOTHING */;
+ fc->order = order;
- flow_table(cpu) = (struct flow_cache_entry **)
- __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
- if (!flow_table(cpu))
- panic("NET: failed to allocate flow cache order %lu\n", order);
+ setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd, (unsigned long) fc);
+ fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
+ add_timer(&fc->rnd_timer);
- flow_hash_rnd_recalc(cpu) = 1;
- flow_count(cpu) = 0;
+ for_each_online_cpu(i) {
+ r = flow_cache_cpu(&fc->hotcpu_notifier,
+ CPU_UP_PREPARE, (void*) i);
+ if (r != NOTIFY_OK)
+ panic("NET: failed to allocate flow cache order %lu\n", order);
+ }
- tasklet = flow_flush_tasklet(cpu);
- tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
-}
+ fc->hotcpu_notifier = (struct notifier_block){
+ .notifier_call = flow_cache_cpu,
+ };
+ register_hotcpu_notifier(&fc->hotcpu_notifier);
-static int flow_cache_cpu(struct notifier_block *nfb,
- unsigned long action,
- void *hcpu)
-{
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
- __flow_cache_shrink((unsigned long)hcpu, 0);
- return NOTIFY_OK;
+ return 0;
}
-static int __init flow_cache_init(void)
+void flow_cache_fini(struct flow_cache *fc)
{
int i;
- flow_cachep = kmem_cache_create("flow_cache",
- sizeof(struct flow_cache_entry),
- 0, SLAB_PANIC,
- NULL);
- flow_hash_shift = 10;
- flow_lwm = 2 * flow_hash_size;