fedora core 6 1.2949 + vserver 2.2.0
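This diff brings net/core/flow.c in line with upstream cleanups: the per-entry sk_sid field is removed from the flow cache and the resolver callback now returns an error code that flow_cache_lookup() propagates to callers as an ERR_PTR value; the deprecated kmem_cache_t typedef and SLAB_ATOMIC flag give way to struct kmem_cache and GFP_ATOMIC; the flow-flush semaphore becomes a proper mutex; per-CPU loops use for_each_possible_cpu(); the hash table pages are zeroed by the allocator via __GFP_ZERO; SLAB_PANIC replaces a hand-rolled allocation-failure check; and the CONFIG_HOTPLUG_CPU guards around the CPU notifier are dropped.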
diff --git a/net/core/flow.c b/net/core/flow.c
index c4f2538..5d25697 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -20,6 +20,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/cpumask.h>
+#include <linux/mutex.h>
 #include <net/flow.h>
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
@@ -31,7 +32,6 @@ struct flow_cache_entry {
         u8              dir;
         struct flowi    key;
         u32             genid;
-        u32             sk_sid;
         void            *object;
         atomic_t        *object_ref;
 };
@@ -44,7 +44,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
 
 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
 
-static kmem_cache_t *flow_cachep __read_mostly;
+static struct kmem_cache *flow_cachep __read_mostly;
 
 static int flow_lwm, flow_hwm;
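kmem_cache_t was only a typedef for struct kmem_cache; the typedef was deprecated and later removed, so the cache pointer is now declared with the struct tag directly.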
@@ -78,13 +78,21 @@ static void flow_cache_new_hashrnd(unsigned long arg)
 {
         int i;
 
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 flow_hash_rnd_recalc(i) = 1;
 
         flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
         add_timer(&flow_hash_rnd_timer);
 }
 
+static void flow_entry_kill(int cpu, struct flow_cache_entry *fle)
+{
+        if (fle->object)
+                atomic_dec(fle->object_ref);
+        kmem_cache_free(flow_cachep, fle);
+        flow_count(cpu)--;
+}
+
 static void __flow_cache_shrink(int cpu, int shrink_to)
 {
         struct flow_cache_entry *fle, **flp;
@@ -100,10 +108,7 @@ static void __flow_cache_shrink(int cpu, int shrink_to)
                 }
                 while ((fle = *flp) != NULL) {
                         *flp = fle->next;
-                        if (fle->object)
-                                atomic_dec(fle->object_ref);
-                        kmem_cache_free(flow_cachep, fle);
-                        flow_count(cpu)--;
+                        flow_entry_kill(cpu, fle);
                 }
         }
 }
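Entry teardown (dropping the object reference, freeing the slab object, and decrementing the per-CPU count) previously lived inline in __flow_cache_shrink(); the new flow_entry_kill() helper keeps that bookkeeping in one place.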
@@ -164,7 +169,7 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
         return 0;
 }
 
-void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
+void *flow_cache_lookup(struct flowi *key, u16 family, u8 dir,
                         flow_resolve_t resolver)
 {
         struct flow_cache_entry *fle, **head;
@@ -188,7 +193,6 @@ void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
         for (fle = *head; fle; fle = fle->next) {
                 if (fle->family == family &&
                     fle->dir == dir &&
-                    fle->sk_sid == sk_sid &&
                     flow_key_compare(key, &fle->key) == 0) {
                         if (fle->genid == atomic_read(&flow_cache_genid)) {
                                 void *ret = fle->object;
@@ -207,13 +211,12 @@ void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
                 if (flow_count(cpu) > flow_hwm)
                         flow_cache_shrink(cpu);
 
-                fle = kmem_cache_alloc(flow_cachep, SLAB_ATOMIC);
+                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                 if (fle) {
                         fle->next = *head;
                         *head = fle;
                         fle->family = family;
                         fle->dir = dir;
-                        fle->sk_sid = sk_sid;
                         memcpy(&fle->key, key, sizeof(*key));
                         fle->object = NULL;
                         flow_count(cpu)++;
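SLAB_ATOMIC was defined as an alias for GFP_ATOMIC; with the alias on its way out, the allocation flag is passed by its real name. The fle->sk_sid store disappears along with the struct field removed above.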
@@ -222,12 +225,13 @@ void *flow_cache_lookup(struct flowi *key, u32 sk_sid, u16 family, u8 dir,
 
 nocache:
         {
+                int err;
                 void *obj;
                 atomic_t *obj_ref;
 
-                resolver(key, sk_sid, family, dir, &obj, &obj_ref);
+                err = resolver(key, family, dir, &obj, &obj_ref);
 
-                if (fle) {
+                if (fle && !err) {
                         fle->genid = atomic_read(&flow_cache_genid);
 
                         if (fle->object)
@@ -240,6 +244,8 @@ nocache:
                 }
                 local_bh_enable();
 
+                if (err)
+                        obj = ERR_PTR(err);
                 return obj;
         }
 }
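With the resolver now returning an error code, flow_cache_lookup() hands back an ERR_PTR-encoded pointer on failure instead of caching a bad entry (note the fle && !err guard above, which skips the cache update on error). A minimal caller sketch, assuming a hypothetical resolver my_resolver and flow key fl; IS_ERR() and PTR_ERR() are the standard helpers from <linux/err.h>:

        void *obj;

        obj = flow_cache_lookup(fl, family, dir, my_resolver);
        if (IS_ERR(obj))
                return PTR_ERR(obj);    /* hypothetical caller: propagate the resolver error */
        /* otherwise obj is the cached or freshly resolved object */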
@@ -287,11 +293,11 @@ static void flow_cache_flush_per_cpu(void *data)
 void flow_cache_flush(void)
 {
         struct flow_flush_info info;
-        static DECLARE_MUTEX(flow_flush_sem);
+        static DEFINE_MUTEX(flow_flush_sem);
 
         /* Don't want cpus going down or up during this. */
         lock_cpu_hotplug();
-        down(&flow_flush_sem);
+        mutex_lock(&flow_flush_sem);
 
         atomic_set(&info.cpuleft, num_online_cpus());
         init_completion(&info.completion);
@@ -301,7 +307,7 @@ void flow_cache_flush(void)
         local_bh_enable();
 
         wait_for_completion(&info.completion);
-        up(&flow_flush_sem);
+        mutex_unlock(&flow_flush_sem);
         unlock_cpu_hotplug();
 }
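DECLARE_MUTEX() actually declared a semaphore initialized to 1, driven by down()/up(); DEFINE_MUTEX() declares a real struct mutex driven by mutex_lock()/mutex_unlock(), which is lighter and gets lockdep debugging. A generic sketch of the pattern, with a hypothetical lock name standing in for flow_flush_sem:

        #include <linux/mutex.h>

        static DEFINE_MUTEX(my_lock);           /* hypothetical name */

        static void serialized_section(void)
        {
                mutex_lock(&my_lock);           /* may sleep: process context only */
                /* ... work that must not run concurrently ... */
                mutex_unlock(&my_lock);
        }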
@@ -317,12 +323,10 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
                 /* NOTHING */;
 
         flow_table(cpu) = (struct flow_cache_entry **)
-                __get_free_pages(GFP_KERNEL, order);
+                __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
         if (!flow_table(cpu))
                 panic("NET: failed to allocate flow cache order %lu\n", order);
 
-        memset(flow_table(cpu), 0, PAGE_SIZE << order);
-
         flow_hash_rnd_recalc(cpu) = 1;
         flow_count(cpu) = 0;
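Requesting __GFP_ZERO makes the page allocator hand back pre-zeroed pages, so the explicit memset() of the freshly allocated hash table becomes redundant and is deleted.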
@@ -330,7 +334,6 @@ static void __devinit flow_cache_cpu_prepare(int cpu)
         tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int flow_cache_cpu(struct notifier_block *nfb,
                           unsigned long action,
                           void *hcpu)
@@ -339,7 +342,6 @@ static int flow_cache_cpu(struct notifier_block *nfb,
                 __flow_cache_shrink((unsigned long)hcpu, 0);
         return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int __init flow_cache_init(void)
 {
@@ -347,12 +349,8 @@ static int __init flow_cache_init(void)
         flow_cachep = kmem_cache_create("flow_cache",
                                         sizeof(struct flow_cache_entry),
-                                        0, SLAB_HWCACHE_ALIGN,
+                                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                         NULL, NULL);
-
-        if (!flow_cachep)
-                panic("NET: failed to allocate flow cache slab\n");
-
         flow_hash_shift = 10;
         flow_lwm = 2 * flow_hash_size;
         flow_hwm = 4 * flow_hash_size;
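With SLAB_PANIC set, kmem_cache_create() panics on allocation failure by itself, which makes the explicit NULL check and panic() call dead code.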
@@ -362,7 +360,7 @@ static int __init flow_cache_init(void)
         flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
         add_timer(&flow_hash_rnd_timer);
 
-        for_each_cpu(i)
+        for_each_possible_cpu(i)
                 flow_cache_cpu_prepare(i);
 
         hotcpu_notifier(flow_cache_cpu, 0);
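for_each_cpu() already walked every possible CPU; it was renamed to for_each_possible_cpu() upstream to make that explicit, so both conversions in this file are spelling changes with no behavioural difference.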