/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2010 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sched.h>

#include "loop_counter.h"

void loop_suppress(struct datapath *dp, struct sw_flow_actions *actions)
{
	if (net_ratelimit())
		pr_warn("%s: flow looped %d times, dropping\n",
			dp_name(dp), MAX_LOOPS);
	actions->actions_len = 0;
}

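/*
 * Usage sketch (illustrative, not part of this file): callers in the
 * packet processing path are expected to bracket action execution with
 * the per-context counter along these lines.  This assumes that
 * struct loop_counter (declared in loop_counter.h) exposes 'count' and
 * 'looping' fields, and uses a hypothetical do_execute_actions() as a
 * stand-in for the real work:
 *
 *	struct loop_counter *loop = loop_get_counter();
 *
 *	if (unlikely(++loop->count > MAX_LOOPS))
 *		loop->looping = true;
 *	if (unlikely(loop->looping))
 *		loop_suppress(dp, acts);
 *	else
 *		do_execute_actions(dp, skb, acts);
 *	if (!--loop->count)
 *		loop->looping = false;
 *	loop_put_counter();
 */
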
#ifndef CONFIG_PREEMPT_RT

/* We use a separate counter for each CPU for both interrupt and non-interrupt
 * context in order to keep the limit deterministic for a given packet. */
struct percpu_loop_counters {
	struct loop_counter counters[2];
};

static DEFINE_PER_CPU(struct percpu_loop_counters, loop_counters);

struct loop_counter *loop_get_counter(void)
{
	return &get_cpu_var(loop_counters).counters[!!in_interrupt()];
}

void loop_put_counter(void)
{
	put_cpu_var(loop_counters);
}

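/*
 * Note on the pairing above: get_cpu_var() disables preemption, so
 * every loop_get_counter() call must be matched by loop_put_counter()
 * before the caller can sleep or be migrated to another CPU.  The
 * !!in_interrupt() index in loop_get_counter() selects counters[1] in
 * hardirq/softirq context and counters[0] otherwise, so a packet whose
 * processing is interrupted on this CPU is never charged for the
 * interrupting packet's recursion.
 */
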
#else /* !CONFIG_PREEMPT_RT */

struct loop_counter *loop_get_counter(void)
{
	WARN_ON(in_interrupt());

	/* Only two bits of the extra_flags field in struct task_struct are
	 * used and it's an unsigned int.  We hijack the most significant bits
	 * to be our counter structure.  On RT kernels softirqs always run in
	 * process context so we are guaranteed to have a valid task_struct.
	 */
#ifdef __LITTLE_ENDIAN
	return (void *)(&current->extra_flags + 1) -
		sizeof(struct loop_counter);
#elif defined(__BIG_ENDIAN)
	return (struct loop_counter *)&current->extra_flags;
#else
#error "Please fix <asm/byteorder.h>."
#endif
}

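/*
 * Worked example of the arithmetic above, assuming (for illustration
 * only) a 4-byte unsigned int extra_flags at address A and a 2-byte
 * struct loop_counter: on little-endian, the most significant bytes
 * occupy the highest addresses, so &current->extra_flags + 1 points at
 * A + 4 and stepping back sizeof(struct loop_counter) yields A + 2,
 * i.e. the top two bytes.  On big-endian, the most significant bytes
 * begin at A itself, so a plain cast suffices.
 */
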
void loop_put_counter(void) { }

#endif /* CONFIG_PREEMPT_RT */