Fix build problem introduced by kernel threads when building for Linux 2.4.20.
author     Justin Pettit <jpettit@nicira.com>
           Mon, 5 May 2008 04:07:24 +0000 (21:07 -0700)
committer  Justin Pettit <jpettit@nicira.com>
           Mon, 5 May 2008 04:07:24 +0000 (21:07 -0700)
datapath/linux-2.4/Modules.mk
datapath/linux-2.4/compat-2.4/include/linux/sched.h [new file with mode: 0644]
datapath/linux-2.4/compat-2.4/sched.c [new file with mode: 0644]

diff --git a/datapath/linux-2.4/Modules.mk b/datapath/linux-2.4/Modules.mk
index 43a5ef3..7681d76 100644
--- a/datapath/linux-2.4/Modules.mk
+++ b/datapath/linux-2.4/Modules.mk
@@ -9,6 +9,7 @@ compat24_sources = \
        linux-2.4/compat-2.4/netlink.c \
        linux-2.4/compat-2.4/random32.c \
        linux-2.4/compat-2.4/rcupdate.c \
+       linux-2.4/compat-2.4/sched.c \
        linux-2.4/compat-2.4/string.c
 
 compat24_headers = \
@@ -41,6 +42,7 @@ compat24_headers = \
        linux-2.4/compat-2.4/include/linux/netlink.h \
        linux-2.4/compat-2.4/include/linux/random.h \
        linux-2.4/compat-2.4/include/linux/rcupdate.h \
+       linux-2.4/compat-2.4/include/linux/sched.h \
        linux-2.4/compat-2.4/include/linux/skbuff.h \
        linux-2.4/compat-2.4/include/linux/slab.h \
        linux-2.4/compat-2.4/include/linux/sockios.h \
diff --git a/datapath/linux-2.4/compat-2.4/include/linux/sched.h b/datapath/linux-2.4/compat-2.4/include/linux/sched.h
new file mode 100644
index 0000000..8475b81
--- /dev/null
+++ b/datapath/linux-2.4/compat-2.4/include/linux/sched.h
@@ -0,0 +1,17 @@
+#ifndef __LINUX_SCHED_WRAPPER_H
+#define __LINUX_SCHED_WRAPPER_H 1
+
+#include_next <linux/sched.h>
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21)
+
+#ifdef CONFIG_SMP
+extern void set_cpus_allowed(struct task_struct *p, unsigned long new_mask);
+#else
+# define set_cpus_allowed(p, new_mask) do { } while (0)
+#endif
+
+#endif /* linux kernel < 2.4.21 */
+
+#endif
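
With this wrapper in place, datapath code can call set_cpus_allowed() unconditionally on pre-2.4.21 kernels: on SMP builds it resolves to the backported function added below in sched.c, while on uniprocessor builds it compiles away to a no-op. A minimal, hypothetical caller (not part of this commit) might look like:

    /* Hypothetical example only: pin the calling task to CPU 0.  On a UP
     * 2.4.20 kernel the call expands to nothing; on SMP it uses the
     * backported implementation from compat-2.4/sched.c. */
    #include <linux/sched.h>

    static void example_pin_to_cpu0(void)
    {
            set_cpus_allowed(current, 1UL << 0);    /* bitmask: CPU 0 only */
    }
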
diff --git a/datapath/linux-2.4/compat-2.4/sched.c b/datapath/linux-2.4/compat-2.4/sched.c
new file mode 100644
index 0000000..60733b9
--- /dev/null
+++ b/datapath/linux-2.4/compat-2.4/sched.c
@@ -0,0 +1,44 @@
+#include "linux/sched.h"
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,21)
+
+#ifdef CONFIG_SMP
+/**
+ * set_cpus_allowed() - change a given task's processor affinity
+ * @p: task to bind
+ * @new_mask: bitmask of allowed processors
+ *
+ * Upon return, the task is running on a legal processor.  Note the caller
+ * must have a valid reference to the task: it must not exit() prematurely.
+ * This call can sleep; do not hold locks on call.
+ */
+void set_cpus_allowed(struct task_struct *p, unsigned long new_mask)
+{
+       new_mask &= cpu_online_map;
+       BUG_ON(!new_mask);
+
+       p->cpus_allowed = new_mask;
+
+       /*
+        * If the task is on a no-longer-allowed processor, we need to move
+        * it.  If the task is not current, then set need_resched and send
+        * its processor an IPI to reschedule.
+        */
+       if (!(p->cpus_runnable & p->cpus_allowed)) {
+               if (p != current) {
+                       p->need_resched = 1;
+                       smp_send_reschedule(p->processor);
+               }
+               /*
+                * Wait until we are on a legal processor.  If the task is
+                * current, then we should be on a legal processor the next
+                * time we reschedule.  Otherwise, we need to wait for the IPI.
+                */
+               while (!(p->cpus_runnable & p->cpus_allowed))
+                       schedule();
+       }
+}
+#endif
+
+#endif /* kernel < 2.4.21 */
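
For context, here is a sketch (identifiers hypothetical, not part of this commit) of the kind of 2.4-style kernel thread that needs this backport: a thread that daemonizes itself and then restricts which processors it may run on via set_cpus_allowed() before entering its work loop.

    /* Hypothetical 2.4-style kernel thread; names are illustrative only. */
    #include <linux/kernel.h>
    #include <linux/sched.h>
    #include <linux/smp_lock.h>
    #include <linux/string.h>

    static int example_kthread(void *unused)
    {
            lock_kernel();
            daemonize();                            /* detach from the user process that spawned it */
            strcpy(current->comm, "of-example");    /* name shown by ps */
            set_cpus_allowed(current, 1UL << 0);    /* run on CPU 0 only */
            unlock_kernel();

            while (!signal_pending(current)) {
                    /* ... periodic datapath work would go here ... */
                    set_current_state(TASK_INTERRUPTIBLE);
                    schedule_timeout(HZ);           /* sleep for roughly one second */
            }
            return 0;
    }

    /* Started elsewhere with something like:
     *         kernel_thread(example_kthread, NULL, CLONE_FS | CLONE_FILES);
     */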