This commit was manufactured by cvs2svn to create tag
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index f75b517..14134f2 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -29,9 +29,9 @@
 #include <linux/cache.h>
 #include <linux/delay.h>
 #include <linux/efi.h>
-#include <linux/bitops.h>
 
 #include <asm/atomic.h>
+#include <asm/bitops.h>
 #include <asm/current.h>
 #include <asm/delay.h>
 #include <asm/machvec.h>
@@ -62,7 +62,7 @@ struct call_data_struct {
        atomic_t finished;
 };
 
-static struct call_data_struct * call_data;
+static volatile struct call_data_struct *call_data;
 
 #define IPI_CALL_FUNC          0
 #define IPI_CPU_STOP           1
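
The volatile qualifier matters because call_data is the rendezvous through
which the sending CPU publishes its request and the IPI handlers on the
other CPUs pick it up. A sketch of the receiver's side of the handshake,
loosely following the IPI_CALL_FUNC case in this file's handle_IPI()
(abbreviated, not the verbatim handler):

	static void
	handle_call_func_ipi (void)
	{
		/* Snapshot the published pointer; the volatile qualifier
		 * forces a real load rather than a cached value. */
		struct call_data_struct *data = (struct call_data_struct *) call_data;
		void (*func) (void *info) = data->func;
		void *info = data->info;
		int wait = data->wait;

		mb();	/* the reads above must complete before the ack */
		atomic_inc(&data->started);

		/* Unless the sender is waiting, *data may vanish once
		 * started reaches the expected count, so func/info/wait
		 * were copied out first. */
		(*func)(info);

		if (wait) {
			mb();	/* func's stores complete before the ack */
			atomic_inc(&data->finished);
		}
	}
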
@@ -290,11 +290,11 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 
        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
-               cpu_relax();
+               barrier();
 
        if (wait)
                while (atomic_read(&data.finished) != cpus)
-                       cpu_relax();
+                       barrier();
        call_data = NULL;
 
        spin_unlock_bh(&call_lock);
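
Both cpu_relax() and barrier() act as compiler barriers here, which is the
property these loops actually rely on: depending on how atomic_read() is
defined, the compiler would otherwise be free to hoist the load out of the
loop and spin on a stale value forever. cpu_relax() may additionally emit a
CPU pause/yield hint. A minimal sketch of the distinction (assuming the
usual definitions, not code from this file):

	/* Risky without a barrier: the compiler may load data.started
	 * once and never re-read it. */
	while (atomic_read(&data.started) != cpus)
		;

	/* Safe: barrier() forces the load to be redone each iteration;
	 * cpu_relax() does the same and may also hint the CPU to pause. */
	while (atomic_read(&data.started) != cpus)
		barrier();
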
@@ -313,10 +313,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  *  <func>     The function to run. This must be fast and non-blocking.
  *  <info>     An arbitrary pointer to pass to the function.
  *  <nonatomic>        currently unused.
- *  <wait>     If 1, wait (atomically) until function has complete on other
- *             CPUs. If 0, wait for the IPI to be received by other CPUs, but
- *             do not wait for the completion of the IPI on each CPU.  If -1,
- *             do not wait for other CPUs to receive IPI.
+ *  <wait>     If true, wait (atomically) until function has completed on other CPUs.
  *  [RETURNS]   0 on success, else a negative status code.
  *
  * Does not return until remote CPUs are nearly ready to execute <func> or are or have
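
The removed text documented the old tri-state convention for <wait>, which
the hunk below also removes from the implementation:

	/* Old tri-state semantics (reconstructed from the removed code):
	 *   wait == -1: fire-and-forget via the static dumpdata slot; do
	 *               not wait even for the IPI to be received.
	 *   wait ==  0: spin until every remote CPU has started <func>.
	 *   wait ==  1: spin until every remote CPU has finished <func>.
	 * New boolean semantics: zero waits for start, non-zero waits
	 * for completion. */
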
@@ -328,51 +325,36 @@ EXPORT_SYMBOL(smp_call_function_single);
 int
 smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
 {
-       static struct call_data_struct dumpdata;
-       struct call_data_struct normaldata;
-       struct call_data_struct *data;
+       struct call_data_struct data;
        int cpus = num_online_cpus()-1;
 
        if (!cpus)
                return 0;
 
        /* Can deadlock when called with interrupts disabled */
-       /* Only if we are waiting for other CPU to ack */
-       WARN_ON(irqs_disabled() && wait >= 0);
+       WARN_ON(irqs_disabled());
+
+       data.func = func;
+       data.info = info;
+       atomic_set(&data.started, 0);
+       data.wait = wait;
+       if (wait)
+               atomic_set(&data.finished, 0);
 
        spin_lock(&call_lock);
-       if (wait == -1) {
-               /* if another cpu beat us, they win! */
-               if (dumpdata.func) {
-                       spin_unlock(&call_lock);
-                       return 0;
-               }
-               data = &dumpdata;
-       } else
-               data = &normaldata;
-
-       data->func = func;
-       data->info = info;
-       atomic_set(&data->started, 0);
-       data->wait = wait > 0 ? wait : 0;
-       if (wait > 0)
-               atomic_set(&data->finished, 0);
-
-       call_data = data;
+
+       call_data = &data;
        mb();   /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
        send_IPI_allbutself(IPI_CALL_FUNC);
 
        /* Wait for response */
-       if (wait >= 0)
-               while (atomic_read(&data->started) != cpus)
-                       cpu_relax();
-
-       if (wait > 0)
-               while (atomic_read(&data->finished) != cpus)
-                       cpu_relax();
+       while (atomic_read(&data.started) != cpus)
+               barrier();
 
-       if (wait >= 0)
-               call_data = NULL;
+       if (wait)
+               while (atomic_read(&data.finished) != cpus)
+                       barrier();
+       call_data = NULL;
 
        spin_unlock(&call_lock);
        return 0;
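
For reference, a typical caller under the simplified interface; count_cpu
and ping_other_cpus are hypothetical, not part of this file:

	#include <linux/smp.h>
	#include <linux/kernel.h>
	#include <asm/atomic.h>

	static atomic_t hits = ATOMIC_INIT(0);

	/* Hypothetical callback: must be fast and non-blocking, since it
	 * runs from the IPI handler on every other CPU. */
	static void
	count_cpu (void *unused)
	{
		atomic_inc(&hits);
	}

	static int
	ping_other_cpus (void)
	{
		/* nonatomic is unused; wait=1 blocks until count_cpu() has
		 * finished on all other CPUs. */
		int ret = smp_call_function(count_cpu, NULL, 0, 1);

		if (!ret)
			printk(KERN_INFO "count_cpu ran on %d other CPU(s)\n",
			       atomic_read(&hits));
		return ret;
	}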