#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
-#include <linux/mman.h>
#include <linux/random.h>
+#include <linux/delay.h>	/* ssleep(), used by cpu_idle_wait() below */
#include <asm/uaccess.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
-int hlt_counter;
+static int hlt_counter;
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);
* Power management idle function, if any.
*/
void (*pm_idle)(void);
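+/*
+ * Per-CPU flag: set by cpu_idle_wait() below, cleared by each CPU's
+ * idle loop once it has re-read pm_idle.
+ */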
+static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
void disable_hlt(void)
{
*/
void default_idle(void)
{
- if (!hlt_counter && current_cpu_data.hlt_works_ok) {
+ if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
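+ /*
+ * need_resched() is checked with interrupts off, and safe_halt()
+ * then executes "sti; hlt": interrupts are re-enabled only for the
+ * halt itself, so a wakeup cannot slip in between check and halt.
+ */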
local_irq_disable();
if (!need_resched())
safe_halt();
while (1) {
while (!need_resched()) {
void (*idle)(void);
- /*
- * Mark this as an RCU critical section so that
- * synchronize_kernel() in the unload path waits
- * for our completion.
- */
- rcu_read_lock();
+
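+ /*
+ * Acknowledge a pending cpu_idle_wait(): the flag is written only
+ * when set, to avoid dirtying the cache line on every idle pass.
+ * The rmb() keeps the pm_idle load below from being reordered
+ * before this check.
+ */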
+ if (__get_cpu_var(cpu_idle_state))
+ __get_cpu_var(cpu_idle_state) = 0;
+
+ rmb();
idle = pm_idle;
if (!idle)
idle = default_idle;
- irq_stat[smp_processor_id()].idle_timestamp = jiffies;
+ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
idle();
- rcu_read_unlock();
}
schedule();
}
}
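+/*
+ * cpu_idle_wait - wait until all online CPUs have noticed a new pm_idle
+ * value and are no longer executing the old idle handler.
+ *
+ * The caller must update pm_idle before calling this; when it returns,
+ * no CPU still references the previous handler, so e.g. a module that
+ * provided the old handler can be safely unloaded.
+ */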
+void cpu_idle_wait(void)
+{
+ unsigned int cpu, this_cpu = get_cpu();
+ cpumask_t map;
+
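+ /*
+ * Pin this thread to one CPU: that CPU stays busy running us and
+ * never enters the idle loop, so its own flag is cleared by hand
+ * below rather than waited for.
+ */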
+ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
+ put_cpu();
+
+ cpus_clear(map);
+ for_each_online_cpu(cpu) {
+ per_cpu(cpu_idle_state, cpu) = 1;
+ cpu_set(cpu, map);
+ }
+
+ __get_cpu_var(cpu_idle_state) = 0;
+
+ wmb();
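+ /*
+ * Poll once a second until every flagged CPU has either passed
+ * through the idle loop and cleared its flag, or gone offline.
+ */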
+ do {
+ ssleep(1);
+ for_each_online_cpu(cpu) {
+ if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
+ cpu_clear(cpu, map);
+ }
+ cpus_and(map, map, cpu_online_map);
+ } while (!cpus_empty(map));
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
/*
* This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
* which can obviate IPI to trigger checking of need_resched.
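+ * MONITOR arms address monitoring on the current task's flags word and
+ * MWAIT halts until that word is written, so setting TIF_NEED_RESCHED
+ * from another CPU wakes the idle CPU without an interrupt.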
* Forget coprocessor state..
*/
clear_fpu(tsk);
- tsk->used_math = 0;
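+ /* used_math is now the PF_USED_MATH task flag; use the *_used_math() helpers. */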
+ clear_used_math();
}
void release_thread(struct task_struct *dead_task)
int err;
childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+ /*
+ * The -8 below reserves 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessible even if the CPU hasn't stored the SS/ESP registers
+ * on the stack (an interrupt gate does not save these registers
+ * when switching to the same privilege ring).
+ * Therefore beware: accessing the xss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain
+ * completely wrong values.
+ */
+ childregs = (struct pt_regs *) ((unsigned long) childregs - 8);
*childregs = *regs;
childregs->eax = 0;
childregs->esp = esp;
*/
tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}
-/*
- * This special macro can be used to load a debugging register
- */
-#define loaddebug(thread,register) \
- __asm__("movl %0,%%db" #register \
- : /* no output */ \
- :"r" (thread->debugreg[register]))
/*
* switch_to(x,y) should switch tasks from x to y.
* Save away %fs and %gs. No need to save %es and %ds, as
* those are always kernel segments while inside the kernel.
*/
- asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
- asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+ asm volatile("mov %%fs,%0":"=m" (prev->fs));
+ asm volatile("mov %%gs,%0":"=m" (prev->gs));
/*
* Restore %fs and %gs if needed.
return 0;
}
-
unsigned long arch_align_stack(unsigned long sp)
{
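+ /*
+ * Drop the stack top by up to 8 kB of entropy; the final mask
+ * keeps the result 16-byte aligned.
+ */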
- if (current->flags & PF_RELOCEXEC)
- sp -= ((get_random_int() % 65536) << 4);
+ if (randomize_va_space)
+ sp -= get_random_int() % 8192;
return sp & ~0xf;
}
-
void arch_add_exec_range(struct mm_struct *mm, unsigned long limit)
{
if (limit > mm->context.exec_limit) {
mm->context.exec_limit = limit;
set_user_cs(&mm->context.user_cs, limit);
- if (mm == current->mm)
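+ /*
+ * load_user_cs_desc() rewrites this CPU's GDT entry; preemption
+ * is disabled so we cannot migrate between reading the processor
+ * id and updating its descriptor.
+ */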
+ if (mm == current->mm) {
+ preempt_disable();
load_user_cs_desc(smp_processor_id(), mm);
+ preempt_enable();
+ }
}
}
void arch_remove_exec_range(struct mm_struct *mm, unsigned long old_end)
{
struct vm_area_struct *vma;
- unsigned long limit = 0;
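+ /* Start at one page so the recomputed exec limit is never zero. */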
+ unsigned long limit = PAGE_SIZE;
if (old_end == mm->context.exec_limit) {
for (vma = mm->mmap; vma; vma = vma->vm_next)
mm->context.exec_limit = limit;
set_user_cs(&mm->context.user_cs, limit);
- if (mm == current->mm)
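+ /* As in arch_add_exec_range(): the GDT update must not migrate CPUs. */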
+ if (mm == current->mm) {
+ preempt_disable();
load_user_cs_desc(smp_processor_id(), mm);
+ preempt_enable();
+ }
}
}