#include <linux/cpu.h>
#include <linux/vs_cvirt.h>
#include <linux/vserver/sched.h>
+#include <linux/syscalls.h>
+#include <linux/delay.h>
+#include <linux/diskdump.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
spin_unlock_irqrestore(&base->lock, flags);
}
-EXPORT_SYMBOL_GPL(add_timer_on);
/***
* mod_timer - modify a timer's timeout
goto repeat;
}
list_del(&timer->entry);
- smp_wmb(); /* the list del must have taken effect before timer->base
- * change is visible to other CPUs, or a concurrent mod_timer
- * would cause a race with list_add
- */
+ /* Need to make sure that anybody who sees a NULL base also sees the list ops */
+ smp_wmb();
timer->base = NULL;
spin_unlock_irqrestore(&base->lock, flags);
static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;
+	unsigned long flags;
-	spin_lock_irq(&base->lock);
+	/*
+	 * NOTE(review): converted to irqsave/irqrestore -- presumably so
+	 * this can be entered with interrupts already disabled (e.g. from
+	 * dump_run_timers() in crash-dump context). Confirm intent.
+	 */
+	spin_lock_irqsave(&base->lock, flags);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list = LIST_HEAD_INIT(work_list);
		struct list_head *head = &work_list;
		list_del(&timer->entry);
		set_running_timer(base, timer);
-		smp_wmb(); /* the list del must have taken effect before timer->base
-			    * change is visible to other CPUs, or a concurrent mod_timer
-			    * would cause a race with list_add
-			    */
+		/* list_del must be visible before timer->base is cleared */
+		smp_wmb();
		timer->base = NULL;
-		spin_unlock_irq(&base->lock);
+		spin_unlock_irqrestore(&base->lock, flags);
		fn(data);
+		/*
+		 * Re-taking with plain spin_lock_irq() is consistent here:
+		 * the irqrestore above returned us to the caller's IRQ
+		 * state, and the final spin_unlock_irqrestore(flags) below
+		 * restores that same state again.
+		 */
		spin_lock_irq(&base->lock);
		goto repeat;
	}
	}
	set_running_timer(base, NULL);
-	spin_unlock_irq(&base->lock);
+	spin_unlock_irqrestore(&base->lock, flags);
}
#ifdef CONFIG_NO_IDLE_HZ
psecs = (p->utime += user);
psecs += (p->stime += system);
if (p->signal && !unlikely(p->state & (EXIT_DEAD|EXIT_ZOMBIE)) &&
- (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_cur)) {
+ psecs / HZ >= p->signal->rlim[RLIMIT_CPU].rlim_cur) {
/* Send SIGXCPU every second.. */
if (!(psecs % HZ))
send_sig(SIGXCPU, p, 1);
/* and SIGKILL when we go over max.. */
- if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_max)
+ if (psecs / HZ >= p->signal->rlim[RLIMIT_CPU].rlim_max)
send_sig(SIGKILL, p, 1);
}
}
void do_timer(struct pt_regs *regs)
{
	jiffies_64++;
-#ifndef CONFIG_SMP
-	/* SMP process accounting uses the local APIC timer */
-
-	update_process_times(user_mode(regs));
-#endif
+	/*
+	 * NOTE(review): the UP-only update_process_times() call was
+	 * removed here -- presumably per-CPU process accounting is now
+	 * driven elsewhere (per-arch local timer path). Confirm no
+	 * configuration loses process-time accounting.
+	 */
	update_times();
}
struct timer_list timer;
unsigned long expire;
+ if (crashdump_mode()) {
+ diskdump_mdelay(timeout);
+ set_current_state(TASK_RUNNING);
+ return timeout;
+ }
+
switch (timeout)
{
case MAX_SCHEDULE_TIMEOUT:
return 0;
}
-static void __devinit init_timers_cpu(int cpu)
+static void /* __devinit */ init_timers_cpu(int cpu)
{
int j;
tvec_base_t *base;
base->timer_jiffies = jiffies;
}
+/*
+ * Snapshot of this CPU's timer vector base, taken by dump_clear_timers()
+ * before the wheel is re-initialised for crash-dump use.
+ * NOTE(review): a single global -- holds at most one CPU's snapshot.
+ */
+static tvec_base_t saved_tvec_base;
+
+/*
+ * Save the current CPU's timer base and re-initialise it, giving the
+ * crash-dump path a clean timer wheel of its own.
+ * NOTE(review): memcpy copies base->lock (a spinlock) by value, and
+ * nothing in this file ever restores saved_tvec_base -- confirm the
+ * restore/consumer lives elsewhere in the diskdump code.
+ */
+void dump_clear_timers(void)
+{
+	tvec_base_t *base = &per_cpu(tvec_bases, smp_processor_id());
+
+	memcpy(&saved_tvec_base, base, sizeof(saved_tvec_base));
+	init_timers_cpu(smp_processor_id());
+}
+
+EXPORT_SYMBOL_GPL(dump_clear_timers);
+
+/*
+ * Expire pending timers on the current CPU's (re-initialised) base.
+ * Called by the disk-dump code to poll-drive timers during a dump.
+ * (Uses __get_cpu_var where dump_clear_timers uses
+ * per_cpu(..., smp_processor_id()) -- equivalent on the local CPU.)
+ */
+void dump_run_timers(void)
+{
+	tvec_base_t *base = &__get_cpu_var(tvec_bases);
+
+	__run_timers(base);
+}
+
+EXPORT_SYMBOL_GPL(dump_run_timers);
+
#ifdef CONFIG_HOTPLUG_CPU
static int migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
*/
void msleep(unsigned int msecs)
{
- unsigned long timeout = msecs_to_jiffies(msecs);
+ unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+ if (unlikely(crashdump_mode())) {
+ while (msecs--) udelay(1000);
+ return;
+ }
while (timeout) {
set_current_state(TASK_UNINTERRUPTIBLE);
*/
unsigned long msleep_interruptible(unsigned int msecs)
{
- unsigned long timeout = msecs_to_jiffies(msecs);
+ unsigned long timeout = msecs_to_jiffies(msecs) + 1;
while (timeout && !signal_pending(current)) {
set_current_state(TASK_INTERRUPTIBLE);