/*
* per-CPU timer vector definitions:
*/
+
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
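For orientation: with the !CONFIG_BASE_SMALL defaults (TVR_BITS = 8, TVN_BITS = 6), the root vector tv1 spans the next 2^8 = 256 jiffies and each outer level multiplies the span by 2^6 = 64, so tv1..tv5 together cover 2^(8 + 4*6) = 2^32 jiffies, i.e. the full 32-bit jiffies range.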
+struct timer_base_s {
+ spinlock_t lock;
+ struct timer_list *running_timer;
+};
+
typedef struct tvec_s {
struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
struct list_head vec[TVR_SIZE];
} tvec_root_t;
struct tvec_t_base_s {
- spinlock_t lock;
- struct timer_list *running_timer;
+ struct timer_base_s t_base;
unsigned long timer_jiffies;
tvec_root_t tv1;
tvec_t tv2;
tvec_t tv3;
tvec_t tv4;
tvec_t tv5;
} ____cacheline_aligned_in_smp;
typedef struct tvec_t_base_s tvec_base_t;
-
-tvec_base_t boot_tvec_bases;
-EXPORT_SYMBOL(boot_tvec_bases);
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
+static DEFINE_PER_CPU(tvec_base_t, tvec_bases);
static inline void set_running_timer(tvec_base_t *base,
struct timer_list *timer)
{
#ifdef CONFIG_SMP
- base->running_timer = timer;
+ base->t_base.running_timer = timer;
#endif
}
list_add_tail(&timer->entry, vec);
}
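For review context (not part of the patch): the vec that list_add_tail() targets is chosen by internal_add_timer() from the expiry distance; a condensed sketch of that index computation, using the masks defined above:

	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		/* fits in the root wheel: index by the low bits */
		vec = base->tv1.vec + (expires & TVR_MASK);
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		vec = base->tv2.vec + ((expires >> TVR_BITS) & TVN_MASK);
	} else if ((signed long)idx < 0) {
		/* already expired: queue for the next tick */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		/* tv3..tv5 follow the same pattern, 6 more bits per level */
	}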
+typedef struct timer_base_s timer_base_t;
+/*
+ * Used by TIMER_INITIALIZER: we can't reference per_cpu(tvec_bases)
+ * at compile time, and we need timer->base to lock the timer.
+ */
+timer_base_t __init_timer_base
+ ____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED };
+EXPORT_SYMBOL(__init_timer_base);
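For reference, statically initialized timers reach __init_timer_base through TIMER_INITIALIZER in include/linux/timer.h; a sketch of that macro (field list reproduced from memory, so treat the exact form as illustrative):

	#define TIMER_INITIALIZER(_function, _expires, _data) {	\
		.function = (_function),			\
		.expires = (_expires),				\
		.data = (_data),				\
		.base = &__init_timer_base,			\
	}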
+
/***
* init_timer - initialize a timer.
* @timer: the timer to be initialized
void fastcall init_timer(struct timer_list *timer)
{
timer->entry.next = NULL;
- timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
+ timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base;
}
EXPORT_SYMBOL(init_timer);
}
/*
- * We are using hashed locking: holding per_cpu(tvec_bases).lock
+ * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock
* means that all timers which are tied to this base via timer->base are
* locked, and the base itself is locked too.
*
* When the timer's base is locked and the timer removed from the list, it is
* possible to set timer->base = NULL and drop the lock: the timer remains
* locked.
*/
-static tvec_base_t *lock_timer_base(struct timer_list *timer,
+static timer_base_t *lock_timer_base(struct timer_list *timer,
unsigned long *flags)
{
- tvec_base_t *base;
+ timer_base_t *base;
for (;;) {
base = timer->base;
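The rest of the loop is not in the hunk; as a sketch, it takes the base lock and re-checks timer->base, so that a migration racing with us (which NULLs the pointer, per the comment above) is simply retried:

	if (likely(base != NULL)) {
		spin_lock_irqsave(&base->lock, *flags);
		if (likely(base == timer->base))
			return base;	/* still ours: return it locked */
		/* the timer migrated while we spun for the lock: retry */
		spin_unlock_irqrestore(&base->lock, *flags);
	}
	cpu_relax();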
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
- tvec_base_t *base, *new_base;
+ timer_base_t *base;
+ tvec_base_t *new_base;
unsigned long flags;
int ret = 0;
ret = 1;
}
- new_base = __get_cpu_var(tvec_bases);
+ new_base = &__get_cpu_var(tvec_bases);
- if (base != new_base) {
+ if (base != &new_base->t_base) {
/*
* We are trying to schedule the timer on the local CPU.
* However we can't change timer's base while it is running,
* otherwise del_timer_sync() can't detect that the timer's
* handler has not yet finished. This also guarantees that
* the timer is serialized wrt itself.
*/
- if (likely(base->running_timer != timer)) {
+ if (unlikely(base->running_timer == timer)) {
+ /* The timer remains on a former base */
+ new_base = container_of(base, tvec_base_t, t_base);
+ } else {
/* See the comment in lock_timer_base() */
timer->base = NULL;
spin_unlock(&base->lock);
- base = new_base;
- spin_lock(&base->lock);
- timer->base = base;
+ spin_lock(&new_base->t_base.lock);
+ timer->base = &new_base->t_base;
}
}
timer->expires = expires;
- internal_add_timer(base, timer);
- spin_unlock_irqrestore(&base->lock, flags);
+ internal_add_timer(new_base, timer);
+ spin_unlock_irqrestore(&new_base->t_base.lock, flags);
return ret;
}
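A note on the container_of() in the running-timer branch: it recovers the enclosing per-CPU base from the embedded timer_base_t, i.e. it is equivalent to:

	new_base = (tvec_base_t *)((char *)base -
			offsetof(struct tvec_t_base_s, t_base));

This is only reached when base->running_timer == timer, and running_timer is set exclusively on the per-CPU bases, so base is guaranteed to be embedded in a tvec_base_t here (it can never be __init_timer_base).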
*/
void add_timer_on(struct timer_list *timer, int cpu)
{
- tvec_base_t *base = per_cpu(tvec_bases, cpu);
+ tvec_base_t *base = &per_cpu(tvec_bases, cpu);
unsigned long flags;
BUG_ON(timer_pending(timer) || !timer->function);
- spin_lock_irqsave(&base->lock, flags);
- timer->base = base;
+ spin_lock_irqsave(&base->t_base.lock, flags);
+ timer->base = &base->t_base;
internal_add_timer(base, timer);
- spin_unlock_irqrestore(&base->lock, flags);
+ spin_unlock_irqrestore(&base->t_base.lock, flags);
}
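A minimal caller sketch for add_timer_on() (my_timer_fn and target_cpu are illustrative names, not from this patch):

	static void my_timer_fn(unsigned long data);	/* hypothetical handler */
	static struct timer_list my_timer =
			TIMER_INITIALIZER(my_timer_fn, 0, 0);

	my_timer.expires = jiffies + HZ;	/* fire in ~1 second */
	add_timer_on(&my_timer, target_cpu);	/* target_cpu assumed online */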
*/
int del_timer(struct timer_list *timer)
{
- tvec_base_t *base;
+ timer_base_t *base;
unsigned long flags;
int ret = 0;
*/
int try_to_del_timer_sync(struct timer_list *timer)
{
- tvec_base_t *base;
+ timer_base_t *base;
unsigned long flags;
int ret = -1;
struct timer_list *tmp;
tmp = list_entry(curr, struct timer_list, entry);
- BUG_ON(tmp->base != base);
+ BUG_ON(tmp->base != &base->t_base);
curr = curr->next;
internal_add_timer(base, tmp);
}
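Since every entry was re-added by internal_add_timer() above, cascade() can finish by resetting the emptied bucket wholesale instead of detaching entries one by one; presumably the tail (not in the hunk) runs along these lines:

	INIT_LIST_HEAD(head);
	return index;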
{
struct timer_list *timer;
- spin_lock_irq(&base->lock);
+ spin_lock_irq(&base->t_base.lock);
while (time_after_eq(jiffies, base->timer_jiffies)) {
struct list_head work_list = LIST_HEAD_INIT(work_list);
struct list_head *head = &work_list;
set_running_timer(base, timer);
detach_timer(timer, 1);
- spin_unlock_irq(&base->lock);
+ spin_unlock_irq(&base->t_base.lock);
{
int preempt_count = preempt_count();
fn(data);
BUG();
}
}
- spin_lock_irq(&base->lock);
+ spin_lock_irq(&base->t_base.lock);
}
}
set_running_timer(base, NULL);
- spin_unlock_irq(&base->lock);
+ spin_unlock_irq(&base->t_base.lock);
}
#ifdef CONFIG_NO_IDLE_HZ
}
hr_expires += jiffies;
- base = __get_cpu_var(tvec_bases);
- spin_lock(&base->lock);
+ base = &__get_cpu_var(tvec_bases);
+ spin_lock(&base->t_base.lock);
expires = base->timer_jiffies + (LONG_MAX >> 1);
list = NULL;
expires = nte->expires;
}
}
- spin_unlock(&base->lock);
-
- /*
- * It can happen that other CPUs service timer IRQs and increment
- * jiffies, but we have not yet got a local timer tick to process
- * the timer wheels. In that case, the expiry time can be before
- * jiffies, but since the high-resolution timer here is relative to
- * jiffies, the default expression when high-resolution timers are
- * not active,
- *
- * time_before(MAX_JIFFY_OFFSET + jiffies, expires)
- *
- * would falsely evaluate to true. If that is the case, just
- * return jiffies so that we can immediately fire the local timer
- */
- if (time_before(expires, jiffies))
- return jiffies;
+ spin_unlock(&base->t_base.lock);
if (time_before(hr_expires, expires))
return hr_expires;
/*
* Compute the frequency estimate and additional phase adjustment due
- * to frequency error for the next second.
+ * to frequency error for the next second. When the PPS signal is
+ * engaged, gnaw on the watchdog counter and update the frequency
+ * computed by the pll and the PPS signal.
*/
- ltemp = time_freq;
+ pps_valid++;
+ if (pps_valid == PPS_VALID) { /* PPS signal lost */
+ pps_jitter = MAXTIME;
+ pps_stabil = MAXFREQ;
+ time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
+ STA_PPSWANDER | STA_PPSERROR);
+ }
+ ltemp = time_freq + pps_freq;
time_adj += shift_right(ltemp, (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));
#if HZ == 100
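shift_right() above is the sign-safe arithmetic shift helper defined near the top of kernel/timer.c; for reference, a sketch reproduced from memory (treat the exact form as assumed):

	#define shift_right(x, s) ({		\
		__typeof__(x) __x = (x);	\
		__typeof__(s) __s = (s);	\
		__x < 0 ? -(-__x >> __s) : __x >> __s; \
	})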
*/
static unsigned long count_active_tasks(void)
{
- return nr_active() * FIXED_1;
+ return (nr_running() + nr_uninterruptible()) * FIXED_1;
}
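The FIXED_1 scaling keeps the count in the 11-bit fixed-point format the load average uses; a sketch of the consumer, assuming the standard CALC_LOAD/EXP_* definitions from include/linux/sched.h:

	/* in calc_load(), roughly: */
	unsigned long active_tasks = count_active_tasks();
	CALC_LOAD(avenrun[0], EXP_1, active_tasks);	/* 1-min EWMA */
	CALC_LOAD(avenrun[1], EXP_5, active_tasks);	/* 5-min */
	CALC_LOAD(avenrun[2], EXP_15, active_tasks);	/* 15-min */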
/*
*/
static void run_timer_softirq(struct softirq_action *h)
{
- tvec_base_t *base = __get_cpu_var(tvec_bases);
+ tvec_base_t *base = &__get_cpu_var(tvec_bases);
hrtimer_run_queues();
if (time_after_eq(jiffies, base->timer_jiffies))
void run_local_timers(void)
{
raise_softirq(TIMER_SOFTIRQ);
- softlockup_tick();
}
/*
/* prevent loading jiffies before storing new jiffies_64 value. */
barrier();
update_times();
+ softlockup_tick(regs);
}
#ifdef __ARCH_WANT_SYS_ALARM
*/
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
- return alarm_setitimer(seconds);
+ struct itimerval it_new, it_old;
+ unsigned int oldalarm;
+
+ it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
+ it_new.it_value.tv_sec = seconds;
+ it_new.it_value.tv_usec = 0;
+ do_setitimer(ITIMER_REAL, &it_new, &it_old);
+ oldalarm = it_old.it_value.tv_sec;
+ /* ehhh.. We can't return 0 if we have an alarm pending.. */
+ /* And we'd better return too much than too little anyway */
+ if ((!oldalarm && it_old.it_value.tv_usec) || it_old.it_value.tv_usec >= 500000)
+ oldalarm++;
+ return oldalarm;
}
#endif
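Worked example of the rounding in sys_alarm() above: with 2.6s left on the old timer, oldalarm is 2 and tv_usec is 600000 >= 500000, so 3 is returned; with only 0.4s left, oldalarm is 0 but tv_usec is nonzero, so 1 is returned instead of 0, which would wrongly suggest no alarm had been pending.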
}
/*
- * Accessing ->group_leader->real_parent is not SMP-safe, it could
- * change from under us. However, rather than getting any lock
- * we can use an optimistic algorithm: get the parent
- * pid, and go back and check that the parent is still
- * the same. If it has changed (which is extremely unlikely
- * indeed), we just try again..
- *
- * NOTE! This depends on the fact that even if we _do_
- * get an old value of "parent", we can happily dereference
- * the pointer (it was and remains a dereferencable kernel pointer
- * no matter what): we just can't necessarily trust the result
- * until we know that the parent pointer is valid.
- *
- * NOTE2: ->group_leader never changes from under us.
+ * Accessing ->real_parent is not SMP-safe, it could
+ * change from under us. However, we can use a stale
+ * value of ->real_parent under rcu_read_lock(), see
+ * release_task()->call_rcu(delayed_put_task_struct).
*/
asmlinkage long sys_getppid(void)
{
int pid;
- struct task_struct *me = current;
- struct task_struct *parent;
- parent = me->group_leader->real_parent;
- for (;;) {
- pid = parent->tgid;
-#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
-{
- struct task_struct *old = parent;
+ rcu_read_lock();
+ pid = rcu_dereference(current->real_parent)->tgid;
+ rcu_read_unlock();
- /*
- * Make sure we read the pid before re-reading the
- * parent pointer:
- */
- smp_rmb();
- parent = me->group_leader->real_parent;
- if (old != parent)
- continue;
-}
-#endif
- break;
- }
return vx_map_pid(pid);
}
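The release side that makes the stale ->real_parent dereference safe is the RCU-deferred task_struct free mentioned in the comment; sketched from release_task() (exact form assumed):

	/* the task_struct is not freed until a grace period elapses, so a
	 * reader inside rcu_read_lock() can still dereference a pointer
	 * that release_task() has already unhooked:
	 */
	call_rcu(&p->rcu, delayed_put_task_struct);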
*ppid = sys_getppid();
return sys_getpid();
}
-
+
#else /* _alpha_ */
asmlinkage long sys_getuid(void)
return 0;
}
-static int __devinit init_timers_cpu(int cpu)
+static void __devinit init_timers_cpu(int cpu)
{
int j;
tvec_base_t *base;
- static char __devinitdata tvec_base_done[NR_CPUS];
- if (!tvec_base_done[cpu]) {
- static char boot_done;
-
- if (boot_done) {
- /*
- * The APs use this path later in boot
- */
- base = kmalloc_node(sizeof(*base), GFP_KERNEL,
- cpu_to_node(cpu));
- if (!base)
- return -ENOMEM;
- memset(base, 0, sizeof(*base));
- per_cpu(tvec_bases, cpu) = base;
- } else {
- /*
- * This is for the boot CPU - we use compile-time
- * static initialisation because per-cpu memory isn't
- * ready yet and because the memory allocators are not
- * initialised either.
- */
- boot_done = 1;
- base = &boot_tvec_bases;
- }
- tvec_base_done[cpu] = 1;
- } else {
- base = per_cpu(tvec_bases, cpu);
- }
-
- spin_lock_init(&base->lock);
+ base = &per_cpu(tvec_bases, cpu);
+ spin_lock_init(&base->t_base.lock);
for (j = 0; j < TVN_SIZE; j++) {
INIT_LIST_HEAD(base->tv5.vec + j);
INIT_LIST_HEAD(base->tv4.vec + j);
INIT_LIST_HEAD(base->tv3.vec + j);
INIT_LIST_HEAD(base->tv2.vec + j);
}
for (j = 0; j < TVR_SIZE; j++)
INIT_LIST_HEAD(base->tv1.vec + j);
base->timer_jiffies = jiffies;
- return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
while (!list_empty(head)) {
timer = list_entry(head->next, struct timer_list, entry);
detach_timer(timer, 0);
- timer->base = new_base;
+ timer->base = &new_base->t_base;
internal_add_timer(new_base, timer);
}
}
int i;
BUG_ON(cpu_online(cpu));
- old_base = per_cpu(tvec_bases, cpu);
- new_base = get_cpu_var(tvec_bases);
+ old_base = &per_cpu(tvec_bases, cpu);
+ new_base = &get_cpu_var(tvec_bases);
local_irq_disable();
- spin_lock(&new_base->lock);
- spin_lock(&old_base->lock);
-
- BUG_ON(old_base->running_timer);
+ spin_lock(&new_base->t_base.lock);
+ spin_lock(&old_base->t_base.lock);
+ if (old_base->t_base.running_timer)
+ BUG();
for (i = 0; i < TVR_SIZE; i++)
migrate_timer_list(new_base, old_base->tv1.vec + i);
for (i = 0; i < TVN_SIZE; i++) {
migrate_timer_list(new_base, old_base->tv5.vec + i);
}
- spin_unlock(&old_base->lock);
- spin_unlock(&new_base->lock);
+ spin_unlock(&old_base->t_base.lock);
+ spin_unlock(&new_base->t_base.lock);
local_irq_enable();
put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int timer_cpu_notify(struct notifier_block *self,
+static int __devinit timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
switch(action) {
case CPU_UP_PREPARE:
- if (init_timers_cpu(cpu) < 0)
- return NOTIFY_BAD;
+ init_timers_cpu(cpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
return NOTIFY_OK;
}
-static struct notifier_block timers_nb = {
+static struct notifier_block __devinitdata timers_nb = {
.notifier_call = timer_cpu_notify,
};
*/
if (jiffies % INTERPOLATOR_ADJUST == 0)
{
- if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
+ if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
time_interpolator->nsec_per_cyc--;
if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
time_interpolator->nsec_per_cyc++;
unsigned long flags;
/* Sanity check */
- BUG_ON(ti->frequency == 0 || ti->mask == 0);
+ if (ti->frequency == 0 || ti->mask == 0)
+ BUG();
ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
spin_lock(&time_interpolator_lock);