Merge to Fedora kernel-2.6.17-1.2142_FC4 patched with stable patch-2.6.17.13-vs2...
diff --git a/kernel/timer.c b/kernel/timer.c
index 08cec6a..2f8cdbe 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
 #include <linux/thread_info.h>
 #include <linux/time.h>
 #include <linux/jiffies.h>
+#include <linux/posix-timers.h>
 #include <linux/cpu.h>
+#include <linux/syscalls.h>
+#include <linux/delay.h>
+#include <linux/vs_cvirt.h>
+#include <linux/vserver/sched.h>
 
 #include <asm/uaccess.h>
+#include <asm/unistd.h>
 #include <asm/div64.h>
 #include <asm/timex.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_TIME_INTERPOLATION
+static void time_interpolator_update(long delta_nsec);
+#else
+#define time_interpolator_update(x)
+#endif
+
+u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
+
+EXPORT_SYMBOL(jiffies_64);
 
 /*
  * per-CPU timer vector definitions:
  */
-#define TVN_BITS 6
-#define TVR_BITS 8
+#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
+#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
 #define TVN_SIZE (1 << TVN_BITS)
 #define TVR_SIZE (1 << TVR_BITS)
 #define TVN_MASK (TVN_SIZE - 1)
@@ -56,8 +73,8 @@ typedef struct tvec_root_s {
 
 struct tvec_t_base_s {
        spinlock_t lock;
-       unsigned long timer_jiffies;
        struct timer_list *running_timer;
+       unsigned long timer_jiffies;
        tvec_root_t tv1;
        tvec_t tv2;
        tvec_t tv3;
@@ -67,6 +84,10 @@ struct tvec_t_base_s {
 
 typedef struct tvec_t_base_s tvec_base_t;
 
+tvec_base_t boot_tvec_bases;
+EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
+
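/*
 * Annotation (a sketch, not part of the patch): after this change every
 * CPU's tvec_bases pointer initially refers to the statically allocated
 * boot_tvec_bases; init_timers_cpu() further down replaces it for each
 * secondary CPU with its own kmalloc_node()'d base once the allocators
 * are up, so e.g.:
 *
 *        tvec_base_t *base = per_cpu(tvec_bases, cpu);
 *        // boot CPU: &boot_tvec_bases; APs: their own allocation
 */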
 static inline void set_running_timer(tvec_base_t *base,
                                        struct timer_list *timer)
 {
@@ -75,34 +96,6 @@ static inline void set_running_timer(tvec_base_t *base,
 #endif
 }
 
-/* Fake initialization */
-static DEFINE_PER_CPU(tvec_base_t, tvec_bases) = { SPIN_LOCK_UNLOCKED };
-
-static void check_timer_failed(struct timer_list *timer)
-{
-       static int whine_count;
-       if (whine_count < 16) {
-               whine_count++;
-               printk("Uninitialised timer!\n");
-               printk("This is just a warning.  Your computer is OK\n");
-               printk("function=0x%p, data=0x%lx\n",
-                       timer->function, timer->data);
-               dump_stack();
-       }
-       /*
-        * Now fix it up
-        */
-       spin_lock_init(&timer->lock);
-       timer->magic = TIMER_MAGIC;
-}
-
-static inline void check_timer(struct timer_list *timer)
-{
-       if (timer->magic != TIMER_MAGIC)
-               check_timer_failed(timer);
-}
-
-
 static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
 {
        unsigned long expires = timer->expires;
@@ -145,65 +138,99 @@ static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
        list_add_tail(&timer->entry, vec);
 }
 
+/***
+ * init_timer - initialize a timer.
+ * @timer: the timer to be initialized
+ *
+ * init_timer() must be done to a timer prior to calling *any* of the
+ * other timer functions.
+ */
+void fastcall init_timer(struct timer_list *timer)
+{
+       timer->entry.next = NULL;
+       timer->base = per_cpu(tvec_bases, raw_smp_processor_id());
+}
+EXPORT_SYMBOL(init_timer);
+
+static inline void detach_timer(struct timer_list *timer,
+                                       int clear_pending)
+{
+       struct list_head *entry = &timer->entry;
+
+       __list_del(entry->prev, entry->next);
+       if (clear_pending)
+               entry->next = NULL;
+       entry->prev = LIST_POISON2;
+}
+
+/*
+ * We are using hashed locking: holding per_cpu(tvec_bases).lock
+ * means that all timers which are tied to this base via timer->base are
+ * locked, and the base itself is locked too.
+ *
+ * So __run_timers/migrate_timers can safely modify all timers which could
+ * be found on ->tvX lists.
+ *
+ * When the timer's base is locked, and the timer removed from list, it is
+ * possible to set timer->base = NULL and drop the lock: the timer remains
+ * locked.
+ */
+static tvec_base_t *lock_timer_base(struct timer_list *timer,
+                                       unsigned long *flags)
+{
+       tvec_base_t *base;
+
+       for (;;) {
+               base = timer->base;
+               if (likely(base != NULL)) {
+                       spin_lock_irqsave(&base->lock, *flags);
+                       if (likely(base == timer->base))
+                               return base;
+                       /* The timer has migrated to another CPU */
+                       spin_unlock_irqrestore(&base->lock, *flags);
+               }
+               cpu_relax();
+       }
+}
+
 int __mod_timer(struct timer_list *timer, unsigned long expires)
 {
-       tvec_base_t *old_base, *new_base;
+       tvec_base_t *base, *new_base;
        unsigned long flags;
        int ret = 0;
 
        BUG_ON(!timer->function);
 
-       check_timer(timer);
+       base = lock_timer_base(timer, &flags);
+
+       if (timer_pending(timer)) {
+               detach_timer(timer, 0);
+               ret = 1;
+       }
 
-       spin_lock_irqsave(&timer->lock, flags);
-       new_base = &__get_cpu_var(tvec_bases);
-repeat:
-       old_base = timer->base;
+       new_base = __get_cpu_var(tvec_bases);
 
-       /*
-        * Prevent deadlocks via ordering by old_base < new_base.
-        */
-       if (old_base && (new_base != old_base)) {
-               if (old_base < new_base) {
-                       spin_lock(&new_base->lock);
-                       spin_lock(&old_base->lock);
-               } else {
-                       spin_lock(&old_base->lock);
-                       spin_lock(&new_base->lock);
-               }
+       if (base != new_base) {
                /*
-                * The timer base might have been cancelled while we were
-                * trying to take the lock(s):
+                * We are trying to schedule the timer on the local CPU.
+                * However we can't change the timer's base while it is running,
+                * otherwise del_timer_sync() can't detect that the timer's
+                * handler has not yet finished. This also guarantees that
+                * the timer is serialized wrt itself.
                 */
-               if (timer->base != old_base) {
-                       spin_unlock(&new_base->lock);
-                       spin_unlock(&old_base->lock);
-                       goto repeat;
-               }
-       } else {
-               spin_lock(&new_base->lock);
-               if (timer->base != old_base) {
-                       spin_unlock(&new_base->lock);
-                       goto repeat;
+               if (likely(base->running_timer != timer)) {
+                       /* See the comment in lock_timer_base() */
+                       timer->base = NULL;
+                       spin_unlock(&base->lock);
+                       base = new_base;
+                       spin_lock(&base->lock);
+                       timer->base = base;
                }
        }
 
-       /*
-        * Delete the previous timeout (if there was any), and install
-        * the new one:
-        */
-       if (old_base) {
-               list_del(&timer->entry);
-               ret = 1;
-       }
        timer->expires = expires;
-       internal_add_timer(new_base, timer);
-       timer->base = new_base;
-
-       if (old_base && (new_base != old_base))
-               spin_unlock(&old_base->lock);
-       spin_unlock(&new_base->lock);
-       spin_unlock_irqrestore(&timer->lock, flags);
+       internal_add_timer(base, timer);
+       spin_unlock_irqrestore(&base->lock, flags);
 
        return ret;
 }
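/*
 * Usage sketch (hypothetical "mydrv" names, not part of this patch):
 * the 2.6.17-era API that __mod_timer()/mod_timer() back.
 */
static struct timer_list mydrv_timer;

static void mydrv_timeout(unsigned long data)
{
        printk(KERN_DEBUG "mydrv: timer fired, data=%lu\n", data);
}

static void mydrv_arm(void)
{
        init_timer(&mydrv_timer);
        mydrv_timer.function = mydrv_timeout;
        mydrv_timer.data = 0;
        mydrv_timer.expires = jiffies + HZ;     /* roughly one second out */
        add_timer(&mydrv_timer);

        /* Re-arm; returns 1 if the timer was still pending, 0 otherwise. */
        mod_timer(&mydrv_timer, jiffies + 2 * HZ);
}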
@@ -219,19 +246,17 @@ EXPORT_SYMBOL(__mod_timer);
  */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
-       tvec_base_t *base = &per_cpu(tvec_bases, cpu);
+       tvec_base_t *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;
-  
-       BUG_ON(timer_pending(timer) || !timer->function);
-
-       check_timer(timer);
 
+       BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
-       internal_add_timer(base, timer);
        timer->base = base;
+       internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
 }
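/*
 * Sketch (reusing the hypothetical mydrv names from above): pinning a
 * timer to a particular CPU; the timer must be initialized and must not
 * already be pending, per the BUG_ON above.
 */
static void mydrv_arm_on_cpu0(void)
{
        init_timer(&mydrv_timer);
        mydrv_timer.function = mydrv_timeout;
        mydrv_timer.data = 0;
        mydrv_timer.expires = jiffies + HZ;
        add_timer_on(&mydrv_timer, 0);          /* queue on CPU 0's base */
}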
 
+
 /***
  * mod_timer - modify a timer's timeout
  * @timer: the timer to be modified
@@ -255,8 +280,6 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 {
        BUG_ON(!timer->function);
 
-       check_timer(timer);
-
        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
@@ -283,30 +306,53 @@ EXPORT_SYMBOL(mod_timer);
  */
 int del_timer(struct timer_list *timer)
 {
-       unsigned long flags;
        tvec_base_t *base;
+       unsigned long flags;
+       int ret = 0;
 
-       check_timer(timer);
-
-repeat:
-       base = timer->base;
-       if (!base)
-               return 0;
-       spin_lock_irqsave(&base->lock, flags);
-       if (base != timer->base) {
+       if (timer_pending(timer)) {
+               base = lock_timer_base(timer, &flags);
+               if (timer_pending(timer)) {
+                       detach_timer(timer, 1);
+                       ret = 1;
+               }
                spin_unlock_irqrestore(&base->lock, flags);
-               goto repeat;
        }
-       list_del(&timer->entry);
-       timer->base = NULL;
-       spin_unlock_irqrestore(&base->lock, flags);
 
-       return 1;
+       return ret;
 }
 
 EXPORT_SYMBOL(del_timer);
 
 #ifdef CONFIG_SMP
+/*
+ * This function tries to deactivate a timer. On successful exit (ret >= 0)
+ * the timer is not queued and its handler is not running on any CPU.
+ *
+ * It must not be called from interrupt contexts.
+ */
+int try_to_del_timer_sync(struct timer_list *timer)
+{
+       tvec_base_t *base;
+       unsigned long flags;
+       int ret = -1;
+
+       base = lock_timer_base(timer, &flags);
+
+       if (base->running_timer == timer)
+               goto out;
+
+       ret = 0;
+       if (timer_pending(timer)) {
+               detach_timer(timer, 1);
+               ret = 1;
+       }
+out:
+       spin_unlock_irqrestore(&base->lock, flags);
+
+       return ret;
+}
+
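/*
 * Caller-pattern sketch (hypothetical mydrv_lock, not from this patch):
 * when the caller holds a lock that the timer handler also takes, plain
 * del_timer_sync() would deadlock, so drop the lock between attempts and
 * let a running handler finish.
 */
static DEFINE_SPINLOCK(mydrv_lock);

static void mydrv_stop(void)
{
        spin_lock(&mydrv_lock);
        for (;;) {
                int ret = try_to_del_timer_sync(&mydrv_timer);
                if (ret >= 0)
                        break;          /* 1: deactivated, 0: was not pending */
                spin_unlock(&mydrv_lock);
                cpu_relax();            /* give the handler a chance to run */
                spin_lock(&mydrv_lock);
        }
        spin_unlock(&mydrv_lock);
}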
 /***
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
@@ -317,36 +363,20 @@ EXPORT_SYMBOL(del_timer);
  *
  * Synchronization rules: callers must prevent restarting of the timer,
  * otherwise this function is meaningless. It must not be called from
- * interrupt contexts. Upon exit the timer is not queued and the handler
- * is not running on any CPU.
+ * interrupt contexts. The caller must not hold locks which would prevent
+ * completion of the timer's handler. The timer's handler must not call
+ * add_timer_on(). Upon exit the timer is not queued and the handler is
+ * not running on any CPU.
  *
  * The function returns whether it has deactivated a pending timer or not.
  */
 int del_timer_sync(struct timer_list *timer)
 {
-       tvec_base_t *base;
-       int i, ret = 0;
-
-       check_timer(timer);
-
-del_again:
-       ret += del_timer(timer);
-
-       for_each_cpu(i) {
-               base = &per_cpu(tvec_bases, i);
-               if (base->running_timer == timer) {
-                       while (base->running_timer == timer) {
-                               cpu_relax();
-                               preempt_check_resched();
-                       }
-                       break;
-               }
+       for (;;) {
+               int ret = try_to_del_timer_sync(timer);
+               if (ret >= 0)
+                       return ret;
        }
-       smp_rmb();
-       if (timer_pending(timer))
-               goto del_again;
-
-       return ret;
 }
 
 EXPORT_SYMBOL(del_timer_sync);
@@ -405,8 +435,7 @@ static inline void __run_timers(tvec_base_t *base)
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies; 
                list_splice_init(base->tv1.vec + index, &work_list);
-repeat:
-               if (!list_empty(head)) {
+               while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;
 
@@ -414,14 +443,22 @@ repeat:
                        fn = timer->function;
                        data = timer->data;
 
-                       list_del(&timer->entry);
                        set_running_timer(base, timer);
-                       smp_wmb();
-                       timer->base = NULL;
+                       detach_timer(timer, 1);
                        spin_unlock_irq(&base->lock);
-                       fn(data);
+                       {
+                               int preempt_count = preempt_count();
+                               fn(data);
+                               if (preempt_count != preempt_count()) {
+                                       printk(KERN_WARNING "huh, entered %p "
+                                              "with preempt_count %08x, exited"
+                                              " with %08x?\n",
+                                              fn, preempt_count,
+                                              preempt_count());
+                                       BUG();
+                               }
+                       }
                        spin_lock_irq(&base->lock);
-                       goto repeat;
                }
        }
        set_running_timer(base, NULL);
@@ -440,13 +477,25 @@ unsigned long next_timer_interrupt(void)
        struct list_head *list;
        struct timer_list *nte;
        unsigned long expires;
+       unsigned long hr_expires = MAX_JIFFY_OFFSET;
+       ktime_t hr_delta;
        tvec_t *varray[4];
        int i, j;
 
-       base = &__get_cpu_var(tvec_bases);
+       hr_delta = hrtimer_get_next_event();
+       if (hr_delta.tv64 != KTIME_MAX) {
+               struct timespec tsdelta;
+               tsdelta = ktime_to_timespec(hr_delta);
+               hr_expires = timespec_to_jiffies(&tsdelta);
+               if (hr_expires < 3)
+                       return hr_expires + jiffies;
+       }
+       hr_expires += jiffies;
+
+       base = __get_cpu_var(tvec_bases);
        spin_lock(&base->lock);
        expires = base->timer_jiffies + (LONG_MAX >> 1);
-       list = 0;
+       list = NULL;
 
        /* Look for timer events in tv1. */
        j = base->timer_jiffies & TVR_MASK;
@@ -493,6 +542,26 @@ found:
                }
        }
        spin_unlock(&base->lock);
+
+       /*
+        * It can happen that other CPUs service timer IRQs and increment
+        * jiffies, but we have not yet got a local timer tick to process
+        * the timer wheels.  In that case, the expiry time can be before
+        * jiffies, but since the high-resolution timer here is relative to
+        * jiffies, the default expression when high-resolution timers are
+        * not active,
+        *
+        *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
+        *
+        * would falsely evaluate to true.  If that is the case, just
+        * return jiffies so that we can immediately fire the local timer
+        */
+       if (time_before(expires, jiffies))
+               return jiffies;
+
+       if (time_before(hr_expires, expires))
+               return hr_expires;
+
        return expires;
 }
 #endif
@@ -508,7 +577,7 @@ unsigned long tick_nsec = TICK_NSEC;                /* ACTHZ period (nsec) */
 /* 
  * The current time 
  * wall_to_monotonic is what we need to add to xtime (or xtime corrected 
- * for sub jiffie times) to get to monotonic time.  Monotonic is pegged at zero
+ * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
  * at zero at system boot time, so wall_to_monotonic will be negative,
  * however, we will ALWAYS keep the tv_nsec part positive so we can use
  * the usual normalization.
@@ -534,10 +603,10 @@ long time_tolerance = MAXFREQ;            /* frequency tolerance (ppm)    */
 long time_precision = 1;               /* clock precision (us)         */
 long time_maxerror = NTP_PHASE_LIMIT;  /* maximum error (us)           */
 long time_esterror = NTP_PHASE_LIMIT;  /* estimated error (us)         */
-long time_phase;                       /* phase offset (scaled us)     */
+static long time_phase;                        /* phase offset (scaled us)     */
 long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
                                        /* frequency offset (scaled ppm)*/
-long time_adj;                         /* tick adjust (scaled 1 / HZ)  */
+static long time_adj;                  /* tick adjust (scaled 1 / HZ)  */
 long time_reftime;                     /* time at last adjustment (s)  */
 long time_adjust;
 long time_next_adjust;
@@ -553,169 +622,153 @@ long time_next_adjust;
  */
 static void second_overflow(void)
 {
-    long ltemp;
-
-    /* Bump the maxerror field */
-    time_maxerror += time_tolerance >> SHIFT_USEC;
-    if ( time_maxerror > NTP_PHASE_LIMIT ) {
-       time_maxerror = NTP_PHASE_LIMIT;
-       time_status |= STA_UNSYNC;
-    }
-
-    /*
-     * Leap second processing. If in leap-insert state at
-     * the end of the day, the system clock is set back one
-     * second; if in leap-delete state, the system clock is
-     * set ahead one second. The microtime() routine or
-     * external clock driver will insure that reported time
-     * is always monotonic. The ugly divides should be
-     * replaced.
-     */
-    switch (time_state) {
-
-    case TIME_OK:
-       if (time_status & STA_INS)
-           time_state = TIME_INS;
-       else if (time_status & STA_DEL)
-           time_state = TIME_DEL;
-       break;
-
-    case TIME_INS:
-       if (xtime.tv_sec % 86400 == 0) {
-           xtime.tv_sec--;
-           wall_to_monotonic.tv_sec++;
-           time_interpolator_update(-NSEC_PER_SEC);
-           time_state = TIME_OOP;
-           clock_was_set();
-           printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
+       long ltemp;
+
+       /* Bump the maxerror field */
+       time_maxerror += time_tolerance >> SHIFT_USEC;
+       if (time_maxerror > NTP_PHASE_LIMIT) {
+               time_maxerror = NTP_PHASE_LIMIT;
+               time_status |= STA_UNSYNC;
        }
-       break;
-
-    case TIME_DEL:
-       if ((xtime.tv_sec + 1) % 86400 == 0) {
-           xtime.tv_sec++;
-           wall_to_monotonic.tv_sec--;
-           time_interpolator_update(NSEC_PER_SEC);
-           time_state = TIME_WAIT;
-           clock_was_set();
-           printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
+
+       /*
+        * Leap second processing. If in leap-insert state at the end of the
+        * day, the system clock is set back one second; if in leap-delete
+        * state, the system clock is set ahead one second. The microtime()
+        * routine or external clock driver will ensure that reported time is
+        * always monotonic. The ugly divides should be replaced.
+        */
+       switch (time_state) {
+       case TIME_OK:
+               if (time_status & STA_INS)
+                       time_state = TIME_INS;
+               else if (time_status & STA_DEL)
+                       time_state = TIME_DEL;
+               break;
+       case TIME_INS:
+               if (xtime.tv_sec % 86400 == 0) {
+                       xtime.tv_sec--;
+                       wall_to_monotonic.tv_sec++;
+                       /*
+                        * The timer interpolator will make time change
+                        * gradually instead of an immediate jump by one second
+                        */
+                       time_interpolator_update(-NSEC_PER_SEC);
+                       time_state = TIME_OOP;
+                       clock_was_set();
+                       printk(KERN_NOTICE "Clock: inserting leap second "
+                                       "23:59:60 UTC\n");
+               }
+               break;
+       case TIME_DEL:
+               if ((xtime.tv_sec + 1) % 86400 == 0) {
+                       xtime.tv_sec++;
+                       wall_to_monotonic.tv_sec--;
+                       /*
+                        * Use of time interpolator for a gradual change of
+                        * time
+                        */
+                       time_interpolator_update(NSEC_PER_SEC);
+                       time_state = TIME_WAIT;
+                       clock_was_set();
+                       printk(KERN_NOTICE "Clock: deleting leap second "
+                                       "23:59:59 UTC\n");
+               }
+               break;
+       case TIME_OOP:
+               time_state = TIME_WAIT;
+               break;
+       case TIME_WAIT:
+               if (!(time_status & (STA_INS | STA_DEL)))
+                       time_state = TIME_OK;
        }
-       break;
-
-    case TIME_OOP:
-       time_state = TIME_WAIT;
-       break;
-
-    case TIME_WAIT:
-       if (!(time_status & (STA_INS | STA_DEL)))
-           time_state = TIME_OK;
-    }
-
-    /*
-     * Compute the phase adjustment for the next second. In
-     * PLL mode, the offset is reduced by a fixed factor
-     * times the time constant. In FLL mode the offset is
-     * used directly. In either mode, the maximum phase
-     * adjustment for each second is clamped so as to spread
-     * the adjustment over not more than the number of
-     * seconds between updates.
-     */
-    if (time_offset < 0) {
-       ltemp = -time_offset;
-       if (!(time_status & STA_FLL))
-           ltemp >>= SHIFT_KG + time_constant;
-       if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
-           ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
-       time_offset += ltemp;
-       time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
-    } else {
+
+       /*
+        * Compute the phase adjustment for the next second. In PLL mode, the
+        * offset is reduced by a fixed factor times the time constant. In FLL
+        * mode the offset is used directly. In either mode, the maximum phase
+        * adjustment for each second is clamped so as to spread the adjustment
+        * over not more than the number of seconds between updates.
+        */
        ltemp = time_offset;
        if (!(time_status & STA_FLL))
-           ltemp >>= SHIFT_KG + time_constant;
-       if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
-           ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
+               ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
+       ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
+       ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
        time_offset -= ltemp;
        time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
-    }
-
-    /*
-     * Compute the frequency estimate and additional phase
-     * adjustment due to frequency error for the next
-     * second. When the PPS signal is engaged, gnaw on the
-     * watchdog counter and update the frequency computed by
-     * the pll and the PPS signal.
-     */
-    pps_valid++;
-    if (pps_valid == PPS_VALID) {      /* PPS signal lost */
-       pps_jitter = MAXTIME;
-       pps_stabil = MAXFREQ;
-       time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
-                        STA_PPSWANDER | STA_PPSERROR);
-    }
-    ltemp = time_freq + pps_freq;
-    if (ltemp < 0)
-       time_adj -= -ltemp >>
-           (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
-    else
-       time_adj += ltemp >>
-           (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
+
+       /*
+        * Compute the frequency estimate and additional phase adjustment due
+        * to frequency error for the next second.
+        */
+       ltemp = time_freq;
+       time_adj += shift_right(ltemp, (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));
 
 #if HZ == 100
-    /* Compensate for (HZ==100) != (1 << SHIFT_HZ).
-     * Add 25% and 3.125% to get 128.125; => only 0.125% error (p. 14)
-     */
-    if (time_adj < 0)
-       time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
-    else
-       time_adj += (time_adj >> 2) + (time_adj >> 5);
+       /*
+        * Compensate for (HZ==100) != (1 << SHIFT_HZ).  Add 25% and 3.125% to
+        * get 128.125; => only 0.125% error (p. 14)
+        */
+       time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
+#endif
+#if HZ == 250
+       /*
+        * Compensate for (HZ==250) != (1 << SHIFT_HZ).  Add 1.5625% and
+        * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
+        */
+       time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
 #endif
 #if HZ == 1000
-    /* Compensate for (HZ==1000) != (1 << SHIFT_HZ).
-     * Add 1.5625% and 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
-     */
-    if (time_adj < 0)
-       time_adj -= (-time_adj >> 6) + (-time_adj >> 7);
-    else
-       time_adj += (time_adj >> 6) + (time_adj >> 7);
+       /*
+        * Compensate for (HZ==1000) != (1 << SHIFT_HZ).  Add 1.5625% and
+        * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
+        */
+       time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
 #endif
 }
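/*
 * For reference, a sketch of the shift_right() helper the rewritten code
 * above relies on (its definition is not in this file; in mainline of
 * this vintage it lives in <linux/timex.h>): an arithmetic right shift
 * that rounds toward zero for negative values, so one expression replaces
 * the old open-coded "if (x < 0) ... else ..." pairs.
 *
 *        #define shift_right(x, s) ({            \
 *                __typeof__(x) __x = (x);        \
 *                __typeof__(s) __s = (s);        \
 *                __x < 0 ? -(-__x >> __s) : __x >> __s; \
 *        })
 */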
 
+/*
+ * Returns how many microseconds we need to add to xtime this tick
+ * in doing an adjustment requested with adjtime.
+ */
+static long adjtime_adjustment(void)
+{
+       long time_adjust_step;
+
+       time_adjust_step = time_adjust;
+       if (time_adjust_step) {
+               /*
+                * We are doing an adjtime thing.  Prepare time_adjust_step to
+                * be within bounds.  Note that a positive time_adjust means we
+                * want the clock to run faster.
+                *
+                * Limit the amount of the step to be in the range
+                * -tickadj .. +tickadj
+                */
+               time_adjust_step = min(time_adjust_step, (long)tickadj);
+               time_adjust_step = max(time_adjust_step, (long)-tickadj);
+       }
+       return time_adjust_step;
+}
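/*
 * Worked example (assuming HZ=1000, where tickadj works out to 1 us per
 * tick): after adjtime() requests +5000 us, time_adjust == 5000.  Each
 * tick adjtime_adjustment() returns min(5000, 1) == 1, the caller
 * subtracts it from time_adjust, and delta_nsec below grows by 1000 ns,
 * so the clock gains one microsecond per tick until the full 5 ms has
 * been paid out.
 */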
+
 /* in the NTP reference this is called "hardclock()" */
 static void update_wall_time_one_tick(void)
 {
        long time_adjust_step, delta_nsec;
 
-       if ( (time_adjust_step = time_adjust) != 0 ) {
-           /* We are doing an adjtime thing. 
-            *
-            * Prepare time_adjust_step to be within bounds.
-            * Note that a positive time_adjust means we want the clock
-            * to run faster.
-            *
-            * Limit the amount of the step to be in the range
-            * -tickadj .. +tickadj
-            */
-            if (time_adjust > tickadj)
-               time_adjust_step = tickadj;
-            else if (time_adjust < -tickadj)
-               time_adjust_step = -tickadj;
-
-           /* Reduce by this step the amount of time left  */
-           time_adjust -= time_adjust_step;
-       }
+       time_adjust_step = adjtime_adjustment();
+       if (time_adjust_step)
+               /* Reduce by this step the amount of time left  */
+               time_adjust -= time_adjust_step;
        delta_nsec = tick_nsec + time_adjust_step * 1000;
        /*
         * Advance the phase, once it gets to one microsecond, then
         * advance the tick more.
         */
        time_phase += time_adj;
-       if (time_phase <= -FINENSEC) {
-               long ltemp = -time_phase >> (SHIFT_SCALE - 10);
-               time_phase += ltemp << (SHIFT_SCALE - 10);
-               delta_nsec -= ltemp;
-       }
-       else if (time_phase >= FINENSEC) {
-               long ltemp = time_phase >> (SHIFT_SCALE - 10);
+       if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
+               long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
                time_phase -= ltemp << (SHIFT_SCALE - 10);
                delta_nsec += ltemp;
        }
@@ -729,6 +782,22 @@ static void update_wall_time_one_tick(void)
        }
 }
 
+/*
+ * Return how long ticks are at the moment, that is, how much time
+ * update_wall_time_one_tick will add to xtime next time we call it
+ * (assuming no calls to do_adjtimex in the meantime).
+ * The return value is in fixed-point nanoseconds with SHIFT_SCALE-10
+ * bits to the right of the binary point.
+ * This function has no side-effects.
+ */
+u64 current_tick_length(void)
+{
+       long delta_nsec;
+
+       delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
+       return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
+}
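/*
 * Worked example: with HZ=1000 (tick_nsec around 999848 ns), no adjtime
 * in progress and time_adj == 0, this returns roughly
 * 999848 << (SHIFT_SCALE - 10); with SHIFT_SCALE == 22 that is the tick
 * length in nanoseconds carried with 12 fractional bits, the fixed-point
 * format consumers of current_tick_length() expect.
 */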
+
 /*
  * Using a loop looks inefficient, but "ticks" is
  * usually just one (we shouldn't be losing ticks,
@@ -741,67 +810,14 @@ static void update_wall_time(unsigned long ticks)
        do {
                ticks--;
                update_wall_time_one_tick();
-       } while (ticks);
-
-       if (xtime.tv_nsec >= 1000000000) {
-           xtime.tv_nsec -= 1000000000;
-           xtime.tv_sec++;
-           second_overflow();
-       }
-}
-
-static inline void do_process_times(struct task_struct *p,
-       unsigned long user, unsigned long system)
-{
-       unsigned long psecs;
-
-       psecs = (p->utime += user);
-       psecs += (p->stime += system);
-       if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
-               /* Send SIGXCPU every second.. */
-               if (!(psecs % HZ))
-                       send_sig(SIGXCPU, p, 1);
-               /* and SIGKILL when we go over max.. */
-               if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
-                       send_sig(SIGKILL, p, 1);
-       }
-}
-
-static inline void do_it_virt(struct task_struct * p, unsigned long ticks)
-{
-       unsigned long it_virt = p->it_virt_value;
-
-       if (it_virt) {
-               it_virt -= ticks;
-               if (!it_virt) {
-                       it_virt = p->it_virt_incr;
-                       send_sig(SIGVTALRM, p, 1);
-               }
-               p->it_virt_value = it_virt;
-       }
-}
-
-static inline void do_it_prof(struct task_struct *p)
-{
-       unsigned long it_prof = p->it_prof_value;
-
-       if (it_prof) {
-               if (--it_prof == 0) {
-                       it_prof = p->it_prof_incr;
-                       send_sig(SIGPROF, p, 1);
+               if (xtime.tv_nsec >= 1000000000) {
+                       xtime.tv_nsec -= 1000000000;
+                       xtime.tv_sec++;
+                       second_overflow();
                }
-               p->it_prof_value = it_prof;
-       }
+       } while (ticks);
 }
 
-void update_one_process(struct task_struct *p, unsigned long user,
-                       unsigned long system, int cpu)
-{
-       do_process_times(p, user, system);
-       do_it_virt(p, user);
-       do_it_prof(p);
-}      
-
 /*
  * Called from the timer interrupt handler to charge one tick to the current 
  * process.  user_tick is 1 if the tick is user time, 0 for system.
@@ -809,11 +825,18 @@ void update_one_process(struct task_struct *p, unsigned long user,
 void update_process_times(int user_tick)
 {
        struct task_struct *p = current;
-       int cpu = smp_processor_id(), system = user_tick ^ 1;
+       int cpu = smp_processor_id();
 
-       update_one_process(p, user_tick, system, cpu);
+       /* Note: this timer irq context must be accounted for as well. */
+       if (user_tick)
+               account_user_time(p, jiffies_to_cputime(1));
+       else
+               account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
        run_local_timers();
-       scheduler_tick(user_tick, system);
+       if (rcu_pending(cpu))
+               rcu_check_callbacks(cpu, user_tick);
+       scheduler_tick();
+       run_posix_cpu_timers(p);
 }
 
 /*
@@ -821,7 +844,7 @@ void update_process_times(int user_tick)
  */
 static unsigned long count_active_tasks(void)
 {
-       return (nr_running() + nr_uninterruptible()) * FIXED_1;
+       return nr_active() * FIXED_1;
 }
 
 /*
@@ -834,6 +857,8 @@ static unsigned long count_active_tasks(void)
  */
 unsigned long avenrun[3];
 
+EXPORT_SYMBOL(avenrun);
+
 /*
  * calc_load - given tick count, update the avenrun load estimates.
  * This is called while holding a write_lock on xtime_lock.
@@ -871,8 +896,9 @@ EXPORT_SYMBOL(xtime_lock);
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-       tvec_base_t *base = &__get_cpu_var(tvec_bases);
+       tvec_base_t *base = __get_cpu_var(tvec_bases);
 
+       hrtimer_run_queues();
        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
 }
@@ -883,6 +909,7 @@ static void run_timer_softirq(struct softirq_action *h)
 void run_local_timers(void)
 {
        raise_softirq(TIMER_SOFTIRQ);
+       softlockup_tick();
 }
 
 /*
@@ -910,15 +937,12 @@ static inline void update_times(void)
 void do_timer(struct pt_regs *regs)
 {
        jiffies_64++;
-#ifndef CONFIG_SMP
-       /* SMP process accounting uses the local APIC timer */
-
-       update_process_times(user_mode(regs));
-#endif
+       /* prevent loading jiffies before storing new jiffies_64 value. */
+       barrier();
        update_times();
 }
 
-#if !defined(__alpha__) && !defined(__ia64__)
+#ifdef __ARCH_WANT_SYS_ALARM
 
 /*
  * For backwards compatibility?  This can be done in libc so Alpha
@@ -926,29 +950,11 @@ void do_timer(struct pt_regs *regs)
  */
 asmlinkage unsigned long sys_alarm(unsigned int seconds)
 {
-       struct itimerval it_new, it_old;
-       unsigned int oldalarm;
-
-       it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
-       it_new.it_value.tv_sec = seconds;
-       it_new.it_value.tv_usec = 0;
-       do_setitimer(ITIMER_REAL, &it_new, &it_old);
-       oldalarm = it_old.it_value.tv_sec;
-       /* ehhh.. We can't return 0 if we have an alarm pending.. */
-       /* And we'd better return too much than too little anyway */
-       if ((!oldalarm && it_old.it_value.tv_usec) || it_old.it_value.tv_usec >= 500000)
-               oldalarm++;
-       return oldalarm;
+       return alarm_setitimer(seconds);
 }
 
 #endif
 
-#ifndef __alpha__
-
-/*
- * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
- * should be moved into arch/i386 instead?
- */
 
 /**
  * sys_getpid - return the thread group id of the current process
@@ -961,7 +967,7 @@ asmlinkage unsigned long sys_alarm(unsigned int seconds)
  */
 asmlinkage long sys_getpid(void)
 {
-       return current->tgid;
+       return vx_map_tgid(current->tgid);
 }
 
 /*
@@ -989,7 +995,7 @@ asmlinkage long sys_getppid(void)
        parent = me->group_leader->real_parent;
        for (;;) {
                pid = parent->tgid;
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
 {
                struct task_struct *old = parent;
 
@@ -997,7 +1003,7 @@ asmlinkage long sys_getppid(void)
                 * Make sure we read the pid before re-reading the
                 * parent pointer:
                 */
-               rmb();
+               smp_rmb();
                parent = me->group_leader->real_parent;
                if (old != parent)
                        continue;
@@ -1005,9 +1011,23 @@ asmlinkage long sys_getppid(void)
 #endif
                break;
        }
-       return pid;
+       return vx_map_pid(pid);
 }
 
+#ifdef __alpha__
+
+/*
+ * The Alpha uses getxpid, getxuid, and getxgid instead.
+ */
+
+asmlinkage long do_getxpid(long *ppid)
+{
+       *ppid = sys_getppid();
+       return sys_getpid();
+}
+
+#else /* __alpha__ */
+
 asmlinkage long sys_getuid(void)
 {
        /* Only we change this so SMP safe */
@@ -1093,8 +1113,8 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
                if (timeout < 0)
                {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
-                              "value %lx from %p\n", timeout,
-                              __builtin_return_address(0));
+                               "value %lx from %p\n", timeout,
+                               __builtin_return_address(0));
                        current->state = TASK_RUNNING;
                        goto out;
                }
@@ -1102,85 +1122,40 @@ fastcall signed long __sched schedule_timeout(signed long timeout)
 
        expire = timeout + jiffies;
 
-       init_timer(&timer);
-       timer.expires = expire;
-       timer.data = (unsigned long) current;
-       timer.function = process_timeout;
-
-       add_timer(&timer);
+       setup_timer(&timer, process_timeout, (unsigned long)current);
+       __mod_timer(&timer, expire);
        schedule();
-       del_timer_sync(&timer);
+       del_singleshot_timer_sync(&timer);
 
        timeout = expire - jiffies;
 
  out:
        return timeout < 0 ? 0 : timeout;
 }
-
 EXPORT_SYMBOL(schedule_timeout);
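/*
 * Usage sketch (hypothetical helper): schedule_timeout() only sleeps if
 * the caller has already set the task state, e.g. to wait up to 100 ms:
 */
static signed long mydrv_wait(void)
{
        set_current_state(TASK_INTERRUPTIBLE);
        /* returns 0 on full timeout, else the jiffies remaining */
        return schedule_timeout(msecs_to_jiffies(100));
}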
 
-/* Thread ID - the internal kernel "pid" */
-asmlinkage long sys_gettid(void)
+/*
+ * We can use __set_current_state() here because schedule_timeout() calls
+ * schedule() unconditionally.
+ */
+signed long __sched schedule_timeout_interruptible(signed long timeout)
 {
-       return current->pid;
+       __set_current_state(TASK_INTERRUPTIBLE);
+       return schedule_timeout(timeout);
 }
+EXPORT_SYMBOL(schedule_timeout_interruptible);
 
-static long __sched nanosleep_restart(struct restart_block *restart)
+signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 {
-       unsigned long expire = restart->arg0, now = jiffies;
-       struct timespec __user *rmtp = (struct timespec __user *) restart->arg1;
-       long ret;
-
-       /* Did it expire while we handled signals? */
-       if (!time_after(expire, now))
-               return 0;
-
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire - now);
-
-       ret = 0;
-       if (expire) {
-               struct timespec t;
-               jiffies_to_timespec(expire, &t);
-
-               ret = -ERESTART_RESTARTBLOCK;
-               if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
-                       ret = -EFAULT;
-               /* The 'restart' block is already filled in */
-       }
-       return ret;
+       __set_current_state(TASK_UNINTERRUPTIBLE);
+       return schedule_timeout(timeout);
 }
+EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
-asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
+/* Thread ID - the internal kernel "pid" */
+asmlinkage long sys_gettid(void)
 {
-       struct timespec t;
-       unsigned long expire;
-       long ret;
-
-       if (copy_from_user(&t, rqtp, sizeof(t)))
-               return -EFAULT;
-
-       if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
-               return -EINVAL;
-
-       expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
-       current->state = TASK_INTERRUPTIBLE;
-       expire = schedule_timeout(expire);
-
-       ret = 0;
-       if (expire) {
-               struct restart_block *restart;
-               jiffies_to_timespec(expire, &t);
-               if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
-                       return -EFAULT;
-
-               restart = &current_thread_info()->restart_block;
-               restart->fn = nanosleep_restart;
-               restart->arg0 = jiffies + expire;
-               restart->arg1 = (unsigned long) rmtp;
-               ret = -ERESTART_RESTARTBLOCK;
-       }
-       return ret;
+       return current->pid;
 }
 
 /*
@@ -1206,14 +1181,15 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
                 * too.
                 */
 
-               do_gettimeofday((struct timeval *)&tp);
-               tp.tv_nsec *= NSEC_PER_USEC;
+               getnstimeofday(&tp);
                tp.tv_sec += wall_to_monotonic.tv_sec;
                tp.tv_nsec += wall_to_monotonic.tv_nsec;
                if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
                        tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
                        tp.tv_sec++;
                }
+               if (vx_flags(VXF_VIRT_UPTIME, 0))
+                       vx_vsi_uptime(&tp, NULL);
                val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
 
                val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
@@ -1273,12 +1249,40 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
        return 0;
 }
 
-static void __devinit init_timers_cpu(int cpu)
+static int __devinit init_timers_cpu(int cpu)
 {
        int j;
        tvec_base_t *base;
-       
-       base = &per_cpu(tvec_bases, cpu);
+       static char __devinitdata tvec_base_done[NR_CPUS];
+
+       if (!tvec_base_done[cpu]) {
+               static char boot_done;
+
+               if (boot_done) {
+                       /*
+                        * The APs use this path later in boot
+                        */
+                       base = kmalloc_node(sizeof(*base), GFP_KERNEL,
+                                               cpu_to_node(cpu));
+                       if (!base)
+                               return -ENOMEM;
+                       memset(base, 0, sizeof(*base));
+                       per_cpu(tvec_bases, cpu) = base;
+               } else {
+                       /*
+                        * This is for the boot CPU - we use compile-time
+                        * static initialisation because per-cpu memory isn't
+                        * ready yet and because the memory allocators are not
+                        * initialised either.
+                        */
+                       boot_done = 1;
+                       base = &boot_tvec_bases;
+               }
+               tvec_base_done[cpu] = 1;
+       } else {
+               base = per_cpu(tvec_bases, cpu);
+       }
+
        spin_lock_init(&base->lock);
        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
@@ -1290,25 +1294,20 @@ static void __devinit init_timers_cpu(int cpu)
                INIT_LIST_HEAD(base->tv1.vec + j);
 
        base->timer_jiffies = jiffies;
+       return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
+static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
 {
        struct timer_list *timer;
 
        while (!list_empty(head)) {
                timer = list_entry(head->next, struct timer_list, entry);
-               /* We're locking backwards from __mod_timer order here,
-                  beware deadlock. */
-               if (!spin_trylock(&timer->lock))
-                       return 0;
-               list_del(&timer->entry);
-               internal_add_timer(new_base, timer);
+               detach_timer(timer, 0);
                timer->base = new_base;
-               spin_unlock(&timer->lock);
+               internal_add_timer(new_base, timer);
        }
-       return 1;
 }
 
 static void __devinit migrate_timers(int cpu)
@@ -1318,53 +1317,39 @@ static void __devinit migrate_timers(int cpu)
        int i;
 
        BUG_ON(cpu_online(cpu));
-       old_base = &per_cpu(tvec_bases, cpu);
-       new_base = &get_cpu_var(tvec_bases);
+       old_base = per_cpu(tvec_bases, cpu);
+       new_base = get_cpu_var(tvec_bases);
 
        local_irq_disable();
-again:
-       /* Prevent deadlocks via ordering by old_base < new_base. */
-       if (old_base < new_base) {
-               spin_lock(&new_base->lock);
-               spin_lock(&old_base->lock);
-       } else {
-               spin_lock(&old_base->lock);
-               spin_lock(&new_base->lock);
-       }
+       spin_lock(&new_base->lock);
+       spin_lock(&old_base->lock);
+
+       BUG_ON(old_base->running_timer);
 
-       if (old_base->running_timer)
-               BUG();
        for (i = 0; i < TVR_SIZE; i++)
-               if (!migrate_timer_list(new_base, old_base->tv1.vec + i))
-                       goto unlock_again;
-       for (i = 0; i < TVN_SIZE; i++)
-               if (!migrate_timer_list(new_base, old_base->tv2.vec + i)
-                   || !migrate_timer_list(new_base, old_base->tv3.vec + i)
-                   || !migrate_timer_list(new_base, old_base->tv4.vec + i)
-                   || !migrate_timer_list(new_base, old_base->tv5.vec + i))
-                       goto unlock_again;
+               migrate_timer_list(new_base, old_base->tv1.vec + i);
+       for (i = 0; i < TVN_SIZE; i++) {
+               migrate_timer_list(new_base, old_base->tv2.vec + i);
+               migrate_timer_list(new_base, old_base->tv3.vec + i);
+               migrate_timer_list(new_base, old_base->tv4.vec + i);
+               migrate_timer_list(new_base, old_base->tv5.vec + i);
+       }
+
        spin_unlock(&old_base->lock);
        spin_unlock(&new_base->lock);
        local_irq_enable();
        put_cpu_var(tvec_bases);
-       return;
-
-unlock_again:
-       /* Avoid deadlock with __mod_timer, by backing off. */
-       spin_unlock(&old_base->lock);
-       spin_unlock(&new_base->lock);
-       cpu_relax();
-       goto again;
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __devinit timer_cpu_notify(struct notifier_block *self, 
+static int timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
 {
        long cpu = (long)hcpu;
        switch(action) {
        case CPU_UP_PREPARE:
-               init_timers_cpu(cpu);
+               if (init_timers_cpu(cpu) < 0)
+                       return NOTIFY_BAD;
                break;
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
@@ -1377,7 +1362,7 @@ static int __devinit timer_cpu_notify(struct notifier_block *self,
        return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata timers_nb = {
+static struct notifier_block timers_nb = {
        .notifier_call  = timer_cpu_notify,
 };
 
@@ -1391,14 +1376,130 @@ void __init init_timers(void)
 }
 
 #ifdef CONFIG_TIME_INTERPOLATION
-volatile unsigned long last_nsec_offset;
-#ifndef __HAVE_ARCH_CMPXCHG
-spinlock_t last_nsec_offset_lock = SPIN_LOCK_UNLOCKED;
-#endif
 
-struct time_interpolator *time_interpolator;
-static struct time_interpolator *time_interpolator_list;
-static spinlock_t time_interpolator_lock = SPIN_LOCK_UNLOCKED;
+struct time_interpolator *time_interpolator __read_mostly;
+static struct time_interpolator *time_interpolator_list __read_mostly;
+static DEFINE_SPINLOCK(time_interpolator_lock);
+
+static inline u64 time_interpolator_get_cycles(unsigned int src)
+{
+       unsigned long (*x)(void);
+
+       switch (src)
+       {
+               case TIME_SOURCE_FUNCTION:
+                       x = time_interpolator->addr;
+                       return x();
+
+               case TIME_SOURCE_MMIO64 :
+                       return readq_relaxed((void __iomem *)time_interpolator->addr);
+
+               case TIME_SOURCE_MMIO32 :
+                       return readl_relaxed((void __iomem *)time_interpolator->addr);
+
+               default: return get_cycles();
+       }
+}
+
+static inline u64 time_interpolator_get_counter(int writelock)
+{
+       unsigned int src = time_interpolator->source;
+
+       if (time_interpolator->jitter)
+       {
+               u64 lcycle;
+               u64 now;
+
+               do {
+                       lcycle = time_interpolator->last_cycle;
+                       now = time_interpolator_get_cycles(src);
+                       if (lcycle && time_after(lcycle, now))
+                               return lcycle;
+
+                       /* When holding the xtime write lock, there's no need
+                        * to add the overhead of the cmpxchg.  Readers are
+                        * forced to retry until the write lock is released.
+                        */
+                       if (writelock) {
+                               time_interpolator->last_cycle = now;
+                               return now;
+                       }
+                       /* Keep track of the last timer value returned.
+                        * The use of cmpxchg here will cause contention
+                        * in an SMP environment.
+                        */
+               } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
+               return now;
+       }
+       else
+               return time_interpolator_get_cycles(src);
+}
+
+void time_interpolator_reset(void)
+{
+       time_interpolator->offset = 0;
+       time_interpolator->last_counter = time_interpolator_get_counter(1);
+}
+
+#define GET_TI_NSECS(count, i) (((((count) - (i)->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
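/*
 * Reading GET_TI_NSECS(): elapsed cycles since last_counter, masked to
 * the counter width, times nsec_per_cyc (nanoseconds per cycle as a
 * fixed-point value with 'shift' fractional bits), shifted back down.
 * E.g. a hypothetical 10 MHz counter gives nsec_per_cyc == 100 << shift,
 * so 25 elapsed cycles convert to 2500 ns.
 */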
+
+unsigned long time_interpolator_get_offset(void)
+{
+       /* If we do not have a time interpolator set up then just return zero */
+       if (!time_interpolator)
+               return 0;
+
+       return time_interpolator->offset +
+               GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
+}
+
+#define INTERPOLATOR_ADJUST 65536
+#define INTERPOLATOR_MAX_SKIP (10 * INTERPOLATOR_ADJUST)
+
+static void time_interpolator_update(long delta_nsec)
+{
+       u64 counter;
+       unsigned long offset;
+
+       /* If there is no time interpolator set up then do nothing */
+       if (!time_interpolator)
+               return;
+
+       /*
+        * The interpolator compensates for late ticks by accumulating the late
+        * time in time_interpolator->offset. A tick earlier than expected will
+        * lead to a reset of the offset and a corresponding jump of the clock
+        * forward. Again this only works if the interpolator clock is running
+        * slightly slower than the regular clock and the tuning logic ensures
+        * that.
+        */
+
+       counter = time_interpolator_get_counter(1);
+       offset = time_interpolator->offset +
+                       GET_TI_NSECS(counter, time_interpolator);
+
+       if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
+               time_interpolator->offset = offset - delta_nsec;
+       else {
+               time_interpolator->skips++;
+               time_interpolator->ns_skipped += delta_nsec - offset;
+               time_interpolator->offset = 0;
+       }
+       time_interpolator->last_counter = counter;
+
+       /*
+        * Tuning logic for the time interpolator, invoked every minute or so.
+        * Decrease the interpolator clock speed if no skips occurred and an
+        * offset is carried. Increase the interpolator clock speed if we skip
+        * too much time.
+        */
+       if (jiffies % INTERPOLATOR_ADJUST == 0)
+       {
+               if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
+                       time_interpolator->nsec_per_cyc--;
+               if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
+                       time_interpolator->nsec_per_cyc++;
+               time_interpolator->skips = 0;
+               time_interpolator->ns_skipped = 0;
+       }
+}
 
 static inline int
 is_better_time_interpolator(struct time_interpolator *new)
@@ -1412,11 +1513,19 @@ is_better_time_interpolator(struct time_interpolator *new)
 void
 register_time_interpolator(struct time_interpolator *ti)
 {
+       unsigned long flags;
+
+       /* Sanity check */
+       BUG_ON(ti->frequency == 0 || ti->mask == 0);
+
+       ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
        spin_lock(&time_interpolator_lock);
-       write_seqlock_irq(&xtime_lock);
-       if (is_better_time_interpolator(ti))
+       write_seqlock_irqsave(&xtime_lock, flags);
+       if (is_better_time_interpolator(ti)) {
                time_interpolator = ti;
-       write_sequnlock_irq(&xtime_lock);
+               time_interpolator_reset();
+       }
+       write_sequnlock_irqrestore(&xtime_lock, flags);
 
        ti->next = time_interpolator_list;
        time_interpolator_list = ti;
@@ -1427,6 +1536,7 @@ void
 unregister_time_interpolator(struct time_interpolator *ti)
 {
        struct time_interpolator *curr, **prev;
+       unsigned long flags;
 
        spin_lock(&time_interpolator_lock);
        prev = &time_interpolator_list;
@@ -1438,7 +1548,7 @@ unregister_time_interpolator(struct time_interpolator *ti)
                prev = &curr->next;
        }
 
-       write_seqlock_irq(&xtime_lock);
+       write_seqlock_irqsave(&xtime_lock, flags);
        if (ti == time_interpolator) {
                /* we lost the best time-interpolator: */
                time_interpolator = NULL;
@@ -1446,8 +1556,38 @@ unregister_time_interpolator(struct time_interpolator *ti)
                for (curr = time_interpolator_list; curr; curr = curr->next)
                        if (is_better_time_interpolator(curr))
                                time_interpolator = curr;
+               time_interpolator_reset();
        }
-       write_sequnlock_irq(&xtime_lock);
+       write_sequnlock_irqrestore(&xtime_lock, flags);
        spin_unlock(&time_interpolator_lock);
 }
 #endif /* CONFIG_TIME_INTERPOLATION */
+
+/**
+ * msleep - sleep safely even with waitqueue interruptions
+ * @msecs: Time in milliseconds to sleep for
+ */
+void msleep(unsigned int msecs)
+{
+       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+       while (timeout)
+               timeout = schedule_timeout_uninterruptible(timeout);
+}
+
+EXPORT_SYMBOL(msleep);
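/*
 * Usage sketch (hypothetical device): give hardware time to settle.
 * msleep() may sleep longer than requested but never less, and cannot
 * be cut short by signals.
 */
static void mydev_reset_settle(void)
{
        msleep(20);     /* illustrative 20 ms settle time */
}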
+
+/**
+ * msleep_interruptible - sleep waiting for signals
+ * @msecs: Time in milliseconds to sleep for
+ */
+unsigned long msleep_interruptible(unsigned int msecs)
+{
+       unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+
+       while (timeout && !signal_pending(current))
+               timeout = schedule_timeout_interruptible(timeout);
+       return jiffies_to_msecs(timeout);
+}
+
+EXPORT_SYMBOL(msleep_interruptible);
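/*
 * Usage sketch: wait up to 500 ms but honor signals; a non-zero return
 * is the time remaining, in milliseconds, when a signal ended the sleep.
 */
static int mydev_wait_ready(void)
{
        if (msleep_interruptible(500))
                return -EINTR;  /* a signal arrived before the full sleep */
        return 0;
}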