#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/cpu.h>
-#include <linux/vs_base.h>
#include <linux/vs_cvirt.h>
#include <linux/vserver/sched.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
+#include <asm/io.h>
+
+#ifdef CONFIG_TIME_INTERPOLATION
+static void time_interpolator_update(long delta_nsec);
+#else
+#define time_interpolator_update(x)
+#endif
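+
+/* With CONFIG_TIME_INTERPOLATION disabled, the stub macro above makes
+ * time_interpolator_update() expand to nothing, so callers need no
+ * #ifdefs of their own at each call site.
+ */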
/*
* per-CPU timer vector definitions:
spin_unlock_irqrestore(&base->lock, flags);
}
+EXPORT_SYMBOL(add_timer_on);
+
/***
* mod_timer - modify a timer's timeout
* @timer: the timer to be modified
if (xtime.tv_sec % 86400 == 0) {
xtime.tv_sec--;
wall_to_monotonic.tv_sec++;
+		/* Let the time interpolator spread the change out gradually
+		 * instead of jumping the clock by a full second at once.
+		 */
time_interpolator_update(-NSEC_PER_SEC);
time_state = TIME_OOP;
clock_was_set();
if ((xtime.tv_sec + 1) % 86400 == 0) {
xtime.tv_sec++;
wall_to_monotonic.tv_sec--;
+		/* Again use the time interpolator so the change of time is gradual */
time_interpolator_update(NSEC_PER_SEC);
time_state = TIME_WAIT;
clock_was_set();
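+		/* In both leap-second branches above, xtime is stepped by a
+		 * second and time_interpolator_update() is told about the
+		 * step, so interpolated readings are reconciled with the
+		 * new wall time instead of drifting against it.
+		 */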
psecs = (p->utime += user);
psecs += (p->stime += system);
- if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
+ if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_cur) {
/* Send SIGXCPU every second.. */
if (!(psecs % HZ))
send_sig(SIGXCPU, p, 1);
/* and SIGKILL when we go over max.. */
- if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_max)
+ if (psecs / HZ >= p->rlim[RLIMIT_CPU].rlim_max)
send_sig(SIGKILL, p, 1);
}
}
}
}
-void update_one_process(struct task_struct *p, unsigned long user,
+static void update_one_process(struct task_struct *p, unsigned long user,
unsigned long system, int cpu)
{
do_process_times(p, user, system);
*/
asmlinkage long sys_getpid(void)
{
- return vx_map_tgid(current->vx_info, current->tgid);
+ return vx_map_tgid(current->tgid);
}
/*
#endif
break;
}
- return vx_map_tgid(current->vx_info, pid);
+ return vx_map_pid(pid);
}
asmlinkage long sys_getuid(void)
* too.
*/
- do_gettimeofday((struct timeval *)&tp);
- tp.tv_nsec *= NSEC_PER_USEC;
+ getnstimeofday(&tp);
tp.tv_sec += wall_to_monotonic.tv_sec;
tp.tv_nsec += wall_to_monotonic.tv_nsec;
if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
}
#ifdef CONFIG_TIME_INTERPOLATION
-volatile unsigned long last_nsec_offset;
-#ifndef __HAVE_ARCH_CMPXCHG
-spinlock_t last_nsec_offset_lock = SPIN_LOCK_UNLOCKED;
-#endif
struct time_interpolator *time_interpolator;
static struct time_interpolator *time_interpolator_list;
static spinlock_t time_interpolator_lock = SPIN_LOCK_UNLOCKED;
+static inline unsigned long time_interpolator_get_cycles(unsigned int src)
+{
+	unsigned long (*x)(void);
+
+	switch (src) {
+	case TIME_SOURCE_FUNCTION:
+		x = time_interpolator->addr;
+		return x();
+
+	case TIME_SOURCE_MMIO64:
+		return readq(time_interpolator->addr);
+
+	case TIME_SOURCE_MMIO32:
+		return readl(time_interpolator->addr);
+
+	default:
+		return get_cycles();
+	}
+}
+
+static inline unsigned long time_interpolator_get_counter(void)
+{
+	unsigned int src = time_interpolator->source;
+
+	if (time_interpolator->jitter) {
+		unsigned long lcycle;
+		unsigned long now;
+
+		do {
+			lcycle = time_interpolator->last_cycle;
+			now = time_interpolator_get_cycles(src);
+			if (lcycle && time_after(lcycle, now))
+				return lcycle;
+			/* Keep track of the last counter value returned.
+			 * The cmpxchg makes the update atomic, at the cost
+			 * of cache-line contention on SMP.
+			 */
+		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
+		return now;
+	} else
+		return time_interpolator_get_cycles(src);
+}
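+
+/* The do/while above is the usual lock-free update pattern: sample,
+ * compute, then publish with cmpxchg and retry if another CPU raced us.
+ * last_cycle therefore only ever advances, so a jittery counter can
+ * never appear to run backwards to any reader.
+ */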
+
+void time_interpolator_reset(void)
+{
+ time_interpolator->offset = 0;
+ time_interpolator->last_counter = time_interpolator_get_counter();
+}
+
+unsigned long time_interpolator_resolution(void)
+{
+ if (time_interpolator->frequency < NSEC_PER_SEC)
+ return NSEC_PER_SEC / time_interpolator->frequency;
+ else
+ return 1;
+}
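+
+/* For example, a hypothetical 10 MHz time source yields
+ * NSEC_PER_SEC / 10000000 = 100 ns resolution, while any source at
+ * 1 GHz or above is clamped to the 1 ns floor.
+ */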
+
+#define GET_TI_NSECS(count,i) ((((count) - i->last_counter) * i->nsec_per_cyc) >> i->shift)
+
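+/* GET_TI_NSECS is fixed-point arithmetic: register_time_interpolator()
+ * precomputes nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency, so a
+ * multiply and a right shift convert a cycle delta to nanoseconds with
+ * no division in the fast path. A sketch with assumed numbers: for a
+ * 20 MHz source with shift = 10, nsec_per_cyc = (10^9 << 10) / (2*10^7)
+ * = 51200, and 100 cycles give (100 * 51200) >> 10 = 5000 ns, matching
+ * the expected 50 ns per cycle.
+ */
+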
+unsigned long time_interpolator_get_offset(void)
+{
+ return time_interpolator->offset +
+ GET_TI_NSECS(time_interpolator_get_counter(), time_interpolator);
+}
+
+static void time_interpolator_update(long delta_nsec)
+{
+ unsigned long counter = time_interpolator_get_counter();
+ unsigned long offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator);
+
+	/* The interpolator compensates for late ticks by accumulating
+	 * the late time in time_interpolator->offset. A tick earlier than
+	 * expected will lead to a reset of the offset and a corresponding
+	 * jump of the clock forward. Again, this only works if the
+	 * interpolator clock is running slightly slower than the regular
+	 * clock, and the tuning logic ensures that.
+	 */
+
+ if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
+ time_interpolator->offset = offset - delta_nsec;
+ else {
+ time_interpolator->skips++;
+ time_interpolator->ns_skipped += delta_nsec - offset;
+ time_interpolator->offset = 0;
+ }
+ time_interpolator->last_counter = counter;
+
+ /* Tuning logic for time interpolator invoked every minute or so.
+ * Decrease interpolator clock speed if no skips occurred and an offset is carried.
+ * Increase interpolator clock speed if we skip too much time.
+ */
+	if (jiffies % INTERPOLATOR_ADJUST == 0) {
+		if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
+			time_interpolator->nsec_per_cyc--;
+		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
+			time_interpolator->nsec_per_cyc++;
+		time_interpolator->skips = 0;
+		time_interpolator->ns_skipped = 0;
+	}
+}
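+
+/* A note on the tuning step above: nsec_per_cyc moves by one unit of
+ * its fixed-point representation, i.e. 2^-shift ns per cycle, so each
+ * adjustment nudges the interpolator rate by a tiny bounded amount
+ * rather than re-deriving it from the nominal frequency.
+ */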
+
static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
void
register_time_interpolator(struct time_interpolator *ti)
{
+ unsigned long flags;
+
+ ti->nsec_per_cyc = (NSEC_PER_SEC << ti->shift) / ti->frequency;
spin_lock(&time_interpolator_lock);
- write_seqlock_irq(&xtime_lock);
- if (is_better_time_interpolator(ti))
+ write_seqlock_irqsave(&xtime_lock, flags);
+ if (is_better_time_interpolator(ti)) {
time_interpolator = ti;
- write_sequnlock_irq(&xtime_lock);
+ time_interpolator_reset();
+ }
+ write_sequnlock_irqrestore(&xtime_lock, flags);
ti->next = time_interpolator_list;
time_interpolator_list = ti;
unregister_time_interpolator(struct time_interpolator *ti)
{
struct time_interpolator *curr, **prev;
+ unsigned long flags;
spin_lock(&time_interpolator_lock);
prev = &time_interpolator_list;
prev = &curr->next;
}
- write_seqlock_irq(&xtime_lock);
+ write_seqlock_irqsave(&xtime_lock, flags);
if (ti == time_interpolator) {
/* we lost the best time-interpolator: */
time_interpolator = NULL;
for (curr = time_interpolator_list; curr; curr = curr->next)
if (is_better_time_interpolator(curr))
time_interpolator = curr;
+ time_interpolator_reset();
}
- write_sequnlock_irq(&xtime_lock);
+ write_sequnlock_irqrestore(&xtime_lock, flags);
spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
EXPORT_SYMBOL(msleep);
+/**
+ * msleep_interruptible - sleep until the timeout expires or a signal arrives
+ * @msecs: Time in milliseconds to sleep for
+ *
+ * Returns the number of milliseconds left to sleep if interrupted by a
+ * signal, or 0 if the full period elapsed.
+ */
+unsigned long msleep_interruptible(unsigned int msecs)
+{
+ unsigned long timeout = msecs_to_jiffies(msecs);
+
+ while (timeout && !signal_pending(current)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ timeout = schedule_timeout(timeout);
+ }
+ return jiffies_to_msecs(timeout);
+}
+
+EXPORT_SYMBOL(msleep_interruptible);
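+
+/* A minimal usage sketch (hypothetical caller, not part of this patch):
+ *
+ *	unsigned long left = msleep_interruptible(100);
+ *
+ *	if (left)
+ *		printk(KERN_INFO "woken early, %lu ms left\n", left);
+ *
+ * Callers that must sleep for the full period even when signals are
+ * pending should use msleep() instead.
+ */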