X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=arch%2Fs390%2Fkernel%2Fvtime.c;h=21baaf5496d61b02525a933c8139524710863cfb;hb=97bf2856c6014879bd04983a3e9dfcdac1e7fe85;hp=02d2179e40828a887ad3081e7bb606f310a6629b;hpb=c7b5ebbddf7bcd3651947760f423e3783bbe6573;p=linux-2.6.git

diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 02d2179e4..21baaf549 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -7,7 +7,6 @@
  * Author(s): Jan Glauber
  */
 
-#include
 #include
 #include
 #include
@@ -17,25 +16,147 @@
 #include
 #include
 #include
+#include
+#include
+#include
 #include
 #include
+#include
 
-#define VTIMER_MAGIC (TIMER_MAGIC + 1)
 static ext_int_info_t ext_int_info_timer;
 DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
 
-void start_cpu_timer(void)
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+/*
+ * Update process times based on virtual cpu times stored by entry.S
+ * to the lowcore fields user_timer, system_timer & steal_clock.
+ */
+void account_tick_vtime(struct task_struct *tsk)
+{
+	cputime_t cputime;
+	__u64 timer, clock;
+	int rcu_user_flag;
+
+	timer = S390_lowcore.last_update_timer;
+	clock = S390_lowcore.last_update_clock;
+	asm volatile (" STPT %0\n" /* Store current cpu timer value */
+		      " STCK %1" /* Store current tod clock value */
+		      : "=m" (S390_lowcore.last_update_timer),
+			"=m" (S390_lowcore.last_update_clock) );
+	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+	S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;
+
+	cputime = S390_lowcore.user_timer >> 12;
+	rcu_user_flag = cputime != 0;
+	S390_lowcore.user_timer -= cputime << 12;
+	S390_lowcore.steal_clock -= cputime << 12;
+	account_user_time(tsk, cputime);
+
+	cputime = S390_lowcore.system_timer >> 12;
+	S390_lowcore.system_timer -= cputime << 12;
+	S390_lowcore.steal_clock -= cputime << 12;
+	account_system_time(tsk, HARDIRQ_OFFSET, cputime);
+
+	cputime = S390_lowcore.steal_clock;
+	if ((__s64) cputime > 0) {
+		cputime >>= 12;
+		S390_lowcore.steal_clock -= cputime << 12;
+		account_steal_time(tsk, cputime);
+	}
+
+	run_local_timers();
+	if (rcu_pending(smp_processor_id()))
+		rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
+	scheduler_tick();
+	run_posix_cpu_timers(tsk);
+}
+
+/*
+ * Update process times based on virtual cpu times stored by entry.S
+ * to the lowcore fields user_timer, system_timer & steal_clock.
+ */
+void account_vtime(struct task_struct *tsk)
+{
+	cputime_t cputime;
+	__u64 timer;
+
+	timer = S390_lowcore.last_update_timer;
+	asm volatile (" STPT %0" /* Store current cpu timer value */
+		      : "=m" (S390_lowcore.last_update_timer) );
+	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+
+	cputime = S390_lowcore.user_timer >> 12;
+	S390_lowcore.user_timer -= cputime << 12;
+	S390_lowcore.steal_clock -= cputime << 12;
+	account_user_time(tsk, cputime);
+
+	cputime = S390_lowcore.system_timer >> 12;
+	S390_lowcore.system_timer -= cputime << 12;
+	S390_lowcore.steal_clock -= cputime << 12;
+	account_system_time(tsk, 0, cputime);
+}
+
+/*
+ * Update process times based on virtual cpu times stored by entry.S
+ * to the lowcore fields user_timer, system_timer & steal_clock.
+ */
+void account_system_vtime(struct task_struct *tsk)
+{
+	cputime_t cputime;
+	__u64 timer;
+
+	timer = S390_lowcore.last_update_timer;
+	asm volatile (" STPT %0" /* Store current cpu timer value */
+		      : "=m" (S390_lowcore.last_update_timer) );
+	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
+
+	cputime = S390_lowcore.system_timer >> 12;
+	S390_lowcore.system_timer -= cputime << 12;
+	S390_lowcore.steal_clock -= cputime << 12;
+	account_system_time(tsk, 0, cputime);
+}
+
+static inline void set_vtimer(__u64 expires)
+{
+	__u64 timer;
+
+	asm volatile (" STPT %0\n" /* Store current cpu timer value */
+		      " SPT %1" /* Set new value immediately afterwards */
+		      : "=m" (timer) : "m" (expires) );
+	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
+	S390_lowcore.last_update_timer = expires;
+
+	/* store expire time for this CPU timer */
+	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+}
+#else
+static inline void set_vtimer(__u64 expires)
+{
+	S390_lowcore.last_update_timer = expires;
+	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
+
+	/* store expire time for this CPU timer */
+	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
+}
+#endif
+
+static void start_cpu_timer(void)
 {
 	struct vtimer_queue *vt_list;
 
 	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
-	set_vtimer(vt_list->idle);
+
+	/* CPU timer interrupt is pending, don't reprogram it */
+	if (vt_list->idle & 1LL<<63)
+		return;
+
+	if (!list_empty(&vt_list->list))
+		set_vtimer(vt_list->idle);
 }
 
-void stop_cpu_timer(void)
+static void stop_cpu_timer(void)
 {
-	__u64 done;
 	struct vtimer_queue *vt_list;
 
 	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
@@ -46,21 +167,17 @@ void stop_cpu_timer(void)
 		goto fire;
 	}
 
-	/* store progress */
-	asm volatile ("STPT %0" : "=m" (done));
+	/* store the actual expire value */
+	asm volatile ("STPT %0" : "=m" (vt_list->idle));
 
 	/*
-	 * If done is negative we do not stop the CPU timer
-	 * because we will get instantly an interrupt that
-	 * will start the CPU timer again.
+	 * If the CPU timer is negative we don't reprogram
+	 * it because we will instantly get an interrupt.
 	 */
-	if (done & 1LL<<63)
+	if (vt_list->idle & 1LL<<63)
 		return;
-	else
-		vt_list->offset += vt_list->to_expire - done;
 
-	/* save the actual expire value */
-	vt_list->idle = done;
+	vt_list->offset += vt_list->to_expire - vt_list->idle;
 
 	/*
 	 * We cannot halt the CPU timer, we just write a value that
@@ -71,19 +188,11 @@ void stop_cpu_timer(void)
 	set_vtimer(VTIMER_MAX_SLICE);
 }
 
-void set_vtimer(__u64 expires)
-{
-	asm volatile ("SPT %0" : : "m" (expires));
-
-	/* store expire time for this CPU timer */
-	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
-}
-
 /*
  * Sorted add to a list. List is linear searched until first bigger
  * element is found.
  */
-void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
+static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
 {
 	struct vtimer_list *event;
 
@@ -100,11 +209,11 @@ void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
  * Do the callback functions of expired vtimer events.
  * Called from within the interrupt handler.
*/ -static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs) +static void do_callbacks(struct list_head *cb_list) { struct vtimer_queue *vt_list; struct vtimer_list *event, *tmp; - void (*fn)(unsigned long, struct pt_regs*); + void (*fn)(unsigned long); unsigned long data; if (list_empty(cb_list)) @@ -115,7 +224,7 @@ static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs) list_for_each_entry_safe(event, tmp, cb_list, entry) { fn = event->function; data = event->data; - fn(data, regs); + fn(data); if (!event->interval) /* delete one shot timer */ @@ -133,7 +242,7 @@ static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs) /* * Handler for the virtual CPU timer. */ -static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code) +static void do_cpu_timer_interrupt(__u16 error_code) { int cpu; __u64 next, delta; @@ -166,7 +275,7 @@ static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code) list_move_tail(&event->entry, &cb_list); } spin_unlock(&vt_list->lock); - do_callbacks(&cb_list, regs); + do_callbacks(&cb_list); /* next event is first in list */ spin_lock(&vt_list->lock); @@ -192,20 +301,12 @@ static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code) void init_virt_timer(struct vtimer_list *timer) { - timer->magic = VTIMER_MAGIC; timer->function = NULL; INIT_LIST_HEAD(&timer->entry); spin_lock_init(&timer->lock); } EXPORT_SYMBOL(init_virt_timer); -static inline int check_vtimer(struct vtimer_list *timer) -{ - if (timer->magic != VTIMER_MAGIC) - return -EINVAL; - return 0; -} - static inline int vtimer_pending(struct vtimer_list *timer) { return (!list_empty(&timer->entry)); @@ -255,13 +356,13 @@ static void internal_add_vtimer(struct vtimer_list *timer) set_vtimer(event->expires); spin_unlock_irqrestore(&vt_list->lock, flags); - /* release CPU aquired in prepare_vtimer or mod_virt_timer() */ + /* release CPU acquired in prepare_vtimer or mod_virt_timer() */ put_cpu(); } static inline int prepare_vtimer(struct vtimer_list *timer) { - if (check_vtimer(timer) || !timer->function) { + if (!timer->function) { printk("add_virt_timer: uninitialized timer\n"); return -EINVAL; } @@ -329,7 +430,7 @@ int mod_virt_timer(struct vtimer_list *timer, __u64 expires) unsigned long flags; int cpu; - if (check_vtimer(timer) || !timer->function) { + if (!timer->function) { printk("mod_virt_timer: uninitialized timer\n"); return -EINVAL; } @@ -396,11 +497,6 @@ int del_virt_timer(struct vtimer_list *timer) unsigned long flags; struct vtimer_queue *vt_list; - if (check_vtimer(timer)) { - printk("del_virt_timer: timer not initialized\n"); - return -EINVAL; - } - /* check if timer is pending */ if (!vtimer_pending(timer)) return 0; @@ -429,11 +525,12 @@ void init_cpu_vtimer(void) { struct vtimer_queue *vt_list; unsigned long cr0; - __u64 timer; /* kick the virtual timer */ - timer = VTIMER_MAX_SLICE; - asm volatile ("SPT %0" : : "m" (timer)); + S390_lowcore.exit_timer = VTIMER_MAX_SLICE; + S390_lowcore.last_update_timer = VTIMER_MAX_SLICE; + asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer)); + asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock)); __ctl_store(cr0, 0, 0); cr0 |= 0x400; __ctl_load(cr0, 0, 0);
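
The accounting helpers added by this patch all lean on one idiom: the s390 CPU timer and TOD clock tick in units of 2^-12 microseconds, so shifting an accumulated delta right by 12 yields whole cputime units, and subtracting the shifted-back value leaves the sub-unit remainder in the lowcore accumulator for the next update. Below is a standalone sketch of that remainder-preserving conversion; it is illustrative user-space C with hypothetical names (drain_cputime, user_timer), not kernel code, and only the shift-by-12 pattern is taken from the patch.

#include <stdio.h>

typedef unsigned long long u64;

/* stands in for a lowcore field such as S390_lowcore.user_timer */
static u64 user_timer;

/* extract whole cputime units; the sub-unit remainder stays in *acc */
static u64 drain_cputime(u64 *acc)
{
	u64 cputime = *acc >> 12;	/* whole units (1 unit = 4096 raw ticks) */
	*acc -= cputime << 12;		/* carry the fraction into the next update */
	return cputime;
}

int main(void)
{
	u64 units;

	user_timer = (5ULL << 12) + 123;	/* 5 whole units plus 123 raw ticks */
	units = drain_cputime(&user_timer);
	printf("accounted %llu units, %llu raw ticks carried over\n",
	       units, user_timer);
	return 0;
}

Because the remainder is never discarded, repeated small deltas still add up to the correct total over time, which is why every accounting function in the patch uses the same pair of shift operations.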
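On the API side, the patch deletes the timer->magic/check_vtimer() sanity checking and drops the struct pt_regs argument from vtimer callbacks. The following is a minimal sketch of a caller against the patched interface: the callback body and field values are illustrative assumptions, and add_virt_timer() is inferred from the printk messages quoted in the patch rather than from a hunk shown here.

#include <asm/timer.h>

/* callbacks now take only the data word, no struct pt_regs */
static void my_timer_fn(unsigned long data)
{
	/* runs from do_cpu_timer_interrupt() via do_callbacks() */
}

static struct vtimer_list my_timer;

static void my_setup(void)
{
	init_virt_timer(&my_timer);		/* clears function, inits list/lock */
	my_timer.function = my_timer_fn;	/* must be set, or prepare_vtimer() rejects it */
	my_timer.data = 0;
	my_timer.expires = 1ULL << 32;		/* CPU timer units; illustrative value */
	my_timer.interval = 0;			/* 0 = one-shot, non-zero = periodic */
	add_virt_timer(&my_timer);		/* assumed entry point, named in the printk text */
}

With the magic-number check gone, an uninitialized timer is only caught by the !timer->function test, so callers must set the function pointer before registering.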