/*
 * Set the wall-clock time to *tv.
 *
 * Returns 0 on success, -EINVAL if tv->tv_nsec is out of range
 * (negative or >= NSEC_PER_SEC; the unsigned cast catches both).
 *
 * Updates xtime under xtime_lock and compensates wall_to_monotonic
 * by the opposite delta, so the sum (xtime + wall_to_monotonic)
 * stays constant across the call.  Also resets the NTP adjustment
 * state, since an absolute time set invalidates any in-progress
 * slewing.
 */
int
do_settimeofday (struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	{
		/*
		 * This is revolting. We need to set "xtime" correctly. However, the value
		 * in this location is the value at the most recent update of wall time.
		 * Discover what correction gettimeofday would have done, and then undo
		 * it!
		 */
		nsec -= time_interpolator_get_offset();

		/* Adjust wall_to_monotonic by the old-minus-new delta so the
		 * monotonic clock is unaffected by this wall-clock jump. */
		wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
		wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

		/* set_normalized_timespec() carries any nsec over/underflow
		 * (from the corrections above) into the seconds field. */
		set_normalized_timespec(&xtime, sec, nsec);
		set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

		time_adjust = 0; /* stop active adjtime() */
		time_status |= STA_UNSYNC; /* clock no longer NTP-synchronized */
		time_maxerror = NTP_PHASE_LIMIT;
		time_esterror = NTP_PHASE_LIMIT;
		time_interpolator_reset();
	}
	write_sequnlock_irq(&xtime_lock);
	/* Notify timer subsystems (e.g. pending CLOCK_REALTIME timers)
	 * that the wall clock jumped. */
	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
-
/*
 * Fill *tv with the current wall-clock time (seconds + microseconds).
 *
 * Lock-free reader: snapshots xtime and the interpolator offset inside
 * a read_seqbegin/read_seqretry loop, then uses a cmpxchg on
 * last_nsec_offset to clamp the offset so that causally ordered calls
 * never observe time going backwards (see the long comment below for
 * the full argument).
 */
void
do_gettimeofday (struct timeval *tv)
{
	unsigned long seq, nsec, usec, sec, old, offset;

	while (1) {
		/* Consistent snapshot of the time state; retried if a
		 * writer (timer tick / settimeofday) raced with us. */
		seq = read_seqbegin(&xtime_lock);
		{
			old = last_nsec_offset;
			offset = time_interpolator_get_offset();
			sec = xtime.tv_sec;
			nsec = xtime.tv_nsec;
		}
		if (unlikely(read_seqretry(&xtime_lock, seq)))
			continue;
		/*
		 * Ensure that for any pair of causally ordered gettimeofday() calls, time
		 * never goes backwards (even when ITC on different CPUs are not perfectly
		 * synchronized). (A pair of concurrent calls to gettimeofday() is by
		 * definition non-causal and hence it makes no sense to talk about
		 * time-continuity for such calls.)
		 *
		 * Doing this in a lock-free and race-free manner is tricky. Here is why
		 * it works (most of the time): read_seqretry() just succeeded, which
		 * implies we calculated a consistent (valid) value for "offset". If the
		 * cmpxchg() below succeeds, we further know that last_nsec_offset still
		 * has the same value as at the beginning of the loop, so there was
		 * presumably no timer-tick or other updates to last_nsec_offset in the
		 * meantime. This isn't 100% true though: there _is_ a possibility of a
		 * timer-tick occurring right right after read_seqretry() and then getting
		 * zero or more other readers which will set last_nsec_offset to the same
		 * value as the one we read at the beginning of the loop. If this
		 * happens, we'll end up returning a slightly newer time than we ought to
		 * (the jump forward is at most "offset" nano-seconds). There is no
		 * danger of causing time to go backwards, though, so we are safe in that
		 * sense. We could make the probability of this unlucky case occurring
		 * arbitrarily small by encoding a version number in last_nsec_offset, but
		 * even without versioning, the probability of this unlucky case should be
		 * so small that we won't worry about it.
		 */
		if (offset <= old) {
			/* A previous reader saw a larger offset; reuse it so
			 * we never report an earlier time than they did. */
			offset = old;
			break;
		} else if (likely(cmpxchg(&last_nsec_offset, old, offset) == old))
			break;

		/* someone else beat us to updating last_nsec_offset; try again */
	}

	/* Convert the nanosecond remainder to microseconds and carry any
	 * whole seconds (offset can push nsec past NSEC_PER_SEC). */
	usec = (nsec + offset) / 1000;

	while (unlikely(usec >= USEC_PER_SEC)) {
		usec -= USEC_PER_SEC;
		++sec;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}

EXPORT_SYMBOL(do_gettimeofday);
-
-/*
- * The profiling function is SMP safe. (nothing can mess
- * around with "current", and the profiling counters are
- * updated with atomic operations). This is especially
- * useful with a profiling multiplier != 1
- */
-static inline void
-ia64_do_profile (struct pt_regs * regs)
-{
- unsigned long ip, slot;
- extern cpumask_t prof_cpu_mask;
-
- profile_hook(regs);
-
- if (user_mode(regs))
- return;
-
- if (!prof_buffer)
- return;
-
- ip = instruction_pointer(regs);
- /* Conserve space in histogram by encoding slot bits in address
- * bits 2 and 3 rather than bits 0 and 1.
- */
- slot = ip & 3;
- ip = (ip & ~3UL) + 4*slot;
-
- /*
- * Only measure the CPUs specified by /proc/irq/prof_cpu_mask.
- * (default is all CPUs.)
- */
- if (!cpu_isset(smp_processor_id(), prof_cpu_mask))
- return;
-
- ip -= (unsigned long) &_stext;
- ip >>= prof_shift;
- /*
- * Don't ignore out-of-bounds IP values silently,
- * put them into the last histogram slot, so if
- * present, they will show up as a sharp peak.
- */
- if (ip > prof_len-1)
- ip = prof_len-1;
- atomic_inc((atomic_t *)&prof_buffer[ip]);
-}
-