1 /* $Id: time.c,v 1.42 2002/01/23 14:33:55 davem Exp $
2 * time.c: UltraSparc timer and TOD clock support.
4 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
7 * Based largely on code which is:
9 * Copyright (C) 1996 Thomas K. Dyas (tdyas@eden.rutgers.edu)
12 #include <linux/config.h>
13 #include <linux/errno.h>
14 #include <linux/module.h>
15 #include <linux/sched.h>
16 #include <linux/kernel.h>
17 #include <linux/param.h>
18 #include <linux/string.h>
20 #include <linux/interrupt.h>
21 #include <linux/time.h>
22 #include <linux/timex.h>
23 #include <linux/init.h>
24 #include <linux/ioport.h>
25 #include <linux/mc146818rtc.h>
26 #include <linux/delay.h>
27 #include <linux/profile.h>
28 #include <linux/bcd.h>
29 #include <linux/jiffies.h>
30 #include <linux/cpufreq.h>
31 #include <linux/percpu.h>
33 #include <asm/oplib.h>
34 #include <asm/mostek.h>
35 #include <asm/timer.h>
43 #include <asm/starfire.h>
45 #include <asm/sections.h>
46 #include <asm/cpudata.h>
/* Locks serializing access to the Mostek TOD chip and the DS1287/M5819 RTC. */
48 spinlock_t mostek_lock = SPIN_LOCK_UNLOCKED;
49 spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
/* Register base addresses of the probed clock chips; 0UL means
 * "not present / not yet probed" (filled in by clock_probe()).
 */
50 unsigned long mstk48t02_regs = 0UL;
52 unsigned long ds1287_regs = 0UL;
55 extern unsigned long wall_jiffies;
57 u64 jiffies_64 = INITIAL_JIFFIES;
59 EXPORT_SYMBOL(jiffies_64);
61 static unsigned long mstk48t08_regs = 0UL;
62 static unsigned long mstk48t59_regs = 0UL;
64 static int set_rtc_mmss(unsigned long);
/* Chosen at boot (see sparc64_init_timers): tick, stick or hbtick ops. */
66 struct sparc64_tick_ops *tick_ops;
/* Bit 63 of %tick/%stick; cleared by tick_disable_protection() to let
 * userland read the counter for profiling.
 */
68 #define TICK_PRIV_BIT (1UL << 63)
/* Clear TICK_PRIV_BIT in %tick so user code may read the register.
 * The "+6" compensates for cycles elapsed between the rd and the wrpr.
 * NOTE(review): listing is truncated here — the asm statement's tail
 * (dummy %tick read-back, operand lists, closing brace) is not visible.
 */
70 static void tick_disable_protection(void)
72 /* Set things up so user can access tick register for profiling
73 * purposes. Also workaround BB_ERRATA_1 by doing a dummy
74 * read back of %tick after writing it.
80 "1: rd %%tick, %%g2\n"
81 " add %%g2, 6, %%g2\n"
82 " andn %%g2, %0, %%g2\n"
83 " wrpr %%g2, 0, %%tick\n"
/* Program %tick_cmpr = (current %tick & ~TICK_PRIV_BIT) + offset,
 * after dropping the privilege bit via tick_disable_protection().
 * The trailing "rd %tick_cmpr, %g0" is a dummy read-back (Blackbird
 * write-erratum workaround).  NOTE(review): lines missing in this
 * listing — the initial %tick read and output operands are not shown.
 */
90 static void tick_init_tick(unsigned long offset)
92 tick_disable_protection();
96 " andn %%g1, %1, %%g1\n"
98 " add %%g1, %0, %%g1\n"
100 "1: wr %%g1, 0x0, %%tick_cmpr\n"
101 " rd %%tick_cmpr, %%g0"
103 : "r" (offset), "r" (TICK_PRIV_BIT)
/* Return the current %tick value with the privilege bit masked off. */
107 static unsigned long tick_get_tick(void)
111 __asm__ __volatile__("rd %%tick, %0\n\t"
115 return ret & ~TICK_PRIV_BIT;
/* Read %tick_cmpr.  NOTE(review): the rest of the function (return of
 * the read value) is not visible in this truncated listing.
 */
118 static unsigned long tick_get_compare(void)
122 __asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
/* Advance %tick_cmpr by 'adj' ticks and return the new compare value.
 * The branch aligns the wr on an I-cache line and the dummy read-back
 * follows, both Blackbird %tick_cmpr erratum workarounds (see comment).
 */
129 static unsigned long tick_add_compare(unsigned long adj)
131 unsigned long new_compare;
133 /* Workaround for Spitfire Errata (#54 I think??), I discovered
134 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
137 * On Blackbird writes to %tick_cmpr can fail, the
138 * workaround seems to be to execute the wr instruction
139 * at the start of an I-cache line, and perform a dummy
140 * read back from %tick_cmpr right after writing to it. -DaveM
142 __asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
143 "ba,pt %%xcc, 1f\n\t"
144 " add %0, %1, %0\n\t"
147 "wr %0, 0, %%tick_cmpr\n\t"
148 "rd %%tick_cmpr, %%g0"
149 : "=&r" (new_compare)
/* Add 'adj' to %tick and reprogram %tick_cmpr to
 * (new tick & ~TICK_PRIV_BIT) + offset, using the same I-cache-line /
 * dummy-read-back Blackbird workaround as tick_add_compare().
 * NOTE(review): the add of 'adj' into %0 and the return statement are
 * not visible in this truncated listing.
 */
155 static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
157 unsigned long new_tick, tmp;
159 /* Also need to handle Blackbird bug here too. */
160 __asm__ __volatile__("rd %%tick, %0\n\t"
162 "wrpr %0, 0, %%tick\n\t"
163 "andn %0, %4, %1\n\t"
164 "ba,pt %%xcc, 1f\n\t"
165 " add %1, %3, %1\n\t"
168 "wr %1, 0, %%tick_cmpr\n\t"
169 "rd %%tick_cmpr, %%g0"
170 : "=&r" (new_tick), "=&r" (tmp)
171 : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
/* %tick-based timer ops (pre-UltraSPARC-III CPUs); timer softint bit 0. */
176 static struct sparc64_tick_ops tick_operations = {
177 .init_tick = tick_init_tick,
178 .get_tick = tick_get_tick,
179 .get_compare = tick_get_compare,
180 .add_tick = tick_add_tick,
181 .add_compare = tick_add_compare,
182 .softint_mask = 1UL << 0,
/* STICK variant of init_tick: clear the privilege bit in %asr24 (STICK)
 * so userland can read it, then set %asr25 (STICK_CMPR) to
 * (STICK & ~TICK_PRIV_BIT) + offset.
 */
185 static void stick_init_tick(unsigned long offset)
187 tick_disable_protection();
189 /* Let the user get at STICK too. */
190 __asm__ __volatile__(
191 " rd %%asr24, %%g2\n"
192 " andn %%g2, %0, %%g2\n"
193 " wr %%g2, 0, %%asr24"
195 : "r" (TICK_PRIV_BIT)
198 __asm__ __volatile__(
199 " rd %%asr24, %%g1\n"
200 " andn %%g1, %1, %%g1\n"
201 " add %%g1, %0, %%g1\n"
202 " wr %%g1, 0x0, %%asr25"
204 : "r" (offset), "r" (TICK_PRIV_BIT)
/* Return the current STICK (%asr24) value, privilege bit masked off. */
208 static unsigned long stick_get_tick(void)
212 __asm__ __volatile__("rd %%asr24, %0"
215 return ret & ~TICK_PRIV_BIT;
/* Read STICK_CMPR (%asr25).  NOTE(review): the return statement is not
 * visible in this truncated listing.
 */
218 static unsigned long stick_get_compare(void)
222 __asm__ __volatile__("rd %%asr25, %0"
/* STICK variant of add_tick: bump %asr24 by 'adj' and reprogram %asr25.
 * NOTE(review): middle of the asm (the add of 'adj', the %asr25 write)
 * and the return are missing from this truncated listing.
 */
228 static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
230 unsigned long new_tick, tmp;
232 __asm__ __volatile__("rd %%asr24, %0\n\t"
234 "wr %0, 0, %%asr24\n\t"
235 "andn %0, %4, %1\n\t"
238 : "=&r" (new_tick), "=&r" (tmp)
239 : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
/* STICK variant of add_compare: advance %asr25 by 'adj'.
 * NOTE(review): the add/write portion of the asm and the return are
 * missing from this truncated listing.
 */
244 static unsigned long stick_add_compare(unsigned long adj)
246 unsigned long new_compare;
248 __asm__ __volatile__("rd %%asr25, %0\n\t"
251 : "=&r" (new_compare)
/* STICK-based timer ops; note the different softint bit (16, not 0). */
257 static struct sparc64_tick_ops stick_operations = {
258 .init_tick = stick_init_tick,
259 .get_tick = stick_get_tick,
260 .get_compare = stick_get_compare,
261 .add_tick = stick_add_tick,
262 .add_compare = stick_add_compare,
263 .softint_mask = 1UL << 16,
266 /* On Hummingbird the STICK/STICK_CMPR register is implemented
267 * in I/O space. There are two 64-bit registers each, the
268 * first holds the low 32-bits of the value and the second holds
 * the high 32-bits (judging from __hbird_read_stick/__hbird_write_stick
 * below; parts of this comment were lost in this truncated listing).
271 * Since STICK is constantly updating, we have to access it carefully.
273 * The sequence we use to read is:
 * 1) read the low word
 * 2) read the high word
276 * 3) read low again, if it rolled over increment high by 1
278 * Writing STICK safely is also tricky:
279 * 1) write low to zero
 * 2) write the high word, then the real low word
 */
283 #define HBIRD_STICKCMP_ADDR 0x1fe0000f060UL
284 #define HBIRD_STICK_ADDR 0x1fe0000f070UL
/* Read the 64-bit Hummingbird STICK value from I/O space via
 * ASI_PHYS_BYPASS_EC_E: load low (tmp1), high (tmp2), low again (tmp3);
 * if low rolled over between reads, the branch path adjusts high.
 * NOTE(review): the compare feeding "blu" and the final combine into
 * 'ret' are partially missing from this truncated listing.
 */
286 static unsigned long __hbird_read_stick(void)
288 unsigned long ret, tmp1, tmp2, tmp3;
289 unsigned long addr = HBIRD_STICK_ADDR;
291 __asm__ __volatile__("ldxa [%1] %5, %2\n\t"
292 "add %1, 0x8, %1\n\t"
293 "ldxa [%1] %5, %3\n\t"
294 "sub %1, 0x8, %1\n\t"
295 "ldxa [%1] %5, %4\n\t"
297 "blu,a,pn %%xcc, 1f\n\t"
300 "sllx %3, 32, %3\n\t"
302 : "=&r" (ret), "=&r" (addr),
303 "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3)
304 : "i" (ASI_PHYS_BYPASS_EC_E), "1" (addr));
/* Read the 64-bit STICK_CMPR from I/O space: low word at the base
 * address, high word at base + 8.  No rollover dance is needed here
 * because the compare register does not self-increment.
 */
309 static unsigned long __hbird_read_compare(void)
311 unsigned long low, high;
312 unsigned long addr = HBIRD_STICKCMP_ADDR;
314 __asm__ __volatile__("ldxa [%2] %3, %0\n\t"
315 "add %2, 0x8, %2\n\t"
317 : "=&r" (low), "=&r" (high), "=&r" (addr)
318 : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr));
320 return (high << 32UL) | low;
/* Write a 64-bit STICK value using the safe sequence described above:
 * zero the low word first, store the high word, then the real low word.
 * NOTE(review): the final store of 'low' is not visible in this
 * truncated listing.
 */
323 static void __hbird_write_stick(unsigned long val)
325 unsigned long low = (val & 0xffffffffUL);
326 unsigned long high = (val >> 32UL);
327 unsigned long addr = HBIRD_STICK_ADDR;
329 __asm__ __volatile__("stxa %%g0, [%0] %4\n\t"
330 "add %0, 0x8, %0\n\t"
331 "stxa %3, [%0] %4\n\t"
332 "sub %0, 0x8, %0\n\t"
335 : "0" (addr), "r" (low), "r" (high),
336 "i" (ASI_PHYS_BYPASS_EC_E));
/* Write a 64-bit STICK_CMPR value: start at the high-word address
 * (base + 8), store high, step back to base and store low.
 * NOTE(review): the final store of 'low' is not visible in this
 * truncated listing.
 */
339 static void __hbird_write_compare(unsigned long val)
341 unsigned long low = (val & 0xffffffffUL);
342 unsigned long high = (val >> 32UL);
343 unsigned long addr = HBIRD_STICKCMP_ADDR + 0x8UL;
345 __asm__ __volatile__("stxa %3, [%0] %4\n\t"
346 "sub %0, 0x8, %0\n\t"
349 : "0" (addr), "r" (low), "r" (high),
350 "i" (ASI_PHYS_BYPASS_EC_E));
/* Hummingbird init_tick: rewrite STICK with its own value to jump-start
 * the interrupt mux (see XXX note), then arm STICK_CMPR at
 * (STICK & ~TICK_PRIV_BIT) + offset.
 */
353 static void hbtick_init_tick(unsigned long offset)
357 tick_disable_protection();
359 /* XXX This seems to be necessary to 'jumpstart' Hummingbird
360 * XXX into actually sending STICK interrupts. I think because
361 * XXX of how we store %tick_cmpr in head.S this somehow resets the
362 * XXX {TICK + STICK} interrupt mux. -DaveM
364 __hbird_write_stick(__hbird_read_stick());
366 val = __hbird_read_stick() & ~TICK_PRIV_BIT;
367 __hbird_write_compare(val + offset);
/* Current Hummingbird STICK value, privilege bit masked off. */
370 static unsigned long hbtick_get_tick(void)
372 return __hbird_read_stick() & ~TICK_PRIV_BIT;
/* Current Hummingbird STICK_CMPR value. */
375 static unsigned long hbtick_get_compare(void)
377 return __hbird_read_compare();
/* Advance STICK by 'adj' and re-arm STICK_CMPR at the new value plus
 * 'offset' (privilege bit stripped for the compare).
 */
380 static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset)
384 val = __hbird_read_stick() + adj;
385 __hbird_write_stick(val);
387 val &= ~TICK_PRIV_BIT;
388 __hbird_write_compare(val + offset);
/* Advance STICK_CMPR by 'adj', keeping the privilege bit clear. */
393 static unsigned long hbtick_add_compare(unsigned long adj)
395 unsigned long val = __hbird_read_compare() + adj;
397 val &= ~TICK_PRIV_BIT;
398 __hbird_write_compare(val);
/* Hummingbird (Ultra-IIe) timer ops; interrupts use softint bit 0. */
403 static struct sparc64_tick_ops hbtick_operations = {
404 .init_tick = hbtick_init_tick,
405 .get_tick = hbtick_get_tick,
406 .get_compare = hbtick_get_compare,
407 .add_tick = hbtick_add_tick,
408 .add_compare = hbtick_add_compare,
409 .softint_mask = 1UL << 0,
412 /* timer_interrupt() needs to keep up the real-time clock,
413 * as well as call the "do_timer()" routine every clocktick
415 * NOTE: On SUN5 systems the ticker interrupt comes in using 2
416 * interrupts, one at level14 and one with softint bit 0.
/* Ticks per HZ interval, and the compare value used by the last tick. */
418 unsigned long timer_tick_offset;
419 unsigned long timer_tick_compare;
/* Fixed-point (shifted) tick->usec and tick->nsec conversion factors,
 * computed in time_init() from the probed clock frequency.
 */
421 static unsigned long timer_ticks_per_usec_quotient;
422 static unsigned long timer_ticks_per_nsec_quotient;
/* One timer tick expressed in microseconds. */
424 #define TICK_SIZE (tick_nsec / 1000)
/* Called from the timer interrupt: if NTP says we are synchronized,
 * push the kernel time back into the hardware TOD clock roughly every
 * 11 minutes (660s), near the middle of a second so set_rtc_mmss()'s
 * half-second write latency lands correctly.  On write failure, retry
 * in 60 seconds (last_rtc_update is set 600s in the past).
 */
426 static __inline__ void timer_check_rtc(void)
428 /* last time the cmos clock got updated */
429 static long last_rtc_update;
431 /* Determine when to update the Mostek clock. */
432 if ((time_status & STA_UNSYNC) == 0 &&
433 xtime.tv_sec > last_rtc_update + 660 &&
434 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
435 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
436 if (set_rtc_mmss(xtime.tv_sec) == 0)
437 last_rtc_update = xtime.tv_sec;
439 last_rtc_update = xtime.tv_sec - 600;
440 /* do it again in 60 s */
/* Kernel profiler hook run from the timer interrupt.  Samples hit
 * inside out-of-line helper stubs (atomic/rwlock/memcpy/bzero/bitops)
 * are folded back to the caller via %o7 (the visible code only reads
 * pc/o7 and tests the ranges; the pc = o7 reassignment sits in lines
 * missing from this truncated listing).  The surviving pc is rebased
 * against _stext and counted into prof_buffer[].
 */
444 void sparc64_do_profile(struct pt_regs *regs)
446 unsigned long pc = regs->tpc;
447 unsigned long o7 = regs->u_regs[UREG_RETPC];
458 extern int rwlock_impl_begin, rwlock_impl_end;
459 extern int atomic_impl_begin, atomic_impl_end;
460 extern int __memcpy_begin, __memcpy_end;
461 extern int __bzero_begin, __bzero_end;
462 extern int __bitops_begin, __bitops_end;
464 if ((pc >= (unsigned long) &atomic_impl_begin &&
465 pc < (unsigned long) &atomic_impl_end) ||
466 (pc >= (unsigned long) &rwlock_impl_begin &&
467 pc < (unsigned long) &rwlock_impl_end) ||
468 (pc >= (unsigned long) &__memcpy_begin &&
469 pc < (unsigned long) &__memcpy_end) ||
470 (pc >= (unsigned long) &__bzero_begin &&
471 pc < (unsigned long) &__bzero_end) ||
472 (pc >= (unsigned long) &__bitops_begin &&
473 pc < (unsigned long) &__bitops_end))
476 pc -= (unsigned long) _stext;
481 atomic_inc((atomic_t *)&prof_buffer[pc]);
/* Master TICK_INT handler.  Under xtime_lock: profile, then with
 * interrupts hard-disabled (PSTATE_IE cleared) advance the compare
 * register by one tick interval; the loop re-arms again if the tick
 * counter already passed the new compare value (lost-tick catch-up).
 * NOTE(review): do_timer()/irq_enter() and the return statement sit in
 * lines missing from this truncated listing.
 */
485 static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs * regs)
487 unsigned long ticks, pstate;
489 write_seqlock(&xtime_lock);
493 sparc64_do_profile(regs);
497 /* Guarantee that the following sequences execute
500 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
501 "wrpr %0, %1, %%pstate"
505 timer_tick_compare = tick_ops->add_compare(timer_tick_offset);
506 ticks = tick_ops->get_tick();
508 /* Restore PSTATE_IE. */
509 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
512 } while (time_after_eq(ticks, timer_tick_compare));
516 write_sequnlock(&xtime_lock);
/* Secondary (non-boot CPU / SMP) tick entry: only tracks what the next
 * compare value will be; it does not reprogram the compare register.
 */
522 void timer_tick_interrupt(struct pt_regs *regs)
524 write_seqlock(&xtime_lock);
529 * Only keep timer_tick_offset uptodate, but don't set TICK_CMPR.
531 timer_tick_compare = tick_ops->get_compare() + timer_tick_offset;
535 write_sequnlock(&xtime_lock);
539 /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
540 static void __init kick_start_clock(void)
542 unsigned long regs = mstk48t02_regs;
546 prom_printf("CLOCK: Clock was stopped. Kick start ");
/* Step 1: with write access enabled (MSTK_CREG_WRITE), clear the STOP
 * bit in SEC (done in a line missing from this listing) and set the
 * KICK_START bit in HOUR to restart the oscillator.
 */
548 spin_lock_irq(&mostek_lock);
550 /* Turn on the kick start bit to start the oscillator. */
551 tmp = mostek_read(regs + MOSTEK_CREG);
552 tmp |= MSTK_CREG_WRITE;
553 mostek_write(regs + MOSTEK_CREG, tmp);
554 tmp = mostek_read(regs + MOSTEK_SEC);
556 mostek_write(regs + MOSTEK_SEC, tmp);
557 tmp = mostek_read(regs + MOSTEK_HOUR);
558 tmp |= MSTK_KICK_START;
559 mostek_write(regs + MOSTEK_HOUR, tmp);
560 tmp = mostek_read(regs + MOSTEK_CREG);
561 tmp &= ~MSTK_CREG_WRITE;
562 mostek_write(regs + MOSTEK_CREG, tmp);
564 spin_unlock_irq(&mostek_lock);
/* Step 2: busy-wait until the seconds register advances three times,
 * proving the oscillator is actually running.
 */
566 /* Delay to allow the clock oscillator to start. */
567 sec = MSTK_REG_SEC(regs);
568 for (i = 0; i < 3; i++) {
569 while (sec == MSTK_REG_SEC(regs))
570 for (count = 0; count < 100000; count++)
573 sec = MSTK_REG_SEC(regs);
/* Step 3: drop KICK_START and program a known-valid date
 * (1996-08-01 00:00:00, day-of-week 5).
 */
577 spin_lock_irq(&mostek_lock);
579 /* Turn off kick start and set a "valid" time and date. */
580 tmp = mostek_read(regs + MOSTEK_CREG);
581 tmp |= MSTK_CREG_WRITE;
582 mostek_write(regs + MOSTEK_CREG, tmp);
583 tmp = mostek_read(regs + MOSTEK_HOUR);
584 tmp &= ~MSTK_KICK_START;
585 mostek_write(regs + MOSTEK_HOUR, tmp);
586 MSTK_SET_REG_SEC(regs,0);
587 MSTK_SET_REG_MIN(regs,0);
588 MSTK_SET_REG_HOUR(regs,0);
589 MSTK_SET_REG_DOW(regs,5);
590 MSTK_SET_REG_DOM(regs,1);
591 MSTK_SET_REG_MONTH(regs,8);
592 MSTK_SET_REG_YEAR(regs,1996 - MSTK_YEAR_ZERO);
593 tmp = mostek_read(regs + MOSTEK_CREG);
594 tmp &= ~MSTK_CREG_WRITE;
595 mostek_write(regs + MOSTEK_CREG, tmp);
597 spin_unlock_irq(&mostek_lock);
/* Step 4: verify KICK_START really cleared; retry the clear until the
 * chip confirms it.
 */
599 /* Ensure the kick start bit is off. If it isn't, turn it off. */
600 while (mostek_read(regs + MOSTEK_HOUR) & MSTK_KICK_START) {
601 prom_printf("CLOCK: Kick start still on!\n");
603 spin_lock_irq(&mostek_lock);
605 tmp = mostek_read(regs + MOSTEK_CREG);
606 tmp |= MSTK_CREG_WRITE;
607 mostek_write(regs + MOSTEK_CREG, tmp);
609 tmp = mostek_read(regs + MOSTEK_HOUR);
610 tmp &= ~MSTK_KICK_START;
611 mostek_write(regs + MOSTEK_HOUR, tmp);
613 tmp = mostek_read(regs + MOSTEK_CREG);
614 tmp &= ~MSTK_CREG_WRITE;
615 mostek_write(regs + MOSTEK_CREG, tmp);
617 spin_unlock_irq(&mostek_lock);
620 prom_printf("CLOCK: Kick start procedure successful.\n");
623 /* Return nonzero if the clock chip battery is low. */
624 static int __init has_low_battery(void)
626 unsigned long regs = mstk48t02_regs;
629 spin_lock_irq(&mostek_lock);
/* Probe by attempting to flip an NVRAM byte: when the battery is low
 * the chip write-protects its RAM, so the complement never sticks and
 * the read-back equals the original value.
 */
631 data1 = mostek_read(regs + MOSTEK_EEPROM); /* Read some data. */
632 mostek_write(regs + MOSTEK_EEPROM, ~data1); /* Write back the complement. */
633 data2 = mostek_read(regs + MOSTEK_EEPROM); /* Read back the complement. */
634 mostek_write(regs + MOSTEK_EEPROM, data1); /* Restore original value. */
636 spin_unlock_irq(&mostek_lock);
638 return (data1 == data2); /* Was the write blocked? */
641 /* Probe for the real time clock chip. */
/* Initialize xtime/wall_to_monotonic from whichever TOD chip was
 * probed: a Mostek MK48Txx (read with MSTK_CREG_READ latched) or a
 * Dallas DS1287-compatible (read via CMOS_READ with update-in-progress
 * polling).  NOTE(review): this listing is truncated — the dregs
 * branch selection, the do{...} opening, and BCD conversion details
 * are partially missing.
 */
642 static void __init set_system_time(void)
644 unsigned int year, mon, day, hour, min, sec;
645 unsigned long mregs = mstk48t02_regs;
647 unsigned long dregs = ds1287_regs;
649 unsigned long dregs = 0UL;
653 if (!mregs && !dregs) {
654 prom_printf("Something wrong, clock regs not mapped yet.\n");
659 spin_lock_irq(&mostek_lock);
661 /* Traditional Mostek chip. */
662 tmp = mostek_read(mregs + MOSTEK_CREG);
663 tmp |= MSTK_CREG_READ;
664 mostek_write(mregs + MOSTEK_CREG, tmp);
666 sec = MSTK_REG_SEC(mregs);
667 min = MSTK_REG_MIN(mregs);
668 hour = MSTK_REG_HOUR(mregs);
669 day = MSTK_REG_DOM(mregs);
670 mon = MSTK_REG_MONTH(mregs);
671 year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
675 /* Dallas 12887 RTC chip. */
677 /* Stolen from arch/i386/kernel/time.c, see there for
678 * credits and descriptive comments.
/* Wait for an update cycle to begin, then for it to finish, so the
 * subsequent reads see a consistent time.
 */
680 for (i = 0; i < 1000000; i++) {
681 if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP)
685 for (i = 0; i < 1000000; i++) {
686 if (!(CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP))
/* Re-read until seconds is stable across the whole sample. */
691 sec = CMOS_READ(RTC_SECONDS);
692 min = CMOS_READ(RTC_MINUTES);
693 hour = CMOS_READ(RTC_HOURS);
694 day = CMOS_READ(RTC_DAY_OF_MONTH);
695 mon = CMOS_READ(RTC_MONTH);
696 year = CMOS_READ(RTC_YEAR);
697 } while (sec != CMOS_READ(RTC_SECONDS));
698 if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
/* Two-digit year: values below 70 are treated as 20xx. */
706 if ((year += 1900) < 1970)
710 xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
711 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
712 set_normalized_timespec(&wall_to_monotonic,
713 -xtime.tv_sec, -xtime.tv_nsec);
716 tmp = mostek_read(mregs + MOSTEK_CREG);
717 tmp &= ~MSTK_CREG_READ;
718 mostek_write(mregs + MOSTEK_CREG, tmp);
720 spin_unlock_irq(&mostek_lock);
/* Locate the system TOD clock chip and record its register base in the
 * mstk48t02/mstk48t08/mstk48t59/ds1287 globals.  Search order: FHC
 * Central, then EBUS devices, then ISA bridges, then SBUS.  Starfire
 * has no TOD chip at all and asks OBP for the time instead.
 * NOTE(review): this listing is heavily truncated — many closing
 * braces, 'goto try_*' style transitions and error paths are missing.
 */
724 void __init clock_probe(void)
726 struct linux_prom_registers clk_reg[2];
728 int node, busnd = -1, err;
730 struct linux_central *cbus;
732 struct linux_ebus *ebus = NULL;
733 struct sparc_isa_bridge *isa_br = NULL;
/* Starfire: evaluate an OBP forth snippet that stores the TOD seconds
 * into unix_tod, then seed xtime from it and return.
 */
742 if (this_is_starfire) {
743 /* davem suggests we keep this within the 4M locked kernel image */
744 static char obp_gettod[256];
747 sprintf(obp_gettod, "h# %08x unix-gettod",
748 (unsigned int) (long) &unix_tod);
749 prom_feval(obp_gettod);
750 xtime.tv_sec = unix_tod;
751 xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
752 set_normalized_timespec(&wall_to_monotonic,
753 -xtime.tv_sec, -xtime.tv_nsec);
757 local_irq_save(flags);
761 busnd = central_bus->child->prom_node;
763 /* Check FHC Central then EBUSs then ISA bridges then SBUSs.
764 * That way we handle the presence of multiple properly.
766 * As a special case, machines with Central must provide the
770 if (ebus_chain != NULL) {
773 busnd = ebus->prom_node;
775 if (isa_chain != NULL) {
778 busnd = isa_br->prom_node;
781 if (sbus_root != NULL && busnd == -1)
782 busnd = sbus_root->prom_node;
785 prom_printf("clock_probe: problem, cannot find bus to search.\n");
/* Walk the children of the chosen bus looking for a node whose
 * "model" property names a supported clock chip.
 */
789 node = prom_getchild(busnd);
795 prom_getstring(node, "model", model, sizeof(model));
796 if (strcmp(model, "mk48t02") &&
797 strcmp(model, "mk48t08") &&
798 strcmp(model, "mk48t59") &&
799 strcmp(model, "m5819") &&
800 strcmp(model, "m5819p") &&
801 strcmp(model, "ds1287")) {
803 prom_printf("clock_probe: Central bus lacks timer chip.\n");
808 node = prom_getsibling(node);
/* Exhausted this bus: fall through to the next EBUS / ISA bridge. */
810 while ((node == 0) && ebus != NULL) {
813 busnd = ebus->prom_node;
814 node = prom_getchild(busnd);
817 while ((node == 0) && isa_br != NULL) {
818 isa_br = isa_br->next;
819 if (isa_br != NULL) {
820 busnd = isa_br->prom_node;
821 node = prom_getchild(busnd);
826 prom_printf("clock_probe: Cannot find timer chip\n");
832 err = prom_getproperty(node, "reg", (char *)clk_reg,
835 prom_printf("clock_probe: Cannot get Mostek reg property\n");
/* Translate the chip's "reg" through the bus-specific ranges to get
 * a usable physical address, then record it per chip model.
 */
840 apply_fhc_ranges(central_bus->child, clk_reg, 1);
841 apply_central_ranges(central_bus, clk_reg, 1);
844 else if (ebus != NULL) {
845 struct linux_ebus_device *edev;
847 for_each_ebusdev(edev, ebus)
848 if (edev->prom_node == node)
851 if (isa_chain != NULL)
853 prom_printf("%s: Mostek not probed by EBUS\n",
858 if (!strcmp(model, "ds1287") ||
859 !strcmp(model, "m5819") ||
860 !strcmp(model, "m5819p")) {
861 ds1287_regs = edev->resource[0].start;
863 mstk48t59_regs = edev->resource[0].start;
864 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
868 else if (isa_br != NULL) {
869 struct sparc_isa_device *isadev;
872 for_each_isadev(isadev, isa_br)
873 if (isadev->prom_node == node)
875 if (isadev == NULL) {
/* BUG(review): "%s" has no matching argument here — prom_printf will
 * read garbage for it.  Should pass an identifying string (compare the
 * EBUS branch above) or drop the specifier.
 */
876 prom_printf("%s: Mostek not probed by ISA\n");
879 if (!strcmp(model, "ds1287") ||
880 !strcmp(model, "m5819") ||
881 !strcmp(model, "m5819p")) {
882 ds1287_regs = isadev->resource.start;
884 mstk48t59_regs = isadev->resource.start;
885 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
/* SBUS case: fold the matching sbus_range offset into the address. */
891 if (sbus_root->num_sbus_ranges) {
892 int nranges = sbus_root->num_sbus_ranges;
895 for (rngc = 0; rngc < nranges; rngc++)
896 if (clk_reg[0].which_io ==
897 sbus_root->sbus_ranges[rngc].ot_child_space)
899 if (rngc == nranges) {
900 prom_printf("clock_probe: Cannot find ranges for "
904 clk_reg[0].which_io =
905 sbus_root->sbus_ranges[rngc].ot_parent_space;
906 clk_reg[0].phys_addr +=
907 sbus_root->sbus_ranges[rngc].ot_parent_base;
/* Dispatch on the model string "mk48tXX": chars [5] and [6] pick
 * 48t02 / 48t08 / (default) 48t59, with the 48t02 window computed as
 * an offset inside the larger parts.
 */
911 if(model[5] == '0' && model[6] == '2') {
912 mstk48t02_regs = (((u64)clk_reg[0].phys_addr) |
913 (((u64)clk_reg[0].which_io)<<32UL));
914 } else if(model[5] == '0' && model[6] == '8') {
915 mstk48t08_regs = (((u64)clk_reg[0].phys_addr) |
916 (((u64)clk_reg[0].which_io)<<32UL));
917 mstk48t02_regs = mstk48t08_regs + MOSTEK_48T08_48T02;
919 mstk48t59_regs = (((u64)clk_reg[0].phys_addr) |
920 (((u64)clk_reg[0].which_io)<<32UL));
921 mstk48t02_regs = mstk48t59_regs + MOSTEK_48T59_48T02;
926 if (mstk48t02_regs != 0UL) {
927 /* Report a low battery voltage condition. */
928 if (has_low_battery())
929 prom_printf("NVRAM: Low battery voltage!\n");
931 /* Kick start the clock if it is completely stopped. */
932 if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
938 local_irq_restore(flags);
941 /* This is gets the master TICK_INT timer going. */
/* Select the tick_ops implementation and clock source for this CPU:
 * Spitfire-family Hummingbird (manuf 0x17, impl 0x13 in %ver) uses the
 * I/O-space STICK (hbtick_operations); other Spitfires use %tick;
 * everything else uses the STICK ASRs.  Registers the TICK_INT IRQ
 * handler and arms the first compare with interrupts hard-disabled.
 * Returns the probed clock frequency (per the caller in time_init()).
 */
942 static unsigned long sparc64_init_timers(irqreturn_t (*cfunc)(int, void *, struct pt_regs *))
944 unsigned long pstate, clock;
947 extern void smp_tick_init(void);
950 if (tlb_type == spitfire) {
951 unsigned long ver, manuf, impl;
953 __asm__ __volatile__ ("rdpr %%ver, %0"
955 manuf = ((ver >> 48) & 0xffff);
956 impl = ((ver >> 32) & 0xffff);
957 if (manuf == 0x17 && impl == 0x13) {
958 /* Hummingbird, aka Ultra-IIe */
959 tick_ops = &hbtick_operations;
960 node = prom_root_node;
961 clock = prom_getint(node, "stick-frequency");
963 tick_ops = &tick_operations;
964 cpu_find_by_instance(0, &node, NULL);
965 clock = prom_getint(node, "clock-frequency");
968 tick_ops = &stick_operations;
969 node = prom_root_node;
970 clock = prom_getint(node, "stick-frequency");
972 timer_tick_offset = clock / HZ;
978 /* Register IRQ handler. */
979 err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC,
983 prom_printf("Serious problem, cannot register TICK_INT\n");
987 /* Guarantee that the following sequences execute
990 __asm__ __volatile__("rdpr %%pstate, %0\n\t"
991 "wrpr %0, %1, %%pstate"
995 tick_ops->init_tick(timer_tick_offset);
997 /* Restore PSTATE_IE. */
998 __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
/* Per-CPU reference values captured at the first cpufreq transition so
 * udelay_val/clock_tick can be rescaled as the frequency changes.
 * NOTE(review): the 'struct freq_table {' opening line is missing from
 * this truncated listing; these are its members.
 */
1008 unsigned long udelay_val_ref;
1009 unsigned long clock_tick_ref;
1010 unsigned int ref_freq;
1012 static DEFINE_PER_CPU(struct freq_table, sparc64_freq_table) = { 0, 0, 0 };
/* Return 'cpu's pre-cpufreq reference clock tick when one has been
 * recorded, otherwise the live cpu_data value.
 */
1014 unsigned long sparc64_get_clock_tick(unsigned int cpu)
1016 struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
1018 if (ft->clock_tick_ref)
1019 return ft->clock_tick_ref;
1020 return cpu_data(cpu).clock_tick;
1023 #ifdef CONFIG_CPU_FREQ
/* cpufreq transition hook: on the first notification, snapshot the
 * reference frequency and per-CPU udelay/clock_tick values; on a
 * raising PRECHANGE, lowering POSTCHANGE, or RESUMECHANGE, rescale
 * both values with cpufreq_scale().  NOTE(review): the closing of the
 * 'if' blocks and the return value are missing from this listing.
 */
1025 static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
1028 struct cpufreq_freqs *freq = data;
1029 unsigned int cpu = freq->cpu;
1030 struct freq_table *ft = &per_cpu(sparc64_freq_table, cpu);
1032 if (!ft->ref_freq) {
1033 ft->ref_freq = freq->old;
1034 ft->udelay_val_ref = cpu_data(cpu).udelay_val;
1035 ft->clock_tick_ref = cpu_data(cpu).clock_tick;
1037 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
1038 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
1039 (val == CPUFREQ_RESUMECHANGE)) {
1040 cpu_data(cpu).udelay_val =
1041 cpufreq_scale(ft->udelay_val_ref,
1044 cpu_data(cpu).clock_tick =
1045 cpufreq_scale(ft->clock_tick_ref,
1053 static struct notifier_block sparc64_cpufreq_notifier_block = {
1054 .notifier_call = sparc64_cpufreq_notifier
1058 /* The quotient formula is taken from the IA64 port. */
1059 #define SPARC64_USEC_PER_CYC_SHIFT 30UL
1060 #define SPARC64_NSEC_PER_CYC_SHIFT 30UL
/* Boot-time setup: start the tick timer, derive the rounded fixed-point
 * tick->usec and tick->nsec quotients from the probed frequency
 * ((scaled_unit + clock/2) / clock rounds to nearest), and hook the
 * cpufreq notifier when CONFIG_CPU_FREQ is enabled.
 */
1061 void __init time_init(void)
1063 unsigned long clock = sparc64_init_timers(timer_interrupt);
1065 timer_ticks_per_usec_quotient =
1066 (((1000000UL << SPARC64_USEC_PER_CYC_SHIFT) +
1067 (clock / 2)) / clock);
1069 timer_ticks_per_nsec_quotient =
1070 (((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
1071 (clock / 2)) / clock);
1073 #ifdef CONFIG_CPU_FREQ
1074 cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
1075 CPUFREQ_TRANSITION_NOTIFIER);
/* Microseconds elapsed since the last tick: ticks past the previous
 * compare value, converted via the fixed-point usec quotient.
 */
1079 static __inline__ unsigned long do_gettimeoffset(void)
1081 unsigned long ticks = tick_ops->get_tick();
1083 ticks += timer_tick_offset;
1084 ticks -= timer_tick_compare;
1086 return (ticks * timer_ticks_per_usec_quotient)
1087 >> SPARC64_USEC_PER_CYC_SHIFT;
/* Scheduler clock: raw tick count converted to nanoseconds via the
 * fixed-point nsec quotient.  Monotonic as long as the counter is.
 */
1090 unsigned long long sched_clock(void)
1092 unsigned long ticks = tick_ops->get_tick();
1094 return (ticks * timer_ticks_per_nsec_quotient)
1095 >> SPARC64_NSEC_PER_CYC_SHIFT;
/* Set the wall clock.  Rejects out-of-range nanoseconds; under the
 * xtime seqlock it subtracts the time gettimeofday() would have added
 * since the last update (tick offset + lost jiffies) so xtime lands on
 * the caller's intended instant, adjusts wall_to_monotonic by the
 * opposite delta, and resets the NTP state to unsynchronized.
 * NOTE(review): the starfire early-return body and the final return
 * are missing from this truncated listing.
 */
1098 int do_settimeofday(struct timespec *tv)
1100 time_t wtm_sec, sec = tv->tv_sec;
1101 long wtm_nsec, nsec = tv->tv_nsec;
1103 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
1106 if (this_is_starfire)
1109 write_seqlock_irq(&xtime_lock);
1111 * This is revolting. We need to set "xtime" correctly. However, the
1112 * value in this location is the value at the most recent update of
1113 * wall time. Discover what correction gettimeofday() would have
1114 * made, and then undo it!
1116 nsec -= do_gettimeoffset() * 1000;
1117 nsec -= (jiffies - wall_jiffies) * (NSEC_PER_SEC / HZ);
1119 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
1120 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
1122 set_normalized_timespec(&xtime, sec, nsec);
1123 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
1125 time_adjust = 0; /* stop active adjtime() */
1126 time_status |= STA_UNSYNC;
1127 time_maxerror = NTP_PHASE_LIMIT;
1128 time_esterror = NTP_PHASE_LIMIT;
1129 write_sequnlock_irq(&xtime_lock);
1134 EXPORT_SYMBOL(do_settimeofday);
1136 /* Ok, my cute asm atomicity trick doesn't work anymore.
1137 * There are just too many variables that need to be protected
1138 * now (both members of xtime, wall_jiffies, et al.)
/* Read the wall clock under the xtime seqlock: base seconds/nanoseconds
 * from xtime plus the sub-tick offset, with lost-jiffy compensation
 * clamped to max_ntp_tick while NTP is slewing the clock backwards so
 * time never appears to run backwards.  The trailing while loop
 * normalizes usec overflow into tv->tv_sec.  NOTE(review): the seconds
 * read and the final stores into *tv are missing from this listing.
 */
1140 void do_gettimeofday(struct timeval *tv)
1142 unsigned long flags;
1144 unsigned long usec, sec;
1145 unsigned long max_ntp_tick = tick_usec - tickadj;
1150 seq = read_seqbegin_irqsave(&xtime_lock, flags);
1151 usec = do_gettimeoffset();
1152 lost = jiffies - wall_jiffies;
1155 * If time_adjust is negative then NTP is slowing the clock
1156 * so make sure not to go into next possible interval.
1157 * Better to lose some accuracy than have time go backwards..
1159 if (unlikely(time_adjust < 0)) {
1160 usec = min(usec, max_ntp_tick);
1163 usec += lost * max_ntp_tick;
1165 else if (unlikely(lost))
1166 usec += lost * tick_usec;
1169 usec += (xtime.tv_nsec / 1000);
1170 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
1172 while (usec >= 1000000) {
1181 EXPORT_SYMBOL(do_gettimeofday);
/* Write the minutes/seconds of 'nowtime' back into the hardware clock
 * (Mostek path first, else DS1287/CMOS path).  Only min/sec are
 * written so hour/timezone handling is never disturbed; the write is
 * refused (non-zero return implied) when the chip is more than ~15
 * minutes off after half-hour-zone correction.  NOTE(review): the
 * branch selection between mregs/dregs, several returns and closing
 * braces are missing from this truncated listing.
 */
1183 static int set_rtc_mmss(unsigned long nowtime)
1185 int real_seconds, real_minutes, chip_minutes;
1186 unsigned long mregs = mstk48t02_regs;
1188 unsigned long dregs = ds1287_regs;
1190 unsigned long dregs = 0UL;
1192 unsigned long flags;
1196 * Not having a register set can lead to trouble.
1197 * Also starfire doesn't have a tod clock.
1199 if (!mregs && !dregs)
1203 spin_lock_irqsave(&mostek_lock, flags);
1205 /* Read the current RTC minutes. */
1206 tmp = mostek_read(mregs + MOSTEK_CREG);
1207 tmp |= MSTK_CREG_READ;
1208 mostek_write(mregs + MOSTEK_CREG, tmp);
1210 chip_minutes = MSTK_REG_MIN(mregs);
1212 tmp = mostek_read(mregs + MOSTEK_CREG);
1213 tmp &= ~MSTK_CREG_READ;
1214 mostek_write(mregs + MOSTEK_CREG, tmp);
1217 * since we're only adjusting minutes and seconds,
1218 * don't interfere with hour overflow. This avoids
1219 * messing with unknown time zones but requires your
1220 * RTC not to be off by more than 15 minutes
1222 real_seconds = nowtime % 60;
1223 real_minutes = nowtime / 60;
1224 if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
1225 real_minutes += 30; /* correct for half hour time zone */
1228 if (abs(real_minutes - chip_minutes) < 30) {
1229 tmp = mostek_read(mregs + MOSTEK_CREG);
1230 tmp |= MSTK_CREG_WRITE;
1231 mostek_write(mregs + MOSTEK_CREG, tmp);
1233 MSTK_SET_REG_SEC(mregs,real_seconds);
1234 MSTK_SET_REG_MIN(mregs,real_minutes);
1236 tmp = mostek_read(mregs + MOSTEK_CREG);
1237 tmp &= ~MSTK_CREG_WRITE;
1238 mostek_write(mregs + MOSTEK_CREG, tmp);
1240 spin_unlock_irqrestore(&mostek_lock, flags);
1244 spin_unlock_irqrestore(&mostek_lock, flags);
/* DS1287/CMOS path: freeze updates (RTC_SET), reset the prescaler,
 * write BCD-converted min/sec, then restore control registers.
 */
1250 unsigned char save_control, save_freq_select;
1252 /* Stolen from arch/i386/kernel/time.c, see there for
1253 * credits and descriptive comments.
1255 spin_lock_irqsave(&rtc_lock, flags);
1256 save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
1257 CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
1259 save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
1260 CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
1262 chip_minutes = CMOS_READ(RTC_MINUTES);
1263 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
1264 BCD_TO_BIN(chip_minutes);
1265 real_seconds = nowtime % 60;
1266 real_minutes = nowtime / 60;
1267 if (((abs(real_minutes - chip_minutes) + 15)/30) & 1)
1271 if (abs(real_minutes - chip_minutes) < 30) {
1272 if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
1273 BIN_TO_BCD(real_seconds);
1274 BIN_TO_BCD(real_minutes);
1276 CMOS_WRITE(real_seconds,RTC_SECONDS);
1277 CMOS_WRITE(real_minutes,RTC_MINUTES);
1280 "set_rtc_mmss: can't update from %d to %d\n",
1281 chip_minutes, real_minutes);
1285 CMOS_WRITE(save_control, RTC_CONTROL);
1286 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
1287 spin_unlock_irqrestore(&rtc_lock, flags);