/*
 * Common time routines among all ppc machines.
 *
 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
 * Paul Mackerras' version and mine for PReP and Pmac.
 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
 *
 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
 * to make clock more stable (2.4.0-test5). The only thing
 * that this code assumes is that the timebases have been synchronized
 * by firmware on SMP and are never stopped (never do sleep
 * on SMP then, nap and doze are OK).
 *
 * Speeded up do_gettimeofday by getting rid of references to
 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
 *
 * TODO (not necessarily in this file):
 * - improve precision and reproducibility of timebase frequency
 * measurement at boot time. (for iSeries, we calibrate the timebase
 * against the Titan chip's clock.)
 * - for astronomical applications: add a new function to get
 * non-ambiguous timestamps even around leap seconds. This needs
 * a new timestamp format and a good name.
 *
 * 1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *             "A Kernel Model for Precision Timekeeping" by Dave Mills
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/kernel_stat.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/cpu.h>
#include <linux/security.h>
#include <linux/percpu.h>
#include <linux/rtc.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/nvram.h>
#include <asm/cache.h>
#include <asm/machdep.h>
#include <asm/uaccess.h>
#include <asm/time.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/div64.h>
#include <asm/smp.h>
#include <asm/vdso_datapage.h>
#include <asm/firmware.h>

#ifdef CONFIG_PPC_ISERIES
#include <asm/iseries/it_lp_queue.h>
#include <asm/iseries/hv_call_xm.h>
#endif
#ifdef CONFIG_PPC_ISERIES
static unsigned long __initdata iSeries_recal_titan;
static signed long __initdata iSeries_recal_tb;
#endif

#define XSEC_PER_SEC (1024*1024)
#ifdef CONFIG_PPC64
#define SCALE_XSEC(xsec, max)   (((xsec) * max) / XSEC_PER_SEC)
#else
/* compute ((xsec << 12) * max) >> 32 */
#define SCALE_XSEC(xsec, max)   mulhwu((xsec) << 12, max)
#endif
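/*
 * Worked example (illustrative, not from the original source): one
 * second is 2^20 = 1048576 xsec, so the low 20 bits of an xsec count
 * form a binary fraction of a second.  SCALE_XSEC converts that
 * fraction without a divide: on 32-bit,
 * mulhwu((xsec) << 12, 1000000) = ((xsec << 12) * 1000000) >> 32
 * = xsec * 1000000 / 2^20, i.e. whole microseconds.
 */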
unsigned long tb_ticks_per_jiffy;
unsigned long tb_ticks_per_usec = 100;  /* sane default */
EXPORT_SYMBOL(tb_ticks_per_usec);
unsigned long tb_ticks_per_sec;
EXPORT_SYMBOL(tb_ticks_per_sec);        /* for cputime_t conversions */
u64 tb_to_xs;
unsigned tb_to_us;
#define TICKLEN_SCALE   TICK_LENGTH_SHIFT
u64 last_tick_len;      /* units are ns / 2^TICKLEN_SCALE */
u64 ticklen_to_xs;      /* 0.64 fraction */

/* If last_tick_len corresponds to about 1/HZ seconds, then
   last_tick_len << TICKLEN_SHIFT will be about 2^63. */
#define TICKLEN_SHIFT   (63 - 30 - TICKLEN_SCALE + SHIFT_HZ)
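/*
 * Sanity check of TICKLEN_SHIFT (illustration): last_tick_len is
 * roughly (1e9 / HZ) << TICKLEN_SCALE.  With 1e9 ~= 2^30 and
 * HZ ~= 2^SHIFT_HZ, that is about 2^(30 - SHIFT_HZ + TICKLEN_SCALE),
 * so shifting left by 63 - 30 - TICKLEN_SCALE + SHIFT_HZ lands the
 * value near 2^63, as the comment above requires.
 */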
DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL_GPL(rtc_lock);

static u64 tb_to_ns_scale __read_mostly;
static unsigned tb_to_ns_shift __read_mostly;
static unsigned long boot_tb __read_mostly;

struct gettimeofday_struct do_gtod;

extern struct timezone sys_tz;
static long timezone_offset;

unsigned long ppc_proc_freq;
EXPORT_SYMBOL(ppc_proc_freq);
unsigned long ppc_tb_freq;

static u64 tb_last_jiffy __cacheline_aligned_in_smp;
static DEFINE_PER_CPU(u64, last_jiffy);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
 * Factors for converting from cputime_t (timebase ticks) to
 * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
 * These are all stored as 0.64 fixed-point binary fractions.
 */
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
u64 __cputime_msec_factor;
EXPORT_SYMBOL(__cputime_msec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
EXPORT_SYMBOL(__cputime_clockt_factor);
static void calc_cputime_factors(void)
{
        struct div_result res;

        div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
        __cputime_jiffies_factor = res.result_low;
        div128_by_32(1000, 0, tb_ticks_per_sec, &res);
        __cputime_msec_factor = res.result_low;
        div128_by_32(1, 0, tb_ticks_per_sec, &res);
        __cputime_sec_factor = res.result_low;
        div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
        __cputime_clockt_factor = res.result_low;
}
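/*
 * Usage sketch (illustrative): each factor above is the 0.64 fraction
 * 2^64 * rate / tb_ticks_per_sec, so a conversion is one high multiply,
 * e.g. jiffies = mulhdu(cputime_ticks, __cputime_jiffies_factor),
 * which yields cputime_ticks * HZ / tb_ticks_per_sec.
 */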
/*
 * Read the PURR on systems that have it, otherwise the timebase.
 */
static u64 read_purr(void)
{
        if (cpu_has_feature(CPU_FTR_PURR))
                return mfspr(SPRN_PURR);
        return mftb();
}
/*
 * Account time for a transition between system, hard irq
 * or soft irq state.
 */
void account_system_vtime(struct task_struct *tsk)
{
        u64 now, delta;
        unsigned long flags;

        local_irq_save(flags);
        now = read_purr();
        delta = now - get_paca()->startpurr;
        get_paca()->startpurr = now;
        if (!in_interrupt()) {
                delta += get_paca()->system_time;
                get_paca()->system_time = 0;
        }
        account_system_time(tsk, 0, delta);
        local_irq_restore(flags);
}
/*
 * Transfer the user and system times accumulated in the paca
 * by the exception entry and exit code to the generic process
 * user and system time records.
 * Must be called with interrupts disabled.
 */
void account_process_vtime(struct task_struct *tsk)
{
        cputime_t utime;

        utime = get_paca()->user_time;
        get_paca()->user_time = 0;
        account_user_time(tsk, utime);
}
static void account_process_time(struct pt_regs *regs)
{
        int cpu = smp_processor_id();

        account_process_vtime(current);
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_mode(regs));
        scheduler_tick();
        run_posix_cpu_timers(current);
}
/*
 * Stuff for accounting stolen time.
 */
struct cpu_purr_data {
        int     initialized;    /* thread is running */
        u64     tb;             /* last TB value read */
        u64     purr;           /* last PURR value read */
};

/*
 * Each entry in the cpu_purr_data array is manipulated only by its
 * "owner" cpu -- usually in the timer interrupt but also occasionally
 * in process context for cpu online.  As long as cpus do not touch
 * each others' cpu_purr_data, disabling local interrupts is
 * sufficient to serialize accesses.
 */
static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data);
static void snapshot_tb_and_purr(void *data)
{
        unsigned long flags;
        struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data);

        local_irq_save(flags);
        p->tb = get_tb_or_rtc();
        p->purr = mfspr(SPRN_PURR);
        wmb();
        p->initialized = 1;
        local_irq_restore(flags);
}
/*
 * Called during boot when all cpus have come up.
 */
void snapshot_timebases(void)
{
        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1);
}
/*
 * Must be called with interrupts disabled.
 */
void calculate_steal_time(void)
{
        u64 tb, purr;
        s64 stolen;
        struct cpu_purr_data *pme;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        pme = &per_cpu(cpu_purr_data, smp_processor_id());
        if (!pme->initialized)
                return;         /* this can happen in early boot */
        tb = mftb();
        purr = mfspr(SPRN_PURR);
        stolen = (tb - pme->tb) - (purr - pme->purr);
        if (stolen > 0)
                account_steal_time(current, stolen);
        pme->tb = tb;
        pme->purr = purr;
}
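/*
 * Note (illustrative): the timebase advances at a constant rate, while
 * the PURR advances only while this thread is actually dispatched.
 * Over any interval, delta_tb - delta_purr is therefore the time taken
 * by the hypervisor or a sibling thread, which we account as stolen.
 */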
#ifdef CONFIG_PPC_SPLPAR
/*
 * Must be called before the cpu is added to the online map when
 * a cpu is being brought up at runtime.
 */
static void snapshot_purr(void)
{
        struct cpu_purr_data *pme;
        unsigned long flags;

        if (!cpu_has_feature(CPU_FTR_PURR))
                return;
        local_irq_save(flags);
        pme = &per_cpu(cpu_purr_data, smp_processor_id());
        pme->tb = mftb();
        pme->purr = mfspr(SPRN_PURR);
        pme->initialized = 1;
        local_irq_restore(flags);
}

#endif /* CONFIG_PPC_SPLPAR */
#else /* ! CONFIG_VIRT_CPU_ACCOUNTING */
#define calc_cputime_factors()
#define account_process_time(regs)     update_process_times(user_mode(regs))
#define calculate_steal_time()         do { } while (0)
#endif

#if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR))
#define snapshot_purr()                do { } while (0)
#endif
/*
 * Called when a cpu comes up after the system has finished booting,
 * i.e. as a result of a hotplug cpu action.
 */
void snapshot_timebase(void)
{
        __get_cpu_var(last_jiffy) = get_tb_or_rtc();
        snapshot_purr();
}
void __delay(unsigned long loops)
{
        unsigned long start;
        int diff;

        if (__USE_RTC()) {
                start = get_rtcl();
                do {
                        /* the RTCL register wraps at 1000000000 */
                        diff = get_rtcl() - start;
                        if (diff < 0)
                                diff += 1000000000;
                } while (diff < loops);
        } else {
                start = get_tbl();
                while (get_tbl() - start < loops)
                        HMT_low();
                HMT_medium();
        }
}
EXPORT_SYMBOL(__delay);
void udelay(unsigned long usecs)
{
        __delay(tb_ticks_per_usec * usecs);
}
EXPORT_SYMBOL(udelay);
/*
 * This version of gettimeofday has microsecond resolution.
 */
static inline void __do_gettimeofday(struct timeval *tv)
{
        unsigned long sec, usec;
        u64 tb_ticks, xsec;
        struct gettimeofday_vars *temp_varp;
        u64 temp_tb_to_xs, temp_stamp_xsec;

        /*
         * These calculations are faster (gets rid of divides)
         * if done in units of 1/2^20 rather than microseconds.
         * The conversion to microseconds at the end is done
         * without a divide (and in fact, without a multiply)
         */
        temp_varp = do_gtod.varp;

        /* Sampling the time base must be done after loading
         * do_gtod.varp in order to avoid racing with update_gtod.
         */
        data_barrier(temp_varp);
        tb_ticks = get_tb() - temp_varp->tb_orig_stamp;
        temp_tb_to_xs = temp_varp->tb_to_xs;
        temp_stamp_xsec = temp_varp->stamp_xsec;
        xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
        sec = xsec / XSEC_PER_SEC;
        usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
        usec = SCALE_XSEC(usec, 1000000);

        tv->tv_sec = sec;
        tv->tv_usec = usec;
}
void do_gettimeofday(struct timeval *tv)
{
        if (__USE_RTC()) {
                /* do this the old way */
                unsigned long flags, seq;
                unsigned int sec, nsec, usec;

                do {
                        seq = read_seqbegin_irqsave(&xtime_lock, flags);
                        sec = xtime.tv_sec;
                        nsec = xtime.tv_nsec + tb_ticks_since(tb_last_jiffy);
                } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
                usec = nsec / 1000;
                while (usec >= 1000000) {
                        usec -= 1000000;
                        ++sec;
                }
                tv->tv_sec = sec;
                tv->tv_usec = usec;
                return;
        }
        __do_gettimeofday(tv);
}

EXPORT_SYMBOL(do_gettimeofday);
/*
 * There are two copies of tb_to_xs and stamp_xsec so that no
 * lock is needed to access and use these values in
 * do_gettimeofday.  We alternate the copies and as long as a
 * reasonable time elapses between changes, there will never
 * be inconsistent values.  ntpd has a minimum of one minute
 * between updates.
 */
static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
                               u64 new_tb_to_xs)
{
        unsigned temp_idx;
        struct gettimeofday_vars *temp_varp;

        temp_idx = (do_gtod.var_idx == 0);
        temp_varp = &do_gtod.vars[temp_idx];

        temp_varp->tb_to_xs = new_tb_to_xs;
        temp_varp->tb_orig_stamp = new_tb_stamp;
        temp_varp->stamp_xsec = new_stamp_xsec;
        smp_mb();
        do_gtod.varp = temp_varp;
        do_gtod.var_idx = temp_idx;

        /*
         * tb_update_count is used to allow the userspace gettimeofday code
         * to assure itself that it sees a consistent view of the tb_to_xs and
         * stamp_xsec variables.  It reads the tb_update_count, then reads
         * tb_to_xs and stamp_xsec and then reads tb_update_count again.  If
         * the two values of tb_update_count match and are even then the
         * tb_to_xs and stamp_xsec values are consistent.  If not, then it
         * loops back and reads them again until this criterion is met.
         * We expect the caller to have done the first increment of
         * vdso_data->tb_update_count already.
         */
        vdso_data->tb_orig_stamp = new_tb_stamp;
        vdso_data->stamp_xsec = new_stamp_xsec;
        vdso_data->tb_to_xs = new_tb_to_xs;
        vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
        vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
        smp_wmb();
        ++(vdso_data->tb_update_count);
}
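/*
 * Reader-side sketch of the protocol described above (illustrative
 * pseudo-code for the vdso, not the actual assembly):
 *
 *      do {
 *              count = vdso_data->tb_update_count;
 *              read tb_orig_stamp, stamp_xsec, tb_to_xs;
 *      } while ((count & 1) || count != vdso_data->tb_update_count);
 */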
/*
 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
 * between tb_orig_stamp and stamp_xsec.  The goal here is to keep the
 * difference tb - tb_orig_stamp small enough to always fit inside a
 * 32-bit number.  This is a requirement of our fast 32-bit userland
 * implementation in the vdso.  If we "miss" a call to this function
 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
 * with a too big difference, then the vdso will fall back to calling
 * the syscall.
 */
static __inline__ void timer_recalc_offset(u64 cur_tb)
{
        unsigned long offset;
        u64 new_stamp_xsec;
        u64 tlen, t2x;
        u64 tb, xsec_old, xsec_new;
        struct gettimeofday_vars *varp;

        if (__USE_RTC())
                return;
        tlen = current_tick_length();
        offset = cur_tb - do_gtod.varp->tb_orig_stamp;
        if (tlen == last_tick_len && offset < 0x80000000u)
                return;
        if (tlen != last_tick_len) {
                t2x = mulhdu(tlen << TICKLEN_SHIFT, ticklen_to_xs);
                last_tick_len = tlen;
        } else
                t2x = do_gtod.varp->tb_to_xs;
        new_stamp_xsec = (u64) xtime.tv_nsec * XSEC_PER_SEC;
        do_div(new_stamp_xsec, 1000000000);
        new_stamp_xsec += (u64) xtime.tv_sec * XSEC_PER_SEC;

        ++vdso_data->tb_update_count;
        smp_mb();

        /*
         * Make sure time doesn't go backwards for userspace gettimeofday.
         */
        tb = get_tb();
        varp = do_gtod.varp;
        xsec_old = mulhdu(tb - varp->tb_orig_stamp, varp->tb_to_xs)
                + varp->stamp_xsec;
        xsec_new = mulhdu(tb - cur_tb, t2x) + new_stamp_xsec;
        if (xsec_new < xsec_old)
                new_stamp_xsec += xsec_old - xsec_new;

        update_gtod(cur_tb, new_stamp_xsec, t2x);
}
#ifdef CONFIG_PROFILING
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        if (in_lock_functions(pc))
                return regs->link;

        return pc;
}
EXPORT_SYMBOL(profile_pc);
#endif
#ifdef CONFIG_PPC_ISERIES

/*
 * This function recalibrates the timebase based on the 49-bit time-of-day
 * value in the Titan chip.  The Titan is much more accurate than the value
 * returned by the service processor for the timebase frequency.
 */
static int __init iSeries_tb_recal(void)
{
        struct div_result divres;
        unsigned long titan, tb;

        /* Make sure we only run on iSeries */
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return -ENODEV;

        tb = get_tb();
        titan = HvCallXm_loadTod();
        if ( iSeries_recal_titan ) {
                unsigned long tb_ticks = tb - iSeries_recal_tb;
                unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
                unsigned long new_tb_ticks_per_sec   = (tb_ticks * USEC_PER_SEC)/titan_usec;
                unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
                long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
                char sign = '+';

                /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
                new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

                if ( tick_diff < 0 ) {
                        tick_diff = -tick_diff;
                        sign = '-';
                }
                if ( tick_diff ) {
                        if ( tick_diff < tb_ticks_per_jiffy/25 ) {
                                printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
                                                new_tb_ticks_per_jiffy, sign, tick_diff );
                                tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
                                tb_ticks_per_sec   = new_tb_ticks_per_sec;
                                calc_cputime_factors();
                                div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
                                do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
                                tb_to_xs = divres.result_low;
                                do_gtod.varp->tb_to_xs = tb_to_xs;
                                vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
                                vdso_data->tb_to_xs = tb_to_xs;
                        }
                        else {
                                printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
                                        "                   new tb_ticks_per_jiffy = %lu\n"
                                        "                   old tb_ticks_per_jiffy = %lu\n",
                                        new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
                        }
                }
        }
        iSeries_recal_titan = titan;
        iSeries_recal_tb = tb;

        return 0;
}
late_initcall(iSeries_tb_recal);

/* Called from platform early init */
void __init iSeries_time_init_early(void)
{
        iSeries_recal_tb = get_tb();
        iSeries_recal_titan = HvCallXm_loadTod();
}
#endif /* CONFIG_PPC_ISERIES */
/*
 * For iSeries shared processors, we have to let the hypervisor
 * set the hardware decrementer.  We set a virtual decrementer
 * in the lppaca and call the hypervisor if the virtual
 * decrementer is less than the current value in the hardware
 * decrementer.  (almost always the new decrementer value will
 * be greater than the current hardware decrementer so the hypervisor
 * call will not be needed)
 */
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
void timer_interrupt(struct pt_regs * regs)
{
        struct pt_regs *old_regs;
        int next_dec;
        int cpu = smp_processor_id();
        unsigned long ticks;
        u64 tb_next_jiffy;

#ifdef CONFIG_PPC32
        if (atomic_read(&ppc_n_lost_interrupts) != 0)
                do_IRQ(regs);
#endif

        old_regs = set_irq_regs(regs);
        irq_enter();

        profile_tick(CPU_PROFILING);
        calculate_steal_time();

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                get_lppaca()->int_dword.fields.decr_int = 0;
#endif

        while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
               >= tb_ticks_per_jiffy) {
                /* Update last_jiffy */
                per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
                /* Handle RTCL overflow on 601 */
                if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
                        per_cpu(last_jiffy, cpu) -= 1000000000;

                /*
                 * We cannot disable the decrementer, so in the period
                 * between this cpu's being marked offline in cpu_online_map
                 * and calling stop-self, it is taking timer interrupts.
                 * Avoid calling into the scheduler rebalancing code if this
                 * is the case.
                 */
                if (!cpu_is_offline(cpu))
                        account_process_time(regs);

                /*
                 * No need to check whether cpu is offline here; boot_cpuid
                 * should have been fixed up by now.
                 */
                if (cpu != boot_cpuid)
                        continue;

                write_seqlock(&xtime_lock);
                tb_next_jiffy = tb_last_jiffy + tb_ticks_per_jiffy;
                if (__USE_RTC() && tb_next_jiffy >= 1000000000)
                        tb_next_jiffy -= 1000000000;
                if (per_cpu(last_jiffy, cpu) >= tb_next_jiffy) {
                        tb_last_jiffy = tb_next_jiffy;
                        do_timer(1);
                        timer_recalc_offset(tb_last_jiffy);
                }
                write_sequnlock(&xtime_lock);
        }

        next_dec = tb_ticks_per_jiffy - ticks;
        set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
                process_hvlpevents();
#endif

#ifdef CONFIG_PPC64
        /* collect purr register values often, for accurate calculations */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
                cu->current_tb = mfspr(SPRN_PURR);
        }
#endif

        irq_exit();
        set_irq_regs(old_regs);
}
void wakeup_decrementer(void)
{
        unsigned long ticks;

        /*
         * The timebase gets saved on sleep and restored on wakeup,
         * so all we need to do is to reset the decrementer.
         */
        ticks = tb_ticks_since(__get_cpu_var(last_jiffy));
        if (ticks < tb_ticks_per_jiffy)
                ticks = tb_ticks_per_jiffy - ticks;
        else
                ticks = 1;
        set_dec(ticks);
}
#ifdef CONFIG_SMP
void __init smp_space_timers(unsigned int max_cpus)
{
        int i;
        u64 previous_tb = per_cpu(last_jiffy, boot_cpuid);

        /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
        previous_tb -= tb_ticks_per_jiffy;

        for_each_possible_cpu(i) {
                if (i == boot_cpuid)
                        continue;
                per_cpu(last_jiffy, i) = previous_tb;
        }
}
#endif
/*
 * Scheduler clock - returns current time in nanosec units.
 *
 * Note: mulhdu(a, b) (multiply high double unsigned) returns
 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
 * are 64-bit unsigned numbers.
 */
unsigned long long sched_clock(void)
{
        if (__USE_RTC())
                return get_rtc();
        return mulhdu(get_tb() - boot_tb, tb_to_ns_scale) << tb_to_ns_shift;
}
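/*
 * Worked example (illustrative): time_init() chooses tb_to_ns_scale
 * and tb_to_ns_shift such that
 *      tb_to_ns_scale = (1e9 * 2^64 / tb_ticks_per_sec) >> tb_to_ns_shift
 * so mulhdu(ticks, tb_to_ns_scale) << tb_to_ns_shift recovers
 * ticks * 1e9 / tb_ticks_per_sec, i.e. nanoseconds since boot_tb.
 */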
int do_settimeofday(struct timespec *tv)
{
        time_t wtm_sec, new_sec = tv->tv_sec;
        long wtm_nsec, new_nsec = tv->tv_nsec;
        unsigned long flags;
        u64 new_xsec;
        unsigned long tb_delta;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irqsave(&xtime_lock, flags);

        /*
         * Updating the RTC is not the job of this code.  If the time is
         * stepped under NTP, the RTC will be updated after STA_UNSYNC
         * is cleared.  Tools like clock/hwclock either copy the RTC
         * to the system time, in which case there is no point in writing
         * to the RTC again, or write to the RTC but then they don't call
         * settimeofday to perform this operation.
         */

        /* Make userspace gettimeofday spin until we're done. */
        ++vdso_data->tb_update_count;
        smp_mb();

        /*
         * Subtract off the number of nanoseconds since the
         * beginning of the last tick.
         */
        tb_delta = tb_ticks_since(tb_last_jiffy);
        tb_delta = mulhdu(tb_delta, do_gtod.varp->tb_to_xs); /* in xsec */
        new_nsec -= SCALE_XSEC(tb_delta, 1000000000);

        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);

        set_normalized_timespec(&xtime, new_sec, new_nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        ntp_clear();

        new_xsec = xtime.tv_nsec;
        if (new_xsec != 0) {
                new_xsec *= XSEC_PER_SEC;
                do_div(new_xsec, NSEC_PER_SEC);
        }
        new_xsec += (u64)xtime.tv_sec * XSEC_PER_SEC;
        update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);

        vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
        vdso_data->tz_dsttime = sys_tz.tz_dsttime;

        write_sequnlock_irqrestore(&xtime_lock, flags);
        clock_was_set();
        return 0;
}

EXPORT_SYMBOL(do_settimeofday);
static int __init get_freq(char *name, int cells, unsigned long *val)
{
        struct device_node *cpu;
        const unsigned int *fp;
        int found = 0;

        /* The cpu node should have timebase and clock frequency properties */
        cpu = of_find_node_by_type(NULL, "cpu");

        if (cpu) {
                fp = of_get_property(cpu, name, NULL);
                if (fp) {
                        found = 1;
                        *val = of_read_ulong(fp, cells);
                }

                of_node_put(cpu);
        }

        return found;
}
void __init generic_calibrate_decr(void)
{
        ppc_tb_freq = DEFAULT_TB_FREQ;          /* hardcoded default */

        if (!get_freq("ibm,extended-timebase-frequency", 2, &ppc_tb_freq) &&
            !get_freq("timebase-frequency", 1, &ppc_tb_freq)) {

                printk(KERN_ERR "WARNING: Estimating decrementer frequency "
                                "(not found)\n");
        }

        ppc_proc_freq = DEFAULT_PROC_FREQ;      /* hardcoded default */

        if (!get_freq("ibm,extended-clock-frequency", 2, &ppc_proc_freq) &&
            !get_freq("clock-frequency", 1, &ppc_proc_freq)) {

                printk(KERN_ERR "WARNING: Estimating processor frequency "
                                "(not found)\n");
        }

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
        /* Set the time base to zero */
        mtspr(SPRN_TBWL, 0);
        mtspr(SPRN_TBWU, 0);

        /* Clear any pending timer interrupts */
        mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

        /* Enable decrementer interrupt */
        mtspr(SPRN_TCR, TCR_DIE);
#endif
}
int update_persistent_clock(struct timespec now)
{
        struct rtc_time tm;

        if (!ppc_md.set_rtc_time)
                return 0;

        to_tm(now.tv_sec + 1 + timezone_offset, &tm);
        tm.tm_year -= 1900;
        tm.tm_mon -= 1;

        return ppc_md.set_rtc_time(&tm);
}
unsigned long read_persistent_clock(void)
{
        struct rtc_time tm;
        static int first = 1;

        /* XXX this is a little fragile but will work okay in the short term */
        if (first) {
                first = 0;
                if (ppc_md.time_init)
                        timezone_offset = ppc_md.time_init();

                /* get_boot_time() isn't guaranteed to be safe to call late */
                if (ppc_md.get_boot_time)
                        return ppc_md.get_boot_time() - timezone_offset;
        }
        if (!ppc_md.get_rtc_time)
                return 0;
        ppc_md.get_rtc_time(&tm);
        return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
                      tm.tm_hour, tm.tm_min, tm.tm_sec);
}
/* This function is only called on the boot processor */
void __init time_init(void)
{
        unsigned long flags;
        struct div_result res;
        u64 scale, x;
        unsigned shift;

        if (__USE_RTC()) {
                /* 601 processor: dec counts down by 128 every 128ns */
                ppc_tb_freq = 1000000000;
                tb_last_jiffy = get_rtcl();
        } else {
                /* Normal PowerPC with timebase register */
                ppc_md.calibrate_decr();
                printk(KERN_DEBUG "time_init: decrementer frequency = %lu.%.6lu MHz\n",
                       ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
                printk(KERN_DEBUG "time_init: processor frequency   = %lu.%.6lu MHz\n",
                       ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
                tb_last_jiffy = get_tb();
        }

        tb_ticks_per_jiffy = ppc_tb_freq / HZ;
        tb_ticks_per_sec = ppc_tb_freq;
        tb_ticks_per_usec = ppc_tb_freq / 1000000;
        tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
        calc_cputime_factors();

        /*
         * Calculate the length of each tick in ns.  It will not be
         * exactly 1e9/HZ unless ppc_tb_freq is divisible by HZ.
         * We compute 1e9 * tb_ticks_per_jiffy / ppc_tb_freq,
         * rounded up.
         */
        x = (u64) NSEC_PER_SEC * tb_ticks_per_jiffy + ppc_tb_freq - 1;
        do_div(x, ppc_tb_freq);
        tick_nsec = x;
        last_tick_len = x << TICKLEN_SCALE;

        /*
         * Compute ticklen_to_xs, which is a factor which gets multiplied
         * by (last_tick_len << TICKLEN_SHIFT) to get a tb_to_xs value.
         * It is computed as:
         * ticklen_to_xs = 2^N / (tb_ticks_per_jiffy * 1e9)
         * where N = 64 + 20 - TICKLEN_SCALE - TICKLEN_SHIFT
         * which turns out to be N = 51 - SHIFT_HZ.
         * This gives the result as a 0.64 fixed-point fraction.
         * That value is reduced by an offset amounting to 1 xsec per
         * 2^31 timebase ticks to avoid problems with time going backwards
         * by 1 xsec when we do timer_recalc_offset due to losing the
         * fractional xsec.  That offset is equal to ppc_tb_freq/2^51
         * since there are 2^20 xsec in a second.
         */
        div128_by_32((1ULL << 51) - ppc_tb_freq, 0,
                     tb_ticks_per_jiffy << SHIFT_HZ, &res);
        div128_by_32(res.result_high, res.result_low, NSEC_PER_SEC, &res);
        ticklen_to_xs = res.result_low;

        /* Compute tb_to_xs from tick_nsec */
        tb_to_xs = mulhdu(last_tick_len << TICKLEN_SHIFT, ticklen_to_xs);

        /*
         * Compute scale factor for sched_clock.
         * The calibrate_decr() function has set tb_ticks_per_sec,
         * which is the timebase frequency.
         * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
         * the 128-bit result as a 64.64 fixed-point number.
         * We then shift that number right until it is less than 1.0,
         * giving us the scale factor and shift count to use in
         * sched_clock().
         */
        div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
        scale = res.result_low;
        for (shift = 0; res.result_high != 0; ++shift) {
                scale = (scale >> 1) | (res.result_high << 63);
                res.result_high >>= 1;
        }
        tb_to_ns_scale = scale;
        tb_to_ns_shift = shift;
        /* Save the current timebase to pretty up CONFIG_PRINTK_TIME */
        boot_tb = get_tb_or_rtc();

        write_seqlock_irqsave(&xtime_lock, flags);

        /* If platform provided a timezone (pmac), we correct the time */
        if (timezone_offset) {
                sys_tz.tz_minuteswest = -timezone_offset / 60;
                sys_tz.tz_dsttime = 0;
        }

        do_gtod.varp = &do_gtod.vars[0];
        do_gtod.var_idx = 0;
        do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
        __get_cpu_var(last_jiffy) = tb_last_jiffy;
        do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
        do_gtod.varp->tb_to_xs = tb_to_xs;
        do_gtod.tb_to_us = tb_to_us;

        vdso_data->tb_orig_stamp = tb_last_jiffy;
        vdso_data->tb_update_count = 0;
        vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
        vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        vdso_data->tb_to_xs = tb_to_xs;

        write_sequnlock_irqrestore(&xtime_lock, flags);

        /* Not exact, but the timer interrupt takes care of this */
        set_dec(tb_ticks_per_jiffy);
}
#define FEBRUARY        2
#define STARTOFTIME     1970
#define SECDAY          86400L
#define SECYR           (SECDAY * 365)
#define leapyear(year)          ((year) % 4 == 0 && \
                                 ((year) % 100 != 0 || (year) % 400 == 0))
#define days_in_year(a)         (leapyear(a) ? 366 : 365)
#define days_in_month(a)        (month_days[(a) - 1])

static int month_days[12] = {
        31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
};
/*
 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
 */
void GregorianDay(struct rtc_time * tm)
{
        int leapsToDate;
        int lastYear;
        int day;
        int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };

        lastYear = tm->tm_year - 1;

        /*
         * Number of leap corrections to apply up to end of last year
         */
        leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;

        /*
         * This year is a leap year if it is divisible by 4 except when it is
         * divisible by 100 unless it is divisible by 400
         *
         * e.g. 1904 was a leap year, 1900 was not, 1996 is, and 2000 was
         */
        day = tm->tm_mon > 2 && leapyear(tm->tm_year);

        day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
                   tm->tm_mday;

        tm->tm_wday = day % 7;
}
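/*
 * Worked example (illustrative): for 1 Jan 1970, lastYear = 1969,
 * leapsToDate = 492 - 19 + 4 = 477, and day = 1969*365 + 477 + 0 + 1
 * = 719163.  719163 % 7 = 4, i.e. Thursday (0 = Sunday), which matches
 * the calendar.
 */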
void to_tm(int tim, struct rtc_time * tm)
{
        register int i;
        register long hms, day;

        day = tim / SECDAY;
        hms = tim % SECDAY;

        /* Hours, minutes, seconds are easy */
        tm->tm_hour = hms / 3600;
        tm->tm_min = (hms % 3600) / 60;
        tm->tm_sec = (hms % 3600) % 60;

        /* Number of years in days */
        for (i = STARTOFTIME; day >= days_in_year(i); i++)
                day -= days_in_year(i);
        tm->tm_year = i;

        /* Number of months in days left */
        if (leapyear(tm->tm_year))
                days_in_month(FEBRUARY) = 29;
        for (i = 1; day >= days_in_month(i); i++)
                day -= days_in_month(i);
        days_in_month(FEBRUARY) = 28;
        tm->tm_mon = i;

        /* Days are what is left over (+1) from all that. */
        tm->tm_mday = day + 1;

        /*
         * Determine the day of week
         */
        GregorianDay(tm);
}
/* Auxiliary function to compute scaling factors */

/* Actually the choice of a timebase running at 1/4 of the bus
 * frequency giving resolution of a few tens of nanoseconds is quite nice.
 * It makes this computation very precise (27-28 bits typically) which
 * is optimistic considering the stability of most processor clock
 * oscillators and the precision with which the timebase frequency
 * is measured but does not harm.
 */
unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
{
        unsigned mlt = 0, tmp, err;

        /* No concern for performance, it's done once: use a stupid
         * but safe and compact method to find the multiplier.
         */
        for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
                if (mulhwu(inscale, mlt|tmp) < outscale)
                        mlt |= tmp;
        }

        /* We might still be off by 1 for the best approximation.
         * A side effect of this is that if outscale is too large
         * the returned value will be zero.
         * Many corner cases have been checked and seem to work,
         * some might have been forgotten in the test however.
         */
        err = inscale * (mlt+1);
        if (err <= inscale/2)
                mlt++;
        return mlt;
}
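/*
 * Usage note (illustrative): the result approximates
 * 2^32 * outscale / inscale, so mulhwu(ticks, mlt) computes
 * ticks * outscale / inscale.  time_init() uses this to derive
 * tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000), turning
 * timebase ticks into microseconds with a single high multiply.
 */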
/*
 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit
 * result.
 */
void div128_by_32(u64 dividend_high, u64 dividend_low,
                  unsigned divisor, struct div_result *dr)
{
        unsigned long a, b, c, d;
        unsigned long w, x, y, z;
        u64 ra, rb, rc;

        a = dividend_high >> 32;
        b = dividend_high & 0xffffffff;
        c = dividend_low >> 32;
        d = dividend_low & 0xffffffff;

        w = a / divisor;
        ra = ((u64)(a - (w * divisor)) << 32) + b;

        rb = ((u64) do_div(ra, divisor) << 32) + c;
        x = ra;

        rc = ((u64) do_div(rb, divisor) << 32) + d;
        y = rb;

        do_div(rc, divisor);
        z = rc;

        dr->result_high = ((u64)w << 32) + x;
        dr->result_low  = ((u64)y << 32) + z;
}
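/*
 * Worked sketch (illustrative): this is schoolbook long division in
 * base 2^32.  The dividend is split into four 32-bit digits a:b:c:d;
 * each step divides the running remainder (carried up by 2^32) by the
 * divisor to produce one 32-bit quotient digit w, x, y or z.  For
 * example, time_init() calls div128_by_32(1000000000, 0,
 * tb_ticks_per_sec, &res) to compute 1e9 * 2^64 / tb_ticks_per_sec.
 */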