#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/cpufreq.h>

#include <asm/timex.h>

static int notsc __initdata = 0;

unsigned int cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
unsigned int tsc_khz;
EXPORT_SYMBOL(tsc_khz);

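/*
 * cyc2ns_scale is a fixed point multiplier with NS_SCALE fractional
 * bits, so cycles_2_ns() below needs only a multiply and a shift:
 *
 *	ns = (cyc * cyc2ns_scale) >> NS_SCALE
 *
 * where cyc2ns_scale = (10^6 << NS_SCALE) / tsc_khz.
 */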
static unsigned int cyc2ns_scale __read_mostly;

void set_cyc2ns_scale(unsigned long khz)
{
	cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> NS_SCALE;
}

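/*
 * Scheduler clock: nanoseconds derived from a raw, unsynchronized
 * TSC read. Cheap, but only approximately monotonic across CPUs.
 */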
unsigned long long sched_clock(void)
{
	unsigned long a = 0;

	/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
	 * which means it is not completely exact and may not be monotonic
	 * between CPUs. But the errors should be too small to matter for
	 * scheduling purposes.
	 */

	rdtscll(a);
	return cycles_2_ns(a);
}

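/* Set once the TSC is found to be unusable as a stable time source. */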
static int tsc_unstable;

inline int check_tsc_unstable(void)
{
	return tsc_unstable;
}
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
 * changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
 * not that important because current Opteron setups do not support
 * frequency scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

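/* Reference values captured at the first frequency transition. */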
static unsigned int ref_freq;
static unsigned long loops_per_jiffy_ref;
static unsigned long tsc_khz_ref;

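/*
 * On a frequency transition, rescale loops_per_jiffy and tsc_khz
 * relative to the reference frequency. If the TSC does not tick at a
 * constant rate across the change, mark it unstable as a clocksource.
 */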
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		tsc_khz_ref = tsc_khz;
	}
	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			mark_tsc_unstable("cpufreq changes");
	}

	set_cyc2ns_scale(tsc_khz_ref);

	return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	cpufreq_register_notifier(&time_cpufreq_notifier_block,
				  CPUFREQ_TRANSITION_NOTIFIER);
	return 0;
}

core_initcall(cpufreq_tsc);

#endif

/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
	if (tsc_unstable)
		return 1;

#ifdef CONFIG_SMP
	if (apic_is_clustered_box())
		return 1;
#endif
	/* Most Intel systems have synchronized TSCs except for
	   multi-node systems */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
#ifdef CONFIG_ACPI
		/* But the TSC doesn't tick in C3 so don't use it there */
		if (acpi_gbl_FADT.header.length > 0 &&
		    acpi_gbl_FADT.C3latency < 1000)
			return 1;
#endif
		return 0;
	}

	/* Assume multi-socket systems are not synchronized */
	return num_present_cpus() > 1;
}

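/* "notsc" boot parameter: keep the TSC from being used as a clocksource. */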
int __init notsc_setup(char *s)
{
	notsc = 1;
	return 1;
}

__setup("notsc", notsc_setup);

/* clock source code: */
static cycle_t read_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles_sync();
	return ret;
}

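/*
 * vsyscall variant: placed in the vsyscall page so userspace
 * gettimeofday() can read the TSC without entering the kernel.
 */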
static cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret = (cycle_t)get_cycles_sync();
	return ret;
}

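/*
 * Rating 300 makes the TSC the preferred clocksource when it is
 * usable; .mult is filled in from tsc_khz at registration time, and
 * MUST_VERIFY asks the clocksource watchdog to cross-check it.
 */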
static struct clocksource clocksource_tsc = {
	.name			= "tsc",
	.rating			= 300,
	.read			= read_tsc,
	.mask			= CLOCKSOURCE_MASK(64),
	.shift			= 22,
	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
				  CLOCK_SOURCE_MUST_VERIFY,
	.vread			= vread_tsc,
};

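/*
 * Demote the TSC to rating 0 ("unusable") once it is known to be
 * unreliable; downgrade in place if it is already registered.
 */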
void mark_tsc_unstable(char *reason)
{
	if (!tsc_unstable) {
		tsc_unstable = 1;
		printk("Marking TSC unstable due to %s\n", reason);
		/* Change only the rating, when not registered */
		if (clocksource_tsc.mult)
			clocksource_change_rating(&clocksource_tsc, 0);
		else
			clocksource_tsc.rating = 0;
	}
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

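/*
 * Register the TSC clocksource unless "notsc" was given on the
 * command line; .mult is computed from tsc_khz so that
 * ns = (cycles * mult) >> shift.
 */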
void __init init_tsc_clocksource(void)
{
	if (!notsc) {
		clocksource_tsc.mult = clocksource_khz2mult(tsc_khz,
					clocksource_tsc.shift);
		if (check_tsc_unstable())
			clocksource_tsc.rating = 0;

		clocksource_register(&clocksource_tsc);
	}
}