#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#include <asm/numa_64.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			c->cpuid_level = cpuid_eax(0);
		}
	}
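	/*
	 * Netburst from model 3 and Core (family 6, model 0xe) onwards keep
	 * the TSC ticking at a constant rate across frequency changes.
	 */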
	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}
	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}
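	/*
	 * Intel 64 CPUs still honour SYSENTER/SYSEXIT from compatibility
	 * (32-bit) mode, so the 64-bit kernel can advertise it there.
	 */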
#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;
	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}
	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);
#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif
	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			printk(KERN_INFO "Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}
}
#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do cpu ident work
 */

int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}
#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif
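/*
 * Warn once when an SMP kernel is brought up on early B-step Pentium
 * parts, which have known SMP errata; this is purely a diagnostic.
 */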
static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* calling is from identify_secondary_cpu() ? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
				"with B stepping processors.\n");
	}
#endif
}
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system.
	 * Note that the workaround only should be initialized once...
	 */
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif
	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * P4 Xeon errata 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}
	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);
#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
#else
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif
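/*
 * Pick the NUMA node reported by ACPI/SRAT for this CPU; fall back to
 * the node recorded earlier by init_cpu_to_node() when that information
 * is missing or the node is offline.
 */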
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}
/*
 * find out the number of processor cores on the die
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}
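/*
 * Probe the VMX capability MSRs for the optional VT-x features (TPR
 * shadow, virtual NMIs, flexpriority, EPT, VPID) this CPU can enable.
 * The allowed-1 settings live in the high dword of each control MSR,
 * which is why the high and low halves are OR-ed together below.
 */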
static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}
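/*
 * init_intel() is the per-CPU init callback invoked from the generic
 * CPU identification path: it applies the family workarounds above,
 * fills in cache and topology details and sets the remaining
 * model-specific feature bits.
 */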
static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	if (cpu_has_ds) {
		unsigned int l1;
		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif
	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);
	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if BIOS did not.
	 * x86_energy_perf_policy(8) is available to change it at run-time
	 */
	if (cpu_has(c, X86_FEATURE_EPB)) {
		u64 epb;

		rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
		if ((epb & 0xF) == ENERGY_PERF_BIAS_PERFORMANCE) {
			printk_once(KERN_WARNING "ENERGY_PERF_BIAS:"
				" Set to 'normal', was 'performance'\n"
				"ENERGY_PERF_BIAS: View and update with"
				" x86_energy_perf_policy(8)\n");
			epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
			wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
		}
	}
}
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;
	return size;
}
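/*
 * Hook the Intel callbacks into the common CPU setup code. The
 * model_names tables below are only used to label older parts that do
 * not report a brand string via CPUID.
 */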
static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_size_cache	= intel_size_cache,
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);