From: Ingo Molnar
Date: Tue, 23 Sep 2008 11:26:27 +0000 (+0200)
Subject: Merge branch 'x86/signal' into core/signal
X-Git-Tag: v2.6.28-rc1~699^2^2^2~4
X-Git-Url: http://pilppa.com/gitweb/?a=commitdiff_plain;h=101d5b713700b902b1c200cdd1925c3cb7d34567;p=linux-2.6-omap-h63xx.git

Merge branch 'x86/signal' into core/signal

Conflicts:
	arch/x86/kernel/cpu/feature_names.c
	arch/x86/kernel/setup.c
	drivers/pci/intel-iommu.c
	include/asm-x86/cpufeature.h

Signed-off-by: Ingo Molnar
---

101d5b713700b902b1c200cdd1925c3cb7d34567
diff --cc arch/x86/kernel/cpu/common.c
index 4e456bd955b,c63ec65f484..8260d930eab
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@@ -340,21 -338,39 +340,50 @@@ static void __init early_cpu_detect(voi
  	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
  	    cpu_devs[c->x86_vendor]->c_early_init)
  		cpu_devs[c->x86_vendor]->c_early_init(c);
 +}
  
- 	early_get_cap(c);
 +/*
 + * The NOPL instruction is supposed to exist on all CPUs with
 + * family >= 6; unfortunately, that's not true in practice because
 + * of early VIA chips and (more importantly) broken virtualizers that
 + * are not easy to detect.  In the latter case it doesn't even *fail*
 + * reliably, so probing for it doesn't even work.  Disable it completely
 + * unless we can find a reliable way to detect all the broken cases.
 + */
 +static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 +{
 +	clear_cpu_cap(c, X86_FEATURE_NOPL);
  }
  
+ /*
+  * The NOPL instruction is supposed to exist on all CPUs with
+  * family >= 6, unfortunately, that's not true in practice because
+  * of early VIA chips and (more importantly) broken virtualizers that
+  * are not easy to detect.  Hence, probe for it based on first
+  * principles.
+  */
+ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+ {
+ 	const u32 nopl_signature = 0x888c53b1; /* Random number */
+ 	u32 has_nopl = nopl_signature;
+ 
+ 	clear_cpu_cap(c, X86_FEATURE_NOPL);
+ 	if (c->x86 >= 6) {
+ 		asm volatile("\n"
+ 			     "1:      .byte 0x0f,0x1f,0xc0\n" /* nopl %eax */
+ 			     "2:\n"
+ 			     "        .section .fixup,\"ax\"\n"
+ 			     "3:      xor %0,%0\n"
+ 			     "        jmp 2b\n"
+ 			     "        .previous\n"
+ 			     _ASM_EXTABLE(1b,3b)
+ 			     : "+a" (has_nopl));
+ 
+ 		if (has_nopl == nopl_signature)
+ 			set_cpu_cap(c, X86_FEATURE_NOPL);
+ 	}
+ }
+ 
  static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
  {
  	u32 tfms, xlvl;
diff --cc arch/x86/kernel/setup.c
index 9838f2539df,673f12cf6eb..c6b9330c1bf
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@@ -742,6 -738,9 +742,8 @@@ void __init setup_arch(char **cmdline_p
  #else
  	num_physpages = max_pfn;
  
- 	check_efer();
+ 	if (cpu_has_x2apic)
+ 		check_x2apic();
  
  	/* How many end-of-memory variables you have, grandma! */
  	/* need this before calling reserve_initrd */
diff --cc include/asm-x86/cpufeature.h
index 9489283a4bc,6dfa2b3f18c..f1b8a53c3e6
--- a/include/asm-x86/cpufeature.h
+++ b/include/asm-x86/cpufeature.h
@@@ -64,22 -72,22 +72,23 @@@
  #define X86_FEATURE_CYRIX_ARR	(3*32+ 2) /* Cyrix ARRs (= MTRRs) */
  #define X86_FEATURE_CENTAUR_MCR	(3*32+ 3) /* Centaur MCRs (= MTRRs) */
  /* cpu types for specific tunings: */
- #define X86_FEATURE_K8		(3*32+ 4) /* Opteron, Athlon64 */
- #define X86_FEATURE_K7		(3*32+ 5) /* Athlon */
- #define X86_FEATURE_P3		(3*32+ 6) /* P3 */
- #define X86_FEATURE_P4		(3*32+ 7) /* P4 */
+ #define X86_FEATURE_K8		(3*32+ 4) /* "" Opteron, Athlon64 */
+ #define X86_FEATURE_K7		(3*32+ 5) /* "" Athlon */
+ #define X86_FEATURE_P3		(3*32+ 6) /* "" P3 */
+ #define X86_FEATURE_P4		(3*32+ 7) /* "" P4 */
  #define X86_FEATURE_CONSTANT_TSC	(3*32+ 8) /* TSC ticks at a constant rate */
  #define X86_FEATURE_UP		(3*32+ 9) /* smp kernel running on up */
- #define X86_FEATURE_FXSAVE_LEAK	(3*32+10) /* FXSAVE leaks FOP/FIP/FOP */
+ #define X86_FEATURE_FXSAVE_LEAK	(3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */
  #define X86_FEATURE_ARCH_PERFMON	(3*32+11) /* Intel Architectural PerfMon */
++#define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */
  #define X86_FEATURE_PEBS	(3*32+12) /* Precise-Event Based Sampling */
  #define X86_FEATURE_BTS		(3*32+13) /* Branch Trace Store */
- #define X86_FEATURE_SYSCALL32	(3*32+14) /* syscall in ia32 userspace */
- #define X86_FEATURE_SYSENTER32	(3*32+15) /* sysenter in ia32 userspace */
- #define X86_FEATURE_REP_GOOD	(3*32+16) /* rep microcode works well on this CPU */
- #define X86_FEATURE_MFENCE_RDTSC	(3*32+17) /* Mfence synchronizes RDTSC */
- #define X86_FEATURE_LFENCE_RDTSC	(3*32+18) /* Lfence synchronizes RDTSC */
- #define X86_FEATURE_11AP	(3*32+19) /* Bad local APIC aka 11AP */
+ #define X86_FEATURE_SYSCALL32	(3*32+14) /* "" syscall in ia32 userspace */
+ #define X86_FEATURE_SYSENTER32	(3*32+15) /* "" sysenter in ia32 userspace */
+ #define X86_FEATURE_REP_GOOD	(3*32+16) /* rep microcode works well */
+ #define X86_FEATURE_MFENCE_RDTSC	(3*32+17) /* "" Mfence synchronizes RDTSC */
+ #define X86_FEATURE_LFENCE_RDTSC	(3*32+18) /* "" Lfence synchronizes RDTSC */
+ #define X86_FEATURE_11AP	(3*32+19) /* "" Bad local APIC aka 11AP */
  #define X86_FEATURE_NOPL	(3*32+20) /* The NOPL (0F 1F) instructions */

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
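
The probing variant of detect_nopl() above works from first principles: it executes the three-byte NOPL opcode (0F 1F C0, "nopl %eax") directly, and if the CPU raises #UD the _ASM_EXTABLE(1b,3b) fixup zeroes the signature register, so X86_FEATURE_NOPL is only set when the instruction actually runs. As a rough illustration only (not part of this commit, and not kernel code), the same probe can be sketched in userspace by catching the SIGILL that an unsupported opcode produces:

/*
 * Hypothetical userspace analogue of the probing detect_nopl() above;
 * illustration only.  Executes the NOPL opcode and treats a SIGILL as
 * "not supported", mirroring what the kernel's exception-table fixup
 * does for the in-kernel probe.
 */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf nopl_env;

static void sigill_handler(int sig)
{
	(void)sig;
	siglongjmp(nopl_env, 1);	/* opcode faulted: NOPL missing */
}

static int cpu_has_nopl(void)
{
	struct sigaction sa, old;
	int has_nopl = 0;

	sa.sa_handler = sigill_handler;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGILL, &sa, &old);

	if (sigsetjmp(nopl_env, 1) == 0) {
		/* nopl %eax -- raises #UD (SIGILL) when unsupported */
		asm volatile(".byte 0x0f, 0x1f, 0xc0");
		has_nopl = 1;
	}

	sigaction(SIGILL, &old, NULL);	/* restore the previous handler */
	return has_nopl;
}

int main(void)
{
	printf("NOPL %ssupported\n", cpu_has_nopl() ? "" : "not ");
	return 0;
}

The in-kernel version avoids signals entirely: the exception-table entry lets the #UD trap handler resume at the fixup label without leaving the kernel, which is why the probe is safe to run during early CPU identification.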