Merge branch 'x86/cpu' into x86/core
commit 446d27338d
@@ -1888,6 +1888,12 @@ and is between 256 and 4096 characters. It is defined in the file

	shapers=	[NET]
			Maximal number of shapers.

	show_msr=	[x86] show boot-time MSR settings
			Format: { <integer> }
			Show boot-time (BIOS-initialized) MSR settings.
			The parameter means the number of CPUs to show,
			for example 1 means boot CPU only.

	sim710=		[SCSI,HW]
			See header of drivers/scsi/sim710.c.
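Note: as the setup_show_msr()/print_cpu_info() changes later in this diff show, booting with e.g. show_msr=1 on the kernel command line dumps boot-time MSR values for the boot CPU only, while show_msr=4 would cover the first four CPUs.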
@@ -8,14 +8,14 @@ obj-y += proc.o capflags.o powerflags.o
obj-$(CONFIG_X86_32)		+= common.o bugs.o cmpxchg.o
obj-$(CONFIG_X86_64)		+= common_64.o bugs_64.o

obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
obj-$(CONFIG_CPU_SUP_AMD_32)		+= amd.o
obj-$(CONFIG_CPU_SUP_AMD_64)		+= amd_64.o
obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
obj-$(CONFIG_CPU_SUP_CENTAUR_32)	+= centaur.o
obj-$(CONFIG_CPU_SUP_CENTAUR_64)	+= centaur_64.o
obj-$(CONFIG_CPU_SUP_TRANSMETA_32)	+= transmeta.o
obj-$(CONFIG_CPU_SUP_INTEL_32)		+= intel.o
obj-$(CONFIG_CPU_SUP_INTEL_64)		+= intel_64.o
obj-$(CONFIG_CPU_SUP_UMC_32)		+= umc.o

obj-$(CONFIG_X86_MCE)	+= mcheck/
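Note: with the .x86_cpu_dev.init registration scheme introduced below, cpu_devs[] is filled in link order by early_cpu_init(), so the order of these obj-$(CONFIG_CPU_SUP_*) lines now effectively determines vendor probe order, which is presumably why the Intel entries move in this hunk.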
@@ -31,6 +31,12 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
		if (c->x86_power & (1<<8))
			set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
	}

	/* Set MTRR capability flag if appropriate */
	if (c->x86_model == 13 || c->x86_model == 9 ||
	    (c->x86_model == 8 && c->x86_mask >= 8))
		set_cpu_cap(c, X86_FEATURE_K6_MTRR);
}

static void __cpuinit init_amd(struct cpuinfo_x86 *c)
@@ -166,10 +171,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
				mbytes);
		}

		/* Set MTRR capability flag if appropriate */
		if (c->x86_model == 13 || c->x86_model == 9 ||
		    (c->x86_model == 8 && c->x86_mask >= 8))
			set_cpu_cap(c, X86_FEATURE_K6_MTRR);
		break;
	}

@@ -297,6 +298,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
	.c_early_init = early_init_amd,
	.c_init = init_amd,
	.c_size_cache = amd_size_cache,
	.c_x86_vendor = X86_VENDOR_AMD,
};

cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);
cpu_dev_register(amd_cpu_dev);

@@ -218,7 +218,7 @@ static struct cpu_dev amd_cpu_dev __cpuinitdata = {
	.c_ident = { "AuthenticAMD" },
	.c_early_init = early_init_amd,
	.c_init = init_amd,
	.c_x86_vendor = X86_VENDOR_AMD,
};

cpu_vendor_dev_register(X86_VENDOR_AMD, &amd_cpu_dev);

cpu_dev_register(amd_cpu_dev);

@@ -314,6 +314,16 @@ enum {
	EAMD3D = 1<<20,
};

static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
{
	switch (c->x86) {
	case 5:
		/* Emulate MTRRs using Centaur's MCR. */
		set_cpu_cap(c, X86_FEATURE_CENTAUR_MCR);
		break;
	}
}

static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
{

@@ -462,8 +472,10 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
	.c_vendor = "Centaur",
	.c_ident = { "CentaurHauls" },
	.c_early_init = early_init_centaur,
	.c_init = init_centaur,
	.c_size_cache = centaur_size_cache,
	.c_x86_vendor = X86_VENDOR_CENTAUR,
};

cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
cpu_dev_register(centaur_cpu_dev);

@@ -29,7 +29,8 @@ static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
	.c_ident = { "CentaurHauls" },
	.c_early_init = early_init_centaur,
	.c_init = init_centaur,
	.c_x86_vendor = X86_VENDOR_CENTAUR,
};

cpu_vendor_dev_register(X86_VENDOR_CENTAUR, &centaur_cpu_dev);
cpu_dev_register(centaur_cpu_dev);
@@ -22,6 +22,8 @@

#include "cpu.h"

static struct cpu_dev *this_cpu __cpuinitdata;

DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
@@ -58,32 +60,9 @@ DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
} };
EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

static int cachesize_override __cpuinitdata = -1;
static int disable_x86_serial_nr __cpuinitdata = 1;

struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
};
static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

static int __init cachesize_setup(char *str)
{
	get_option(&str, &cachesize_override);
@@ -91,72 +70,6 @@ static int __init cachesize_setup(char *str)
}
__setup("cachesize=", cachesize_setup);

int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (cpuid_eax(0x80000000) < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0'; /* Zero-pad the rest */
	}

	return 1;
}


void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ecx, edx, l2size;

	n = cpuid_eax(0x80000000);

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24)+(edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just has a large L1. */
		return;

	ecx = cpuid_ecx(0x80000006);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

/*
 * Naming convention should be: <Name> [(<Codename>)]
 * This table only is used unless init_<vendor>() below doesn't set it;
@@ -185,35 +98,6 @@ static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
	return NULL;		/* Not found */
}


static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				if (!early)
					this_cpu = cpu_devs[i];
				return;
			}
		}
	}
	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}


static int __init x86_fxsr_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FXSR);
@@ -222,7 +106,6 @@ static int __init x86_fxsr_setup(char *s)
}
__setup("nofxsr", x86_fxsr_setup);


static int __init x86_sep_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_SEP);
@@ -230,7 +113,6 @@ static int __init x86_sep_setup(char *s)
}
__setup("nosep", x86_sep_setup);


/* Standard macro to see if a specific flag is changeable */
static inline int flag_is_changeable_p(u32 flag)
{
@@ -252,14 +134,216 @@ static inline int flag_is_changeable_p(u32 flag)
	return ((f1^f2) & flag) != 0;
}


/* Probe for the CPUID instruction */
static int __cpuinit have_cpuid_p(void)
{
	return flag_is_changeable_p(X86_EFLAGS_ID);
}

void __init cpu_detect(struct cpuinfo_x86 *c)
static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_cpu_cap(c, X86_FEATURE_PN);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);

__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}

static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
	/* Not much we can do here... */
	/* Check if at least it has cpuid */
	if (c->cpuid_level == -1) {
		/* No cpuid. It must be an ancient CPU */
		if (c->x86 == 4)
			strcpy(c->x86_model_id, "486");
		else if (c->x86 == 3)
			strcpy(c->x86_model_id, "386");
	}
}

static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
	.c_x86_vendor = X86_VENDOR_UNKNOWN,
};

int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;

	v = (unsigned int *) c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0'; /* Zero-pad the rest */
	}

	return 1;
}
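get_model_name() reads the 48-character brand string from extended CPUID leaves 0x80000002..0x80000004, each of which returns 16 bytes in EAX/EBX/ECX/EDX. A minimal user-space sketch of the same readout, using GCC's <cpuid.h> (illustrative only, not part of this patch):

#include <stdio.h>
#include <string.h>
#include <cpuid.h>

int main(void)
{
	unsigned int regs[12];	/* 3 leaves x 4 registers = 48 bytes */
	char brand[49];
	int i;

	for (i = 0; i < 3; i++)
		/* __get_cpuid() returns 0 if the leaf is unsupported */
		if (!__get_cpuid(0x80000002 + i,
				 &regs[4 * i], &regs[4 * i + 1],
				 &regs[4 * i + 2], &regs[4 * i + 3]))
			return 1;
	memcpy(brand, regs, 48);
	brand[48] = '\0';
	printf("%s\n", brand);	/* may be right-justified, as noted above */
	return 0;
}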
void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
	}

	if (n < 0x80000006)	/* Some chips just has a large L1. */
		return;

	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;

	/* do processor-specific cache resizing */
	if (this_cpu->c_size_cache)
		l2size = this_cpu->c_size_cache(c, l2size);

	/* Allow user to override all this if necessary. */
	if (cachesize_override != -1)
		l2size = cachesize_override;

	if (l2size == 0)
		return;		/* Again, no L2 cache is possible */

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

#ifdef CONFIG_X86_HT
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	if (!cpu_has(c, X86_FEATURE_HT))
		return;

	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
			       smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);


		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings);

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
			((1 << core_bits) - 1);
	}

out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}
}
#endif
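Worked example for detect_ht(): on a hypothetical HT-capable part reporting EBX[23:16] = 4 logical processors with x86_max_cores = 2, index_msb = get_count_order(4) = 2 selects the package bits of the APIC ID; then 4 / 2 = 2 siblings per core gives index_msb = get_count_order(2) = 1 and core_bits = 1, so cpu_core_id is taken from bit 1 of the APIC ID. (Illustrative numbers only.)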
static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
	char *v = c->x86_vendor_id;
	int i;
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}
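Note the changed contract here: cpu_devs[] is now a densely packed, link-ordered array (filled by early_cpu_init() below), so the scan stops at the first NULL slot, and the vendor number is taken from the matched driver's c_x86_vendor field instead of the array index.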
void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -268,50 +352,47 @@ void __init cpu_detect(struct cpuinfo_x86 *c)
	      (unsigned int *)&c->x86_vendor_id[4]);

	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 15;
		c->x86_model = (tfms >> 4) & 15;
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		c->x86_mask = tfms & 15;
			c->x86_model += ((tfms >> 16) & 0xf) << 4;
		if (cap0 & (1<<19)) {
			c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}
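Worked example for the signature decode in cpu_detect(): with tfms = 0x000006fb (an illustrative value, not from the patch), family = (tfms >> 8) & 0xf = 6, model = (tfms >> 4) & 0xf = 0xf and stepping = tfms & 0xf = 0xb; because family >= 6 the extended-model bits (tfms >> 16) & 0xf = 0 are folded in, leaving model 15, and the extended-family byte would only be added for family 0xf.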
static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)

static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	unsigned int ebx;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);
	if (have_cpuid_p()) {
		/* Intel-defined flags: level 0x00000001 */
		if (c->cpuid_level >= 0x00000001) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
		}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ((xlvl & 0xffff0000) == 0x80000000) {
			if (xlvl >= 0x80000001) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
		}
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;
		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}
}

/*
 * Do minimum CPU detection early.
 * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -321,25 +402,54 @@ static void __cpuinit early_get_cap(struct cpuinfo_x86 *c)
 * WARNING: this function is only called on the BP. Don't add code here
 * that is supposed to run on all CPUs.
 */
static void __init early_cpu_detect(void)
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	c->x86_cache_alignment = 32;
	c->x86_clflush_size = 32;
	c->x86_cache_alignment = c->x86_clflush_size;

	if (!have_cpuid_p())
		return;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	c->extended_cpuid_level = 0;

	cpu_detect(c);

	get_cpu_vendor(c, 1);
	get_cpu_vendor(c);

	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
	    cpu_devs[c->x86_vendor]->c_early_init)
		cpu_devs[c->x86_vendor]->c_early_init(c);
	get_cpu_cap(c);

	early_get_cap(c);
	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	validate_pat_support(c);
}

void __init early_cpu_init(void)
{
	struct cpu_dev **cdev;
	int count = 0;

	printk("KERNEL supported cpus:\n");
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		struct cpu_dev *cpudev = *cdev;
		unsigned int j;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

		for (j = 0; j < 2; j++) {
			if (!cpudev->c_ident[j])
				continue;
			printk(" %s %s\n", cpudev->c_vendor,
				cpudev->c_ident[j]);
		}
	}

	early_identify_cpu(&boot_cpu_data);
}

/*
@@ -373,87 +483,34 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)

static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	unsigned int ebx;
	if (!have_cpuid_p())
		return;

	if (have_cpuid_p()) {
		/* Get vendor name */
		cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
		      (unsigned int *)&c->x86_vendor_id[0],
		      (unsigned int *)&c->x86_vendor_id[8],
		      (unsigned int *)&c->x86_vendor_id[4]);
	c->extended_cpuid_level = 0;

		get_cpu_vendor(c, 0);
		/* Initialize the standard set of capabilities */
		/* Note that the vendor-specific code below might override */
		/* Intel-defined flags: level 0x00000001 */
		if (c->cpuid_level >= 0x00000001) {
			u32 capability, excap;
			cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
			c->x86_capability[0] = capability;
			c->x86_capability[4] = excap;
			c->x86 = (tfms >> 8) & 15;
			c->x86_model = (tfms >> 4) & 15;
			if (c->x86 == 0xf)
				c->x86 += (tfms >> 20) & 0xff;
			if (c->x86 >= 0x6)
				c->x86_model += ((tfms >> 16) & 0xF) << 4;
			c->x86_mask = tfms & 15;
			c->initial_apicid = (ebx >> 24) & 0xFF;
	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (c->cpuid_level >= 0x00000001) {
		c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
#ifdef CONFIG_X86_HT
			c->apicid = phys_pkg_id(c->initial_apicid, 0);
			c->phys_proc_id = c->initial_apicid;
		c->apicid = phys_pkg_id(c->initial_apicid, 0);
		c->phys_proc_id = c->initial_apicid;
#else
			c->apicid = c->initial_apicid;
		c->apicid = c->initial_apicid;
#endif
		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
			c->x86_clflush_size = ((ebx >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}

		/* AMD-defined flags: level 0x80000001 */
		xlvl = cpuid_eax(0x80000000);
		if ((xlvl & 0xffff0000) == 0x80000000) {
			if (xlvl >= 0x80000001) {
				c->x86_capability[1] = cpuid_edx(0x80000001);
				c->x86_capability[6] = cpuid_ecx(0x80000001);
			}
			if (xlvl >= 0x80000004)
				get_model_name(c); /* Default name */
		}

		init_scattered_cpuid_features(c);
		detect_nopl(c);
	}

	if (c->extended_cpuid_level >= 0x80000004)
		get_model_name(c); /* Default name */

	init_scattered_cpuid_features(c);
	detect_nopl(c);
}

static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
		/* Disable processor serial number */
		unsigned long lo, hi;
		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		lo |= 0x200000;
		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
		printk(KERN_NOTICE "CPU serial number disabled.\n");
		clear_cpu_cap(c, X86_FEATURE_PN);

		/* Disabling the serial number may affect the cpuid level */
		c->cpuid_level = cpuid_eax(0);
	}
}

static int __init x86_serial_nr_setup(char *s)
{
	disable_x86_serial_nr = 0;
	return 1;
}
__setup("serialnumber", x86_serial_nr_setup);



/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
@@ -529,7 +586,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
	 */
	if (c != &boot_cpu_data) {
		/* AND the already accumulated flags with these */
		for (i = 0 ; i < NCAPINTS ; i++)
		for (i = 0; i < NCAPINTS; i++)
			boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
	}

@@ -558,51 +615,48 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
	mtrr_ap_init();
}

#ifdef CONFIG_X86_HT
void __cpuinit detect_ht(struct cpuinfo_x86 *c)
struct msr_range {
	unsigned min;
	unsigned max;
};

static struct msr_range msr_range_array[] __cpuinitdata = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __cpuinit print_cpu_msr(void)
{
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;
	unsigned index;
	u64 val;
	int i;
	unsigned index_min, index_max;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
		return;

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of the "
					"siblings %d", smp_num_siblings);
			smp_num_siblings = 1;
			return;
	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;
		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}

		index_msb = get_count_order(smp_num_siblings);
		c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);

		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
		       c->phys_proc_id);

		smp_num_siblings = smp_num_siblings / c->x86_max_cores;

		index_msb = get_count_order(smp_num_siblings) ;

		core_bits = get_count_order(c->x86_max_cores);

		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
					((1 << core_bits) - 1);

		if (c->x86_max_cores > 1)
			printk(KERN_INFO "CPU: Processor Core ID: %d\n",
			       c->cpu_core_id);
	}
}
#endif

static int show_msr __cpuinitdata;
static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);
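Usage note: show_msr=N (documented in the kernel-parameters hunk at the top of this diff) sets the show_msr count, and print_cpu_info() below invokes print_cpu_msr() for every CPU whose cpu_index is below it, dumping each readable MSR in the four msr_range_array ranges via the fault-safe rdmsrl_amd_safe().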
static __init int setup_noclflush(char *arg)
{
@@ -621,17 +675,25 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
	vendor = c->x86_vendor_id;

	if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
		printk("%s ", vendor);
		printk(KERN_CONT "%s ", vendor);

	if (!c->x86_model_id[0])
		printk("%d86", c->x86);
	if (c->x86_model_id[0])
		printk(KERN_CONT "%s", c->x86_model_id);
	else
		printk("%s", c->x86_model_id);
		printk(KERN_CONT "%d86", c->x86);

	if (c->x86_mask || c->cpuid_level >= 0)
		printk(" stepping %02x\n", c->x86_mask);
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk("\n");
		printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
	if (c->cpu_index < show_msr)
		print_cpu_msr();
#else
	if (show_msr)
		print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
@@ -647,19 +709,6 @@ __setup("clearcpuid=", setup_disablecpuid);

cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;

void __init early_cpu_init(void)
{
	struct cpu_vendor_dev *cvdev;

	for (cvdev = __x86cpuvendor_start ;
	     cvdev < __x86cpuvendor_end ;
	     cvdev++)
		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;

	early_cpu_detect();
	validate_pat_support(&boot_cpu_data);
}

/* Make sure %fs is initialized properly in idle threads */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
@@ -668,18 +717,6 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
	return regs;
}

/* Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one. */
void switch_to_new_gdt(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
}

/*
 * cpu_init() initializes state that is per-CPU. Some data is already
 * initialized (naturally) in the bootstrap process, such as the GDT
@@ -37,6 +37,8 @@

#include "cpu.h"

static struct cpu_dev *this_cpu __cpuinitdata;

/* We need valid kernel segments for data and code in long mode too
 * IRET will check the segment types  kkeil 2000/10/28
 * Also sysret mandates a special GDT layout
@@ -66,7 +68,7 @@ void switch_to_new_gdt(void)
	load_gdt(&gdt_descr);
}

struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};

static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
@@ -76,12 +78,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
static struct cpu_dev __cpuinitdata default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
	.c_x86_vendor = X86_VENDOR_UNKNOWN,
};
static struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;

int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
	unsigned int *v;
	char *p, *q;

	if (c->extended_cpuid_level < 0x80000004)
		return 0;
@@ -91,35 +94,49 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;

	/* Intel chips right-justify this string for some dumb reason;
	   undo that brain damage */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0'; /* Zero-pad the rest */
	}

	return 1;
}


void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
	unsigned int n, dummy, ebx, ecx, edx;
	unsigned int n, dummy, ebx, ecx, edx, l2size;

	n = c->extended_cpuid_level;

	if (n >= 0x80000005) {
		cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), "
		       "D cache %dK (%d bytes/line)\n",
		       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
			edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
		c->x86_cache_size = (ecx>>24) + (edx>>24);
		/* On K8 L1 TLB is inclusive, so don't count it */
		c->x86_tlbsize = 0;
	}

	if (n >= 0x80000006) {
		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
		ecx = cpuid_ecx(0x80000006);
		c->x86_cache_size = ecx >> 16;
		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
	if (n < 0x80000006)	/* Some chips just has a large L1. */
		return;

		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
		       c->x86_cache_size, ecx & 0xFF);
	}
	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
	l2size = ecx >> 16;
	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

	c->x86_cache_size = l2size;

	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
	       l2size, ecx & 0xFF);
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -128,14 +145,13 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
	u32 eax, ebx, ecx, edx;
	int index_msb, core_bits;

	cpuid(1, &eax, &ebx, &ecx, &edx);


	if (!cpu_has(c, X86_FEATURE_HT))
		return;
	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
		goto out;

	cpuid(1, &eax, &ebx, &ecx, &edx);

	smp_num_siblings = (ebx & 0xff0000) >> 16;

	if (smp_num_siblings == 1) {
@@ -143,8 +159,8 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
	} else if (smp_num_siblings > 1) {

		if (smp_num_siblings > NR_CPUS) {
			printk(KERN_WARNING "CPU: Unsupported number of "
			       "siblings %d", smp_num_siblings);
			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
			       smp_num_siblings);
			smp_num_siblings = 1;
			return;
		}
@@ -161,6 +177,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
		c->cpu_core_id = phys_pkg_id(index_msb) &
				 ((1 << core_bits) - 1);
	}

out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
@@ -168,7 +185,6 @@ out:
		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
		       c->cpu_core_id);
	}

#endif
}

@@ -179,43 +195,150 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
	static int printed;

	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (cpu_devs[i]) {
			if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
			    (cpu_devs[i]->c_ident[1] &&
			     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
				c->x86_vendor = i;
				this_cpu = cpu_devs[i];
				return;
			}
		if (!cpu_devs[i])
			break;

		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
		}
	}

	if (!printed) {
		printed++;
		printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}

	c->x86_vendor = X86_VENDOR_UNKNOWN;
	this_cpu = &default_cpu;
}

static void __init early_cpu_support_print(void)
void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
{
	int i,j;
	struct cpu_dev *cpu_devx;
	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);

	printk("KERNEL supported cpus:\n");
	for (i = 0; i < X86_VENDOR_NUM; i++) {
		cpu_devx = cpu_devs[i];
		if (!cpu_devx)
			continue;
		for (j = 0; j < 2; j++) {
			if (!cpu_devx->c_ident[j])
				continue;
			printk(" %s %s\n", cpu_devx->c_vendor,
				cpu_devx->c_ident[j]);
	c->x86 = 4;
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
		}
	}
}


static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;
	u32 ebx;

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;

		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
	}

	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
}

/* Do some early cpuid on the boot CPU to get some parameter that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __init early_identify_cpu(struct cpuinfo_x86 *c)
{

	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;

	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	c->extended_cpuid_level = 0;

	cpu_detect(c);

	get_cpu_vendor(c);

	get_cpu_cap(c);

	if (this_cpu->c_early_init)
		this_cpu->c_early_init(c);

	validate_pat_support(c);
}

void __init early_cpu_init(void)
{
	struct cpu_dev **cdev;
	int count = 0;

	printk("KERNEL supported cpus:\n");
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		struct cpu_dev *cpudev = *cdev;
		unsigned int j;

		if (count >= X86_VENDOR_NUM)
			break;
		cpu_devs[count] = cpudev;
		count++;

		for (j = 0; j < 2; j++) {
			if (!cpudev->c_ident[j])
				continue;
			printk(" %s %s\n", cpudev->c_vendor,
				cpudev->c_ident[j]);
		}
	}

	early_identify_cpu(&boot_cpu_data);
}

/*
 * The NOPL instruction is supposed to exist on all CPUs with
 * family >= 6, unfortunately, that's not true in practice because
@@ -249,111 +372,26 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
	}
}

static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c);

void __init early_cpu_init(void)
static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
	struct cpu_vendor_dev *cvdev;

	for (cvdev = __x86cpuvendor_start ;
	     cvdev < __x86cpuvendor_end ;
	     cvdev++)
		cpu_devs[cvdev->vendor] = cvdev->cpu_dev;
	early_cpu_support_print();
	early_identify_cpu(&boot_cpu_data);
}

/* Do some early cpuid on the boot CPU to get some parameter that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
static void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
	u32 tfms, xlvl;

	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->extended_cpuid_level = 0;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	/* Get vendor name */
	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
	      (unsigned int *)&c->x86_vendor_id[0],
	      (unsigned int *)&c->x86_vendor_id[8],
	      (unsigned int *)&c->x86_vendor_id[4]);
	cpu_detect(c);

	get_cpu_vendor(c);

	/* Initialize the standard set of capabilities */
	/* Note that the vendor-specific code below might override */

	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		__u32 misc;
		cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
		      &c->x86_capability[0]);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xF) << 4;
		if (test_cpu_cap(c, X86_FEATURE_CLFLSH))
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
	} else {
		/* Have CPUID level 0 only - unheard of */
		c->x86 = 4;
	}
	get_cpu_cap(c);

	c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xff;
#ifdef CONFIG_SMP
	c->phys_proc_id = c->initial_apicid;
#endif
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
			c->x86_capability[6] = cpuid_ecx(0x80000001);
		}
		if (xlvl >= 0x80000004)
			get_model_name(c); /* Default name */
	}

	/* Transmeta-defined flags: level 0x80860001 */
	xlvl = cpuid_eax(0x80860000);
	if ((xlvl & 0xffff0000) == 0x80860000) {
		/* Don't set x86_cpuid_level here for now to not confuse. */
		if (xlvl >= 0x80860001)
			c->x86_capability[2] = cpuid_edx(0x80860001);
	}

	if (c->extended_cpuid_level >= 0x80000007)
		c->x86_power = cpuid_edx(0x80000007);

	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);

		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
	if (c->extended_cpuid_level >= 0x80000004)
		get_model_name(c); /* Default name */

	init_scattered_cpuid_features(c);
	detect_nopl(c);

	if (c->x86_vendor != X86_VENDOR_UNKNOWN &&
	    cpu_devs[c->x86_vendor]->c_early_init)
		cpu_devs[c->x86_vendor]->c_early_init(c);

	validate_pat_support(c);
}

/*
@@ -363,9 +401,19 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
	int i;

	early_identify_cpu(c);
	c->loops_per_jiffy = loops_per_jiffy;
	c->x86_cache_size = -1;
	c->x86_vendor = X86_VENDOR_UNKNOWN;
	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
	c->x86_vendor_id[0] = '\0'; /* Unset */
	c->x86_model_id[0] = '\0';  /* Unset */
	c->x86_max_cores = 1;
	c->x86_coreid_bits = 0;
	c->x86_clflush_size = 64;
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);

	init_scattered_cpuid_features(c);
	generic_identify(c);

	c->apicid = phys_pkg_id(0);

@@ -411,7 +459,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)

}

void __cpuinit identify_boot_cpu(void)
void __init identify_boot_cpu(void)
{
	identify_cpu(&boot_cpu_data);
}
@@ -423,6 +471,49 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
	mtrr_ap_init();
}

struct msr_range {
	unsigned min;
	unsigned max;
};

static struct msr_range msr_range_array[] __cpuinitdata = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
	{ 0xc0011000, 0xc001103b},
};

static void __cpuinit print_cpu_msr(void)
{
	unsigned index;
	u64 val;
	int i;
	unsigned index_min, index_max;

	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;
		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
		}
	}
}

static int show_msr __cpuinitdata;
static __init int setup_show_msr(char *arg)
{
	int num;

	get_option(&arg, &num);

	if (num > 0)
		show_msr = num;
	return 1;
}
__setup("show_msr=", setup_show_msr);

static __init int setup_noclflush(char *arg)
{
	setup_clear_cpu_cap(X86_FEATURE_CLFLSH);
@@ -439,6 +530,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
	else
		printk(KERN_CONT "\n");

#ifdef CONFIG_SMP
	if (c->cpu_index < show_msr)
		print_cpu_msr();
#else
	if (show_msr)
		print_cpu_msr();
#endif
}

static __init int setup_disablecpuid(char *arg)
@@ -21,21 +21,15 @@ struct cpu_dev {
	void		(*c_init)(struct cpuinfo_x86 * c);
	void		(*c_identify)(struct cpuinfo_x86 * c);
	unsigned int	(*c_size_cache)(struct cpuinfo_x86 * c, unsigned int size);
	int	c_x86_vendor;
};

extern struct cpu_dev * cpu_devs [X86_VENDOR_NUM];
#define cpu_dev_register(cpu_devX) \
	static struct cpu_dev *__cpu_dev_##cpu_devX __used \
	__attribute__((__section__(".x86_cpu_dev.init"))) = \
	&cpu_devX;

struct cpu_vendor_dev {
	int vendor;
	struct cpu_dev *cpu_dev;
};

#define cpu_vendor_dev_register(cpu_vendor_id, cpu_dev) \
	static struct cpu_vendor_dev __cpu_vendor_dev_##cpu_vendor_id __used \
	__attribute__((__section__(".x86cpuvendor.init"))) = \
	{ cpu_vendor_id, cpu_dev }

extern struct cpu_vendor_dev __x86cpuvendor_start[], __x86cpuvendor_end[];
extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];

extern int get_model_name(struct cpuinfo_x86 *c);
extern void display_cacheinfo(struct cpuinfo_x86 *c);
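cpu_dev_register() is the familiar linker-section plugin pattern: every vendor file drops a pointer into the .x86_cpu_dev.init section, and the bounds symbols added to the linker scripts below (__x86_cpu_dev_start/__x86_cpu_dev_end) turn that section into an array early_cpu_init() can walk. A minimal user-space analogue, assuming an ELF toolchain that auto-generates __start_/__stop_ symbols for a section whose name is a valid C identifier (the kernel uses explicit linker-script markers instead):

#include <stdio.h>

struct driver {
	const char *name;
};

/* Drop a pointer to each registered driver into the "drivers" section. */
#define driver_register(drv) \
	static struct driver *__drv_##drv \
	__attribute__((used, section("drivers"))) = &drv

static struct driver foo = { "foo" };
driver_register(foo);

static struct driver bar = { "bar" };
driver_register(bar);

/* GCC/Clang emit these bounds symbols for the section automatically. */
extern struct driver *__start_drivers[], *__stop_drivers[];

int main(void)
{
	struct driver **p;

	for (p = __start_drivers; p < __stop_drivers; p++)
		printf("registered: %s\n", (*p)->name);
	return 0;
}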
@@ -15,13 +15,11 @@
/*
 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
 */
static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned char ccr2, ccr3;
	unsigned long flags;

	/* we test for DEVID by checking whether CCR3 is writable */
	local_irq_save(flags);
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, ccr3 ^ 0x80);
	getCx86(0xc0);   /* dummy to change bus */
@@ -44,9 +42,16 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
		*dir0 = getCx86(CX86_DIR0);
		*dir1 = getCx86(CX86_DIR1);
	}
	local_irq_restore(flags);
}

static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned long flags;

	local_irq_save(flags);
	__do_cyrix_devid(dir0, dir1);
	local_irq_restore(flags);
}
/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
 * order to identify the Cyrix CPU model after we're out of setup.c
@@ -161,6 +166,24 @@ static void __cpuinit geode_configure(void)
	local_irq_restore(flags);
}

static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
{
	unsigned char dir0, dir0_msn, dir1 = 0;

	__do_cyrix_devid(&dir0, &dir1);
	dir0_msn = dir0 >> 4; /* identifies CPU "family" */

	switch (dir0_msn) {
	case 3: /* 6x86/6x86L */
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
		break;
	case 5: /* 6x86MX/M II */
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
		break;
	}
}

static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
{
@@ -416,16 +439,19 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
	.c_vendor = "Cyrix",
	.c_ident = { "CyrixInstead" },
	.c_early_init = early_init_cyrix,
	.c_init = init_cyrix,
	.c_identify = cyrix_identify,
	.c_x86_vendor = X86_VENDOR_CYRIX,
};

cpu_vendor_dev_register(X86_VENDOR_CYRIX, &cyrix_cpu_dev);
cpu_dev_register(cyrix_cpu_dev);

static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
	.c_vendor = "NSC",
	.c_ident = { "Geode by NSC" },
	.c_init = init_nsc,
	.c_x86_vendor = X86_VENDOR_NSC,
};

cpu_vendor_dev_register(X86_VENDOR_NSC, &nsc_cpu_dev);
cpu_dev_register(nsc_cpu_dev);
@@ -303,9 +303,10 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_early_init = early_init_intel,
	.c_init = init_intel,
	.c_size_cache = intel_size_cache,
	.c_x86_vendor = X86_VENDOR_INTEL,
};

cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);
cpu_dev_register(intel_cpu_dev);

/* arch_initcall(intel_cpu_init); */


@@ -90,6 +90,7 @@ static struct cpu_dev intel_cpu_dev __cpuinitdata = {
	.c_ident = { "GenuineIntel" },
	.c_early_init = early_init_intel,
	.c_init = init_intel,
	.c_x86_vendor = X86_VENDOR_INTEL,
};
cpu_vendor_dev_register(X86_VENDOR_INTEL, &intel_cpu_dev);

cpu_dev_register(intel_cpu_dev);

@@ -102,6 +102,7 @@ static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
	.c_ident = { "GenuineTMx86", "TransmetaCPU" },
	.c_init = init_transmeta,
	.c_identify = transmeta_identify,
	.c_x86_vendor = X86_VENDOR_TRANSMETA,
};

cpu_vendor_dev_register(X86_VENDOR_TRANSMETA, &transmeta_cpu_dev);
cpu_dev_register(transmeta_cpu_dev);

@@ -19,7 +19,8 @@ static struct cpu_dev umc_cpu_dev __cpuinitdata = {
		}
	},
	},
	.c_x86_vendor = X86_VENDOR_UMC,
};

cpu_vendor_dev_register(X86_VENDOR_UMC, &umc_cpu_dev);
cpu_dev_register(umc_cpu_dev);
@@ -330,6 +330,7 @@ struct pv_cpu_ops pv_cpu_ops = {
#endif
	.wbinvd = native_wbinvd,
	.read_msr = native_read_msr_safe,
	.read_msr_amd = native_read_msr_amd_safe,
	.write_msr = native_write_msr_safe,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,
@@ -339,9 +339,8 @@ static void
show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
		unsigned long *stack, unsigned long bp, char *log_lvl)
{
	printk("\nCall Trace:\n");
	printk("Call Trace:\n");
	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
	printk("\n");
}

void show_trace(struct task_struct *task, struct pt_regs *regs,
@@ -386,6 +385,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
		printk(" %016lx", *stack++);
		touch_nmi_watchdog();
	}
	printk("\n");
	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
}

@@ -443,7 +443,6 @@ void show_registers(struct pt_regs *regs)
		printk("Stack: ");
		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
				regs->bp, "");
		printk("\n");

		printk(KERN_EMERG "Code: ");
@@ -140,10 +140,10 @@ SECTIONS
	*(.con_initcall.init)
	__con_initcall_end = .;
  }
  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
	__x86cpuvendor_start = .;
	*(.x86cpuvendor.init)
	__x86cpuvendor_end = .;
  .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
	__x86_cpu_dev_start = .;
	*(.x86_cpu_dev.init)
	__x86_cpu_dev_end = .;
  }
  SECURITY_INIT
  . = ALIGN(4);

@@ -168,13 +168,12 @@ SECTIONS
	*(.con_initcall.init)
  }
  __con_initcall_end = .;
  . = ALIGN(16);
  __x86cpuvendor_start = .;
  .x86cpuvendor.init : AT(ADDR(.x86cpuvendor.init) - LOAD_OFFSET) {
	*(.x86cpuvendor.init)
  __x86_cpu_dev_start = .;
  .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
	*(.x86_cpu_dev.init)
  }
  __x86cpuvendor_end = .;
  SECURITY_INIT
  __x86_cpu_dev_end = .;

  . = ALIGN(8);
  .parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
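Both linker scripts now gather the registered pointers into a single .x86_cpu_dev.init section bracketed by __x86_cpu_dev_start and __x86_cpu_dev_end, which is exactly the array early_cpu_init() iterates; the old vendor-indexed .x86cpuvendor.init section and its bounds symbols are dropped.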
@@ -63,6 +63,22 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
	return EAX_EDX_VAL(val, low, high);
}

static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
						int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("2: rdmsr ; xor %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3: mov %3,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
		     _ASM_EXTABLE(2b, 3b)
		     : "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
	return EAX_EDX_VAL(val, low, high);
}

static inline void native_write_msr(unsigned int msr,
				    unsigned low, unsigned high)
{
@@ -158,6 +174,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
	*p = native_read_msr_safe(msr, &err);
	return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_amd_safe(msr, &err);
	return err;
}

#define rdtscl(low) \
	((low) = (u32)native_read_tsc())
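native_read_msr_amd_safe() uses the usual exception-table pattern: the rdmsr at label 2 clears the error register on success, while a faulting access is redirected by _ASM_EXTABLE to the fixup code at label 3, which stores -EFAULT and jumps back past the read, letting print_cpu_msr() silently skip unreadable indices. The 0x9c5a203a loaded into %edi is the AMD pass code that permits reading certain vendor-reserved MSRs.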
@@ -137,6 +137,7 @@ struct pv_cpu_ops {

	/* MSR, PMC and TSR operations.
	   err = 0/-EFAULT. wrmsr returns 0/-EFAULT. */
	u64 (*read_msr_amd)(unsigned int msr, int *err);
	u64 (*read_msr)(unsigned int msr, int *err);
	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);

@@ -720,6 +721,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
}
static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
}
static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
@@ -765,6 +770,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
	*p = paravirt_read_msr(msr, &err);
	return err;
}
static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_amd(msr, &err);
	return err;
}

static inline u64 paravirt_read_tsc(void)
{
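Under CONFIG_PARAVIRT, rdmsrl_amd_safe() therefore resolves to an indirect PVOP_CALL2 through pv_cpu_ops.read_msr_amd, which the native ops table in paravirt.c points back at native_read_msr_amd_safe(); both paths keep the same contract of returning 0 on success and -EFAULT on a faulting read.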
@@ -77,9 +77,9 @@ struct cpuinfo_x86 {
	__u8			x86_phys_bits;
	/* CPUID returned core id bits: */
	__u8			x86_coreid_bits;
#endif
	/* Max extended CPUID function supported: */
	__u32			extended_cpuid_level;
#endif
	/* Maximum supported CPUID level, -1=no CPUID: */
	int			cpuid_level;
	__u32			x86_capability[NCAPINTS];