2012-12-21 15:44:22 +08:00
|
|
|
#ifndef _ASM_X86_MICROCODE_INTEL_H
|
|
|
|
#define _ASM_X86_MICROCODE_INTEL_H
|
|
|
|
|
|
|
|
#include <asm/microcode.h>
|
|
|
|
|
|
|
|
/*
 * On-disk/in-memory header of an Intel microcode update image
 * (Intel SDM vol 3A, "Microcode Update Facilities").  All fields
 * are 32-bit words.
 */
struct microcode_header_intel {
	unsigned int hdrver;	/* header format version */
	unsigned int rev;	/* microcode update revision */
	unsigned int date;	/* date of the update */
	unsigned int sig;	/* processor signature this update targets */
	unsigned int cksum;	/* checksum of the update image */
	unsigned int ldrver;	/* loader revision required */
	unsigned int pf;	/* processor flags (platform mask) */
	unsigned int datasize;	/* payload size; 0 marks the old fixed-size format */
	unsigned int totalsize;	/* total image size; 0 marks the old fixed-size format */
	unsigned int reserved[3];
};
struct microcode_intel {
|
|
|
|
struct microcode_header_intel hdr;
|
|
|
|
unsigned int bits[0];
|
|
|
|
};
|
|
|
|
|
|
|
|
/* microcode format is extended from prescott processors */
struct extended_signature {
	unsigned int sig;	/* additional processor signature the update fits */
	unsigned int pf;	/* processor flags for this signature */
	unsigned int cksum;	/* checksum for this entry */
};
struct extended_sigtable {
|
|
|
|
unsigned int count;
|
|
|
|
unsigned int cksum;
|
|
|
|
unsigned int reserved[3];
|
|
|
|
struct extended_signature sigs[0];
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
 * Sizes for the pre-Prescott "old format" container, which carries no
 * datasize/totalsize fields: 2000 bytes of data plus the 48-byte
 * header, 2048 bytes total.
 */
#define DEFAULT_UCODE_DATASIZE	(2000)
#define MC_HEADER_SIZE		(sizeof(struct microcode_header_intel))
#define DEFAULT_UCODE_TOTALSIZE	(DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE)
#define EXT_HEADER_SIZE		(sizeof(struct extended_sigtable))
#define EXT_SIGNATURE_SIZE	(sizeof(struct extended_signature))
#define DWSIZE			(sizeof(u32))	/* size of one 32-bit word */
/*
 * Total size in bytes of the microcode image @mc.  A zero datasize
 * field marks the old fixed-size format (2048 bytes total); otherwise
 * the header's totalsize field is authoritative.
 */
#define get_totalsize(mc) \
	(((struct microcode_intel *)mc)->hdr.datasize ? \
	 ((struct microcode_intel *)mc)->hdr.totalsize : \
	 DEFAULT_UCODE_TOTALSIZE)
/*
 * Payload size in bytes of the microcode image @mc; a zero datasize
 * field means the old fixed-size format (2000 bytes of data).
 */
#define get_datasize(mc) \
	(((struct microcode_intel *)mc)->hdr.datasize ? \
	 ((struct microcode_intel *)mc)->hdr.datasize : DEFAULT_UCODE_DATASIZE)
/*
 * True when CPU signatures @s1 and @s2 are equal and the platform
 * flags @p1/@p2 overlap — or both sides carry no platform flags.
 */
#define sigmatch(s1, s2, p1, p2) \
	(((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0))))
/* Byte size of extended signature table @et, header plus @et->count entries. */
#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
/* Check whether image @mc matches (@csig, @cpf) and is newer than @rev. */
extern int get_matching_microcode(unsigned int csig, int cpf, int rev, void *mc);
/* Validate the structure/checksums of image @mc; log errors if @print_err. */
extern int microcode_sanity_check(void *mc, int print_err);
/* Signature/platform match only (no revision comparison) for image @mc. */
extern int get_matching_sig(unsigned int csig, int cpf, int rev, void *mc);
static inline int
|
|
|
|
revision_is_newer(struct microcode_header_intel *mc_header, int rev)
|
|
|
|
{
|
|
|
|
return (mc_header->rev <= rev) ? 0 : 1;
|
|
|
|
}
|
2012-12-21 15:44:22 +08:00
|
|
|
|
|
|
|
/*
 * Early-loading entry points; compiled out to empty stubs when
 * CONFIG_MICROCODE_INTEL_EARLY is disabled.
 */
#ifdef CONFIG_MICROCODE_INTEL_EARLY
/* Load microcode on the boot CPU. */
extern void __init load_ucode_intel_bsp(void);
/* Load microcode on a secondary (application) processor. */
extern void load_ucode_intel_ap(void);
/* Print early-microcode information. */
extern void show_ucode_info_early(void);
/* Stash the initrd-provided microcode; returns 0 on success. */
extern int __init save_microcode_in_initrd_intel(void);
/* Re-apply the cached microcode update (e.g. after resume). */
void reload_ucode_intel(void);
#else
static inline __init void load_ucode_intel_bsp(void) {}
static inline void load_ucode_intel_ap(void) {}
static inline void show_ucode_info_early(void) {}
static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; }
static inline void reload_ucode_intel(void) {}
#endif
/*
 * Save microcode image @mc for early loading; only meaningful with
 * early loading plus CPU hotplug, otherwise a successful no-op.
 */
#if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
extern int save_mc_for_early(u8 *mc);
#else
static inline int save_mc_for_early(u8 *mc)
{
	return 0;
}
#endif
#endif /* _ASM_X86_MICROCODE_INTEL_H */
|