crypto: more robust crypto_memneq
Disabling compiler optimizations can be fragile, since a new optimization could be added to -O0 or -Os that breaks the assumptions the code is making.

Instead of disabling compiler optimizations, use a dummy inline assembly (based on RELOC_HIDE) to block the problematic kinds of optimization, while still allowing other optimizations to be applied to the code.

The dummy inline assembly is added after every OR, and has the accumulator variable as its input and output. The compiler is forced to assume that the dummy inline assembly could both depend on the accumulator variable and change the accumulator variable, so it is forced to compute the value correctly before the inline assembly, and cannot assume anything about its value after the inline assembly.

This change should be enough to make crypto_memneq work correctly (with data-independent timing) even if it is inlined at its call sites. That can be done later in a followup patch.

Compile-tested on x86_64.

Signed-off-by: Cesar Eduardo Barros <cesarb@cesarb.eti.br>
Acked-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
parent b62ffd8c72
commit fe8c8a1268
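For illustration (not part of the commit): a minimal userspace sketch of the idea. The "leaky" variant is the kind of loop a sufficiently aggressive compiler could rewrite into an early-return comparison once it is inlined next to a != 0 test; the "hardened" variant uses the same dummy-asm barrier this patch introduces. The function names memneq_leaky and memneq_hardened are hypothetical; the macro body is the gcc definition added below.

#include <stddef.h>

/* The gcc definition added by this patch (see compiler-gcc.h below). */
#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))

/* Leaky (hypothetical): once inlined into "if (memneq_leaky(...))",
 * the compiler may legally stop at the first nonzero XOR, making the
 * run time depend on the position of the first mismatch. */
unsigned long memneq_leaky(const unsigned char *a,
			   const unsigned char *b, size_t n)
{
	unsigned long neq = 0;
	size_t i;

	for (i = 0; i < n; i++)
		neq |= a[i] ^ b[i];
	return neq;
}

/* Hardened (hypothetical): the barrier after every OR forces neq to
 * be materialized in a register at each step, so the compiler cannot
 * introduce a data-dependent early exit. */
unsigned long memneq_hardened(const unsigned char *a,
			      const unsigned char *b, size_t n)
{
	unsigned long neq = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		neq |= a[i] ^ b[i];
		OPTIMIZER_HIDE_VAR(neq);
	}
	return neq;
}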
crypto/Makefile
@@ -2,11 +2,6 @@
 # Cryptographic API
 #
 
-# memneq MUST be built with -Os or -O0 to prevent early-return optimizations
-# that will defeat memneq's actual purpose to prevent timing attacks.
-CFLAGS_REMOVE_memneq.o := -O1 -O2 -O3
-CFLAGS_memneq.o := -Os
-
 obj-$(CONFIG_CRYPTO) += crypto.o
 crypto-y := api.o cipher.o compress.o memneq.o
 
crypto/memneq.c
@@ -72,6 +72,7 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 	while (size >= sizeof(unsigned long)) {
 		neq |= *(unsigned long *)a ^ *(unsigned long *)b;
+		OPTIMIZER_HIDE_VAR(neq);
 		a += sizeof(unsigned long);
 		b += sizeof(unsigned long);
 		size -= sizeof(unsigned long);
@@ -79,6 +80,7 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
 #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
 	while (size > 0) {
 		neq |= *(unsigned char *)a ^ *(unsigned char *)b;
+		OPTIMIZER_HIDE_VAR(neq);
 		a += 1;
 		b += 1;
 		size -= 1;
@@ -89,33 +91,60 @@ __crypto_memneq_generic(const void *a, const void *b, size_t size)
 /* Loop-free fast-path for frequently used 16-byte size */
 static inline unsigned long __crypto_memneq_16(const void *a, const void *b)
 {
+	unsigned long neq = 0;
+
 #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-	if (sizeof(unsigned long) == 8)
-		return ((*(unsigned long *)(a)   ^ *(unsigned long *)(b))
-		      | (*(unsigned long *)(a+8) ^ *(unsigned long *)(b+8)));
-	else if (sizeof(unsigned int) == 4)
-		return ((*(unsigned int *)(a)    ^ *(unsigned int *)(b))
-		      | (*(unsigned int *)(a+4)  ^ *(unsigned int *)(b+4))
-		      | (*(unsigned int *)(a+8)  ^ *(unsigned int *)(b+8))
-		      | (*(unsigned int *)(a+12) ^ *(unsigned int *)(b+12)));
-	else
+	if (sizeof(unsigned long) == 8) {
+		neq |= *(unsigned long *)(a)   ^ *(unsigned long *)(b);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned long *)(a+8) ^ *(unsigned long *)(b+8);
+		OPTIMIZER_HIDE_VAR(neq);
+	} else if (sizeof(unsigned int) == 4) {
+		neq |= *(unsigned int *)(a)    ^ *(unsigned int *)(b);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned int *)(a+4)  ^ *(unsigned int *)(b+4);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned int *)(a+8)  ^ *(unsigned int *)(b+8);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned int *)(a+12) ^ *(unsigned int *)(b+12);
+		OPTIMIZER_HIDE_VAR(neq);
+	} else {
 #endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-	return ((*(unsigned char *)(a)    ^ *(unsigned char *)(b))
-	      | (*(unsigned char *)(a+1)  ^ *(unsigned char *)(b+1))
-	      | (*(unsigned char *)(a+2)  ^ *(unsigned char *)(b+2))
-	      | (*(unsigned char *)(a+3)  ^ *(unsigned char *)(b+3))
-	      | (*(unsigned char *)(a+4)  ^ *(unsigned char *)(b+4))
-	      | (*(unsigned char *)(a+5)  ^ *(unsigned char *)(b+5))
-	      | (*(unsigned char *)(a+6)  ^ *(unsigned char *)(b+6))
-	      | (*(unsigned char *)(a+7)  ^ *(unsigned char *)(b+7))
-	      | (*(unsigned char *)(a+8)  ^ *(unsigned char *)(b+8))
-	      | (*(unsigned char *)(a+9)  ^ *(unsigned char *)(b+9))
-	      | (*(unsigned char *)(a+10) ^ *(unsigned char *)(b+10))
-	      | (*(unsigned char *)(a+11) ^ *(unsigned char *)(b+11))
-	      | (*(unsigned char *)(a+12) ^ *(unsigned char *)(b+12))
-	      | (*(unsigned char *)(a+13) ^ *(unsigned char *)(b+13))
-	      | (*(unsigned char *)(a+14) ^ *(unsigned char *)(b+14))
-	      | (*(unsigned char *)(a+15) ^ *(unsigned char *)(b+15)));
+		neq |= *(unsigned char *)(a)    ^ *(unsigned char *)(b);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+1)  ^ *(unsigned char *)(b+1);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+2)  ^ *(unsigned char *)(b+2);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+3)  ^ *(unsigned char *)(b+3);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+4)  ^ *(unsigned char *)(b+4);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+5)  ^ *(unsigned char *)(b+5);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+6)  ^ *(unsigned char *)(b+6);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+7)  ^ *(unsigned char *)(b+7);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+8)  ^ *(unsigned char *)(b+8);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+9)  ^ *(unsigned char *)(b+9);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+10) ^ *(unsigned char *)(b+10);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+11) ^ *(unsigned char *)(b+11);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+12) ^ *(unsigned char *)(b+12);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+13) ^ *(unsigned char *)(b+13);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+14) ^ *(unsigned char *)(b+14);
+		OPTIMIZER_HIDE_VAR(neq);
+		neq |= *(unsigned char *)(a+15) ^ *(unsigned char *)(b+15);
+		OPTIMIZER_HIDE_VAR(neq);
+	}
+
+	return neq;
 }
 
 /* Compare two areas of memory without leaking timing information,
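For context, a hedged sketch of a typical call site (not part of this diff): __crypto_memneq() is normally reached through the crypto_memneq() inline wrapper declared in crypto/algapi.h, and is intended for exactly the comparisons where the location of a mismatch must not be observable, such as authentication-tag checks. check_tag() below is a hypothetical helper:

#include <linux/errno.h>
#include <linux/types.h>
#include <crypto/algapi.h>	/* crypto_memneq() wrapper */

/* Hypothetical helper: accept a message only if the received 16-byte
 * tag matches the locally computed one. crypto_memneq() returns
 * nonzero on mismatch and reads all 16 bytes in all cases, so the
 * check takes the same time whether the tags differ in byte 0, in
 * byte 15, or not at all. */
static int check_tag(const u8 *computed, const u8 *received)
{
	if (crypto_memneq(computed, received, 16))
		return -EBADMSG;	/* reject without revealing where */
	return 0;
}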
include/linux/compiler-gcc.h
@@ -37,6 +37,9 @@
      __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \
      (typeof(ptr)) (__ptr + (off)); })
 
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))
+
 #ifdef __CHECKER__
 #define __must_be_array(arr) 0
 #else
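A note on why the gcc definition above costs nothing at run time: the asm template is empty, so no instructions are emitted; the constraints alone do the work. "=r" (var) declares var as a register output of the asm, and "0" (var) feeds the old value of var in through that same register. A small standalone illustration (the file and function names are hypothetical):

/* hide_demo.c: compile with "gcc -O2 -S hide_demo.c" and observe
 * that the barrier itself adds no instructions to the output. */
#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var))

unsigned long demo(unsigned long a, unsigned long b)
{
	unsigned long x = a ^ b;

	/* x must be fully computed in a register before this point,
	 * and the compiler may assume nothing about its value after,
	 * since the (empty) asm could have replaced it. */
	OPTIMIZER_HIDE_VAR(x);
	return x & b;
}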
include/linux/compiler-intel.h
@@ -15,6 +15,7 @@
  */
 #undef barrier
 #undef RELOC_HIDE
+#undef OPTIMIZER_HIDE_VAR
 
 #define barrier() __memory_barrier()
 
@@ -23,6 +24,12 @@
      __ptr = (unsigned long) (ptr); \
      (typeof(ptr)) (__ptr + (off)); })
 
+/* This should act as an optimization barrier on var.
+ * Given that this compiler does not have inline assembly, a compiler barrier
+ * is the best we can do.
+ */
+#define OPTIMIZER_HIDE_VAR(var) barrier()
+
 /* Intel ECC compiler doesn't support __builtin_types_compatible_p() */
 #define __must_be_array(a) 0
 
include/linux/compiler.h
@@ -170,6 +170,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
      (typeof(ptr)) (__ptr + (off)); })
 #endif
 
+#ifndef OPTIMIZER_HIDE_VAR
+#define OPTIMIZER_HIDE_VAR(var) barrier()
+#endif
+
 /* Not-quite-unique ID. */
 #ifndef __UNIQUE_ID
 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)