mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-15 23:14:31 +08:00
94a855111e
been long in the making. It is a lighterweight software-only fix for Skylake-based cores where enabling IBRS is a big hammer and causes a significant performance impact. What it basically does is, it aligns all kernel functions to 16 bytes boundary and adds a 16-byte padding before the function, objtool collects all functions' locations and when the mitigation gets applied, it patches a call accounting thunk which is used to track the call depth of the stack at any time. When that call depth reaches a magical, microarchitecture-specific value for the Return Stack Buffer, the code stuffs that RSB and avoids its underflow which could otherwise lead to the Intel variant of Retbleed. This software-only solution brings a lot of the lost performance back, as benchmarks suggest: https://lore.kernel.org/all/20220915111039.092790446@infradead.org/ That page above also contains a lot more detailed explanation of the whole mechanism - Implement a new control flow integrity scheme called FineIBT which is based on the software kCFI implementation and uses hardware IBT support where present to annotate and track indirect branches using a hash to validate them - Other misc fixes and cleanups -----BEGIN PGP SIGNATURE----- iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmOZp5EACgkQEsHwGGHe VUrZFxAAvi/+8L0IYSK4mKJvixGbTFjxN/Swo2JVOfs34LqGUT6JaBc+VUMwZxdb VMTFIZ3ttkKEodjhxGI7oGev6V8UfhI37SmO2lYKXpQVjXXnMlv/M+Vw3teE38CN gopi+xtGnT1IeWQ3tc/Tv18pleJ0mh5HKWiW+9KoqgXj0wgF9x4eRYDz1TDCDA/A iaBzs56j8m/FSykZHnrWZ/MvjKNPdGlfJASUCPeTM2dcrXQGJ93+X2hJctzDte0y Nuiw6Y0htfFBE7xoJn+sqm5Okr+McoUM18/CCprbgSKYk18iMYm3ZtAi6FUQZS1A ua4wQCf49loGp15PO61AS5d3OBf5D3q/WihQRbCaJvTVgPp9sWYnWwtcVUuhMllh ZQtBU9REcVJ/22bH09Q9CjBW0VpKpXHveqQdqRDViLJ6v/iI6EFGmD24SW/VxyRd 73k9MBGrL/dOf1SbEzdsnvcSB3LGzp0Om8o/KzJWOomrVKjBCJy16bwTEsCZEJmP i406m92GPXeaN1GhTko7vmF0GnkEdJs1GVCZPluCAxxbhHukyxHnrjlQjI4vC80n Ylc0B3Kvitw7LGJsPqu+/jfNHADC/zhx1qz/30wb5cFmFbN1aRdp3pm8JYUkn+l/ zri2Y6+O89gvE/9/xUhMohzHsWUO7xITiBavewKeTP9GSWybWUs= =cRy1 
-----END PGP SIGNATURE----- Merge tag 'x86_core_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip Pull x86 core updates from Borislav Petkov: - Add the call depth tracking mitigation for Retbleed which has been long in the making. It is a lighterweight software-only fix for Skylake-based cores where enabling IBRS is a big hammer and causes a significant performance impact. What it basically does is, it aligns all kernel functions to 16 bytes boundary and adds a 16-byte padding before the function, objtool collects all functions' locations and when the mitigation gets applied, it patches a call accounting thunk which is used to track the call depth of the stack at any time. When that call depth reaches a magical, microarchitecture-specific value for the Return Stack Buffer, the code stuffs that RSB and avoids its underflow which could otherwise lead to the Intel variant of Retbleed. This software-only solution brings a lot of the lost performance back, as benchmarks suggest: https://lore.kernel.org/all/20220915111039.092790446@infradead.org/ That page above also contains a lot more detailed explanation of the whole mechanism - Implement a new control flow integrity scheme called FineIBT which is based on the software kCFI implementation and uses hardware IBT support where present to annotate and track indirect branches using a hash to validate them - Other misc fixes and cleanups * tag 'x86_core_for_v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (80 commits) x86/paravirt: Use common macro for creating simple asm paravirt functions x86/paravirt: Remove clobber bitmask from .parainstructions x86/debug: Include percpu.h in debugreg.h to get DECLARE_PER_CPU() et al x86/cpufeatures: Move X86_FEATURE_CALL_DEPTH from bit 18 to bit 19 of word 11, to leave space for WIP X86_FEATURE_SGX_EDECCSSA bit x86/Kconfig: Enable kernel IBT by default x86,pm: Force out-of-line memcpy() objtool: Fix weak hole vs prefix symbol objtool: Optimize 
elf_dirty_reloc_sym() x86/cfi: Add boot time hash randomization x86/cfi: Boot time selection of CFI scheme x86/ibt: Implement FineIBT objtool: Add --cfi to generate the .cfi_sites section x86: Add prefix symbols for function padding objtool: Add option to generate prefix symbols objtool: Avoid O(bloody terrible) behaviour -- an ode to libelf objtool: Slice up elf_create_section_symbol() kallsyms: Revert "Take callthunks into account" x86: Unconfuse CONFIG_ and X86_FEATURE_ namespaces x86/retpoline: Fix crash printing warning x86/paravirt: Fix a !PARAVIRT build warning ...
305 lines
7.7 KiB
x86-64 Assembly (AT&T/GAS syntax, Intel SHA-NI extensions)
/*
|
|
* Intel SHA Extensions optimized implementation of a SHA-1 update function
|
|
*
|
|
* This file is provided under a dual BSD/GPLv2 license. When using or
|
|
* redistributing this file, you may do so under either license.
|
|
*
|
|
* GPL LICENSE SUMMARY
|
|
*
|
|
* Copyright(c) 2015 Intel Corporation.
|
|
*
|
|
* This program is free software; you can redistribute it and/or modify
|
|
* it under the terms of version 2 of the GNU General Public License as
|
|
* published by the Free Software Foundation.
|
|
*
|
|
* This program is distributed in the hope that it will be useful, but
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
* General Public License for more details.
|
|
*
|
|
* Contact Information:
|
|
* Sean Gulley <sean.m.gulley@intel.com>
|
|
* Tim Chen <tim.c.chen@linux.intel.com>
|
|
*
|
|
* BSD LICENSE
|
|
*
|
|
* Copyright(c) 2015 Intel Corporation.
|
|
*
|
|
* Redistribution and use in source and binary forms, with or without
|
|
* modification, are permitted provided that the following conditions
|
|
* are met:
|
|
*
|
|
* * Redistributions of source code must retain the above copyright
|
|
* notice, this list of conditions and the following disclaimer.
|
|
* * Redistributions in binary form must reproduce the above copyright
|
|
* notice, this list of conditions and the following disclaimer in
|
|
* the documentation and/or other materials provided with the
|
|
* distribution.
|
|
* * Neither the name of Intel Corporation nor the names of its
|
|
* contributors may be used to endorse or promote products derived
|
|
* from this software without specific prior written permission.
|
|
*
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
*
|
|
*/
|
|
|
|
#include <linux/linkage.h>
|
|
#include <linux/cfi_types.h>
|
|
|
|
#define DIGEST_PTR %rdi /* 1st arg */
|
|
#define DATA_PTR %rsi /* 2nd arg */
|
|
#define NUM_BLKS %rdx /* 3rd arg */
|
|
|
|
/* gcc conversion */
|
|
#define FRAME_SIZE 32 /* space for 2x16 bytes */
|
|
|
|
#define ABCD %xmm0
|
|
#define E0 %xmm1 /* Need two E's b/c they ping pong */
|
|
#define E1 %xmm2
|
|
#define MSG0 %xmm3
|
|
#define MSG1 %xmm4
|
|
#define MSG2 %xmm5
|
|
#define MSG3 %xmm6
|
|
#define SHUF_MASK %xmm7
|
|
|
|
|
|
/*
|
|
* Intel SHA Extensions optimized implementation of a SHA-1 update function
|
|
*
|
|
* The function takes a pointer to the current hash values, a pointer to the
|
|
* input data, and a number of 64 byte blocks to process. Once all blocks have
|
|
* been processed, the digest pointer is updated with the resulting hash value.
|
|
* The function only processes complete blocks, there is no functionality to
|
|
* store partial blocks. All message padding and hash value initialization must
|
|
* be done outside the update function.
|
|
*
|
|
* The indented lines in the loop are instructions related to rounds processing.
|
|
* The non-indented lines are instructions related to the message schedule.
|
|
*
|
|
* void sha1_ni_transform(uint32_t *digest, const void *data,
|
|
uint32_t numBlocks)
|
|
* digest : pointer to digest
|
|
* data: pointer to input data
|
|
* numBlocks: Number of blocks to process
|
|
*/
|
|
.text
|
|
SYM_TYPED_FUNC_START(sha1_ni_transform)
|
|
push %rbp
|
|
mov %rsp, %rbp
|
|
sub $FRAME_SIZE, %rsp
|
|
and $~0xF, %rsp
|
|
|
|
shl $6, NUM_BLKS /* convert to bytes */
|
|
jz .Ldone_hash
|
|
add DATA_PTR, NUM_BLKS /* pointer to end of data */
|
|
|
|
/* load initial hash values */
|
|
pinsrd $3, 1*16(DIGEST_PTR), E0
|
|
movdqu 0*16(DIGEST_PTR), ABCD
|
|
pand UPPER_WORD_MASK(%rip), E0
|
|
pshufd $0x1B, ABCD, ABCD
|
|
|
|
movdqa PSHUFFLE_BYTE_FLIP_MASK(%rip), SHUF_MASK
|
|
|
|
.Lloop0:
|
|
/* Save hash values for addition after rounds */
|
|
movdqa E0, (0*16)(%rsp)
|
|
movdqa ABCD, (1*16)(%rsp)
|
|
|
|
/* Rounds 0-3 */
|
|
movdqu 0*16(DATA_PTR), MSG0
|
|
pshufb SHUF_MASK, MSG0
|
|
paddd MSG0, E0
|
|
movdqa ABCD, E1
|
|
sha1rnds4 $0, E0, ABCD
|
|
|
|
/* Rounds 4-7 */
|
|
movdqu 1*16(DATA_PTR), MSG1
|
|
pshufb SHUF_MASK, MSG1
|
|
sha1nexte MSG1, E1
|
|
movdqa ABCD, E0
|
|
sha1rnds4 $0, E1, ABCD
|
|
sha1msg1 MSG1, MSG0
|
|
|
|
/* Rounds 8-11 */
|
|
movdqu 2*16(DATA_PTR), MSG2
|
|
pshufb SHUF_MASK, MSG2
|
|
sha1nexte MSG2, E0
|
|
movdqa ABCD, E1
|
|
sha1rnds4 $0, E0, ABCD
|
|
sha1msg1 MSG2, MSG1
|
|
pxor MSG2, MSG0
|
|
|
|
/* Rounds 12-15 */
|
|
movdqu 3*16(DATA_PTR), MSG3
|
|
pshufb SHUF_MASK, MSG3
|
|
sha1nexte MSG3, E1
|
|
movdqa ABCD, E0
|
|
sha1msg2 MSG3, MSG0
|
|
sha1rnds4 $0, E1, ABCD
|
|
sha1msg1 MSG3, MSG2
|
|
pxor MSG3, MSG1
|
|
|
|
/* Rounds 16-19 */
|
|
sha1nexte MSG0, E0
|
|
movdqa ABCD, E1
|
|
sha1msg2 MSG0, MSG1
|
|
sha1rnds4 $0, E0, ABCD
|
|
sha1msg1 MSG0, MSG3
|
|
pxor MSG0, MSG2
|
|
|
|
/* Rounds 20-23 */
|
|
sha1nexte MSG1, E1
|
|
movdqa ABCD, E0
|
|
sha1msg2 MSG1, MSG2
|
|
sha1rnds4 $1, E1, ABCD
|
|
sha1msg1 MSG1, MSG0
|
|
pxor MSG1, MSG3
|
|
|
|
/* Rounds 24-27 */
|
|
sha1nexte MSG2, E0
|
|
movdqa ABCD, E1
|
|
sha1msg2 MSG2, MSG3
|
|
sha1rnds4 $1, E0, ABCD
|
|
sha1msg1 MSG2, MSG1
|
|
pxor MSG2, MSG0
|
|
|
|
/* Rounds 28-31 */
|
|
sha1nexte MSG3, E1
|
|
movdqa ABCD, E0
|
|
sha1msg2 MSG3, MSG0
|
|
sha1rnds4 $1, E1, ABCD
|
|
sha1msg1 MSG3, MSG2
|
|
pxor MSG3, MSG1
|
|
|
|
/* Rounds 32-35 */
|
|
sha1nexte MSG0, E0
|
|
movdqa ABCD, E1
|
|
sha1msg2 MSG0, MSG1
|
|
sha1rnds4 $1, E0, ABCD
|
|
sha1msg1 MSG0, MSG3
|
|
pxor MSG0, MSG2
|
|
|
|
/* Rounds 36-39 */
|
|
sha1nexte MSG1, E1
|
|
movdqa ABCD, E0
|
|
sha1msg2 MSG1, MSG2
|
|
sha1rnds4 $1, E1, ABCD
|
|
sha1msg1 MSG1, MSG0
|
|
pxor MSG1, MSG3
|
|
|
|
/* Rounds 40-43 */
|
|
sha1nexte MSG2, E0
|
|
movdqa ABCD, E1
|
|
sha1msg2 MSG2, MSG3
|
|
sha1rnds4 $2, E0, ABCD
|
|
sha1msg1 MSG2, MSG1
|
|
pxor MSG2, MSG0
|
|
|
|
/* Rounds 44-47 */
|
|
sha1nexte MSG3, E1
|
|
movdqa ABCD, E0
|
|
sha1msg2 MSG3, MSG0
|
|
sha1rnds4 $2, E1, ABCD
|
|
sha1msg1 MSG3, MSG2
|
|
pxor MSG3, MSG1
|
|
|
|
/* Rounds 48-51 */
|
|
sha1nexte MSG0, E0
|
|
movdqa ABCD, E1
|
|
sha1msg2 MSG0, MSG1
|
|
sha1rnds4 $2, E0, ABCD
|
|
sha1msg1 MSG0, MSG3
|
|
pxor MSG0, MSG2
|
|
|
|
/* Rounds 52-55 */
|
|
sha1nexte MSG1, E1
|
|
movdqa ABCD, E0
|
|
sha1msg2 MSG1, MSG2
|
|
sha1rnds4 $2, E1, ABCD
|
|
sha1msg1 MSG1, MSG0
|
|
pxor MSG1, MSG3
|
|
|
|
/* Rounds 56-59 */
|
|
sha1nexte MSG2, E0
|
|
movdqa ABCD, E1
|
|
sha1msg2 MSG2, MSG3
|
|
sha1rnds4 $2, E0, ABCD
|
|
sha1msg1 MSG2, MSG1
|
|
pxor MSG2, MSG0
|
|
|
|
/* Rounds 60-63 */
|
|
sha1nexte MSG3, E1
|
|
movdqa ABCD, E0
|
|
sha1msg2 MSG3, MSG0
|
|
sha1rnds4 $3, E1, ABCD
|
|
sha1msg1 MSG3, MSG2
|
|
pxor MSG3, MSG1
|
|
|
|
/* Rounds 64-67 */
|
|
sha1nexte MSG0, E0
|
|
movdqa ABCD, E1
|
|
sha1msg2 MSG0, MSG1
|
|
sha1rnds4 $3, E0, ABCD
|
|
sha1msg1 MSG0, MSG3
|
|
pxor MSG0, MSG2
|
|
|
|
/* Rounds 68-71 */
|
|
sha1nexte MSG1, E1
|
|
movdqa ABCD, E0
|
|
sha1msg2 MSG1, MSG2
|
|
sha1rnds4 $3, E1, ABCD
|
|
pxor MSG1, MSG3
|
|
|
|
/* Rounds 72-75 */
|
|
sha1nexte MSG2, E0
|
|
movdqa ABCD, E1
|
|
sha1msg2 MSG2, MSG3
|
|
sha1rnds4 $3, E0, ABCD
|
|
|
|
/* Rounds 76-79 */
|
|
sha1nexte MSG3, E1
|
|
movdqa ABCD, E0
|
|
sha1rnds4 $3, E1, ABCD
|
|
|
|
/* Add current hash values with previously saved */
|
|
sha1nexte (0*16)(%rsp), E0
|
|
paddd (1*16)(%rsp), ABCD
|
|
|
|
/* Increment data pointer and loop if more to process */
|
|
add $64, DATA_PTR
|
|
cmp NUM_BLKS, DATA_PTR
|
|
jne .Lloop0
|
|
|
|
/* Write hash values back in the correct order */
|
|
pshufd $0x1B, ABCD, ABCD
|
|
movdqu ABCD, 0*16(DIGEST_PTR)
|
|
pextrd $3, E0, 1*16(DIGEST_PTR)
|
|
|
|
.Ldone_hash:
|
|
mov %rbp, %rsp
|
|
pop %rbp
|
|
|
|
RET
|
|
SYM_FUNC_END(sha1_ni_transform)
|
|
|
|
.section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
|
|
.align 16
|
|
PSHUFFLE_BYTE_FLIP_MASK:
|
|
.octa 0x000102030405060708090a0b0c0d0e0f
|
|
|
|
.section .rodata.cst16.UPPER_WORD_MASK, "aM", @progbits, 16
|
|
.align 16
|
|
UPPER_WORD_MASK:
|
|
.octa 0xFFFFFFFF000000000000000000000000
|