
Merge tag 'x86_splitlock_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 splitlock updates from Borislav Petkov:

 - Add Raptor Lake to the set of CPU models which support splitlock

 - Make life miserable for apps using split locks by slowing them down
   considerably while the rest of the system remains responsive. The
   hope is that it will hurt more and people will really fix their apps
   with misaligned locks (see the example below). As a result, a TIF bit
   is freed up.

* tag 'x86_splitlock_for_v5.19_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/split_lock: Enable the split lock feature on Raptor Lake
  x86/split-lock: Remove unused TIF_SLD bit
  x86/split_lock: Make life miserable for split lockers
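
For background, a "split lock" is a LOCK-prefixed (atomic) read-modify-write whose
operand straddles two cache lines. The CPU cannot service it with ordinary cache
locking and instead asserts a system-wide bus lock, stalling every other core.
Below is a minimal, illustrative user-space trigger. It is not part of this series:
the file name, buffer layout and the 64-byte cache-line assumption are the editor's,
and the misaligned atomic access is deliberately undefined behavior in C, i.e.
exactly the kind of application bug split lock detection is meant to flag.

/* split_lock_demo.c -- illustrative only; build: gcc -O2 -o split_lock_demo split_lock_demo.c */
#include <stdio.h>

int main(void)
{
	/* 64-byte aligned buffer; current x86 parts use 64-byte cache lines */
	static _Alignas(64) unsigned char buf[128];

	/* 4-byte operand starting 2 bytes before a cache-line boundary */
	unsigned int *p = (unsigned int *)(buf + 62);

	/* each iteration is a LOCK-ed read-modify-write spanning two cache lines */
	for (int i = 0; i < 1000; i++)
		__atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST);

	printf("%u\n", *p);
	return 0;
}

With the default split_lock_detect=warn policy such an access raises #AC. Before
this series the offending task only got a ratelimited warning while detection was
toggled per task via TIF_SLD; with it, each trap additionally costs the task a 10ms
interruptible sleep and a pass through the global buslock_sem before detection is
re-enabled by delayed work, as the arch/x86/kernel/cpu/intel.c changes below show.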
Linus Torvalds 2022-05-23 19:24:47 -07:00
commit 3e2cbc016b
6 changed files with 63 additions and 30 deletions

arch/x86/include/asm/cpu.h

@@ -45,14 +45,12 @@ unsigned int x86_model(unsigned int sig);
 unsigned int x86_stepping(unsigned int sig);
 #ifdef CONFIG_CPU_SUP_INTEL
 extern void __init sld_setup(struct cpuinfo_x86 *c);
-extern void switch_to_sld(unsigned long tifn);
 extern bool handle_user_split_lock(struct pt_regs *regs, long error_code);
 extern bool handle_guest_split_lock(unsigned long ip);
 extern void handle_bus_lock(struct pt_regs *regs);
 u8 get_this_hybrid_cpu_type(void);
 #else
 static inline void __init sld_setup(struct cpuinfo_x86 *c) {}
-static inline void switch_to_sld(unsigned long tifn) {}
 static inline bool handle_user_split_lock(struct pt_regs *regs, long error_code)
 {
 	return false;

arch/x86/include/asm/thread_info.h

@@ -92,7 +92,6 @@ struct thread_info {
 #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */
 #define TIF_NOTSC 16 /* TSC is not accessible in userland */
 #define TIF_NOTIFY_SIGNAL 17 /* signal notifications exist */
-#define TIF_SLD 18 /* Restore split lock detection on context switch */
 #define TIF_MEMDIE 20 /* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */
 #define TIF_IO_BITMAP 22 /* uses I/O bitmap */
@@ -116,7 +115,6 @@ struct thread_info {
 #define _TIF_NOCPUID (1 << TIF_NOCPUID)
 #define _TIF_NOTSC (1 << TIF_NOTSC)
 #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
-#define _TIF_SLD (1 << TIF_SLD)
 #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
 #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP)
 #define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE)
@@ -128,7 +126,7 @@ struct thread_info {
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW_BASE \
 	(_TIF_NOCPUID | _TIF_NOTSC | _TIF_BLOCKSTEP | \
-	 _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE | _TIF_SLD)
+	 _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE)
 /*
  * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated.

arch/x86/kernel/cpu/intel.c

@@ -7,10 +7,13 @@
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
+#include <linux/semaphore.h>
 #include <linux/thread_info.h>
 #include <linux/init.h>
 #include <linux/uaccess.h>
+#include <linux/workqueue.h>
 #include <linux/delay.h>
+#include <linux/cpuhotplug.h>
 #include <asm/cpufeature.h>
 #include <asm/msr.h>
@@ -999,6 +1002,8 @@ static const struct {
 static struct ratelimit_state bld_ratelimit;
+static DEFINE_SEMAPHORE(buslock_sem);
 static inline bool match_option(const char *arg, int arglen, const char *opt)
 {
 	int len = strlen(opt), ratelimit;
@@ -1109,18 +1114,52 @@ static void split_lock_init(void)
 	split_lock_verify_msr(sld_state != sld_off);
 }
+static void __split_lock_reenable(struct work_struct *work)
+{
+	sld_update_msr(true);
+	up(&buslock_sem);
+}
+/*
+ * If a CPU goes offline with pending delayed work to re-enable split lock
+ * detection then the delayed work will be executed on some other CPU. That
+ * handles releasing the buslock_sem, but because it executes on a
+ * different CPU probably won't re-enable split lock detection. This is a
+ * problem on HT systems since the sibling CPU on the same core may then be
+ * left running with split lock detection disabled.
+ *
+ * Unconditionally re-enable detection here.
+ */
+static int splitlock_cpu_offline(unsigned int cpu)
+{
+	sld_update_msr(true);
+	return 0;
+}
+static DECLARE_DELAYED_WORK(split_lock_reenable, __split_lock_reenable);
 static void split_lock_warn(unsigned long ip)
 {
-	pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
-			    current->comm, current->pid, ip);
+	int cpu;
-	/*
-	 * Disable the split lock detection for this task so it can make
-	 * progress and set TIF_SLD so the detection is re-enabled via
-	 * switch_to_sld() when the task is scheduled out.
-	 */
+	if (!current->reported_split_lock)
+		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
+				    current->comm, current->pid, ip);
+	current->reported_split_lock = 1;
+	/* misery factor #1, sleep 10ms before trying to execute split lock */
+	if (msleep_interruptible(10) > 0)
+		return;
+	/* Misery factor #2, only allow one buslocked disabled core at a time */
+	if (down_interruptible(&buslock_sem) == -EINTR)
+		return;
+	cpu = get_cpu();
+	schedule_delayed_work_on(cpu, &split_lock_reenable, 2);
+	/* Disable split lock detection on this CPU to make progress */
 	sld_update_msr(false);
-	set_tsk_thread_flag(current, TIF_SLD);
+	put_cpu();
 }
 bool handle_guest_split_lock(unsigned long ip)
@@ -1193,18 +1232,6 @@ void handle_bus_lock(struct pt_regs *regs)
 	}
 }
-/*
- * This function is called only when switching between tasks with
- * different split-lock detection modes. It sets the MSR for the
- * mode of the new task. This is right most of the time, but since
- * the MSR is shared by hyperthreads on a physical core there can
- * be glitches when the two threads need different modes.
- */
-void switch_to_sld(unsigned long tifn)
-{
-	sld_update_msr(!(tifn & _TIF_SLD));
-}
 /*
  * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
  * only be trusted if it is confirmed that a CPU model implements a
@@ -1230,6 +1257,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1),
 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1),
 	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 1),
+	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, 1),
 	{}
 };
@@ -1274,10 +1302,14 @@ static void sld_state_show(void)
 		pr_info("disabled\n");
 		break;
 	case sld_warn:
-		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
+		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
 			pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
-		else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
+			if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+					      "x86/splitlock", NULL, splitlock_cpu_offline) < 0)
+				pr_warn("No splitlock CPU offline handler\n");
+		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
 			pr_info("#DB: warning on user-space bus_locks\n");
+		}
 		break;
 	case sld_fatal:
 		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {

arch/x86/kernel/process.c

@@ -684,9 +684,6 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
 		/* Enforce MSR update to ensure consistent state */
 		__speculation_ctrl_update(~tifn, tifn);
 	}
-	if ((tifp ^ tifn) & _TIF_SLD)
-		switch_to_sld(tifn);
 }
 /*

include/linux/sched.h

@@ -941,6 +941,9 @@ struct task_struct {
 #ifdef CONFIG_IOMMU_SVA
 	unsigned pasid_activated:1;
 #endif
+#ifdef CONFIG_CPU_SUP_INTEL
+	unsigned reported_split_lock:1;
+#endif
 	unsigned long atomic_flags; /* Flags requiring atomic access. */

kernel/fork.c

@@ -1046,6 +1046,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 #ifdef CONFIG_MEMCG
 	tsk->active_memcg = NULL;
 #endif
+#ifdef CONFIG_CPU_SUP_INTEL
+	tsk->reported_split_lock = 0;
+#endif
 	return tsk;
 free_stack: