Drivers: hv: vmbus: Don't assign VMbus channel interrupts to isolated CPUs
When initially assigning a VMbus channel interrupt to a CPU, don't
choose a managed IRQ isolated CPU (as specified on the kernel boot
line with parameter 'isolcpus=managed_irq,<#cpu>'). Also, when using
sysfs to change the CPU that a VMbus channel will interrupt, don't
allow changing to a managed IRQ isolated CPU.

Signed-off-by: Saurabh Sengar <ssengar@linux.microsoft.com>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lore.kernel.org/r/1653637439-23060-1-git-send-email-ssengar@linux.microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
commit 6640b5df1a (parent f2906aa863)
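Both parts of the patch hinge on the same test: a CPU excluded via
'isolcpus=managed_irq,<#cpu>' is absent from the HK_TYPE_MANAGED_IRQ
housekeeping mask, so a cpumask membership check tells the driver
whether a CPU may be used as an interrupt target. A minimal
kernel-style sketch of that test follows; the helper name
vmbus_cpu_is_hk() is chosen here for illustration and is not part of
the patch:

#include <linux/cpumask.h>
#include <linux/sched/isolation.h>

/*
 * Illustrative helper: a CPU is an acceptable VMbus interrupt target
 * only if it is still present in the managed-IRQ housekeeping mask,
 * i.e. it was not listed in isolcpus=managed_irq,<#cpu> on the kernel
 * boot line.
 */
static bool vmbus_cpu_is_hk(unsigned int cpu)
{
	return cpumask_test_cpu(cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ));
}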
@@ -21,6 +21,7 @@
 #include <linux/cpu.h>
 #include <linux/hyperv.h>
 #include <asm/mshyperv.h>
+#include <linux/sched/isolation.h>
 
 #include "hyperv_vmbus.h"
 
@@ -728,16 +729,20 @@ static void init_vp_index(struct vmbus_channel *channel)
 	u32 i, ncpu = num_online_cpus();
 	cpumask_var_t available_mask;
 	struct cpumask *allocated_mask;
+	const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ);
 	u32 target_cpu;
 	int numa_node;
 
 	if (!perf_chn ||
-	    !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
+	    !alloc_cpumask_var(&available_mask, GFP_KERNEL) ||
+	    cpumask_empty(hk_mask)) {
 		/*
 		 * If the channel is not a performance critical
 		 * channel, bind it to VMBUS_CONNECT_CPU.
 		 * In case alloc_cpumask_var() fails, bind it to
 		 * VMBUS_CONNECT_CPU.
+		 * If all the cpus are isolated, bind it to
+		 * VMBUS_CONNECT_CPU.
 		 */
 		channel->target_cpu = VMBUS_CONNECT_CPU;
 		if (perf_chn)
@@ -758,17 +763,19 @@ static void init_vp_index(struct vmbus_channel *channel)
 	}
 	allocated_mask = &hv_context.hv_numa_map[numa_node];
 
-	if (cpumask_equal(allocated_mask, cpumask_of_node(numa_node))) {
+retry:
+	cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node));
+	cpumask_and(available_mask, available_mask, hk_mask);
+
+	if (cpumask_empty(available_mask)) {
 		/*
 		 * We have cycled through all the CPUs in the node;
 		 * reset the allocated map.
 		 */
 		cpumask_clear(allocated_mask);
+		goto retry;
 	}
 
-	cpumask_xor(available_mask, allocated_mask,
-		    cpumask_of_node(numa_node));
-
 	target_cpu = cpumask_first(available_mask);
 	cpumask_set_cpu(target_cpu, allocated_mask);
 
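Because the diff interleaves old and new lines, here is roughly how
the modified selection path in init_vp_index() reads once the hunks
above are applied (a sketch reconstructed from the diff, not a
verbatim copy of the source file):

	allocated_mask = &hv_context.hv_numa_map[numa_node];

retry:
	/* Candidates: CPUs in this NUMA node not yet handed out... */
	cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node));
	/* ...and not isolated from managed IRQs. */
	cpumask_and(available_mask, available_mask, hk_mask);

	if (cpumask_empty(available_mask)) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the allocated map and try again.
		 */
		cpumask_clear(allocated_mask);
		goto retry;
	}

	target_cpu = cpumask_first(available_mask);
	cpumask_set_cpu(target_cpu, allocated_mask);

The remaining hunks below are from a second source file, the one that
implements the target_cpu_store() sysfs handler.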
@@ -21,6 +21,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/clockchips.h>
 #include <linux/cpu.h>
+#include <linux/sched/isolation.h>
 #include <linux/sched/task_stack.h>
 
 #include <linux/delay.h>
@@ -1770,6 +1771,9 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel,
 	if (target_cpu >= nr_cpumask_bits)
 		return -EINVAL;
 
+	if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
+		return -EINVAL;
+
 	/* No CPUs should come up or down during this. */
 	cpus_read_lock();
 
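This second hunk makes the sysfs path symmetric with the initial
assignment: target_cpu_store() now rejects a managed-IRQ isolated CPU
before taking the hotplug lock, so (for example) after booting with
isolcpus=managed_irq,2-3, an attempt to retarget a channel onto CPU 2
through its per-channel target-CPU attribute fails with -EINVAL.
Post-patch, the validated portion reads roughly as follows (a sketch
with an added descriptive comment, not a verbatim copy):

	if (target_cpu >= nr_cpumask_bits)
		return -EINVAL;

	/* Refuse to move the channel interrupt onto a managed-IRQ isolated CPU. */
	if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ)))
		return -EINVAL;

	/* No CPUs should come up or down during this. */
	cpus_read_lock();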