Drivers: hv: vmbus: Improve the CPU affiliation for channels
The current code tracks the assigned CPUs within a NUMA node in the context of
the primary channel. So, if we have a VM with a single NUMA node with 8 VCPUs,
we may end up unevenly distributing the channel load. Fix the issue by tracking
affiliations globally.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 3546448338
commit 9f01ec5345
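Before the diff: the essence of the change is that the record of which CPUs in a NUMA node have already been handed out moves from each primary channel (so every device effectively restarted its own count from CPU 0) into a single per-node map, hv_numa_map, shared by all channels. The userspace sketch below only illustrates that round-robin bookkeeping; node_alloced, node_cpus() and pick_cpu_in_node() are made-up names, and plain unsigned bitmasks stand in for the kernel's struct cpumask API.

	#include <stdio.h>

	#define MAX_NODES	4
	#define CPUS_PER_NODE	8	/* assume 8 VCPUs per node, as in the commit message */

	/*
	 * One allocation map per NUMA node, shared by every channel (the fix),
	 * instead of one map per primary channel (the old behaviour).
	 */
	static unsigned int node_alloced[MAX_NODES];

	/* Mask of the CPUs that belong to a node; stand-in for cpumask_of_node(). */
	static unsigned int node_cpus(int node)
	{
		return ((1u << CPUS_PER_NODE) - 1) << (node * CPUS_PER_NODE);
	}

	/*
	 * Pick the next CPU of 'node' that has not been handed out yet,
	 * cycling once every CPU in the node has been used.
	 */
	static int pick_cpu_in_node(int node)
	{
		unsigned int available;
		int cpu;

		/* All CPUs in the node used: reset the map, like cpumask_clear(). */
		if (node_alloced[node] == node_cpus(node))
			node_alloced[node] = 0;

		/*
		 * CPUs in the node minus the ones already allocated, like
		 * cpumask_xor(&available_mask, alloced_mask, cpumask_of_node(node)).
		 */
		available = node_cpus(node) & ~node_alloced[node];

		/* First set bit, like cpumask_next(-1, &available_mask). */
		for (cpu = 0; cpu < MAX_NODES * CPUS_PER_NODE; cpu++)
			if (available & (1u << cpu))
				break;

		node_alloced[node] |= 1u << cpu;	/* cpumask_set_cpu() */
		return cpu;
	}

	int main(void)
	{
		int i;

		/*
		 * Ten channels on node 0: CPUs 0..7 are used once each, then the
		 * map resets and CPUs 0 and 1 are reused.
		 */
		for (i = 0; i < 10; i++)
			printf("channel %d -> cpu %d\n", i, pick_cpu_in_node(0));
		return 0;
	}

Compiled as plain C, the loop in main() assigns ten channels on node 0 to CPUs 0 through 7 and then 0 and 1 again, which is the even spread the commit message is after.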
drivers/hv/channel_mgmt.c

@@ -392,6 +392,7 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
 	struct vmbus_channel *primary = channel->primary_channel;
 	int next_node;
 	struct cpumask available_mask;
+	struct cpumask *alloced_mask;
 
 	for (i = IDE; i < MAX_PERF_CHN; i++) {
 		if (!memcmp(type_guid->b, hp_devs[i].guid,
@@ -409,7 +410,6 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
 		 * channel, bind it to cpu 0.
 		 */
 		channel->numa_node = 0;
-		cpumask_set_cpu(0, &channel->alloced_cpus_in_node);
 		channel->target_cpu = 0;
 		channel->target_vp = hv_context.vp_index[0];
 		return;
@@ -434,21 +434,22 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
 		channel->numa_node = next_node;
 		primary = channel;
 	}
+	alloced_mask = &hv_context.hv_numa_map[primary->numa_node];
 
-	if (cpumask_weight(&primary->alloced_cpus_in_node) ==
+	if (cpumask_weight(alloced_mask) ==
 	    cpumask_weight(cpumask_of_node(primary->numa_node))) {
 		/*
 		 * We have cycled through all the CPUs in the node;
 		 * reset the alloced map.
 		 */
-		cpumask_clear(&primary->alloced_cpus_in_node);
+		cpumask_clear(alloced_mask);
 	}
 
-	cpumask_xor(&available_mask, &primary->alloced_cpus_in_node,
+	cpumask_xor(&available_mask, alloced_mask,
 		    cpumask_of_node(primary->numa_node));
 
 	cur_cpu = cpumask_next(-1, &available_mask);
-	cpumask_set_cpu(cur_cpu, &primary->alloced_cpus_in_node);
+	cpumask_set_cpu(cur_cpu, alloced_mask);
 
 	channel->target_cpu = cur_cpu;
 	channel->target_vp = hv_context.vp_index[cur_cpu];
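To put numbers on the selection above: with, say, a 4-CPU node (CPUs 0-3) whose alloced_mask already holds CPUs 0 and 1, cpumask_xor() against cpumask_of_node() leaves {2, 3} in available_mask, cpumask_next(-1, &available_mask) returns CPU 2, and cpumask_set_cpu() records it in the shared per-node map; once the cpumask_weight() check sees all four CPUs used, cpumask_clear() resets the map and the cycle starts again (illustrative CPU counts, not taken from the commit).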
drivers/hv/hv.c

@@ -332,6 +332,13 @@ int hv_synic_alloc(void)
 	size_t ced_size = sizeof(struct clock_event_device);
 	int cpu;
 
+	hv_context.hv_numa_map = kzalloc(sizeof(struct cpumask) * nr_node_ids,
+					 GFP_ATOMIC);
+	if (hv_context.hv_numa_map == NULL) {
+		pr_err("Unable to allocate NUMA map\n");
+		goto err;
+	}
+
 	for_each_online_cpu(cpu) {
 		hv_context.event_dpc[cpu] = kmalloc(size, GFP_ATOMIC);
 		if (hv_context.event_dpc[cpu] == NULL) {
@@ -345,6 +352,7 @@ int hv_synic_alloc(void)
 			pr_err("Unable to allocate clock event device\n");
 			goto err;
 		}
+
 		hv_init_clockevent_device(hv_context.clk_evt[cpu], cpu);
 
 		hv_context.synic_message_page[cpu] =
@@ -393,6 +401,7 @@ void hv_synic_free(void)
 {
 	int cpu;
 
+	kfree(hv_context.hv_numa_map);
 	for_each_online_cpu(cpu)
 		hv_synic_free_cpu(cpu);
 }
drivers/hv/hyperv_vmbus.h

@@ -551,6 +551,11 @@ struct hv_context {
 	 * Support PV clockevent device.
 	 */
 	struct clock_event_device *clk_evt[NR_CPUS];
+	/*
+	 * To manage allocations in a NUMA node.
+	 * Array indexed by numa node ID.
+	 */
+	struct cpumask *hv_numa_map;
 };
 
 extern struct hv_context hv_context;
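Note on the new field above: hv_numa_map is allocated in hv_synic_alloc() with one struct cpumask per node (nr_node_ids entries) and, being kzalloc()ed, starts out all-zero, so every node begins with an empty allocation map.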
include/linux/hyperv.h

@@ -699,7 +699,6 @@ struct vmbus_channel {
 	/*
 	 * State to manage the CPU affiliation of channels.
 	 */
-	struct cpumask alloced_cpus_in_node;
 	int numa_node;
 	/*
 	 * Support for sub-channels. For high performance devices,