mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-24 05:04:00 +08:00
333c5ae994
Thanks to the reviews and comments by Rafael, James, Mark and Andi. Here's version 2 of the patch incorporating your comments and also some update to my previous patch comments. I noticed that before entering idle state, the menu idle governor will look up the current pm_qos target value according to the list of qos requests received. This look up currently needs the acquisition of a lock to access the list of qos requests to find the qos target value, slowing down the entrance into idle state due to contention by multiple cpus to access this list. The contention is severe when there are a lot of cpus waking and going into idle. For example, for a simple workload that has 32 pairs of processes ping-ponging messages to each other, where 64 cpu cores are active in the test system, I see the following profile with 37.82% of cpu cycles spent in contention of pm_qos_lock: - 37.82% swapper [kernel.kallsyms] [k] _raw_spin_lock_irqsave - _raw_spin_lock_irqsave - 95.65% pm_qos_request menu_select cpuidle_idle_call - cpu_idle 99.98% start_secondary A better approach will be to cache the updated pm_qos target value so reading it does not require lock acquisition as in the patch below. With this patch the contention for pm_qos_lock is removed and I saw a 2.2X increase in throughput for my message passing workload. cc: stable@kernel.org Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com> Acked-by: Andi Kleen <ak@linux.intel.com> Acked-by: James Bottomley <James.Bottomley@suse.de> Acked-by: mark gross <markgross@thegnar.org> Signed-off-by: Len Brown <len.brown@intel.com>
39 lines
1.2 KiB
C
39 lines
1.2 KiB
C
#ifndef _LINUX_PM_QOS_PARAMS_H
#define _LINUX_PM_QOS_PARAMS_H

/* interface for the pm_qos_power infrastructure of the linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/miscdevice.h>

/* PM QoS class identifiers (index into the per-class request lists). */
#define PM_QOS_RESERVED 0
#define PM_QOS_CPU_DMA_LATENCY 1
#define PM_QOS_NETWORK_LATENCY 2
#define PM_QOS_NETWORK_THROUGHPUT 3

/* Total number of classes above, including the reserved slot 0. */
#define PM_QOS_NUM_CLASSES 4
/* Value meaning "use the class's default target". */
#define PM_QOS_DEFAULT_VALUE -1

/* Per-class default target values when no request constrains the class. */
#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0

/*
 * Handle identifying one QoS request.  Callers embed or allocate this and
 * pass it to the add/update/remove functions below; the plist node links
 * the request into its class's priority-ordered list.
 */
struct pm_qos_request_list {
	struct plist_node list;		/* node in the class's plist */
	int pm_qos_class;		/* PM_QOS_* class of this request */
};

/*
 * Register, change, or withdraw a QoS request.  Each call may change the
 * aggregate target value reported by pm_qos_request() for the class.
 */
void pm_qos_add_request(struct pm_qos_request_list *l, int pm_qos_class, s32 value);
void pm_qos_update_request(struct pm_qos_request_list *pm_qos_req,
			   s32 new_value);
void pm_qos_remove_request(struct pm_qos_request_list *pm_qos_req);

/*
 * pm_qos_request() returns the current aggregate target for the class
 * (per the accompanying patch, read from a cached value so hot paths such
 * as the idle governor need not take the request-list lock).
 */
int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
/* Non-zero while @req has been added and not yet removed. */
int pm_qos_request_active(struct pm_qos_request_list *req);

#endif