mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-25 05:04:09 +08:00
Merge branch 'pci/pm'
- Cache the PTM capability offset instead of searching for it every time (Bjorn Helgaas)
- Separate PTM configuration from PTM enable (Bjorn Helgaas)
- Add pci_suspend_ptm() and pci_resume_ptm() to disable and re-enable PTM on suspend/resume so some Root Ports can safely enter a lower-power PM state (Bjorn Helgaas)
- Disable PTM for all devices during suspend; previously we only did this for Root Ports and even then only in certain cases (Bjorn Helgaas)
- Simplify pci_pm_suspend_noirq() (Rajvi Jingar)
- Reduce the delay after transitions to/from D3hot by using usleep_range() instead of msleep(), which reduces the typical delay from 19ms to 10ms (Sajid Dalvi, Will McVicker)

* pci/pm:
  PCI/PM: Reduce D3hot delay with usleep_range()
  PCI/PM: Simplify pci_pm_suspend_noirq()
  PCI/PM: Always disable PTM for all devices during suspend
  PCI/PTM: Consolidate PTM interface declarations
  PCI/PTM: Reorder functions in logical order
  PCI/PTM: Preserve RsvdP bits in PTM Control register
  PCI/PTM: Move pci_ptm_info() body into its only caller
  PCI/PTM: Add pci_suspend_ptm() and pci_resume_ptm()
  PCI/PTM: Separate configuration and enable
  PCI/PTM: Add pci_upstream_ptm() helper
  PCI/PTM: Cache PTM Capability offset
This commit is contained in:
commit
034f93fcb1
@ -774,6 +774,12 @@ static int pci_pm_suspend(struct device *dev)
|
||||
|
||||
pci_dev->skip_bus_pm = false;
|
||||
|
||||
/*
|
||||
* Disabling PTM allows some systems, e.g., Intel mobile chips
|
||||
* since Coffee Lake, to enter a lower-power PM state.
|
||||
*/
|
||||
pci_suspend_ptm(pci_dev);
|
||||
|
||||
if (pci_has_legacy_pm_support(pci_dev))
|
||||
return pci_legacy_suspend(dev, PMSG_SUSPEND);
|
||||
|
||||
@ -867,20 +873,15 @@ static int pci_pm_suspend_noirq(struct device *dev)
|
||||
}
|
||||
}
|
||||
|
||||
if (pci_dev->skip_bus_pm) {
|
||||
/*
|
||||
* Either the device is a bridge with a child in D0 below it, or
|
||||
* the function is running for the second time in a row without
|
||||
* going through full resume, which is possible only during
|
||||
* suspend-to-idle in a spurious wakeup case. The device should
|
||||
* be in D0 at this point, but if it is a bridge, it may be
|
||||
* necessary to save its state.
|
||||
*/
|
||||
if (!pci_dev->state_saved)
|
||||
pci_save_state(pci_dev);
|
||||
} else if (!pci_dev->state_saved) {
|
||||
if (!pci_dev->state_saved) {
|
||||
pci_save_state(pci_dev);
|
||||
if (pci_power_manageable(pci_dev))
|
||||
|
||||
/*
|
||||
* If the device is a bridge with a child in D0 below it,
|
||||
* it needs to stay in D0, so check skip_bus_pm to avoid
|
||||
* putting it into a low-power state in that case.
|
||||
*/
|
||||
if (!pci_dev->skip_bus_pm && pci_power_manageable(pci_dev))
|
||||
pci_prepare_to_sleep(pci_dev);
|
||||
}
|
||||
|
||||
@ -987,6 +988,8 @@ static int pci_pm_resume(struct device *dev)
|
||||
if (pci_dev->state_saved)
|
||||
pci_restore_standard_config(pci_dev);
|
||||
|
||||
pci_resume_ptm(pci_dev);
|
||||
|
||||
if (pci_has_legacy_pm_support(pci_dev))
|
||||
return pci_legacy_resume(dev);
|
||||
|
||||
@ -1274,6 +1277,8 @@ static int pci_pm_runtime_suspend(struct device *dev)
|
||||
pci_power_t prev = pci_dev->current_state;
|
||||
int error;
|
||||
|
||||
pci_suspend_ptm(pci_dev);
|
||||
|
||||
/*
|
||||
* If pci_dev->driver is not set (unbound), we leave the device in D0,
|
||||
* but it may go to D3cold when the bridge above it runtime suspends.
|
||||
@ -1335,6 +1340,7 @@ static int pci_pm_runtime_resume(struct device *dev)
|
||||
* D3cold when the bridge above it runtime suspended.
|
||||
*/
|
||||
pci_pm_default_resume_early(pci_dev);
|
||||
pci_resume_ptm(pci_dev);
|
||||
|
||||
if (!pci_dev->driver)
|
||||
return 0;
|
||||
|
@ -66,13 +66,15 @@ struct pci_pme_device {
|
||||
|
||||
static void pci_dev_d3_sleep(struct pci_dev *dev)
|
||||
{
|
||||
unsigned int delay = dev->d3hot_delay;
|
||||
unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
|
||||
unsigned int upper;
|
||||
|
||||
if (delay < pci_pm_d3hot_delay)
|
||||
delay = pci_pm_d3hot_delay;
|
||||
|
||||
if (delay)
|
||||
msleep(delay);
|
||||
if (delay_ms) {
|
||||
/* Use a 20% upper bound, 1ms minimum */
|
||||
upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
|
||||
usleep_range(delay_ms * USEC_PER_MSEC,
|
||||
(delay_ms + upper) * USEC_PER_MSEC);
|
||||
}
|
||||
}
|
||||
|
||||
bool pci_reset_supported(struct pci_dev *dev)
|
||||
@ -2708,24 +2710,12 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
|
||||
if (target_state == PCI_POWER_ERROR)
|
||||
return -EIO;
|
||||
|
||||
/*
|
||||
* There are systems (for example, Intel mobile chips since Coffee
|
||||
* Lake) where the power drawn while suspended can be significantly
|
||||
* reduced by disabling PTM on PCIe root ports as this allows the
|
||||
* port to enter a lower-power PM state and the SoC to reach a
|
||||
* lower-power idle state as a whole.
|
||||
*/
|
||||
if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
|
||||
pci_disable_ptm(dev);
|
||||
|
||||
pci_enable_wake(dev, target_state, wakeup);
|
||||
|
||||
error = pci_set_power_state(dev, target_state);
|
||||
|
||||
if (error) {
|
||||
if (error)
|
||||
pci_enable_wake(dev, target_state, false);
|
||||
pci_restore_ptm_state(dev);
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
@ -2766,24 +2756,12 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
|
||||
if (target_state == PCI_POWER_ERROR)
|
||||
return -EIO;
|
||||
|
||||
/*
|
||||
* There are systems (for example, Intel mobile chips since Coffee
|
||||
* Lake) where the power drawn while suspended can be significantly
|
||||
* reduced by disabling PTM on PCIe root ports as this allows the
|
||||
* port to enter a lower-power PM state and the SoC to reach a
|
||||
* lower-power idle state as a whole.
|
||||
*/
|
||||
if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
|
||||
pci_disable_ptm(dev);
|
||||
|
||||
__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
|
||||
|
||||
error = pci_set_power_state(dev, target_state);
|
||||
|
||||
if (error) {
|
||||
if (error)
|
||||
pci_enable_wake(dev, target_state, false);
|
||||
pci_restore_ptm_state(dev);
|
||||
}
|
||||
|
||||
return error;
|
||||
}
|
||||
|
@ -505,13 +505,17 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
|
||||
#endif /* CONFIG_PCI_IOV */
|
||||
|
||||
#ifdef CONFIG_PCIE_PTM
|
||||
void pci_ptm_init(struct pci_dev *dev);
|
||||
void pci_save_ptm_state(struct pci_dev *dev);
|
||||
void pci_restore_ptm_state(struct pci_dev *dev);
|
||||
void pci_disable_ptm(struct pci_dev *dev);
|
||||
void pci_suspend_ptm(struct pci_dev *dev);
|
||||
void pci_resume_ptm(struct pci_dev *dev);
|
||||
#else
|
||||
static inline void pci_ptm_init(struct pci_dev *dev) { }
|
||||
static inline void pci_save_ptm_state(struct pci_dev *dev) { }
|
||||
static inline void pci_restore_ptm_state(struct pci_dev *dev) { }
|
||||
static inline void pci_disable_ptm(struct pci_dev *dev) { }
|
||||
static inline void pci_suspend_ptm(struct pci_dev *dev) { }
|
||||
static inline void pci_resume_ptm(struct pci_dev *dev) { }
|
||||
#endif
|
||||
|
||||
unsigned long pci_cardbus_resource_alignment(struct resource *);
|
||||
@ -579,12 +583,6 @@ static inline void pcie_set_ecrc_checking(struct pci_dev *dev) { }
|
||||
static inline void pcie_ecrc_get_policy(char *str) { }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PCIE_PTM
|
||||
void pci_ptm_init(struct pci_dev *dev);
|
||||
#else
|
||||
static inline void pci_ptm_init(struct pci_dev *dev) { }
|
||||
#endif
|
||||
|
||||
struct pci_dev_reset_methods {
|
||||
u16 vendor;
|
||||
u16 device;
|
||||
|
@ -9,10 +9,176 @@
|
||||
#include <linux/pci.h>
|
||||
#include "../pci.h"
|
||||
|
||||
static void pci_ptm_info(struct pci_dev *dev)
|
||||
/*
|
||||
* If the next upstream device supports PTM, return it; otherwise return
|
||||
* NULL. PTM Messages are local, so both link partners must support it.
|
||||
*/
|
||||
static struct pci_dev *pci_upstream_ptm(struct pci_dev *dev)
|
||||
{
|
||||
struct pci_dev *ups = pci_upstream_bridge(dev);
|
||||
|
||||
/*
|
||||
* Switch Downstream Ports are not permitted to have a PTM
|
||||
* capability; their PTM behavior is controlled by the Upstream
|
||||
* Port (PCIe r5.0, sec 7.9.16), so if the upstream bridge is a
|
||||
* Switch Downstream Port, look up one more level.
|
||||
*/
|
||||
if (ups && pci_pcie_type(ups) == PCI_EXP_TYPE_DOWNSTREAM)
|
||||
ups = pci_upstream_bridge(ups);
|
||||
|
||||
if (ups && ups->ptm_cap)
|
||||
return ups;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Find the PTM Capability (if present) and extract the information we need
|
||||
* to use it.
|
||||
*/
|
||||
void pci_ptm_init(struct pci_dev *dev)
|
||||
{
|
||||
u16 ptm;
|
||||
u32 cap;
|
||||
struct pci_dev *ups;
|
||||
|
||||
if (!pci_is_pcie(dev))
|
||||
return;
|
||||
|
||||
ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
|
||||
if (!ptm)
|
||||
return;
|
||||
|
||||
dev->ptm_cap = ptm;
|
||||
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u32));
|
||||
|
||||
pci_read_config_dword(dev, ptm + PCI_PTM_CAP, &cap);
|
||||
dev->ptm_granularity = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
|
||||
|
||||
/*
|
||||
* Per the spec recommendation (PCIe r6.0, sec 7.9.15.3), select the
|
||||
* furthest upstream Time Source as the PTM Root. For Endpoints,
|
||||
* "the Effective Granularity is the maximum Local Clock Granularity
|
||||
* reported by the PTM Root and all intervening PTM Time Sources."
|
||||
*/
|
||||
ups = pci_upstream_ptm(dev);
|
||||
if (ups) {
|
||||
if (ups->ptm_granularity == 0)
|
||||
dev->ptm_granularity = 0;
|
||||
else if (ups->ptm_granularity > dev->ptm_granularity)
|
||||
dev->ptm_granularity = ups->ptm_granularity;
|
||||
} else if (cap & PCI_PTM_CAP_ROOT) {
|
||||
dev->ptm_root = 1;
|
||||
} else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
|
||||
|
||||
/*
|
||||
* Per sec 7.9.15.3, this should be the Local Clock
|
||||
* Granularity of the associated Time Source. But it
|
||||
* doesn't say how to find that Time Source.
|
||||
*/
|
||||
dev->ptm_granularity = 0;
|
||||
}
|
||||
|
||||
if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT ||
|
||||
pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM)
|
||||
pci_enable_ptm(dev, NULL);
|
||||
}
|
||||
|
||||
void pci_save_ptm_state(struct pci_dev *dev)
|
||||
{
|
||||
u16 ptm = dev->ptm_cap;
|
||||
struct pci_cap_saved_state *save_state;
|
||||
u32 *cap;
|
||||
|
||||
if (!ptm)
|
||||
return;
|
||||
|
||||
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
|
||||
if (!save_state)
|
||||
return;
|
||||
|
||||
cap = (u32 *)&save_state->cap.data[0];
|
||||
pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, cap);
|
||||
}
|
||||
|
||||
void pci_restore_ptm_state(struct pci_dev *dev)
|
||||
{
|
||||
u16 ptm = dev->ptm_cap;
|
||||
struct pci_cap_saved_state *save_state;
|
||||
u32 *cap;
|
||||
|
||||
if (!ptm)
|
||||
return;
|
||||
|
||||
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
|
||||
if (!save_state)
|
||||
return;
|
||||
|
||||
cap = (u32 *)&save_state->cap.data[0];
|
||||
pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, *cap);
|
||||
}
|
||||
|
||||
/* Enable PTM in the Control register if possible */
|
||||
static int __pci_enable_ptm(struct pci_dev *dev)
|
||||
{
|
||||
u16 ptm = dev->ptm_cap;
|
||||
struct pci_dev *ups;
|
||||
u32 ctrl;
|
||||
|
||||
if (!ptm)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* A device uses local PTM Messages to request time information
|
||||
* from a PTM Root that's farther upstream. Every device along the
|
||||
* path must support PTM and have it enabled so it can handle the
|
||||
* messages. Therefore, if this device is not a PTM Root, the
|
||||
* upstream link partner must have PTM enabled before we can enable
|
||||
* PTM.
|
||||
*/
|
||||
if (!dev->ptm_root) {
|
||||
ups = pci_upstream_ptm(dev);
|
||||
if (!ups || !ups->ptm_enabled)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
|
||||
|
||||
ctrl |= PCI_PTM_CTRL_ENABLE;
|
||||
ctrl &= ~PCI_PTM_GRANULARITY_MASK;
|
||||
ctrl |= dev->ptm_granularity << 8;
|
||||
if (dev->ptm_root)
|
||||
ctrl |= PCI_PTM_CTRL_ROOT;
|
||||
|
||||
pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* pci_enable_ptm() - Enable Precision Time Measurement
|
||||
* @dev: PCI device
|
||||
* @granularity: pointer to return granularity
|
||||
*
|
||||
* Enable Precision Time Measurement for @dev. If successful and
|
||||
* @granularity is non-NULL, return the Effective Granularity.
|
||||
*
|
||||
* Return: zero if successful, or -EINVAL if @dev lacks a PTM Capability or
|
||||
* is not a PTM Root and lacks an upstream path of PTM-enabled devices.
|
||||
*/
|
||||
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
|
||||
{
|
||||
int rc;
|
||||
char clock_desc[8];
|
||||
|
||||
rc = __pci_enable_ptm(dev);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
dev->ptm_enabled = 1;
|
||||
|
||||
if (granularity)
|
||||
*granularity = dev->ptm_granularity;
|
||||
|
||||
switch (dev->ptm_granularity) {
|
||||
case 0:
|
||||
snprintf(clock_desc, sizeof(clock_desc), "unknown");
|
||||
@ -27,182 +193,56 @@ static void pci_ptm_info(struct pci_dev *dev)
|
||||
}
|
||||
pci_info(dev, "PTM enabled%s, %s granularity\n",
|
||||
dev->ptm_root ? " (root)" : "", clock_desc);
|
||||
}
|
||||
|
||||
void pci_disable_ptm(struct pci_dev *dev)
|
||||
{
|
||||
int ptm;
|
||||
u16 ctrl;
|
||||
|
||||
if (!pci_is_pcie(dev))
|
||||
return;
|
||||
|
||||
ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
|
||||
if (!ptm)
|
||||
return;
|
||||
|
||||
pci_read_config_word(dev, ptm + PCI_PTM_CTRL, &ctrl);
|
||||
ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
|
||||
pci_write_config_word(dev, ptm + PCI_PTM_CTRL, ctrl);
|
||||
}
|
||||
|
||||
void pci_save_ptm_state(struct pci_dev *dev)
|
||||
{
|
||||
int ptm;
|
||||
struct pci_cap_saved_state *save_state;
|
||||
u16 *cap;
|
||||
|
||||
if (!pci_is_pcie(dev))
|
||||
return;
|
||||
|
||||
ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
|
||||
if (!ptm)
|
||||
return;
|
||||
|
||||
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
|
||||
if (!save_state)
|
||||
return;
|
||||
|
||||
cap = (u16 *)&save_state->cap.data[0];
|
||||
pci_read_config_word(dev, ptm + PCI_PTM_CTRL, cap);
|
||||
}
|
||||
|
||||
void pci_restore_ptm_state(struct pci_dev *dev)
|
||||
{
|
||||
struct pci_cap_saved_state *save_state;
|
||||
int ptm;
|
||||
u16 *cap;
|
||||
|
||||
if (!pci_is_pcie(dev))
|
||||
return;
|
||||
|
||||
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_PTM);
|
||||
ptm = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
|
||||
if (!save_state || !ptm)
|
||||
return;
|
||||
|
||||
cap = (u16 *)&save_state->cap.data[0];
|
||||
pci_write_config_word(dev, ptm + PCI_PTM_CTRL, *cap);
|
||||
}
|
||||
|
||||
void pci_ptm_init(struct pci_dev *dev)
|
||||
{
|
||||
int pos;
|
||||
u32 cap, ctrl;
|
||||
u8 local_clock;
|
||||
struct pci_dev *ups;
|
||||
|
||||
if (!pci_is_pcie(dev))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Enable PTM only on interior devices (root ports, switch ports,
|
||||
* etc.) on the assumption that it causes no link traffic until an
|
||||
* endpoint enables it.
|
||||
*/
|
||||
if ((pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT ||
|
||||
pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Switch Downstream Ports are not permitted to have a PTM
|
||||
* capability; their PTM behavior is controlled by the Upstream
|
||||
* Port (PCIe r5.0, sec 7.9.16).
|
||||
*/
|
||||
ups = pci_upstream_bridge(dev);
|
||||
if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM &&
|
||||
ups && ups->ptm_enabled) {
|
||||
dev->ptm_granularity = ups->ptm_granularity;
|
||||
dev->ptm_enabled = 1;
|
||||
return;
|
||||
}
|
||||
|
||||
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
|
||||
if (!pos)
|
||||
return;
|
||||
|
||||
pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_PTM, sizeof(u16));
|
||||
|
||||
pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
|
||||
local_clock = (cap & PCI_PTM_GRANULARITY_MASK) >> 8;
|
||||
|
||||
/*
|
||||
* There's no point in enabling PTM unless it's enabled in the
|
||||
* upstream device or this device can be a PTM Root itself. Per
|
||||
* the spec recommendation (PCIe r3.1, sec 7.32.3), select the
|
||||
* furthest upstream Time Source as the PTM Root.
|
||||
*/
|
||||
if (ups && ups->ptm_enabled) {
|
||||
ctrl = PCI_PTM_CTRL_ENABLE;
|
||||
if (ups->ptm_granularity == 0)
|
||||
dev->ptm_granularity = 0;
|
||||
else if (ups->ptm_granularity > local_clock)
|
||||
dev->ptm_granularity = ups->ptm_granularity;
|
||||
} else {
|
||||
if (cap & PCI_PTM_CAP_ROOT) {
|
||||
ctrl = PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT;
|
||||
dev->ptm_root = 1;
|
||||
dev->ptm_granularity = local_clock;
|
||||
} else
|
||||
return;
|
||||
}
|
||||
|
||||
ctrl |= dev->ptm_granularity << 8;
|
||||
pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
|
||||
dev->ptm_enabled = 1;
|
||||
|
||||
pci_ptm_info(dev);
|
||||
}
|
||||
|
||||
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
|
||||
{
|
||||
int pos;
|
||||
u32 cap, ctrl;
|
||||
struct pci_dev *ups;
|
||||
|
||||
if (!pci_is_pcie(dev))
|
||||
return -EINVAL;
|
||||
|
||||
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PTM);
|
||||
if (!pos)
|
||||
return -EINVAL;
|
||||
|
||||
pci_read_config_dword(dev, pos + PCI_PTM_CAP, &cap);
|
||||
if (!(cap & PCI_PTM_CAP_REQ))
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* For a PCIe Endpoint, PTM is only useful if the endpoint can
|
||||
* issue PTM requests to upstream devices that have PTM enabled.
|
||||
*
|
||||
* For Root Complex Integrated Endpoints, there is no upstream
|
||||
* device, so there must be some implementation-specific way to
|
||||
* associate the endpoint with a time source.
|
||||
*/
|
||||
if (pci_pcie_type(dev) == PCI_EXP_TYPE_ENDPOINT) {
|
||||
ups = pci_upstream_bridge(dev);
|
||||
if (!ups || !ups->ptm_enabled)
|
||||
return -EINVAL;
|
||||
|
||||
dev->ptm_granularity = ups->ptm_granularity;
|
||||
} else if (pci_pcie_type(dev) == PCI_EXP_TYPE_RC_END) {
|
||||
dev->ptm_granularity = 0;
|
||||
} else
|
||||
return -EINVAL;
|
||||
|
||||
ctrl = PCI_PTM_CTRL_ENABLE;
|
||||
ctrl |= dev->ptm_granularity << 8;
|
||||
pci_write_config_dword(dev, pos + PCI_PTM_CTRL, ctrl);
|
||||
dev->ptm_enabled = 1;
|
||||
|
||||
pci_ptm_info(dev);
|
||||
|
||||
if (granularity)
|
||||
*granularity = dev->ptm_granularity;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(pci_enable_ptm);
|
||||
|
||||
static void __pci_disable_ptm(struct pci_dev *dev)
|
||||
{
|
||||
u16 ptm = dev->ptm_cap;
|
||||
u32 ctrl;
|
||||
|
||||
if (!ptm)
|
||||
return;
|
||||
|
||||
pci_read_config_dword(dev, ptm + PCI_PTM_CTRL, &ctrl);
|
||||
ctrl &= ~(PCI_PTM_CTRL_ENABLE | PCI_PTM_CTRL_ROOT);
|
||||
pci_write_config_dword(dev, ptm + PCI_PTM_CTRL, ctrl);
|
||||
}
|
||||
|
||||
/**
|
||||
* pci_disable_ptm() - Disable Precision Time Measurement
|
||||
* @dev: PCI device
|
||||
*
|
||||
* Disable Precision Time Measurement for @dev.
|
||||
*/
|
||||
void pci_disable_ptm(struct pci_dev *dev)
|
||||
{
|
||||
if (dev->ptm_enabled) {
|
||||
__pci_disable_ptm(dev);
|
||||
dev->ptm_enabled = 0;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(pci_disable_ptm);
|
||||
|
||||
/*
|
||||
* Disable PTM, but preserve dev->ptm_enabled so we silently re-enable it on
|
||||
* resume if necessary.
|
||||
*/
|
||||
void pci_suspend_ptm(struct pci_dev *dev)
|
||||
{
|
||||
if (dev->ptm_enabled)
|
||||
__pci_disable_ptm(dev);
|
||||
}
|
||||
|
||||
/* If PTM was enabled before suspend, re-enable it when resuming */
|
||||
void pci_resume_ptm(struct pci_dev *dev)
|
||||
{
|
||||
if (dev->ptm_enabled)
|
||||
__pci_enable_ptm(dev);
|
||||
}
|
||||
|
||||
bool pcie_ptm_enabled(struct pci_dev *dev)
|
||||
{
|
||||
if (!dev)
|
||||
|
@ -475,6 +475,7 @@ struct pci_dev {
|
||||
unsigned int broken_cmd_compl:1; /* No compl for some cmds */
|
||||
#endif
|
||||
#ifdef CONFIG_PCIE_PTM
|
||||
u16 ptm_cap; /* PTM Capability */
|
||||
unsigned int ptm_root:1;
|
||||
unsigned int ptm_enabled:1;
|
||||
u8 ptm_granularity;
|
||||
@ -1677,10 +1678,12 @@ bool pci_ats_disabled(void);
|
||||
|
||||
#ifdef CONFIG_PCIE_PTM
|
||||
int pci_enable_ptm(struct pci_dev *dev, u8 *granularity);
|
||||
void pci_disable_ptm(struct pci_dev *dev);
|
||||
bool pcie_ptm_enabled(struct pci_dev *dev);
|
||||
#else
|
||||
static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity)
|
||||
{ return -EINVAL; }
|
||||
static inline void pci_disable_ptm(struct pci_dev *dev) { }
|
||||
static inline bool pcie_ptm_enabled(struct pci_dev *dev)
|
||||
{ return false; }
|
||||
#endif
|
||||
|
Loading…
Reference in New Issue
Block a user