
drm/i915: Remove RPM sequence checking

We only used the RPM sequence checking inside the low-level GTT
accessors, when we had to rely on callers taking the wakeref on our
behalf. Now that we take the RPM wakeref inside the GTT management
routines themselves, we can forgo sanity checking the callers.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Imre Deak <imre.deak@intel.com>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161024124218.18252-4-chris@chris-wilson.co.uk
Chris Wilson 2016-10-24 13:42:17 +01:00
parent 3594a3e21f
commit 2eedfc7d58
5 changed files with 2 additions and 75 deletions

drivers/gpu/drm/i915/i915_drv.h

@@ -1687,7 +1687,6 @@ struct skl_wm_level {
  */
 struct i915_runtime_pm {
 	atomic_t wakeref_count;
-	atomic_t atomic_seq;
 	bool suspended;
 	bool irqs_enabled;
 };

drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -2395,16 +2395,11 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 	gen8_pte_t __iomem *pte =
 		(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
 		(offset >> PAGE_SHIFT);
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	gen8_set_pte(pte, gen8_pte_encode(addr, level));
 
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2418,11 +2413,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	gen8_pte_t __iomem *gtt_entries;
 	gen8_pte_t gtt_entry;
 	dma_addr_t addr;
-	int rpm_atomic_seq;
 	int i = 0;
 
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
 	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
 
 	for_each_sgt_dma(addr, sgt_iter, st) {
@@ -2446,8 +2438,6 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 struct insert_entries {
@@ -2486,16 +2476,11 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
 	gen6_pte_t __iomem *pte =
 		(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
 		(offset >> PAGE_SHIFT);
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	iowrite32(vm->pte_encode(addr, level, flags), pte);
 
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 /*
@@ -2515,11 +2500,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	gen6_pte_t __iomem *gtt_entries;
 	gen6_pte_t gtt_entry;
 	dma_addr_t addr;
-	int rpm_atomic_seq;
 	int i = 0;
 
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
 	gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
 
 	for_each_sgt_dma(addr, sgt_iter, st) {
@@ -2542,8 +2524,6 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 	 */
 	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
 	POSTING_READ(GFX_FLSH_CNTL_GEN6);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void nop_clear_range(struct i915_address_space *vm,
@@ -2554,7 +2534,6 @@ static void nop_clear_range(struct i915_address_space *vm,
 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 				  uint64_t start, uint64_t length)
 {
-	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
@@ -2562,9 +2541,6 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
 	int i;
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	if (WARN(num_entries > max_entries,
 		 "First entry = %d; Num entries = %d (max=%d)\n",
@@ -2576,15 +2552,12 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 	for (i = 0; i < num_entries; i++)
 		gen8_set_pte(&gtt_base[i], scratch_pte);
 	readl(gtt_base);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  uint64_t start,
 				  uint64_t length)
 {
-	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
@@ -2592,9 +2565,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
 	int i;
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	if (WARN(num_entries > max_entries,
 		 "First entry = %d; Num entries = %d (max=%d)\n",
@@ -2607,8 +2577,6 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 	for (i = 0; i < num_entries; i++)
 		iowrite32(scratch_pte, &gtt_base[i]);
 	readl(gtt_base);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void i915_ggtt_insert_page(struct i915_address_space *vm,
@@ -2617,16 +2585,10 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
 				  enum i915_cache_level cache_level,
 				  u32 unused)
 {
-	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void i915_ggtt_insert_entries(struct i915_address_space *vm,
@@ -2634,33 +2596,18 @@ static void i915_ggtt_insert_entries(struct i915_address_space *vm,
 				     uint64_t start,
 				     enum i915_cache_level cache_level, u32 unused)
 {
-	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
 		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
 
 	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
 }
 
 static void i915_ggtt_clear_range(struct i915_address_space *vm,
 				  uint64_t start,
 				  uint64_t length)
 {
-	struct drm_i915_private *dev_priv = to_i915(vm->dev);
-	unsigned first_entry = start >> PAGE_SHIFT;
-	unsigned num_entries = length >> PAGE_SHIFT;
-	int rpm_atomic_seq;
-
-	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
-
-	intel_gtt_clear_range(first_entry, num_entries);
-
-	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
+	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
 }
 
 static int ggtt_bind_vma(struct i915_vma *vma,

drivers/gpu/drm/i915/intel_drv.h

@@ -1664,23 +1664,6 @@ assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
 		DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
 }
 
-static inline int
-assert_rpm_atomic_begin(struct drm_i915_private *dev_priv)
-{
-	int seq = atomic_read(&dev_priv->pm.atomic_seq);
-
-	assert_rpm_wakelock_held(dev_priv);
-
-	return seq;
-}
-
-static inline void
-assert_rpm_atomic_end(struct drm_i915_private *dev_priv, int begin_seq)
-{
-	WARN_ONCE(atomic_read(&dev_priv->pm.atomic_seq) != begin_seq,
-		  "HW access outside of RPM atomic section\n");
-}
-
 /**
  * disable_rpm_wakeref_asserts - disable the RPM assert checks
  * @dev_priv: i915 device instance

drivers/gpu/drm/i915/intel_pm.c

@@ -8043,5 +8043,4 @@ void intel_pm_setup(struct drm_device *dev)
 	dev_priv->pm.suspended = false;
 	atomic_set(&dev_priv->pm.wakeref_count, 0);
-	atomic_set(&dev_priv->pm.atomic_seq, 0);
 }

drivers/gpu/drm/i915/intel_runtime_pm.c

@@ -2736,8 +2736,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
 	struct device *kdev = &pdev->dev;
 
 	assert_rpm_wakelock_held(dev_priv);
-	if (atomic_dec_and_test(&dev_priv->pm.wakeref_count))
-		atomic_inc(&dev_priv->pm.atomic_seq);
+	atomic_dec(&dev_priv->pm.wakeref_count);
 
 	pm_runtime_mark_last_busy(kdev);
 	pm_runtime_put_autosuspend(kdev);
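
For contrast, the calling convention after this change, per the commit message: the RPM wakeref is taken inside the GTT management routines themselves, so the low-level accessors can never execute without one and the begin/end bracketing removed above has nothing left to catch. A hedged sketch of that shape follows; do_ggtt_update() is a hypothetical stand-in for the insert_page/insert_entries/clear_range paths, while intel_runtime_pm_get()/intel_runtime_pm_put() are the real i915 entry points of this era.

/* Hypothetical sketch, not code from this commit: the management routine
 * takes the wakeref on its own behalf, covering the whole update. */
static void example_ggtt_update(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);	/* wakeref held for the whole update */
	do_ggtt_update(dev_priv);	/* every low-level HW access covered */
	intel_runtime_pm_put(dev_priv);	/* device may now runtime suspend */
}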