Merge tag 'drm-intel-fixes-2023-09-28' of git://anongit.freedesktop.org/drm/drm-intel into drm-fixes

- Fix a panic regression on gen8_ggtt_insert_entries (Matthew Wilcox)
- Fix load issue due to reservation address in ggtt_reserve_guc_top (Javier Pello)
- Fix a possible deadlock with guc busyness worker (Umesh)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ZRWMI1HmUYPGGylp@intel.com
commit 06365a04fd
Author: Dave Airlie <airlied@redhat.com>
Date:   2023-09-29 10:28:07 +10:00

3 changed files with 59 additions and 13 deletions

@@ -100,6 +100,7 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 	st->nents = 0;
 	for (i = 0; i < page_count; i++) {
 		struct folio *folio;
+		unsigned long nr_pages;
 		const unsigned int shrink[] = {
 			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
 			0,
@@ -150,6 +151,8 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 			}
 		} while (1);
 
+		nr_pages = min_t(unsigned long,
+				 folio_nr_pages(folio), page_count - i);
 		if (!i ||
 		    sg->length >= max_segment ||
 		    folio_pfn(folio) != next_pfn) {
@@ -157,13 +160,13 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
 			sg = sg_next(sg);
 
 			st->nents++;
-			sg_set_folio(sg, folio, folio_size(folio), 0);
+			sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
 		} else {
 			/* XXX: could overflow? */
-			sg->length += folio_size(folio);
+			sg->length += nr_pages * PAGE_SIZE;
 		}
-		next_pfn = folio_pfn(folio) + folio_nr_pages(folio);
-		i += folio_nr_pages(folio) - 1;
+		next_pfn = folio_pfn(folio) + nr_pages;
+		i += nr_pages - 1;
 
 		/* Check that the i965g/gm workaround works. */
 		GEM_BUG_ON(gfp & __GFP_DMA32 && next_pfn >= 0x00100000UL);

@@ -511,20 +511,31 @@ void intel_ggtt_unbind_vma(struct i915_address_space *vm,
 	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
 }
 
+/*
+ * Reserve the top of the GuC address space for firmware images. Addresses
+ * beyond GUC_GGTT_TOP in the GuC address space are inaccessible by GuC,
+ * which makes for a suitable range to hold GuC/HuC firmware images if the
+ * size of the GGTT is 4G. However, on a 32-bit platform the size of the GGTT
+ * is limited to 2G, which is less than GUC_GGTT_TOP, but we reserve a chunk
+ * of the same size anyway, which is far more than needed, to keep the logic
+ * in uc_fw_ggtt_offset() simple.
+ */
+#define GUC_TOP_RESERVE_SIZE (SZ_4G - GUC_GGTT_TOP)
+
 static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
 {
-	u64 size;
+	u64 offset;
 	int ret;
 
 	if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
 		return 0;
 
-	GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
-	size = ggtt->vm.total - GUC_GGTT_TOP;
+	GEM_BUG_ON(ggtt->vm.total <= GUC_TOP_RESERVE_SIZE);
+	offset = ggtt->vm.total - GUC_TOP_RESERVE_SIZE;
 
-	ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw, size,
-				   GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
-				   PIN_NOEVICT);
+	ret = i915_gem_gtt_reserve(&ggtt->vm, NULL, &ggtt->uc_fw,
+				   GUC_TOP_RESERVE_SIZE, offset,
+				   I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
 	if (ret)
 		drm_dbg(&ggtt->vm.i915->drm,
 			"Failed to reserve top of GGTT for GuC\n");

@@ -1432,6 +1432,36 @@ static void guc_timestamp_ping(struct work_struct *wrk)
 	unsigned long index;
 	int srcu, ret;
 
+	/*
+	 * Ideally the busyness worker should take a gt pm wakeref because the
+	 * worker only needs to be active while gt is awake. However, the
+	 * gt_park path cancels the worker synchronously and this complicates
+	 * the flow if the worker is also running at the same time. The cancel
+	 * waits for the worker and when the worker releases the wakeref, that
+	 * would call gt_park and would lead to a deadlock.
+	 *
+	 * The resolution is to take the global pm wakeref if runtime pm is
+	 * already active. If not, we don't need to update the busyness stats as
+	 * the stats would already be updated when the gt was parked.
+	 *
+	 * Note:
+	 * - We do not requeue the worker if we cannot take a reference to runtime
+	 *   pm since intel_guc_busyness_unpark would requeue the worker in the
+	 *   resume path.
+	 *
+	 * - If the gt was parked longer than time taken for GT timestamp to roll
+	 *   over, we ignore those rollovers since we don't care about tracking
+	 *   the exact GT time. We only care about roll overs when the gt is
+	 *   active and running workloads.
+	 *
+	 * - There is a window of time between gt_park and runtime suspend,
+	 *   where the worker may run. This is acceptable since the worker will
+	 *   not find any new data to update busyness.
+	 */
+	wakeref = intel_runtime_pm_get_if_active(&gt->i915->runtime_pm);
+	if (!wakeref)
+		return;
+
 	/*
 	 * Synchronize with gt reset to make sure the worker does not
 	 * corrupt the engine/guc stats. NB: can't actually block waiting
@@ -1440,10 +1470,9 @@ static void guc_timestamp_ping(struct work_struct *wrk)
 	 */
 	ret = intel_gt_reset_trylock(gt, &srcu);
 	if (ret)
-		return;
+		goto err_trylock;
 
-	with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref)
-		__update_guc_busyness_stats(guc);
+	__update_guc_busyness_stats(guc);
 
 	/* adjust context stats for overflow */
 	xa_for_each(&guc->context_lookup, index, ce)
@@ -1452,6 +1481,9 @@ static void guc_timestamp_ping(struct work_struct *wrk)
 	intel_gt_reset_unlock(gt, srcu);
 
 	guc_enable_busyness_worker(guc);
+
+err_trylock:
+	intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
 }
 
 static int guc_action_enable_usage_stats(struct intel_guc *guc)