drm/xe: Kill xe_device_mem_access_{get*,put}

Let's simply convert all the current callers to direct
xe_pm_runtime calls and remove this extra layer of indirection.

No functional change is expected with this patch, since
xe_device_mem_access_get was already using
xe_pm_runtime_get_noresume at this point.
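
Every conversion below is the same mechanical substitution; a
representative sketch of the pattern (the struct xe_device pointer and
the protected section vary per caller):

-	xe_device_mem_access_get(xe);
+	xe_pm_runtime_get_noresume(xe);
 	/* ... code that must keep the device awake ... */
-	xe_device_mem_access_put(xe);
+	xe_pm_runtime_put(xe);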

v2: Convert all the current callers instead of doing one big
refactor at once.

v3: - Rebased
    - Squashed in the GSC/HDCP conversion
    - Added a new case: sriov_pf_policy
    - Improved commit message to highlight that
      there's no functional change in this patch.

Reviewed-by: Matthew Auld <matthew.auld@intel.com> #v2
Cc: Suraj Kandpal <suraj.kandpal@intel.com>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Suraj Kandpal <suraj.kandpal@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240418143049.43231-1-rodrigo.vivi@intel.com

Author: Rodrigo Vivi <rodrigo.vivi@intel.com>
Date:   2024-04-18 10:30:49 -04:00
parent 62422b7be4
commit 783d6cdc82
11 changed files with 25 additions and 63 deletions

View File

@@ -10,6 +10,7 @@
 #include "intel_fb_pin.h"
 #include "xe_ggtt.h"
 #include "xe_gt.h"
+#include "xe_pm.h"
 
 #include <drm/ttm/ttm_bo.h>
@@ -193,7 +194,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
 	/* TODO: Consider sharing framebuffer mapping?
 	 * embed i915_vma inside intel_framebuffer
 	 */
-	xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
 	ret = mutex_lock_interruptible(&ggtt->lock);
 	if (ret)
 		goto out;
@@ -244,7 +245,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
 out_unlock:
 	mutex_unlock(&ggtt->lock);
 out:
-	xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+	xe_pm_runtime_put(tile_to_xe(ggtt->tile));
 	return ret;
 }

View File

@@ -215,7 +215,7 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
 	addr_out_off = PAGE_SIZE;
 
 	host_session_id = xe_gsc_create_host_session_id();
-	xe_device_mem_access_get(xe);
+	xe_pm_runtime_get_noresume(xe);
 	addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
 					    addr_in_wr_off, HECI_MEADDRESS_HDCP,
 					    host_session_id, msg_in_len);
@@ -247,6 +247,6 @@ ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
 			      msg_out_len);
 out:
-	xe_device_mem_access_put(xe);
+	xe_pm_runtime_put(xe);
 	return ret;
 }

View File

@@ -716,7 +716,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	xe_assert(xe, migrate);
 
 	trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
-	xe_device_mem_access_get(xe);
+	xe_pm_runtime_get_noresume(xe);
 
 	if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
 		/*
@@ -740,7 +740,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 			if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
 				ret = -EINVAL;
-				xe_device_mem_access_put(xe);
+				xe_pm_runtime_put(xe);
 				goto out;
 			}
@@ -758,7 +758,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 				    new_mem, handle_system_ccs);
 		if (IS_ERR(fence)) {
 			ret = PTR_ERR(fence);
-			xe_device_mem_access_put(xe);
+			xe_pm_runtime_put(xe);
 			goto out;
 		}
 		if (!move_lacks_source) {
@@ -783,7 +783,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 		dma_fence_put(fence);
 	}
-	xe_device_mem_access_put(xe);
+	xe_pm_runtime_put(xe);
 
 out:
 	return ret;

View File

@@ -729,42 +729,6 @@ void xe_device_assert_mem_access(struct xe_device *xe)
 	xe_assert(xe, !xe_pm_runtime_suspended(xe));
 }
 
-void xe_device_mem_access_get(struct xe_device *xe)
-{
-	int ref;
-
-	/*
-	 * This looks racy, but should be fine since the pm_callback_task only
-	 * transitions from NULL -> current (and back to NULL again), during the
-	 * runtime_resume() or runtime_suspend() callbacks, for which there can
-	 * only be a single one running for our device. We only need to prevent
-	 * recursively calling the runtime_get or runtime_put from those
-	 * callbacks, as well as preventing triggering any access_ongoing
-	 * asserts.
-	 */
-	if (xe_pm_read_callback_task(xe) == current)
-		return;
-
-	xe_pm_runtime_get_noresume(xe);
-	ref = atomic_inc_return(&xe->mem_access.ref);
-
-	xe_assert(xe, ref != S32_MAX);
-}
-
-void xe_device_mem_access_put(struct xe_device *xe)
-{
-	int ref;
-
-	if (xe_pm_read_callback_task(xe) == current)
-		return;
-
-	ref = atomic_dec_return(&xe->mem_access.ref);
-	xe_pm_runtime_put(xe);
-
-	xe_assert(xe, ref >= 0);
-}
-
 void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
 {
 	struct xe_gt *gt;
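
The hunk above is the heart of the patch: by this point in the series the
wrapper pair had been reduced to the direct xe_pm_runtime call plus the
private mem_access.ref counter, whose only consumers were the wrapper's
own asserts. A minimal distillation of why the removal is behavior-neutral
(illustrative only, not kernel source; it restates the deleted function):

	static void old_mem_access_get(struct xe_device *xe)
	{
		/* Recursion guard: skip when called from within the
		 * runtime_resume()/runtime_suspend() callbacks themselves.
		 */
		if (xe_pm_read_callback_task(xe) == current)
			return;

		xe_pm_runtime_get_noresume(xe);	 /* the only visible effect */
		atomic_inc(&xe->mem_access.ref); /* read only by the asserts */
	}

Callers now take the runtime PM reference directly, leaving the recursion
concern to the xe_pm_runtime_* helpers themselves, which can consult the
same xe_pm_read_callback_task() state.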

View File

@@ -133,9 +133,6 @@ static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt)
 	return &gt->mmio.fw;
 }
 
-void xe_device_mem_access_get(struct xe_device *xe);
-void xe_device_mem_access_put(struct xe_device *xe);
-
 void xe_device_assert_mem_access(struct xe_device *xe);
 
 static inline bool xe_device_in_fault_mode(struct xe_device *xe)

View File

@@ -384,9 +384,6 @@ struct xe_device {
 	 * triggering additional actions when they occur.
 	 */
 	struct {
-		/** @mem_access.ref: ref count of memory accesses */
-		atomic_t ref;
-
 		/**
 		 * @mem_access.vram_userfault: Encapsulate vram_userfault
 		 * related stuff

View File

@@ -589,7 +589,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 			return -EINVAL;
 
 		/* The migration vm doesn't hold rpm ref */
-		xe_device_mem_access_get(xe);
+		xe_pm_runtime_get_noresume(xe);
 
 		flags = EXEC_QUEUE_FLAG_VM | (id ? EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD : 0);
@@ -598,7 +598,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 					   args->width, hwe, flags,
 					   args->extensions);
-		xe_device_mem_access_put(xe); /* now held by engine */
+		xe_pm_runtime_put(xe); /* now held by engine */
 		xe_vm_put(migrate_vm);
 
 		if (IS_ERR(new)) {

View File

@@ -21,6 +21,7 @@
 #include "xe_gt_printk.h"
 #include "xe_gt_tlb_invalidation.h"
 #include "xe_map.h"
+#include "xe_pm.h"
 #include "xe_sriov.h"
 #include "xe_wopcm.h"
@@ -403,7 +404,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
 	if (err)
 		return err;
 
-	xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
 	mutex_lock(&ggtt->lock);
 	err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
 					  alignment, 0, start, end, 0);
@@ -413,7 +414,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
 	if (!err && bo->flags & XE_BO_FLAG_GGTT_INVALIDATE)
 		xe_ggtt_invalidate(ggtt);
 
-	xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+	xe_pm_runtime_put(tile_to_xe(ggtt->tile));
 
 	return err;
 }
@@ -432,7 +433,7 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
 void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
 			 bool invalidate)
 {
-	xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
 	mutex_lock(&ggtt->lock);
 
 	xe_ggtt_clear(ggtt, node->start, node->size);
@@ -443,7 +444,7 @@ void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
 	if (invalidate)
 		xe_ggtt_invalidate(ggtt);
 
-	xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+	xe_pm_runtime_put(tile_to_xe(ggtt->tile));
 }
 
 void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)

View File

@@ -12,6 +12,7 @@
 #include "xe_gt_sriov_printk.h"
 #include "xe_guc_ct.h"
 #include "xe_guc_klv_helpers.h"
+#include "xe_pm.h"
 
 /*
  * Return: number of KLVs that were successfully parsed and saved,
@@ -368,7 +369,7 @@ int xe_gt_sriov_pf_policy_reprovision(struct xe_gt *gt, bool reset)
 {
 	int err = 0;
 
-	xe_device_mem_access_get(gt_to_xe(gt));
+	xe_pm_runtime_get_noresume(gt_to_xe(gt));
 
 	mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
 	if (reset)
@@ -378,7 +379,7 @@ int xe_gt_sriov_pf_policy_reprovision(struct xe_gt *gt, bool reset)
 	err |= pf_reprovision_sample_period(gt);
 	mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
 
-	xe_device_mem_access_put(gt_to_xe(gt));
+	xe_pm_runtime_put(gt_to_xe(gt));
 
 	return err ? -ENXIO : 0;
 }

View File

@@ -16,6 +16,7 @@
 #include "xe_hw_fence.h"
 #include "xe_lrc.h"
 #include "xe_macros.h"
+#include "xe_pm.h"
 #include "xe_sync_types.h"
 #include "xe_trace.h"
 #include "xe_vm.h"
@@ -159,7 +160,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
 	/* All other jobs require a VM to be open which has a ref */
 	if (unlikely(q->flags & EXEC_QUEUE_FLAG_KERNEL))
-		xe_device_mem_access_get(job_to_xe(job));
+		xe_pm_runtime_get_noresume(job_to_xe(job));
 	xe_device_assert_mem_access(job_to_xe(job));
 
 	trace_xe_sched_job_create(job);
@@ -192,7 +193,7 @@ void xe_sched_job_destroy(struct kref *ref)
 		container_of(ref, struct xe_sched_job, refcount);
 
 	if (unlikely(job->q->flags & EXEC_QUEUE_FLAG_KERNEL))
-		xe_device_mem_access_put(job_to_xe(job));
+		xe_pm_runtime_put(job_to_xe(job));
 	xe_exec_queue_put(job->q);
 	dma_fence_put(job->fence);
 	drm_sched_job_cleanup(&job->drm);

View File

@@ -1266,7 +1266,7 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags)
 	vm->pt_ops = &xelp_pt_ops;
 
 	if (!(flags & XE_VM_FLAG_MIGRATION))
-		xe_device_mem_access_get(xe);
+		xe_pm_runtime_get_noresume(xe);
 
 	vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm);
 	if (!vm_resv_obj) {
@@ -1376,7 +1376,7 @@ err_no_resv:
 		xe_range_fence_tree_fini(&vm->rftree[id]);
 	kfree(vm);
 
 	if (!(flags & XE_VM_FLAG_MIGRATION))
-		xe_device_mem_access_put(xe);
+		xe_pm_runtime_put(xe);
 	return ERR_PTR(err);
 }
@@ -1507,7 +1507,7 @@ static void xe_vm_free(struct drm_gpuvm *gpuvm)
 	mutex_destroy(&vm->snap_mutex);
 
 	if (!(vm->flags & XE_VM_FLAG_MIGRATION))
-		xe_device_mem_access_put(xe);
+		xe_pm_runtime_put(xe);
 
 	for_each_tile(tile, xe, id)
 		XE_WARN_ON(vm->pt_root[id]);