intel: Sync xe_drm.h

Sync xe_drm.h with commit 3b8183b7efad ("drm/xe/uapi: Be more specific
about the vm_bind prefetch region").

Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: José Roberto de Souza <jose.souza@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26238>

@@ -19,12 +19,12 @@ extern "C" {
/**
* DOC: uevent generated by xe on its PCI node.
*
* XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
* DRM_XE_RESET_FAILED_UEVENT - Event is generated when attempt to reset gt
* fails. The value supplied with the event is always "NEEDS_RESET".
* Additional information supplied is tile id and gt id of the gt unit for
* which reset has failed.
*/
#define XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
#define DRM_XE_RESET_FAILED_UEVENT "DEVICE_STATUS"
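
For illustration only: a userspace monitor could pick this uevent up through libudev, filtering on the drm subsystem and reading the DEVICE_STATUS property documented above. A minimal sketch, not part of this commit, with polling and error handling kept to a minimum:

    #include <libudev.h>
    #include <poll.h>
    #include <stdio.h>
    #include "xe_drm.h"

    /* Sketch: report gt reset failures signalled via DRM_XE_RESET_FAILED_UEVENT. */
    static void watch_reset_failures(void)
    {
        struct udev *udev = udev_new();
        struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "udev");
        struct pollfd pfd;

        udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
        udev_monitor_enable_receiving(mon);
        pfd.fd = udev_monitor_get_fd(mon);
        pfd.events = POLLIN;

        while (poll(&pfd, 1, -1) > 0) {
            struct udev_device *dev = udev_monitor_receive_device(mon);
            if (!dev)
                continue;
            /* Per the documentation above, the value is always "NEEDS_RESET". */
            const char *status =
                udev_device_get_property_value(dev, DRM_XE_RESET_FAILED_UEVENT);
            if (status)
                fprintf(stderr, "xe gt reset failed: %s\n", status);
            udev_device_unref(dev);
        }
    }
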
/**
* struct xe_user_extension - Base class for defining a chain of extensions
@@ -141,21 +141,22 @@ struct drm_xe_engine_class_instance {
__u16 engine_instance;
__u16 gt_id;
__u16 rsvd;
/** @pad: MBZ */
__u16 pad;
};
/**
* enum drm_xe_memory_class - Supported memory classes.
*/
enum drm_xe_memory_class {
/** @XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
XE_MEM_REGION_CLASS_SYSMEM = 0,
/** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
/**
* @XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
* @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
* represents the memory that is local to the device, which we
* call VRAM. Not valid on integrated platforms.
*/
XE_MEM_REGION_CLASS_VRAM
DRM_XE_MEM_REGION_CLASS_VRAM
};
/**
@@ -215,7 +216,7 @@ struct drm_xe_query_mem_region {
* always equal the @total_size, since all of it will be CPU
* accessible.
*
* Note this is only tracked for XE_MEM_REGION_CLASS_VRAM
* Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
* regions (for other types the value here will always equal
* zero).
*/
@@ -227,7 +228,7 @@ struct drm_xe_query_mem_region {
* Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
* accounting. Without this the value here will always equal
* zero. Note this is only currently tracked for
* XE_MEM_REGION_CLASS_VRAM regions (for other types the value
* DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
* here will always be zero).
*/
__u64 cpu_visible_used;
@@ -290,13 +291,13 @@ struct drm_xe_query_engine_cycles {
};
/**
* struct drm_xe_query_mem_usage - describe memory regions and usage
* struct drm_xe_query_mem_regions - describe memory regions
*
* If a query is made with a struct drm_xe_device_query where .query
* is equal to DRM_XE_DEVICE_QUERY_MEM_USAGE, then the reply uses
* struct drm_xe_query_mem_usage in .data.
* is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
* struct drm_xe_query_mem_regions in .data.
*/
struct drm_xe_query_mem_usage {
struct drm_xe_query_mem_regions {
/** @num_regions: number of memory regions returned in @regions */
__u32 num_regions;
/** @pad: MBZ */
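
As with the other query types, the expected calling convention is two passes over DRM_IOCTL_XE_DEVICE_QUERY: the first with .size == 0 so the kernel reports the required buffer size, the second with .data pointing at an allocation of that size. A sketch of that pattern (essentially what the xe_query_alloc_fetch helper in the intel_device_info changes further below does; error handling trimmed):

    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include "xe_drm.h"

    /* Sketch: fetch the memory-region list with the size-then-data pattern. */
    static struct drm_xe_query_mem_regions *query_mem_regions(int fd)
    {
        struct drm_xe_device_query query = {
            .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
        };
        struct drm_xe_query_mem_regions *regions;

        /* First pass: .size is zero, the kernel fills in the required size. */
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query))
            return NULL;

        regions = calloc(1, query.size);
        query.data = (uintptr_t)regions;

        /* Second pass: the kernel copies the region descriptions into .data. */
        if (ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query)) {
            free(regions);
            return NULL;
        }
        return regions;
    }
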
@@ -320,12 +321,12 @@ struct drm_xe_query_config {
/** @pad: MBZ */
__u32 pad;
#define XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
#define XE_QUERY_CONFIG_FLAGS 1
#define XE_QUERY_CONFIG_FLAGS_HAS_VRAM (0x1 << 0)
#define XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define XE_QUERY_CONFIG_VA_BITS 3
#define XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
#define DRM_XE_QUERY_CONFIG_FLAGS 1
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
/** @info: array of elements containing the config info */
__u64 info[];
};
@@ -339,8 +340,8 @@ struct drm_xe_query_config {
* implementing graphics and/or media operations.
*/
struct drm_xe_query_gt {
#define XE_QUERY_GT_TYPE_MAIN 0
#define XE_QUERY_GT_TYPE_MEDIA 1
#define DRM_XE_QUERY_GT_TYPE_MAIN 0
#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
/** @type: GT type: Main or Media */
__u16 type;
/** @gt_id: Unique ID of this GT within the PCI Device */
@@ -348,17 +349,18 @@ struct drm_xe_query_gt {
/** @clock_freq: A clock frequency for timestamp */
__u32 clock_freq;
/**
* @native_mem_regions: Bit mask of instances from
* drm_xe_query_mem_usage that lives on the same GPU/Tile and have
* direct access.
* @near_mem_regions: Bit mask of instances from
* drm_xe_query_mem_regions that is near the current engines of this GT.
*/
__u64 native_mem_regions;
__u64 near_mem_regions;
/**
* @slow_mem_regions: Bit mask of instances from
* drm_xe_query_mem_usage that this GT can indirectly access, although
* they live on a different GPU/Tile.
* @far_mem_regions: Bit mask of instances from
* drm_xe_query_mem_regions that is far from the engines of this GT.
* In general, it has extra indirections when compared to the
* @near_mem_regions. For a discrete device this could mean system
* memory and memory living in a different Tile.
*/
__u64 slow_mem_regions;
__u64 far_mem_regions;
/** @reserved: Reserved */
__u64 reserved[8];
};
@@ -400,7 +402,7 @@ struct drm_xe_query_topology_mask {
* DSS_GEOMETRY ff ff ff ff 00 00 00 00
* means 32 DSS are available for geometry.
*/
#define XE_TOPO_DSS_GEOMETRY (1 << 0)
#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
/*
* To query the mask of Dual Sub Slices (DSS) available for compute
* operations. For example a query response containing the following
@@ -408,7 +410,7 @@ struct drm_xe_query_topology_mask {
* DSS_COMPUTE ff ff ff ff 00 00 00 00
* means 32 DSS are available for compute.
*/
#define XE_TOPO_DSS_COMPUTE (1 << 1)
#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
/*
* To query the mask of Execution Units (EU) available per Dual Sub
* Slices (DSS). For example a query response containing the following
@@ -416,7 +418,7 @@ struct drm_xe_query_topology_mask {
* EU_PER_DSS ff ff 00 00 00 00 00 00
* means each DSS has 16 EU.
*/
#define XE_TOPO_EU_PER_DSS (1 << 2)
#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
/** @type: type of mask */
__u16 type;
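
The worked examples above ("ff ff ff ff 00 00 00 00 means 32 DSS") boil down to a population count over @mask. A small sketch of the decode:

    #include <stdint.h>

    /* Sketch: count enabled units in a topology mask; for the
     * "ff ff ff ff 00 00 00 00" example above this returns 32. */
    static int topo_count_units(const uint8_t *mask, uint32_t num_bytes)
    {
        int count = 0;

        for (uint32_t i = 0; i < num_bytes; i++)
            count += __builtin_popcount(mask[i]);
        return count;
    }

Applied to a mask of type DRM_XE_TOPO_DSS_GEOMETRY, this yields the number of DSS usable for geometry, as in the xe_query_topology changes further down.
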
@@ -467,7 +469,7 @@ struct drm_xe_device_query {
__u64 extensions;
#define DRM_XE_DEVICE_QUERY_ENGINES 0
#define DRM_XE_DEVICE_QUERY_MEM_USAGE 1
#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
#define DRM_XE_DEVICE_QUERY_CONFIG 2
#define DRM_XE_DEVICE_QUERY_GT_LIST 3
#define DRM_XE_DEVICE_QUERY_HWCONFIG 4
@@ -497,8 +499,8 @@ struct drm_xe_gem_create {
*/
__u64 size;
#define XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24)
#define XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25)
#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (0x1 << 24)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (0x1 << 25)
/*
* When using VRAM as a possible placement, ensure that the corresponding VRAM
* allocation will always use the CPU accessible part of VRAM. This is important
@@ -514,7 +516,7 @@ struct drm_xe_gem_create {
* display surfaces, therefore the kernel requires setting this flag for such
* objects, otherwise an error is thrown on small-bar systems.
*/
#define XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (0x1 << 26)
/**
* @flags: Flags, currently a mask of memory instances of where BO can
* be placed
@@ -581,14 +583,14 @@ struct drm_xe_ext_set_property {
};
struct drm_xe_vm_create {
#define XE_VM_EXTENSION_SET_PROPERTY 0
#define DRM_XE_VM_EXTENSION_SET_PROPERTY 0
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
#define DRM_XE_VM_CREATE_SCRATCH_PAGE (0x1 << 0)
#define DRM_XE_VM_CREATE_COMPUTE_MODE (0x1 << 1)
#define DRM_XE_VM_CREATE_ASYNC_DEFAULT (0x1 << 2)
#define DRM_XE_VM_CREATE_FAULT_MODE (0x1 << 3)
#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
#define DRM_XE_VM_CREATE_FLAG_COMPUTE_MODE (1 << 1)
#define DRM_XE_VM_CREATE_FLAG_ASYNC_DEFAULT (1 << 2)
#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 3)
/** @flags: Flags */
__u32 flags;
@@ -644,34 +646,38 @@ struct drm_xe_vm_bind_op {
*/
__u64 tile_mask;
#define XE_VM_BIND_OP_MAP 0x0
#define XE_VM_BIND_OP_UNMAP 0x1
#define XE_VM_BIND_OP_MAP_USERPTR 0x2
#define XE_VM_BIND_OP_UNMAP_ALL 0x3
#define XE_VM_BIND_OP_PREFETCH 0x4
#define DRM_XE_VM_BIND_OP_MAP 0x0
#define DRM_XE_VM_BIND_OP_UNMAP 0x1
#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
/** @op: Bind operation to perform */
__u32 op;
#define XE_VM_BIND_FLAG_READONLY (0x1 << 0)
#define XE_VM_BIND_FLAG_ASYNC (0x1 << 1)
#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
#define DRM_XE_VM_BIND_FLAG_ASYNC (1 << 1)
/*
* Valid on a faulting VM only, do the MAP operation immediately rather
* than deferring the MAP to the page fault handler.
*/
#define XE_VM_BIND_FLAG_IMMEDIATE (0x1 << 2)
#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 2)
/*
* When the NULL flag is set, the page tables are setup with a special
* bit which indicates writes are dropped and all reads return zero. In
* the future, the NULL flags will only be valid for XE_VM_BIND_OP_MAP
* the future, the NULL flags will only be valid for DRM_XE_VM_BIND_OP_MAP
* operations, the BO handle MBZ, and the BO offset MBZ. This flag is
* intended to implement VK sparse bindings.
*/
#define XE_VM_BIND_FLAG_NULL (0x1 << 3)
#define DRM_XE_VM_BIND_FLAG_NULL (1 << 3)
/** @flags: Bind flags */
__u32 flags;
/** @mem_region: Memory region to prefetch VMA to, instance not a mask */
__u32 region;
/**
* @prefetch_mem_region_instance: Memory region to prefetch VMA to.
* It is a region instance, not a mask.
* To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
*/
__u32 prefetch_mem_region_instance;
/** @reserved: Reserved */
__u64 reserved[2];
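
To show the renamed field in context, a prefetch of an already-bound VMA might be issued as below. This is a sketch, not code from this commit: the inline .bind member for the num_binds == 1 case is assumed from the surrounding header, and vm_id, addr, range and region_instance are placeholders.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "xe_drm.h"

    /* Sketch: prefetch [addr, addr + range) to one memory region instance. */
    static int xe_prefetch_vma(int fd, uint32_t vm_id, uint64_t addr,
                               uint64_t range, uint32_t region_instance)
    {
        struct drm_xe_vm_bind args = {
            .vm_id = vm_id,
            .num_binds = 1,
            .bind = {
                .addr = addr,
                .range = range,
                .op = DRM_XE_VM_BIND_OP_PREFETCH,
                /* A region instance from drm_xe_query_mem_regions, not a mask. */
                .prefetch_mem_region_instance = region_instance,
            },
        };

        return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &args);
    }
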
@@ -721,19 +727,19 @@ struct drm_xe_vm_bind {
__u64 reserved[2];
};
/* For use with XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */
/* For use with DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY */
/* Monitor 128KB contiguous region with 4K sub-granularity */
#define XE_ACC_GRANULARITY_128K 0
#define DRM_XE_ACC_GRANULARITY_128K 0
/* Monitor 2MB contiguous region with 64KB sub-granularity */
#define XE_ACC_GRANULARITY_2M 1
#define DRM_XE_ACC_GRANULARITY_2M 1
/* Monitor 16MB contiguous region with 512KB sub-granularity */
#define XE_ACC_GRANULARITY_16M 2
#define DRM_XE_ACC_GRANULARITY_16M 2
/* Monitor 64MB contiguous region with 2M sub-granularity */
#define XE_ACC_GRANULARITY_64M 3
#define DRM_XE_ACC_GRANULARITY_64M 3
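
These granularities are consumed through the exec-queue property interface below. A hedged sketch of selecting the 2MB window (the queue id is a placeholder):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "xe_drm.h"

    /* Sketch: monitor 2MB regions with 64KB sub-granularity on one queue. */
    static int set_acc_granularity_2m(int fd, uint32_t exec_queue_id)
    {
        struct drm_xe_exec_queue_set_property prop = {
            .exec_queue_id = exec_queue_id,
            .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY,
            .value = DRM_XE_ACC_GRANULARITY_2M,
        };

        return ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY, &prop);
    }
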
/**
* struct drm_xe_exec_queue_set_property - exec queue set property
@@ -747,14 +753,14 @@ struct drm_xe_exec_queue_set_property {
/** @exec_queue_id: Exec queue ID */
__u32 exec_queue_id;
#define XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
#define XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
#define XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
#define XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
/** @property: property to set */
__u32 property;
@@ -766,7 +772,7 @@ struct drm_xe_exec_queue_set_property {
};
struct drm_xe_exec_queue_create {
#define XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
@@ -805,7 +811,7 @@ struct drm_xe_exec_queue_get_property {
/** @exec_queue_id: Exec queue ID */
__u32 exec_queue_id;
#define XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
/** @property: property to get */
__u32 property;
@@ -831,11 +837,11 @@ struct drm_xe_sync {
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
#define DRM_XE_SYNC_SYNCOBJ 0x0
#define DRM_XE_SYNC_TIMELINE_SYNCOBJ 0x1
#define DRM_XE_SYNC_DMA_BUF 0x2
#define DRM_XE_SYNC_USER_FENCE 0x3
#define DRM_XE_SYNC_SIGNAL 0x10
#define DRM_XE_SYNC_FLAG_SYNCOBJ 0x0
#define DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ 0x1
#define DRM_XE_SYNC_FLAG_DMA_BUF 0x2
#define DRM_XE_SYNC_FLAG_USER_FENCE 0x3
#define DRM_XE_SYNC_FLAG_SIGNAL 0x10
__u32 flags;
/** @pad: MBZ */
@@ -912,17 +918,17 @@ struct drm_xe_wait_user_fence {
*/
__u64 addr;
#define DRM_XE_UFENCE_WAIT_EQ 0
#define DRM_XE_UFENCE_WAIT_NEQ 1
#define DRM_XE_UFENCE_WAIT_GT 2
#define DRM_XE_UFENCE_WAIT_GTE 3
#define DRM_XE_UFENCE_WAIT_LT 4
#define DRM_XE_UFENCE_WAIT_LTE 5
#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
/** @op: wait operation (type of comparison) */
__u16 op;
#define DRM_XE_UFENCE_WAIT_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
#define DRM_XE_UFENCE_WAIT_ABSTIME (1 << 1)
#define DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP (1 << 0) /* e.g. Wait on VM bind */
#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 1)
/** @flags: wait flags */
__u16 flags;
@@ -932,18 +938,19 @@ struct drm_xe_wait_user_fence {
/** @value: compare value */
__u64 value;
#define DRM_XE_UFENCE_WAIT_U8 0xffu
#define DRM_XE_UFENCE_WAIT_U16 0xffffu
#define DRM_XE_UFENCE_WAIT_U32 0xffffffffu
#define DRM_XE_UFENCE_WAIT_U64 0xffffffffffffffffu
#define DRM_XE_UFENCE_WAIT_MASK_U8 0xffu
#define DRM_XE_UFENCE_WAIT_MASK_U16 0xffffu
#define DRM_XE_UFENCE_WAIT_MASK_U32 0xffffffffu
#define DRM_XE_UFENCE_WAIT_MASK_U64 0xffffffffffffffffu
/** @mask: comparison mask */
__u64 mask;
/**
* @timeout: how long to wait before bailing, value in nanoseconds.
* Without DRM_XE_UFENCE_WAIT_ABSTIME flag set (relative timeout)
* Without DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
* it contains timeout expressed in nanoseconds to wait (fence will
* expire at now() + timeout).
* When DRM_XE_UFENCE_WAIT_ABSTIME flag is set (absolute timeout) wait
* When DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout) wait
* will end at timeout (uses system MONOTONIC_CLOCK).
* Passing negative timeout leads to neverending wait.
*
@@ -956,13 +963,13 @@ struct drm_xe_wait_user_fence {
/**
* @num_engines: number of engine instances to wait on, must be zero
* when DRM_XE_UFENCE_WAIT_SOFT_OP set
* when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
*/
__u64 num_engines;
/**
* @instances: user pointer to array of drm_xe_engine_class_instance to
* wait on, must be NULL when DRM_XE_UFENCE_WAIT_SOFT_OP set
* wait on, must be NULL when DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP set
*/
__u64 instances;
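
Tying the renamed OP, FLAG and MASK families together, a wait on a VM-bind style soft fence could be sketched as follows. The fence address and expected value are placeholders; SOFT_OP keeps num_engines and instances zero per the notes above.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "xe_drm.h"

    /* Sketch: block up to 1 s until the 64-bit value at addr equals value. */
    static int xe_wait_soft_fence_eq(int fd, uint64_t addr, uint64_t value)
    {
        struct drm_xe_wait_user_fence wait = {
            .addr = addr,                              /* qword-aligned address */
            .op = DRM_XE_UFENCE_WAIT_OP_EQ,
            .flags = DRM_XE_UFENCE_WAIT_FLAG_SOFT_OP,  /* relative timeout */
            .value = value,
            .mask = DRM_XE_UFENCE_WAIT_MASK_U64,       /* compare all 64 bits */
            .timeout = 1000000000,                     /* 1 s in nanoseconds */
        };

        return ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
    }
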
@@ -973,11 +980,11 @@ struct drm_xe_wait_user_fence {
/**
* DOC: XE PMU event config IDs
*
* Check 'man perf_event_open' to use the IDs XE_PMU_XXXX listed in xe_drm.h
* Check 'man perf_event_open' to use the IDs DRM_XE_PMU_XXXX listed in xe_drm.h
* in 'struct perf_event_attr' as part of perf_event_open syscall to read a
* particular event.
*
* For example to open the XE_PMU_RENDER_GROUP_BUSY(0):
* For example to open the DRM_XE_PMU_RENDER_GROUP_BUSY(0):
*
* .. code-block:: C
*
@@ -991,7 +998,7 @@ struct drm_xe_wait_user_fence {
* attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED;
* attr.use_clockid = 1;
* attr.clockid = CLOCK_MONOTONIC;
* attr.config = XE_PMU_RENDER_GROUP_BUSY(0);
* attr.config = DRM_XE_PMU_RENDER_GROUP_BUSY(0);
*
* fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
*/
@@ -999,15 +1006,15 @@ struct drm_xe_wait_user_fence {
/*
* Top bits of every counter are GT id.
*/
#define __XE_PMU_GT_SHIFT (56)
#define __DRM_XE_PMU_GT_SHIFT (56)
#define ___XE_PMU_OTHER(gt, x) \
(((__u64)(x)) | ((__u64)(gt) << __XE_PMU_GT_SHIFT))
#define ___DRM_XE_PMU_OTHER(gt, x) \
(((__u64)(x)) | ((__u64)(gt) << __DRM_XE_PMU_GT_SHIFT))
#define XE_PMU_RENDER_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 0)
#define XE_PMU_COPY_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 1)
#define XE_PMU_MEDIA_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 2)
#define XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___XE_PMU_OTHER(gt, 3)
#define DRM_XE_PMU_RENDER_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 0)
#define DRM_XE_PMU_COPY_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 1)
#define DRM_XE_PMU_MEDIA_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 2)
#define DRM_XE_PMU_ANY_ENGINE_GROUP_BUSY(gt) ___DRM_XE_PMU_OTHER(gt, 3)
#if defined(__cplusplus)
}


@@ -80,7 +80,7 @@ iris_xe_init_batch(struct iris_bufmgr *bufmgr,
.num_placements = count,
};
struct drm_xe_exec_queue_set_property exec_queue_property = {
.property = XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
.value = iris_context_priority_to_drm_sched_priority(priority),
};
int ret = intel_ioctl(iris_bufmgr_get_fd(bufmgr),


@@ -31,7 +31,7 @@ bool
iris_xe_init_global_vm(struct iris_bufmgr *bufmgr, uint32_t *vm_id)
{
struct drm_xe_vm_create create = {
.flags = DRM_XE_VM_CREATE_SCRATCH_PAGE,
.flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
};
if (intel_ioctl(iris_bufmgr_get_fd(bufmgr), DRM_IOCTL_XE_VM_CREATE, &create))
return false;


@@ -52,10 +52,10 @@ xe_gem_create(struct iris_bufmgr *bufmgr,
* do not know what the process this is shared with will do with it
*/
if (alloc_flags & BO_ALLOC_SCANOUT)
flags |= XE_GEM_CREATE_FLAG_SCANOUT;
flags |= DRM_XE_GEM_CREATE_FLAG_SCANOUT;
if (!intel_vram_all_mappable(iris_bufmgr_get_device_info(bufmgr)) &&
heap_flags == IRIS_HEAP_DEVICE_LOCAL_PREFERRED)
flags |= XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
flags |= DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
struct drm_xe_gem_create gem_create = {
.vm_id = vm_id,
@@ -89,7 +89,7 @@ xe_gem_mmap(struct iris_bufmgr *bufmgr, struct iris_bo *bo)
static inline int
xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op)
{
uint32_t handle = op == XE_VM_BIND_OP_UNMAP ? 0 : bo->gem_handle;
uint32_t handle = op == DRM_XE_VM_BIND_OP_UNMAP ? 0 : bo->gem_handle;
uint64_t range, obj_offset = 0;
int ret;
@@ -102,8 +102,8 @@ xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op)
if (bo->real.userptr) {
handle = 0;
obj_offset = (uintptr_t)bo->real.map;
if (op == XE_VM_BIND_OP_MAP)
op = XE_VM_BIND_OP_MAP_USERPTR;
if (op == DRM_XE_VM_BIND_OP_MAP)
op = DRM_XE_VM_BIND_OP_MAP_USERPTR;
}
struct drm_xe_vm_bind args = {
@@ -126,13 +126,13 @@ xe_gem_vm_bind_op(struct iris_bo *bo, uint32_t op)
static bool
xe_gem_vm_bind(struct iris_bo *bo)
{
return xe_gem_vm_bind_op(bo, XE_VM_BIND_OP_MAP) == 0;
return xe_gem_vm_bind_op(bo, DRM_XE_VM_BIND_OP_MAP) == 0;
}
static bool
xe_gem_vm_unbind(struct iris_bo *bo)
{
return xe_gem_vm_bind_op(bo, XE_VM_BIND_OP_UNMAP) == 0;
return xe_gem_vm_bind_op(bo, DRM_XE_VM_BIND_OP_UNMAP) == 0;
}
static bool
@@ -160,7 +160,7 @@ xe_batch_check_for_reset(struct iris_batch *batch)
enum pipe_reset_status status = PIPE_NO_RESET;
struct drm_xe_exec_queue_get_property exec_queue_get_property = {
.exec_queue_id = batch->xe.exec_queue_id,
.property = XE_EXEC_QUEUE_GET_PROPERTY_BAN,
.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
};
int ret = intel_ioctl(iris_bufmgr_get_fd(batch->screen->bufmgr),
DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY,
@@ -330,10 +330,10 @@ xe_batch_submit(struct iris_batch *batch)
util_dynarray_foreach(&batch->exec_fences, struct iris_batch_fence,
fence) {
uint32_t flags = DRM_XE_SYNC_SYNCOBJ;
uint32_t flags = DRM_XE_SYNC_FLAG_SYNCOBJ;
if (fence->flags & IRIS_BATCH_FENCE_SIGNAL)
flags |= DRM_XE_SYNC_SIGNAL;
flags |= DRM_XE_SYNC_FLAG_SIGNAL;
syncs[i].handle = fence->handle;
syncs[i].flags = flags;


@@ -65,12 +65,12 @@ xe_query_config(int fd, struct intel_device_info *devinfo)
if (!config)
return false;
if (config->info[XE_QUERY_CONFIG_FLAGS] & XE_QUERY_CONFIG_FLAGS_HAS_VRAM)
if (config->info[DRM_XE_QUERY_CONFIG_FLAGS] & DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM)
devinfo->has_local_mem = true;
devinfo->revision = (config->info[XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xFFFF;
devinfo->gtt_size = 1ull << config->info[XE_QUERY_CONFIG_VA_BITS];
devinfo->mem_alignment = config->info[XE_QUERY_CONFIG_MIN_ALIGNMENT];
devinfo->revision = (config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] >> 16) & 0xFFFF;
devinfo->gtt_size = 1ull << config->info[DRM_XE_QUERY_CONFIG_VA_BITS];
devinfo->mem_alignment = config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT];
free(config);
return true;
@@ -80,8 +80,8 @@ bool
intel_device_info_xe_query_regions(int fd, struct intel_device_info *devinfo,
bool update)
{
struct drm_xe_query_mem_usage *regions;
regions = xe_query_alloc_fetch(fd, DRM_XE_DEVICE_QUERY_MEM_USAGE, NULL);
struct drm_xe_query_mem_regions *regions;
regions = xe_query_alloc_fetch(fd, DRM_XE_DEVICE_QUERY_MEM_REGIONS, NULL);
if (!regions)
return false;
@@ -89,7 +89,7 @@ intel_device_info_xe_query_regions(int fd, struct intel_device_info *devinfo,
struct drm_xe_query_mem_region *region = &regions->regions[i];
switch (region->mem_class) {
case XE_MEM_REGION_CLASS_SYSMEM: {
case DRM_XE_MEM_REGION_CLASS_SYSMEM: {
if (!update) {
devinfo->mem.sram.mem.klass = region->mem_class;
devinfo->mem.sram.mem.instance = region->instance;
@@ -102,7 +102,7 @@ intel_device_info_xe_query_regions(int fd, struct intel_device_info *devinfo,
devinfo->mem.sram.mappable.free = region->total_size - region->used;
break;
}
case XE_MEM_REGION_CLASS_VRAM: {
case DRM_XE_MEM_REGION_CLASS_VRAM: {
if (!update) {
devinfo->mem.vram.mem.klass = region->mem_class;
devinfo->mem.vram.mem.instance = region->instance;
@@ -138,7 +138,7 @@ xe_query_gts(int fd, struct intel_device_info *devinfo)
return false;
for (uint32_t i = 0; i < gt_list->num_gt; i++) {
if (gt_list->gt_list[i].type == XE_QUERY_GT_TYPE_MAIN)
if (gt_list->gt_list[i].type == DRM_XE_QUERY_GT_TYPE_MAIN)
devinfo->timestamp_frequency = gt_list->gt_list[i].clock_freq;
}
@@ -275,11 +275,11 @@ xe_query_topology(int fd, struct intel_device_info *devinfo)
while (topology < end) {
if (topology->gt_id == 0) {
switch (topology->type) {
case XE_TOPO_DSS_GEOMETRY:
case DRM_XE_TOPO_DSS_GEOMETRY:
geo_dss_mask = topology->mask;
geo_dss_num_bytes = topology->num_bytes;
break;
case XE_TOPO_EU_PER_DSS:
case DRM_XE_TOPO_EU_PER_DSS:
eu_per_dss_mask = (uint32_t *)topology->mask;
break;
}


@@ -45,7 +45,7 @@ xe_execute_simple_batch(struct anv_queue *queue,
return vk_errorf(device, VK_ERROR_UNKNOWN, "Unable to create sync obj");
struct drm_xe_sync sync = {
.flags = DRM_XE_SYNC_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
.flags = DRM_XE_SYNC_FLAG_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
.handle = syncobj_handle,
};
struct drm_xe_exec exec = {
@@ -91,14 +91,14 @@ xe_exec_fill_sync(struct drm_xe_sync *xe_sync, struct vk_sync *vk_sync,
xe_sync->handle = syncobj->syncobj;
if (value) {
xe_sync->flags |= DRM_XE_SYNC_TIMELINE_SYNCOBJ;
xe_sync->flags |= DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ;
xe_sync->timeline_value = value;
} else {
xe_sync->flags |= DRM_XE_SYNC_SYNCOBJ;
xe_sync->flags |= DRM_XE_SYNC_FLAG_SYNCOBJ;
}
if (signal)
xe_sync->flags |= DRM_XE_SYNC_SIGNAL;
xe_sync->flags |= DRM_XE_SYNC_FLAG_SIGNAL;
}
static VkResult
@@ -193,7 +193,7 @@ xe_execute_trtt_batch(struct anv_sparse_submission *submit,
VkResult result;
struct drm_xe_sync extra_sync = {
.flags = DRM_XE_SYNC_TIMELINE_SYNCOBJ | DRM_XE_SYNC_SIGNAL,
.flags = DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ | DRM_XE_SYNC_FLAG_SIGNAL,
.handle = trtt->timeline_handle,
.timeline_value = trtt_bbo->timeline_val,
};

View File

@@ -37,7 +37,7 @@ bool anv_xe_device_destroy_vm(struct anv_device *device)
VkResult anv_xe_device_setup_vm(struct anv_device *device)
{
struct drm_xe_vm_create create = {
.flags = DRM_XE_VM_CREATE_SCRATCH_PAGE,
.flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
};
if (intel_ioctl(device->fd, DRM_IOCTL_XE_VM_CREATE, &create) != 0)
return vk_errorf(device, VK_ERROR_INITIALIZATION_FAILED,
@@ -97,7 +97,7 @@ anv_xe_physical_device_get_parameters(struct anv_physical_device *device)
device->has_exec_timeline = true;
device->max_context_priority =
drm_sched_priority_to_vk_priority(config->info[XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY]);
drm_sched_priority_to_vk_priority(config->info[DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY]);
free(config);
return VK_SUCCESS;
@@ -163,7 +163,7 @@ anv_xe_get_device_status(struct anv_device *device, uint32_t exec_queue_id)
VkResult result = VK_SUCCESS;
struct drm_xe_exec_queue_get_property exec_queue_get_property = {
.exec_queue_id = exec_queue_id,
.property = XE_EXEC_QUEUE_GET_PROPERTY_BAN,
.property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
};
int ret = intel_ioctl(device->fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY,
&exec_queue_get_property);


@@ -45,11 +45,11 @@ xe_gem_create(struct anv_device *device,
uint32_t flags = 0;
if (alloc_flags & ANV_BO_ALLOC_SCANOUT)
flags |= XE_GEM_CREATE_FLAG_SCANOUT;
flags |= DRM_XE_GEM_CREATE_FLAG_SCANOUT;
if ((alloc_flags & (ANV_BO_ALLOC_MAPPED | ANV_BO_ALLOC_LOCAL_MEM_CPU_VISIBLE)) &&
!(alloc_flags & ANV_BO_ALLOC_NO_LOCAL_MEM) &&
device->physical->vram_non_mappable.size > 0)
flags |= XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
flags |= DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM;
struct drm_xe_gem_create gem_create = {
/* From xe_drm.h: If a VM is specified, this BO must:
@@ -132,20 +132,20 @@ xe_vm_bind_op(struct anv_device *device,
.range = bind->size,
.addr = intel_48b_address(bind->address),
.tile_mask = 0,
.op = XE_VM_BIND_OP_UNMAP,
.op = DRM_XE_VM_BIND_OP_UNMAP,
.flags = 0,
.region = 0,
.prefetch_mem_region_instance = 0,
};
if (bind->op == ANV_VM_BIND) {
if (!bo) {
xe_bind->op = XE_VM_BIND_OP_MAP;
xe_bind->flags |= XE_VM_BIND_FLAG_NULL;
xe_bind->op = DRM_XE_VM_BIND_OP_MAP;
xe_bind->flags |= DRM_XE_VM_BIND_FLAG_NULL;
assert(xe_bind->obj_offset == 0);
} else if (bo->from_host_ptr) {
xe_bind->op = XE_VM_BIND_OP_MAP_USERPTR;
xe_bind->op = DRM_XE_VM_BIND_OP_MAP_USERPTR;
} else {
xe_bind->op = XE_VM_BIND_OP_MAP;
xe_bind->op = DRM_XE_VM_BIND_OP_MAP;
xe_bind->obj = bo->gem_handle;
}
}


@@ -118,7 +118,7 @@ create_engine(struct anv_device *device,
struct drm_xe_exec_queue_set_property exec_queue_property = {
.exec_queue_id = create.exec_queue_id,
.property = XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
.value = anv_vk_priority_to_drm_sched_priority(priority),
};
ret = intel_ioctl(device->fd, DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY,