drm/msm: add madvise ioctl
Doesn't do anything too interesting until we wire up shrinker. Pretty much lifted from i915.

Signed-off-by: Rob Clark <robdclark@gmail.com>
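For reference, a minimal userspace sketch (not part of this patch) of how the new ioctl could be driven. Only struct drm_msm_gem_madvise, the MSM_MADV_* values and DRM_IOCTL_MSM_GEM_MADVISE come from the diff below; the bo_madvise() helper name, the fd/handle plumbing and the exact uapi include path are assumptions.

/* Hypothetical helper: 'fd' is an open msm DRM node, 'handle' a valid GEM
 * handle for that fd.  Uses only the uapi added by this patch. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/msm_drm.h>	/* install path of the uapi header may differ */

static int bo_madvise(int fd, uint32_t handle, uint32_t madv, uint32_t *retained)
{
	struct drm_msm_gem_madvise req = {
		.handle = handle,
		.madv = madv,		/* MSM_MADV_WILLNEED or MSM_MADV_DONTNEED */
	};
	int ret = ioctl(fd, DRM_IOCTL_MSM_GEM_MADVISE, &req);

	if (ret == 0 && retained)
		*retained = req.retained;	/* 0: kernel already purged the pages */
	return ret;
}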
commit 4cd33c48ea
parent b5b4c264df
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -690,6 +690,44 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
 	return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
 }
 
+static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_msm_gem_madvise *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	switch (args->madv) {
+	case MSM_MADV_DONTNEED:
+	case MSM_MADV_WILLNEED:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	obj = drm_gem_object_lookup(file, args->handle);
+	if (!obj) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	ret = msm_gem_madvise(obj, args->madv);
+	if (ret >= 0) {
+		args->retained = ret;
+		ret = 0;
+	}
+
+	drm_gem_object_unreference(obj);
+
+unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
 static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -698,6 +736,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static const struct vm_operations_struct vm_ops = {
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -195,6 +195,7 @@ int msm_gem_prime_pin(struct drm_gem_object *obj);
 void msm_gem_prime_unpin(struct drm_gem_object *obj);
 void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
 void *msm_gem_vaddr(struct drm_gem_object *obj);
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive);
 void msm_gem_move_to_active(struct drm_gem_object *obj,
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -413,6 +413,21 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
 	return ret;
 }
 
+/* Update madvise status, returns true if not purged, else
+ * false or -errno.
+ */
+int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+
+	if (msm_obj->madv != __MSM_MADV_PURGED)
+		msm_obj->madv = madv;
+
+	return (msm_obj->madv != __MSM_MADV_PURGED);
+}
+
 /* must be called before _move_to_active().. */
 int msm_gem_sync_object(struct drm_gem_object *obj,
 		struct msm_fence_context *fctx, bool exclusive)
@@ -464,6 +479,7 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
 		struct msm_gpu *gpu, bool exclusive, struct fence *fence)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
 	msm_obj->gpu = gpu;
 	if (exclusive)
 		reservation_object_add_excl_fence(msm_obj->resv, fence);
@@ -532,13 +548,27 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 	struct reservation_object_list *fobj;
 	struct fence *fence;
 	uint64_t off = drm_vma_node_start(&obj->vma_node);
+	const char *madv;
 
 	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
 
-	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n",
+	switch (msm_obj->madv) {
+	case __MSM_MADV_PURGED:
+		madv = " purged";
+		break;
+	case MSM_MADV_DONTNEED:
+		madv = " purgeable";
+		break;
+	case MSM_MADV_WILLNEED:
+	default:
+		madv = "";
+		break;
+	}
+
+	seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
 			msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
 			obj->name, obj->refcount.refcount.counter,
-			off, msm_obj->vaddr, obj->size);
+			off, msm_obj->vaddr, obj->size, madv);
 
 	rcu_read_lock();
 	fobj = rcu_dereference(robj->fence);
@@ -688,6 +718,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
 		msm_obj->vram_node = (void *)&msm_obj[1];
 
 	msm_obj->flags = flags;
+	msm_obj->madv = MSM_MADV_WILLNEED;
 
 	if (resv) {
 		msm_obj->resv = resv;
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -29,6 +29,11 @@ struct msm_gem_object {
 
 	uint32_t flags;
 
+	/**
+	 * Advice: are the backing pages purgeable?
+	 */
+	uint8_t madv;
+
 	/* And object is either:
 	 *  inactive - on priv->inactive_list
 	 *  active   - on one one of the gpu's active_list.. well, at
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -201,6 +201,27 @@ struct drm_msm_wait_fence {
 	struct drm_msm_timespec timeout; /* in */
 };
 
+/* madvise provides a way to tell the kernel in case a buffers contents
+ * can be discarded under memory pressure, which is useful for userspace
+ * bo cache where we want to optimistically hold on to buffer allocate
+ * and potential mmap, but allow the pages to be discarded under memory
+ * pressure.
+ *
+ * Typical usage would involve madvise(DONTNEED) when buffer enters BO
+ * cache, and madvise(WILLNEED) if trying to recycle buffer from BO cache.
+ * In the WILLNEED case, 'retained' indicates to userspace whether the
+ * backing pages still exist.
+ */
+#define MSM_MADV_WILLNEED 0       /* backing pages are needed, status returned in 'retained' */
+#define MSM_MADV_DONTNEED 1       /* backing pages not needed */
+#define __MSM_MADV_PURGED 2       /* internal state */
+
+struct drm_msm_gem_madvise {
+	__u32 handle;         /* in, GEM handle */
+	__u32 madv;           /* in, MSM_MADV_x */
+	__u32 retained;       /* out, whether backing store still exists */
+};
+
 #define DRM_MSM_GET_PARAM 0x00
 /* placeholder:
 #define DRM_MSM_SET_PARAM 0x01
@@ -211,7 +232,8 @@ struct drm_msm_wait_fence {
 #define DRM_MSM_GEM_CPU_FINI 0x05
 #define DRM_MSM_GEM_SUBMIT 0x06
 #define DRM_MSM_WAIT_FENCE 0x07
-#define DRM_MSM_NUM_IOCTLS 0x08
+#define DRM_MSM_GEM_MADVISE 0x08
+#define DRM_MSM_NUM_IOCTLS 0x09
 
 #define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
 #define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
@@ -220,6 +242,7 @@ struct drm_msm_wait_fence {
 #define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
 #define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
 #define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
+#define DRM_IOCTL_MSM_GEM_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_MADVISE, struct drm_msm_gem_madvise)
 
 #if defined(__cplusplus)
 }
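To make the BO cache flow from the uapi comment above concrete, a hedged sketch of the WILLNEED/'retained' round trip; bo_madvise() is the hypothetical helper from the sketch near the top of this page, and the cache bookkeeping itself is left out.

/* Sketch (not part of this patch): recycling a buffer out of a userspace BO
 * cache.  The buffer was marked MSM_MADV_DONTNEED when it went into the cache. */
static int bo_cache_recycle(int fd, uint32_t handle)
{
	uint32_t retained = 0;

	if (bo_madvise(fd, handle, MSM_MADV_WILLNEED, &retained))
		return -1;	/* ioctl failed (e.g. stale handle) */

	if (!retained) {
		/* Pages were purged under memory pressure: the handle is still
		 * valid but its old contents are gone, so the caller must treat
		 * the buffer as freshly allocated / uninitialized. */
	}
	return 0;
}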