drm/msm: small fence cleanup

Give ourselves a way to wait for a certain fence #.  Makes it easier to
wait on a set of bo's, which we'll need for atomic.

Signed-off-by: Rob Clark <robdclark@gmail.com>
Rob Clark  2014-11-07 18:10:04 -05:00
commit 69193e5060 (parent a8cecf3324)

4 changed files with 39 additions and 21 deletions
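
To make the intent a bit more concrete, here is a rough sketch (not part of this commit; wait_for_bos() and bos_done() are hypothetical names) of how a caller such as a future atomic commit path might use the new msm_queue_fence_cb() together with the msm_gem_fence() helper to wait on a set of bo's by folding them into a single fence #:

/*
 * Hypothetical sketch only, not added by this commit: shows how
 * msm_gem_fence() and msm_queue_fence_cb() compose to wait for a
 * whole set of bo's to become idle.
 */
static void bos_done(struct msm_fence_cb *cb)
{
        /* runs from the driver workqueue once cb->fence has completed,
         * i.e. every bo folded into that fence # is no longer busy
         */
}

static int wait_for_bos(struct drm_device *dev, struct msm_fence_cb *cb,
                struct drm_gem_object **bos, int nbos)
{
        uint32_t fence = 0;
        int i;

        /* fold each bo's last read/write fence into a single fence # */
        for (i = 0; i < nbos; i++)
                fence = max(fence, msm_gem_fence(to_msm_bo(bos[i]),
                                MSM_PREP_READ | MSM_PREP_WRITE));

        INIT_FENCE_CB(cb, bos_done);

        /* queues bos_done() immediately if the fence already completed */
        return msm_queue_fence_cb(dev, cb, fence);
}

Because msm_queue_fence_cb() queues the work right away when the fence # has already completed, callers don't need to special-case idle bo's; the only error case is a callback that is already pending (-EINVAL).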

diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c

@@ -619,6 +619,26 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
 	return ret;
 }
 
+int msm_queue_fence_cb(struct drm_device *dev,
+		struct msm_fence_cb *cb, uint32_t fence)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+	if (!list_empty(&cb->work.entry)) {
+		ret = -EINVAL;
+	} else if (fence > priv->completed_fence) {
+		cb->fence = fence;
+		list_add_tail(&cb->work.entry, &priv->fence_cbs);
+	} else {
+		queue_work(priv->wq, &cb->work);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
 /* called from workqueue */
 void msm_update_fence(struct drm_device *dev, uint32_t fence)
 {

diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h

@@ -154,6 +154,8 @@ int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
 int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
 		struct timespec *timeout);
+int msm_queue_fence_cb(struct drm_device *dev,
+		struct msm_fence_cb *cb, uint32_t fence);
 void msm_update_fence(struct drm_device *dev, uint32_t fence);
 
 int msm_ioctl_gem_submit(struct drm_device *dev, void *data,

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c

@@ -397,23 +397,10 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
 int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
 		struct msm_fence_cb *cb)
 {
-	struct drm_device *dev = obj->dev;
-	struct msm_drm_private *priv = dev->dev_private;
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
-	int ret = 0;
-
-	mutex_lock(&dev->struct_mutex);
-	if (!list_empty(&cb->work.entry)) {
-		ret = -EINVAL;
-	} else if (is_active(msm_obj)) {
-		cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
-		list_add_tail(&cb->work.entry, &priv->fence_cbs);
-	} else {
-		queue_work(priv->wq, &cb->work);
-	}
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
+	uint32_t fence = msm_gem_fence(msm_obj,
+			MSM_PREP_READ | MSM_PREP_WRITE);
+	return msm_queue_fence_cb(obj->dev, cb, fence);
 }
 
 void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -452,12 +439,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
 	int ret = 0;
 
 	if (is_active(msm_obj)) {
-		uint32_t fence = 0;
+		uint32_t fence = msm_gem_fence(msm_obj, op);
 
-		if (op & MSM_PREP_READ)
-			fence = msm_obj->write_fence;
-		if (op & MSM_PREP_WRITE)
-			fence = max(fence, msm_obj->read_fence);
 		if (op & MSM_PREP_NOSYNC)
 			timeout = NULL;

diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h

@@ -70,6 +70,19 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
 	return msm_obj->gpu != NULL;
 }
 
+static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
+		uint32_t op)
+{
+	uint32_t fence = 0;
+
+	if (op & MSM_PREP_READ)
+		fence = msm_obj->write_fence;
+	if (op & MSM_PREP_WRITE)
+		fence = max(fence, msm_obj->read_fence);
+
+	return fence;
+}
+
 #define MAX_CMDS 4
 
 /* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,