drm/ttm: use rcu in core ttm
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
parent 5fbad9928f
commit 472db7ab30
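
This patch removes ttm_bo_unreserve_and_wait(), which copied the exclusive fence and the shared fence list out of bo->resv under the lru_lock and then called fence_wait() on each fence it had referenced, and replaces it with a single RCU-protected wait on the reservation object. A minimal sketch of the new wait path follows, assuming the same locking context as the second hunk (reservation ww_mutex and lru_lock held on entry); the helper name and standalone form are illustrative only, not part of the patch:

#include <linux/reservation.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Sketch only: the real change is open-coded in
 * ttm_bo_cleanup_refs_and_unlock(), not a separate helper. */
static int ttm_bo_wait_unreserved_rcu(struct ttm_buffer_object *bo,
				      bool interruptible)
{
	struct ttm_bo_global *glob = bo->glob;
	long lret;

	/* Drop the reservation and LRU locks before sleeping. */
	ww_mutex_unlock(&bo->resv->lock);
	spin_unlock(&glob->lru_lock);

	/* Wait on the exclusive and all shared fences (wait_all = true)
	 * under RCU, bounded by a 30 second timeout, without taking
	 * fence references or allocating a fence array. */
	lret = reservation_object_wait_timeout_rcu(bo->resv, true,
						   interruptible, 30 * HZ);
	if (lret < 0)
		return lret;		/* error or interrupted */
	if (lret == 0)
		return -EBUSY;		/* timed out */

	return 0;	/* caller re-takes lru_lock and re-reserves bo */
}
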
@@ -467,66 +467,6 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
 
-static int ttm_bo_unreserve_and_wait(struct ttm_buffer_object *bo,
-				     bool interruptible)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	struct reservation_object_list *fobj;
-	struct fence *excl = NULL;
-	struct fence **shared = NULL;
-	u32 shared_count = 0, i;
-	int ret = 0;
-
-	fobj = reservation_object_get_list(bo->resv);
-	if (fobj && fobj->shared_count) {
-		shared = kmalloc(sizeof(*shared) * fobj->shared_count,
-				 GFP_KERNEL);
-
-		if (!shared) {
-			ret = -ENOMEM;
-			__ttm_bo_unreserve(bo);
-			spin_unlock(&glob->lru_lock);
-			return ret;
-		}
-
-		for (i = 0; i < fobj->shared_count; ++i) {
-			if (!fence_is_signaled(fobj->shared[i])) {
-				fence_get(fobj->shared[i]);
-				shared[shared_count++] = fobj->shared[i];
-			}
-		}
-		if (!shared_count) {
-			kfree(shared);
-			shared = NULL;
-		}
-	}
-
-	excl = reservation_object_get_excl(bo->resv);
-	if (excl && !fence_is_signaled(excl))
-		fence_get(excl);
-	else
-		excl = NULL;
-
-	__ttm_bo_unreserve(bo);
-	spin_unlock(&glob->lru_lock);
-
-	if (excl) {
-		ret = fence_wait(excl, interruptible);
-		fence_put(excl);
-	}
-
-	if (shared_count > 0) {
-		for (i = 0; i < shared_count; ++i) {
-			if (!ret)
-				ret = fence_wait(shared[i], interruptible);
-			fence_put(shared[i]);
-		}
-		kfree(shared);
-	}
-
-	return ret;
-}
-
 /**
  * function ttm_bo_cleanup_refs_and_unlock
  * If bo idle, remove from delayed- and lru lists, and unref.
@@ -550,9 +490,19 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	ret = ttm_bo_wait(bo, false, false, true);
 
 	if (ret && !no_wait_gpu) {
-		ret = ttm_bo_unreserve_and_wait(bo, interruptible);
-		if (ret)
-			return ret;
+		long lret;
+		ww_mutex_unlock(&bo->resv->lock);
+		spin_unlock(&glob->lru_lock);
+
+		lret = reservation_object_wait_timeout_rcu(bo->resv,
+							   true,
+							   interruptible,
+							   30 * HZ);
+
+		if (lret < 0)
+			return lret;
+		else if (lret == 0)
+			return -EBUSY;
 
 		spin_lock(&glob->lru_lock);
 		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
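
Taken together, the two hunks trade manual fence bookkeeping for the RCU helper: instead of allocating a shared[] array with kmalloc() and pinning each unsignaled fence with fence_get() while the lru_lock is still held, the new code drops both locks first, lets reservation_object_wait_timeout_rcu() walk the fences under RCU, and then re-takes the lru_lock and re-reserves the buffer with __ttm_bo_reserve() before continuing the cleanup.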