dma-buf: use new iterator in dma_resv_get_fences v3

This makes the function much simpler since the complex
retry logic is now handled elsewhere.
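
The retry handling is absorbed by the dma_resv_iter helpers used below; roughly, the pattern the rewritten function builds on looks like this (illustrative sketch only, not part of the diff, with obj being the struct dma_resv in question):

	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor)) {
			/* the object changed while iterating unlocked:
			 * drop whatever was collected so far and rebuild */
		}
		/* take a reference and record the fence */
	}
	dma_resv_iter_end(&cursor);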

v2: use sizeof(void*) instead
v3: fix rebase bug

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20211005113742.1101-6-christian.koenig@amd.com
Christian König 2021-06-15 15:25:22 +02:00
parent 96601e8a47
commit d3c80698c9


@@ -489,99 +489,61 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * dma_resv_get_fences - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
- * @pfence_excl: the returned exclusive fence (or NULL)
- * @pshared_count: the number of shared fences returned
- * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * @fence_excl: the returned exclusive fence (or NULL)
+ * @shared_count: the number of shared fences returned
+ * @shared: the array of shared fence ptrs returned (array is krealloc'd to
  * the required size, and must be freed by caller)
  *
  * Retrieve all fences from the reservation object. If the pointer for the
  * exclusive fence is not specified the fence is put into the array of the
  * shared fences as well. Returns either zero or -ENOMEM.
  */
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
-			unsigned int *pshared_count,
-			struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
+			unsigned int *shared_count, struct dma_fence ***shared)
 {
-	struct dma_fence **shared = NULL;
-	struct dma_fence *fence_excl;
-	unsigned int shared_count;
-	int ret = 1;
-
-	do {
-		struct dma_resv_list *fobj;
-		unsigned int i, seq;
-		size_t sz = 0;
-
-		shared_count = i = 0;
-
-		rcu_read_lock();
-		seq = read_seqcount_begin(&obj->seq);
-
-		fence_excl = dma_resv_excl_fence(obj);
-		if (fence_excl && !dma_fence_get_rcu(fence_excl))
-			goto unlock;
-
-		fobj = dma_resv_shared_list(obj);
-		if (fobj)
-			sz += sizeof(*shared) * fobj->shared_max;
-
-		if (!pfence_excl && fence_excl)
-			sz += sizeof(*shared);
-
-		if (sz) {
-			struct dma_fence **nshared;
-
-			nshared = krealloc(shared, sz,
-					   GFP_NOWAIT | __GFP_NOWARN);
-			if (!nshared) {
-				rcu_read_unlock();
-
-				dma_fence_put(fence_excl);
-				fence_excl = NULL;
-
-				nshared = krealloc(shared, sz, GFP_KERNEL);
-				if (nshared) {
-					shared = nshared;
-					continue;
-				}
-
-				ret = -ENOMEM;
-				break;
-			}
-			shared = nshared;
-			shared_count = fobj ? fobj->shared_count : 0;
-			for (i = 0; i < shared_count; ++i) {
-				shared[i] = rcu_dereference(fobj->shared[i]);
-				if (!dma_fence_get_rcu(shared[i]))
-					break;
+	struct dma_resv_iter cursor;
+	struct dma_fence *fence;
+
+	*shared_count = 0;
+	*shared = NULL;
+
+	if (fence_excl)
+		*fence_excl = NULL;
+
+	dma_resv_iter_begin(&cursor, obj, true);
+	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+
+		if (dma_resv_iter_is_restarted(&cursor)) {
+			unsigned int count;
+
+			while (*shared_count)
+				dma_fence_put((*shared)[--(*shared_count)]);
+
+			if (fence_excl)
+				dma_fence_put(*fence_excl);
+
+			count = cursor.fences ? cursor.fences->shared_count : 0;
+			count += fence_excl ? 0 : 1;
+
+			/* Eventually re-allocate the array */
+			*shared = krealloc_array(*shared, count,
+						 sizeof(void *),
+						 GFP_KERNEL);
+			if (count && !*shared) {
+				dma_resv_iter_end(&cursor);
+				return -ENOMEM;
 			}
 		}
 
-		if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
-			while (i--)
-				dma_fence_put(shared[i]);
-			dma_fence_put(fence_excl);
-			goto unlock;
-		}
-
-		ret = 0;
-unlock:
-		rcu_read_unlock();
-	} while (ret);
-
-	if (pfence_excl)
-		*pfence_excl = fence_excl;
-	else if (fence_excl)
-		shared[shared_count++] = fence_excl;
-
-	if (!shared_count) {
-		kfree(shared);
-		shared = NULL;
-	}
-
-	*pshared_count = shared_count;
-	*pshared = shared;
-	return ret;
+		dma_fence_get(fence);
+		if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
+			*fence_excl = fence;
+		else
+			(*shared)[(*shared_count)++] = fence;
+	}
+	dma_resv_iter_end(&cursor);
+
+	return 0;
 }
 EXPORT_SYMBOL_GPL(dma_resv_get_fences);
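
For illustration, a hypothetical caller of the new interface could look like the sketch below; the cleanup follows the kernel-doc above (the shared array is krealloc'd and must be freed by the caller, and each returned fence holds a reference), with obj assumed to be a valid struct dma_resv pointer:

	struct dma_fence *excl, **shared;
	unsigned int count, i;
	int ret;

	ret = dma_resv_get_fences(obj, &excl, &count, &shared);
	if (ret)
		return ret;	/* only -ENOMEM is possible */

	for (i = 0; i < count; ++i) {
		/* ... inspect or wait on shared[i] ... */
		dma_fence_put(shared[i]);
	}
	kfree(shared);

	if (excl)
		dma_fence_put(excl);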