5e51cc0005

Cache the count of shared fences in the iterator to avoid dereferencing the
dma_resv object outside the RCU protection. Otherwise the iterator and its
users can observe an inconsistent state which makes it impossible to use
safely. Such as:

<6> [187.517041] [IGT] gem_sync: executing
<7> [187.536343] i915 0000:00:02.0: [drm:i915_gem_context_create_ioctl [i915]] HW context 1 created
<7> [187.536793] i915 0000:00:02.0: [drm:i915_gem_context_create_ioctl [i915]] HW context 1 created
<6> [187.551235] [IGT] gem_sync: starting subtest basic-many-each
<1> [188.935462] BUG: kernel NULL pointer dereference, address: 0000000000000010
<1> [188.935485] #PF: supervisor write access in kernel mode
<1> [188.935495] #PF: error_code(0x0002) - not-present page
<6> [188.935504] PGD 0 P4D 0
<4> [188.935512] Oops: 0002 [#1] PREEMPT SMP NOPTI
<4> [188.935521] CPU: 2 PID: 1467 Comm: gem_sync Not tainted 5.15.0-rc4-CI-Patchwork_21264+ #1
<4> [188.935535] Hardware name: /NUC6CAYB, BIOS AYAPLCEL.86A.0049.2018.0508.1356 05/08/2018
<4> [188.935546] RIP: 0010:dma_resv_get_fences+0x116/0x2d0
<4> [188.935560] Code: 10 85 c0 7f c9 be 03 00 00 00 e8 15 8b df ff eb bd e8 8e c6 ff ff eb b6 41 8b 04 24 49 8b 55 00 48 89 e7 8d 48 01 41 89 0c 24 <4c> 89 34 c2 e8 41 f2 ff ff 49 89 c6 48 85 c0 75 8c 48 8b 44 24 10
<4> [188.935583] RSP: 0018:ffffc900011dbcc8 EFLAGS: 00010202
<4> [188.935593] RAX: 0000000000000000 RBX: 00000000ffffffff RCX: 0000000000000001
<4> [188.935603] RDX: 0000000000000010 RSI: ffffffff822e343c RDI: ffffc900011dbcc8
<4> [188.935613] RBP: ffffc900011dbd48 R08: ffff88812d255bb8 R09: 00000000fffffffe
<4> [188.935623] R10: 0000000000000001 R11: 0000000000000000 R12: ffffc900011dbd44
<4> [188.935633] R13: ffffc900011dbd50 R14: ffff888113d29cc0 R15: 0000000000000000
<4> [188.935643] FS: 00007f68d17e9700(0000) GS:ffff888277900000(0000) knlGS:0000000000000000
<4> [188.935655] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
<4> [188.935665] CR2: 0000000000000010 CR3: 000000012d0a4000 CR4: 00000000003506e0
<4> [188.935676] Call Trace:
<4> [188.935685] i915_gem_object_wait+0x1ff/0x410 [i915]
<4> [188.935988] i915_gem_wait_ioctl+0xf2/0x2a0 [i915]
<4> [188.936272] ? i915_gem_object_wait+0x410/0x410 [i915]
<4> [188.936533] drm_ioctl_kernel+0xae/0x140
<4> [188.936546] drm_ioctl+0x201/0x3d0
<4> [188.936555] ? i915_gem_object_wait+0x410/0x410 [i915]
<4> [188.936820] ? __fget_files+0xc2/0x1c0
<4> [188.936830] ? __fget_files+0xda/0x1c0
<4> [188.936839] __x64_sys_ioctl+0x6d/0xa0
<4> [188.936848] do_syscall_64+0x3a/0xb0
<4> [188.936859] entry_SYSCALL_64_after_hwframe+0x44/0xae

If the shared object has changed during the RCU unlocked period callers
will correctly handle the restart on the next iteration.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Fixes: 96601e8a47 ("dma-buf: use new iterator in dma_resv_copy_fences")
Fixes: d3c80698c9 ("dma-buf: use new iterator in dma_resv_get_fences v3")
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/4274
Cc: Christian König <christian.koenig@amd.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: linux-media@vger.kernel.org
Cc: dri-devel@lists.freedesktop.org
Cc: linaro-mm-sig@lists.linaro.org
Link: https://patchwork.freedesktop.org/patch/msgid/20211008095007.972693-1-tvrtko.ursulin@linux.intel.com
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 *
 * See struct dma_resv for more details.
 */
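
/*
 * Example (illustrative only, not a definitive recipe): a driver that wants
 * to publish a new read fence typically takes the ww_mutex, makes sure a
 * shared slot is available and only then adds the fence:
 *
 *	dma_resv_lock(obj, NULL);
 *	ret = dma_resv_reserve_shared(obj, 1);
 *	if (!ret)
 *		dma_resv_add_shared_fence(obj, fence);
 *	dma_resv_unlock(obj);
 *
 * Writers must hold the lock; readers can instead walk the fences under RCU
 * with the unlocked iterators defined below.
 */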

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with @obj locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_shared_fence(). This is validated
 * when CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_shared() has been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * Note that this function replaces all fences attached to @obj, see also
 * &dma_resv.fence_excl for a discussion of the semantics.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);

/**
 * dma_resv_iter_restart_unlocked - restart the unlocked iterator
 * @cursor: The dma_resv_iter object to restart
 *
 * Restart the unlocked iteration by initializing the cursor object.
 */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
	cursor->index = -1;
	cursor->shared_count = 0;
	if (cursor->all_fences) {
		cursor->fences = dma_resv_shared_list(cursor->obj);
		if (cursor->fences)
			cursor->shared_count = cursor->fences->shared_count;
	} else {
		cursor->fences = NULL;
	}
	cursor->is_restarted = true;
}

/**
 * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
 * @cursor: cursor to record the current position
 *
 * Return all the fences in the dma_resv object which are not yet signaled.
 * The returned fence has an extra local reference so will stay alive.
 * If a concurrent modify is detected the whole iteration is started over again.
 */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	struct dma_resv *obj = cursor->obj;

	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index == -1) {
			cursor->fence = dma_resv_excl_fence(obj);
			cursor->index++;
			if (!cursor->fence)
				continue;

		} else if (!cursor->fences ||
			   cursor->index >= cursor->shared_count) {
			cursor->fence = NULL;
			break;

		} else {
			struct dma_resv_list *fences = cursor->fences;
			unsigned int idx = cursor->index++;

			cursor->fence = rcu_dereference(fences->shared[idx]);
		}
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
			break;
	} while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	if (cursor->all_fences)
		cursor->fences = dma_resv_shared_list(cursor->obj);
	else
		cursor->fences = NULL;

	fence = dma_resv_excl_fence(cursor->obj);
	if (!fence)
		fence = dma_resv_iter_next(cursor);

	cursor->is_restarted = true;
	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fences from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	unsigned int idx;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;
	if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
		return NULL;

	idx = cursor->index++;
	return rcu_dereference_protected(cursor->fences->shared[idx],
					 dma_resv_held(cursor->obj));
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from src to dst. dst-lock must be held.
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f, *excl;

	dma_resv_assert_held(dst);

	list = NULL;
	excl = NULL;

	dma_resv_iter_begin(&cursor, src, true);
	dma_resv_for_each_fence_unlocked(&cursor, f) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);
			dma_fence_put(excl);

			if (cursor.shared_count) {
				list = dma_resv_list_alloc(cursor.shared_count);
				if (!list) {
					dma_resv_iter_end(&cursor);
					return -ENOMEM;
				}

				list->shared_count = 0;

			} else {
				list = NULL;
			}
			excl = NULL;
		}

		dma_fence_get(f);
		if (dma_resv_iter_is_exclusive(&cursor))
			excl = f;
		else
			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
	}
	dma_resv_iter_end(&cursor);

	write_seqcount_begin(&dst->seq);
	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(list);
	dma_fence_put(excl);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @fence_excl: the returned exclusive fence (or NULL)
 * @shared_count: the number of shared fences returned
 * @shared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
			unsigned int *shared_count, struct dma_fence ***shared)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*shared_count = 0;
	*shared = NULL;

	if (fence_excl)
		*fence_excl = NULL;

	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			unsigned int count;

			while (*shared_count)
				dma_fence_put((*shared)[--(*shared_count)]);

			if (fence_excl)
				dma_fence_put(*fence_excl);

			count = cursor.shared_count;
			count += fence_excl ? 0 : 1;

			/* Eventually re-allocate the array */
			*shared = krealloc_array(*shared, count,
						 sizeof(void *),
						 GFP_KERNEL);
			if (count && !*shared) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
		}

		dma_fence_get(fence);
		if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
			*fence_excl = fence;
		else
			(*shared)[(*shared_count)++] = fence;
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
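
/*
 * Example caller (illustrative only): the array is allocated with
 * krealloc_array() and every returned fence holds a reference, so the
 * caller owns both and must release them:
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned int i, count;
 *	int ret;
 *
 *	ret = dma_resv_get_fences(obj, &excl, &count, &shared);
 *	if (!ret) {
 *		for (i = 0; i < count; ++i)
 *			dma_fence_put(shared[i]);
 *		kfree(shared);
 *		dma_fence_put(excl);	/ * NULL-safe * /
 *	}
 */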

/**
 * dma_resv_wait_timeout - Wait on reservation's objects
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, wait_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
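
/*
 * Example (illustrative only): waiting interruptibly on all fences with a
 * one second timeout, mapped onto the usual error conventions:
 *
 *	long ret = dma_resv_wait_timeout(obj, true, true,
 *					 msecs_to_jiffies(1000));
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	if (ret < 0)
 *		return ret;
 */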

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, test_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif