dma-buf/fence: make fence context 64 bit v2
Fence contexts are created on the fly (for example) by the GPU scheduler used in the amdgpu driver as a result of a userspace request. Because of this, misbehaving userspace could in theory force a wraparound of the 32-bit context number. Avoid this by widening the context number to 64 bits: even if userspace manages to allocate a billion contexts per second, it takes more than 500 years for the counter to wrap around.

v2: fix printf formats as well.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Gustavo Padovan <gustavo.padovan@collabora.co.uk>
Acked-by: Sumit Semwal <sumit.semwal@linaro.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1464786612-5010-2-git-send-email-deathsimple@vodafone.de
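A quick sanity check of the figures in the message: a 32-bit counter consumed at 10^9 contexts per second wraps in 2^32 / 10^9 ≈ 4.3 seconds, while a 64-bit counter lasts 2^64 / 10^9 ≈ 1.84 × 10^10 seconds, i.e. roughly 584 years — comfortably "more than 500 years".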
commit 76bf0db554
parent 3377900791
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
  * context or not. One device can have multiple separate contexts,
  * and they're used if some engine can run independently of another.
  */
-static atomic_t fence_context_counter = ATOMIC_INIT(0);
+static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
 
 /**
  * fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
  * This function will return the first index of the number of fences allocated.
  * The fence context is used for setting fence->context to a unique number.
  */
-unsigned fence_context_alloc(unsigned num)
+u64 fence_context_alloc(unsigned num)
 {
 	BUG_ON(!num);
-	return atomic_add_return(num, &fence_context_counter) - num;
+	return atomic64_add_return(num, &fence_context_counter) - num;
 }
 EXPORT_SYMBOL(fence_context_alloc);
 
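For orientation, a minimal caller sketch of the new 64-bit return value (struct my_device, MY_NUM_RINGS and the field names are invented for illustration; only fence_context_alloc() is from this patch): a driver typically reserves one context per ring at init time.

/* Hypothetical sketch: reserve one fence context per ring at init.
 * All names here are illustrative; only fence_context_alloc() is real. */
static void my_init_fence_contexts(struct my_device *mdev)
{
	unsigned i;

	mdev->fence_context = fence_context_alloc(MY_NUM_RINGS); /* u64 base */
	for (i = 0; i < MY_NUM_RINGS; ++i)
		mdev->rings[i].fence_context = mdev->fence_context + i;
}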
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
  */
 void
 fence_init(struct fence *fence, const struct fence_ops *ops,
-	   spinlock_t *lock, unsigned context, unsigned seqno)
+	   spinlock_t *lock, u64 context, unsigned seqno)
 {
 	BUG_ON(!lock);
 	BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
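And the matching fence-creation side, hedged the same way (my_fence_ops, my_ring and its lock/seqno fields are placeholders, not from this patch):

/* Sketch only: emitting a fence on a ring now passes a u64 context. */
static struct fence *my_emit_fence(struct my_ring *ring)
{
	struct fence *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	fence_init(f, &my_fence_ops, &ring->lock,
		   ring->fence_context, ++ring->seqno);
	return f;
}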
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -2032,7 +2032,7 @@ struct amdgpu_device {
 	struct amdgpu_irq_src		hpd_irq;
 
 	/* rings */
-	unsigned			fence_context;
+	u64				fence_context;
 	unsigned			num_rings;
 	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
 	bool				ib_pool_ready;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -427,7 +427,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
 			   soffset, eoffset, eoffset - soffset);
 
 		if (i->fence)
-			seq_printf(m, " protected by 0x%08x on context %d",
+			seq_printf(m, " protected by 0x%08x on context %llu",
 				   i->fence->seqno, i->fence->context);
 
 		seq_printf(m, "\n");
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -125,7 +125,7 @@ struct etnaviv_gpu {
 	u32 completed_fence;
 	u32 retired_fence;
 	wait_queue_head_t fence_event;
-	unsigned int fence_context;
+	u64 fence_context;
 	spinlock_t fence_spinlock;
 
 	/* worker for handling active-list retiring: */
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -57,7 +57,8 @@ struct nouveau_fence_priv {
 	int  (*context_new)(struct nouveau_channel *);
 	void (*context_del)(struct nouveau_channel *);
 
-	u32 contexts, context_base;
+	u32 contexts;
+	u64 context_base;
 	bool uevent;
 };
 
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -96,7 +96,7 @@ retry:
 			return 0;
 
 		if (have_drawable_releases && sc > 300) {
-			FENCE_WARN(fence, "failed to wait on release %d "
+			FENCE_WARN(fence, "failed to wait on release %llu "
 				   "after spincount %d\n",
 				   fence->context & ~0xf0000000, sc);
 			goto signaled;
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2386,7 +2386,7 @@ struct radeon_device {
 	struct radeon_mman		mman;
 	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
 	wait_queue_head_t		fence_queue;
-	unsigned			fence_context;
+	u64				fence_context;
 	struct mutex			ring_lock;
 	struct radeon_ring		ring[RADEON_NUM_RINGS];
 	bool				ib_pool_ready;
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -46,7 +46,7 @@ struct vmw_fence_manager {
 	bool goal_irq_on; /* Protected by @goal_irq_mutex */
 	bool seqno_valid; /* Protected by @lock, and may not be set to true
 			     without the @goal_irq_mutex held. */
-	unsigned ctx;
+	u64 ctx;
 };
 
 struct vmw_user_fence {
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -68,7 +68,8 @@ struct sync_timeline {
 
 	/* protected by child_list_lock */
 	bool		destroyed;
-	int		context, value;
+	u64		context;
+	int		value;
 
 	struct list_head	child_list_head;
 	spinlock_t		child_list_lock;
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -75,7 +75,8 @@ struct fence {
 	struct rcu_head rcu;
 	struct list_head cb_list;
 	spinlock_t *lock;
-	unsigned context, seqno;
+	u64 context;
+	unsigned seqno;
 	unsigned long flags;
 	ktime_t timestamp;
 	int status;
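A side effect worth noting (my observation, not stated in the patch): splitting "unsigned context, seqno" into a u64 and an unsigned grows struct fence by 8 bytes on 64-bit builds, since seqno's 4 bytes are now followed by padding before the 8-byte-aligned flags. A quick host-side check with simplified stand-in layouts:

#include <assert.h>
#include <stdint.h>

/* Simplified stand-ins for the relevant tail of struct fence. */
struct fence_old { unsigned context, seqno; unsigned long flags; };
struct fence_new { uint64_t context; unsigned seqno; unsigned long flags; };

int main(void)
{
	/* On LP64: old = 16 bytes, new = 24 (4 bytes padding after seqno). */
	assert(sizeof(struct fence_new) > sizeof(struct fence_old));
	return 0;
}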
@@ -178,7 +179,7 @@ struct fence_ops {
 };
 
 void fence_init(struct fence *fence, const struct fence_ops *ops,
-		spinlock_t *lock, unsigned context, unsigned seqno);
+		spinlock_t *lock, u64 context, unsigned seqno);
 
 void fence_release(struct kref *kref);
 void fence_free(struct fence *fence);
@@ -352,27 +353,27 @@ static inline signed long fence_wait(struct fence *fence, bool intr)
 	return ret < 0 ? ret : 0;
 }
 
-unsigned fence_context_alloc(unsigned num);
+u64 fence_context_alloc(unsigned num);
 
 #define FENCE_TRACE(f, fmt, args...) \
 	do {								\
 		struct fence *__ff = (f);				\
 		if (config_enabled(CONFIG_FENCE_TRACE))			\
-			pr_info("f %u#%u: " fmt,			\
+			pr_info("f %llu#%u: " fmt,			\
 				__ff->context, __ff->seqno, ##args);	\
 	} while (0)
 
 #define FENCE_WARN(f, fmt, args...) \
 	do {								\
 		struct fence *__ff = (f);				\
-		pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno,	\
+		pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno,	\
 			##args);					\
 	} while (0)
 
 #define FENCE_ERR(f, fmt, args...) \
 	do {								\
 		struct fence *__ff = (f);				\
-		pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno,	\
+		pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno,	\
 			##args);					\
 	} while (0)
 
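The v2 printf fixes follow from kernel type conventions: u64 is unsigned long long on all architectures, so every format printing fence->context must switch from %u/%d to %llu, or gcc's -Wformat checking (and on 32-bit, the printed values) would be wrong. A minimal illustration, assuming a fence f:

/* With context now u64, the format must be %llu; no cast is needed
 * because the kernel defines u64 as unsigned long long everywhere. */
pr_info("fence %llu#%u signaled\n", f->context, f->seqno);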