linux/drivers/gpu/drm/panfrost/panfrost_gem.h
Boris Brezillon 7d2d6d0129 drm/panfrost: Fix a deadlock between the shrinker and madvise path
panfrost_ioctl_madvise() and panfrost_gem_purge() acquire the mappings
and shmem locks in different orders, thus leading to a potential
deadlock. Fix that by making sure panfrost_gem_purge() takes
the mappings lock first.

Fixes: bdefca2d8d ("drm/panfrost: Add the panfrost_gem_mapping concept")
Cc: <stable@vger.kernel.org>
Cc: Christian Hewitt <christianshewitt@gmail.com>
Reported-by: Christian Hewitt <christianshewitt@gmail.com>
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201101174016.839110-1-boris.brezillon@collabora.com
2020-11-03 09:21:52 +01:00
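
The bug described above is a classic ABBA lock inversion: two paths take the same pair of locks in opposite orders, so each thread can end up holding the lock the other is waiting for. A minimal editorial sketch of the problem and the fix, using userspace pthread mutexes as stand-ins for the mappings and shmem locks (illustration only, not part of the kernel sources; compile with `cc -pthread`):

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for bo->mappings.lock and shmem->pages_lock. */
static pthread_mutex_t mappings_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t shmem_lock = PTHREAD_MUTEX_INITIALIZER;

/* madvise-like path: mappings lock first, then the shmem lock. */
static void *madvise_path(void *arg)
{
	pthread_mutex_lock(&mappings_lock);
	pthread_mutex_lock(&shmem_lock);
	puts("madvise: marked BO purgeable");
	pthread_mutex_unlock(&shmem_lock);
	pthread_mutex_unlock(&mappings_lock);
	return NULL;
}

/*
 * Shrinker-like path. Taking shmem_lock first here (the pre-fix
 * order) can deadlock against madvise_path(): each thread ends up
 * holding the lock the other one needs. The fix is to take the
 * mappings lock first in this path too, so both paths agree on a
 * single lock order.
 */
static void *purge_path(void *arg)
{
	pthread_mutex_lock(&mappings_lock);	/* post-fix order */
	pthread_mutex_lock(&shmem_lock);
	puts("purge: reclaimed BO pages");
	pthread_mutex_unlock(&shmem_lock);
	pthread_mutex_unlock(&mappings_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, madvise_path, NULL);
	pthread_create(&b, NULL, purge_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}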

/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#ifndef __PANFROST_GEM_H__
#define __PANFROST_GEM_H__

#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>

struct panfrost_mmu;

struct panfrost_gem_object {
	struct drm_gem_shmem_object base;
	struct sg_table *sgts;

	/*
	 * Use a list for now. If searching a mapping ever becomes the
	 * bottleneck, we should consider using an RB-tree, or even better,
	 * let the core store drm_gem_object_mapping entries (where we
	 * could place driver specific data) instead of drm_gem_object ones
	 * in its drm_file->object_idr table.
	 *
	 * struct drm_gem_object_mapping {
	 *	struct drm_gem_object *obj;
	 *	void *driver_priv;
	 * };
	 */
	struct {
		struct list_head list;
		struct mutex lock;
	} mappings;

	/*
	 * Count the number of jobs referencing this BO so we don't let the
	 * shrinker reclaim this object prematurely.
	 */
	atomic_t gpu_usecount;

	bool noexec		:1;
	bool is_heap		:1;
};

struct panfrost_gem_mapping {
	struct list_head node;
	struct kref refcount;
	struct panfrost_gem_object *obj;
	struct drm_mm_node mmnode;
	struct panfrost_mmu *mmu;
	bool active		:1;
};

static inline
struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
{
	return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}

static inline struct panfrost_gem_mapping *
drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node)
{
	return container_of(node, struct panfrost_gem_mapping, mmnode);
}

struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);

struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
				   struct dma_buf_attachment *attach,
				   struct sg_table *sgt);

struct panfrost_gem_object *
panfrost_gem_create_with_handle(struct drm_file *file_priv,
				struct drm_device *dev, size_t size,
				u32 flags,
				uint32_t *handle);

int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
void panfrost_gem_close(struct drm_gem_object *obj,
			struct drm_file *file_priv);

struct panfrost_gem_mapping *
panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
			 struct panfrost_file_priv *priv);
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);

void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);

#endif /* __PANFROST_GEM_H__ */
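
As a usage note on the mapping API above: a hypothetical caller might look up and pin a BO's GPU mapping like this. The sketch is editorial, not driver code; the function name is invented, error handling is simplified, and the byte-address conversion assumes the drm_mm node is managed in page units, as panfrost does.

/*
 * Hypothetical caller, for illustration only (not in the driver).
 * Assumes the usual driver includes, e.g. "panfrost_device.h" and
 * "panfrost_gem.h".
 */
static int example_get_gpu_va(struct panfrost_gem_object *bo,
			      struct panfrost_file_priv *priv,
			      u64 *gpu_va)
{
	struct panfrost_gem_mapping *mapping;

	/* Takes a reference (kref) on the mapping for this file context. */
	mapping = panfrost_gem_mapping_get(bo, priv);
	if (!mapping)
		return -EINVAL;

	/* drm_mm node offsets are in pages; convert to a byte address. */
	*gpu_va = mapping->mmnode.start << PAGE_SHIFT;

	/* ... use *gpu_va, e.g. in a job descriptor ... */

	/* Drop the reference taken by panfrost_gem_mapping_get(). */
	panfrost_gem_mapping_put(mapping);
	return 0;
}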