
drm/shmem-helpers: Simplify dma-buf importing

- Ditch the ->pages array
- Make it a private gem bo, which means no shmem object, which means
  fireworks if anyone calls drm_gem_object_get_pages. But we've just
  made sure that's all covered.
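
As a minimal sketch (not part of this patch) of what the private-object rule means
for code built on these helpers: the imported object is now set up with
drm_gem_private_object_init(), so obj->filp stays NULL and shmem->pages is never
populated; only the attached sg_table describes the memory. example_get_backing()
below is a hypothetical caller, the other identifiers are the existing helper API.

    #include <linux/errno.h>
    #include <drm/drm_gem.h>
    #include <drm/drm_gem_shmem_helper.h>

    /* Hypothetical caller, illustrative only. */
    static int example_get_backing(struct drm_gem_object *obj)
    {
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        if (obj->import_attach) {
            /*
             * Imported dma-buf: created as a private GEM object, so there
             * is no shmem file to page in; only shmem->sgt is valid here.
             */
            return shmem->sgt ? 0 : -EINVAL;
        }

        /* Native shmem object: the page-based helpers remain safe to use. */
        return drm_gem_shmem_get_pages(shmem);
    }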

v2: Rebase

v3: I forgot to remove the page_count mangling from the free path too.
Noticed by Boris while testing.
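
To make the v3 fix concrete, a rough sketch of the destructor for imported objects
after this change (my reading of the drm_gem_shmem_free_object() hunk below, with
the lock teardown omitted; example_shmem_free_object() is an illustrative name):
since import no longer fakes pages_use_count = 1, still decrementing it on free
would underflow the counter and trip the final WARN_ON.

    /* Sketch; assumes the usual includes of drivers/gpu/drm/drm_gem_shmem_helper.c. */
    static void example_shmem_free_object(struct drm_gem_object *obj)
    {
        struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

        WARN_ON(shmem->vmap_use_count);

        if (obj->import_attach) {
            /* No pages_use_count-- and no kvfree(shmem->pages) anymore. */
            drm_prime_gem_destroy(obj, shmem->sgt);
        }
        /* Native objects keep their existing dma_unmap_sg()/put_pages() teardown. */

        WARN_ON(shmem->pages_use_count);

        drm_gem_object_release(obj);
        kfree(shmem);
    }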

Cc: Boris Brezillon <boris.brezillon@collabora.com>
Tested-by: Boris Brezillon <boris.brezillon@collabora.com>
Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Noralf Trønnes <noralf@tronnes.org>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200529140542.2103713-1-daniel.vetter@ffwll.ch
Author: Daniel Vetter
Date:   2020-05-29 16:05:42 +02:00
Parent: 5264083573
Commit: 7d2cd72a9a

drivers/gpu/drm/drm_gem_shmem_helper.c

@@ -35,22 +35,12 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
.mmap = drm_gem_shmem_mmap,
};
/**
* drm_gem_shmem_create - Allocate an object with the given size
* @dev: DRM device
* @size: Size of the object to allocate
*
* This function creates a shmem GEM object.
*
* Returns:
* A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
static struct drm_gem_shmem_object *
__drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
{
struct drm_gem_shmem_object *shmem;
struct drm_gem_object *obj;
int ret;
int ret = 0;
size = PAGE_ALIGN(size);
@@ -64,7 +54,10 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
ret = drm_gem_object_init(dev, obj, size);
if (private)
drm_gem_private_object_init(dev, obj, size);
else
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto err_free;
@@ -96,6 +89,21 @@ err_free:
return ERR_PTR(ret);
}
/**
* drm_gem_shmem_create - Allocate an object with the given size
* @dev: DRM device
* @size: Size of the object to allocate
*
* This function creates a shmem GEM object.
*
* Returns:
* A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
* error code on failure.
*/
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
{
return __drm_gem_shmem_create(dev, size, false);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
/**
@@ -113,9 +121,7 @@ void drm_gem_shmem_free_object(struct drm_gem_object *obj)
WARN_ON(shmem->vmap_use_count);
if (obj->import_attach) {
shmem->pages_use_count--;
drm_prime_gem_destroy(obj, shmem->sgt);
kvfree(shmem->pages);
} else {
if (shmem->sgt) {
dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
@@ -371,7 +377,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
struct drm_gem_shmem_object *shmem;
int ret;
shmem = drm_gem_shmem_create(dev, size);
shmem = __drm_gem_shmem_create(dev, size, true);
if (IS_ERR(shmem))
return shmem;
@@ -695,36 +701,16 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
size_t size = PAGE_ALIGN(attach->dmabuf->size);
size_t npages = size >> PAGE_SHIFT;
struct drm_gem_shmem_object *shmem;
int ret;
shmem = drm_gem_shmem_create(dev, size);
if (IS_ERR(shmem))
return ERR_CAST(shmem);
shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
if (!shmem->pages) {
ret = -ENOMEM;
goto err_free_gem;
}
ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
if (ret < 0)
goto err_free_array;
shmem->sgt = sgt;
shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
DRM_DEBUG_PRIME("size = %zu\n", size);
return &shmem->base;
err_free_array:
kvfree(shmem->pages);
err_free_gem:
drm_gem_object_put(&shmem->base);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
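
For reference, my reconstruction of what drm_gem_shmem_prime_import_sg_table()
reduces to after this patch (read from the hunk above plus the new
__drm_gem_shmem_create() helper; a sketch, not a verbatim copy of the resulting
file):

    /* Sketch; assumes the usual includes of drivers/gpu/drm/drm_gem_shmem_helper.c. */
    struct drm_gem_object *
    drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
                                        struct dma_buf_attachment *attach,
                                        struct sg_table *sgt)
    {
        size_t size = PAGE_ALIGN(attach->dmabuf->size);
        struct drm_gem_shmem_object *shmem;

        /* Private GEM object: no shmem file and no ->pages array behind it. */
        shmem = __drm_gem_shmem_create(dev, size, true);
        if (IS_ERR(shmem))
            return ERR_CAST(shmem);

        /* The imported sg_table is the only description of the backing storage. */
        shmem->sgt = sgt;

        DRM_DEBUG_PRIME("size = %zu\n", size);

        return &shmem->base;
    }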