
drm: allow to use mmuless SoC

Some SoCs without an MMU have display hardware for which a drm/kms
driver could be implemented.

Before doing that, drm/kms must allow the use of MMU-less devices.
This patch proposes to remove the MMU dependency from the DRM configuration
and adds a CMA helper function to help implement MMU-less display drivers.

version 4:
- add documentation about drm_gem_cma_get_unmapped_area()
- stub it out in the MMU case

Signed-off-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
[danvet: Use recommended struct member references in kernel-doc.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1483521177-21794-4-git-send-email-benjamin.gaignard@linaro.org
commit 62a0d98a18
parent 99c48e1e38
Author: Benjamin Gaignard
Date:   2017-01-04 10:12:57 +01:00
Committer: Daniel Vetter
4 changed files with 101 additions and 2 deletions


@@ -310,6 +310,17 @@ created.
 Drivers that want to map the GEM object upfront instead of handling page
 faults can implement their own mmap file operation handler.
 
+For platforms without MMU the GEM core provides a helper method
+:c:func:`drm_gem_cma_get_unmapped_area`. The mmap() routines will call
+this to get a proposed address for the mapping.
+
+To use :c:func:`drm_gem_cma_get_unmapped_area`, drivers must fill the
+struct :c:type:`struct file_operations <file_operations>` get_unmapped_area
+field with a pointer to :c:func:`drm_gem_cma_get_unmapped_area`.
+
+More detailed information about get_unmapped_area can be found in
+Documentation/nommu-mmap.txt
+
 Memory Coherency
 ----------------
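
As a rough illustration of the hookup described in the documentation hunk above, a
no-MMU CMA-based driver could point the get_unmapped_area field of its
file_operations at the helper. This is only a sketch; the driver name and the rest
of the fops are hypothetical and not part of this patch:

/* Hypothetical no-MMU KMS driver fops -- a sketch, not part of this patch. */
#include <linux/fs.h>
#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

static const struct file_operations foo_drm_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.mmap		= drm_gem_cma_mmap,
#ifndef CONFIG_MMU
	/* Let the CMA helper propose the mapping address on no-MMU kernels. */
	.get_unmapped_area = drm_gem_cma_get_unmapped_area,
#endif
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
};

On MMU kernels the field is simply left unset so the core's regular mmap address
selection is used.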


@@ -6,7 +6,7 @@
 #
 menuconfig DRM
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
-	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
+	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && HAS_DMA
 	select HDMI
 	select FB_CMDLINE
 	select I2C
@@ -113,7 +113,7 @@ config DRM_LOAD_EDID_FIRMWARE
 config DRM_TTM
 	tristate
-	depends on DRM
+	depends on DRM && MMU
 	help
 	  GPU memory management subsystem for devices with multiple
 	  GPU memory types. Will be enabled automatically if a device driver


@@ -358,6 +358,77 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
 
+#ifndef CONFIG_MMU
+/**
+ * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases
+ * @filp: file object
+ * @addr: memory address
+ * @len: buffer size
+ * @pgoff: page offset
+ * @flags: memory flags
+ *
+ * This function is used in noMMU platforms to propose address mapping
+ * for a given buffer.
+ * It's intended to be used as a direct handler for the struct
+ * &file_operations.get_unmapped_area operation.
+ *
+ * Returns:
+ * mapping address on success or a negative error code on failure.
+ */
+unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+					    unsigned long addr,
+					    unsigned long len,
+					    unsigned long pgoff,
+					    unsigned long flags)
+{
+	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_object *obj = NULL;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_vma_offset_node *node;
+
+	if (drm_device_is_unplugged(dev))
+		return -ENODEV;
+
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+						  pgoff,
+						  len >> PAGE_SHIFT);
+	if (likely(node)) {
+		obj = container_of(node, struct drm_gem_object, vma_node);
+		/*
+		 * When the object is being freed, after it hits 0-refcnt it
+		 * proceeds to tear down the object. In the process it will
+		 * attempt to remove the VMA offset and so acquire this
+		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
+		 * that matches our range, we know it is in the process of being
+		 * destroyed and will be freed as soon as we release the lock -
+		 * so we have to check for the 0-refcnted object and treat it as
+		 * invalid.
+		 */
+		if (!kref_get_unless_zero(&obj->refcount))
+			obj = NULL;
+	}
+
+	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+
+	if (!obj)
+		return -EINVAL;
+
+	if (!drm_vma_node_is_allowed(node, priv)) {
+		drm_gem_object_unreference_unlocked(obj);
+		return -EACCES;
+	}
+
+	cma_obj = to_drm_gem_cma_obj(obj);
+
+	drm_gem_object_unreference_unlocked(obj);
+
+	return cma_obj->vaddr ? (unsigned long)cma_obj->vaddr : -EINVAL;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_get_unmapped_area);
+#endif
+
 #ifdef CONFIG_DEBUG_FS
 /**
  * drm_gem_cma_describe - describe a CMA GEM object for debugfs
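
For context, here is a hedged userspace sketch of the path this helper serves: on a
no-MMU kernel the mmap() below goes through the driver's get_unmapped_area handler,
so the address that comes back is the CMA buffer address proposed by
drm_gem_cma_get_unmapped_area(). Function and variable names are illustrative only,
not part of this patch:

/* Userspace sketch (libdrm): map a dumb buffer on a no-MMU system. */
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

void *map_dumb_buffer(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_mode_map_dumb map = { .handle = handle };

	/* Ask the kernel for the fake mmap offset of the GEM object. */
	if (drmIoctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return MAP_FAILED;

	/* On no-MMU kernels this ends up in f_op->get_unmapped_area,
	 * i.e. drm_gem_cma_get_unmapped_area(), which hands back the CMA
	 * buffer address instead of creating a new virtual mapping. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, map.offset);
}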


@@ -53,6 +53,23 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
 
 extern const struct vm_operations_struct drm_gem_cma_vm_ops;
 
+#ifndef CONFIG_MMU
+unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+					    unsigned long addr,
+					    unsigned long len,
+					    unsigned long pgoff,
+					    unsigned long flags);
+#else
+static inline unsigned long drm_gem_cma_get_unmapped_area(struct file *filp,
+							   unsigned long addr,
+							   unsigned long len,
+							   unsigned long pgoff,
+							   unsigned long flags)
+{
+	return -EINVAL;
+}
+#endif
+
 #ifdef CONFIG_DEBUG_FS
 void drm_gem_cma_describe(struct drm_gem_cma_object *obj, struct seq_file *m);
 #endif