
dma-buf: add vmap interface

The main requirement I have for this interface is scanout on USB GPU devices.
Since these devices have to read the framebuffer on every update and compress
it linearly, using per-page kmaps adds significant overhead to each update.

v2: fix warn issues pointed out by Sylwester Nawrocki.

v3: fix the build with !CONFIG_DMA_SHARED_BUFFER and use EXPORT_SYMBOL_GPL for now

Signed-off-by: Dave Airlie <airlied@redhat.com>
Reviewed-by: Rob Clark <rob.clark@linaro.org>
Signed-off-by: Sumit Semwal <sumit.semwal@linaro.org>
Authored by Dave Airlie on 2012-05-20 12:33:56 +05:30; committed by Sumit Semwal
parent 4c78513e45
commit 98f86c9e4a
2 changed files with 48 additions and 0 deletions
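
To illustrate the overhead described in the commit message, here is a minimal importer-side sketch (not part of this patch) contrasting a per-page dma_buf_kmap() loop with a single dma_buf_vmap() of the whole framebuffer. The names fb_dmabuf, shadow and len are hypothetical, and the dma_buf_begin_cpu_access()/dma_buf_end_cpu_access() bracketing a real driver would need is omitted for brevity.

#include <linux/dma-buf.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Before this patch: walk the buffer one page at a time with kmap. */
static void copy_fb_per_page(struct dma_buf *fb_dmabuf, void *shadow, size_t len)
{
	unsigned long pg;

	for (pg = 0; pg * PAGE_SIZE < len; pg++) {
		void *src = dma_buf_kmap(fb_dmabuf, pg);

		memcpy(shadow + pg * PAGE_SIZE, src,
		       min_t(size_t, PAGE_SIZE, len - pg * PAGE_SIZE));
		dma_buf_kunmap(fb_dmabuf, pg, src);
	}
}

/* With this patch: one linear mapping covers the whole scanout buffer. */
static int copy_fb_vmapped(struct dma_buf *fb_dmabuf, void *shadow, size_t len)
{
	void *vaddr = dma_buf_vmap(fb_dmabuf);

	if (!vaddr)
		return -ENOMEM;	/* exporter does not provide a vmap op */

	memcpy(shadow, vaddr, len);
	dma_buf_vunmap(fb_dmabuf, vaddr);
	return 0;
}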

drivers/base/dma-buf.c

@@ -468,3 +468,37 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
	return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);

/**
 * dma_buf_vmap - Create a virtual mapping of the buffer object in kernel
 * address space. The same restrictions as for vmap() and friends apply.
 * @dmabuf: [in] buffer to vmap
 *
 * This call may fail due to a lack of virtual mapping address space.
 * These calls are optional in drivers; the intended use is to map an
 * object linearly into kernel space when it is accessed frequently.
 * Please attempt to use kmap/kunmap before thinking about these interfaces.
 */
void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf))
		return NULL;

	if (dmabuf->ops->vmap)
		return dmabuf->ops->vmap(dmabuf);
	return NULL;
}
EXPORT_SYMBOL_GPL(dma_buf_vmap);

/**
 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
 * @dmabuf: [in] buffer to vunmap
 * @vaddr: [in] vmap pointer to unmap
 */
void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	if (WARN_ON(!dmabuf))
		return;

	if (dmabuf->ops->vunmap)
		dmabuf->ops->vunmap(dmabuf, vaddr);
}
EXPORT_SYMBOL_GPL(dma_buf_vunmap);

include/linux/dma-buf.h

@@ -98,6 +98,9 @@ struct dma_buf_ops {
	void (*kunmap)(struct dma_buf *, unsigned long, void *);

	int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);

	void *(*vmap)(struct dma_buf *);
	void (*vunmap)(struct dma_buf *, void *vaddr);
};
/**
@@ -176,6 +179,8 @@ void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
		 unsigned long);
void *dma_buf_vmap(struct dma_buf *);
void dma_buf_vunmap(struct dma_buf *, void *vaddr);
#else
static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -264,6 +269,15 @@ static inline int dma_buf_mmap(struct dma_buf *dmabuf,
{
	return -ENODEV;
}

static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
{
	return NULL;
}

static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
}
#endif /* CONFIG_DMA_SHARED_BUFFER */
#endif /* __DMA_BUF_H__ */
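
The dma_buf_vmap()/dma_buf_vunmap() wrappers simply dispatch to these new, optional exporter callbacks. As a rough exporter-side sketch (not part of this patch), an exporter whose backing storage is an array of pages could implement .vmap/.vunmap with the kernel's vmap()/vunmap(); struct my_buffer and the my_dmabuf_* identifiers are hypothetical.

#include <linux/dma-buf.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

struct my_buffer {
	struct page **pages;
	unsigned int num_pages;
};

static void *my_dmabuf_vmap(struct dma_buf *dmabuf)
{
	struct my_buffer *buf = dmabuf->priv;

	/* May return NULL if kernel virtual address space is exhausted. */
	return vmap(buf->pages, buf->num_pages, VM_MAP, PAGE_KERNEL);
}

static void my_dmabuf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	vunmap(vaddr);
}

static const struct dma_buf_ops my_dmabuf_ops = {
	/* ... attach, map_dma_buf, release, kmap, mmap, etc. ... */
	.vmap	= my_dmabuf_vmap,
	.vunmap	= my_dmabuf_vunmap,
};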