Merge tag 'tee-cleanup-for-5.7' of https://git.linaro.org/people/jens.wiklander/linux-tee into arm/drivers
Cleanup shared memory handling in TEE subsystem

The highlights are:
- Removing redundant or unused fields in struct tee_shm
- Only assigning userspace shm IDs for shared memory objects originating from user space

* tag 'tee-cleanup-for-5.7' of https://git.linaro.org/people/jens.wiklander/linux-tee:
  tee: tee_shm_op_mmap(): use TEE_SHM_USER_MAPPED
  tee: remove redundant teedev in struct tee_shm
  tee: don't assign shm id for private shms
  tee: remove unused tee_shm_priv_alloc()
  tee: remove linked list of struct tee_shm

Link: https://lore.kernel.org/r/20200228140925.GA12393@jade
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
commit 021249ec6f
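In short, after this merge a struct tee_shm reaches its device only through shm->ctx->teedev, the per-context list_shm bookkeeping is gone, and an IDR id is assigned only when the buffer is exported to user space as a dma-buf. The standalone C sketch below is not part of the commit and not kernel code; it only models that id-assignment policy, and every name in it (model_ctx, model_shm, MODEL_SHM_*) is an illustrative stand-in for the tee_shm/idr machinery shown in the hunks that follow.

/*
 * Standalone model of the post-merge policy (not kernel code): a shm object
 * keeps only a pointer to its context, and an id is handed out solely when
 * the buffer is user-space visible (dma-buf), mirroring the idr_alloc()
 * call that the series moves inside the TEE_SHM_DMA_BUF branch.
 */
#include <stdio.h>

#define MODEL_SHM_MAPPED   0x1
#define MODEL_SHM_DMA_BUF  0x2

struct model_ctx {
        int next_id;            /* stands in for teedev->idr */
};

struct model_shm {
        struct model_ctx *ctx;  /* only remaining link; no teedev, no list node */
        unsigned int flags;
        int id;                 /* stays -1 for driver-private buffers */
};

static void model_shm_init(struct model_shm *shm, struct model_ctx *ctx,
                           unsigned int flags)
{
        shm->ctx = ctx;
        shm->flags = flags;
        shm->id = -1;
        if (flags & MODEL_SHM_DMA_BUF)  /* kernel: idr_alloc() under teedev->mutex */
                shm->id = ctx->next_id++;
}

int main(void)
{
        struct model_ctx ctx = { .next_id = 1 };
        struct model_shm priv, exported;

        model_shm_init(&priv, &ctx, MODEL_SHM_MAPPED);
        model_shm_init(&exported, &ctx, MODEL_SHM_MAPPED | MODEL_SHM_DMA_BUF);

        printf("private shm id:  %d (never registered in the idr)\n", priv.id);
        printf("exported shm id: %d (user space can look it up)\n", exported.id);
        return 0;
}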
drivers/tee/tee_core.c
@@ -44,7 +44,6 @@ static struct tee_context *teedev_open(struct tee_device *teedev)
 
 	kref_init(&ctx->refcount);
 	ctx->teedev = teedev;
-	INIT_LIST_HEAD(&ctx->list_shm);
 	rc = teedev->desc->ops->open(ctx);
 	if (rc)
 		goto err;
drivers/tee/tee_private.h
@@ -37,7 +37,8 @@ struct tee_shm_pool {
  * @num_users:	number of active users of this device
  * @c_no_user:	completion used when unregistering the device
  * @mutex:	mutex protecting @num_users and @idr
- * @idr:	register of shared memory object allocated on this device
+ * @idr:	register of user space shared memory objects allocated or
+ *		registered on this device
  * @pool:	shared memory pool
  */
 struct tee_device {
drivers/tee/tee_shm.c
@@ -13,13 +13,13 @@
 
 static void tee_shm_release(struct tee_shm *shm)
 {
-	struct tee_device *teedev = shm->teedev;
+	struct tee_device *teedev = shm->ctx->teedev;
 
-	mutex_lock(&teedev->mutex);
-	idr_remove(&teedev->idr, shm->id);
-	if (shm->ctx)
-		list_del(&shm->link);
-	mutex_unlock(&teedev->mutex);
+	if (shm->flags & TEE_SHM_DMA_BUF) {
+		mutex_lock(&teedev->mutex);
+		idr_remove(&teedev->idr, shm->id);
+		mutex_unlock(&teedev->mutex);
+	}
 
 	if (shm->flags & TEE_SHM_POOL) {
 		struct tee_shm_pool_mgr *poolm;
@@ -44,8 +44,7 @@ static void tee_shm_release(struct tee_shm *shm)
 		kfree(shm->pages);
 	}
 
-	if (shm->ctx)
-		teedev_ctx_put(shm->ctx);
+	teedev_ctx_put(shm->ctx);
 
 	kfree(shm);
 
@@ -77,7 +76,7 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 	size_t size = vma->vm_end - vma->vm_start;
 
 	/* Refuse sharing shared memory provided by application */
-	if (shm->flags & TEE_SHM_REGISTER)
+	if (shm->flags & TEE_SHM_USER_MAPPED)
 		return -EINVAL;
 
 	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
@@ -91,20 +90,14 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
 	.mmap = tee_shm_op_mmap,
 };
 
-static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
-				       struct tee_device *teedev,
-				       size_t size, u32 flags)
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 {
+	struct tee_device *teedev = ctx->teedev;
 	struct tee_shm_pool_mgr *poolm = NULL;
 	struct tee_shm *shm;
 	void *ret;
 	int rc;
 
-	if (ctx && ctx->teedev != teedev) {
-		dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
-		return ERR_PTR(-EINVAL);
-	}
-
 	if (!(flags & TEE_SHM_MAPPED)) {
 		dev_err(teedev->dev.parent,
 			"only mapped allocations supported\n");
@@ -132,7 +125,6 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
 	}
 
 	shm->flags = flags | TEE_SHM_POOL;
-	shm->teedev = teedev;
 	shm->ctx = ctx;
 	if (flags & TEE_SHM_DMA_BUF)
 		poolm = teedev->pool->dma_buf_mgr;
@@ -145,17 +137,18 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
 		goto err_kfree;
 	}
 
-	mutex_lock(&teedev->mutex);
-	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
-	mutex_unlock(&teedev->mutex);
-	if (shm->id < 0) {
-		ret = ERR_PTR(shm->id);
-		goto err_pool_free;
-	}
-
 	if (flags & TEE_SHM_DMA_BUF) {
 		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 
+		mutex_lock(&teedev->mutex);
+		shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
+		mutex_unlock(&teedev->mutex);
+		if (shm->id < 0) {
+			ret = ERR_PTR(shm->id);
+			goto err_pool_free;
+		}
+
 		exp_info.ops = &tee_shm_dma_buf_ops;
 		exp_info.size = shm->size;
 		exp_info.flags = O_RDWR;
@@ -168,18 +161,16 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
 		}
 	}
 
-	if (ctx) {
+	if (ctx)
 		teedev_ctx_get(ctx);
-		mutex_lock(&teedev->mutex);
-		list_add_tail(&shm->link, &ctx->list_shm);
-		mutex_unlock(&teedev->mutex);
-	}
 
 	return shm;
 err_rem:
-	mutex_lock(&teedev->mutex);
-	idr_remove(&teedev->idr, shm->id);
-	mutex_unlock(&teedev->mutex);
+	if (flags & TEE_SHM_DMA_BUF) {
+		mutex_lock(&teedev->mutex);
+		idr_remove(&teedev->idr, shm->id);
+		mutex_unlock(&teedev->mutex);
+	}
 err_pool_free:
 	poolm->ops->free(poolm, shm);
 err_kfree:
@@ -188,31 +179,8 @@ err_dev_put:
 	tee_device_put(teedev);
 	return ret;
 }
-
-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx:	Context that allocates the shared memory
- * @size:	Requested size of shared memory
- * @flags:	Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
- * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
- * associated with a dma-buf handle, else driver private memory.
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
-{
-	return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
-}
 EXPORT_SYMBOL_GPL(tee_shm_alloc);
 
-struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
-{
-	return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
-}
-EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);
-
 struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 				 size_t length, u32 flags)
 {
@@ -245,7 +213,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 	}
 
 	shm->flags = flags | TEE_SHM_REGISTER;
-	shm->teedev = teedev;
 	shm->ctx = ctx;
 	shm->id = -1;
 	addr = untagged_addr(addr);
@@ -301,10 +268,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 		}
 	}
 
-	mutex_lock(&teedev->mutex);
-	list_add_tail(&shm->link, &ctx->list_shm);
-	mutex_unlock(&teedev->mutex);
-
 	return shm;
 err:
 	if (shm) {
include/linux/tee_drv.h
@@ -49,7 +49,6 @@ struct tee_shm_pool;
  */
 struct tee_context {
 	struct tee_device *teedev;
-	struct list_head list_shm;
 	void *data;
 	struct kref refcount;
 	bool releasing;
@@ -168,9 +167,7 @@ void tee_device_unregister(struct tee_device *teedev);
 
 /**
  * struct tee_shm - shared memory object
- * @teedev:	device used to allocate the object
- * @ctx:	context using the object, if NULL the context is gone
- * @link	link element
+ * @ctx:	context using the object
  * @paddr:	physical address of the shared memory
  * @kaddr:	virtual address of the shared memory
  * @size:	size of shared memory
@@ -185,9 +182,7 @@ void tee_device_unregister(struct tee_device *teedev);
  * subsystem and from drivers that implements their own shm pool manager.
  */
 struct tee_shm {
-	struct tee_device *teedev;
 	struct tee_context *ctx;
-	struct list_head link;
 	phys_addr_t paddr;
 	void *kaddr;
 	size_t size;
@@ -318,18 +313,6 @@ void *tee_get_drvdata(struct tee_device *teedev);
  */
 struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
 
-/**
- * tee_shm_priv_alloc() - Allocate shared memory privately
- * @dev:	Device that allocates the shared memory
- * @size:	Requested size of shared memory
- *
- * Allocates shared memory buffer that is not associated with any client
- * context. Such buffers are owned by TEE driver and used for internal calls.
- *
- * @returns a pointer to 'struct tee_shm'
- */
-struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size);
-
 /**
  * tee_shm_register() - Register shared memory buffer
  * @ctx:	Context that registers the shared memory