TEE shared memory cleanup for v5.18

Merge tag 'tee-shm-for-v5.18' of git://git.linaro.org:/people/jens.wiklander/linux-tee into arm/drivers

- The TEE shared memory pool based on two pools is replaced with a
  single somewhat more capable pool.
- Replaces tee_shm_alloc() and tee_shm_register() with new functions
  easier to use and maintain. The TEE subsystem and the TEE drivers are
  updated to use the new functions instead.
- The TEE based Trusted keys routines are updated to use the new
  simplified functions above.
- The OP-TEE based rng driver is updated to use the new simplified
  functions above.
- The TEE_SHM flags are refactored to better match their usage.

* tag 'tee-shm-for-v5.18' of git://git.linaro.org:/people/jens.wiklander/linux-tee:
  tee: refactor TEE_SHM_* flags
  tee: replace tee_shm_register()
  KEYS: trusted: tee: use tee_shm_register_kernel_buf()
  tee: add tee_shm_register_{user,kernel}_buf()
  optee: add optee_pool_op_free_helper()
  tee: replace tee_shm_alloc()
  tee: simplify shm pool handling
  tee: add tee_shm_alloc_user_buf()
  tee: remove unused tee_shm_pool_alloc_res_mem()
  hwrng: optee-rng: use tee_shm_alloc_kernel_buf()
  optee: use driver internal tee_context for some rpc

Link: https://lore.kernel.org/r/20220218184802.GA968155@jade
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
commit 25b67f373b
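For drivers tracking this change, the conversion is mechanical: the flag-based tee_shm_alloc()/tee_shm_register() calls become purpose-named helpers. A minimal before/after sketch (hypothetical caller; the tee_context setup is assumed and not shown in this merge):

#include <linux/err.h>
#include <linux/tee_drv.h>

static int example_share(struct tee_context *ctx)
{
	struct tee_shm *shm;

	/* was: tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF); */
	shm = tee_shm_alloc_kernel_buf(ctx, 4096);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... hand shm to the TEE, e.g. via tee_client_invoke_func() ... */

	tee_shm_free(shm);
	return 0;
}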
drivers/char/hw_random/optee-rng.c
@@ -145,10 +145,10 @@ static int optee_rng_init(struct hwrng *rng)
 	struct optee_rng_private *pvt_data = to_optee_rng_private(rng);
 	struct tee_shm *entropy_shm_pool = NULL;
 
-	entropy_shm_pool = tee_shm_alloc(pvt_data->ctx, MAX_ENTROPY_REQ_SZ,
-					 TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	entropy_shm_pool = tee_shm_alloc_kernel_buf(pvt_data->ctx,
+						    MAX_ENTROPY_REQ_SZ);
 	if (IS_ERR(entropy_shm_pool)) {
-		dev_err(pvt_data->dev, "tee_shm_alloc failed\n");
+		dev_err(pvt_data->dev, "tee_shm_alloc_kernel_buf failed\n");
 		return PTR_ERR(entropy_shm_pool);
 	}
 
drivers/tee/amdtee/shm_pool.c
@@ -8,13 +8,17 @@
 #include <linux/psp-sev.h>
 #include "amdtee_private.h"
 
-static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm,
-			 size_t size)
+static int pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
+			 size_t size, size_t align)
 {
 	unsigned int order = get_order(size);
 	unsigned long va;
 	int rc;
 
+	/*
+	 * Ignore alignment since this is already going to be page aligned
+	 * and there's no need for any larger alignment.
+	 */
 	va = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!va)
 		return -ENOMEM;
@@ -34,7 +38,7 @@ static int pool_op_alloc(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm,
 	return 0;
 }
 
-static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm)
+static void pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
 {
 	/* Unmap the shared memory from TEE */
 	amdtee_unmap_shmem(shm);
@@ -42,52 +46,25 @@ static void pool_op_free(struct tee_shm_pool_mgr *poolm, struct tee_shm *shm)
 	shm->kaddr = NULL;
 }
 
-static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
 {
-	kfree(poolm);
+	kfree(pool);
 }
 
-static const struct tee_shm_pool_mgr_ops pool_ops = {
+static const struct tee_shm_pool_ops pool_ops = {
 	.alloc = pool_op_alloc,
 	.free = pool_op_free,
-	.destroy_poolmgr = pool_op_destroy_poolmgr,
+	.destroy_pool = pool_op_destroy_pool,
 };
 
-static struct tee_shm_pool_mgr *pool_mem_mgr_alloc(void)
-{
-	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
-
-	if (!mgr)
-		return ERR_PTR(-ENOMEM);
-
-	mgr->ops = &pool_ops;
-
-	return mgr;
-}
-
 struct tee_shm_pool *amdtee_config_shm(void)
 {
-	struct tee_shm_pool_mgr *priv_mgr;
-	struct tee_shm_pool_mgr *dmabuf_mgr;
-	void *rc;
+	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 
-	rc = pool_mem_mgr_alloc();
-	if (IS_ERR(rc))
-		return rc;
-	priv_mgr = rc;
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
 
-	rc = pool_mem_mgr_alloc();
-	if (IS_ERR(rc)) {
-		tee_shm_pool_mgr_destroy(priv_mgr);
-		return rc;
-	}
-	dmabuf_mgr = rc;
+	pool->ops = &pool_ops;
 
-	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
-	if (IS_ERR(rc)) {
-		tee_shm_pool_mgr_destroy(priv_mgr);
-		tee_shm_pool_mgr_destroy(dmabuf_mgr);
-	}
-
-	return rc;
+	return pool;
 }
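The net effect of the amdtee hunk above: a driver-provided pool is now a single struct tee_shm_pool with three ops, instead of two tee_shm_pool_mgr instances glued together by tee_shm_pool_alloc(). A minimal sketch of a page-backed pool under the new API; every "my_"-prefixed name is illustrative, not part of the TEE subsystem:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>

static int my_pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			    size_t size, size_t align)
{
	/* Page-backed allocation; alignment is already at least PAGE_SIZE. */
	unsigned long va = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(size));

	if (!va)
		return -ENOMEM;
	shm->kaddr = (void *)va;
	shm->size = size;
	return 0;
}

static void my_pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
	shm->kaddr = NULL;
}

static void my_pool_op_destroy_pool(struct tee_shm_pool *pool)
{
	kfree(pool);
}

static const struct tee_shm_pool_ops my_pool_ops = {
	.alloc = my_pool_op_alloc,
	.free = my_pool_op_free,
	.destroy_pool = my_pool_op_destroy_pool,
};

static struct tee_shm_pool *my_config_shm(void)
{
	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return ERR_PTR(-ENOMEM);
	pool->ops = &my_pool_ops;
	return pool;
}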
drivers/tee/optee/Kconfig
@@ -7,11 +7,3 @@ config OPTEE
 	help
 	  This implements the OP-TEE Trusted Execution Environment (TEE)
 	  driver.
-
-config OPTEE_SHM_NUM_PRIV_PAGES
-	int "Private Shared Memory Pages"
-	default 1
-	depends on OPTEE
-	help
-	  This sets the number of private shared memory pages to be
-	  used by OP-TEE TEE driver.
drivers/tee/optee/call.c
@@ -120,7 +120,7 @@ struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
 	if (optee->rpc_arg_count)
 		sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count);
 
-	shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
+	shm = tee_shm_alloc_priv_buf(ctx, sz);
 	if (IS_ERR(shm))
 		return shm;
 
drivers/tee/optee/core.c
@@ -18,8 +18,8 @@
 #include <linux/workqueue.h>
 #include "optee_private.h"
 
-int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
-			       struct tee_shm *shm, size_t size,
+int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+			       size_t size, size_t align,
 			       int (*shm_register)(struct tee_context *ctx,
 						   struct tee_shm *shm,
 						   struct page **pages,
@@ -30,6 +30,10 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
 	struct page *page;
 	int rc = 0;
 
+	/*
+	 * Ignore alignment since this is already going to be page aligned
+	 * and there's no need for any larger alignment.
+	 */
 	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
 	if (!page)
 		return -ENOMEM;
@@ -51,7 +55,6 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
 		for (i = 0; i < nr_pages; i++)
 			pages[i] = page + i;
 
-		shm->flags |= TEE_SHM_REGISTER;
 		rc = shm_register(shm->ctx, shm, pages, nr_pages,
 				  (unsigned long)shm->kaddr);
 		kfree(pages);
@@ -62,10 +65,20 @@ int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
 	return 0;
 
 err:
-	__free_pages(page, order);
+	free_pages((unsigned long)shm->kaddr, order);
 	return rc;
 }
 
+void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+			       int (*shm_unregister)(struct tee_context *ctx,
+						     struct tee_shm *shm))
+{
+	if (shm_unregister)
+		shm_unregister(shm->ctx, shm);
+	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+	shm->kaddr = NULL;
+}
+
 static void optee_bus_scan(struct work_struct *work)
 {
 	WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
@@ -158,6 +171,7 @@ void optee_remove_common(struct optee *optee)
 	optee_unregister_devices();
 
 	optee_notif_uninit(optee);
+	teedev_close_context(optee->ctx);
 	/*
 	 * The two devices have to be unregistered before we can free the
 	 * other resources.
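The two helpers above factor out the common page-backed allocation so each ABI backend only supplies its register/unregister callbacks. A sketch of how a backend is expected to wire them up; "my_shm_register"/"my_shm_unregister" stand in for the real optee_shm_register()/optee_shm_unregister() (or the FF-A equivalents) and are assumptions here:

#include <linux/tee_drv.h>
#include "optee_private.h"

/* Illustrative stand-ins for a backend's register/unregister callbacks. */
static int my_shm_register(struct tee_context *ctx, struct tee_shm *shm,
			   struct page **pages, size_t num_pages,
			   unsigned long start);
static int my_shm_unregister(struct tee_context *ctx, struct tee_shm *shm);

static int my_pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			    size_t size, size_t align)
{
	/* Helper allocates pages, maps them, then calls my_shm_register(). */
	return optee_pool_op_alloc_helper(pool, shm, size, align,
					  my_shm_register);
}

static void my_pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	/* Pass NULL instead when the memory was never registered. */
	optee_pool_op_free_helper(pool, shm, my_shm_unregister);
}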
drivers/tee/optee/device.c
@@ -121,10 +121,9 @@ static int __optee_enumerate_devices(u32 func)
 	if (rc < 0 || !shm_size)
 		goto out_sess;
 
-	device_shm = tee_shm_alloc(ctx, shm_size,
-				   TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	device_shm = tee_shm_alloc_kernel_buf(ctx, shm_size);
 	if (IS_ERR(device_shm)) {
-		pr_err("tee_shm_alloc failed\n");
+		pr_err("tee_shm_alloc_kernel_buf failed\n");
 		rc = PTR_ERR(device_shm);
 		goto out_sess;
 	}
drivers/tee/optee/ffa_abi.c
@@ -369,30 +369,28 @@ static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
  * The main function is optee_ffa_shm_pool_alloc_pages().
  */
 
-static int pool_ffa_op_alloc(struct tee_shm_pool_mgr *poolm,
-			     struct tee_shm *shm, size_t size)
+static int pool_ffa_op_alloc(struct tee_shm_pool *pool,
+			     struct tee_shm *shm, size_t size, size_t align)
 {
-	return optee_pool_op_alloc_helper(poolm, shm, size,
+	return optee_pool_op_alloc_helper(pool, shm, size, align,
 					  optee_ffa_shm_register);
 }
 
-static void pool_ffa_op_free(struct tee_shm_pool_mgr *poolm,
+static void pool_ffa_op_free(struct tee_shm_pool *pool,
 			     struct tee_shm *shm)
 {
-	optee_ffa_shm_unregister(shm->ctx, shm);
-	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
-	shm->kaddr = NULL;
+	optee_pool_op_free_helper(pool, shm, optee_ffa_shm_unregister);
 }
 
-static void pool_ffa_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+static void pool_ffa_op_destroy_pool(struct tee_shm_pool *pool)
 {
-	kfree(poolm);
+	kfree(pool);
 }
 
-static const struct tee_shm_pool_mgr_ops pool_ffa_ops = {
+static const struct tee_shm_pool_ops pool_ffa_ops = {
 	.alloc = pool_ffa_op_alloc,
 	.free = pool_ffa_op_free,
-	.destroy_poolmgr = pool_ffa_op_destroy_poolmgr,
+	.destroy_pool = pool_ffa_op_destroy_pool,
 };
 
 /**
@@ -401,16 +399,16 @@ static const struct tee_shm_pool_mgr_ops pool_ffa_ops = {
 * This pool is used with OP-TEE over FF-A. In this case command buffers
 * and such are allocated from kernel's own memory.
 */
-static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void)
+static struct tee_shm_pool *optee_ffa_shm_pool_alloc_pages(void)
 {
-	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 
-	if (!mgr)
+	if (!pool)
 		return ERR_PTR(-ENOMEM);
 
-	mgr->ops = &pool_ffa_ops;
+	pool->ops = &pool_ffa_ops;
 
-	return mgr;
+	return pool;
 }
 
 /*
@@ -424,6 +422,7 @@ static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void)
 */
 
 static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+					      struct optee *optee,
 					      struct optee_msg_arg *arg)
 {
 	struct tee_shm *shm;
@@ -439,8 +438,8 @@ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
 		shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
 		break;
 	case OPTEE_RPC_SHM_TYPE_KERNEL:
-		shm = tee_shm_alloc(ctx, arg->params[0].u.value.b,
-				    TEE_SHM_MAPPED | TEE_SHM_PRIV);
+		shm = tee_shm_alloc_priv_buf(optee->ctx,
+					     arg->params[0].u.value.b);
 		break;
 	default:
 		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -493,14 +492,13 @@ err_bad_param:
 }
 
 static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
+				    struct optee *optee,
 				    struct optee_msg_arg *arg)
 {
-	struct optee *optee = tee_get_drvdata(ctx->teedev);
-
 	arg->ret_origin = TEEC_ORIGIN_COMMS;
 	switch (arg->cmd) {
 	case OPTEE_RPC_CMD_SHM_ALLOC:
-		handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg);
+		handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
 		break;
 	case OPTEE_RPC_CMD_SHM_FREE:
 		handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
@@ -510,12 +508,12 @@ static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
 	}
 }
 
-static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd,
-				 struct optee_msg_arg *arg)
+static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
+				 u32 cmd, struct optee_msg_arg *arg)
 {
 	switch (cmd) {
 	case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
-		handle_ffa_rpc_func_cmd(ctx, arg);
+		handle_ffa_rpc_func_cmd(ctx, optee, arg);
 		break;
 	case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
 		/* Interrupt delivered by now */
@@ -582,7 +580,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
 			 * above.
 			 */
 			cond_resched();
-			optee_handle_ffa_rpc(ctx, data->data1, rpc_arg);
+			optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
 			cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
 			data->data0 = cmd;
 			data->data1 = 0;
@@ -691,33 +689,6 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
 	return true;
 }
 
-static struct tee_shm_pool *optee_ffa_config_dyn_shm(void)
-{
-	struct tee_shm_pool_mgr *priv_mgr;
-	struct tee_shm_pool_mgr *dmabuf_mgr;
-	void *rc;
-
-	rc = optee_ffa_shm_pool_alloc_pages();
-	if (IS_ERR(rc))
-		return rc;
-	priv_mgr = rc;
-
-	rc = optee_ffa_shm_pool_alloc_pages();
-	if (IS_ERR(rc)) {
-		tee_shm_pool_mgr_destroy(priv_mgr);
-		return rc;
-	}
-	dmabuf_mgr = rc;
-
-	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
-	if (IS_ERR(rc)) {
-		tee_shm_pool_mgr_destroy(priv_mgr);
-		tee_shm_pool_mgr_destroy(dmabuf_mgr);
-	}
-
-	return rc;
-}
-
 static void optee_ffa_get_version(struct tee_device *teedev,
 				  struct tee_ioctl_version_data *vers)
 {
@@ -793,7 +764,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
 {
 	const struct ffa_dev_ops *ffa_ops;
 	unsigned int rpc_arg_count;
+	struct tee_shm_pool *pool;
 	struct tee_device *teedev;
+	struct tee_context *ctx;
 	struct optee *optee;
 	int rc;
 
@@ -813,12 +786,12 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
 	if (!optee)
 		return -ENOMEM;
 
-	optee->pool = optee_ffa_config_dyn_shm();
-	if (IS_ERR(optee->pool)) {
-		rc = PTR_ERR(optee->pool);
-		optee->pool = NULL;
-		goto err;
+	pool = optee_ffa_shm_pool_alloc_pages();
+	if (IS_ERR(pool)) {
+		rc = PTR_ERR(pool);
+		goto err_free_optee;
 	}
+	optee->pool = pool;
 
 	optee->ops = &optee_ffa_ops;
 	optee->ffa.ffa_dev = ffa_dev;
@@ -829,7 +802,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
 			  optee);
 	if (IS_ERR(teedev)) {
 		rc = PTR_ERR(teedev);
-		goto err;
+		goto err_free_pool;
 	}
 	optee->teedev = teedev;
 
@@ -837,50 +810,57 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
 			  optee);
 	if (IS_ERR(teedev)) {
 		rc = PTR_ERR(teedev);
-		goto err;
+		goto err_unreg_teedev;
 	}
 	optee->supp_teedev = teedev;
 
 	rc = tee_device_register(optee->teedev);
 	if (rc)
-		goto err;
+		goto err_unreg_supp_teedev;
 
 	rc = tee_device_register(optee->supp_teedev);
 	if (rc)
-		goto err;
+		goto err_unreg_supp_teedev;
 
 	rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
 	if (rc)
-		goto err;
+		goto err_unreg_supp_teedev;
 	mutex_init(&optee->ffa.mutex);
 	mutex_init(&optee->call_queue.mutex);
 	INIT_LIST_HEAD(&optee->call_queue.waiters);
 	optee_supp_init(&optee->supp);
 	ffa_dev_set_drvdata(ffa_dev, optee);
+	ctx = teedev_open(optee->teedev);
+	if (IS_ERR(ctx))
+		goto err_rhashtable_free;
+	optee->ctx = ctx;
 	rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
-	if (rc) {
-		optee_ffa_remove(ffa_dev);
-		return rc;
-	}
+	if (rc)
+		goto err_close_ctx;
 
 	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
-	if (rc) {
-		optee_ffa_remove(ffa_dev);
-		return rc;
-	}
+	if (rc)
+		goto err_unregister_devices;
 
 	pr_info("initialized driver\n");
 	return 0;
-err:
-	/*
-	 * tee_device_unregister() is safe to call even if the
-	 * devices hasn't been registered with
-	 * tee_device_register() yet.
-	 */
+
+err_unregister_devices:
+	optee_unregister_devices();
+	optee_notif_uninit(optee);
+err_close_ctx:
+	teedev_close_context(ctx);
+err_rhashtable_free:
+	rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+	optee_supp_uninit(&optee->supp);
+	mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
 	tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
 	tee_device_unregister(optee->teedev);
-	if (optee->pool)
-		tee_shm_pool_free(optee->pool);
+err_free_pool:
+	tee_shm_pool_free(pool);
+err_free_optee:
 	kfree(optee);
 	return rc;
 }
drivers/tee/optee/optee_private.h
@@ -53,7 +53,6 @@ struct optee_call_queue {
 
 struct optee_notif {
 	u_int max_key;
-	struct tee_context *ctx;
 	/* Serializes access to the elements below in this struct */
 	spinlock_t lock;
 	struct list_head db;
@@ -134,9 +133,10 @@ struct optee_ops {
 /**
 * struct optee - main service struct
 * @supp_teedev:	supplicant device
+ * @teedev:		client device
 * @ops:		internal callbacks for different ways to reach secure
 *			world
- * @teedev:		client device
+ * @ctx:		driver internal TEE context
 * @smc:		specific to SMC ABI
 * @ffa:		specific to FF-A ABI
 * @call_queue:		queue of threads waiting to call @invoke_fn
@@ -152,6 +152,7 @@ struct optee {
 	struct tee_device *supp_teedev;
 	struct tee_device *teedev;
 	const struct optee_ops *ops;
+	struct tee_context *ctx;
 	union {
 		struct optee_smc smc;
 		struct optee_ffa ffa;
@@ -228,13 +229,16 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
 int optee_enumerate_devices(u32 func);
 void optee_unregister_devices(void);
 
-int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
-			       struct tee_shm *shm, size_t size,
+int optee_pool_op_alloc_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+			       size_t size, size_t align,
 			       int (*shm_register)(struct tee_context *ctx,
 						   struct tee_shm *shm,
 						   struct page **pages,
 						   size_t num_pages,
 						   unsigned long start));
+void optee_pool_op_free_helper(struct tee_shm_pool *pool, struct tee_shm *shm,
+			       int (*shm_unregister)(struct tee_context *ctx,
						     struct tee_shm *shm));
 
 
 void optee_remove_common(struct optee *optee);
drivers/tee/optee/smc_abi.c
@@ -42,7 +42,15 @@
 * 6. Driver initialization.
 */
 
-#define OPTEE_SHM_NUM_PRIV_PAGES	CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
+/*
+ * A typical OP-TEE private shm allocation is 224 bytes (argument struct
+ * with 6 parameters, needed for open session). So with an alignment of 512
+ * we'll waste a bit more than 50%. However, it's only expected that we'll
+ * have a handful of these structs allocated at a time. Most memory will
+ * be allocated aligned to the page size, So all in all this should scale
+ * up and down quite well.
+ */
+#define OPTEE_MIN_STATIC_POOL_ALIGN	9 /* 512 bytes aligned */
 
 /*
 * 1. Convert between struct tee_param and struct optee_msg_param
@@ -230,7 +238,7 @@ static int optee_to_msg_param(struct optee *optee,
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
 		case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
-			if (tee_shm_is_registered(p->u.memref.shm))
+			if (tee_shm_is_dynamic(p->u.memref.shm))
 				rc = to_msg_param_reg_mem(mp, p);
 			else
 				rc = to_msg_param_tmp_mem(mp, p);
@@ -532,38 +540,38 @@ static int optee_shm_unregister_supp(struct tee_context *ctx,
 * The main function is optee_shm_pool_alloc_pages().
 */
 
-static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
-			 struct tee_shm *shm, size_t size)
+static int pool_op_alloc(struct tee_shm_pool *pool,
+			 struct tee_shm *shm, size_t size, size_t align)
 {
 	/*
	 * Shared memory private to the OP-TEE driver doesn't need
	 * to be registered with OP-TEE.
	 */
 	if (shm->flags & TEE_SHM_PRIV)
-		return optee_pool_op_alloc_helper(poolm, shm, size, NULL);
+		return optee_pool_op_alloc_helper(pool, shm, size, align, NULL);
 
-	return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
+	return optee_pool_op_alloc_helper(pool, shm, size, align,
+					  optee_shm_register);
 }
 
-static void pool_op_free(struct tee_shm_pool_mgr *poolm,
+static void pool_op_free(struct tee_shm_pool *pool,
			 struct tee_shm *shm)
 {
 	if (!(shm->flags & TEE_SHM_PRIV))
-		optee_shm_unregister(shm->ctx, shm);
-
-	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
-	shm->kaddr = NULL;
+		optee_pool_op_free_helper(pool, shm, optee_shm_unregister);
+	else
+		optee_pool_op_free_helper(pool, shm, NULL);
 }
 
-static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+static void pool_op_destroy_pool(struct tee_shm_pool *pool)
 {
-	kfree(poolm);
+	kfree(pool);
 }
 
-static const struct tee_shm_pool_mgr_ops pool_ops = {
+static const struct tee_shm_pool_ops pool_ops = {
 	.alloc = pool_op_alloc,
 	.free = pool_op_free,
-	.destroy_poolmgr = pool_op_destroy_poolmgr,
+	.destroy_pool = pool_op_destroy_pool,
 };
 
 /**
@@ -572,16 +580,16 @@ static const struct tee_shm_pool_mgr_ops pool_ops = {
 * This pool is used when OP-TEE supports dymanic SHM. In this case
 * command buffers and such are allocated from kernel's own memory.
 */
-static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
+static struct tee_shm_pool *optee_shm_pool_alloc_pages(void)
 {
-	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 
-	if (!mgr)
+	if (!pool)
		return ERR_PTR(-ENOMEM);
 
-	mgr->ops = &pool_ops;
+	pool->ops = &pool_ops;
 
-	return mgr;
+	return pool;
 }
 
 /*
@@ -622,6 +630,7 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
 }
 
 static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+					  struct optee *optee,
					  struct optee_msg_arg *arg,
					  struct optee_call_ctx *call_ctx)
 {
@@ -651,7 +660,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
		shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
		break;
 	case OPTEE_RPC_SHM_TYPE_KERNEL:
-		shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
+		shm = tee_shm_alloc_priv_buf(optee->ctx, sz);
		break;
	default:
		arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -670,7 +679,7 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
 
 	sz = tee_shm_get_size(shm);
 
-	if (tee_shm_is_registered(shm)) {
+	if (tee_shm_is_dynamic(shm)) {
		struct page **pages;
		u64 *pages_list;
		size_t page_num;
@@ -747,7 +756,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
 	switch (arg->cmd) {
	case OPTEE_RPC_CMD_SHM_ALLOC:
		free_pages_list(call_ctx);
-		handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
+		handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
		break;
	case OPTEE_RPC_CMD_SHM_FREE:
		handle_rpc_func_cmd_shm_free(ctx, arg);
@@ -776,8 +785,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
 
 	switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
	case OPTEE_SMC_RPC_FUNC_ALLOC:
-		shm = tee_shm_alloc(ctx, param->a1,
-				    TEE_SHM_MAPPED | TEE_SHM_PRIV);
+		shm = tee_shm_alloc_priv_buf(optee->ctx, param->a1);
		if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
			reg_pair_from_64(&param->a1, &param->a2, pa);
			reg_pair_from_64(&param->a4, &param->a5,
@@ -954,57 +962,34 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
 {
 	struct optee *optee = dev_id;
 
-	optee_smc_do_bottom_half(optee->notif.ctx);
+	optee_smc_do_bottom_half(optee->ctx);
 
 	return IRQ_HANDLED;
 }
 
 static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
 {
-	struct tee_context *ctx;
 	int rc;
 
-	ctx = teedev_open(optee->teedev);
-	if (IS_ERR(ctx))
-		return PTR_ERR(ctx);
-
-	optee->notif.ctx = ctx;
 	rc = request_threaded_irq(irq, notif_irq_handler,
				  notif_irq_thread_fn,
				  0, "optee_notification", optee);
 	if (rc)
-		goto err_close_ctx;
+		return rc;
 
 	optee->smc.notif_irq = irq;
 
 	return 0;
-
-err_close_ctx:
-	teedev_close_context(optee->notif.ctx);
-	optee->notif.ctx = NULL;
-
-	return rc;
 }
 
 static void optee_smc_notif_uninit_irq(struct optee *optee)
 {
-	if (optee->notif.ctx) {
-		optee_smc_stop_async_notif(optee->notif.ctx);
+	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+		optee_smc_stop_async_notif(optee->ctx);
		if (optee->smc.notif_irq) {
			free_irq(optee->smc.notif_irq, optee);
			irq_dispose_mapping(optee->smc.notif_irq);
		}
-
-		/*
-		 * The thread normally working with optee->notif.ctx was
-		 * stopped with free_irq() above.
-		 *
-		 * Note we're not using teedev_close_context() or
-		 * tee_client_close_context() since we have already called
-		 * tee_device_put() while initializing to avoid a circular
-		 * reference counting.
-		 */
-		teedev_close_context(optee->notif.ctx);
 	}
 }
@@ -1174,33 +1159,6 @@ static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
 	return true;
 }
 
-static struct tee_shm_pool *optee_config_dyn_shm(void)
-{
-	struct tee_shm_pool_mgr *priv_mgr;
-	struct tee_shm_pool_mgr *dmabuf_mgr;
-	void *rc;
-
-	rc = optee_shm_pool_alloc_pages();
-	if (IS_ERR(rc))
-		return rc;
-	priv_mgr = rc;
-
-	rc = optee_shm_pool_alloc_pages();
-	if (IS_ERR(rc)) {
-		tee_shm_pool_mgr_destroy(priv_mgr);
-		return rc;
-	}
-	dmabuf_mgr = rc;
-
-	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
-	if (IS_ERR(rc)) {
-		tee_shm_pool_mgr_destroy(priv_mgr);
-		tee_shm_pool_mgr_destroy(dmabuf_mgr);
-	}
-
-	return rc;
-}
-
 static struct tee_shm_pool *
 optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
 {
@@ -1214,10 +1172,7 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
 	phys_addr_t begin;
 	phys_addr_t end;
 	void *va;
-	struct tee_shm_pool_mgr *priv_mgr;
-	struct tee_shm_pool_mgr *dmabuf_mgr;
 	void *rc;
-	const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
 
 	invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
 	if (res.result.status != OPTEE_SMC_RETURN_OK) {
@@ -1235,11 +1190,6 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
 	paddr = begin;
 	size = end - begin;
 
-	if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
-		pr_err("too small shared memory area\n");
-		return ERR_PTR(-EINVAL);
-	}
-
 	va = memremap(paddr, size, MEMREMAP_WB);
 	if (!va) {
		pr_err("shared memory ioremap failed\n");
@@ -1247,35 +1197,13 @@ optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
 	}
 	vaddr = (unsigned long)va;
 
-	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
-					    3 /* 8 bytes aligned */);
+	rc = tee_shm_pool_alloc_res_mem(vaddr, paddr, size,
+					OPTEE_MIN_STATIC_POOL_ALIGN);
 	if (IS_ERR(rc))
-		goto err_memunmap;
-	priv_mgr = rc;
-
-	vaddr += sz;
-	paddr += sz;
-	size -= sz;
-
-	rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
-	if (IS_ERR(rc))
-		goto err_free_priv_mgr;
-	dmabuf_mgr = rc;
-
-	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
-	if (IS_ERR(rc))
-		goto err_free_dmabuf_mgr;
-
-	*memremaped_shm = va;
+		memunmap(va);
+	else
+		*memremaped_shm = va;
 
 	return rc;
-
-err_free_dmabuf_mgr:
-	tee_shm_pool_mgr_destroy(dmabuf_mgr);
-err_free_priv_mgr:
-	tee_shm_pool_mgr_destroy(priv_mgr);
-err_memunmap:
-	memunmap(va);
-	return rc;
 }
@@ -1366,6 +1294,7 @@ static int optee_probe(struct platform_device *pdev)
 	struct optee *optee = NULL;
 	void *memremaped_shm = NULL;
 	struct tee_device *teedev;
+	struct tee_context *ctx;
 	u32 max_notif_value;
 	u32 sec_caps;
 	int rc;
@@ -1396,7 +1325,7 @@ static int optee_probe(struct platform_device *pdev)
	 * Try to use dynamic shared memory if possible
	 */
 	if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
-		pool = optee_config_dyn_shm();
+		pool = optee_shm_pool_alloc_pages();
 
 	/*
	 * If dynamic shared memory is not available or failed - try static one
@@ -1446,9 +1375,13 @@ static int optee_probe(struct platform_device *pdev)
 	optee->pool = pool;
 
 	platform_set_drvdata(pdev, optee);
+	ctx = teedev_open(optee->teedev);
+	if (IS_ERR(ctx))
+		goto err_supp_uninit;
+	optee->ctx = ctx;
 	rc = optee_notif_init(optee, max_notif_value);
 	if (rc)
-		goto err_supp_uninit;
+		goto err_close_ctx;
 
 	if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
		unsigned int irq;
@@ -1496,6 +1429,8 @@ err_disable_shm_cache:
 	optee_unregister_devices();
 err_notif_uninit:
 	optee_notif_uninit(optee);
+err_close_ctx:
+	teedev_close_context(ctx);
 err_supp_uninit:
 	optee_supp_uninit(&optee->supp);
 	mutex_destroy(&optee->call_queue.mutex);
drivers/tee/tee_core.c
@@ -297,7 +297,7 @@ static int tee_ioctl_shm_alloc(struct tee_context *ctx,
 	if (data.flags)
 		return -EINVAL;
 
-	shm = tee_shm_alloc(ctx, data.size, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
+	shm = tee_shm_alloc_user_buf(ctx, data.size);
 	if (IS_ERR(shm))
 		return PTR_ERR(shm);
 
@@ -334,8 +334,7 @@ tee_ioctl_shm_register(struct tee_context *ctx,
 	if (data.flags)
 		return -EINVAL;
 
-	shm = tee_shm_register(ctx, data.addr, data.length,
-			       TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED);
+	shm = tee_shm_register_user_buf(ctx, data.addr, data.length);
 	if (IS_ERR(shm))
 		return PTR_ERR(shm);
 
drivers/tee/tee_private.h
@@ -12,17 +12,6 @@
 #include <linux/mutex.h>
 #include <linux/types.h>
 
-/**
- * struct tee_shm_pool - shared memory pool
- * @private_mgr:	pool manager for shared memory only between kernel
- *			and secure world
- * @dma_buf_mgr:	pool manager for shared memory exported to user space
- */
-struct tee_shm_pool {
-	struct tee_shm_pool_mgr *private_mgr;
-	struct tee_shm_pool_mgr *dma_buf_mgr;
-};
-
 #define TEE_DEVICE_FLAG_REGISTERED	0x1
 #define TEE_MAX_DEV_NAME_LEN		32
 
@@ -68,4 +57,8 @@ void tee_device_put(struct tee_device *teedev);
 void teedev_ctx_get(struct tee_context *ctx);
 void teedev_ctx_put(struct tee_context *ctx);
 
+struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size);
+struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
+					  unsigned long addr, size_t length);
+
 #endif /*TEE_PRIVATE_H*/
drivers/tee/tee_shm.c
@@ -12,17 +12,43 @@
 #include <linux/uio.h>
 #include "tee_private.h"
 
+static void shm_put_kernel_pages(struct page **pages, size_t page_count)
+{
+	size_t n;
+
+	for (n = 0; n < page_count; n++)
+		put_page(pages[n]);
+}
+
+static int shm_get_kernel_pages(unsigned long start, size_t page_count,
+				struct page **pages)
+{
+	struct kvec *kiov;
+	size_t n;
+	int rc;
+
+	kiov = kcalloc(page_count, sizeof(*kiov), GFP_KERNEL);
+	if (!kiov)
+		return -ENOMEM;
+
+	for (n = 0; n < page_count; n++) {
+		kiov[n].iov_base = (void *)(start + n * PAGE_SIZE);
+		kiov[n].iov_len = PAGE_SIZE;
+	}
+
+	rc = get_kernel_pages(kiov, page_count, 0, pages);
+	kfree(kiov);
+
+	return rc;
+}
+
 static void release_registered_pages(struct tee_shm *shm)
 {
 	if (shm->pages) {
-		if (shm->flags & TEE_SHM_USER_MAPPED) {
+		if (shm->flags & TEE_SHM_USER_MAPPED)
 			unpin_user_pages(shm->pages, shm->num_pages);
-		} else {
-			size_t n;
-
-			for (n = 0; n < shm->num_pages; n++)
-				put_page(shm->pages[n]);
-		}
+		else
+			shm_put_kernel_pages(shm->pages, shm->num_pages);
 
 		kfree(shm->pages);
 	}
@@ -31,15 +57,8 @@ static void release_registered_pages(struct tee_shm *shm)
 static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
 {
 	if (shm->flags & TEE_SHM_POOL) {
-		struct tee_shm_pool_mgr *poolm;
-
-		if (shm->flags & TEE_SHM_DMA_BUF)
-			poolm = teedev->pool->dma_buf_mgr;
-		else
-			poolm = teedev->pool->private_mgr;
-
-		poolm->ops->free(poolm, shm);
-	} else if (shm->flags & TEE_SHM_REGISTER) {
+		teedev->pool->ops->free(teedev->pool, shm);
+	} else if (shm->flags & TEE_SHM_DYNAMIC) {
 		int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
 
 		if (rc)
@@ -56,25 +75,14 @@ static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
 	tee_device_put(teedev);
 }
 
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
+static struct tee_shm *shm_alloc_helper(struct tee_context *ctx, size_t size,
+					size_t align, u32 flags, int id)
 {
 	struct tee_device *teedev = ctx->teedev;
-	struct tee_shm_pool_mgr *poolm = NULL;
 	struct tee_shm *shm;
 	void *ret;
 	int rc;
 
-	if (!(flags & TEE_SHM_MAPPED)) {
-		dev_err(teedev->dev.parent,
-			"only mapped allocations supported\n");
-		return ERR_PTR(-EINVAL);
-	}
-
-	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF | TEE_SHM_PRIV))) {
-		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
-		return ERR_PTR(-EINVAL);
-	}
-
 	if (!tee_device_get(teedev))
 		return ERR_PTR(-EINVAL);
 
@@ -91,41 +99,76 @@ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
 	}
 
 	refcount_set(&shm->refcount, 1);
-	shm->flags = flags | TEE_SHM_POOL;
-	shm->ctx = ctx;
-	if (flags & TEE_SHM_DMA_BUF)
-		poolm = teedev->pool->dma_buf_mgr;
-	else
-		poolm = teedev->pool->private_mgr;
+	shm->flags = flags;
+	shm->id = id;
 
-	rc = poolm->ops->alloc(poolm, shm, size);
+	/*
+	 * We're assigning this as it is needed if the shm is to be
+	 * registered. If this function returns OK then the caller expected
+	 * to call teedev_ctx_get() or clear shm->ctx in case it's not
+	 * needed any longer.
+	 */
+	shm->ctx = ctx;
+
+	rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
 	if (rc) {
 		ret = ERR_PTR(rc);
 		goto err_kfree;
 	}
 
-	if (flags & TEE_SHM_DMA_BUF) {
-		mutex_lock(&teedev->mutex);
-		shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
-		mutex_unlock(&teedev->mutex);
-		if (shm->id < 0) {
-			ret = ERR_PTR(shm->id);
-			goto err_pool_free;
-		}
-	}
-
 	teedev_ctx_get(ctx);
-
 	return shm;
-err_pool_free:
-	poolm->ops->free(poolm, shm);
 err_kfree:
 	kfree(shm);
 err_dev_put:
 	tee_device_put(teedev);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(tee_shm_alloc);
+
+/**
+ * tee_shm_alloc_user_buf() - Allocate shared memory for user space
+ * @ctx:	Context that allocates the shared memory
+ * @size:	Requested size of shared memory
+ *
+ * Memory allocated as user space shared memory is automatically freed when
+ * the TEE file pointer is closed. The primary usage of this function is
+ * when the TEE driver doesn't support registering ordinary user space
+ * memory.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_user_buf(struct tee_context *ctx, size_t size)
+{
+	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
+	struct tee_device *teedev = ctx->teedev;
+	struct tee_shm *shm;
+	void *ret;
+	int id;
+
+	mutex_lock(&teedev->mutex);
+	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
+	mutex_unlock(&teedev->mutex);
+	if (id < 0)
+		return ERR_PTR(id);
+
+	shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
+	if (IS_ERR(shm)) {
+		mutex_lock(&teedev->mutex);
+		idr_remove(&teedev->idr, id);
+		mutex_unlock(&teedev->mutex);
+		return shm;
+	}
+
+	mutex_lock(&teedev->mutex);
+	ret = idr_replace(&teedev->idr, shm, id);
+	mutex_unlock(&teedev->mutex);
+	if (IS_ERR(ret)) {
+		tee_shm_free(shm);
+		return ret;
+	}
+
+	return shm;
+}
 
 /**
 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
@@ -141,32 +184,54 @@ EXPORT_SYMBOL_GPL(tee_shm_alloc);
 */
 struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size)
 {
-	return tee_shm_alloc(ctx, size, TEE_SHM_MAPPED);
+	u32 flags = TEE_SHM_DYNAMIC | TEE_SHM_POOL;
+
+	return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1);
 }
 EXPORT_SYMBOL_GPL(tee_shm_alloc_kernel_buf);
 
-struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
-				 size_t length, u32 flags)
+/**
+ * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared
+ *			      kernel buffer
+ * @ctx:	Context that allocates the shared memory
+ * @size:	Requested size of shared memory
+ *
+ * This function returns similar shared memory as
+ * tee_shm_alloc_kernel_buf(), but with the difference that the memory
+ * might not be registered in secure world in case the driver supports
+ * passing memory not registered in advance.
+ *
+ * This function should normally only be used internally in the TEE
+ * drivers.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size)
+{
+	u32 flags = TEE_SHM_PRIV | TEE_SHM_POOL;
+
+	return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1);
+}
+EXPORT_SYMBOL_GPL(tee_shm_alloc_priv_buf);
+
+static struct tee_shm *
+register_shm_helper(struct tee_context *ctx, unsigned long addr,
+		    size_t length, u32 flags, int id)
 {
 	struct tee_device *teedev = ctx->teedev;
-	const u32 req_user_flags = TEE_SHM_DMA_BUF | TEE_SHM_USER_MAPPED;
-	const u32 req_kernel_flags = TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED;
 	struct tee_shm *shm;
+	unsigned long start;
+	size_t num_pages;
 	void *ret;
 	int rc;
-	int num_pages;
-	unsigned long start;
 
-	if (flags != req_user_flags && flags != req_kernel_flags)
-		return ERR_PTR(-ENOTSUPP);
-
 	if (!tee_device_get(teedev))
 		return ERR_PTR(-EINVAL);
 
 	if (!teedev->desc->ops->shm_register ||
 	    !teedev->desc->ops->shm_unregister) {
-		tee_device_put(teedev);
-		return ERR_PTR(-ENOTSUPP);
+		ret = ERR_PTR(-ENOTSUPP);
+		goto err_dev_put;
 	}
 
 	teedev_ctx_get(ctx);
@@ -174,13 +239,13 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
 	if (!shm) {
 		ret = ERR_PTR(-ENOMEM);
-		goto err;
+		goto err_ctx_put;
 	}
 
 	refcount_set(&shm->refcount, 1);
-	shm->flags = flags | TEE_SHM_REGISTER;
+	shm->flags = flags;
 	shm->ctx = ctx;
-	shm->id = -1;
+	shm->id = id;
 	addr = untagged_addr(addr);
 	start = rounddown(addr, PAGE_SIZE);
 	shm->offset = addr - start;
@@ -189,71 +254,106 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
 	shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
 	if (!shm->pages) {
 		ret = ERR_PTR(-ENOMEM);
-		goto err;
+		goto err_free_shm;
 	}
 
-	if (flags & TEE_SHM_USER_MAPPED) {
+	if (flags & TEE_SHM_USER_MAPPED)
 		rc = pin_user_pages_fast(start, num_pages, FOLL_WRITE,
 					 shm->pages);
-	} else {
-		struct kvec *kiov;
-		int i;
-
-		kiov = kcalloc(num_pages, sizeof(*kiov), GFP_KERNEL);
-		if (!kiov) {
-			ret = ERR_PTR(-ENOMEM);
-			goto err;
-		}
-
-		for (i = 0; i < num_pages; i++) {
-			kiov[i].iov_base = (void *)(start + i * PAGE_SIZE);
-			kiov[i].iov_len = PAGE_SIZE;
-		}
-
-		rc = get_kernel_pages(kiov, num_pages, 0, shm->pages);
-		kfree(kiov);
-	}
+	else
+		rc = shm_get_kernel_pages(start, num_pages, shm->pages);
 	if (rc > 0)
 		shm->num_pages = rc;
 	if (rc != num_pages) {
 		if (rc >= 0)
 			rc = -ENOMEM;
 		ret = ERR_PTR(rc);
-		goto err;
-	}
-
-	mutex_lock(&teedev->mutex);
-	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
-	mutex_unlock(&teedev->mutex);
-
-	if (shm->id < 0) {
-		ret = ERR_PTR(shm->id);
-		goto err;
+		goto err_put_shm_pages;
 	}
 
 	rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
 					     shm->num_pages, start);
 	if (rc) {
 		ret = ERR_PTR(rc);
-		goto err;
+		goto err_put_shm_pages;
 	}
 
 	return shm;
-err:
-	if (shm) {
-		if (shm->id >= 0) {
-			mutex_lock(&teedev->mutex);
-			idr_remove(&teedev->idr, shm->id);
-			mutex_unlock(&teedev->mutex);
-		}
-		release_registered_pages(shm);
-	}
+err_put_shm_pages:
+	if (flags & TEE_SHM_USER_MAPPED)
+		unpin_user_pages(shm->pages, shm->num_pages);
+	else
+		shm_put_kernel_pages(shm->pages, shm->num_pages);
+	kfree(shm->pages);
+err_free_shm:
 	kfree(shm);
+err_ctx_put:
 	teedev_ctx_put(ctx);
+err_dev_put:
 	tee_device_put(teedev);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(tee_shm_register);
+
+/**
+ * tee_shm_register_user_buf() - Register a userspace shared memory buffer
+ * @ctx:	Context that registers the shared memory
+ * @addr:	The userspace address of the shared buffer
+ * @length:	Length of the shared buffer
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_register_user_buf(struct tee_context *ctx,
+					  unsigned long addr, size_t length)
+{
+	u32 flags = TEE_SHM_USER_MAPPED | TEE_SHM_DYNAMIC;
+	struct tee_device *teedev = ctx->teedev;
+	struct tee_shm *shm;
+	void *ret;
+	int id;
+
+	mutex_lock(&teedev->mutex);
+	id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL);
+	mutex_unlock(&teedev->mutex);
+	if (id < 0)
+		return ERR_PTR(id);
+
+	shm = register_shm_helper(ctx, addr, length, flags, id);
+	if (IS_ERR(shm)) {
+		mutex_lock(&teedev->mutex);
+		idr_remove(&teedev->idr, id);
+		mutex_unlock(&teedev->mutex);
+		return shm;
+	}
+
+	mutex_lock(&teedev->mutex);
+	ret = idr_replace(&teedev->idr, shm, id);
+	mutex_unlock(&teedev->mutex);
+	if (IS_ERR(ret)) {
+		tee_shm_free(shm);
+		return ret;
+	}
+
+	return shm;
+}
+
+/**
+ * tee_shm_register_kernel_buf() - Register kernel memory to be shared with
+ *				   secure world
+ * @ctx:	Context that registers the shared memory
+ * @addr:	The buffer
+ * @length:	Length of the buffer
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
+					    void *addr, size_t length)
+{
+	u32 flags = TEE_SHM_DYNAMIC;
+
+	return register_shm_helper(ctx, (unsigned long)addr, length, flags, -1);
+}
+EXPORT_SYMBOL_GPL(tee_shm_register_kernel_buf);
 
 static int tee_shm_fop_release(struct inode *inode, struct file *filp)
 {
@@ -293,7 +393,7 @@ int tee_shm_get_fd(struct tee_shm *shm)
 {
 	int fd;
 
-	if (!(shm->flags & TEE_SHM_DMA_BUF))
+	if (shm->id < 0)
 		return -EINVAL;
 
 	/* matched by tee_shm_put() in tee_shm_op_release() */
@@ -323,7 +423,7 @@ EXPORT_SYMBOL_GPL(tee_shm_free);
 */
 int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
 {
-	if (!(shm->flags & TEE_SHM_MAPPED))
+	if (!shm->kaddr)
 		return -EINVAL;
 	/* Check that we're in the range of the shm */
 	if ((char *)va < (char *)shm->kaddr)
@@ -345,7 +445,7 @@ EXPORT_SYMBOL_GPL(tee_shm_va2pa);
 */
 int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
 {
-	if (!(shm->flags & TEE_SHM_MAPPED))
+	if (!shm->kaddr)
 		return -EINVAL;
 	/* Check that we're in the range of the shm */
 	if (pa < shm->paddr)
@@ -373,7 +473,7 @@ EXPORT_SYMBOL_GPL(tee_shm_pa2va);
 */
 void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
 {
-	if (!(shm->flags & TEE_SHM_MAPPED))
+	if (!shm->kaddr)
 		return ERR_PTR(-EINVAL);
 	if (offs >= shm->size)
 		return ERR_PTR(-EINVAL);
@@ -448,7 +548,7 @@ void tee_shm_put(struct tee_shm *shm)
		 * the refcount_inc() in tee_shm_get_from_id() never starts
		 * from 0.
		 */
-		if (shm->flags & TEE_SHM_DMA_BUF)
+		if (shm->id >= 0)
			idr_remove(&teedev->idr, shm->id);
		do_release = true;
 	}
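The user-buf paths above use a two-phase IDR publication so the id only becomes visible once the shm is fully constructed. A generalized sketch of the pattern, under the assumption of an idr/mutex pair like teedev->idr/teedev->mutex in the hunk above:

static void *reserve_then_publish(struct idr *idr, struct mutex *lock,
				  void *obj)
{
	void *old;
	int id;

	mutex_lock(lock);
	id = idr_alloc(idr, NULL, 1, 0, GFP_KERNEL);	/* reserve slot */
	mutex_unlock(lock);
	if (id < 0)
		return ERR_PTR(id);

	/* ... object construction can happen without holding the lock ... */

	mutex_lock(lock);
	old = idr_replace(idr, obj, id);		/* publish */
	mutex_unlock(lock);
	return old;	/* NULL on success per idr_replace() semantics */
}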
@ -1,6 +1,6 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2015, Linaro Limited
|
||||
* Copyright (c) 2015, 2017, 2022 Linaro Limited
|
||||
*/
|
||||
#include <linux/device.h>
|
||||
#include <linux/dma-buf.h>
|
||||
@ -9,14 +9,16 @@
|
||||
#include <linux/tee_drv.h>
|
||||
#include "tee_private.h"
|
||||
|
||||
static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
|
||||
struct tee_shm *shm, size_t size)
|
||||
static int pool_op_gen_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
|
||||
size_t size, size_t align)
|
||||
{
|
||||
unsigned long va;
|
||||
struct gen_pool *genpool = poolm->private_data;
|
||||
size_t s = roundup(size, 1 << genpool->min_alloc_order);
|
||||
struct gen_pool *genpool = pool->private_data;
|
||||
size_t a = max_t(size_t, align, BIT(genpool->min_alloc_order));
|
||||
struct genpool_data_align data = { .align = a };
|
||||
size_t s = roundup(size, a);
|
||||
|
||||
va = gen_pool_alloc(genpool, s);
|
||||
va = gen_pool_alloc_algo(genpool, s, gen_pool_first_fit_align, &data);
|
||||
if (!va)
|
||||
return -ENOMEM;
|
||||
|
||||
@ -24,163 +26,67 @@ static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
|
||||
shm->kaddr = (void *)va;
|
||||
shm->paddr = gen_pool_virt_to_phys(genpool, va);
|
||||
shm->size = s;
|
||||
/*
|
||||
* This is from a static shared memory pool so no need to register
|
||||
* each chunk, and no need to unregister later either.
|
||||
*/
|
||||
shm->flags &= ~TEE_SHM_DYNAMIC;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
|
||||
struct tee_shm *shm)
|
||||
static void pool_op_gen_free(struct tee_shm_pool *pool, struct tee_shm *shm)
|
||||
{
|
||||
gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
|
||||
gen_pool_free(pool->private_data, (unsigned long)shm->kaddr,
|
||||
shm->size);
|
||||
shm->kaddr = NULL;
|
||||
}
|
||||
|
||||
static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
|
||||
static void pool_op_gen_destroy_pool(struct tee_shm_pool *pool)
|
||||
{
|
||||
gen_pool_destroy(poolm->private_data);
|
||||
kfree(poolm);
|
||||
gen_pool_destroy(pool->private_data);
|
||||
kfree(pool);
|
||||
}
|
||||
|
||||
static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
|
||||
static const struct tee_shm_pool_ops pool_ops_generic = {
|
||||
.alloc = pool_op_gen_alloc,
|
||||
.free = pool_op_gen_free,
|
||||
.destroy_poolmgr = pool_op_gen_destroy_poolmgr,
|
||||
.destroy_pool = pool_op_gen_destroy_pool,
|
||||
};
|
||||
|
||||
/**
|
||||
* tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
|
||||
* memory range
|
||||
* @priv_info: Information for driver private shared memory pool
|
||||
* @dmabuf_info: Information for dma-buf shared memory pool
|
||||
*
|
||||
* Start and end of pools will must be page aligned.
|
||||
*
|
||||
* Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
|
||||
* in @dmabuf, others will use the range provided by @priv.
|
||||
*
|
||||
* @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
|
||||
*/
|
||||
struct tee_shm_pool *
|
||||
tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
|
||||
struct tee_shm_pool_mem_info *dmabuf_info)
|
||||
{
|
||||
struct tee_shm_pool_mgr *priv_mgr;
|
||||
struct tee_shm_pool_mgr *dmabuf_mgr;
|
||||
void *rc;
|
||||
|
||||
/*
|
||||
* Create the pool for driver private shared memory
|
||||
*/
|
||||
rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
|
||||
priv_info->size,
|
||||
3 /* 8 byte aligned */);
|
||||
if (IS_ERR(rc))
|
||||
return rc;
|
||||
priv_mgr = rc;
|
||||
|
||||
/*
|
||||
* Create the pool for dma_buf shared memory
|
||||
*/
|
||||
rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
|
||||
dmabuf_info->paddr,
|
||||
dmabuf_info->size, PAGE_SHIFT);
|
||||
if (IS_ERR(rc))
|
||||
goto err_free_priv_mgr;
|
||||
dmabuf_mgr = rc;
|
||||
|
||||
rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
|
||||
if (IS_ERR(rc))
|
||||
goto err_free_dmabuf_mgr;
|
||||
|
||||
return rc;
|
||||
|
||||
err_free_dmabuf_mgr:
|
||||
tee_shm_pool_mgr_destroy(dmabuf_mgr);
|
||||
err_free_priv_mgr:
|
||||
tee_shm_pool_mgr_destroy(priv_mgr);
|
||||
|
||||
return rc;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);
|
||||
|
||||
struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
|
||||
phys_addr_t paddr,
|
||||
size_t size,
|
||||
int min_alloc_order)
|
||||
struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
|
||||
phys_addr_t paddr, size_t size,
|
||||
int min_alloc_order)
|
||||
{
|
||||
const size_t page_mask = PAGE_SIZE - 1;
|
||||
struct tee_shm_pool_mgr *mgr;
|
||||
struct tee_shm_pool *pool;
|
||||
int rc;
|
||||
|
||||
/* Start and end must be page aligned */
|
||||
if (vaddr & page_mask || paddr & page_mask || size & page_mask)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
|
||||
if (!mgr)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
mgr->private_data = gen_pool_create(min_alloc_order, -1);
|
||||
if (!mgr->private_data) {
|
||||
rc = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
|
||||
rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
|
||||
if (rc) {
|
||||
gen_pool_destroy(mgr->private_data);
|
||||
goto err;
|
||||
}
|
||||
|
||||
mgr->ops = &pool_ops_generic;
|
||||
|
||||
return mgr;
|
||||
err:
|
||||
kfree(mgr);
|
||||
|
||||
return ERR_PTR(rc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);
|
||||
|
||||
static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
|
||||
{
|
||||
return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
|
||||
mgr->ops->destroy_poolmgr;
|
||||
}
|
||||
|
||||
struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
|
||||
struct tee_shm_pool_mgr *dmabuf_mgr)
|
||||
{
|
||||
struct tee_shm_pool *pool;
|
||||
|
||||
if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
pool = kzalloc(sizeof(*pool), GFP_KERNEL);
|
||||
if (!pool)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
pool->private_mgr = priv_mgr;
|
||||
pool->dma_buf_mgr = dmabuf_mgr;
|
||||
pool->private_data = gen_pool_create(min_alloc_order, -1);
|
||||
if (!pool->private_data) {
|
||||
rc = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
rc = gen_pool_add_virt(pool->private_data, vaddr, paddr, size, -1);
|
||||
if (rc) {
|
||||
gen_pool_destroy(pool->private_data);
|
||||
goto err;
|
||||
}
|
||||
|
||||
pool->ops = &pool_ops_generic;
|
||||
|
||||
return pool;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);
|
||||
|
||||
-/**
- * tee_shm_pool_free() - Free a shared memory pool
- * @pool:	The shared memory pool to free
- *
- * There must be no remaining shared memory allocated from this pool when
- * this function is called.
- */
-void tee_shm_pool_free(struct tee_shm_pool *pool)
-{
-	if (pool->private_mgr)
-		tee_shm_pool_mgr_destroy(pool->private_mgr);
-	if (pool->dma_buf_mgr)
-		tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
+err:
 	kfree(pool);
+
+	return ERR_PTR(rc);
 }
-EXPORT_SYMBOL_GPL(tee_shm_pool_free);
+EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);

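With both pool managers gone from this file, a driver built on static shared memory now creates its pool in one step and hands it to the core. A sketch under assumed names (demo_register_tee() and the address triple are hypothetical; tee_device_alloc(), tee_device_register() and tee_device_unregister() are the existing tee core entry points):

static int demo_register_tee(struct device *dev, const struct tee_desc *desc,
			     unsigned long vaddr, phys_addr_t paddr,
			     size_t size)
{
	struct tee_shm_pool *pool;
	struct tee_device *teedev;
	int rc;

	/* One pool now serves what priv_mgr + dmabuf_mgr used to */
	pool = tee_shm_pool_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	teedev = tee_device_alloc(desc, dev, pool, NULL);
	if (IS_ERR(teedev)) {
		tee_shm_pool_free(pool);
		return PTR_ERR(teedev);
	}

	rc = tee_device_register(teedev);
	if (rc) {
		tee_device_unregister(teedev);
		tee_shm_pool_free(pool);
	}
	return rc;
}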
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2022 Linaro Limited
  */

 #ifndef __TEE_DRV_H
@@ -20,14 +20,11 @@
  * specific TEE driver.
  */

-#define TEE_SHM_MAPPED		BIT(0)  /* Memory mapped by the kernel */
-#define TEE_SHM_DMA_BUF		BIT(1)  /* Memory with dma-buf handle */
-#define TEE_SHM_EXT_DMA_BUF	BIT(2)  /* Memory with dma-buf handle */
-#define TEE_SHM_REGISTER	BIT(3)  /* Memory registered in secure world */
-#define TEE_SHM_USER_MAPPED	BIT(4)  /* Memory mapped in user space */
-#define TEE_SHM_POOL		BIT(5)  /* Memory allocated from pool */
-#define TEE_SHM_KERNEL_MAPPED	BIT(6)  /* Memory mapped in kernel space */
-#define TEE_SHM_PRIV		BIT(7)  /* Memory private to TEE driver */
+#define TEE_SHM_DYNAMIC		BIT(0)  /* Dynamic shared memory registered */
+					/* in secure world */
+#define TEE_SHM_USER_MAPPED	BIT(1)  /* Memory mapped in user space */
+#define TEE_SHM_POOL		BIT(2)  /* Memory allocated from pool */
+#define TEE_SHM_PRIV		BIT(3)  /* Memory private to TEE driver */

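After this refactor the surviving four bits are internal state the core sets on a tee_shm rather than caller-supplied properties. A rough illustration of inspecting them (the comments describe what each bit means per this series; the function itself is hypothetical):

/* Sketch: branching on the new flag bits of a struct tee_shm */
static void demo_describe_shm(struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_DYNAMIC)	/* registered in secure world */
		pr_info("dynamic shm\n");
	if (shm->flags & TEE_SHM_USER_MAPPED)	/* has a user-space mapping */
		pr_info("user mapped\n");
	if (shm->flags & TEE_SHM_POOL)		/* carved from the device pool */
		pr_info("pool backed\n");
	if (shm->flags & TEE_SHM_PRIV)		/* hidden from user space */
		pr_info("driver private\n");
}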
 struct device;
 struct tee_device;
@@ -221,92 +218,39 @@ struct tee_shm {
 };

 /**
- * struct tee_shm_pool_mgr - shared memory manager
+ * struct tee_shm_pool - shared memory pool
  * @ops:		operations
  * @private_data:	private data for the shared memory manager
  */
-struct tee_shm_pool_mgr {
-	const struct tee_shm_pool_mgr_ops *ops;
+struct tee_shm_pool {
+	const struct tee_shm_pool_ops *ops;
 	void *private_data;
 };

 /**
- * struct tee_shm_pool_mgr_ops - shared memory pool manager operations
+ * struct tee_shm_pool_ops - shared memory pool operations
  * @alloc:		called when allocating shared memory
  * @free:		called when freeing shared memory
- * @destroy_poolmgr:	called when destroying the pool manager
+ * @destroy_pool:	called when destroying the pool
  */
-struct tee_shm_pool_mgr_ops {
-	int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm,
-		     size_t size);
-	void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm);
-	void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr);
+struct tee_shm_pool_ops {
+	int (*alloc)(struct tee_shm_pool *pool, struct tee_shm *shm,
+		     size_t size, size_t align);
+	void (*free)(struct tee_shm_pool *pool, struct tee_shm *shm);
+	void (*destroy_pool)(struct tee_shm_pool *pool);
 };

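The ops table is now the single extension point for drivers that cannot use the generic reserved-memory pool; note the new align parameter in alloc. A skeletal, deliberately non-functional sketch of a custom pool (all demo_* names are hypothetical):

static int demo_pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			      size_t size, size_t align)
{
	/* A real implementation would carve 'size' bytes at 'align' from
	 * pool->private_data and fill in shm->kaddr, shm->paddr, shm->size. */
	return -ENOMEM;
}

static void demo_pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	/* Return shm's range to the allocator in pool->private_data */
}

static void demo_pool_op_destroy_pool(struct tee_shm_pool *pool)
{
	kfree(pool);
}

static const struct tee_shm_pool_ops demo_pool_ops = {
	.alloc = demo_pool_op_alloc,
	.free = demo_pool_op_free,
	.destroy_pool = demo_pool_op_destroy_pool,
};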
-/**
- * tee_shm_pool_alloc() - Create a shared memory pool from shm managers
- * @priv_mgr:	manager for driver private shared memory allocations
- * @dmabuf_mgr:	manager for dma-buf shared memory allocations
- *
- * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
- * in @dmabuf, others will use the range provided by @priv.
- *
- * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
- */
-struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
-					struct tee_shm_pool_mgr *dmabuf_mgr);

 /*
- * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved
- * memory
+ * tee_shm_pool_alloc_res_mem() - Create a shm manager for reserved memory
  * @vaddr:	Virtual address of start of pool
  * @paddr:	Physical address of start of pool
  * @size:	Size in bytes of the pool
  *
  * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure.
  */
-struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
-							phys_addr_t paddr,
-							size_t size,
-							int min_alloc_order);

-/**
- * tee_shm_pool_mgr_destroy() - Free a shared memory manager
- */
-static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm)
-{
-	poolm->ops->destroy_poolmgr(poolm);
-}

-/**
- * struct tee_shm_pool_mem_info - holds information needed to create a shared
- * memory pool
- * @vaddr:	Virtual address of start of pool
- * @paddr:	Physical address of start of pool
- * @size:	Size in bytes of the pool
- */
-struct tee_shm_pool_mem_info {
-	unsigned long vaddr;
-	phys_addr_t paddr;
-	size_t size;
-};

-/**
- * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
- * memory range
- * @priv_info:	 Information for driver private shared memory pool
- * @dmabuf_info: Information for dma-buf shared memory pool
- *
- * Start and end of pools will must be page aligned.
- *
- * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
- * in @dmabuf, others will use the range provided by @priv.
- *
- * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
- */
-struct tee_shm_pool *
-tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
-			   struct tee_shm_pool_mem_info *dmabuf_info);
+struct tee_shm_pool *tee_shm_pool_alloc_res_mem(unsigned long vaddr,
+						phys_addr_t paddr, size_t size,
+						int min_alloc_order);

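A call-site sketch for the new one-step constructor (demo_make_pool() and its address triple are placeholders; min_alloc_order is the log2 of the smallest allocation unit):

static struct tee_shm_pool *demo_make_pool(unsigned long va, phys_addr_t pa,
					   size_t size)
{
	/* PAGE_SHIFT gives page-granular allocations; a smaller order
	 * (e.g. 3 for 8-byte units) would allow finer-grained ones. */
	return tee_shm_pool_alloc_res_mem(va, pa, size, PAGE_SHIFT);
}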
 /**
  * tee_shm_pool_free() - Free a shared memory pool
@@ -315,7 +259,10 @@ tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
  * There must be no remaining shared memory allocated from this pool when
  * this function is called.
  */
-void tee_shm_pool_free(struct tee_shm_pool *pool);
+static inline void tee_shm_pool_free(struct tee_shm_pool *pool)
+{
+	pool->ops->destroy_pool(pool);
+}

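Freeing now just dispatches through the ops table, so the ordering constraint above is entirely on the caller: empty the pool before destroying it. A sketch of a driver remove path under assumed names:

static void demo_remove(struct tee_device *teedev, struct tee_shm_pool *pool)
{
	/* No allocations may remain: unregister the device first,
	 * then release the now-empty pool. */
	tee_device_unregister(teedev);
	tee_shm_pool_free(pool);
}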
 /**
  * tee_get_drvdata() - Return driver_data pointer
@@ -323,43 +270,20 @@ void tee_shm_pool_free(struct tee_shm_pool *pool);
  */
 void *tee_get_drvdata(struct tee_device *teedev);

-/**
- * tee_shm_alloc() - Allocate shared memory
- * @ctx:	Context that allocates the shared memory
- * @size:	Requested size of shared memory
- * @flags:	Flags setting properties for the requested shared memory.
- *
- * Memory allocated as global shared memory is automatically freed when the
- * TEE file pointer is closed. The @flags field uses the bits defined by
- * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If
- * TEE_SHM_DMA_BUF global shared memory will be allocated and associated
- * with a dma-buf handle, else driver private memory.
- *
- * @returns a pointer to 'struct tee_shm'
- */
-struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
+struct tee_shm *tee_shm_alloc_priv_buf(struct tee_context *ctx, size_t size);
 struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size);

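The flag argument is gone: each caller intent now has its own entry point. A hedged sketch of the kernel-buffer variant together with the existing accessors tee_shm_get_va() and tee_shm_free() (demo_use_kernel_buf() is hypothetical):

static int demo_use_kernel_buf(struct tee_context *ctx)
{
	struct tee_shm *shm;
	void *va;

	shm = tee_shm_alloc_kernel_buf(ctx, PAGE_SIZE);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);	/* kernel mapping of the buffer */
	if (IS_ERR(va)) {
		tee_shm_free(shm);
		return PTR_ERR(va);
	}

	memset(va, 0, PAGE_SIZE);	/* stage request data here */
	tee_shm_free(shm);
	return 0;
}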
-/**
- * tee_shm_register() - Register shared memory buffer
- * @ctx:	Context that registers the shared memory
- * @addr:	Address is userspace of the shared buffer
- * @length:	Length of the shared buffer
- * @flags:	Flags setting properties for the requested shared memory.
- *
- * @returns a pointer to 'struct tee_shm'
- */
-struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
-				 size_t length, u32 flags);
+struct tee_shm *tee_shm_register_kernel_buf(struct tee_context *ctx,
+					    void *addr, size_t length);

 /**
- * tee_shm_is_registered() - Check if shared memory object in registered in TEE
+ * tee_shm_is_dynamic() - Check if shared memory object is of the dynamic kind
  * @shm:	Shared memory handle
- * @returns true if object is registered in TEE
+ * @returns true if object is dynamic shared memory
  */
-static inline bool tee_shm_is_registered(struct tee_shm *shm)
+static inline bool tee_shm_is_dynamic(struct tee_shm *shm)
 {
-	return shm && (shm->flags & TEE_SHM_REGISTER);
+	return shm && (shm->flags & TEE_SHM_DYNAMIC);
 }

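The rename tracks the flag rename above: "registered" becomes "dynamic". A small usage sketch; demo_release_pages() is entirely made up to stand in for whatever driver-side cleanup dynamic objects need:

static void demo_release_pages(struct tee_shm *shm);	/* hypothetical */

static void demo_teardown_shm(struct tee_shm *shm)
{
	/* Only dynamic objects have pages registered with the secure world */
	if (tee_shm_is_dynamic(shm))
		demo_release_pages(shm);
	tee_shm_free(shm);
}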
 /**
@@ -70,17 +70,15 @@ static int trusted_tee_seal(struct trusted_key_payload *p, char *datablob)
 	memset(&inv_arg, 0, sizeof(inv_arg));
 	memset(&param, 0, sizeof(param));

-	reg_shm_in = tee_shm_register(pvt_data.ctx, (unsigned long)p->key,
-				      p->key_len, TEE_SHM_DMA_BUF |
-				      TEE_SHM_KERNEL_MAPPED);
+	reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+						 p->key_len);
 	if (IS_ERR(reg_shm_in)) {
 		dev_err(pvt_data.dev, "key shm register failed\n");
 		return PTR_ERR(reg_shm_in);
 	}

-	reg_shm_out = tee_shm_register(pvt_data.ctx, (unsigned long)p->blob,
-				       sizeof(p->blob), TEE_SHM_DMA_BUF |
-				       TEE_SHM_KERNEL_MAPPED);
+	reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+						  sizeof(p->blob));
 	if (IS_ERR(reg_shm_out)) {
 		dev_err(pvt_data.dev, "blob shm register failed\n");
 		ret = PTR_ERR(reg_shm_out);
@@ -131,17 +129,15 @@ static int trusted_tee_unseal(struct trusted_key_payload *p, char *datablob)
 	memset(&inv_arg, 0, sizeof(inv_arg));
 	memset(&param, 0, sizeof(param));

-	reg_shm_in = tee_shm_register(pvt_data.ctx, (unsigned long)p->blob,
-				      p->blob_len, TEE_SHM_DMA_BUF |
-				      TEE_SHM_KERNEL_MAPPED);
+	reg_shm_in = tee_shm_register_kernel_buf(pvt_data.ctx, p->blob,
+						 p->blob_len);
 	if (IS_ERR(reg_shm_in)) {
 		dev_err(pvt_data.dev, "blob shm register failed\n");
 		return PTR_ERR(reg_shm_in);
 	}

-	reg_shm_out = tee_shm_register(pvt_data.ctx, (unsigned long)p->key,
-				       sizeof(p->key), TEE_SHM_DMA_BUF |
-				       TEE_SHM_KERNEL_MAPPED);
+	reg_shm_out = tee_shm_register_kernel_buf(pvt_data.ctx, p->key,
+						  sizeof(p->key));
 	if (IS_ERR(reg_shm_out)) {
 		dev_err(pvt_data.dev, "key shm register failed\n");
 		ret = PTR_ERR(reg_shm_out);
@@ -192,8 +188,7 @@ static int trusted_tee_get_random(unsigned char *key, size_t key_len)
 	memset(&inv_arg, 0, sizeof(inv_arg));
 	memset(&param, 0, sizeof(param));

-	reg_shm = tee_shm_register(pvt_data.ctx, (unsigned long)key, key_len,
-				   TEE_SHM_DMA_BUF | TEE_SHM_KERNEL_MAPPED);
+	reg_shm = tee_shm_register_kernel_buf(pvt_data.ctx, key, key_len);
 	if (IS_ERR(reg_shm)) {
 		dev_err(pvt_data.dev, "key shm register failed\n");
 		return PTR_ERR(reg_shm);
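Taken together, these hunks converge on one pattern: register a kernel buffer, reference it from a tee_param, invoke, free. A condensed sketch of that pattern (the command id and the single INOUT memref are assumptions for illustration, not the trusted-keys TA interface):

static int demo_invoke_with_buf(struct tee_context *ctx, u32 session,
				void *buf, size_t len)
{
	struct tee_ioctl_invoke_arg inv_arg = { };
	struct tee_param param[4] = { };
	struct tee_shm *shm;
	int ret;

	shm = tee_shm_register_kernel_buf(ctx, buf, len);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	inv_arg.func = 0;	/* hypothetical TA command id */
	inv_arg.session = session;
	inv_arg.num_params = 4;

	param[0].attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
	param[0].u.memref.shm = shm;
	param[0].u.memref.size = len;

	ret = tee_client_invoke_func(ctx, &inv_arg, param);
	if (!ret && inv_arg.ret)
		ret = -EPROTO;	/* TA-level error */

	tee_shm_free(shm);
	return ret;
}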