diff --git a/MAINTAINERS b/MAINTAINERS
index d6c90161c7bf..03c7215f4437 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -18605,6 +18605,14 @@ F:	Documentation/networking/device_drivers/cellular/qualcomm/rmnet.rst
 F:	drivers/net/ethernet/qualcomm/rmnet/
 F:	include/linux/if_rmnet.h
 
+QUALCOMM TRUST ZONE MEMORY ALLOCATOR
+M:	Bartosz Golaszewski
+L:	linux-arm-msm@vger.kernel.org
+S:	Maintained
+F:	drivers/firmware/qcom/qcom_tzmem.c
+F:	drivers/firmware/qcom/qcom_tzmem.h
+F:	include/linux/firmware/qcom/qcom_tzmem.h
+
 QUALCOMM TSENS THERMAL DRIVER
 M:	Amit Kucheria
 M:	Thara Gopinath
diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig
index 3f05d9854ddf..3c495f8698e1 100644
--- a/drivers/firmware/qcom/Kconfig
+++ b/drivers/firmware/qcom/Kconfig
@@ -9,6 +9,26 @@ menu "Qualcomm firmware drivers"
 config QCOM_SCM
 	tristate
 
+config QCOM_TZMEM
+	tristate
+	select GENERIC_ALLOCATOR
+
+choice
+	prompt "TrustZone interface memory allocator mode"
+	default QCOM_TZMEM_MODE_GENERIC
+	help
+	  Selects the mode of the memory allocator providing memory buffers of
+	  suitable format for sharing with the TrustZone. If in doubt, select
+	  'Generic'.
+
+config QCOM_TZMEM_MODE_GENERIC
+	bool "Generic"
+	help
+	  Use the generic allocator mode. The memory is page-aligned,
+	  non-cacheable and physically contiguous.
+
+endchoice
+
 config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
 	bool "Qualcomm download mode enabled by default"
 	depends on QCOM_SCM
diff --git a/drivers/firmware/qcom/Makefile b/drivers/firmware/qcom/Makefile
index c9f12ee8224a..0be40a1abc13 100644
--- a/drivers/firmware/qcom/Makefile
+++ b/drivers/firmware/qcom/Makefile
@@ -5,5 +5,6 @@
 obj-$(CONFIG_QCOM_SCM)		+= qcom-scm.o
 qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
+obj-$(CONFIG_QCOM_TZMEM)	+= qcom_tzmem.o
 obj-$(CONFIG_QCOM_QSEECOM)	+= qcom_qseecom.o
 obj-$(CONFIG_QCOM_QSEECOM_UEFISECAPP)	+= qcom_qseecom_uefisecapp.o
diff --git a/drivers/firmware/qcom/qcom_tzmem.c b/drivers/firmware/qcom/qcom_tzmem.c
new file mode 100644
index 000000000000..3853385bf215
--- /dev/null
+++ b/drivers/firmware/qcom/qcom_tzmem.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Memory allocator for buffers shared with the TrustZone.
+ *
+ * Copyright (C) 2023-2024 Linaro Ltd.
+ */
+
+#include <linux/bug.h>
+#include <linux/cleanup.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/firmware/qcom/qcom_tzmem.h>
+#include <linux/genalloc.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/radix-tree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include "qcom_tzmem.h"
+
+struct qcom_tzmem_area {
+	struct list_head list;
+	void *vaddr;
+	dma_addr_t paddr;
+	size_t size;
+	void *priv;
+};
+
+struct qcom_tzmem_pool {
+	struct gen_pool *genpool;
+	struct list_head areas;
+	enum qcom_tzmem_policy policy;
+	size_t increment;
+	size_t max_size;
+	spinlock_t lock;
+};
+
+struct qcom_tzmem_chunk {
+	phys_addr_t paddr;
+	size_t size;
+	struct qcom_tzmem_pool *owner;
+};
+
+static struct device *qcom_tzmem_dev;
+static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
+static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);
+
+#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)
+
+static int qcom_tzmem_init(void)
+{
+	return 0;
+}
+
+static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
+{
+	return 0;
+}
+
+static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
+{
+
+}
+
+#endif /* CONFIG_QCOM_TZMEM_MODE_GENERIC */
+
+static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
+				      size_t size, gfp_t gfp)
+{
+	int ret;
+
+	struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
+							     gfp);
+	if (!area)
+		return -ENOMEM;
+
+	area->size = PAGE_ALIGN(size);
+
+	area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
+					 &area->paddr, gfp);
+	if (!area->vaddr)
+		return -ENOMEM;
+
+	ret = qcom_tzmem_init_area(area);
+	if (ret) {
+		dma_free_coherent(qcom_tzmem_dev, area->size,
+				  area->vaddr, area->paddr);
+		return ret;
+	}
+
+	ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
+				(phys_addr_t)area->paddr, size, -1);
+	if (ret) {
+		dma_free_coherent(qcom_tzmem_dev, area->size,
+				  area->vaddr, area->paddr);
+		return ret;
+	}
+
+	scoped_guard(spinlock_irqsave, &pool->lock)
+		list_add_tail(&area->list, &pool->areas);
+
+	area = NULL;
+	return 0;
+}
+
+/**
+ * qcom_tzmem_pool_new() - Create a new TZ memory pool.
+ * @config: Pool configuration.
+ *
+ * Create a new pool of memory suitable for sharing with the TrustZone.
+ *
+ * Must not be used in atomic context.
+ *
+ * Return: New memory pool address or ERR_PTR() on error.
+ */
+struct qcom_tzmem_pool *
+qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
+{
+	int ret = -ENOMEM;
+
+	might_sleep();
+
+	switch (config->policy) {
+	case QCOM_TZMEM_POLICY_STATIC:
+		if (!config->initial_size)
+			return ERR_PTR(-EINVAL);
+		break;
+	case QCOM_TZMEM_POLICY_MULTIPLIER:
+		if (!config->increment)
+			return ERR_PTR(-EINVAL);
+		break;
+	case QCOM_TZMEM_POLICY_ON_DEMAND:
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+
+	struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
+							     GFP_KERNEL);
+	if (!pool)
+		return ERR_PTR(-ENOMEM);
+
+	pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!pool->genpool)
+		return ERR_PTR(-ENOMEM);
+
+	gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);
+
+	pool->policy = config->policy;
+	pool->increment = config->increment;
+	pool->max_size = config->max_size;
+	INIT_LIST_HEAD(&pool->areas);
+	spin_lock_init(&pool->lock);
+
+	if (config->initial_size) {
+		ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
+						 GFP_KERNEL);
+		if (ret) {
+			gen_pool_destroy(pool->genpool);
+			return ERR_PTR(ret);
+		}
+	}
+
+	return no_free_ptr(pool);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);
+
+/**
+ * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
+ * @pool: Memory pool to free.
+ *
+ * Must not be called if any of the allocated chunks has not been freed.
+ * Must not be used in atomic context.
+ */
+void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
+{
+	struct qcom_tzmem_area *area, *next;
+	struct qcom_tzmem_chunk *chunk;
+	struct radix_tree_iter iter;
+	bool non_empty = false;
+	void __rcu **slot;
+
+	might_sleep();
+
+	if (!pool)
+		return;
+
+	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
+		radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
+			chunk = radix_tree_deref_slot_protected(slot,
+						&qcom_tzmem_chunks_lock);
+
+			if (chunk->owner == pool)
+				non_empty = true;
+		}
+	}
+
+	WARN(non_empty, "Freeing TZ memory pool with memory still allocated");
+
+	list_for_each_entry_safe(area, next, &pool->areas, list) {
+		list_del(&area->list);
+		qcom_tzmem_cleanup_area(area);
+		dma_free_coherent(qcom_tzmem_dev, area->size,
+				  area->vaddr, area->paddr);
+		kfree(area);
+	}
+
+	gen_pool_destroy(pool->genpool);
+	kfree(pool);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);
+
+static void devm_qcom_tzmem_pool_free(void *data)
+{
+	struct qcom_tzmem_pool *pool = data;
+
+	qcom_tzmem_pool_free(pool);
+}
+
+/**
+ * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
+ * @dev: Device managing this resource.
+ * @config: Pool configuration.
+ *
+ * Must not be used in atomic context.
+ *
+ * Return: Address of the managed pool or ERR_PTR() on failure.
+ */
+struct qcom_tzmem_pool *
+devm_qcom_tzmem_pool_new(struct device *dev,
+			 const struct qcom_tzmem_pool_config *config)
+{
+	struct qcom_tzmem_pool *pool;
+	int ret;
+
+	pool = qcom_tzmem_pool_new(config);
+	if (IS_ERR(pool))
+		return pool;
+
+	ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return pool;
+}
+
+static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
+				     size_t requested, gfp_t gfp)
+{
+	size_t current_size = gen_pool_size(pool->genpool);
+
+	if (pool->max_size && (current_size + requested) > pool->max_size)
+		return false;
+
+	switch (pool->policy) {
+	case QCOM_TZMEM_POLICY_STATIC:
+		return false;
+	case QCOM_TZMEM_POLICY_MULTIPLIER:
+		requested = current_size * pool->increment;
+		break;
+	case QCOM_TZMEM_POLICY_ON_DEMAND:
+		break;
+	}
+
+	return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
+}
+
+/**
+ * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
+ * @pool: TZ memory pool from which to allocate memory.
+ * @size: Number of bytes to allocate.
+ * @gfp: GFP flags.
+ *
+ * Can be used in any context.
+ *
+ * Return:
+ * Address of the allocated buffer or NULL if no more memory can be allocated.
+ * The buffer must be released using qcom_tzmem_free().
+ */
+void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
+{
+	unsigned long vaddr;
+	int ret;
+
+	if (!size)
+		return NULL;
+
+	size = PAGE_ALIGN(size);
+
+	struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
+							       gfp);
+	if (!chunk)
+		return NULL;
+
+again:
+	vaddr = gen_pool_alloc(pool->genpool, size);
+	if (!vaddr) {
+		if (qcom_tzmem_try_grow_pool(pool, size, gfp))
+			goto again;
+
+		return NULL;
+	}
+
+	chunk->paddr = gen_pool_virt_to_phys(pool->genpool, vaddr);
+	chunk->size = size;
+	chunk->owner = pool;
+
+	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
+		ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
+		if (ret) {
+			gen_pool_free(pool->genpool, vaddr, size);
+			return NULL;
+		}
+
+		chunk = NULL;
+	}
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);
+
+/**
+ * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
+ * @vaddr: Virtual address of the buffer.
+ *
+ * Can be used in any context.
+ */
+void qcom_tzmem_free(void *vaddr)
+{
+	struct qcom_tzmem_chunk *chunk;
+
+	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
+		chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
+					       (unsigned long)vaddr, NULL);
+
+	if (!chunk) {
+		WARN(1, "Virtual address %p not owned by TZ memory allocator",
+		     vaddr);
+		return;
+	}
+
+	scoped_guard(spinlock_irqsave, &chunk->owner->lock)
+		gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
+			      chunk->size);
+	kfree(chunk);
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_free);
+
+/**
+ * qcom_tzmem_to_phys() - Map the virtual address of a TZ buffer to physical.
+ * @vaddr: Virtual address of the buffer allocated from a TZ memory pool.
+ *
+ * Can be used in any context. The address must have been returned by a call
+ * to qcom_tzmem_alloc().
+ *
+ * Return: Physical address of the buffer.
+ */
+phys_addr_t qcom_tzmem_to_phys(void *vaddr)
+{
+	struct qcom_tzmem_chunk *chunk;
+
+	guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);
+
+	chunk = radix_tree_lookup(&qcom_tzmem_chunks, (unsigned long)vaddr);
+	if (!chunk)
+		return 0;
+
+	return chunk->paddr;
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);
+
+int qcom_tzmem_enable(struct device *dev)
+{
+	if (qcom_tzmem_dev)
+		return -EBUSY;
+
+	qcom_tzmem_dev = dev;
+
+	return qcom_tzmem_init();
+}
+EXPORT_SYMBOL_GPL(qcom_tzmem_enable);
+
+MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
+MODULE_AUTHOR("Bartosz Golaszewski");
+MODULE_LICENSE("GPL");
diff --git a/drivers/firmware/qcom/qcom_tzmem.h b/drivers/firmware/qcom/qcom_tzmem.h
new file mode 100644
index 000000000000..8fa8a3eb940e
--- /dev/null
+++ b/drivers/firmware/qcom/qcom_tzmem.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023-2024 Linaro Ltd.
+ */
+
+#ifndef __QCOM_TZMEM_PRIV_H
+#define __QCOM_TZMEM_PRIV_H
+
+struct device;
+
+int qcom_tzmem_enable(struct device *dev);
+
+#endif /* __QCOM_TZMEM_PRIV_H */
diff --git a/include/linux/firmware/qcom/qcom_tzmem.h b/include/linux/firmware/qcom/qcom_tzmem.h
new file mode 100644
index 000000000000..b83b63a0c049
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_tzmem.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023-2024 Linaro Ltd.
+ */
+
+#ifndef __QCOM_TZMEM_H
+#define __QCOM_TZMEM_H
+
+#include <linux/cleanup.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+
+struct device;
+struct qcom_tzmem_pool;
+
+/**
+ * enum qcom_tzmem_policy - Policy for pool growth.
+ */
+enum qcom_tzmem_policy {
+	/**< Static pool, never grow above initial size. */
+	QCOM_TZMEM_POLICY_STATIC = 1,
+	/**< When out of memory, add increment * current size of memory. */
+	QCOM_TZMEM_POLICY_MULTIPLIER,
+	/**< When out of memory add as much as is needed until max_size. */
+	QCOM_TZMEM_POLICY_ON_DEMAND,
+};
+
+/**
+ * struct qcom_tzmem_pool_config - TZ memory pool configuration.
+ * @initial_size: Number of bytes to allocate for the pool during its creation.
+ * @policy: Pool size growth policy.
+ * @increment: Used with policies that allow pool growth.
+ * @max_size: Size above which the pool will never grow.
+ */
+struct qcom_tzmem_pool_config {
+	size_t initial_size;
+	enum qcom_tzmem_policy policy;
+	size_t increment;
+	size_t max_size;
+};
+
+struct qcom_tzmem_pool *
+qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config);
+void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool);
+struct qcom_tzmem_pool *
+devm_qcom_tzmem_pool_new(struct device *dev,
+			 const struct qcom_tzmem_pool_config *config);
+
+void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp);
+void qcom_tzmem_free(void *ptr);
+
+DEFINE_FREE(qcom_tzmem, void *, if (_T) qcom_tzmem_free(_T))
+
+phys_addr_t qcom_tzmem_to_phys(void *ptr);
+
+#endif /* __QCOM_TZMEM_H */
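
---

Illustrative usage sketch, not part of the patch: a minimal consumer of the new API, assuming a hypothetical driver. The names example_probe(), example_fw_call(), example_pool and do_example_smc_call(), as well as the pool sizes, are made up for illustration; only the qcom_tzmem_* calls come from the diff above. The sketch creates a device-managed, on-demand growing pool at probe time, carves a buffer out of it, hands its physical address to a (stubbed) firmware call and relies on the qcom_tzmem cleanup class to release the buffer.

#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/sizes.h>

static struct qcom_tzmem_pool *example_pool;

/* Stand-in for the real firmware (e.g. SCM) call - hypothetical. */
static int do_example_smc_call(phys_addr_t paddr, size_t size)
{
	return 0;
}

static int example_probe(struct device *dev)
{
	struct qcom_tzmem_pool_config config = {
		.initial_size = SZ_256K,
		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
		.max_size = SZ_1M,
	};

	/* The pool is torn down automatically when 'dev' is unbound. */
	example_pool = devm_qcom_tzmem_pool_new(dev, &config);
	if (IS_ERR(example_pool))
		return PTR_ERR(example_pool);

	return 0;
}

static int example_fw_call(size_t payload_size)
{
	/* Released via qcom_tzmem_free() when 'buf' goes out of scope. */
	void *buf __free(qcom_tzmem) = qcom_tzmem_alloc(example_pool,
							payload_size,
							GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* The TrustZone only ever sees the physical address of the buffer. */
	return do_example_smc_call(qcom_tzmem_to_phys(buf), payload_size);
}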