mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-13 14:24:11 +08:00
drm/panthor: Fix firmware initialization on systems with a page size > 4k
The system and GPU MMU page size might differ, which becomes a
problem for FW sections that need to be mapped at explicit addresses
since our PAGE_SIZE alignment might cover a VA range that's
expected to be used for another section.
Make sure we never map more than we need.
Changes in v3:
- Add R-bs
Changes in v2:
- Plan for per-VM page sizes so the MCU VM and user VM can
have different page sizes
Fixes: 2718d91816
("drm/panthor: Add the FW logical block")
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241030150231.768949-1-boris.brezillon@collabora.com
This commit is contained in:
parent
72f7e16ecc
commit
5d01b56f05
@ -487,6 +487,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
|
||||
struct panthor_fw_binary_iter *iter,
|
||||
u32 ehdr)
|
||||
{
|
||||
ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm);
|
||||
struct panthor_fw_binary_section_entry_hdr hdr;
|
||||
struct panthor_fw_section *section;
|
||||
u32 section_size;
|
||||
@ -515,8 +516,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((hdr.va.start & ~PAGE_MASK) != 0 ||
|
||||
(hdr.va.end & ~PAGE_MASK) != 0) {
|
||||
if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) {
|
||||
drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n",
|
||||
hdr.va.start, hdr.va.end);
|
||||
return -EINVAL;
|
||||
|
@ -44,8 +44,7 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
|
||||
to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
|
||||
goto out_free_bo;
|
||||
|
||||
ret = panthor_vm_unmap_range(vm, bo->va_node.start,
|
||||
panthor_kernel_bo_size(bo));
|
||||
ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
|
||||
if (ret)
|
||||
goto out_free_bo;
|
||||
|
||||
@ -95,10 +94,16 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
|
||||
}
|
||||
|
||||
bo = to_panthor_bo(&obj->base);
|
||||
size = obj->base.size;
|
||||
kbo->obj = &obj->base;
|
||||
bo->flags = bo_flags;
|
||||
|
||||
/* The system and GPU MMU page size might differ, which becomes a
|
||||
* problem for FW sections that need to be mapped at explicit address
|
||||
* since our PAGE_SIZE alignment might cover a VA range that's
|
||||
* expected to be used for another section.
|
||||
* Make sure we never map more than we need.
|
||||
*/
|
||||
size = ALIGN(size, panthor_vm_page_size(vm));
|
||||
ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
|
||||
if (ret)
|
||||
goto err_put_obj;
|
||||
|
@ -826,6 +826,14 @@ void panthor_vm_idle(struct panthor_vm *vm)
|
||||
mutex_unlock(&ptdev->mmu->as.slots_lock);
|
||||
}
|
||||
|
||||
u32 panthor_vm_page_size(struct panthor_vm *vm)
|
||||
{
|
||||
const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
|
||||
u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
|
||||
|
||||
return 1u << pg_shift;
|
||||
}
|
||||
|
||||
static void panthor_vm_stop(struct panthor_vm *vm)
|
||||
{
|
||||
drm_sched_stop(&vm->sched, NULL);
|
||||
@ -1025,12 +1033,13 @@ int
|
||||
panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
|
||||
struct drm_mm_node *va_node)
|
||||
{
|
||||
ssize_t vm_pgsz = panthor_vm_page_size(vm);
|
||||
int ret;
|
||||
|
||||
if (!size || (size & ~PAGE_MASK))
|
||||
if (!size || !IS_ALIGNED(size, vm_pgsz))
|
||||
return -EINVAL;
|
||||
|
||||
if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK))
|
||||
if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&vm->mm_lock);
|
||||
@ -2366,11 +2375,12 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
|
||||
const struct drm_panthor_vm_bind_op *op,
|
||||
struct panthor_vm_op_ctx *op_ctx)
|
||||
{
|
||||
ssize_t vm_pgsz = panthor_vm_page_size(vm);
|
||||
struct drm_gem_object *gem;
|
||||
int ret;
|
||||
|
||||
/* Aligned on page size. */
|
||||
if ((op->va | op->size) & ~PAGE_MASK)
|
||||
if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
|
||||
return -EINVAL;
|
||||
|
||||
switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
|
||||
|
@ -30,6 +30,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
|
||||
|
||||
int panthor_vm_active(struct panthor_vm *vm);
|
||||
void panthor_vm_idle(struct panthor_vm *vm);
|
||||
u32 panthor_vm_page_size(struct panthor_vm *vm);
|
||||
int panthor_vm_as(struct panthor_vm *vm);
|
||||
int panthor_vm_flush_all(struct panthor_vm *vm);
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user