72b96ec655
Implement setting specified buffer ranges as read-only. If the specified range is not 64K-aligned and 64K contiguous MMU600 pages are enabled, split the 64K mapping to allow 4K granularity for the read-only configuration.

Signed-off-by: Wachowski, Karol <karol.wachowski@intel.com>
Reviewed-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Signed-off-by: Jacek Lawrynowicz <jacek.lawrynowicz@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240611120433.1012423-10-jacek.lawrynowicz@linux.intel.com
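A minimal sketch of the intended call flow, assuming a buffer already backed by a populated sg_table; vdev, ctx, vpu_addr, sgt, and size are illustrative caller-side names, not declarations from this header:

	int err;

	/* Map the buffer into the context's VPU address space first. */
	err = ivpu_mmu_context_map_sgt(vdev, ctx, vpu_addr, sgt, false);
	if (err)
		return err;

	/*
	 * If vpu_addr or size is not 64K-aligned while 64K contiguous
	 * MMU600 pages are in use, the driver splits the 64K mapping so
	 * the read-only attribute can be applied at 4K granularity.
	 */
	err = ivpu_mmu_context_set_pages_ro(vdev, ctx, vpu_addr, size);
	if (err)
		ivpu_mmu_context_unmap_sgt(vdev, ctx, vpu_addr, sgt);
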
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#ifndef __IVPU_MMU_CONTEXT_H__
#define __IVPU_MMU_CONTEXT_H__

#include <drm/drm_mm.h>

struct ivpu_device;
struct ivpu_file_priv;
struct ivpu_addr_range;

#define IVPU_MMU_PGTABLE_ENTRIES 512ull

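/*
 * Host-side view of the 4-level VPU page table (PGD -> PUD -> PMD -> PTE),
 * each level holding IVPU_MMU_PGTABLE_ENTRIES 64-bit descriptors. The
 * *_ptrs arrays cache CPU pointers to the lower-level tables; pgd_dma is
 * the DMA address of the root table programmed into the MMU600.
 */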
struct ivpu_mmu_pgtable {
	u64 ***pte_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	u64 **pmd_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	u64 *pud_ptrs[IVPU_MMU_PGTABLE_ENTRIES];
	u64 *pgd_dma_ptr;
	dma_addr_t pgd_dma;
};

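/*
 * Per-SSID MMU context: 'mm' is the drm_mm allocator for the context's VPU
 * address space, 'pgtable' its page table, and 'id' the stream ID (SSID).
 */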
struct ivpu_mmu_context {
	struct mutex lock; /* Protects: mm, pgtable */
	struct drm_mm mm;
	struct ivpu_mmu_pgtable pgtable;
	u32 id;
};

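/*
 * The global and reserved contexts are device-wide and managed by the
 * driver itself; the user contexts below are created per file handle.
 */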
int ivpu_mmu_global_context_init(struct ivpu_device *vdev);
void ivpu_mmu_global_context_fini(struct ivpu_device *vdev);
int ivpu_mmu_reserved_context_init(struct ivpu_device *vdev);
void ivpu_mmu_reserved_context_fini(struct ivpu_device *vdev);

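/*
 * A user context is bound to an open file handle, with ctx_id used as its
 * SSID. Marking a context invalid flags it after the device reports an
 * MMU event for that SSID.
 */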
int ivpu_mmu_user_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u32 ctx_id);
void ivpu_mmu_user_context_fini(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
void ivpu_mmu_user_context_mark_invalid(struct ivpu_device *vdev, u32 ssid);

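/*
 * Allocate ('insert') or free ('remove') a VPU virtual address node of
 * 'size' bytes from the context's drm_mm, constrained to 'range'.
 */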
int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu_addr_range *range,
				 u64 size, struct drm_mm_node *node);
void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node);

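/*
 * Map/unmap a scatter-gather table at 'vpu_addr' in the context's page
 * tables ('llc_coherent' selects LLC-coherent memory attributes).
 * ivpu_mmu_context_set_pages_ro() makes an already mapped range read-only,
 * splitting 64K contiguous mappings down to 4K PTEs when the range is not
 * 64K-aligned.
 */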
int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
			     u64 vpu_addr, struct sg_table *sgt, bool llc_coherent);
void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				u64 vpu_addr, struct sg_table *sgt);
int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
				  u64 vpu_addr, size_t size);

#endif /* __IVPU_MMU_CONTEXT_H__ */