media: ipu6: optimize the IPU6 MMU unmapping flow
The MMU mapping flow was optimized to improve performance; the unmapping flow can be optimized to follow the same pattern: walk the L1 page table once, rewrite each L2 table's entries, and flush the updated entries with a single batched clflush per L2 table instead of one flush per entry.

Signed-off-by: Bingbu Cao <bingbu.cao@intel.com>
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Signed-off-by: Hans Verkuil <hverkuil@xs4all.nl>
This commit is contained in:
parent f7c924aba3
commit 1faf84ff1c
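Before the diff, a minimal standalone sketch of the pattern the change applies; the names here (flush_cache_range(), PT_ENTRIES, DUMMY_PTE) are hypothetical stand-ins, with flush_cache_range() playing the role of the driver's clflush_cache_range(). Instead of flushing the cache line of every page-table entry as it is rewritten, the loop counts the contiguous entries it touched and issues a single range flush for all of them:

/* Sketch only: hypothetical helper and constants, not the driver's API. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PT_ENTRIES	1024	/* entries per L2 table (made-up value) */
#define DUMMY_PTE	0	/* stand-in for mmu_info->dummy_page_pteval */

/* stands in for clflush_cache_range(); here it just reports the range */
static void flush_cache_range(void *addr, size_t len)
{
	printf("flush %zu bytes at %p\n", len, addr);
}

/* old flow: one flush per rewritten entry */
static void clear_per_entry(uint32_t *pt, unsigned int start, unsigned int n)
{
	for (unsigned int i = start; i < start + n && i < PT_ENTRIES; i++) {
		pt[i] = DUMMY_PTE;
		flush_cache_range(&pt[i], sizeof(pt[i]));
	}
}

/* new flow: count the contiguous rewritten entries, flush them in one call */
static void clear_batched(uint32_t *pt, unsigned int start, unsigned int n)
{
	unsigned int i, touched = 0;

	for (i = start; i < start + n && i < PT_ENTRIES; i++) {
		pt[i] = DUMMY_PTE;	/* rewrite now, defer the flush */
		touched++;
	}
	if (touched)
		flush_cache_range(&pt[i - touched], sizeof(pt[0]) * touched);
}

int main(void)
{
	static uint32_t l2_pt[PT_ENTRIES];

	clear_per_entry(l2_pt, 8, 4);	/* four flushes */
	clear_batched(l2_pt, 8, 4);	/* one flush */
	return 0;
}

The same trade-off shows up in the diff below: the old l2_unmap() called clflush_cache_range() once per entry, while the new one calls it once per L2 table, covering sizeof(l2_pt[0]) * l2_entries bytes.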
@@ -257,44 +257,51 @@ static u32 *alloc_l2_pt(struct ipu6_mmu_info *mmu_info)
 static size_t l2_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
 		       phys_addr_t dummy, size_t size)
 {
-	u32 l1_idx = iova >> ISP_L1PT_SHIFT;
-	u32 iova_start = iova;
+	unsigned int l2_entries;
 	unsigned int l2_idx;
-	size_t unmapped = 0;
 	unsigned long flags;
+	u32 l1_idx;
 	u32 *l2_pt;
 
-	dev_dbg(mmu_info->dev, "unmapping l2 page table for l1 index %u (iova 0x%8.8lx)\n",
-		l1_idx, iova);
-
 	spin_lock_irqsave(&mmu_info->lock, flags);
-	if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
-		spin_unlock_irqrestore(&mmu_info->lock, flags);
-		dev_err(mmu_info->dev,
-			"unmap iova 0x%8.8lx l1 idx %u which was not mapped\n",
-			iova, l1_idx);
-		return 0;
-	}
-
-	for (l2_idx = (iova_start & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
-	     (iova_start & ISP_L1PT_MASK) + (l2_idx << ISP_PAGE_SHIFT)
-	     < iova_start + size && l2_idx < ISP_L2PT_PTES; l2_idx++) {
-		phys_addr_t pteval;
-
-		l2_pt = mmu_info->l2_pts[l1_idx];
-		pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);
-		dev_dbg(mmu_info->dev,
-			"unmap l2 index %u with pteval 0x%p\n",
-			l2_idx, &pteval);
-		l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+	for (l1_idx = iova >> ISP_L1PT_SHIFT;
+	     size > 0 && l1_idx < ISP_L1PT_PTES; l1_idx++) {
+		dev_dbg(mmu_info->dev,
+			"unmapping l2 pgtable (l1 index %u (iova 0x%8.8lx))\n",
+			l1_idx, iova);
 
-		clflush_cache_range((void *)&l2_pt[l2_idx],
-				    sizeof(l2_pt[l2_idx]));
-		unmapped++;
+		if (mmu_info->l1_pt[l1_idx] == mmu_info->dummy_l2_pteval) {
+			dev_err(mmu_info->dev,
+				"unmap not mapped iova 0x%8.8lx l1 index %u\n",
+				iova, l1_idx);
+			continue;
+		}
+		l2_pt = mmu_info->l2_pts[l1_idx];
+
+		l2_entries = 0;
+		for (l2_idx = (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT;
+		     size > 0 && l2_idx < ISP_L2PT_PTES; l2_idx++) {
+			phys_addr_t pteval = TBL_PHYS_ADDR(l2_pt[l2_idx]);
+
+			dev_dbg(mmu_info->dev,
+				"unmap l2 index %u with pteval 0x%p\n",
+				l2_idx, &pteval);
+			l2_pt[l2_idx] = mmu_info->dummy_page_pteval;
+
+			iova += ISP_PAGE_SIZE;
+			size -= ISP_PAGE_SIZE;
+
+			l2_entries++;
+		}
+
+		WARN_ON_ONCE(!l2_entries);
+		clflush_cache_range(&l2_pt[l2_idx - l2_entries],
+				    sizeof(l2_pt[0]) * l2_entries);
 	}
-	spin_unlock_irqrestore(&mmu_info->lock, flags);
 
-	return unmapped << ISP_PAGE_SHIFT;
+	WARN_ON_ONCE(size);
+	spin_unlock_irqrestore(&mmu_info->lock, flags);
+	return l2_entries << ISP_PAGE_SHIFT;
 }
 
 static int l2_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
@@ -643,40 +650,13 @@ phys_addr_t ipu6_mmu_iova_to_phys(struct ipu6_mmu_info *mmu_info,
 	return phy_addr;
 }
 
-static size_t ipu6_mmu_pgsize(unsigned long pgsize_bitmap,
-			      unsigned long addr_merge, size_t size)
-{
-	unsigned int pgsize_idx;
-	size_t pgsize;
-
-	/* Max page size that still fits into 'size' */
-	pgsize_idx = __fls(size);
-
-	if (likely(addr_merge)) {
-		/* Max page size allowed by address */
-		unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-	}
-
-	pgsize = (1UL << (pgsize_idx + 1)) - 1;
-	pgsize &= pgsize_bitmap;
-
-	WARN_ON(!pgsize);
-
-	/* pick the biggest page */
-	pgsize_idx = __fls(pgsize);
-	pgsize = 1UL << pgsize_idx;
-
-	return pgsize;
-}
-
 size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
 		      size_t size)
 {
-	size_t unmapped_page, unmapped = 0;
 	unsigned int min_pagesz;
 
+	dev_dbg(mmu_info->dev, "unmapping iova 0x%lx size 0x%zx\n", iova, size);
+
 	/* find out the minimum page size supported */
 	min_pagesz = 1 << __ffs(mmu_info->pgsize_bitmap);
 
@@ -688,29 +668,10 @@ size_t ipu6_mmu_unmap(struct ipu6_mmu_info *mmu_info, unsigned long iova,
 	if (!IS_ALIGNED(iova | size, min_pagesz)) {
 		dev_err(NULL, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
 			iova, size, min_pagesz);
-		return -EINVAL;
+		return 0;
 	}
 
-	/*
-	 * Keep iterating until we either unmap 'size' bytes (or more)
-	 * or we hit an area that isn't mapped.
-	 */
-	while (unmapped < size) {
-		size_t pgsize = ipu6_mmu_pgsize(mmu_info->pgsize_bitmap,
-						iova, size - unmapped);
-
-		unmapped_page = __ipu6_mmu_unmap(mmu_info, iova, pgsize);
-		if (!unmapped_page)
-			break;
-
-		dev_dbg(mmu_info->dev, "unmapped: iova 0x%lx size 0x%zx\n",
-			iova, unmapped_page);
-
-		iova += unmapped_page;
-		unmapped += unmapped_page;
-	}
-
-	return unmapped;
+	return __ipu6_mmu_unmap(mmu_info, iova, size);
 }
 
 int ipu6_mmu_map(struct ipu6_mmu_info *mmu_info, unsigned long iova,
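As a worked example of the index arithmetic in the new loops (the constants below are illustrative assumptions, not the driver's actual header values): with 4 KiB pages and 4 MiB covered per L1 entry, unmapping 0x3000 bytes at iova 0x43000 stays within l1 index 0 and clears three consecutive L2 entries, which the new flow then flushes with a single call.

/* Worked example of the index arithmetic -- illustrative constants only. */
#include <stdio.h>

#define ISP_PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define ISP_L1PT_SHIFT	22			/* assumed 4 MiB per L1 entry */
#define ISP_L2PT_SHIFT	ISP_PAGE_SHIFT
#define ISP_L2PT_MASK	((1UL << ISP_L1PT_SHIFT) - 1)
#define ISP_PAGE_SIZE	(1UL << ISP_PAGE_SHIFT)

int main(void)
{
	unsigned long iova = 0x43000;
	unsigned long size = 0x3000;	/* three 4 KiB pages */

	printf("l1 index %lu\n", iova >> ISP_L1PT_SHIFT);	/* 0 */
	for (; size > 0; iova += ISP_PAGE_SIZE, size -= ISP_PAGE_SIZE)
		printf("clear l2 index %lu\n",
		       (iova & ISP_L2PT_MASK) >> ISP_L2PT_SHIFT);	/* 67, 68, 69 */
	return 0;
}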