iommu/vt-d: Avoid duplicate removing in __domain_mapping()
__domain_mapping() always removes the pages in the range from 'iov_pfn' to 'end_pfn', but 'end_pfn' is always the last pfn of the range that the caller wants to map.

This introduces many duplicated removals and makes the map operation take too long, for example:

  Map iova=0x100000,nr_pages=0x7d61800
    iov_pfn: 0x100000, end_pfn: 0x7e617ff
    iov_pfn: 0x140000, end_pfn: 0x7e617ff
    iov_pfn: 0x180000, end_pfn: 0x7e617ff
    iov_pfn: 0x1c0000, end_pfn: 0x7e617ff
    iov_pfn: 0x200000, end_pfn: 0x7e617ff
    ...
  It takes about 50ms in total.

We can reduce the cost by recalculating 'end_pfn' and limiting it to the boundary of the end of the current pte page:

  Map iova=0x100000,nr_pages=0x7d61800
    iov_pfn: 0x100000, end_pfn: 0x13ffff
    iov_pfn: 0x140000, end_pfn: 0x17ffff
    iov_pfn: 0x180000, end_pfn: 0x1bffff
    iov_pfn: 0x1c0000, end_pfn: 0x1fffff
    iov_pfn: 0x200000, end_pfn: 0x23ffff
    ...
  It now takes only 9ms.

This also removes a meaningless BUG_ON() in __domain_mapping().

Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Tested-by: Liujunjie <liujunjie23@huawei.com>
Link: https://lore.kernel.org/r/20211008000433.1115-1-longpeng2@huawei.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Link: https://lore.kernel.org/r/20211014053839.727419-10-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Commit 9906b9352a (parent 37c8041a81)
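For illustration, here is a minimal standalone sketch (not kernel code) that reproduces the end_pfn values quoted in the commit message for the first loop iteration. The LEVEL_STRIDE, PTES_PER_PAGE and level_mask() definitions below are assumptions that mirror the VT-d constants (9-bit stride per level, 512 PTEs per 4 KiB page-table page), reimplemented here only to make the arithmetic visible:

/* Standalone sketch: old vs. new end_pfn calculation (assumed constants). */
#include <stdio.h>

#define LEVEL_STRIDE    9
#define PTES_PER_PAGE   512UL

static unsigned long level_mask(int level)
{
        return ~0UL << ((level - 1) * LEVEL_STRIDE);    /* mask in pfn units */
}

int main(void)
{
        unsigned long iov_pfn = 0x100000, nr_pages = 0x7d61800;
        int largepage_lvl = 2;                          /* 2 MiB superpages */
        unsigned long lvl_pages = 1UL << ((largepage_lvl - 1) * LEVEL_STRIDE);

        /* Old calculation: end_pfn always points at the end of the whole range. */
        unsigned long old_end = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;

        /* New calculation: clamp to the PTE slots left in the current page-table
         * page (the pte is page-aligned here, so all 512 slots are available). */
        unsigned long max_remove = PTES_PER_PAGE * lvl_pages;
        unsigned long pages_to_remove = nr_pages < max_remove ? nr_pages : max_remove;
        unsigned long new_end = iov_pfn + pages_to_remove - 1;

        printf("old end_pfn: %#lx\n", old_end);         /* 0x7e617ff */
        printf("new end_pfn: %#lx\n", new_end);         /* 0x13ffff  */
        return 0;
}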
drivers/iommu/intel/iommu.c
@@ -2479,12 +2479,17 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                         return -ENOMEM;
                 first_pte = pte;
 
+                lvl_pages = lvl_to_nr_pages(largepage_lvl);
+
                 /* It is large page*/
                 if (largepage_lvl > 1) {
                         unsigned long end_pfn;
+                        unsigned long pages_to_remove;
 
                         pteval |= DMA_PTE_LARGE_PAGE;
-                        end_pfn = ((iov_pfn + nr_pages) & level_mask(largepage_lvl)) - 1;
+                        pages_to_remove = min_t(unsigned long, nr_pages,
+                                                nr_pte_to_next_page(pte) * lvl_pages);
+                        end_pfn = iov_pfn + pages_to_remove - 1;
                         switch_to_super_page(domain, iov_pfn, end_pfn, largepage_lvl);
                 } else {
                         pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
@@ -2506,10 +2511,6 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
                         WARN_ON(1);
                 }
 
-                lvl_pages = lvl_to_nr_pages(largepage_lvl);
-
-                BUG_ON(nr_pages < lvl_pages);
-
                 nr_pages -= lvl_pages;
                 iov_pfn += lvl_pages;
                 phys_pfn += lvl_pages;
include/linux/intel-iommu.h
@@ -710,6 +710,12 @@ static inline bool first_pte_in_page(struct dma_pte *pte)
         return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
 }
 
+static inline int nr_pte_to_next_page(struct dma_pte *pte)
+{
+        return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
+                (struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
+}
+
 extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
 extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
 
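The new nr_pte_to_next_page() helper returns how many PTE slots remain before the end of the current 4 KiB page-table page. Below is a userspace sketch of the same pointer arithmetic; 'struct dma_pte', VTD_PAGE_SIZE and ALIGN_UP are stubbed out here so the example is self-contained, and are assumptions standing in for the kernel definitions:

/* Userspace sketch of the pointer arithmetic behind nr_pte_to_next_page(). */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define VTD_PAGE_SIZE   4096UL
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

struct dma_pte { uint64_t val; };       /* 8 bytes -> 512 entries per 4 KiB page */

static long nr_pte_to_next_page(struct dma_pte *pte)
{
        unsigned long addr = (unsigned long)pte;

        if (addr % VTD_PAGE_SIZE == 0)  /* first entry of the pte page */
                return VTD_PAGE_SIZE / sizeof(struct dma_pte);  /* 512 */

        /* Otherwise: number of entries left before the next 4 KiB boundary. */
        return (struct dma_pte *)ALIGN_UP(addr, VTD_PAGE_SIZE) - pte;
}

int main(void)
{
        /* Page-aligned buffer standing in for one page of the page table. */
        struct dma_pte *base = aligned_alloc(VTD_PAGE_SIZE, VTD_PAGE_SIZE);

        printf("%ld\n", nr_pte_to_next_page(base));             /* 512 */
        printf("%ld\n", nr_pte_to_next_page(base + 510));       /* 2   */
        free(base);
        return 0;
}

Multiplied by lvl_pages, this bound is exactly what caps pages_to_remove in the hunk above, so each switch_to_super_page() call only covers PTEs that the current loop iteration is about to rewrite.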