Mirror of https://github.com/edk2-porting/linux-next.git, synced 2025-01-12 07:34:08 +08:00
iommu/amd: Page-specific invalidations for more than one page
Currently, IOMMU invalidations and device-IOTLB invalidations on AMD IOMMUs fall back to a full address-space invalidation if more than a single page needs to be flushed. Full flushes are especially inefficient when the IOMMU is virtualized by a hypervisor, since they require the hypervisor to synchronize the entire address space.

AMD IOMMUs allow a mask to be provided so that a single page-specific invalidation covers multiple pages matching the address. The mask is encoded as part of the address: the first zero bit in the address (within bits [51:12]) indicates the mask size.

Use this hardware feature to perform selective IOMMU and IOTLB flushes, and combine the logic of the two paths for better code reuse.

The IOMMU invalidations passed a smoke test. The device-IOTLB invalidations are untested.

Cc: Joerg Roedel <joro@8bytes.org>
Cc: Will Deacon <will@kernel.org>
Cc: Jiajun Cao <caojiajun@vmware.com>
Cc: iommu@lists.linux-foundation.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Nadav Amit <namit@vmware.com>
Link: https://lore.kernel.org/r/20210323210619.513069-1-namit@vmware.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
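To make the encoding concrete, here is a small standalone sketch (plain userspace C, not kernel code) of the scheme described above: with the size bit (S) set, the run of one-bits starting at address bit 12 tells the IOMMU how many 4KB pages to flush, and the first zero bit marks the extent of the region. The helper name encode_inv_address, the EX_* constants, and the example address are invented for this illustration.

/*
 * Illustration only: encode a page-specific invalidation address for a
 * naturally aligned, power-of-two range of 4KB pages, following the
 * "first zero bit in bits [51:12] indicates the mask size" scheme from
 * the commit message.  Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT	12
#define EX_PAGE_MASK	(~((1ULL << EX_PAGE_SHIFT) - 1))

static uint64_t encode_inv_address(uint64_t base, uint64_t npages)
{
	uint64_t low_ones;

	/* A single page needs no mask bits (the S bit would stay clear). */
	if (npages < 2)
		return base & EX_PAGE_MASK;

	/* log2(npages) - 1 one-bits, placed just above the page offset. */
	low_ones = (npages >> 1) - 1;
	return (base & EX_PAGE_MASK) | (low_ones << EX_PAGE_SHIFT);
}

int main(void)
{
	/* Flush 8 pages (32KB) starting at a 32KB-aligned address. */
	uint64_t base = 0x123450000ULL;		/* hypothetical IOVA */
	uint64_t enc  = encode_inv_address(base, 8);

	/* Bits 12 and 13 are set, bit 14 stays clear: 0x123453000. */
	printf("encoded invalidation address: %#llx\n",
	       (unsigned long long)enc);
	return 0;
}

With that encoding, the first zero bit (bit 14 here) tells the hardware to invalidate the whole 32KB aligned region rather than walking it page by page.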
This commit is contained in:
parent fc1b662050
commit 268aa45482
@@ -852,33 +852,58 @@ static void build_inv_dte(struct iommu_cmd *cmd, u16 devid)
 	CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY);
 }
 
+/*
+ * Builds an invalidation address which is suitable for one page or multiple
+ * pages. Sets the size bit (S) as needed is more than one page is flushed.
+ */
+static inline u64 build_inv_address(u64 address, size_t size)
+{
+	u64 pages, end, msb_diff;
+
+	pages = iommu_num_pages(address, size, PAGE_SIZE);
+
+	if (pages == 1)
+		return address & PAGE_MASK;
+
+	end = address + size - 1;
+
+	/*
+	 * msb_diff would hold the index of the most significant bit that
+	 * flipped between the start and end.
+	 */
+	msb_diff = fls64(end ^ address) - 1;
+
+	/*
+	 * Bits 63:52 are sign extended. If for some reason bit 51 is different
+	 * between the start and the end, invalidate everything.
+	 */
+	if (unlikely(msb_diff > 51)) {
+		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+	} else {
+		/*
+		 * The msb-bit must be clear on the address. Just set all the
+		 * lower bits.
+		 */
+		address |= 1ull << (msb_diff - 1);
+	}
+
+	/* Clear bits 11:0 */
+	address &= PAGE_MASK;
+
+	/* Set the size bit - we flush more than one 4kb page */
+	return address | CMD_INV_IOMMU_PAGES_SIZE_MASK;
+}
+
 static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 				  size_t size, u16 domid, int pde)
 {
-	u64 pages;
-	bool s;
-
-	pages = iommu_num_pages(address, size, PAGE_SIZE);
-	s = false;
-
-	if (pages > 1) {
-		/*
-		 * If we have to flush more than one page, flush all
-		 * TLB entries for this domain
-		 */
-		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-		s = true;
-	}
-
-	address &= PAGE_MASK;
+	u64 inv_address = build_inv_address(address, size);
 
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->data[1] |= domid;
-	cmd->data[2] = lower_32_bits(address);
-	cmd->data[3] = upper_32_bits(address);
+	cmd->data[2] = lower_32_bits(inv_address);
+	cmd->data[3] = upper_32_bits(inv_address);
 	CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
-	if (s) /* size bit - we flush more than one 4kb page */
-		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 	if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
 		cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 }
@@ -886,32 +911,15 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
 static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
 				  u64 address, size_t size)
 {
-	u64 pages;
-	bool s;
-
-	pages = iommu_num_pages(address, size, PAGE_SIZE);
-	s = false;
-
-	if (pages > 1) {
-		/*
-		 * If we have to flush more than one page, flush all
-		 * TLB entries for this domain
-		 */
-		address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-		s = true;
-	}
-
-	address &= PAGE_MASK;
+	u64 inv_address = build_inv_address(address, size);
 
 	memset(cmd, 0, sizeof(*cmd));
 	cmd->data[0] = devid;
 	cmd->data[0] |= (qdep & 0xff) << 24;
 	cmd->data[1] = devid;
-	cmd->data[2] = lower_32_bits(address);
-	cmd->data[3] = upper_32_bits(address);
+	cmd->data[2] = lower_32_bits(inv_address);
+	cmd->data[3] = upper_32_bits(inv_address);
 	CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
-	if (s)
-		cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
 }
 
 static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, u32 pasid,
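The new build_inv_address() helper above derives this mask for arbitrary ranges: it XORs the first and last byte addresses of the range and takes the index of the most significant differing bit (msb_diff). The short sketch below (again standalone userspace C; ex_fls64() and the example addresses are invented for the illustration) shows that computation on its own.

/* Illustration of the msb_diff computation used by build_inv_address(). */
#include <stdint.h>
#include <stdio.h>

/* 1-based "find last set", mirroring the semantics of the kernel's fls64(). */
static int ex_fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	uint64_t start = 0x123450000ULL;	/* hypothetical range start */
	uint64_t end   = start + 8 * 4096 - 1;	/* last byte of an 8-page range */
	int msb_diff   = ex_fls64(end ^ start) - 1;

	/* end ^ start == 0x7fff, so the highest differing bit is bit 14. */
	printf("msb_diff = %d\n", msb_diff);
	return 0;
}

For this 32KB range, msb_diff is 14, the same bit that is left clear in the encoded address of the earlier sketch; if the start and end differed above bit 51, the helper instead falls back to a full flush, since bits 63:52 are sign-extended.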