iommu/amd: Remove unused flush pasid functions
We have removed the iommu_v2 module and converted the v2 page table to use the common flush functions. We have also moved the GCR3 table to be per-device. The PASID-related flush functions are therefore no longer used, so remove them.

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20240205115615.6053-15-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent cf70873e3d
commit 02b990253d
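For context, the removed __flush_pasid() (shown in the last hunk below) enforced a strict ordering: invalidate the IOMMU TLB on every IOMMU serving the domain, wait for those commands to complete, and only then invalidate the device (ATS) TLBs, so a device could not refill its TLB from a stale IOMMU TLB entry. The sketch below is a minimal user-space model of that ordering only; every type and helper name in it is an illustrative stand-in, not the driver's real API (the real code queues invalidation commands via iommu_queue_command() and uses a completion-wait command as the barrier).

/* Minimal stand-alone model of the two-stage flush ordering that the
 * removed __flush_pasid() enforced.  All names here are hypothetical
 * stand-ins used only to illustrate the ordering. */
#include <stdio.h>
#include <stdbool.h>

struct fake_iommu  { int id; bool serves_domain; };
struct fake_device { const char *name; bool ats_enabled; };

/* Stage 1 stand-in: drop the translation from the IOMMU's own TLB. */
static void flush_iommu_tlb(const struct fake_iommu *iommu, unsigned pasid)
{
	printf("IOMMU %d: invalidate PASID %u\n", iommu->id, pasid);
}

/* Stand-in for the completion-wait barrier between the two stages. */
static void wait_for_completion_barrier(void)
{
	printf("-- completion wait --\n");
}

/* Stage 2 stand-in: drop the translation from the device's ATS TLB. */
static void flush_device_tlb(const struct fake_device *dev, unsigned pasid)
{
	printf("device %s: invalidate PASID %u\n", dev->name, pasid);
}

int main(void)
{
	struct fake_iommu  iommus[]  = { { 0, true }, { 1, false }, { 2, true } };
	struct fake_device devices[] = { { "gpu", true }, { "nic", false } };
	unsigned pasid = 42;

	/* IOMMU TLBs first, so a device cannot refill its TLB from a
	 * stale IOMMU TLB entry while invalidation is still in flight. */
	for (unsigned i = 0; i < sizeof(iommus) / sizeof(iommus[0]); i++)
		if (iommus[i].serves_domain)
			flush_iommu_tlb(&iommus[i], pasid);
	wait_for_completion_barrier();

	/* Only then the device (ATS) TLBs, skipping non-ATS devices,
	 * mirroring the !dev_data->ats_enabled check in the removed code. */
	for (unsigned i = 0; i < sizeof(devices) / sizeof(devices[0]); i++)
		if (devices[i].ats_enabled)
			flush_device_tlb(&devices[i], pasid);
	wait_for_completion_barrier();

	return 0;
}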
@@ -49,7 +49,6 @@ int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
 		       ioasid_t pasid, unsigned long gcr3);
 int amd_iommu_clear_gcr3(struct iommu_dev_data *dev_data, ioasid_t pasid);
-int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid, u64 address);
 
 /*
  * This function flushes all internal caches of
  * the IOMMU used by this driver.
@@ -65,8 +64,6 @@ void amd_iommu_dev_flush_pasid_pages(struct iommu_dev_data *dev_data,
 void amd_iommu_dev_flush_pasid_all(struct iommu_dev_data *dev_data,
 				   ioasid_t pasid);
 
-int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid);
-
 #ifdef CONFIG_IRQ_REMAP
 int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
 #else
@@ -2747,106 +2747,6 @@ const struct iommu_ops amd_iommu_ops = {
 	}
 };
 
-static int __flush_pasid(struct protection_domain *domain, u32 pasid,
-			 u64 address, size_t size)
-{
-	struct iommu_dev_data *dev_data;
-	struct iommu_cmd cmd;
-	int i, ret;
-
-	if (!(domain->flags & PD_IOMMUV2_MASK))
-		return -EINVAL;
-
-	build_inv_iommu_pages(&cmd, address, size, domain->id, pasid, true);
-
-	/*
-	 * IOMMU TLB needs to be flushed before Device TLB to
-	 * prevent device TLB refill from IOMMU TLB
-	 */
-	for (i = 0; i < amd_iommu_get_num_iommus(); ++i) {
-		if (domain->dev_iommu[i] == 0)
-			continue;
-
-		ret = iommu_queue_command(amd_iommus[i], &cmd);
-		if (ret != 0)
-			goto out;
-	}
-
-	/* Wait until IOMMU TLB flushes are complete */
-	amd_iommu_domain_flush_complete(domain);
-
-	/* Now flush device TLBs */
-	list_for_each_entry(dev_data, &domain->dev_list, list) {
-		struct amd_iommu *iommu;
-		int qdep;
-
-		/*
-		 * There might be non-IOMMUv2 capable devices in an IOMMUv2
-		 * domain.
-		 */
-		if (!dev_data->ats_enabled)
-			continue;
-
-		qdep  = dev_data->ats_qdep;
-		iommu = rlookup_amd_iommu(dev_data->dev);
-		if (!iommu)
-			continue;
-		build_inv_iotlb_pages(&cmd, dev_data->devid, qdep,
-				      address, size, pasid, true);
-
-		ret = iommu_queue_command(iommu, &cmd);
-		if (ret != 0)
-			goto out;
-	}
-
-	/* Wait until all device TLBs are flushed */
-	amd_iommu_domain_flush_complete(domain);
-
-	ret = 0;
-
-out:
-	return ret;
-}
-
-static int __amd_iommu_flush_page(struct protection_domain *domain, u32 pasid,
-				  u64 address)
-{
-	return __flush_pasid(domain, pasid, address, PAGE_SIZE);
-}
-
-int amd_iommu_flush_page(struct iommu_domain *dom, u32 pasid,
-			 u64 address)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	ret = __amd_iommu_flush_page(domain, pasid, address);
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	return ret;
-}
-
-static int __amd_iommu_flush_tlb(struct protection_domain *domain, u32 pasid)
-{
-	return __flush_pasid(domain, pasid, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS);
-}
-
-int amd_iommu_flush_tlb(struct iommu_domain *dom, u32 pasid)
-{
-	struct protection_domain *domain = to_pdomain(dom);
-	unsigned long flags;
-	int ret;
-
-	spin_lock_irqsave(&domain->lock, flags);
-	ret = __amd_iommu_flush_tlb(domain, pasid);
-	spin_unlock_irqrestore(&domain->lock, flags);
-
-	return ret;
-}
-
 int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
 			   int status, int tag)
 {