iommu bitmap instead of iommu pointer in dmar_domain
In order to support assigning multiple devices from different IOMMUs to a single domain, an iommu bitmap is used to keep track of all the iommus the domain is related to.

Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent a2bb8459fe
commit 8c11e798ee
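To make the intent of the change concrete, here is a small, self-contained C sketch (not part of the patch) of how a per-domain bitmap can track several IOMMUs at once, with the first set bit covering the "native" single-iommu case that domain_get_iommu() handles in the diff below. The MAX_IOMMUS bound, the domain_add_iommu() helper, and the seq_id values used here are illustrative stand-ins, not the kernel's actual definitions; the real code uses find_first_bit() over iommu_bmp and the global g_iommus[] array indexed by seq_id.

#include <stdio.h>

#define MAX_IOMMUS (sizeof(unsigned long) * 8)   /* one bit per IOMMU (illustrative bound) */

struct intel_iommu { int seq_id; };              /* stand-in for the real struct */

struct dmar_domain {
	int id;
	unsigned long iommu_bmp;                 /* bitmap of iommus this domain uses */
};

static struct intel_iommu *g_iommus[MAX_IOMMUS]; /* indexed by seq_id, as in the patch */

/* attach another iommu to the domain: just set its bit (hypothetical helper) */
static void domain_add_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
{
	domain->iommu_bmp |= 1UL << iommu->seq_id;
}

/* native case: the domain is backed by one iommu -- return the first set bit's iommu */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{
	unsigned int i;

	for (i = 0; i < MAX_IOMMUS; i++)
		if (domain->iommu_bmp & (1UL << i))
			return g_iommus[i];
	return NULL;                             /* no iommu attached */
}

int main(void)
{
	static struct intel_iommu iommu0 = { .seq_id = 0 }, iommu1 = { .seq_id = 3 };
	struct dmar_domain dom = { .id = 1, .iommu_bmp = 0 };

	g_iommus[0] = &iommu0;
	g_iommus[3] = &iommu1;

	/* devices behind two different iommus can now share one domain */
	domain_add_iommu(&dom, &iommu0);
	domain_add_iommu(&dom, &iommu1);

	printf("first iommu of domain %d: seq_id %d\n",
	       dom.id, domain_get_iommu(&dom)->seq_id);
	return 0;
}

With the single back pointer replaced by a bitmap, the diff below switches every user of domain->iommu over to domain_get_iommu().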
@@ -208,7 +208,7 @@ static inline bool dma_pte_present(struct dma_pte *pte)
 
 struct dmar_domain {
 	int	id;			/* domain id */
-	struct intel_iommu *iommu;	/* back pointer to owning iommu */
+	unsigned long iommu_bmp;	/* bitmap of iommus this domain uses*/
 
 	struct list_head devices;	/* all devices' list */
 	struct iova_domain iovad;	/* iova's that belong to this domain */
@@ -362,6 +362,18 @@ void free_iova_mem(struct iova *iova)
 	kmem_cache_free(iommu_iova_cache, iova);
 }
 
+/* in native case, each domain is related to only one iommu */
+static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
+{
+	int iommu_id;
+
+	iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+	if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
+		return NULL;
+
+	return g_iommus[iommu_id];
+}
+
 /* Gets context entry for a given bus and devfn */
 static struct context_entry * device_to_context_entry(struct intel_iommu *iommu,
 		u8 bus, u8 devfn)
@@ -502,6 +514,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 	int level = agaw_to_level(domain->agaw);
 	int offset;
 	unsigned long flags;
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	BUG_ON(!domain->pgd);
 
@@ -525,7 +538,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 					flags);
 				return NULL;
 			}
-			__iommu_flush_cache(domain->iommu, tmp_page,
+			__iommu_flush_cache(iommu, tmp_page,
 					PAGE_SIZE);
 			dma_set_pte_addr(pte, virt_to_phys(tmp_page));
 			/*
@@ -534,7 +547,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr)
 			 */
 			dma_set_pte_readable(pte);
 			dma_set_pte_writable(pte);
-			__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+			__iommu_flush_cache(iommu, pte, sizeof(*pte));
 		}
 		parent = phys_to_virt(dma_pte_addr(pte));
 		level--;
@@ -571,13 +584,14 @@ static struct dma_pte *dma_addr_level_pte(struct dmar_domain *domain, u64 addr,
 static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
 {
 	struct dma_pte *pte = NULL;
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	/* get last level pte */
 	pte = dma_addr_level_pte(domain, addr, 1);
 
 	if (pte) {
 		dma_clear_pte(pte);
-		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+		__iommu_flush_cache(iommu, pte, sizeof(*pte));
 	}
 }
 
@@ -608,6 +622,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 	int total = agaw_to_level(domain->agaw);
 	int level;
 	u64 tmp;
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	start &= (((u64)1) << addr_width) - 1;
 	end &= (((u64)1) << addr_width) - 1;
@@ -625,7 +640,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 				free_pgtable_page(
 					phys_to_virt(dma_pte_addr(pte)));
 				dma_clear_pte(pte);
-				__iommu_flush_cache(domain->iommu,
+				__iommu_flush_cache(iommu,
 						pte, sizeof(*pte));
 			}
 			tmp += level_size(level);
@@ -1195,7 +1210,8 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
 
 	set_bit(num, iommu->domain_ids);
 	domain->id = num;
-	domain->iommu = iommu;
+	memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
+	set_bit(iommu->seq_id, &domain->iommu_bmp);
 	domain->flags = 0;
 	iommu->domains[num] = domain;
 	spin_unlock_irqrestore(&iommu->lock, flags);
@@ -1206,10 +1222,13 @@ static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu)
 static void iommu_free_domain(struct dmar_domain *domain)
 {
 	unsigned long flags;
+	struct intel_iommu *iommu;
 
-	spin_lock_irqsave(&domain->iommu->lock, flags);
-	clear_bit(domain->id, domain->iommu->domain_ids);
-	spin_unlock_irqrestore(&domain->iommu->lock, flags);
+	iommu = domain_get_iommu(domain);
+
+	spin_lock_irqsave(&iommu->lock, flags);
+	clear_bit(domain->id, iommu->domain_ids);
+	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
 static struct iova_domain reserved_iova_list;
@@ -1288,7 +1307,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	domain_reserve_special_ranges(domain);
 
 	/* calculate AGAW */
-	iommu = domain->iommu;
+	iommu = domain_get_iommu(domain);
 	if (guest_width > cap_mgaw(iommu->cap))
 		guest_width = cap_mgaw(iommu->cap);
 	domain->gaw = guest_width;
@@ -1341,7 +1360,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 		u8 bus, u8 devfn)
 {
 	struct context_entry *context;
-	struct intel_iommu *iommu = domain->iommu;
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 	unsigned long flags;
 
 	pr_debug("Set context mapping for %02x:%02x.%d\n",
@@ -1413,8 +1432,9 @@ static int domain_context_mapped(struct dmar_domain *domain,
 {
 	int ret;
 	struct pci_dev *tmp, *parent;
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 
-	ret = device_context_mapped(domain->iommu,
+	ret = device_context_mapped(iommu,
 		pdev->bus->number, pdev->devfn);
 	if (!ret)
 		return ret;
@@ -1425,17 +1445,17 @@ static int domain_context_mapped(struct dmar_domain *domain,
 	/* Secondary interface's bus number and devfn 0 */
 	parent = pdev->bus->self;
 	while (parent != tmp) {
-		ret = device_context_mapped(domain->iommu, parent->bus->number,
+		ret = device_context_mapped(iommu, parent->bus->number,
 			parent->devfn);
 		if (!ret)
 			return ret;
 		parent = parent->bus->self;
 	}
 	if (tmp->is_pcie)
-		return device_context_mapped(domain->iommu,
+		return device_context_mapped(iommu,
 			tmp->subordinate->number, 0);
 	else
-		return device_context_mapped(domain->iommu,
+		return device_context_mapped(iommu,
 			tmp->bus->number, tmp->devfn);
 }
 
@@ -1447,6 +1467,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 	struct dma_pte *pte;
 	int index;
 	int addr_width = agaw_to_width(domain->agaw);
+	struct intel_iommu *iommu = domain_get_iommu(domain);
 
 	hpa &= (((u64)1) << addr_width) - 1;
 
@@ -1466,7 +1487,7 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 		BUG_ON(dma_pte_addr(pte));
 		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
 		dma_set_pte_prot(pte, prot);
-		__iommu_flush_cache(domain->iommu, pte, sizeof(*pte));
+		__iommu_flush_cache(iommu, pte, sizeof(*pte));
 		start_pfn++;
 		index++;
 	}
@@ -1475,10 +1496,12 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 
 static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn)
 {
-	clear_context_table(domain->iommu, bus, devfn);
-	domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0,
+	struct intel_iommu *iommu = domain_get_iommu(domain);
+
+	clear_context_table(iommu, bus, devfn);
+	iommu->flush.flush_context(iommu, 0, 0, 0,
 					   DMA_CCMD_GLOBAL_INVL, 0);
-	domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0,
+	iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH, 0);
 }
 
@@ -2033,6 +2056,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	struct iova *iova;
 	int prot = 0;
 	int ret;
+	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
@@ -2042,6 +2066,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	if (!domain)
 		return 0;
 
+	iommu = domain_get_iommu(domain);
 	size = aligned_size((u64)paddr, size);
 
 	iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
@@ -2055,7 +2080,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	 * mappings..
 	 */
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-			!cap_zlr(domain->iommu->cap))
+			!cap_zlr(iommu->cap))
 		prot |= DMA_PTE_READ;
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 		prot |= DMA_PTE_WRITE;
@@ -2071,10 +2096,10 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 		goto error;
 
 	/* it's a non-present to present mapping */
-	ret = iommu_flush_iotlb_psi(domain->iommu, domain->id,
+	ret = iommu_flush_iotlb_psi(iommu, domain->id,
 			start_paddr, size >> VTD_PAGE_SHIFT, 1);
 	if (ret)
-		iommu_flush_write_buffer(domain->iommu);
+		iommu_flush_write_buffer(iommu);
 
 	return start_paddr + ((u64)paddr & (~PAGE_MASK));
 
@@ -2132,12 +2157,14 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
 {
 	unsigned long flags;
 	int next, iommu_id;
+	struct intel_iommu *iommu;
 
 	spin_lock_irqsave(&async_umap_flush_lock, flags);
 	if (list_size == HIGH_WATER_MARK)
 		flush_unmaps();
 
-	iommu_id = dom->iommu->seq_id;
+	iommu = domain_get_iommu(dom);
+	iommu_id = iommu->seq_id;
 
 	next = deferred_flush[iommu_id].next;
 	deferred_flush[iommu_id].domain[next] = dom;
@@ -2159,12 +2186,15 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 	struct dmar_domain *domain;
 	unsigned long start_addr;
 	struct iova *iova;
+	struct intel_iommu *iommu;
 
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
 		return;
 	domain = find_domain(pdev);
 	BUG_ON(!domain);
 
+	iommu = domain_get_iommu(domain);
+
 	iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
 	if (!iova)
 		return;
@@ -2180,9 +2210,9 @@ void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
 	/* free page tables */
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 	if (intel_iommu_strict) {
-		if (iommu_flush_iotlb_psi(domain->iommu,
+		if (iommu_flush_iotlb_psi(iommu,
 			domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0))
-			iommu_flush_write_buffer(domain->iommu);
+			iommu_flush_write_buffer(iommu);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
 	} else {
@@ -2243,11 +2273,15 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	size_t size = 0;
 	void *addr;
 	struct scatterlist *sg;
+	struct intel_iommu *iommu;
 
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
 		return;
 
 	domain = find_domain(pdev);
+	BUG_ON(!domain);
+
+	iommu = domain_get_iommu(domain);
 
 	iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
 	if (!iova)
@@ -2264,9 +2298,9 @@ void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	/* free page tables */
 	dma_pte_free_pagetable(domain, start_addr, start_addr + size);
 
-	if (iommu_flush_iotlb_psi(domain->iommu, domain->id, start_addr,
+	if (iommu_flush_iotlb_psi(iommu, domain->id, start_addr,
 			size >> VTD_PAGE_SHIFT, 0))
-		iommu_flush_write_buffer(domain->iommu);
+		iommu_flush_write_buffer(iommu);
 
 	/* free iova */
 	__free_iova(&domain->iovad, iova);
@@ -2300,6 +2334,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	int ret;
 	struct scatterlist *sg;
 	unsigned long start_addr;
+	struct intel_iommu *iommu;
 
 	BUG_ON(dir == DMA_NONE);
 	if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
@@ -2309,6 +2344,8 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	if (!domain)
 		return 0;
 
+	iommu = domain_get_iommu(domain);
+
 	for_each_sg(sglist, sg, nelems, i) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		addr = (void *)virt_to_phys(addr);
@@ -2326,7 +2363,7 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	 * mappings..
 	 */
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \
-			!cap_zlr(domain->iommu->cap))
+			!cap_zlr(iommu->cap))
 		prot |= DMA_PTE_READ;
 	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 		prot |= DMA_PTE_WRITE;
@@ -2358,9 +2395,9 @@ int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 	}
 
 	/* it's a non-present to present mapping */
-	if (iommu_flush_iotlb_psi(domain->iommu, domain->id,
+	if (iommu_flush_iotlb_psi(iommu, domain->id,
 			start_addr, offset >> VTD_PAGE_SHIFT, 1))
-		iommu_flush_write_buffer(domain->iommu);
+		iommu_flush_write_buffer(iommu);
 	return nelems;
 }
 