iommu/amd: Remove domain->updated
This struct member was used to track whether a domain
change requires updates to the device table and IOMMU
cache flushes. The problem is that access to this field
is racy, since locking in the common mapping code-paths
has been eliminated.
Move the updated field to the stack to get rid of all
potential races and remove the field from the struct.
Fixes: 92d420ec02 ("iommu/amd: Relax locking in dma_ops path")
Reviewed-by: Filippo Sironi <sironi@amazon.de>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 0b15e02f0c
commit f15d9a992f
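
For readers outside the kernel tree, here is a minimal standalone sketch of the pattern the commit message describes: the racy shared domain->updated flag is replaced by a bool that lives on the mapper's stack and is threaded through the helpers via a bool * parameter, so only the caller that actually changed the page table triggers a flush. All toy_* names and the userspace scaffolding below are invented purely for illustration; this is not the kernel code or its API.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the real domain state, only to keep the sketch
 * self-contained. The real code operates on struct protection_domain. */
struct toy_domain {
        int mode;               /* page-table depth */
};

/* Pretend flush; the real update_domain() rewrites the device table
 * and flushes the IOMMU TLBs. */
static void toy_update_domain(struct toy_domain *dom)
{
        printf("flushing domain (mode=%d)\n", dom->mode);
}

/* Old pattern (racy): set a shared dom->updated flag in here.
 * New pattern: report the change through the return value so the
 * caller can track it on its own stack. */
static bool toy_increase_address_space(struct toy_domain *dom, int needed_mode)
{
        if (dom->mode >= needed_mode)
                return false;   /* nothing changed, no flush needed */

        dom->mode += 1;
        return true;            /* page-table layout changed */
}

/* Helper that may change the page table; it reports changes through the
 * caller-provided *updated instead of touching shared state. */
static void toy_alloc_pte(struct toy_domain *dom, int needed_mode, bool *updated)
{
        while (dom->mode < needed_mode)
                *updated = toy_increase_address_space(dom, needed_mode) || *updated;
}

/* Mapping path: "updated" lives on this stack frame, so concurrent
 * mappers cannot clobber each other's pending-flush state. */
static int toy_map_page(struct toy_domain *dom, int needed_mode)
{
        bool updated = false;

        toy_alloc_pte(dom, needed_mode, &updated);

        if (updated)
                toy_update_domain(dom);

        return 0;
}

int main(void)
{
        struct toy_domain dom = { .mode = 3 };

        toy_map_page(&dom, 4);  /* grows the table -> flush happens */
        toy_map_page(&dom, 4);  /* no change -> no flush */
        return 0;
}

Because each caller owns its own updated flag, a concurrent mapper can no longer clear a flush request that another mapper has just recorded, which is the race the patch below removes.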
@@ -1458,10 +1458,11 @@ static void free_pagetable(struct protection_domain *domain)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static void increase_address_space(struct protection_domain *domain,
+static bool increase_address_space(struct protection_domain *domain,
                                    gfp_t gfp)
 {
         unsigned long flags;
+        bool ret = false;
         u64 *pte;
 
         spin_lock_irqsave(&domain->lock, flags);
@@ -1478,19 +1479,21 @@ static void increase_address_space(struct protection_domain *domain,
                         iommu_virt_to_phys(domain->pt_root));
         domain->pt_root = pte;
         domain->mode += 1;
-        domain->updated = true;
+
+        ret = true;
 
 out:
         spin_unlock_irqrestore(&domain->lock, flags);
 
-        return;
+        return ret;
 }
 
 static u64 *alloc_pte(struct protection_domain *domain,
                       unsigned long address,
                       unsigned long page_size,
                       u64 **pte_page,
-                      gfp_t gfp)
+                      gfp_t gfp,
+                      bool *updated)
 {
         int level, end_lvl;
         u64 *pte, *page;
@@ -1498,7 +1501,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
         BUG_ON(!is_power_of_2(page_size));
 
         while (address > PM_LEVEL_SIZE(domain->mode))
-                increase_address_space(domain, gfp);
+                *updated = increase_address_space(domain, gfp) || *updated;
 
         level = domain->mode - 1;
         pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
@@ -1530,7 +1533,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
                         for (i = 0; i < count; ++i)
                                 cmpxchg64(&lpte[i], __pte, 0ULL);
 
-                        domain->updated = true;
+                        *updated = true;
                         continue;
                 }
 
@@ -1547,7 +1550,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
                         if (cmpxchg64(pte, __pte, __npte) != __pte)
                                 free_page((unsigned long)page);
                         else if (IOMMU_PTE_PRESENT(__pte))
-                                domain->updated = true;
+                                *updated = true;
 
                         continue;
                 }
@@ -1656,28 +1659,29 @@ static int iommu_map_page(struct protection_domain *dom,
                           gfp_t gfp)
 {
         struct page *freelist = NULL;
+        bool updated = false;
         u64 __pte, *pte;
-        int i, count;
+        int ret, i, count;
 
         BUG_ON(!IS_ALIGNED(bus_addr, page_size));
         BUG_ON(!IS_ALIGNED(phys_addr, page_size));
 
+        ret = -EINVAL;
         if (!(prot & IOMMU_PROT_MASK))
-                return -EINVAL;
+                goto out;
 
         count = PAGE_SIZE_PTE_COUNT(page_size);
-        pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp);
+        pte = alloc_pte(dom, bus_addr, page_size, NULL, gfp, &updated);
 
-        if (!pte) {
-                update_domain(dom);
-                return -ENOMEM;
-        }
+        ret = -ENOMEM;
+        if (!pte)
+                goto out;
 
         for (i = 0; i < count; ++i)
                 freelist = free_clear_pte(&pte[i], pte[i], freelist);
 
         if (freelist != NULL)
-                dom->updated = true;
+                updated = true;
 
         if (count > 1) {
                 __pte = PAGE_SIZE_PTE(__sme_set(phys_addr), page_size);
@@ -1693,12 +1697,16 @@ static int iommu_map_page(struct protection_domain *dom,
         for (i = 0; i < count; ++i)
                 pte[i] = __pte;
 
-        update_domain(dom);
+        ret = 0;
+
+out:
+        if (updated)
+                update_domain(dom);
 
         /* Everything flushed out, free pages now */
         free_page_list(freelist);
 
-        return 0;
+        return ret;
 }
 
 static unsigned long iommu_unmap_page(struct protection_domain *dom,
@@ -2399,15 +2407,10 @@ static void update_device_table(struct protection_domain *domain)
 
 static void update_domain(struct protection_domain *domain)
 {
-        if (!domain->updated)
-                return;
-
         update_device_table(domain);
 
         domain_flush_devices(domain);
         domain_flush_tlb_pde(domain);
-
-        domain->updated = false;
 }
 
 static int dir2prot(enum dma_data_direction direction)
@@ -3333,7 +3336,6 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom)
 
         /* Update data structure */
         domain->mode = PAGE_MODE_NONE;
-        domain->updated = true;
 
         /* Make changes visible to IOMMUs */
         update_domain(domain);
@@ -3379,7 +3381,6 @@ int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
 
         domain->glx = levels;
         domain->flags |= PD_IOMMUV2_MASK;
-        domain->updated = true;
 
         update_domain(domain);
 
@@ -475,7 +475,6 @@ struct protection_domain {
         int glx;                /* Number of levels for GCR3 table */
         u64 *gcr3_tbl;          /* Guest CR3 table */
         unsigned long flags;    /* flags to find out type of domain */
-        bool updated;           /* complete domain flush required */
         unsigned dev_cnt;       /* devices assigned to this domain */
         unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
 };