diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index b549172e88ef..e3b04d5d87b0 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -455,36 +455,6 @@ __setup("intel_iommu=", intel_iommu_setup);
 static struct kmem_cache *iommu_domain_cache;
 static struct kmem_cache *iommu_devinfo_cache;
 
-static struct dmar_domain* get_iommu_domain(struct intel_iommu *iommu, u16 did)
-{
-	struct dmar_domain **domains;
-	int idx = did >> 8;
-
-	domains = iommu->domains[idx];
-	if (!domains)
-		return NULL;
-
-	return domains[did & 0xff];
-}
-
-static void set_iommu_domain(struct intel_iommu *iommu, u16 did,
-			     struct dmar_domain *domain)
-{
-	struct dmar_domain **domains;
-	int idx = did >> 8;
-
-	if (!iommu->domains[idx]) {
-		size_t size = 256 * sizeof(struct dmar_domain *);
-		iommu->domains[idx] = kzalloc(size, GFP_ATOMIC);
-	}
-
-	domains = iommu->domains[idx];
-	if (WARN_ON(!domains))
-		return;
-	else
-		domains[did & 0xff] = domain;
-}
-
 void *alloc_pgtable_page(int node)
 {
 	struct page *page;
@@ -1751,8 +1721,7 @@ static void intel_flush_iotlb_all(struct iommu_domain *domain)
 					 DMA_TLB_DSI_FLUSH);
 
 		if (!cap_caching_mode(iommu->cap))
-			iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
-					      0, MAX_AGAW_PFN_WIDTH);
+			iommu_flush_dev_iotlb(dmar_domain, 0, MAX_AGAW_PFN_WIDTH);
 	}
 }
 
@@ -1815,7 +1784,6 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
 static int iommu_init_domains(struct intel_iommu *iommu)
 {
 	u32 ndomains;
-	size_t size;
 
 	ndomains = cap_ndoms(iommu->cap);
 	pr_debug("%s: Number of Domains supported <%d>\n",
@@ -1827,24 +1795,6 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	if (!iommu->domain_ids)
 		return -ENOMEM;
 
-	size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
-	iommu->domains = kzalloc(size, GFP_KERNEL);
-
-	if (iommu->domains) {
-		size = 256 * sizeof(struct dmar_domain *);
-		iommu->domains[0] = kzalloc(size, GFP_KERNEL);
-	}
-
-	if (!iommu->domains || !iommu->domains[0]) {
-		pr_err("%s: Allocating domain array failed\n",
-		       iommu->name);
-		bitmap_free(iommu->domain_ids);
-		kfree(iommu->domains);
-		iommu->domain_ids = NULL;
-		iommu->domains = NULL;
-		return -ENOMEM;
-	}
-
 	/*
 	 * If Caching mode is set, then invalid translations are tagged
 	 * with domain-id 0, hence we need to pre-allocate it. We also
@@ -1871,7 +1821,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 	struct device_domain_info *info, *tmp;
 	unsigned long flags;
 
-	if (!iommu->domains || !iommu->domain_ids)
+	if (!iommu->domain_ids)
 		return;
 
 	spin_lock_irqsave(&device_domain_lock, flags);
@@ -1892,15 +1842,8 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
 
 static void free_dmar_iommu(struct intel_iommu *iommu)
 {
-	if ((iommu->domains) && (iommu->domain_ids)) {
-		int elems = ALIGN(cap_ndoms(iommu->cap), 256) >> 8;
-		int i;
-
-		for (i = 0; i < elems; i++)
-			kfree(iommu->domains[i]);
-		kfree(iommu->domains);
+	if (iommu->domain_ids) {
 		bitmap_free(iommu->domain_ids);
-		iommu->domains = NULL;
 		iommu->domain_ids = NULL;
 	}
 
@@ -1978,11 +1921,8 @@ static int domain_attach_iommu(struct dmar_domain *domain,
 		}
 
 		set_bit(num, iommu->domain_ids);
-		set_iommu_domain(iommu, num, domain);
-
 		domain->iommu_did[iommu->seq_id] = num;
 		domain->nid			 = iommu->node;
-
 		domain_update_iommu_cap(domain);
 	}
 
@@ -2001,8 +1941,6 @@ static void domain_detach_iommu(struct dmar_domain *domain,
 	if (domain->iommu_refcnt[iommu->seq_id] == 0) {
 		num = domain->iommu_did[iommu->seq_id];
 		clear_bit(num, iommu->domain_ids);
-		set_iommu_domain(iommu, num, NULL);
-
 		domain_update_iommu_cap(domain);
 		domain->iommu_did[iommu->seq_id] = 0;
 	}
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 5cfda90b2cca..8c7591b5f3e2 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -578,7 +578,6 @@ struct intel_iommu {
 
 #ifdef CONFIG_INTEL_IOMMU
 	unsigned long	*domain_ids;	/* bitmap of domains */
-	struct dmar_domain ***domains;	/* ptr to domains */
 	spinlock_t	lock;		/* protect context, domain ids */
 	struct root_entry *root_entry;	/* virtual address */
 
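
For context: the removed get_iommu_domain()/set_iommu_domain() helpers maintained a
two-level lookup table from a 16-bit domain ID (DID) to its struct dmar_domain. The
high byte of the DID selects one of the lazily kzalloc'ed 256-entry second-level
arrays and the low byte indexes into it. Below is a minimal userspace sketch of that
indexing scheme (simplified types, no locking, calloc standing in for kzalloc;
illustrative only, not the kernel code itself):

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct dmar_domain { int id; };	/* stand-in for the real structure */

	/* domains[did >> 8][did & 0xff]; second level allocated on first use */
	static struct dmar_domain **domains[256];

	static struct dmar_domain *get_domain(uint16_t did)
	{
		struct dmar_domain **l2 = domains[did >> 8];

		return l2 ? l2[did & 0xff] : NULL;
	}

	static int set_domain(uint16_t did, struct dmar_domain *domain)
	{
		if (!domains[did >> 8]) {
			domains[did >> 8] = calloc(256, sizeof(struct dmar_domain *));
			if (!domains[did >> 8])
				return -1;
		}
		domains[did >> 8][did & 0xff] = domain;
		return 0;
	}

	int main(void)
	{
		struct dmar_domain d = { .id = 42 };

		set_domain(0x0142, &d);	/* level-1 index 0x01, level-2 index 0x42 */
		printf("0x0142 -> %p\n", (void *)get_domain(0x0142));
		printf("0x0200 -> %p\n", (void *)get_domain(0x0200));	/* never set: NULL */
		return 0;
	}

With the table gone, the domain_ids bitmap alone records which DIDs are allocated
(set_bit() in domain_attach_iommu(), clear_bit() in domain_detach_iommu()), and the
one remaining reverse lookup by DID, in intel_flush_iotlb_all(), is replaced by the
dmar_domain pointer the caller already holds.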