iommu/amd: Reevaluate vector configuration on activate()
With the upcoming reservation/management scheme, early activation will
assign a special vector. The final activation at request_irq() assigns a
real vector, which needs to be updated in the tables.

Split out the reconfiguration code in set_affinity and use it for
reactivation.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Yu Chen <yu.c.chen@intel.com>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: iommu@lists.linux-foundation.org
Cc: Borislav Petkov <bp@alien8.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Rui Zhang <rui.zhang@intel.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Len Brown <lenb@kernel.org>
Link: https://lkml.kernel.org/r/20170913213155.944883733@linutronix.de
This commit is contained in:
parent d491bdff88
commit 5ba204a181
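
For orientation, the following is a condensed sketch of the resulting call flow, reassembled from the diff below. It is not the full driver code: surrounding context in amd_iommu.c, the forward declaration, and the set_affinity path are elided, and the "special vector" used for early activation is handled by the x86 vector domain, not here. The point is that both the activation path and the affinity path now funnel the vector/destination update through the shared amd_ir_update_irte() helper.

/*
 * Condensed sketch (see the diff below for the actual change):
 * irq_remapping_activate() programs the IRTE and then re-syncs it with
 * whatever vector/destination is currently in struct irq_cfg, using the
 * same helper that amd_ir_set_affinity() uses.
 */
static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
                               struct amd_ir_data *ir_data,
                               struct irq_2_irte *irte_info,
                               struct irq_cfg *cfg)
{
        /* Atomically update destination/vector in the IRTE and flush the
         * interrupt entry cache. */
        iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
                                      irte_info->index, cfg->vector,
                                      cfg->dest_apicid);
}

static int irq_remapping_activate(struct irq_domain *domain,
                                  struct irq_data *irq_data, bool early)
{
        struct amd_ir_data *data = irq_data->chip_data;
        struct irq_2_irte *irte_info = &data->irq_2_irte;
        struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
        struct irq_cfg *cfg = irqd_cfg(irq_data);

        if (!iommu)
                return 0;

        /* Program the IRTE, then push the current vector into it, so the
         * final activation at request_irq() picks up the real vector. */
        iommu->irte_ops->activate(data->entry, irte_info->devid,
                                  irte_info->index);
        amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
        return 0;
}
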
@@ -4170,16 +4170,25 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
 	irq_domain_free_irqs_common(domain, virq, nr_irqs);
 }
 
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+			       struct amd_ir_data *ir_data,
+			       struct irq_2_irte *irte_info,
+			       struct irq_cfg *cfg);
+
 static int irq_remapping_activate(struct irq_domain *domain,
 				  struct irq_data *irq_data, bool early)
 {
 	struct amd_ir_data *data = irq_data->chip_data;
 	struct irq_2_irte *irte_info = &data->irq_2_irte;
 	struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
+	struct irq_cfg *cfg = irqd_cfg(irq_data);
 
-	if (iommu)
-		iommu->irte_ops->activate(data->entry, irte_info->devid,
-					  irte_info->index);
+	if (!iommu)
+		return 0;
+
+	iommu->irte_ops->activate(data->entry, irte_info->devid,
+				  irte_info->index);
+	amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
 	return 0;
 }
 
@@ -4267,6 +4276,22 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info)
 	return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
 }
 
+
+static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
+			       struct amd_ir_data *ir_data,
+			       struct irq_2_irte *irte_info,
+			       struct irq_cfg *cfg)
+{
+
+	/*
+	 * Atomically updates the IRTE with the new destination, vector
+	 * and flushes the interrupt entry cache.
+	 */
+	iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
+				      irte_info->index, cfg->vector,
+				      cfg->dest_apicid);
+}
+
 static int amd_ir_set_affinity(struct irq_data *data,
 			       const struct cpumask *mask, bool force)
 {
@@ -4284,13 +4309,7 @@ static int amd_ir_set_affinity(struct irq_data *data,
 	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
 		return ret;
 
-	/*
-	 * Atomically updates the IRTE with the new destination, vector
-	 * and flushes the interrupt entry cache.
-	 */
-	iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
-				      irte_info->index, cfg->vector, cfg->dest_apicid);
-
+	amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
 	/*
 	 * After this point, all the interrupts will start arriving
 	 * at the new destination. So, time to cleanup the previous