IOMMU Fixes for Linux v5.2-rc4

Including:

 - Three fixes for Intel VT-d: a potential deadlock, a formatting
   problem and a wrongly set bit.

 - One fix for the ARM-SMMU to make it work on some platforms with
   sub-optimal SMMU emulation.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEr9jSbILcajRFYWYyK/BELZcBGuMFAl0DaloACgkQK/BELZcB
GuNZzw//cqRLfD7Gnk4nyDBKAp3RPH12Tm6aFXkRr0/Y2IDNj0zVkZCOENe+BQhu
L8F0Nv0t5C0oUtCbx8nqtMUXqeU4Dvvb+pEuWHE+sH1uqqbwzvXNA1vHbJQy0Yht
BJOPf3C/1J3LmwjZm8QjrbMk7+HF//BED8wzdp5Advu3+tJfIEUAErBKty5kg5rv
ljXG4DxryOVHswEUu2i1HXLoDvCORCIrfphGFjWhIC7MkMQRRUb5NeHydN0TzCdz
XfGH3B32vKXMn/sBmi4zeu3ORtvFGaj94WUsRa72qI6pDBk60mcDzZfF/LuBePhS
UQQsmImdvfFlzPNr19Pf51B6s7Sv2Xcr0UiSAXb/GSk/Zvh7TTm0y7H+ZfBIUIa6
5pzWl0wmkk/e9cHrNs6gP2P0UT71xRcCWiko+EZCw7N5cgn/d46I47JMeBzZ57CF
cV9zfbI8pqF0kOdG3xIbPA7oYf/trdSP2yoY6P/72A9Z7U5uNDxyPwFlwwIRqgC/
oFLPtEhglmgbPc7/5XrBNPMHbKYRbVLNNHJoy99cqMQybu7vfZ5wh51W0gedLxNR
GgDDpbSKHyEdK3bUaaSuZMpHG7dzgUhezV51lOeqn8RUSEX9twO2nBwguhehFS3R
ADnxqYdphwY5bNAIdqsX+Q0icH88TjpW61gNML5uLu+xhabkuqw=
=wyOl
-----END PGP SIGNATURE-----

Merge tag 'iommu-fixes-v5.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - three fixes for Intel VT-d: a potential deadlock, a formatting
   problem and a wrongly set bit

 - one fix for the ARM-SMMU to make it work on some platforms with
   sub-optimal SMMU emulation

* tag 'iommu-fixes-v5.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/arm-smmu: Avoid constant zero in TLBI writes
  iommu/vt-d: Set the right field for Page Walk Snoop
  iommu/vt-d: Fix lock inversion between iommu->lock and device_domain_lock
  iommu: Add missing new line for dma type
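The "potential deadlock" in the VT-d series is a lock-ordering (ABBA) inversion: two code paths that take the same pair of locks in opposite orders can each end up holding one lock while waiting for the other. A minimal stand-alone sketch of the hazard, using pthread mutexes in place of kernel spinlocks (all names below are hypothetical, not the driver's):

  /* abba.c - minimal illustration of an ABBA lock inversion. */
  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* think: iommu->lock */
  static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* think: device_domain_lock */

  /* One path takes A then B ... */
  static void path_ab(void)
  {
          pthread_mutex_lock(&lock_a);
          pthread_mutex_lock(&lock_b);
          puts("A -> B");
          pthread_mutex_unlock(&lock_b);
          pthread_mutex_unlock(&lock_a);
  }

  /*
   * ... the other takes B then A.  If both run concurrently, each can end
   * up holding one lock while waiting for the other and neither makes
   * progress: a deadlock.
   */
  static void path_ba(void)
  {
          pthread_mutex_lock(&lock_b);
          pthread_mutex_lock(&lock_a);
          puts("B -> A");
          pthread_mutex_unlock(&lock_a);
          pthread_mutex_unlock(&lock_b);
  }

  int main(void)
  {
          /* Run sequentially so the example terminates; the hazard only
           * exists when the two orderings race on different CPUs. */
          path_ab();
          path_ba();
          return 0;
  }

The cure, which the intel-iommu.c hunks below apply, is to agree on one acquisition order for that pair of locks and use it on every path.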
commit c78ad1be4b
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -47,6 +47,15 @@
 
 #include "arm-smmu-regs.h"
 
+/*
+ * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
+ * global register space are still, in fact, using a hypervisor to mediate it
+ * by trapping and emulating register accesses. Sadly, some deployed versions
+ * of said trapping code have bugs wherein they go horribly wrong for stores
+ * using r31 (i.e. XZR/WZR) as the source register.
+ */
+#define QCOM_DUMMY_VAL -1
+
 #define ARM_MMU500_ACTLR_CPRE (1 << 1)
 
 #define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
@@ -411,7 +420,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
 {
         unsigned int spin_cnt, delay;
 
-        writel_relaxed(0, sync);
+        writel_relaxed(QCOM_DUMMY_VAL, sync);
         for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
                 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
                         if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
@@ -1751,8 +1760,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
         }
 
         /* Invalidate the TLB, just in case */
-        writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
-        writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+        writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+        writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 
         reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 
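A note on the arm-smmu.c hunks above: the TLB invalidate and sync registers here ignore the data written (the write itself triggers the operation), so 0 was only ever a convenient choice. On arm64, storing the constant 0 typically lets the compiler use the zero register (XZR/WZR) as the store source, which is exactly the case the buggy trap-and-emulate code mishandles; any non-zero value must first be materialized in a general-purpose register. A user-space sketch of the idea, with a hypothetical mmio_write32() standing in for writel_relaxed():

  #include <stdint.h>

  /* Hypothetical stand-in for writel_relaxed(): a plain 32-bit MMIO store. */
  static inline void mmio_write32(uint32_t val, volatile uint32_t *reg)
  {
          *reg = val;
  }

  #define QCOM_DUMMY_VAL -1       /* any non-zero value will do */

  void tlb_invalidate_all(volatile uint32_t *tlbi_reg)
  {
          /*
           * mmio_write32(0, tlbi_reg) would typically compile to
           * "str wzr, [x0]" on arm64, using the zero register as the
           * store source, the exact pattern the broken emulation trips
           * over.  A non-zero constant has to be moved into a general
           * purpose register first, e.g. "mov w1, #-1; str w1, [x0]",
           * which the emulation handles.  The invalidate register
           * ignores the data, so -1 works just as well as 0.
           */
          mmio_write32(QCOM_DUMMY_VAL, tlbi_reg);
  }

  int main(void)
  {
          static volatile uint32_t fake_tlbi_reg;  /* stand-in MMIO word */

          tlb_invalidate_all(&fake_tlbi_reg);
          return 0;
  }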
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2504,6 +2504,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
                 }
         }
 
+        spin_lock(&iommu->lock);
         spin_lock_irqsave(&device_domain_lock, flags);
         if (dev)
                 found = find_domain(dev);
@@ -2519,17 +2520,16 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
         if (found) {
                 spin_unlock_irqrestore(&device_domain_lock, flags);
+                spin_unlock(&iommu->lock);
                 free_devinfo_mem(info);
                 /* Caller must free the original domain */
                 return found;
         }
 
-        spin_lock(&iommu->lock);
         ret = domain_attach_iommu(domain, iommu);
-        spin_unlock(&iommu->lock);
-
         if (ret) {
                 spin_unlock_irqrestore(&device_domain_lock, flags);
+                spin_unlock(&iommu->lock);
                 free_devinfo_mem(info);
                 return NULL;
         }
@@ -2539,6 +2539,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
         if (dev)
                 dev->archdata.iommu = info;
         spin_unlock_irqrestore(&device_domain_lock, flags);
+        spin_unlock(&iommu->lock);
 
         /* PASID table is mandatory for a PCI device in scalable mode. */
         if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
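The net effect of the intel-iommu.c hunks above is one consistent nesting: iommu->lock is taken before device_domain_lock, and the two are released in reverse order on every return path of dmar_insert_one_dev_info(). The patch open-codes the unlocks at each exit; the sketch below shows the same discipline with a single unlock site (pthread mutexes and all identifiers are stand-ins, not the kernel's):

  #include <pthread.h>

  /*
   * Hypothetical locks mirroring the nesting the patch establishes:
   * outer_lock plays the role of iommu->lock, inner_lock that of
   * device_domain_lock (pthread mutexes stand in for spinlocks).
   */
  static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER;

  static int insert_info(int already_present, int attach_fails)
  {
          int ret = 0;

          pthread_mutex_lock(&outer_lock);        /* always taken first ... */
          pthread_mutex_lock(&inner_lock);        /* ... then the inner lock */

          if (already_present) {                  /* "found" early-exit path */
                  ret = 1;
                  goto unlock;
          }

          if (attach_fails) {                     /* attach-failure exit path */
                  ret = -1;
                  goto unlock;
          }

          /* success path: publish the new info while both locks are held */

  unlock:
          /*
           * Every path drops the locks in the reverse of the order they were
           * taken, so no two paths can ever disagree about the ordering.
           */
          pthread_mutex_unlock(&inner_lock);
          pthread_mutex_unlock(&outer_lock);
          return ret;
  }

  int main(void)
  {
          return insert_info(0, 0);       /* exercise the success path */
  }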
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -389,7 +389,7 @@ static inline void pasid_set_present(struct pasid_entry *pe)
  */
 static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
 {
-        pasid_set_bits(&pe->val[1], 1 << 23, value);
+        pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
 }
 
 /*
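The intel-pasid.h one-liner above is a field-alignment fix: value is now shifted into bit position 23 before being handed to pasid_set_bits(), which implies the helper expects its bits argument to be pre-aligned with the mask, so a bare bool (0 or 1) never landed in the Page Walk Snoop bit. A stand-alone model of such a mask-and-set helper (the helper below is hypothetical, not the kernel's pasid_set_bits()):

  #include <stdint.h>
  #include <stdio.h>

  /*
   * Hypothetical mask-and-set helper: clear the masked field, then OR in
   * caller-supplied bits that must already be aligned with the mask.
   */
  static inline void set_masked_bits(uint64_t *p, uint64_t mask, uint64_t bits)
  {
          *p = (*p & ~mask) | (bits & mask);
  }

  int main(void)
  {
          uint64_t entry = 0;
          int value = 1;          /* bool-style flag */

          /* Unshifted bool: (1 & (1 << 23)) == 0, so bit 23 is never set. */
          set_masked_bits(&entry, 1ULL << 23, value);
          printf("unshifted: %#llx\n", (unsigned long long)entry);  /* 0 */

          /* Shifted into place, as the fix does: bit 23 is set. */
          set_masked_bits(&entry, 1ULL << 23, (uint64_t)value << 23);
          printf("shifted:   %#llx\n", (unsigned long long)entry);  /* 0x800000 */
          return 0;
  }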
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -329,7 +329,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
                         type = "unmanaged\n";
                         break;
                 case IOMMU_DOMAIN_DMA:
-                        type = "DMA";
+                        type = "DMA\n";
                         break;
                 }
         }
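The iommu.c hunk above is purely cosmetic: the other cases in iommu_group_show_type() already return newline-terminated strings, so reading the sysfs type attribute ends on a clean line break, and the DMA case was the odd one out. A simplified user-space stand-in for such a show-style callback (not the kernel's sysfs API):

  #include <stdio.h>
  #include <string.h>

  /*
   * Simplified stand-in for a sysfs ->show() callback: copy a
   * newline-terminated string into buf and return its length, so that
   * reading the attribute ends with a line break.
   */
  static size_t show_type(int domain_is_dma, char *buf)
  {
          const char *type = domain_is_dma ? "DMA\n" : "unmanaged\n";

          return strlen(strcpy(buf, type));
  }

  int main(void)
  {
          char buf[32];

          show_type(1, buf);
          fputs(buf, stdout);     /* prints "DMA" followed by a newline */
          return 0;
  }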