From 56b75b51ed6d5e7bffda59440404409bca2dff00 Mon Sep 17 00:00:00 2001
From: Bjorn Andersson
Date: Mon, 19 Oct 2020 11:23:21 -0700
Subject: [PATCH 01/50] iommu/arm-smmu: Allow implementation specific
 write_s2cr

The firmware found in some Qualcomm platforms intercepts writes to the
S2CR register in order to replace the BYPASS type with FAULT.
Furthermore, it treats faults at this level as catastrophic and
restarts the device.

Add support for providing implementation specific versions of the S2CR
write function, to allow the Qualcomm driver to work around this
behavior.

Cc:
Signed-off-by: Bjorn Andersson
Tested-by: Steev Klimaszewski
Reviewed-by: Robin Murphy
Link: https://lore.kernel.org/r/20201019182323.3162386-2-bjorn.andersson@linaro.org
Signed-off-by: Will Deacon
---
 drivers/iommu/arm/arm-smmu/arm-smmu.c | 13 ++++++++++---
 drivers/iommu/arm/arm-smmu/arm-smmu.h |  1 +
 2 files changed, 11 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index dad7fa86fbd4..bcbacf22331d 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -929,9 +929,16 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
 {
 	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
-	u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
-		  FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
-		  FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
+	u32 reg;
+
+	if (smmu->impl && smmu->impl->write_s2cr) {
+		smmu->impl->write_s2cr(smmu, idx);
+		return;
+	}
+
+	reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
+	      FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
+	      FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);
 
 	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
 	    smmu->smrs[idx].valid)
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 1a746476927c..b71647eaa319 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -436,6 +436,7 @@ struct arm_smmu_impl {
 	int (*alloc_context_bank)(struct arm_smmu_domain *smmu_domain,
 				  struct arm_smmu_device *smmu,
 				  struct device *dev, int start);
+	void (*write_s2cr)(struct arm_smmu_device *smmu, int idx);
 };
 
 #define INVALID_SMENDX			-1

From 07a7f2caaa5a2619934491bab3c47b261c554fb0 Mon Sep 17 00:00:00 2001
From: Bjorn Andersson
Date: Mon, 19 Oct 2020 11:23:22 -0700
Subject: [PATCH 02/50] iommu/arm-smmu-qcom: Read back stream mappings

The Qualcomm boot loader configures stream mapping for the peripherals
that it accesses, and in particular it sets up the stream mapping for
the display controller to be allowed to scan out a splash screen or
EFI framebuffer.

Read back the stream mappings during initialization and make the
arm-smmu driver maintain the streams in bypass mode.
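For illustration, a minimal sketch of how an implementation hooks the
write_s2cr callback introduced in the previous patch (the example_*
names are hypothetical; the real Qualcomm hook arrives in patch 03):

/* Hypothetical sketch, not part of the series */
static void example_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg);

	/* ... apply implementation specific fixups to reg here ... */

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static const struct arm_smmu_impl example_impl = {
	.write_s2cr = example_write_s2cr,
};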
Cc:
Signed-off-by: Bjorn Andersson
Tested-by: Steev Klimaszewski
Acked-by: Robin Murphy
Link: https://lore.kernel.org/r/20201019182323.3162386-3-bjorn.andersson@linaro.org
Signed-off-by: Will Deacon
---
 drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 23 ++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index be4318044f96..48627fcf6bed 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -23,6 +23,28 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
 	{ }
 };
 
+static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
+{
+	u32 smr;
+	int i;
+
+	for (i = 0; i < smmu->num_mapping_groups; i++) {
+		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+
+		if (FIELD_GET(ARM_SMMU_SMR_VALID, smr)) {
+			smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
+			smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
+			smmu->smrs[i].valid = true;
+
+			smmu->s2crs[i].type = S2CR_TYPE_BYPASS;
+			smmu->s2crs[i].privcfg = S2CR_PRIVCFG_DEFAULT;
+			smmu->s2crs[i].cbndx = 0xff;
+		}
+	}
+
+	return 0;
+}
+
 static int qcom_smmu_def_domain_type(struct device *dev)
 {
 	const struct of_device_id *match =
@@ -61,6 +83,7 @@ static int qcom_smmu500_reset(struct arm_smmu_device *smmu)
 }
 
 static const struct arm_smmu_impl qcom_smmu_impl = {
+	.cfg_probe = qcom_smmu_cfg_probe,
 	.def_domain_type = qcom_smmu_def_domain_type,
 	.reset = qcom_smmu500_reset,
 };

From f9081b8ff5934b8d69c748d0200e844cadd2c667 Mon Sep 17 00:00:00 2001
From: Bjorn Andersson
Date: Mon, 19 Oct 2020 11:23:23 -0700
Subject: [PATCH 03/50] iommu/arm-smmu-qcom: Implement S2CR quirk

The firmware found in some Qualcomm platforms intercepts writes to S2CR
in order to replace bypass type streams with fault, and to ignore S2CR
updates of type fault.

Detect this behavior and implement a custom write_s2cr function in
order to trick the firmware into supporting bypass streams by means of
configuring the stream for translation using a reserved and disabled
context bank.

Also circumvent the problem of configuring faulting streams by
configuring the stream as bypass.
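The substitution performed by the quirk can be summarized with a small
sketch (a pure-function model of the qcom_smmu_write_s2cr() added in
the diff below; quirk_substitute() itself is hypothetical and not part
of the patch):

/* Hypothetical model of the quirk, assuming the S2CR_TYPE_* values
 * from arm-smmu.h and a reserved bypass context bank index */
static void quirk_substitute(u32 *type, u32 *cbndx, u8 bypass_cbndx)
{
	if (*type == S2CR_TYPE_BYPASS) {
		/* firmware would rewrite BYPASS to FAULT: request
		 * translation through the reserved, disabled bank */
		*type = S2CR_TYPE_TRANS;
		*cbndx = bypass_cbndx;
	} else if (*type == S2CR_TYPE_FAULT) {
		/* firmware ignores FAULT but rewrites BYPASS to FAULT:
		 * request BYPASS to end up with FAULT */
		*type = S2CR_TYPE_BYPASS;
		*cbndx = 0xff;
	}
}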
Cc: Signed-off-by: Bjorn Andersson Tested-by: Steev Klimaszewski Acked-by: Robin Murphy Link: https://lore.kernel.org/r/20201019182323.3162386-4-bjorn.andersson@linaro.org Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 67 ++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index 48627fcf6bed..66ba4870659f 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -10,8 +10,15 @@ struct qcom_smmu { struct arm_smmu_device smmu; + bool bypass_quirk; + u8 bypass_cbndx; }; +static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu) +{ + return container_of(smmu, struct qcom_smmu, smmu); +} + static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { { .compatible = "qcom,adreno" }, { .compatible = "qcom,mdp4" }, @@ -25,9 +32,33 @@ static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu) { + unsigned int last_s2cr = ARM_SMMU_GR0_S2CR(smmu->num_mapping_groups - 1); + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); + u32 reg; u32 smr; int i; + /* + * With some firmware versions writes to S2CR of type FAULT are + * ignored, and writing BYPASS will end up written as FAULT in the + * register. Perform a write to S2CR to detect if this is the case and + * if so reserve a context bank to emulate bypass streams. + */ + reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, S2CR_TYPE_BYPASS) | + FIELD_PREP(ARM_SMMU_S2CR_CBNDX, 0xff) | + FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, S2CR_PRIVCFG_DEFAULT); + arm_smmu_gr0_write(smmu, last_s2cr, reg); + reg = arm_smmu_gr0_read(smmu, last_s2cr); + if (FIELD_GET(ARM_SMMU_S2CR_TYPE, reg) != S2CR_TYPE_BYPASS) { + qsmmu->bypass_quirk = true; + qsmmu->bypass_cbndx = smmu->num_context_banks - 1; + + set_bit(qsmmu->bypass_cbndx, smmu->context_map); + + reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS); + arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg); + } + for (i = 0; i < smmu->num_mapping_groups; i++) { smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i)); @@ -45,6 +76,41 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu) return 0; } +static void qcom_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx) +{ + struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; + struct qcom_smmu *qsmmu = to_qcom_smmu(smmu); + u32 cbndx = s2cr->cbndx; + u32 type = s2cr->type; + u32 reg; + + if (qsmmu->bypass_quirk) { + if (type == S2CR_TYPE_BYPASS) { + /* + * Firmware with quirky S2CR handling will substitute + * BYPASS writes with FAULT, so point the stream to the + * reserved context bank and ask for translation on the + * stream + */ + type = S2CR_TYPE_TRANS; + cbndx = qsmmu->bypass_cbndx; + } else if (type == S2CR_TYPE_FAULT) { + /* + * Firmware with quirky S2CR handling will ignore FAULT + * writes, so trick it to write FAULT by asking for a + * BYPASS. 
+ */ + type = S2CR_TYPE_BYPASS; + cbndx = 0xff; + } + } + + reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, type) | + FIELD_PREP(ARM_SMMU_S2CR_CBNDX, cbndx) | + FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg); + arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg); +} + static int qcom_smmu_def_domain_type(struct device *dev) { const struct of_device_id *match = @@ -86,6 +152,7 @@ static const struct arm_smmu_impl qcom_smmu_impl = { .cfg_probe = qcom_smmu_cfg_probe, .def_domain_type = qcom_smmu_def_domain_type, .reset = qcom_smmu500_reset, + .write_s2cr = qcom_smmu_write_s2cr, }; struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) From af9da91493e5ff6179c2ecbfafa05ef203b25b5f Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Mon, 26 Oct 2020 12:00:22 +0000 Subject: [PATCH 04/50] iommu/arm-smmu: Use new devm_krealloc() The implementation-specific subclassing of struct arm_smmu_device really wanted an appropriate version of realloc(). Now that one exists, take full advantage of it to clarify what's actually being done here. Signed-off-by: Robin Murphy Link: https://lore.kernel.org/r/355e8d70c7f47d462d85b386aa09f2b5c655f023.1603713428.git.robin.murphy@arm.com Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu/arm-smmu-impl.c | 5 +---- drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c | 17 +---------------- drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 5 +---- 3 files changed, 3 insertions(+), 24 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c index 88f17cc33023..336f36ed9ed7 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c @@ -91,15 +91,12 @@ static struct arm_smmu_device *cavium_smmu_impl_init(struct arm_smmu_device *smm { struct cavium_smmu *cs; - cs = devm_kzalloc(smmu->dev, sizeof(*cs), GFP_KERNEL); + cs = devm_krealloc(smmu->dev, smmu, sizeof(*cs), GFP_KERNEL); if (!cs) return ERR_PTR(-ENOMEM); - cs->smmu = *smmu; cs->smmu.impl = &cavium_impl; - devm_kfree(smmu->dev, smmu); - return &cs->smmu; } diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c index 31368057e9be..29117444e5a0 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c @@ -242,18 +242,10 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu) struct nvidia_smmu *nvidia_smmu; struct platform_device *pdev = to_platform_device(dev); - nvidia_smmu = devm_kzalloc(dev, sizeof(*nvidia_smmu), GFP_KERNEL); + nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL); if (!nvidia_smmu) return ERR_PTR(-ENOMEM); - /* - * Copy the data from struct arm_smmu_device *smmu allocated in - * arm-smmu.c. The smmu from struct nvidia_smmu replaces the smmu - * pointer used in arm-smmu.c once this function returns. - * This is necessary to derive nvidia_smmu from smmu pointer passed - * through arm_smmu_impl function calls subsequently. - */ - nvidia_smmu->smmu = *smmu; /* Instance 0 is ioremapped by arm-smmu.c. */ nvidia_smmu->bases[0] = smmu->base; @@ -267,12 +259,5 @@ struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu) nvidia_smmu->smmu.impl = &nvidia_smmu_impl; - /* - * Free the struct arm_smmu_device *smmu allocated in arm-smmu.c. - * Once this function returns, arm-smmu.c would use arm_smmu_device - * allocated as part of struct nvidia_smmu. 
-	 */
-	devm_kfree(dev, smmu);
-
 	return &nvidia_smmu->smmu;
 }
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 66ba4870659f..86f0d182703e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -159,14 +159,11 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
 {
 	struct qcom_smmu *qsmmu;
 
-	qsmmu = devm_kzalloc(smmu->dev, sizeof(*qsmmu), GFP_KERNEL);
+	qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
 	if (!qsmmu)
 		return ERR_PTR(-ENOMEM);
 
-	qsmmu->smmu = *smmu;
-
 	qsmmu->smmu.impl = &qcom_smmu_impl;
-	devm_kfree(smmu->dev, smmu);
 
 	return &qsmmu->smmu;
 }

From 3045fe45abbcba2ae4c3ce9b3c610523651be1c7 Mon Sep 17 00:00:00 2001
From: Kaixu Xia
Date: Sat, 7 Nov 2020 18:20:39 +0800
Subject: [PATCH 05/50] iommu/arm-smmu-v3: Assign boolean values to a bool
 variable

Fix the following coccinelle warnings:

./drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c:36:12-26: WARNING: Assignment of 0/1 to bool variable

Signed-off-by: Kaixu Xia
Link: https://lore.kernel.org/r/1604744439-6846-1-git-send-email-kaixuxia@tencent.com
Signed-off-by: Will Deacon
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index e634bbe60573..c9cafce51835 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -33,7 +33,7 @@
 
 #include "arm-smmu-v3.h"
 
-static bool disable_bypass = 1;
+static bool disable_bypass = true;
 module_param(disable_bypass, bool, 0444);
 MODULE_PARM_DESC(disable_bypass,
 	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

From 5c7469c66f953a2eb223468d7aa40062af9c14ab Mon Sep 17 00:00:00 2001
From: Jordan Crouse
Date: Mon, 9 Nov 2020 11:47:25 -0700
Subject: [PATCH 06/50] iommu/arm-smmu-qcom: Add implementation for the adreno
 GPU SMMU

Add a special implementation for the SMMU attached to most Adreno GPU
targets, triggered by the qcom,adreno-smmu compatible string.

The new Adreno SMMU implementation will enable split pagetables (TTBR1)
for the domain attached to the GPU device (SID 0) and hard code it to
context bank 0 so the GPU hardware can implement per-instance
pagetables.
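As an illustration of the resulting interface, a hedged sketch of the
GPU-driver side (the adreno_smmu_priv struct is defined in a header not
shown in this excerpt; the example_* name and the elided error handling
are hypothetical):

/* Hypothetical GPU-driver-side sketch, not part of this patch */
static int example_enable_per_instance_pgtables(struct device *gpu_dev,
		const struct io_pgtable_cfg *ttbr0_cfg)
{
	struct adreno_smmu_priv *priv = dev_get_drvdata(gpu_dev);
	const struct io_pgtable_cfg *ttbr1_cfg;

	/* TTBR1 config of the GPU domain, set up by init_context() */
	ttbr1_cfg = priv->get_ttbr1_cfg(priv->cookie);

	/* install a per-instance TTBR0 pagetable; passing NULL instead
	 * of a config disables TTBR0 again */
	return priv->set_ttbr0_cfg(priv->cookie, ttbr0_cfg);
}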
Co-developed-by: Rob Clark Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark Reviewed-by: Bjorn Andersson Link: https://lore.kernel.org/r/20201109184728.2463097-2-jcrouse@codeaurora.org Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu/arm-smmu-impl.c | 3 + drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 151 ++++++++++++++++++++- drivers/iommu/arm/arm-smmu/arm-smmu.h | 1 + 3 files changed, 153 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c index 336f36ed9ed7..7fed89c9d18a 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c @@ -220,6 +220,9 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) of_device_is_compatible(np, "qcom,sm8250-smmu-500")) return qcom_smmu_impl_init(smmu); + if (of_device_is_compatible(smmu->dev->of_node, "qcom,adreno-smmu")) + return qcom_adreno_smmu_impl_init(smmu); + if (of_device_is_compatible(np, "marvell,ap806-smmu-500")) smmu->impl = &mrvl_mmu500_impl; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index 86f0d182703e..b5384c4d92c8 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -3,6 +3,7 @@ * Copyright (c) 2019, The Linux Foundation. All rights reserved. */ +#include #include #include @@ -19,6 +20,134 @@ static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu) return container_of(smmu, struct qcom_smmu, smmu); } +#define QCOM_ADRENO_SMMU_GPU_SID 0 + +static bool qcom_adreno_smmu_is_gpu_device(struct device *dev) +{ + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + int i; + + /* + * The GPU will always use SID 0 so that is a handy way to uniquely + * identify it and configure it for per-instance pagetables + */ + for (i = 0; i < fwspec->num_ids; i++) { + u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); + + if (sid == QCOM_ADRENO_SMMU_GPU_SID) + return true; + } + + return false; +} + +static const struct io_pgtable_cfg *qcom_adreno_smmu_get_ttbr1_cfg( + const void *cookie) +{ + struct arm_smmu_domain *smmu_domain = (void *)cookie; + struct io_pgtable *pgtable = + io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops); + return &pgtable->cfg; +} + +/* + * Local implementation to configure TTBR0 with the specified pagetable config. 
+ * The GPU driver will call this to enable TTBR0 when per-instance pagetables + * are active + */ + +static int qcom_adreno_smmu_set_ttbr0_cfg(const void *cookie, + const struct io_pgtable_cfg *pgtbl_cfg) +{ + struct arm_smmu_domain *smmu_domain = (void *)cookie; + struct io_pgtable *pgtable = io_pgtable_ops_to_pgtable(smmu_domain->pgtbl_ops); + struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx]; + + /* The domain must have split pagetables already enabled */ + if (cb->tcr[0] & ARM_SMMU_TCR_EPD1) + return -EINVAL; + + /* If the pagetable config is NULL, disable TTBR0 */ + if (!pgtbl_cfg) { + /* Do nothing if it is already disabled */ + if ((cb->tcr[0] & ARM_SMMU_TCR_EPD0)) + return -EINVAL; + + /* Set TCR to the original configuration */ + cb->tcr[0] = arm_smmu_lpae_tcr(&pgtable->cfg); + cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid); + } else { + u32 tcr = cb->tcr[0]; + + /* Don't call this again if TTBR0 is already enabled */ + if (!(cb->tcr[0] & ARM_SMMU_TCR_EPD0)) + return -EINVAL; + + tcr |= arm_smmu_lpae_tcr(pgtbl_cfg); + tcr &= ~(ARM_SMMU_TCR_EPD0 | ARM_SMMU_TCR_EPD1); + + cb->tcr[0] = tcr; + cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr; + cb->ttbr[0] |= FIELD_PREP(ARM_SMMU_TTBRn_ASID, cb->cfg->asid); + } + + arm_smmu_write_context_bank(smmu_domain->smmu, cb->cfg->cbndx); + + return 0; +} + +static int qcom_adreno_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_device *smmu, + struct device *dev, int start) +{ + int count; + + /* + * Assign context bank 0 to the GPU device so the GPU hardware can + * switch pagetables + */ + if (qcom_adreno_smmu_is_gpu_device(dev)) { + start = 0; + count = 1; + } else { + start = 1; + count = smmu->num_context_banks; + } + + return __arm_smmu_alloc_bitmap(smmu->context_map, start, count); +} + +static int qcom_adreno_smmu_init_context(struct arm_smmu_domain *smmu_domain, + struct io_pgtable_cfg *pgtbl_cfg, struct device *dev) +{ + struct adreno_smmu_priv *priv; + + /* Only enable split pagetables for the GPU device (SID 0) */ + if (!qcom_adreno_smmu_is_gpu_device(dev)) + return 0; + + /* + * All targets that use the qcom,adreno-smmu compatible string *should* + * be AARCH64 stage 1 but double check because the arm-smmu code assumes + * that is the case when the TTBR1 quirk is enabled + */ + if ((smmu_domain->stage == ARM_SMMU_DOMAIN_S1) && + (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)) + pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1; + + /* + * Initialize private interface with GPU: + */ + + priv = dev_get_drvdata(dev); + priv->cookie = smmu_domain; + priv->get_ttbr1_cfg = qcom_adreno_smmu_get_ttbr1_cfg; + priv->set_ttbr0_cfg = qcom_adreno_smmu_set_ttbr0_cfg; + + return 0; +} + static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = { { .compatible = "qcom,adreno" }, { .compatible = "qcom,mdp4" }, @@ -155,7 +284,15 @@ static const struct arm_smmu_impl qcom_smmu_impl = { .write_s2cr = qcom_smmu_write_s2cr, }; -struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) +static const struct arm_smmu_impl qcom_adreno_smmu_impl = { + .init_context = qcom_adreno_smmu_init_context, + .def_domain_type = qcom_smmu_def_domain_type, + .reset = qcom_smmu500_reset, + .alloc_context_bank = qcom_adreno_smmu_alloc_context_bank, +}; + +static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu, + const struct arm_smmu_impl *impl) { struct qcom_smmu *qsmmu; @@ -163,7 +300,17 @@ struct arm_smmu_device 
*qcom_smmu_impl_init(struct arm_smmu_device *smmu)
 	if (!qsmmu)
 		return ERR_PTR(-ENOMEM);
 
-	qsmmu->smmu.impl = &qcom_smmu_impl;
+	qsmmu->smmu.impl = impl;
 
 	return &qsmmu->smmu;
 }
+
+struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+	return qcom_smmu_create(smmu, &qcom_smmu_impl);
+}
+
+struct arm_smmu_device *qcom_adreno_smmu_impl_init(struct arm_smmu_device *smmu)
+{
+	return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl);
+}
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index b71647eaa319..9a5eb6782918 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -521,6 +521,7 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page,
 struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu);
 struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu);
 struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
+struct arm_smmu_device *qcom_adreno_smmu_impl_init(struct arm_smmu_device *smmu);
 
 void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx);
 int arm_mmu500_reset(struct arm_smmu_device *smmu);

From bffb2eaf0ba2c0bdd7fc2c4e194dab6783f5d8c1 Mon Sep 17 00:00:00 2001
From: Rob Clark
Date: Mon, 9 Nov 2020 11:47:26 -0700
Subject: [PATCH 07/50] iommu/arm-smmu: Add a way for implementations to
 influence SCTLR

For the Adreno GPU's SMMU, we want SCTLR.HUPCF set to ensure that
pending translations are not terminated on iova fault. Otherwise a
terminated CP read could hang the GPU by returning invalid
command-stream data.

Add a hook for the implementation to modify the sctlr value if it
wishes.

Co-developed-by: Jordan Crouse
Signed-off-by: Rob Clark
Signed-off-by: Jordan Crouse
Link: https://lore.kernel.org/r/20201109184728.2463097-3-jcrouse@codeaurora.org
Signed-off-by: Will Deacon
---
 drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 13 +++++++++++++
 drivers/iommu/arm/arm-smmu/arm-smmu.c      |  5 ++++-
 drivers/iommu/arm/arm-smmu/arm-smmu.h      |  2 ++
 3 files changed, 19 insertions(+), 1 deletion(-)

diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index b5384c4d92c8..d0636c803a36 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -20,6 +20,18 @@ static struct qcom_smmu *to_qcom_smmu(struct arm_smmu_device *smmu)
 	return container_of(smmu, struct qcom_smmu, smmu);
 }
 
+static void qcom_adreno_smmu_write_sctlr(struct arm_smmu_device *smmu, int idx,
+		u32 reg)
+{
+	/*
+	 * On the GPU device we want to process subsequent transactions after a
+	 * fault to keep the GPU from hanging
+	 */
+	reg |= ARM_SMMU_SCTLR_HUPCF;
+
+	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
+}
+
 #define QCOM_ADRENO_SMMU_GPU_SID 0
 
 static bool qcom_adreno_smmu_is_gpu_device(struct device *dev)
@@ -289,6 +301,7 @@ static const struct arm_smmu_impl qcom_adreno_smmu_impl = {
 	.def_domain_type = qcom_smmu_def_domain_type,
 	.reset = qcom_smmu500_reset,
 	.alloc_context_bank = qcom_adreno_smmu_alloc_context_bank,
+	.write_sctlr = qcom_adreno_smmu_write_sctlr,
 };
 
 static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index bcbacf22331d..0f28a8614da3 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -617,7 +617,10 @@ void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
 	if
(IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) reg |= ARM_SMMU_SCTLR_E; - arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg); + if (smmu->impl && smmu->impl->write_sctlr) + smmu->impl->write_sctlr(smmu, idx, reg); + else + arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg); } static int arm_smmu_alloc_context_bank(struct arm_smmu_domain *smmu_domain, diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h index 9a5eb6782918..04288b6fc619 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.h +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h @@ -144,6 +144,7 @@ enum arm_smmu_cbar_type { #define ARM_SMMU_CB_SCTLR 0x0 #define ARM_SMMU_SCTLR_S1_ASIDPNE BIT(12) #define ARM_SMMU_SCTLR_CFCFG BIT(7) +#define ARM_SMMU_SCTLR_HUPCF BIT(8) #define ARM_SMMU_SCTLR_CFIE BIT(6) #define ARM_SMMU_SCTLR_CFRE BIT(5) #define ARM_SMMU_SCTLR_E BIT(4) @@ -437,6 +438,7 @@ struct arm_smmu_impl { struct arm_smmu_device *smmu, struct device *dev, int start); void (*write_s2cr)(struct arm_smmu_device *smmu, int idx); + void (*write_sctlr)(struct arm_smmu_device *smmu, int idx, u32 reg); }; #define INVALID_SMENDX -1 From a29bbb0861f487a5e144dc997a9f71a36c7a2404 Mon Sep 17 00:00:00 2001 From: Jordan Crouse Date: Mon, 9 Nov 2020 11:47:27 -0700 Subject: [PATCH 08/50] dt-bindings: arm-smmu: Add compatible string for Adreno GPU SMMU Every Qcom Adreno GPU has an embedded SMMU for its own use. These devices depend on unique features such as split pagetables, different stall/halt requirements and other settings. Identify them with a compatible string so that they can be identified in the arm-smmu implementation specific code. Signed-off-by: Jordan Crouse Signed-off-by: Rob Clark Reviewed-by: Rob Herring Reviewed-by: Bjorn Andersson Link: https://lore.kernel.org/r/20201109184728.2463097-4-jcrouse@codeaurora.org Signed-off-by: Will Deacon --- Documentation/devicetree/bindings/iommu/arm,smmu.yaml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml index 503160a7b9a0..3b63f2ae24db 100644 --- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml +++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml @@ -28,8 +28,6 @@ properties: - enum: - qcom,msm8996-smmu-v2 - qcom,msm8998-smmu-v2 - - qcom,sc7180-smmu-v2 - - qcom,sdm845-smmu-v2 - const: qcom,smmu-v2 - description: Qcom SoCs implementing "arm,mmu-500" @@ -40,6 +38,13 @@ properties: - qcom,sm8150-smmu-500 - qcom,sm8250-smmu-500 - const: arm,mmu-500 + - description: Qcom Adreno GPUs implementing "arm,smmu-v2" + items: + - enum: + - qcom,sc7180-smmu-v2 + - qcom,sdm845-smmu-v2 + - const: qcom,adreno-smmu + - const: qcom,smmu-v2 - description: Marvell SoCs implementing "arm,mmu-500" items: - const: marvell,ap806-smmu-500 From 68dd9d89eaf56dfab8d46bf25610aa4650247617 Mon Sep 17 00:00:00 2001 From: Lukas Bulwahn Date: Sun, 15 Nov 2020 21:59:51 +0100 Subject: [PATCH 09/50] iommu/vt-d: include conditionally on CONFIG_INTEL_IOMMU_SVM Commit 6ee1b77ba3ac ("iommu/vt-d: Add svm/sva invalidate function") introduced intel_iommu_sva_invalidate() when CONFIG_INTEL_IOMMU_SVM. This function uses the dedicated static variable inv_type_granu_table and functions to_vtd_granularity() and to_vtd_size(). These parts are unused when !CONFIG_INTEL_IOMMU_SVM, and hence, make CC=clang W=1 warns with an -Wunused-function warning. Include these parts conditionally on CONFIG_INTEL_IOMMU_SVM. 
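In outline, the fix is simply to move the helpers under the existing
guard (a generic sketch; the names here are placeholders, not the
actual vt-d symbols):

#ifdef CONFIG_INTEL_IOMMU_SVM
/* Helpers used only by the guarded entry point live under the same
 * guard, so !CONFIG_INTEL_IOMMU_SVM builds never emit them and
 * clang's -Wunused-function stays quiet. */
static inline u64 helper_used_only_by_svm(u64 x)
{
	return x * 2;
}
#endif /* CONFIG_INTEL_IOMMU_SVM */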
Fixes: 6ee1b77ba3ac ("iommu/vt-d: Add svm/sva invalidate function")
Signed-off-by: Lukas Bulwahn
Acked-by: Lu Baolu
Link: https://lore.kernel.org/r/20201115205951.20698-1-lukas.bulwahn@gmail.com
Signed-off-by: Will Deacon
---
 drivers/iommu/intel/iommu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 1b1ca63e6bbe..c1a209273767 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -5387,6 +5387,7 @@ static void intel_iommu_aux_detach_device(struct iommu_domain *domain,
 	aux_domain_remove_dev(to_dmar_domain(domain), dev);
 }
 
+#ifdef CONFIG_INTEL_IOMMU_SVM
 /*
  * 2D array for converting and sanitizing IOMMU generic TLB granularity to
  * VT-d granularity. Invalidation is typically included in the unmap operation
@@ -5433,7 +5434,6 @@ static inline u64 to_vtd_size(u64 granu_size, u64 nr_granules)
 	return order_base_2(nr_pages);
 }
 
-#ifdef CONFIG_INTEL_IOMMU_SVM
 static int intel_iommu_sva_invalidate(struct iommu_domain *domain,
 		struct device *dev,
 		struct iommu_cache_invalidate_info *inv_info)

From 4e89dce725213d3d0b0475211b500eda4ef4bf2f Mon Sep 17 00:00:00 2001
From: Vijayanand Jitta
Date: Wed, 30 Sep 2020 13:14:23 +0530
Subject: [PATCH 10/50] iommu/iova: Retry from last rb tree node if iova
 search fails

Whenever a new iova alloc request comes in, the iova is always searched
from the cached node and the nodes previous to the cached node. So even
if there is free iova space available in the nodes next to the cached
node, the iova allocation can still fail because of this approach.

Consider the following sequence of iova allocs and frees on a 1GB iova
space:

1) alloc - 500MB
2) alloc - 12MB
3) alloc - 499MB
4) free - 12MB which was allocated in step 2
5) alloc - 13MB

After the above sequence we will have 12MB of free iova space, and the
cached node will be pointing to the iova pfn of the last alloc of 13MB,
which will be the lowest iova pfn of that iova space. Now if we get an
alloc request of 2MB, we just search from the cached node and then look
for lower iova pfns for free iova, and as there aren't any, the iova
alloc fails even though there is 12MB of free iova space.

To avoid such iova search failures, do a retry from the last rb tree
node when the iova search fails: this will search the entire tree and
get an iova if it is available.
Signed-off-by: Vijayanand Jitta
Reviewed-by: Robin Murphy
Link: https://lore.kernel.org/r/1601451864-5956-1-git-send-email-vjitta@codeaurora.org
Signed-off-by: Will Deacon
---
 drivers/iommu/iova.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 30d969a4c5fd..c3a1a8e6f2c1 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -184,8 +184,9 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 	struct rb_node *curr, *prev;
 	struct iova *curr_iova;
 	unsigned long flags;
-	unsigned long new_pfn;
+	unsigned long new_pfn, retry_pfn;
 	unsigned long align_mask = ~0UL;
+	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
 
 	if (size_aligned)
 		align_mask <<= fls_long(size - 1);
@@ -198,15 +199,25 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 
 	curr = __get_cached_rbnode(iovad, limit_pfn);
 	curr_iova = rb_entry(curr, struct iova, node);
+	retry_pfn = curr_iova->pfn_hi + 1;
+
+retry:
 	do {
-		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
-		new_pfn = (limit_pfn - size) & align_mask;
+		high_pfn = min(high_pfn, curr_iova->pfn_lo);
+		new_pfn = (high_pfn - size) & align_mask;
 		prev = curr;
 		curr = rb_prev(curr);
 		curr_iova = rb_entry(curr, struct iova, node);
-	} while (curr && new_pfn <= curr_iova->pfn_hi);
+	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
 
-	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+	if (high_pfn < size || new_pfn < low_pfn) {
+		if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
+			high_pfn = limit_pfn;
+			low_pfn = retry_pfn;
+			curr = &iovad->anchor.node;
+			curr_iova = rb_entry(curr, struct iova, node);
+			goto retry;
+		}
 		iovad->max32_alloc_size = size;
 		goto iova32_full;
 	}

From 6fa3525b455ae1fde5b424907141b33651f137b0 Mon Sep 17 00:00:00 2001
From: Vijayanand Jitta
Date: Wed, 30 Sep 2020 13:14:24 +0530
Subject: [PATCH 11/50] iommu/iova: Free global iova rcache on iova alloc
 failure

Whenever an iova alloc request fails, we free the iova ranges present
in the percpu iova rcaches and then retry, but the global iova rcache
is not freed; as a result we could still see iova alloc failures even
after the retry, since the global rcache is holding onto iovas, which
can cause fragmentation.

So, free the global iova rcache as well and then go for the retry.
Signed-off-by: Vijayanand Jitta Reviewed-by: Robin Murphy Acked-by: John Garry Link: https://lore.kernel.org/r/1601451864-5956-2-git-send-email-vjitta@codeaurora.org Signed-off-by: Will Deacon --- drivers/iommu/iova.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index c3a1a8e6f2c1..ea04a88c673d 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -25,6 +25,7 @@ static void init_iova_rcaches(struct iova_domain *iovad); static void free_iova_rcaches(struct iova_domain *iovad); static void fq_destroy_all_entries(struct iova_domain *iovad); static void fq_flush_timeout(struct timer_list *t); +static void free_global_cached_iovas(struct iova_domain *iovad); void init_iova_domain(struct iova_domain *iovad, unsigned long granule, @@ -442,6 +443,7 @@ retry: flush_rcache = false; for_each_online_cpu(cpu) free_cpu_cached_iovas(cpu, iovad); + free_global_cached_iovas(iovad); goto retry; } @@ -1057,5 +1059,25 @@ void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad) } } +/* + * free all the IOVA ranges of global cache + */ +static void free_global_cached_iovas(struct iova_domain *iovad) +{ + struct iova_rcache *rcache; + unsigned long flags; + int i, j; + + for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) { + rcache = &iovad->rcaches[i]; + spin_lock_irqsave(&rcache->lock, flags); + for (j = 0; j < rcache->depot_size; ++j) { + iova_magazine_free_pfns(rcache->depot[j], iovad); + iova_magazine_free(rcache->depot[j]); + } + rcache->depot_size = 0; + spin_unlock_irqrestore(&rcache->lock, flags); + } +} MODULE_AUTHOR("Anil S Keshavamurthy "); MODULE_LICENSE("GPL"); From 6243f572a18db99607f29517b2d6b4209356b9fa Mon Sep 17 00:00:00 2001 From: Chen Jun Date: Fri, 23 Oct 2020 06:48:27 +0000 Subject: [PATCH 12/50] iommu: Modify the description of iommu_sva_unbind_device iommu_sva_unbind_device has no return value. Remove the description of the return value of the function. Signed-off-by: Chen Jun Link: https://lore.kernel.org/r/20201023064827.74794-1-chenjun102@huawei.com Signed-off-by: Will Deacon --- drivers/iommu/iommu.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index b53446bb8c6b..88b0c9192d8c 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -2995,8 +2995,6 @@ EXPORT_SYMBOL_GPL(iommu_sva_bind_device); * Put reference to a bond between device and address space. The device should * not be issuing any more transaction for this PASID. All outstanding page * requests for this PASID must have been flushed to the IOMMU. - * - * Returns 0 on success, or an error value */ void iommu_sva_unbind_device(struct iommu_sva *handle) { From cb4789b0d19ff231ce9f73376a023341300aed96 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Fri, 6 Nov 2020 16:50:47 +0100 Subject: [PATCH 13/50] iommu/ioasid: Add ioasid references Let IOASID users take references to existing ioasids with ioasid_get(). ioasid_put() drops a reference and only frees the ioasid when its reference number is zero. It returns true if the ioasid was freed. For drivers that don't call ioasid_get(), ioasid_put() is the same as ioasid_free(). 
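For example, the intended calling pattern looks like this (a
hypothetical sketch; my_set and the PASID bounds are placeholders):

static DECLARE_IOASID_SET(my_set);

static void example(void)
{
	ioasid_t id = ioasid_alloc(&my_set, 1, 100, NULL); /* refs == 1 */

	if (id == INVALID_IOASID)
		return;

	ioasid_get(id);			/* refs == 2 */
	WARN_ON(ioasid_put(id));	/* refs == 1, not freed: false */
	WARN_ON(!ioasid_put(id));	/* refs == 0, freed: true */
}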
Signed-off-by: Jean-Philippe Brucker Reviewed-by: Eric Auger Reviewed-by: Lu Baolu Link: https://lore.kernel.org/r/20201106155048.997886-2-jean-philippe@linaro.org Signed-off-by: Will Deacon --- drivers/iommu/intel/iommu.c | 4 ++-- drivers/iommu/intel/svm.c | 6 +++--- drivers/iommu/ioasid.c | 40 ++++++++++++++++++++++++++++++++----- include/linux/ioasid.h | 10 ++++++++-- 4 files changed, 48 insertions(+), 12 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 1b1ca63e6bbe..8b26223979a2 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5198,7 +5198,7 @@ static void auxiliary_unlink_device(struct dmar_domain *domain, domain->auxd_refcnt--; if (!domain->auxd_refcnt && domain->default_pasid > 0) - ioasid_free(domain->default_pasid); + ioasid_put(domain->default_pasid); } static int aux_domain_add_dev(struct dmar_domain *domain, @@ -5259,7 +5259,7 @@ attach_failed: spin_unlock(&iommu->lock); spin_unlock_irqrestore(&device_domain_lock, flags); if (!domain->auxd_refcnt && domain->default_pasid > 0) - ioasid_free(domain->default_pasid); + ioasid_put(domain->default_pasid); return ret; } diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 3242ebd0bca3..4fa248b98031 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -598,7 +598,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags, if (mm) { ret = mmu_notifier_register(&svm->notifier, mm); if (ret) { - ioasid_free(svm->pasid); + ioasid_put(svm->pasid); kfree(svm); kfree(sdev); goto out; @@ -616,7 +616,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags, if (ret) { if (mm) mmu_notifier_unregister(&svm->notifier, mm); - ioasid_free(svm->pasid); + ioasid_put(svm->pasid); kfree(svm); kfree(sdev); goto out; @@ -689,7 +689,7 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid) kfree_rcu(sdev, rcu); if (list_empty(&svm->devs)) { - ioasid_free(svm->pasid); + ioasid_put(svm->pasid); if (svm->mm) { mmu_notifier_unregister(&svm->notifier, svm->mm); /* Clear mm's pasid. */ diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c index 0f8dd377aada..50ee27bbd04e 100644 --- a/drivers/iommu/ioasid.c +++ b/drivers/iommu/ioasid.c @@ -2,7 +2,7 @@ /* * I/O Address Space ID allocator. There is one global IOASID space, split into * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and - * free IOASIDs with ioasid_alloc and ioasid_free. + * free IOASIDs with ioasid_alloc and ioasid_put. 
*/ #include #include @@ -15,6 +15,7 @@ struct ioasid_data { struct ioasid_set *set; void *private; struct rcu_head rcu; + refcount_t refs; }; /* @@ -314,6 +315,7 @@ ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, data->set = set; data->private = private; + refcount_set(&data->refs, 1); /* * Custom allocator needs allocator data to perform platform specific @@ -346,13 +348,36 @@ exit_free: EXPORT_SYMBOL_GPL(ioasid_alloc); /** - * ioasid_free - Free an IOASID - * @ioasid: the ID to remove + * ioasid_get - obtain a reference to the IOASID */ -void ioasid_free(ioasid_t ioasid) +void ioasid_get(ioasid_t ioasid) { struct ioasid_data *ioasid_data; + spin_lock(&ioasid_allocator_lock); + ioasid_data = xa_load(&active_allocator->xa, ioasid); + if (ioasid_data) + refcount_inc(&ioasid_data->refs); + else + WARN_ON(1); + spin_unlock(&ioasid_allocator_lock); +} +EXPORT_SYMBOL_GPL(ioasid_get); + +/** + * ioasid_put - Release a reference to an ioasid + * @ioasid: the ID to remove + * + * Put a reference to the IOASID, free it when the number of references drops to + * zero. + * + * Return: %true if the IOASID was freed, %false otherwise. + */ +bool ioasid_put(ioasid_t ioasid) +{ + bool free = false; + struct ioasid_data *ioasid_data; + spin_lock(&ioasid_allocator_lock); ioasid_data = xa_load(&active_allocator->xa, ioasid); if (!ioasid_data) { @@ -360,6 +385,10 @@ void ioasid_free(ioasid_t ioasid) goto exit_unlock; } + free = refcount_dec_and_test(&ioasid_data->refs); + if (!free) + goto exit_unlock; + active_allocator->ops->free(ioasid, active_allocator->ops->pdata); /* Custom allocator needs additional steps to free the xa element */ if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) { @@ -369,8 +398,9 @@ void ioasid_free(ioasid_t ioasid) exit_unlock: spin_unlock(&ioasid_allocator_lock); + return free; } -EXPORT_SYMBOL_GPL(ioasid_free); +EXPORT_SYMBOL_GPL(ioasid_put); /** * ioasid_find - Find IOASID data diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h index 6f000d7a0ddc..e9dacd4b9f6b 100644 --- a/include/linux/ioasid.h +++ b/include/linux/ioasid.h @@ -34,7 +34,8 @@ struct ioasid_allocator_ops { #if IS_ENABLED(CONFIG_IOASID) ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max, void *private); -void ioasid_free(ioasid_t ioasid); +void ioasid_get(ioasid_t ioasid); +bool ioasid_put(ioasid_t ioasid); void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, bool (*getter)(void *)); int ioasid_register_allocator(struct ioasid_allocator_ops *allocator); @@ -48,10 +49,15 @@ static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, return INVALID_IOASID; } -static inline void ioasid_free(ioasid_t ioasid) +static inline void ioasid_get(ioasid_t ioasid) { } +static inline bool ioasid_put(ioasid_t ioasid) +{ + return false; +} + static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid, bool (*getter)(void *)) { From cfc78dfd9b36dcda7c3ca9cdfca343f84c72252f Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Fri, 6 Nov 2020 16:50:48 +0100 Subject: [PATCH 14/50] iommu/sva: Add PASID helpers Let IOMMU drivers allocate a single PASID per mm. Store the mm in the IOASID set to allow refcounting and searching mm by PASID, when handling an I/O page fault. 
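A hedged sketch of how an IOMMU driver might use these helpers (the
example_* names and the PASID bound are placeholders, not part of the
patch):

static int example_bind(struct mm_struct *mm, ioasid_t max_pasid)
{
	int ret = iommu_sva_alloc_pasid(mm, 1, max_pasid);

	if (ret)
		return ret;

	/* ... program mm->pasid into the device ... */
	return 0;
}

static void example_iopf(ioasid_t pasid)
{
	struct mm_struct *mm = iommu_sva_find(pasid);

	if (IS_ERR_OR_NULL(mm))
		return;

	/* ... handle the I/O page fault against mm ... */
	mmput(mm);
}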
Signed-off-by: Jean-Philippe Brucker Reviewed-by: Lu Baolu Link: https://lore.kernel.org/r/20201106155048.997886-3-jean-philippe@linaro.org Signed-off-by: Will Deacon --- drivers/iommu/Kconfig | 5 ++ drivers/iommu/Makefile | 1 + drivers/iommu/iommu-sva-lib.c | 86 +++++++++++++++++++++++++++++++++++ drivers/iommu/iommu-sva-lib.h | 15 ++++++ 4 files changed, 107 insertions(+) create mode 100644 drivers/iommu/iommu-sva-lib.c create mode 100644 drivers/iommu/iommu-sva-lib.h diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 04878caf6da4..f5ebbdfbf636 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -103,6 +103,11 @@ config IOMMU_DMA select IRQ_MSI_IOMMU select NEED_SG_DMA_LENGTH +# Shared Virtual Addressing library +config IOMMU_SVA_LIB + bool + select IOASID + config FSL_PAMU bool "Freescale IOMMU support" depends on PCI diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 11f1771104f3..61bd30cd8369 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -27,3 +27,4 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o obj-$(CONFIG_S390_IOMMU) += s390-iommu.o obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o +obj-$(CONFIG_IOMMU_SVA_LIB) += iommu-sva-lib.o diff --git a/drivers/iommu/iommu-sva-lib.c b/drivers/iommu/iommu-sva-lib.c new file mode 100644 index 000000000000..bd41405d34e9 --- /dev/null +++ b/drivers/iommu/iommu-sva-lib.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Helpers for IOMMU drivers implementing SVA + */ +#include +#include + +#include "iommu-sva-lib.h" + +static DEFINE_MUTEX(iommu_sva_lock); +static DECLARE_IOASID_SET(iommu_sva_pasid); + +/** + * iommu_sva_alloc_pasid - Allocate a PASID for the mm + * @mm: the mm + * @min: minimum PASID value (inclusive) + * @max: maximum PASID value (inclusive) + * + * Try to allocate a PASID for this mm, or take a reference to the existing one + * provided it fits within the [@min, @max] range. On success the PASID is + * available in mm->pasid, and must be released with iommu_sva_free_pasid(). + * @min must be greater than 0, because 0 indicates an unused mm->pasid. + * + * Returns 0 on success and < 0 on error. + */ +int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max) +{ + int ret = 0; + ioasid_t pasid; + + if (min == INVALID_IOASID || max == INVALID_IOASID || + min == 0 || max < min) + return -EINVAL; + + mutex_lock(&iommu_sva_lock); + if (mm->pasid) { + if (mm->pasid >= min && mm->pasid <= max) + ioasid_get(mm->pasid); + else + ret = -EOVERFLOW; + } else { + pasid = ioasid_alloc(&iommu_sva_pasid, min, max, mm); + if (pasid == INVALID_IOASID) + ret = -ENOMEM; + else + mm->pasid = pasid; + } + mutex_unlock(&iommu_sva_lock); + return ret; +} +EXPORT_SYMBOL_GPL(iommu_sva_alloc_pasid); + +/** + * iommu_sva_free_pasid - Release the mm's PASID + * @mm: the mm + * + * Drop one reference to a PASID allocated with iommu_sva_alloc_pasid() + */ +void iommu_sva_free_pasid(struct mm_struct *mm) +{ + mutex_lock(&iommu_sva_lock); + if (ioasid_put(mm->pasid)) + mm->pasid = 0; + mutex_unlock(&iommu_sva_lock); +} +EXPORT_SYMBOL_GPL(iommu_sva_free_pasid); + +/* ioasid_find getter() requires a void * argument */ +static bool __mmget_not_zero(void *mm) +{ + return mmget_not_zero(mm); +} + +/** + * iommu_sva_find() - Find mm associated to the given PASID + * @pasid: Process Address Space ID assigned to the mm + * + * On success a reference to the mm is taken, and must be released with mmput(). 
+ * + * Returns the mm corresponding to this PASID, or an error if not found. + */ +struct mm_struct *iommu_sva_find(ioasid_t pasid) +{ + return ioasid_find(&iommu_sva_pasid, pasid, __mmget_not_zero); +} +EXPORT_SYMBOL_GPL(iommu_sva_find); diff --git a/drivers/iommu/iommu-sva-lib.h b/drivers/iommu/iommu-sva-lib.h new file mode 100644 index 000000000000..b40990aef3fd --- /dev/null +++ b/drivers/iommu/iommu-sva-lib.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * SVA library for IOMMU drivers + */ +#ifndef _IOMMU_SVA_LIB_H +#define _IOMMU_SVA_LIB_H + +#include +#include + +int iommu_sva_alloc_pasid(struct mm_struct *mm, ioasid_t min, ioasid_t max); +void iommu_sva_free_pasid(struct mm_struct *mm); +struct mm_struct *iommu_sva_find(ioasid_t pasid); + +#endif /* _IOMMU_SVA_LIB_H */ From 32784a9562fb0518b12e9797ee2aec52214adf6f Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Fri, 6 Nov 2020 16:50:49 +0100 Subject: [PATCH 15/50] iommu/arm-smmu-v3: Implement iommu_sva_bind/unbind() The sva_bind() function allows devices to access process address spaces using a PASID (aka SSID). (1) bind() allocates or gets an existing MMU notifier tied to the (domain, mm) pair. Each mm gets one PASID. (2) Any change to the address space calls invalidate_range() which sends ATC invalidations (in a subsequent patch). (3) When the process address space dies, the release() notifier disables the CD to allow reclaiming the page tables. Since release() has to be light we do not instruct device drivers to stop DMA here, we just ignore incoming page faults from this point onwards. To avoid any event 0x0a print (C_BAD_CD) we disable translation without clearing CD.V. PCIe Translation Requests and Page Requests are silently denied. Don't clear the R bit because the S bit can't be cleared when STALL_MODEL==0b10 (forced), and clearing R without clearing S is useless. Faulting transactions will stall and will be aborted by the IOPF handler. (4) After stopping DMA, the device driver releases the bond by calling unbind(). We release the MMU notifier, free the PASID and the bond. Three structures keep track of bonds: * arm_smmu_bond: one per {device, mm} pair, the handle returned to the device driver for a bind() request. * arm_smmu_mmu_notifier: one per {domain, mm} pair, deals with ATS/TLB invalidations and clearing the context descriptor on mm exit. * arm_smmu_ctx_desc: one per mm, holds the pinned ASID and pgd. Signed-off-by: Jean-Philippe Brucker Link: https://lore.kernel.org/r/20201106155048.997886-4-jean-philippe@linaro.org Signed-off-by: Will Deacon --- drivers/iommu/Kconfig | 2 + .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 230 +++++++++++++++++- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 32 ++- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 28 +++ 4 files changed, 282 insertions(+), 10 deletions(-) diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index f5ebbdfbf636..192ef8f61310 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -316,6 +316,8 @@ config ARM_SMMU_V3 config ARM_SMMU_V3_SVA bool "Shared Virtual Addressing support for the ARM SMMUv3" depends on ARM_SMMU_V3 + select IOMMU_SVA_LIB + select MMU_NOTIFIER help Support for sharing process address spaces with devices using the SMMUv3. 
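From a device driver's point of view, the bind/unbind flow added below
is reached through the generic IOMMU SVA API; a hedged usage sketch
(example_use_sva() is hypothetical):

static int example_use_sva(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	pasid = iommu_sva_get_pasid(handle);
	/* ... program pasid into the device and issue DMA on process
	 * virtual addresses ... */

	/* stop DMA first, then release the bond */
	iommu_sva_unbind_device(handle);
	return 0;
}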
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 9255c9600fb8..4d03481289ff 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -5,11 +5,35 @@ #include #include +#include #include #include "arm-smmu-v3.h" +#include "../../iommu-sva-lib.h" #include "../../io-pgtable-arm.h" +struct arm_smmu_mmu_notifier { + struct mmu_notifier mn; + struct arm_smmu_ctx_desc *cd; + bool cleared; + refcount_t refs; + struct list_head list; + struct arm_smmu_domain *domain; +}; + +#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn) + +struct arm_smmu_bond { + struct iommu_sva sva; + struct mm_struct *mm; + struct arm_smmu_mmu_notifier *smmu_mn; + struct list_head list; + refcount_t refs; +}; + +#define sva_to_bond(handle) \ + container_of(handle, struct arm_smmu_bond, sva) + static DEFINE_MUTEX(sva_lock); /* @@ -64,7 +88,6 @@ arm_smmu_share_asid(struct mm_struct *mm, u16 asid) return NULL; } -__maybe_unused static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm) { u16 asid; @@ -145,7 +168,6 @@ out_put_context: return err < 0 ? ERR_PTR(err) : ret; } -__maybe_unused static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd) { if (arm_smmu_free_asid(cd)) { @@ -155,6 +177,201 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd) } } +static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) +{ + struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn); + struct arm_smmu_domain *smmu_domain = smmu_mn->domain; + + mutex_lock(&sva_lock); + if (smmu_mn->cleared) { + mutex_unlock(&sva_lock); + return; + } + + /* + * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events, + * but disable translation. 
+ */ + arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd); + + arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid); + + smmu_mn->cleared = true; + mutex_unlock(&sva_lock); +} + +static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn) +{ + kfree(mn_to_smmu(mn)); +} + +static struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = { + .release = arm_smmu_mm_release, + .free_notifier = arm_smmu_mmu_notifier_free, +}; + +/* Allocate or get existing MMU notifier for this {domain, mm} pair */ +static struct arm_smmu_mmu_notifier * +arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain, + struct mm_struct *mm) +{ + int ret; + struct arm_smmu_ctx_desc *cd; + struct arm_smmu_mmu_notifier *smmu_mn; + + list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) { + if (smmu_mn->mn.mm == mm) { + refcount_inc(&smmu_mn->refs); + return smmu_mn; + } + } + + cd = arm_smmu_alloc_shared_cd(mm); + if (IS_ERR(cd)) + return ERR_CAST(cd); + + smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL); + if (!smmu_mn) { + ret = -ENOMEM; + goto err_free_cd; + } + + refcount_set(&smmu_mn->refs, 1); + smmu_mn->cd = cd; + smmu_mn->domain = smmu_domain; + smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops; + + ret = mmu_notifier_register(&smmu_mn->mn, mm); + if (ret) { + kfree(smmu_mn); + goto err_free_cd; + } + + ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd); + if (ret) + goto err_put_notifier; + + list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers); + return smmu_mn; + +err_put_notifier: + /* Frees smmu_mn */ + mmu_notifier_put(&smmu_mn->mn); +err_free_cd: + arm_smmu_free_shared_cd(cd); + return ERR_PTR(ret); +} + +static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn) +{ + struct mm_struct *mm = smmu_mn->mn.mm; + struct arm_smmu_ctx_desc *cd = smmu_mn->cd; + struct arm_smmu_domain *smmu_domain = smmu_mn->domain; + + if (!refcount_dec_and_test(&smmu_mn->refs)) + return; + + list_del(&smmu_mn->list); + arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL); + + /* + * If we went through clear(), we've already invalidated, and no + * new TLB entry can have been formed. + */ + if (!smmu_mn->cleared) + arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid); + + /* Frees smmu_mn */ + mmu_notifier_put(&smmu_mn->mn); + arm_smmu_free_shared_cd(cd); +} + +static struct iommu_sva * +__arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm) +{ + int ret; + struct arm_smmu_bond *bond; + struct arm_smmu_master *master = dev_iommu_priv_get(dev); + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + + if (!master || !master->sva_enabled) + return ERR_PTR(-ENODEV); + + /* If bind() was already called for this {dev, mm} pair, reuse it. 
*/ + list_for_each_entry(bond, &master->bonds, list) { + if (bond->mm == mm) { + refcount_inc(&bond->refs); + return &bond->sva; + } + } + + bond = kzalloc(sizeof(*bond), GFP_KERNEL); + if (!bond) + return ERR_PTR(-ENOMEM); + + /* Allocate a PASID for this mm if necessary */ + ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1); + if (ret) + goto err_free_bond; + + bond->mm = mm; + bond->sva.dev = dev; + refcount_set(&bond->refs, 1); + + bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm); + if (IS_ERR(bond->smmu_mn)) { + ret = PTR_ERR(bond->smmu_mn); + goto err_free_pasid; + } + + list_add(&bond->list, &master->bonds); + return &bond->sva; + +err_free_pasid: + iommu_sva_free_pasid(mm); +err_free_bond: + kfree(bond); + return ERR_PTR(ret); +} + +struct iommu_sva * +arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata) +{ + struct iommu_sva *handle; + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + + if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) + return ERR_PTR(-EINVAL); + + mutex_lock(&sva_lock); + handle = __arm_smmu_sva_bind(dev, mm); + mutex_unlock(&sva_lock); + return handle; +} + +void arm_smmu_sva_unbind(struct iommu_sva *handle) +{ + struct arm_smmu_bond *bond = sva_to_bond(handle); + + mutex_lock(&sva_lock); + if (refcount_dec_and_test(&bond->refs)) { + list_del(&bond->list); + arm_smmu_mmu_notifier_put(bond->smmu_mn); + iommu_sva_free_pasid(bond->mm); + kfree(bond); + } + mutex_unlock(&sva_lock); +} + +u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle) +{ + struct arm_smmu_bond *bond = sva_to_bond(handle); + + return bond->mm->pasid; +} + bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) { unsigned long reg, fld; @@ -246,3 +463,12 @@ int arm_smmu_master_disable_sva(struct arm_smmu_master *master) return 0; } + +void arm_smmu_sva_notifier_synchronize(void) +{ + /* + * Some MMU notifiers may still be waiting to be freed, using + * arm_smmu_mmu_notifier_free(). Wait for them. + */ + mmu_notifier_synchronize(); +} diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index e634bbe60573..034ed126e5c8 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -76,6 +76,12 @@ struct arm_smmu_option_prop { DEFINE_XARRAY_ALLOC1(arm_smmu_asid_xa); DEFINE_MUTEX(arm_smmu_asid_lock); +/* + * Special value used by SVA when a process dies, to quiesce a CD without + * disabling it. + */ +struct arm_smmu_ctx_desc quiet_cd = { 0 }; + static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"}, @@ -91,11 +97,6 @@ static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset, return smmu->base + offset; } -static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) -{ - return container_of(dom, struct arm_smmu_domain, domain); -} - static void parse_driver_options(struct arm_smmu_device *smmu) { int i = 0; @@ -983,7 +984,9 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid, * (2) Install a secondary CD, for SID+SSID traffic. * (3) Update ASID of a CD. Atomically write the first 64 bits of the * CD, then invalidate the old entry and mappings. - * (4) Remove a secondary CD. + * (4) Quiesce the context without clearing the valid bit. Disable + * translation, and ignore any translation fault. 
+ * (5) Remove a secondary CD. */ u64 val; bool cd_live; @@ -1000,8 +1003,10 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid, val = le64_to_cpu(cdptr[0]); cd_live = !!(val & CTXDESC_CD_0_V); - if (!cd) { /* (4) */ + if (!cd) { /* (5) */ val = 0; + } else if (cd == &quiet_cd) { /* (4) */ + val |= CTXDESC_CD_0_TCR_EPD0; } else if (cd_live) { /* (3) */ val &= ~CTXDESC_CD_0_ASID; val |= FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid); @@ -1794,6 +1799,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) mutex_init(&smmu_domain->init_mutex); INIT_LIST_HEAD(&smmu_domain->devices); spin_lock_init(&smmu_domain->devices_lock); + INIT_LIST_HEAD(&smmu_domain->mmu_notifiers); return &smmu_domain->domain; } @@ -2589,6 +2595,9 @@ static struct iommu_ops arm_smmu_ops = { .dev_feat_enabled = arm_smmu_dev_feature_enabled, .dev_enable_feat = arm_smmu_dev_enable_feature, .dev_disable_feat = arm_smmu_dev_disable_feature, + .sva_bind = arm_smmu_sva_bind, + .sva_unbind = arm_smmu_sva_unbind, + .sva_get_pasid = arm_smmu_sva_get_pasid, .pgsize_bitmap = -1UL, /* Restricted during device attach */ }; @@ -3611,6 +3620,12 @@ static const struct of_device_id arm_smmu_of_match[] = { }; MODULE_DEVICE_TABLE(of, arm_smmu_of_match); +static void arm_smmu_driver_unregister(struct platform_driver *drv) +{ + arm_smmu_sva_notifier_synchronize(); + platform_driver_unregister(drv); +} + static struct platform_driver arm_smmu_driver = { .driver = { .name = "arm-smmu-v3", @@ -3621,7 +3636,8 @@ static struct platform_driver arm_smmu_driver = { .remove = arm_smmu_device_remove, .shutdown = arm_smmu_device_shutdown, }; -module_platform_driver(arm_smmu_driver); +module_driver(arm_smmu_driver, platform_driver_register, + arm_smmu_driver_unregister); MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations"); MODULE_AUTHOR("Will Deacon "); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index d4b7f40ccb02..e03ca01e0908 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -678,10 +678,18 @@ struct arm_smmu_domain { struct list_head devices; spinlock_t devices_lock; + + struct list_head mmu_notifiers; }; +static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct arm_smmu_domain, domain); +} + extern struct xarray arm_smmu_asid_xa; extern struct mutex arm_smmu_asid_lock; +extern struct arm_smmu_ctx_desc quiet_cd; int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid, struct arm_smmu_ctx_desc *cd); @@ -694,6 +702,11 @@ bool arm_smmu_master_sva_supported(struct arm_smmu_master *master); bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master); int arm_smmu_master_enable_sva(struct arm_smmu_master *master); int arm_smmu_master_disable_sva(struct arm_smmu_master *master); +struct iommu_sva *arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, + void *drvdata); +void arm_smmu_sva_unbind(struct iommu_sva *handle); +u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle); +void arm_smmu_sva_notifier_synchronize(void); #else /* CONFIG_ARM_SMMU_V3_SVA */ static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) { @@ -719,5 +732,20 @@ static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master) { return -ENODEV; } + +static inline struct iommu_sva * +arm_smmu_sva_bind(struct device *dev, struct mm_struct *mm, void *drvdata) +{ + return ERR_PTR(-ENODEV); +} + +static 
inline void arm_smmu_sva_unbind(struct iommu_sva *handle) {} + +static inline u32 arm_smmu_sva_get_pasid(struct iommu_sva *handle) +{ + return IOMMU_PASID_INVALID; +} + +static inline void arm_smmu_sva_notifier_synchronize(void) {} #endif /* CONFIG_ARM_SMMU_V3_SVA */ #endif /* _ARM_SMMU_V3_H */ From 2f7e8c553e98d6fcddeaf18aa90ea908e3f1418e Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Fri, 6 Nov 2020 16:50:50 +0100 Subject: [PATCH 16/50] iommu/arm-smmu-v3: Hook up ATC invalidation to mm ops The invalidate_range() notifier is called for any change to the address space. Perform the required ATC invalidations. Signed-off-by: Jean-Philippe Brucker Link: https://lore.kernel.org/r/20201106155048.997886-5-jean-philippe@linaro.org Signed-off-by: Will Deacon --- .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 16 +++++++++++++++- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 18 ++++++++++++++++-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 2 ++ 3 files changed, 33 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c index 4d03481289ff..e13b092e6004 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c @@ -177,6 +177,16 @@ static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd) } } +static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn); + + arm_smmu_atc_inv_domain(smmu_mn->domain, mm->pasid, start, + end - start + 1); +} + static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) { struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn); @@ -195,6 +205,7 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd); arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid); + arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0); smmu_mn->cleared = true; mutex_unlock(&sva_lock); @@ -206,6 +217,7 @@ static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn) } static struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = { + .invalidate_range = arm_smmu_mm_invalidate_range, .release = arm_smmu_mm_release, .free_notifier = arm_smmu_mmu_notifier_free, }; @@ -278,8 +290,10 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn) * If we went through clear(), we've already invalidated, and no * new TLB entry can have been formed. */ - if (!smmu_mn->cleared) + if (!smmu_mn->cleared) { arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid); + arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0); + } /* Frees smmu_mn */ mmu_notifier_put(&smmu_mn->mn); diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 034ed126e5c8..bfa953a07cf7 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1524,6 +1524,20 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size, size_t inval_grain_shift = 12; unsigned long page_start, page_end; + /* + * ATS and PASID: + * + * If substream_valid is clear, the PCIe TLP is sent without a PASID + * prefix. In that case all ATC entries within the address range are + * invalidated, including those that were requested with a PASID! There + * is no way to invalidate only entries without PASID. 
+ * + * When using STRTAB_STE_1_S1DSS_SSID0 (reserving CD 0 for non-PASID + * traffic), translation requests without PASID create ATC entries + * without PASID, which must be invalidated with substream_valid clear. + * This has the unpleasant side-effect of invalidating all PASID-tagged + * ATC entries within the address range. + */ *cmd = (struct arm_smmu_cmdq_ent) { .opcode = CMDQ_OP_ATC_INV, .substream_valid = !!ssid, @@ -1582,8 +1596,8 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master) return arm_smmu_cmdq_issue_sync(master->smmu); } -static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, - int ssid, unsigned long iova, size_t size) +int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, + unsigned long iova, size_t size) { int i; unsigned long flags; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index e03ca01e0908..96c2e9565e00 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -695,6 +695,8 @@ int arm_smmu_write_ctx_desc(struct arm_smmu_domain *smmu_domain, int ssid, struct arm_smmu_ctx_desc *cd); void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid); bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd); +int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, + unsigned long iova, size_t size); #ifdef CONFIG_ARM_SMMU_V3_SVA bool arm_smmu_sva_supported(struct arm_smmu_device *smmu); From cf910f61aff3c1c7cac4dc0706811389051c0f98 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 25 Nov 2020 02:10:09 -0800 Subject: [PATCH 17/50] iommu/tegra-smmu: Unwrap tegra_smmu_group_get tegra_smmu_group_get() was added to group devices in different SWGROUPs, and it would return a NULL group pointer upon a mismatch at tegra_smmu_find_group(), so for most clients/devices it would very likely mismatch and need the generic_device_group() fallback. But now tegra_smmu_group_get() handles devices in the same SWGROUP too, which means that it either allocates a group for every new SWGROUP or directly returns an existing one upon matching a SWGROUP, i.e. every device goes through this function. So the only way to get a NULL group pointer in device_group() is a failure of either devm_kzalloc() or iommu_group_alloc(). In either case, calling generic_device_group() no longer makes sense. The devm_kzalloc() failure case is especially problematic: if devm_kzalloc() fails yet the generic_device_group() fallback succeeds, no group->list entry is created for other devices to match. This patch simply unwraps the function to clean it up.
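To make the devm_kzalloc() failure mode concrete, here is a sketch of the old fallback path (illustrative, not code from this patch): the allocation failure means no tegra_smmu_group is ever linked into the SMMU's group list, yet the fallback still hands back a usable-looking group:

	group = tegra_smmu_group_get(smmu, fwspec->ids[0]);
	if (!group)	/* devm_kzalloc() failed: nothing was linked into
			 * the SMMU's group list */
		group = generic_device_group(dev);	/* "succeeds", so a later
							 * device in the same SWGROUP
							 * has nothing to match */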
Signed-off-by: Nicolin Chen Tested-by: Dmitry Osipenko Reviewed-by: Dmitry Osipenko Acked-by: Thierry Reding Link: https://lore.kernel.org/r/20201125101013.14953-2-nicoleotsuka@gmail.com Signed-off-by: Will Deacon --- drivers/iommu/tegra-smmu.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 0becdbfea306..ec4c9dafff95 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -903,10 +903,12 @@ static void tegra_smmu_group_release(void *iommu_data) mutex_unlock(&smmu->lock); } -static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu, - unsigned int swgroup) +static struct iommu_group *tegra_smmu_device_group(struct device *dev) { + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct tegra_smmu *smmu = dev_iommu_priv_get(dev); const struct tegra_smmu_group_soc *soc; + unsigned int swgroup = fwspec->ids[0]; struct tegra_smmu_group *group; struct iommu_group *grp; @@ -950,19 +952,6 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu, return group->group; } -static struct iommu_group *tegra_smmu_device_group(struct device *dev) -{ - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct tegra_smmu *smmu = dev_iommu_priv_get(dev); - struct iommu_group *group; - - group = tegra_smmu_group_get(smmu, fwspec->ids[0]); - if (!group) - group = generic_device_group(dev); - - return group; -} - static int tegra_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) { From d5f583bf8654c231b781096bc1a186065cda72b3 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 25 Nov 2020 02:10:10 -0800 Subject: [PATCH 18/50] iommu/tegra-smmu: Expand mutex protection range This is used to protect the potential race condition at use_count, since probes of client drivers, calling attach_dev(), may run concurrently.
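Sketched as an interleaving, the window being closed looks like this (field names as in the driver):

	/*
	 * CPU 0: tegra_smmu_as_prepare()      CPU 1: tegra_smmu_as_prepare()
	 *   sees as->use_count == 0
	 *                                       sees as->use_count == 0
	 *   dma_map_page(as->pd)
	 *                                       dma_map_page(as->pd)
	 *   as->use_count++                     as->use_count++
	 *
	 * The page directory is mapped twice and the ASID is set up twice.
	 */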
Signed-off-by: Nicolin Chen Tested-by: Dmitry Osipenko Reviewed-by: Dmitry Osipenko Acked-by: Thierry Reding Link: https://lore.kernel.org/r/20201125101013.14953-3-nicoleotsuka@gmail.com Signed-off-by: Will Deacon --- drivers/iommu/tegra-smmu.c | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index ec4c9dafff95..6a3ecc334481 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -256,26 +256,19 @@ static int tegra_smmu_alloc_asid(struct tegra_smmu *smmu, unsigned int *idp) { unsigned long id; - mutex_lock(&smmu->lock); - id = find_first_zero_bit(smmu->asids, smmu->soc->num_asids); - if (id >= smmu->soc->num_asids) { - mutex_unlock(&smmu->lock); + if (id >= smmu->soc->num_asids) return -ENOSPC; - } set_bit(id, smmu->asids); *idp = id; - mutex_unlock(&smmu->lock); return 0; } static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id) { - mutex_lock(&smmu->lock); clear_bit(id, smmu->asids); - mutex_unlock(&smmu->lock); } static bool tegra_smmu_capable(enum iommu_cap cap) @@ -420,17 +413,21 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, struct tegra_smmu_as *as) { u32 value; - int err; + int err = 0; + + mutex_lock(&smmu->lock); if (as->use_count > 0) { as->use_count++; - return 0; + goto unlock; } as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD, DMA_TO_DEVICE); - if (dma_mapping_error(smmu->dev, as->pd_dma)) - return -ENOMEM; + if (dma_mapping_error(smmu->dev, as->pd_dma)) { + err = -ENOMEM; + goto unlock; + } /* We can't handle 64-bit DMA addresses */ if (!smmu_dma_addr_valid(smmu, as->pd_dma)) { @@ -453,24 +450,35 @@ static int tegra_smmu_as_prepare(struct tegra_smmu *smmu, as->smmu = smmu; as->use_count++; + mutex_unlock(&smmu->lock); + return 0; err_unmap: dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); +unlock: + mutex_unlock(&smmu->lock); + return err; } static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, struct tegra_smmu_as *as) { - if (--as->use_count > 0) + mutex_lock(&smmu->lock); + + if (--as->use_count > 0) { + mutex_unlock(&smmu->lock); return; + } tegra_smmu_free_asid(smmu, as->id); dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); as->smmu = NULL; + + mutex_unlock(&smmu->lock); } static int tegra_smmu_attach_dev(struct iommu_domain *domain, From 8750d207dc98d5f743c28ae41d50ebf8887a2106 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 25 Nov 2020 02:10:11 -0800 Subject: [PATCH 19/50] iommu/tegra-smmu: Use fwspec in tegra_smmu_(de)attach_dev In the tegra_smmu_(de)attach_dev() functions, we poll the DTB for each client's iommus property to get the swgroup ID in order to prepare the "as" and enable the smmu. However, tegra_smmu_configure() already prepared an fwspec for each client and added all swgroup IDs of the client's DT node to that fwspec. So this patch uses the fwspec in tegra_smmu_(de)attach_dev(), replacing the redundant DT polling code.
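For reference, the fwspec prepared at probe time already carries the swgroup IDs from the client's DT node, so the attach path reduces to a simple walk of fwspec->ids (a sketch; the DT line is illustrative):

	/* DT:  iommus = <&mc TEGRA_SWGROUP_DC>;  ->  fwspec->ids[0] holds the swgroup */
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	unsigned int i;

	for (i = 0; i < fwspec->num_ids; i++)
		tegra_smmu_enable(smmu, fwspec->ids[i], as->id);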
Signed-off-by: Nicolin Chen Tested-by: Dmitry Osipenko Reviewed-by: Dmitry Osipenko Acked-by: Thierry Reding Link: https://lore.kernel.org/r/20201125101013.14953-4-nicoleotsuka@gmail.com Signed-off-by: Will Deacon --- drivers/iommu/tegra-smmu.c | 56 ++++++++++++++++---------------------- 1 file changed, 23 insertions(+), 33 deletions(-) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 6a3ecc334481..297d49f3f80e 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -484,60 +484,50 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, static int tegra_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct tegra_smmu *smmu = dev_iommu_priv_get(dev); struct tegra_smmu_as *as = to_smmu_as(domain); - struct device_node *np = dev->of_node; - struct of_phandle_args args; - unsigned int index = 0; - int err = 0; + unsigned int index; + int err; - while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, - &args)) { - unsigned int swgroup = args.args[0]; - - if (args.np != smmu->dev->of_node) { - of_node_put(args.np); - continue; - } - - of_node_put(args.np); + if (!fwspec) + return -ENOENT; + for (index = 0; index < fwspec->num_ids; index++) { err = tegra_smmu_as_prepare(smmu, as); - if (err < 0) - return err; + if (err) + goto disable; - tegra_smmu_enable(smmu, swgroup, as->id); - index++; + tegra_smmu_enable(smmu, fwspec->ids[index], as->id); } if (index == 0) return -ENODEV; return 0; + +disable: + while (index--) { + tegra_smmu_disable(smmu, fwspec->ids[index], as->id); + tegra_smmu_as_unprepare(smmu, as); + } + + return err; } static void tegra_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) { + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct tegra_smmu_as *as = to_smmu_as(domain); - struct device_node *np = dev->of_node; struct tegra_smmu *smmu = as->smmu; - struct of_phandle_args args; - unsigned int index = 0; + unsigned int index; - while (!of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, - &args)) { - unsigned int swgroup = args.args[0]; + if (!fwspec) + return; - if (args.np != smmu->dev->of_node) { - of_node_put(args.np); - continue; - } - - of_node_put(args.np); - - tegra_smmu_disable(smmu, swgroup, as->id); + for (index = 0; index < fwspec->num_ids; index++) { + tegra_smmu_disable(smmu, fwspec->ids[index], as->id); tegra_smmu_as_unprepare(smmu, as); - index++; } } From 25938c73cd7918169f80196e288fc3abb81053e5 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 25 Nov 2020 02:10:12 -0800 Subject: [PATCH 20/50] iommu/tegra-smmu: Rework tegra_smmu_probe_device() The bus_set_iommu() in tegra_smmu_probe() enumerates all clients in order to call tegra_smmu_probe_device(), where each client searches its DT node for the smmu pointer and swgroup ID so as to configure an fwspec. But this requires a valid smmu pointer even before the mc and smmu drivers are probed. So in tegra_smmu_probe() we added a line of code to fill mc->smmu, marking it "a bit of a hack". This works for most clients in the DTB; however, it doesn't work for a client that doesn't exist in the DTB, a PCI device for example. Actually, if we return ERR_PTR(-ENODEV) in ->probe_device() when it's called from bus_set_iommu(), the iommu core will let everything carry on.
Then, when a client gets probed, of_iommu_configure() in the iommu core will search the DTB for the swgroup ID and call ->of_xlate() to prepare an fwspec, similar to what tegra_smmu_probe_device() and tegra_smmu_configure() did. It will then call tegra_smmu_probe_device() again, and this time we shall return the smmu->iommu pointer properly. So we can get rid of tegra_smmu_find() and tegra_smmu_configure() along with the DT polling code by letting the iommu core handle everything, except for one problem: we searched the iommus property in the DTB not only for the swgroup ID but also for the mc node, in order to get the mc->smmu pointer, call dev_iommu_priv_set() and return the smmu->iommu pointer. So we need another way to get the smmu pointer. Referencing the implementation of the sun50i-iommu driver: of_xlate() has the client's dev pointer, the mc node and the swgroup ID. This means that we can call dev_iommu_priv_set() in of_xlate() instead, and then simply get the smmu pointer in ->probe_device(). This patch reworks tegra_smmu_probe_device() by: 1) Removing the mc->smmu hack in tegra_smmu_probe() so as to return ERR_PTR(-ENODEV) from tegra_smmu_probe_device() during the tegra_smmu_probe()/tegra_mc_probe() stage. 2) Moving dev_iommu_priv_set() to of_xlate() so we can get the smmu pointer in tegra_smmu_probe_device(), replacing the DTB polling. 3) Removing tegra_smmu_configure() accordingly, since the iommu core takes care of it. This also fixes a problem where clients could previously be added to iommu groups before the iommu core initialized its default domain:

ubuntu@jetson:~$ dmesg | grep iommu
platform 50000000.host1x: Adding to iommu group 1
platform 57000000.gpu: Adding to iommu group 2
iommu: Default domain type: Translated
platform 54200000.dc: Adding to iommu group 3
platform 54240000.dc: Adding to iommu group 3
platform 54340000.vic: Adding to iommu group 4

Though this works fine with IOMMU_DOMAIN_UNMANAGED, it produces warnings when switching to IOMMU_DOMAIN_DMA:

iommu: Failed to allocate default IOMMU domain of type 0 for group (null) - Falling back to IOMMU_DOMAIN_DMA
iommu: Failed to allocate default IOMMU domain of type 0 for group (null) - Falling back to IOMMU_DOMAIN_DMA

Now, bypassing the first probe_device() call from bus_set_iommu() fixes the sequence:

ubuntu@jetson:~$ dmesg | grep iommu
iommu: Default domain type: Translated
tegra-host1x 50000000.host1x: Adding to iommu group 0
tegra-dc 54200000.dc: Adding to iommu group 1
tegra-dc 54240000.dc: Adding to iommu group 1
tegra-vic 54340000.vic: Adding to iommu group 2
nouveau 57000000.gpu: Adding to iommu group 3

Note that the dmesg log above was captured with IOMMU_DOMAIN_UNMANAGED.
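The resulting probe sequence, sketched:

	/*
	 * bus_set_iommu()           -> tegra_smmu_probe_device():
	 *                              dev_iommu_priv_get() == NULL,
	 *                              return ERR_PTR(-ENODEV), core carries on
	 * client driver probe:
	 *   of_iommu_configure()    -> tegra_smmu_of_xlate():
	 *                              dev_iommu_priv_set(dev, mc->smmu)
	 *   tegra_smmu_probe_device() (second call):
	 *                              dev_iommu_priv_get() != NULL,
	 *                              return &smmu->iommu
	 */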
Signed-off-by: Nicolin Chen Tested-by: Dmitry Osipenko Reviewed-by: Dmitry Osipenko Acked-by: Thierry Reding Link: https://lore.kernel.org/r/20201125101013.14953-5-nicoleotsuka@gmail.com Signed-off-by: Will Deacon --- drivers/iommu/tegra-smmu.c | 96 ++++++-------------------------------- 1 file changed, 15 insertions(+), 81 deletions(-) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 297d49f3f80e..f45ed43cf8db 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -797,75 +797,9 @@ static phys_addr_t tegra_smmu_iova_to_phys(struct iommu_domain *domain, return SMMU_PFN_PHYS(pfn) + SMMU_OFFSET_IN_PAGE(iova); } -static struct tegra_smmu *tegra_smmu_find(struct device_node *np) -{ - struct platform_device *pdev; - struct tegra_mc *mc; - - pdev = of_find_device_by_node(np); - if (!pdev) - return NULL; - - mc = platform_get_drvdata(pdev); - if (!mc) - return NULL; - - return mc->smmu; -} - -static int tegra_smmu_configure(struct tegra_smmu *smmu, struct device *dev, - struct of_phandle_args *args) -{ - const struct iommu_ops *ops = smmu->iommu.ops; - int err; - - err = iommu_fwspec_init(dev, &dev->of_node->fwnode, ops); - if (err < 0) { - dev_err(dev, "failed to initialize fwspec: %d\n", err); - return err; - } - - err = ops->of_xlate(dev, args); - if (err < 0) { - dev_err(dev, "failed to parse SW group ID: %d\n", err); - iommu_fwspec_free(dev); - return err; - } - - return 0; -} - static struct iommu_device *tegra_smmu_probe_device(struct device *dev) { - struct device_node *np = dev->of_node; - struct tegra_smmu *smmu = NULL; - struct of_phandle_args args; - unsigned int index = 0; - int err; - - while (of_parse_phandle_with_args(np, "iommus", "#iommu-cells", index, - &args) == 0) { - smmu = tegra_smmu_find(args.np); - if (smmu) { - err = tegra_smmu_configure(smmu, dev, &args); - of_node_put(args.np); - - if (err < 0) - return ERR_PTR(err); - - /* - * Only a single IOMMU master interface is currently - * supported by the Linux kernel, so abort after the - * first match. - */ - dev_iommu_priv_set(dev, smmu); - - break; - } - - of_node_put(args.np); - index++; - } + struct tegra_smmu *smmu = dev_iommu_priv_get(dev); if (!smmu) return ERR_PTR(-ENODEV); @@ -873,10 +807,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev) return &smmu->iommu; } -static void tegra_smmu_release_device(struct device *dev) -{ - dev_iommu_priv_set(dev, NULL); -} +static void tegra_smmu_release_device(struct device *dev) {} static const struct tegra_smmu_group_soc * tegra_smmu_find_group(struct tegra_smmu *smmu, unsigned int swgroup) @@ -953,8 +884,21 @@ static struct iommu_group *tegra_smmu_device_group(struct device *dev) static int tegra_smmu_of_xlate(struct device *dev, struct of_phandle_args *args) { + struct platform_device *iommu_pdev = of_find_device_by_node(args->np); + struct tegra_mc *mc = platform_get_drvdata(iommu_pdev); u32 id = args->args[0]; + /* + * Note: we are here releasing the reference of &iommu_pdev->dev, which + * is mc->dev. Although some functions in tegra_smmu_ops may keep using + * its private data beyond this point, it's still safe to do so because + * the SMMU parent device is the same as the MC, so the reference count + * isn't strictly necessary. + */ + put_device(&iommu_pdev->dev); + + dev_iommu_priv_set(dev, mc->smmu); + return iommu_fwspec_add_ids(dev, &id, 1); } @@ -1079,16 +1023,6 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, if (!smmu) return ERR_PTR(-ENOMEM); - /* - * This is a bit of a hack. 
Ideally we'd want to simply return this - * value. However the IOMMU registration process will attempt to add - * all devices to the IOMMU when bus_set_iommu() is called. In order - * not to rely on global variables to track the IOMMU instance, we - * set it here so that it can be looked up from the .probe_device() - * callback via the IOMMU device's .drvdata field. - */ - mc->smmu = smmu; - size = BITS_TO_LONGS(soc->num_asids) * sizeof(long); smmu->asids = devm_kzalloc(dev, size, GFP_KERNEL); From 541f29bb064345897843c02a9ac684fddefd87e5 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Wed, 25 Nov 2020 02:10:13 -0800 Subject: [PATCH 21/50] iommu/tegra-smmu: Add PCI support This patch simply adds support for PCI devices. Signed-off-by: Nicolin Chen Tested-by: Dmitry Osipenko Reviewed-by: Dmitry Osipenko Acked-by: Thierry Reding Link: https://lore.kernel.org/r/20201125101013.14953-6-nicoleotsuka@gmail.com Signed-off-by: Will Deacon --- drivers/iommu/tegra-smmu.c | 35 +++++++++++++++++++++++++---------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index f45ed43cf8db..4a3f095a1c26 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -865,7 +866,11 @@ static struct iommu_group *tegra_smmu_device_group(struct device *dev) group->smmu = smmu; group->soc = soc; - group->group = iommu_group_alloc(); + if (dev_is_pci(dev)) + group->group = pci_device_group(dev); + else + group->group = generic_device_group(dev); + if (IS_ERR(group->group)) { devm_kfree(smmu->dev, group); mutex_unlock(&smmu->lock); @@ -1075,22 +1080,32 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); err = iommu_device_register(&smmu->iommu); - if (err) { - iommu_device_sysfs_remove(&smmu->iommu); - return ERR_PTR(err); - } + if (err) + goto remove_sysfs; err = bus_set_iommu(&platform_bus_type, &tegra_smmu_ops); - if (err < 0) { - iommu_device_unregister(&smmu->iommu); - iommu_device_sysfs_remove(&smmu->iommu); - return ERR_PTR(err); - } + if (err < 0) + goto unregister; + +#ifdef CONFIG_PCI + err = bus_set_iommu(&pci_bus_type, &tegra_smmu_ops); + if (err < 0) + goto unset_platform_bus; +#endif if (IS_ENABLED(CONFIG_DEBUG_FS)) tegra_smmu_debugfs_init(smmu); return smmu; + +unset_platform_bus: __maybe_unused; + bus_set_iommu(&platform_bus_type, NULL); +unregister: + iommu_device_unregister(&smmu->iommu); +remove_sysfs: + iommu_device_sysfs_remove(&smmu->iommu); + + return ERR_PTR(err); } void tegra_smmu_remove(struct tegra_smmu *smmu) From 2a2b8eaa5b25668a6f717f94b55f4e3aaf87629d Mon Sep 17 00:00:00 2001 From: Tom Murphy Date: Tue, 24 Nov 2020 16:20:51 +0800 Subject: [PATCH 22/50] iommu: Handle freelists when using deferred flushing in iommu drivers Allow iommu_unmap_fast() to return newly freed page table pages and pass the freelist to queue_iova() in the dma-iommu ops path. This is useful for iommu drivers (in this case the intel iommu driver) which need to wait for the IOTLB to be flushed before newly freed/unmapped page table pages can actually be released. This way we can still batch IOTLB flush operations and handle the freelists.
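The path a freed page-table page takes with this change, sketched (function names as in the diffs below):

	/*
	 * ->unmap()               chains the page via page->freelist into
	 *                         iommu_iotlb_gather::freelist
	 * __iommu_dma_unmap()     hands gather.freelist to iommu_dma_free_iova()
	 * queue_iova(..., data)   stores the chain head as an unsigned long
	 * flush-queue drain       the IOTLB flush completes first, then
	 *                         iommu_dma_entry_dtor() walks the chain and
	 *                         frees each page
	 */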
Signed-off-by: Tom Murphy Signed-off-by: Lu Baolu Tested-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20201124082057.2614359-2-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- drivers/iommu/dma-iommu.c | 29 +++++++++++++------ drivers/iommu/intel/iommu.c | 55 ++++++++++++++++++++++++------------- include/linux/iommu.h | 1 + 3 files changed, 58 insertions(+), 27 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 0cbcd3fc3e7e..9c827a4d2207 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -49,6 +49,18 @@ struct iommu_dma_cookie { struct iommu_domain *fq_domain; }; +static void iommu_dma_entry_dtor(unsigned long data) +{ + struct page *freelist = (struct page *)data; + + while (freelist) { + unsigned long p = (unsigned long)page_address(freelist); + + freelist = freelist->freelist; + free_page(p); + } +} + static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie) { if (cookie->type == IOMMU_DMA_IOVA_COOKIE) @@ -343,7 +355,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, if (!cookie->fq_domain && !iommu_domain_get_attr(domain, DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE, &attr) && attr) { if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all, - NULL)) + iommu_dma_entry_dtor)) pr_warn("iova flush queue initialization failed\n"); else cookie->fq_domain = domain; @@ -440,7 +452,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain, } static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, - dma_addr_t iova, size_t size) + dma_addr_t iova, size_t size, struct page *freelist) { struct iova_domain *iovad = &cookie->iovad; @@ -449,7 +461,8 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie, cookie->msi_iova -= size; else if (cookie->fq_domain) /* non-strict mode */ queue_iova(iovad, iova_pfn(iovad, iova), - size >> iova_shift(iovad), 0); + size >> iova_shift(iovad), + (unsigned long)freelist); else free_iova_fast(iovad, iova_pfn(iovad, iova), size >> iova_shift(iovad)); @@ -474,7 +487,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, if (!cookie->fq_domain) iommu_iotlb_sync(domain, &iotlb_gather); - iommu_dma_free_iova(cookie, dma_addr, size); + iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist); } static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, @@ -496,7 +509,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, return DMA_MAPPING_ERROR; if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) { - iommu_dma_free_iova(cookie, iova, size); + iommu_dma_free_iova(cookie, iova, size, NULL); return DMA_MAPPING_ERROR; } return iova + iova_off; @@ -649,7 +662,7 @@ out_unmap: out_free_sg: sg_free_table(&sgt); out_free_iova: - iommu_dma_free_iova(cookie, iova, size); + iommu_dma_free_iova(cookie, iova, size, NULL); out_free_pages: __iommu_dma_free_pages(pages, count); return NULL; @@ -900,7 +913,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, return __finalise_sg(dev, sg, nents, iova); out_free_iova: - iommu_dma_free_iova(cookie, iova, iova_len); + iommu_dma_free_iova(cookie, iova, iova_len, NULL); out_restore_sg: __invalidate_sg(sg, nents); return 0; @@ -1228,7 +1241,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev, return msi_page; out_free_iova: - iommu_dma_free_iova(cookie, iova, size); + iommu_dma_free_iova(cookie, iova, size, NULL); out_free_page: kfree(msi_page); return NULL; diff --git 
a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 7b32703c0b47..fed0b3f0e25b 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -1243,17 +1243,17 @@ next: pages can only be freed after the IOTLB flush has been done. */ static struct page *domain_unmap(struct dmar_domain *domain, unsigned long start_pfn, - unsigned long last_pfn) + unsigned long last_pfn, + struct page *freelist) { - struct page *freelist; - BUG_ON(!domain_pfn_supported(domain, start_pfn)); BUG_ON(!domain_pfn_supported(domain, last_pfn)); BUG_ON(start_pfn > last_pfn); /* we don't need lock here; nobody else touches the iova range */ freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw), - domain->pgd, 0, start_pfn, last_pfn, NULL); + domain->pgd, 0, start_pfn, last_pfn, + freelist); /* free pgd */ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { @@ -2011,7 +2011,8 @@ static void domain_exit(struct dmar_domain *domain) if (domain->pgd) { struct page *freelist; - freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw)); + freelist = domain_unmap(domain, 0, + DOMAIN_MAX_PFN(domain->gaw), NULL); dma_free_pagelist(freelist); } @@ -3570,7 +3571,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) if (dev_is_pci(dev)) pdev = to_pci_dev(dev); - freelist = domain_unmap(domain, start_pfn, last_pfn); + freelist = domain_unmap(domain, start_pfn, last_pfn, NULL); if (intel_iommu_strict || (pdev && pdev->untrusted) || !has_iova_flush_queue(&domain->iovad)) { iommu_flush_iotlb_psi(iommu, domain, start_pfn, @@ -4636,7 +4637,8 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb, struct page *freelist; freelist = domain_unmap(si_domain, - start_vpfn, last_vpfn); + start_vpfn, last_vpfn, + NULL); rcu_read_lock(); for_each_active_iommu(iommu, drhd) @@ -5608,10 +5610,8 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain, struct iommu_iotlb_gather *gather) { struct dmar_domain *dmar_domain = to_dmar_domain(domain); - struct page *freelist = NULL; unsigned long start_pfn, last_pfn; - unsigned int npages; - int iommu_id, level = 0; + int level = 0; /* Cope with horrid API which requires us to unmap more than the size argument if it happens to be a large-page mapping. 
*/ @@ -5623,22 +5623,38 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain, start_pfn = iova >> VTD_PAGE_SHIFT; last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT; - freelist = domain_unmap(dmar_domain, start_pfn, last_pfn); - - npages = last_pfn - start_pfn + 1; - - for_each_domain_iommu(iommu_id, dmar_domain) - iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain, - start_pfn, npages, !freelist, 0); - - dma_free_pagelist(freelist); + gather->freelist = domain_unmap(dmar_domain, start_pfn, + last_pfn, gather->freelist); if (dmar_domain->max_addr == iova + size) dmar_domain->max_addr = iova; + iommu_iotlb_gather_add_page(domain, gather, iova, size); + return size; } +static void intel_iommu_tlb_sync(struct iommu_domain *domain, + struct iommu_iotlb_gather *gather) +{ + struct dmar_domain *dmar_domain = to_dmar_domain(domain); + unsigned long iova_pfn = IOVA_PFN(gather->start); + size_t size = gather->end - gather->start; + unsigned long start_pfn, last_pfn; + unsigned long nrpages; + int iommu_id; + + nrpages = aligned_nrpages(gather->start, size); + start_pfn = mm_to_dma_pfn(iova_pfn); + last_pfn = start_pfn + nrpages - 1; + + for_each_domain_iommu(iommu_id, dmar_domain) + iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain, + start_pfn, nrpages, !gather->freelist, 0); + + dma_free_pagelist(gather->freelist); +} + static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { @@ -6098,6 +6114,7 @@ const struct iommu_ops intel_iommu_ops = { .aux_get_pasid = intel_iommu_aux_get_pasid, .map = intel_iommu_map, .unmap = intel_iommu_unmap, + .iotlb_sync = intel_iommu_tlb_sync, .iova_to_phys = intel_iommu_iova_to_phys, .probe_device = intel_iommu_probe_device, .probe_finalize = intel_iommu_probe_finalize, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b95a6f8db6ff..e56bae327e35 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -180,6 +180,7 @@ struct iommu_iotlb_gather { unsigned long start; unsigned long end; size_t pgsize; + struct page *freelist; }; /** From 230309d08b871e439f8618db3610f2cc9b5f7c72 Mon Sep 17 00:00:00 2001 From: Tom Murphy Date: Tue, 24 Nov 2020 16:20:52 +0800 Subject: [PATCH 23/50] iommu: Add iommu_dma_free_cpu_cached_iovas() Add an iommu_dma_free_cpu_cached_iovas() function to allow drivers that use the dma-iommu ops to free cached CPU IOVAs.
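A hypothetical consumer (the function name and the way the domain is obtained are illustrative, not part of this patch) would drain a domain's per-CPU IOVA caches when a CPU goes offline:

	static int example_cpu_dead(unsigned int cpu, struct iommu_domain *domain)
	{
		/* domain is the DMA domain whose per-CPU caches should be drained */
		iommu_dma_free_cpu_cached_iovas(cpu, domain);
		return 0;
	}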
Signed-off-by: Tom Murphy Signed-off-by: Lu Baolu Tested-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20201124082057.2614359-3-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- drivers/iommu/dma-iommu.c | 9 +++++++++ include/linux/dma-iommu.h | 8 ++++++++ 2 files changed, 17 insertions(+) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 9c827a4d2207..de521e22bafb 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -49,6 +49,15 @@ struct iommu_dma_cookie { struct iommu_domain *fq_domain; }; +void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, + struct iommu_domain *domain) +{ + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + + free_cpu_cached_iovas(cpu, iovad); +} + static void iommu_dma_entry_dtor(unsigned long data) { struct page *freelist = (struct page *)data; diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 2112f21f73d8..706b68d1359b 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -37,6 +37,9 @@ void iommu_dma_compose_msi_msg(struct msi_desc *desc, void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list); +void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, + struct iommu_domain *domain); + #else /* CONFIG_IOMMU_DMA */ struct iommu_domain; @@ -78,5 +81,10 @@ static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_he { } +static inline void iommu_dma_free_cpu_cached_iovas(unsigned int cpu, + struct iommu_domain *domain) +{ +} + #endif /* CONFIG_IOMMU_DMA */ #endif /* __DMA_IOMMU_H */ From 82612d66d51d3bacdd789e31d2e875d2494b7514 Mon Sep 17 00:00:00 2001 From: Tom Murphy Date: Tue, 24 Nov 2020 16:20:53 +0800 Subject: [PATCH 24/50] iommu: Allow the dma-iommu api to use bounce buffers Allow the dma-iommu api to use bounce buffers for untrusted devices. This is a copy of the intel bounce buffer code. 
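One detail in the code below is worth spelling out: the bounce decision uses iova_offset(iovad, phys | org_size), which is non-zero iff either the buffer start or its length is misaligned with respect to the IOVA granule. For example, with a 4KiB granule:

	/*
	 *   phys = 0x1000, size = 0x1000:  (0x1000 | 0x1000) & 0xfff == 0
	 *                                  -> aligned, mapped in place
	 *   phys = 0x1200, size = 0x0800:  (0x1200 | 0x0800) & 0xfff != 0
	 *                                  -> bounced through swiotlb
	 */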
Co-developed-by: Lu Baolu Signed-off-by: Tom Murphy Signed-off-by: Lu Baolu Tested-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20201124082057.2614359-4-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- drivers/iommu/dma-iommu.c | 162 +++++++++++++++++++++++++++++++++++--- 1 file changed, 149 insertions(+), 13 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index de521e22bafb..83eb99bfb990 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -20,9 +20,11 @@ #include #include #include +#include #include #include #include +#include struct iommu_dma_msi_page { struct list_head list; @@ -499,6 +501,31 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr, iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist); } +static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct iommu_domain *domain = iommu_get_dma_domain(dev); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + phys_addr_t phys; + + phys = iommu_iova_to_phys(domain, dma_addr); + if (WARN_ON(!phys)) + return; + + __iommu_dma_unmap(dev, dma_addr, size); + + if (unlikely(is_swiotlb_buffer(phys))) + swiotlb_tbl_unmap_single(dev, phys, size, + iova_align(iovad, size), dir, attrs); +} + +static bool dev_is_untrusted(struct device *dev) +{ + return dev_is_pci(dev) && to_pci_dev(dev)->untrusted; +} + static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, size_t size, int prot, u64 dma_mask) { @@ -524,6 +551,54 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys, return iova + iova_off; } +static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys, + size_t org_size, dma_addr_t dma_mask, bool coherent, + enum dma_data_direction dir, unsigned long attrs) +{ + int prot = dma_info_to_prot(dir, coherent, attrs); + struct iommu_domain *domain = iommu_get_dma_domain(dev); + struct iommu_dma_cookie *cookie = domain->iova_cookie; + struct iova_domain *iovad = &cookie->iovad; + size_t aligned_size = org_size; + void *padding_start; + size_t padding_size; + dma_addr_t iova; + + /* + * If both the physical buffer start address and size are + * page aligned, we don't need to use a bounce page. + */ + if (IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev) && + iova_offset(iovad, phys | org_size)) { + aligned_size = iova_align(iovad, org_size); + phys = swiotlb_tbl_map_single(dev, phys, org_size, + aligned_size, dir, attrs); + + if (phys == DMA_MAPPING_ERROR) + return DMA_MAPPING_ERROR; + + /* Cleanup the padding area. 
*/ + padding_start = phys_to_virt(phys); + padding_size = aligned_size; + + if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && + (dir == DMA_TO_DEVICE || + dir == DMA_BIDIRECTIONAL)) { + padding_start += org_size; + padding_size -= org_size; + } + + memset(padding_start, 0, padding_size); + } + + iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask); + if ((iova == DMA_MAPPING_ERROR) && is_swiotlb_buffer(phys)) + swiotlb_tbl_unmap_single(dev, phys, org_size, + aligned_size, dir, attrs); + + return iova; +} + static void __iommu_dma_free_pages(struct page **pages, int count) { while (count--) @@ -697,11 +772,15 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev, { phys_addr_t phys; - if (dev_is_dma_coherent(dev)) + if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev)) return; phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); - arch_sync_dma_for_cpu(phys, size, dir); + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu(phys, size, dir); + + if (is_swiotlb_buffer(phys)) + swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_CPU); } static void iommu_dma_sync_single_for_device(struct device *dev, @@ -709,11 +788,15 @@ static void iommu_dma_sync_single_for_device(struct device *dev, { phys_addr_t phys; - if (dev_is_dma_coherent(dev)) + if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev)) return; phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); - arch_sync_dma_for_device(phys, size, dir); + if (is_swiotlb_buffer(phys)) + swiotlb_tbl_sync_single(dev, phys, size, dir, SYNC_FOR_DEVICE); + + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_device(phys, size, dir); } static void iommu_dma_sync_sg_for_cpu(struct device *dev, @@ -723,11 +806,17 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg; int i; - if (dev_is_dma_coherent(dev)) + if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev)) return; - for_each_sg(sgl, sg, nelems, i) - arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); + for_each_sg(sgl, sg, nelems, i) { + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); + + if (is_swiotlb_buffer(sg_phys(sg))) + swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, + dir, SYNC_FOR_CPU); + } } static void iommu_dma_sync_sg_for_device(struct device *dev, @@ -737,11 +826,17 @@ static void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg; int i; - if (dev_is_dma_coherent(dev)) + if (dev_is_dma_coherent(dev) && !dev_is_untrusted(dev)) return; - for_each_sg(sgl, sg, nelems, i) - arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); + for_each_sg(sgl, sg, nelems, i) { + if (is_swiotlb_buffer(sg_phys(sg))) + swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, + dir, SYNC_FOR_DEVICE); + + if (!dev_is_dma_coherent(dev)) + arch_sync_dma_for_device(sg_phys(sg), sg->length, dir); + } } static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, @@ -750,10 +845,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, { phys_addr_t phys = page_to_phys(page) + offset; bool coherent = dev_is_dma_coherent(dev); - int prot = dma_info_to_prot(dir, coherent, attrs); dma_addr_t dma_handle; - dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev)); + dma_handle = __iommu_dma_map_swiotlb(dev, phys, size, dma_get_mask(dev), + coherent, dir, attrs); if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) && dma_handle != DMA_MAPPING_ERROR) arch_sync_dma_for_device(phys, size, dir); @@ -765,7 +860,7 @@ static void 
iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, { if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir); - __iommu_dma_unmap(dev, dma_handle, size); + __iommu_dma_unmap_swiotlb(dev, dma_handle, size, dir, attrs); } /* @@ -843,6 +938,39 @@ static void __invalidate_sg(struct scatterlist *sg, int nents) } } +static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) + __iommu_dma_unmap_swiotlb(dev, sg_dma_address(s), + sg_dma_len(s), dir, attrs); +} + +static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *s; + int i; + + for_each_sg(sg, s, nents, i) { + sg_dma_address(s) = __iommu_dma_map_swiotlb(dev, sg_phys(s), + s->length, dma_get_mask(dev), + dev_is_dma_coherent(dev), dir, attrs); + if (sg_dma_address(s) == DMA_MAPPING_ERROR) + goto out_unmap; + sg_dma_len(s) = s->length; + } + + return nents; + +out_unmap: + iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + return 0; +} + /* * The DMA API client is passing in a scatterlist which could describe * any old buffer layout, but the IOMMU API requires everything to be @@ -869,6 +997,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) iommu_dma_sync_sg_for_device(dev, sg, nents, dir); + if (dev_is_untrusted(dev)) + return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs); + /* * Work out how much IOVA space we need, and align the segments to * IOVA granules for the IOMMU driver to handle. With some clever @@ -938,6 +1069,11 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir); + if (dev_is_untrusted(dev)) { + iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs); + return; + } + /* * The scatterlist segments are mapped into a single * contiguous IOVA allocation, so this is incredibly easy. From 65f746e8285f0a67d43517d86fedb9e29ead49f2 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 24 Nov 2020 16:20:54 +0800 Subject: [PATCH 25/50] iommu: Add quirk for Intel graphic devices in map_sg Combining the sg segments exposes a bug in the Intel i915 driver which causes visual artifacts and the screen to freeze. This is most likely because of how the i915 handles the returned list. It probably doesn't respect the returned value specifying the number of elements in the list and instead depends on the previous behaviour of the Intel iommu driver which would return the same number of elements in the output list as in the input list. 
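In other words, i915 expects a 1:1 correspondence between input and output entries (an illustrative example, not from the patch):

	/*
	 *   input sg:      [4K][4K][8K]   (3 entries)
	 *   i915 expects:  3 entries back, each segment restored 1:1
	 *   without quirk: dma-iommu may merge the IOVA-contiguous range into
	 *                  a single 16K entry and return nents == 1
	 */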
[ This has been fixed in the i915 tree, but we agreed to carry this fix temporarily in the iommu tree and revert it before 5.11 is released: https://lore.kernel.org/linux-iommu/20201103105442.GD22888@8bytes.org/ -- Will ] Signed-off-by: Tom Murphy Signed-off-by: Lu Baolu Tested-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20201124082057.2614359-5-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- drivers/iommu/dma-iommu.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 83eb99bfb990..5f49ed653f98 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -878,6 +878,33 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents, unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev); int i, count = 0; + /* + * The Intel graphics driver assumes that the returned + * sg list is not combined. This blocks the effort of converting the + * Intel IOMMU driver to the dma-iommu API. Add this quirk to make the + * device driver work; it should be removed once the issue is fixed in + * the i915 driver. + */ + if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) && + to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL && + (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) { + for_each_sg(sg, s, nents, i) { + unsigned int s_iova_off = sg_dma_address(s); + unsigned int s_length = sg_dma_len(s); + unsigned int s_iova_len = s->length; + + s->offset += s_iova_off; + s->length = s_length; + sg_dma_address(s) = dma_addr + s_iova_off; + sg_dma_len(s) = s_length; + dma_addr += s_iova_len; + + pr_info_once("sg combining disabled due to i915 driver\n"); + } + + return nents; + } + for_each_sg(sg, s, nents, i) { /* Restore this segment's original unaligned fields first */ unsigned int s_iova_off = sg_dma_address(s); From c062db039f40e868c371c36afe8d0fac64305b5d Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 24 Nov 2020 16:20:55 +0800 Subject: [PATCH 26/50] iommu/vt-d: Update domain geometry in iommu_ops.at(de)tach_dev The iommu-dma layer constrains IOVA allocation based on the domain geometry that the driver reports. Update the domain geometry every time a domain is attached to or detached from a device. Signed-off-by: Lu Baolu Tested-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20201124082057.2614359-6-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- drivers/iommu/intel/iommu.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index fed0b3f0e25b..b47372be4256 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -67,8 +67,8 @@ #define MAX_AGAW_WIDTH 64 #define MAX_AGAW_PFN_WIDTH (MAX_AGAW_WIDTH - VTD_PAGE_SHIFT) -#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1) -#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1) +#define __DOMAIN_MAX_PFN(gaw) ((((uint64_t)1) << ((gaw) - VTD_PAGE_SHIFT)) - 1) +#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << (gaw)) - 1) /* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR to match. That way, we can use 'unsigned long' for PFNs with impunity. 
*/ @@ -739,6 +739,18 @@ static void domain_update_iommu_cap(struct dmar_domain *domain) */ if (domain->nid == NUMA_NO_NODE) domain->nid = domain_update_device_node(domain); + + /* + * First-level translation restricts the input-address to a + * canonical address (i.e., address bits 63:N have the same + * value as address bit [N-1], where N is 48-bits with 4-level + * paging and 57-bits with 5-level paging). Hence, skip bit + * [N-1]. + */ + if (domain_use_first_level(domain)) + domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1); + else + domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw); } struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, From c588072bba6b54b4b946485228b0409f23cd68a6 Mon Sep 17 00:00:00 2001 From: Tom Murphy Date: Tue, 24 Nov 2020 16:20:56 +0800 Subject: [PATCH 27/50] iommu/vt-d: Convert intel iommu driver to the iommu ops Convert the intel iommu driver to the dma-iommu api. Remove the iova handling and reserve region code from the intel iommu driver. Signed-off-by: Tom Murphy Signed-off-by: Lu Baolu Tested-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20201124082057.2614359-7-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- drivers/iommu/intel/Kconfig | 1 + drivers/iommu/intel/iommu.c | 745 ++---------------------------------- 2 files changed, 43 insertions(+), 703 deletions(-) diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig index 5337ee1584b0..28a3d1596c76 100644 --- a/drivers/iommu/intel/Kconfig +++ b/drivers/iommu/intel/Kconfig @@ -13,6 +13,7 @@ config INTEL_IOMMU select DMAR_TABLE select SWIOTLB select IOASID + select IOMMU_DMA help DMA remapping (DMAR) devices support enables independent address translations for Direct Memory Access (DMA) from devices. diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index b47372be4256..30f69097264f 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -41,7 +42,6 @@ #include #include #include -#include #include #include #include @@ -382,9 +382,6 @@ struct device_domain_info *get_domain_info(struct device *dev) DEFINE_SPINLOCK(device_domain_lock); static LIST_HEAD(device_domain_list); -#define device_needs_bounce(d) (!intel_no_bounce && dev_is_pci(d) && \ - to_pci_dev(d)->untrusted) - /* * Iterate over elements in device_domain_list and call the specified * callback @fn against each element. 
@@ -1289,13 +1286,6 @@ static void dma_free_pagelist(struct page *freelist) } } -static void iova_entry_free(unsigned long data) -{ - struct page *freelist = (struct page *)data; - - dma_free_pagelist(freelist); -} - /* iommu handling */ static int iommu_alloc_root_entry(struct intel_iommu *iommu) { @@ -1660,19 +1650,17 @@ static inline void __mapping_notify_one(struct intel_iommu *iommu, iommu_flush_write_buffer(iommu); } -static void iommu_flush_iova(struct iova_domain *iovad) +static void intel_flush_iotlb_all(struct iommu_domain *domain) { - struct dmar_domain *domain; + struct dmar_domain *dmar_domain = to_dmar_domain(domain); int idx; - domain = container_of(iovad, struct dmar_domain, iovad); - - for_each_domain_iommu(idx, domain) { + for_each_domain_iommu(idx, dmar_domain) { struct intel_iommu *iommu = g_iommus[idx]; - u16 did = domain->iommu_did[iommu->seq_id]; + u16 did = dmar_domain->iommu_did[iommu->seq_id]; - if (domain_use_first_level(domain)) - domain_flush_piotlb(iommu, domain, 0, -1, 0); + if (domain_use_first_level(dmar_domain)) + domain_flush_piotlb(iommu, dmar_domain, 0, -1, 0); else iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); @@ -1954,48 +1942,6 @@ static int domain_detach_iommu(struct dmar_domain *domain, return count; } -static struct iova_domain reserved_iova_list; -static struct lock_class_key reserved_rbtree_key; - -static int dmar_init_reserved_ranges(void) -{ - struct pci_dev *pdev = NULL; - struct iova *iova; - int i; - - init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN); - - lockdep_set_class(&reserved_iova_list.iova_rbtree_lock, - &reserved_rbtree_key); - - /* IOAPIC ranges shouldn't be accessed by DMA */ - iova = reserve_iova(&reserved_iova_list, IOVA_PFN(IOAPIC_RANGE_START), - IOVA_PFN(IOAPIC_RANGE_END)); - if (!iova) { - pr_err("Reserve IOAPIC range failed\n"); - return -ENODEV; - } - - /* Reserve all PCI MMIO to avoid peer-to-peer access */ - for_each_pci_dev(pdev) { - struct resource *r; - - for (i = 0; i < PCI_NUM_RESOURCES; i++) { - r = &pdev->resource[i]; - if (!r->flags || !(r->flags & IORESOURCE_MEM)) - continue; - iova = reserve_iova(&reserved_iova_list, - IOVA_PFN(r->start), - IOVA_PFN(r->end)); - if (!iova) { - pci_err(pdev, "Reserve iova for %pR failed\n", r); - return -ENODEV; - } - } - } - return 0; -} - static inline int guestwidth_to_adjustwidth(int gaw) { int agaw; @@ -2018,7 +1964,7 @@ static void domain_exit(struct dmar_domain *domain) /* destroy iovas */ if (domain->domain.type == IOMMU_DOMAIN_DMA) - put_iova_domain(&domain->iovad); + iommu_put_dma_cookie(&domain->domain); if (domain->pgd) { struct page *freelist; @@ -2552,16 +2498,6 @@ struct dmar_domain *find_domain(struct device *dev) return NULL; } -static void do_deferred_attach(struct device *dev) -{ - struct iommu_domain *domain; - - dev_iommu_priv_set(dev, NULL); - domain = iommu_get_domain_for_dev(dev); - if (domain) - intel_iommu_attach_device(domain, dev); -} - static inline struct device_domain_info * dmar_search_domain_by_dev_info(int segment, int bus, int devfn) { @@ -3434,594 +3370,6 @@ error: return ret; } -/* This takes a number of _MM_ pages, not VTD pages */ -static unsigned long intel_alloc_iova(struct device *dev, - struct dmar_domain *domain, - unsigned long nrpages, uint64_t dma_mask) -{ - unsigned long iova_pfn; - - /* - * Restrict dma_mask to the width that the iommu can handle. 
- * First-level translation restricts the input-address to a - * canonical address (i.e., address bits 63:N have the same - * value as address bit [N-1], where N is 48-bits with 4-level - * paging and 57-bits with 5-level paging). Hence, skip bit - * [N-1]. - */ - if (domain_use_first_level(domain)) - dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw - 1), - dma_mask); - else - dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), - dma_mask); - - /* Ensure we reserve the whole size-aligned region */ - nrpages = __roundup_pow_of_two(nrpages); - - if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) { - /* - * First try to allocate an io virtual address in - * DMA_BIT_MASK(32) and if that fails then try allocating - * from higher range - */ - iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, - IOVA_PFN(DMA_BIT_MASK(32)), false); - if (iova_pfn) - return iova_pfn; - } - iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, - IOVA_PFN(dma_mask), true); - if (unlikely(!iova_pfn)) { - dev_err_once(dev, "Allocating %ld-page iova failed\n", - nrpages); - return 0; - } - - return iova_pfn; -} - -static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr, - size_t size, int dir, u64 dma_mask) -{ - struct dmar_domain *domain; - phys_addr_t start_paddr; - unsigned long iova_pfn; - int prot = 0; - int ret; - struct intel_iommu *iommu; - unsigned long paddr_pfn = paddr >> PAGE_SHIFT; - - BUG_ON(dir == DMA_NONE); - - if (unlikely(attach_deferred(dev))) - do_deferred_attach(dev); - - domain = find_domain(dev); - if (!domain) - return DMA_MAPPING_ERROR; - - iommu = domain_get_iommu(domain); - size = aligned_nrpages(paddr, size); - - iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask); - if (!iova_pfn) - goto error; - - /* - * Check if DMAR supports zero-length reads on write only - * mappings.. - */ - if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ - !cap_zlr(iommu->cap)) - prot |= DMA_PTE_READ; - if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) - prot |= DMA_PTE_WRITE; - /* - * paddr - (paddr + size) might be partial page, we should map the whole - * page. 
Note: if two part of one page are separately mapped, we - * might have two guest_addr mapping to the same host paddr, but this - * is not a big problem - */ - ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn), - mm_to_dma_pfn(paddr_pfn), size, prot); - if (ret) - goto error; - - start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT; - start_paddr += paddr & ~PAGE_MASK; - - trace_map_single(dev, start_paddr, paddr, size << VTD_PAGE_SHIFT); - - return start_paddr; - -error: - if (iova_pfn) - free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); - dev_err(dev, "Device request: %zx@%llx dir %d --- failed\n", - size, (unsigned long long)paddr, dir); - return DMA_MAPPING_ERROR; -} - -static dma_addr_t intel_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - return __intel_map_single(dev, page_to_phys(page) + offset, - size, dir, *dev->dma_mask); -} - -static dma_addr_t intel_map_resource(struct device *dev, phys_addr_t phys_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - return __intel_map_single(dev, phys_addr, size, dir, *dev->dma_mask); -} - -static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) -{ - struct dmar_domain *domain; - unsigned long start_pfn, last_pfn; - unsigned long nrpages; - unsigned long iova_pfn; - struct intel_iommu *iommu; - struct page *freelist; - struct pci_dev *pdev = NULL; - - domain = find_domain(dev); - BUG_ON(!domain); - - iommu = domain_get_iommu(domain); - - iova_pfn = IOVA_PFN(dev_addr); - - nrpages = aligned_nrpages(dev_addr, size); - start_pfn = mm_to_dma_pfn(iova_pfn); - last_pfn = start_pfn + nrpages - 1; - - if (dev_is_pci(dev)) - pdev = to_pci_dev(dev); - - freelist = domain_unmap(domain, start_pfn, last_pfn, NULL); - if (intel_iommu_strict || (pdev && pdev->untrusted) || - !has_iova_flush_queue(&domain->iovad)) { - iommu_flush_iotlb_psi(iommu, domain, start_pfn, - nrpages, !freelist, 0); - /* free iova */ - free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); - dma_free_pagelist(freelist); - } else { - queue_iova(&domain->iovad, iova_pfn, nrpages, - (unsigned long)freelist); - /* - * queue up the release of the unmap to save the 1/6th of the - * cpu used up by the iotlb flush operation... 
- */ - } - - trace_unmap_single(dev, dev_addr, size); -} - -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - intel_unmap(dev, dev_addr, size); -} - -static void intel_unmap_resource(struct device *dev, dma_addr_t dev_addr, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - intel_unmap(dev, dev_addr, size); -} - -static void *intel_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - unsigned long attrs) -{ - struct page *page = NULL; - int order; - - if (unlikely(attach_deferred(dev))) - do_deferred_attach(dev); - - size = PAGE_ALIGN(size); - order = get_order(size); - - if (gfpflags_allow_blocking(flags)) { - unsigned int count = size >> PAGE_SHIFT; - - page = dma_alloc_from_contiguous(dev, count, order, - flags & __GFP_NOWARN); - } - - if (!page) - page = alloc_pages(flags, order); - if (!page) - return NULL; - memset(page_address(page), 0, size); - - *dma_handle = __intel_map_single(dev, page_to_phys(page), size, - DMA_BIDIRECTIONAL, - dev->coherent_dma_mask); - if (*dma_handle != DMA_MAPPING_ERROR) - return page_address(page); - if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) - __free_pages(page, order); - - return NULL; -} - -static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - int order; - struct page *page = virt_to_page(vaddr); - - size = PAGE_ALIGN(size); - order = get_order(size); - - intel_unmap(dev, dma_handle, size); - if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) - __free_pages(page, order); -} - -static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir, - unsigned long attrs) -{ - dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK; - unsigned long nrpages = 0; - struct scatterlist *sg; - int i; - - for_each_sg(sglist, sg, nelems, i) { - nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg)); - } - - intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT); - - trace_unmap_sg(dev, startaddr, nrpages << VTD_PAGE_SHIFT); -} - -static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, - enum dma_data_direction dir, unsigned long attrs) -{ - int i; - struct dmar_domain *domain; - size_t size = 0; - int prot = 0; - unsigned long iova_pfn; - int ret; - struct scatterlist *sg; - unsigned long start_vpfn; - struct intel_iommu *iommu; - - BUG_ON(dir == DMA_NONE); - - if (unlikely(attach_deferred(dev))) - do_deferred_attach(dev); - - domain = find_domain(dev); - if (!domain) - return 0; - - iommu = domain_get_iommu(domain); - - for_each_sg(sglist, sg, nelems, i) - size += aligned_nrpages(sg->offset, sg->length); - - iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), - *dev->dma_mask); - if (!iova_pfn) { - sglist->dma_length = 0; - return 0; - } - - /* - * Check if DMAR supports zero-length reads on write only - * mappings.. 
- */ - if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || \ - !cap_zlr(iommu->cap)) - prot |= DMA_PTE_READ; - if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) - prot |= DMA_PTE_WRITE; - - start_vpfn = mm_to_dma_pfn(iova_pfn); - - ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); - if (unlikely(ret)) { - dma_pte_free_pagetable(domain, start_vpfn, - start_vpfn + size - 1, - agaw_to_level(domain->agaw) + 1); - free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size)); - return 0; - } - - for_each_sg(sglist, sg, nelems, i) - trace_map_sg(dev, i + 1, nelems, sg); - - return nelems; -} - -static u64 intel_get_required_mask(struct device *dev) -{ - return DMA_BIT_MASK(32); -} - -static const struct dma_map_ops intel_dma_ops = { - .alloc = intel_alloc_coherent, - .free = intel_free_coherent, - .map_sg = intel_map_sg, - .unmap_sg = intel_unmap_sg, - .map_page = intel_map_page, - .unmap_page = intel_unmap_page, - .map_resource = intel_map_resource, - .unmap_resource = intel_unmap_resource, - .dma_supported = dma_direct_supported, - .mmap = dma_common_mmap, - .get_sgtable = dma_common_get_sgtable, - .alloc_pages = dma_common_alloc_pages, - .free_pages = dma_common_free_pages, - .get_required_mask = intel_get_required_mask, -}; - -static void -bounce_sync_single(struct device *dev, dma_addr_t addr, size_t size, - enum dma_data_direction dir, enum dma_sync_target target) -{ - struct dmar_domain *domain; - phys_addr_t tlb_addr; - - domain = find_domain(dev); - if (WARN_ON(!domain)) - return; - - tlb_addr = intel_iommu_iova_to_phys(&domain->domain, addr); - if (is_swiotlb_buffer(tlb_addr)) - swiotlb_tbl_sync_single(dev, tlb_addr, size, dir, target); -} - -static dma_addr_t -bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size, - enum dma_data_direction dir, unsigned long attrs, - u64 dma_mask) -{ - size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE); - struct dmar_domain *domain; - struct intel_iommu *iommu; - unsigned long iova_pfn; - unsigned long nrpages; - phys_addr_t tlb_addr; - int prot = 0; - int ret; - - if (unlikely(attach_deferred(dev))) - do_deferred_attach(dev); - - domain = find_domain(dev); - - if (WARN_ON(dir == DMA_NONE || !domain)) - return DMA_MAPPING_ERROR; - - iommu = domain_get_iommu(domain); - if (WARN_ON(!iommu)) - return DMA_MAPPING_ERROR; - - nrpages = aligned_nrpages(0, size); - iova_pfn = intel_alloc_iova(dev, domain, - dma_to_mm_pfn(nrpages), dma_mask); - if (!iova_pfn) - return DMA_MAPPING_ERROR; - - /* - * Check if DMAR supports zero-length reads on write only - * mappings.. - */ - if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL || - !cap_zlr(iommu->cap)) - prot |= DMA_PTE_READ; - if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) - prot |= DMA_PTE_WRITE; - - /* - * If both the physical buffer start address and size are - * page aligned, we don't need to use a bounce page. - */ - if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) { - tlb_addr = swiotlb_tbl_map_single(dev, paddr, size, - aligned_size, dir, attrs); - if (tlb_addr == DMA_MAPPING_ERROR) { - goto swiotlb_error; - } else { - /* Cleanup the padding area. 
*/ - void *padding_start = phys_to_virt(tlb_addr); - size_t padding_size = aligned_size; - - if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && - (dir == DMA_TO_DEVICE || - dir == DMA_BIDIRECTIONAL)) { - padding_start += size; - padding_size -= size; - } - - memset(padding_start, 0, padding_size); - } - } else { - tlb_addr = paddr; - } - - ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn), - tlb_addr >> VTD_PAGE_SHIFT, nrpages, prot); - if (ret) - goto mapping_error; - - trace_bounce_map_single(dev, iova_pfn << PAGE_SHIFT, paddr, size); - - return (phys_addr_t)iova_pfn << PAGE_SHIFT; - -mapping_error: - if (is_swiotlb_buffer(tlb_addr)) - swiotlb_tbl_unmap_single(dev, tlb_addr, size, - aligned_size, dir, attrs); -swiotlb_error: - free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages)); - dev_err(dev, "Device bounce map: %zx@%llx dir %d --- failed\n", - size, (unsigned long long)paddr, dir); - - return DMA_MAPPING_ERROR; -} - -static void -bounce_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - size_t aligned_size = ALIGN(size, VTD_PAGE_SIZE); - struct dmar_domain *domain; - phys_addr_t tlb_addr; - - domain = find_domain(dev); - if (WARN_ON(!domain)) - return; - - tlb_addr = intel_iommu_iova_to_phys(&domain->domain, dev_addr); - if (WARN_ON(!tlb_addr)) - return; - - intel_unmap(dev, dev_addr, size); - if (is_swiotlb_buffer(tlb_addr)) - swiotlb_tbl_unmap_single(dev, tlb_addr, size, - aligned_size, dir, attrs); - - trace_bounce_unmap_single(dev, dev_addr, size); -} - -static dma_addr_t -bounce_map_page(struct device *dev, struct page *page, unsigned long offset, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - return bounce_map_single(dev, page_to_phys(page) + offset, - size, dir, attrs, *dev->dma_mask); -} - -static dma_addr_t -bounce_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - return bounce_map_single(dev, phys_addr, size, - dir, attrs, *dev->dma_mask); -} - -static void -bounce_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - bounce_unmap_single(dev, dev_addr, size, dir, attrs); -} - -static void -bounce_unmap_resource(struct device *dev, dma_addr_t dev_addr, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - bounce_unmap_single(dev, dev_addr, size, dir, attrs); -} - -static void -bounce_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, - enum dma_data_direction dir, unsigned long attrs) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sglist, sg, nelems, i) - bounce_unmap_page(dev, sg->dma_address, - sg_dma_len(sg), dir, attrs); -} - -static int -bounce_map_sg(struct device *dev, struct scatterlist *sglist, int nelems, - enum dma_data_direction dir, unsigned long attrs) -{ - int i; - struct scatterlist *sg; - - for_each_sg(sglist, sg, nelems, i) { - sg->dma_address = bounce_map_page(dev, sg_page(sg), - sg->offset, sg->length, - dir, attrs); - if (sg->dma_address == DMA_MAPPING_ERROR) - goto out_unmap; - sg_dma_len(sg) = sg->length; - } - - for_each_sg(sglist, sg, nelems, i) - trace_bounce_map_sg(dev, i + 1, nelems, sg); - - return nelems; - -out_unmap: - bounce_unmap_sg(dev, sglist, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); - return 0; -} - -static void -bounce_sync_single_for_cpu(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - bounce_sync_single(dev, addr, 
size, dir, SYNC_FOR_CPU); -} - -static void -bounce_sync_single_for_device(struct device *dev, dma_addr_t addr, - size_t size, enum dma_data_direction dir) -{ - bounce_sync_single(dev, addr, size, dir, SYNC_FOR_DEVICE); -} - -static void -bounce_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sglist, sg, nelems, i) - bounce_sync_single(dev, sg_dma_address(sg), - sg_dma_len(sg), dir, SYNC_FOR_CPU); -} - -static void -bounce_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, - int nelems, enum dma_data_direction dir) -{ - struct scatterlist *sg; - int i; - - for_each_sg(sglist, sg, nelems, i) - bounce_sync_single(dev, sg_dma_address(sg), - sg_dma_len(sg), dir, SYNC_FOR_DEVICE); -} - -static const struct dma_map_ops bounce_dma_ops = { - .alloc = intel_alloc_coherent, - .free = intel_free_coherent, - .map_sg = bounce_map_sg, - .unmap_sg = bounce_unmap_sg, - .map_page = bounce_map_page, - .unmap_page = bounce_unmap_page, - .sync_single_for_cpu = bounce_sync_single_for_cpu, - .sync_single_for_device = bounce_sync_single_for_device, - .sync_sg_for_cpu = bounce_sync_sg_for_cpu, - .sync_sg_for_device = bounce_sync_sg_for_device, - .map_resource = bounce_map_resource, - .unmap_resource = bounce_unmap_resource, - .alloc_pages = dma_common_alloc_pages, - .free_pages = dma_common_free_pages, - .dma_supported = dma_direct_supported, -}; - static inline int iommu_domain_cache_init(void) { int ret = 0; @@ -4689,7 +4037,7 @@ static void free_all_cpu_cached_iovas(unsigned int cpu) if (!domain || domain->domain.type != IOMMU_DOMAIN_DMA) continue; - free_cpu_cached_iovas(cpu, &domain->iovad); + iommu_dma_free_cpu_cached_iovas(cpu, &domain->domain); } } } @@ -4961,12 +4309,6 @@ int __init intel_iommu_init(void) if (list_empty(&dmar_atsr_units)) pr_info("No ATSR found\n"); - if (dmar_init_reserved_ranges()) { - if (force_on) - panic("tboot: Failed to reserve iommu ranges\n"); - goto out_free_reserved_range; - } - if (dmar_map_gfx) intel_iommu_gfx_mapped = 1; @@ -4977,7 +4319,7 @@ int __init intel_iommu_init(void) if (force_on) panic("tboot: Failed to initialize DMARs\n"); pr_err("Initialization failed\n"); - goto out_free_reserved_range; + goto out_free_dmar; } up_write(&dmar_global_lock); @@ -5018,8 +4360,6 @@ int __init intel_iommu_init(void) return 0; -out_free_reserved_range: - put_iova_domain(&reserved_iova_list); out_free_dmar: intel_iommu_free_dmars(); up_write(&dmar_global_lock); @@ -5117,17 +4457,6 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width) return 0; } -static void intel_init_iova_domain(struct dmar_domain *dmar_domain) -{ - init_iova_domain(&dmar_domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN); - copy_reserved_iova(&reserved_iova_list, &dmar_domain->iovad); - - if (!intel_iommu_strict && - init_iova_flush_queue(&dmar_domain->iovad, - iommu_flush_iova, iova_entry_free)) - pr_info("iova flush queue initialization failed\n"); -} - static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) { struct dmar_domain *dmar_domain; @@ -5147,8 +4476,9 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type) return NULL; } - if (type == IOMMU_DOMAIN_DMA) - intel_init_iova_domain(dmar_domain); + if (type == IOMMU_DOMAIN_DMA && + iommu_get_dma_cookie(&dmar_domain->domain)) + return NULL; domain = &dmar_domain->domain; domain->geometry.aperture_start = 0; @@ -5777,13 +5107,13 @@ static void intel_iommu_release_device(struct device *dev) 
static void intel_iommu_probe_finalize(struct device *dev) { - struct iommu_domain *domain; + dma_addr_t base = IOVA_START_PFN << VTD_PAGE_SHIFT; + struct iommu_domain *domain = iommu_get_domain_for_dev(dev); + struct dmar_domain *dmar_domain = to_dmar_domain(domain); - domain = iommu_get_domain_for_dev(dev); - if (device_needs_bounce(dev)) - set_dma_ops(dev, &bounce_dma_ops); - else if (domain && domain->type == IOMMU_DOMAIN_DMA) - set_dma_ops(dev, &intel_dma_ops); + if (domain && domain->type == IOMMU_DOMAIN_DMA) + iommu_setup_dma_ops(dev, base, + __DOMAIN_MAX_ADDR(dmar_domain->gaw) - base); else set_dma_ops(dev, NULL); } @@ -5896,19 +5226,6 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev) return ret; } -static void intel_iommu_apply_resv_region(struct device *dev, - struct iommu_domain *domain, - struct iommu_resv_region *region) -{ - struct dmar_domain *dmar_domain = to_dmar_domain(domain); - unsigned long start, end; - - start = IOVA_PFN(region->start); - end = IOVA_PFN(region->start + region->length - 1); - - WARN_ON_ONCE(!reserve_iova(&dmar_domain->iovad, start, end)); -} - static struct iommu_group *intel_iommu_device_group(struct device *dev) { if (dev_is_pci(dev)) @@ -6097,6 +5414,27 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain, return ret; } +static int +intel_iommu_domain_get_attr(struct iommu_domain *domain, + enum iommu_attr attr, void *data) +{ + switch (domain->type) { + case IOMMU_DOMAIN_UNMANAGED: + return -ENODEV; + case IOMMU_DOMAIN_DMA: + switch (attr) { + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: + *(int *)data = !intel_iommu_strict; + return 0; + default: + return -ENODEV; + } + break; + default: + return -EINVAL; + } +} + /* * Check that the device does not live on an external facing PCI port that is * marked as untrusted. Such devices should not be able to apply quirks and @@ -6118,6 +5456,7 @@ const struct iommu_ops intel_iommu_ops = { .capable = intel_iommu_capable, .domain_alloc = intel_iommu_domain_alloc, .domain_free = intel_iommu_domain_free, + .domain_get_attr = intel_iommu_domain_get_attr, .domain_set_attr = intel_iommu_domain_set_attr, .attach_dev = intel_iommu_attach_device, .detach_dev = intel_iommu_detach_device, @@ -6126,6 +5465,7 @@ const struct iommu_ops intel_iommu_ops = { .aux_get_pasid = intel_iommu_aux_get_pasid, .map = intel_iommu_map, .unmap = intel_iommu_unmap, + .flush_iotlb_all = intel_flush_iotlb_all, .iotlb_sync = intel_iommu_tlb_sync, .iova_to_phys = intel_iommu_iova_to_phys, .probe_device = intel_iommu_probe_device, @@ -6133,7 +5473,6 @@ const struct iommu_ops intel_iommu_ops = { .release_device = intel_iommu_release_device, .get_resv_regions = intel_iommu_get_resv_regions, .put_resv_regions = generic_iommu_put_resv_regions, - .apply_resv_region = intel_iommu_apply_resv_region, .device_group = intel_iommu_device_group, .dev_has_feat = intel_iommu_dev_has_feat, .dev_feat_enabled = intel_iommu_dev_feat_enabled, From 58a8bb39490db31acbe8f4e24593a88533b4d947 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 24 Nov 2020 16:20:57 +0800 Subject: [PATCH 28/50] iommu/vt-d: Cleanup after converting to dma-iommu ops Some cleanups after converting the driver to use dma-iommu ops. - Remove nobounce option; - Cleanup and simplify the path in domain mapping. 
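As an editorial sketch (not part of the patch itself): with dma-iommu walking scatterlists above the driver, the map path becomes purely pfn-based. A minimal illustration of the resulting call shape, using a hypothetical caller:

	/* Hypothetical caller: map nr_pages of physically contiguous memory. */
	static int example_map(struct dmar_domain *domain, unsigned long iov_pfn,
			       unsigned long phys_pfn, unsigned long nr_pages)
	{
		int prot = DMA_PTE_READ | DMA_PTE_WRITE;

		/* No scatterlist parameter any more; one pfn range per call. */
		return __domain_mapping(domain, iov_pfn, phys_pfn, nr_pages, prot);
	}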
Signed-off-by: Lu Baolu Tested-by: Logan Gunthorpe Link: https://lore.kernel.org/r/20201124082057.2614359-8-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- .../admin-guide/kernel-parameters.txt | 5 -- drivers/iommu/intel/iommu.c | 90 ++++++------------- 2 files changed, 28 insertions(+), 67 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 526d65d8573a..76b2a2063fd0 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1883,11 +1883,6 @@ Note that using this option lowers the security provided by tboot because it makes the system vulnerable to DMA attacks. - nobounce [Default off] - Disable bounce buffer for untrusted devices such as - the Thunderbolt devices. This will treat the untrusted - devices as the trusted ones, hence might expose security - risks of DMA attacks. intel_idle.max_cstate= [KNL,HW,ACPI,X86] 0 disables intel_idle and fall back on acpi_idle. diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 30f69097264f..6d519a699ac2 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -355,7 +355,6 @@ static int dmar_forcedac; static int intel_iommu_strict; static int intel_iommu_superpage = 1; static int iommu_identity_mapping; -static int intel_no_bounce; static int iommu_skip_te_disable; #define IDENTMAP_GFX 2 @@ -457,9 +456,6 @@ static int __init intel_iommu_setup(char *str) } else if (!strncmp(str, "tboot_noforce", 13)) { pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); intel_iommu_tboot_noforce = 1; - } else if (!strncmp(str, "nobounce", 8)) { - pr_info("Intel-IOMMU: No bounce buffer. This could expose security risks of DMA attacks\n"); - intel_no_bounce = 1; } str += strcspn(str, ","); @@ -2277,15 +2273,14 @@ static inline int hardware_largepage_caps(struct dmar_domain *domain, return level; } -static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, - struct scatterlist *sg, unsigned long phys_pfn, - unsigned long nr_pages, int prot) +static int +__domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + unsigned long phys_pfn, unsigned long nr_pages, int prot) { struct dma_pte *first_pte = NULL, *pte = NULL; - phys_addr_t pteval; - unsigned long sg_res = 0; unsigned int largepage_lvl = 0; unsigned long lvl_pages = 0; + phys_addr_t pteval; u64 attr; BUG_ON(!domain_pfn_supported(domain, iov_pfn + nr_pages - 1)); @@ -2297,26 +2292,14 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, if (domain_use_first_level(domain)) attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US; - if (!sg) { - sg_res = nr_pages; - pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr; - } + pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr; while (nr_pages > 0) { uint64_t tmp; - if (!sg_res) { - unsigned int pgoff = sg->offset & ~PAGE_MASK; - - sg_res = aligned_nrpages(sg->offset, sg->length); - sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff; - sg->dma_length = sg->length; - pteval = (sg_phys(sg) - pgoff) | attr; - phys_pfn = pteval >> VTD_PAGE_SHIFT; - } - if (!pte) { - largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res); + largepage_lvl = hardware_largepage_caps(domain, iov_pfn, + phys_pfn, nr_pages); first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, &largepage_lvl); if (!pte) @@ -2328,7 +2311,7 @@ static int __domain_mapping(struct 
dmar_domain *domain, unsigned long iov_pfn, pteval |= DMA_PTE_LARGE_PAGE; lvl_pages = lvl_to_nr_pages(largepage_lvl); - nr_superpages = sg_res / lvl_pages; + nr_superpages = nr_pages / lvl_pages; end_pfn = iov_pfn + nr_superpages * lvl_pages - 1; /* @@ -2362,48 +2345,45 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, lvl_pages = lvl_to_nr_pages(largepage_lvl); BUG_ON(nr_pages < lvl_pages); - BUG_ON(sg_res < lvl_pages); nr_pages -= lvl_pages; iov_pfn += lvl_pages; phys_pfn += lvl_pages; pteval += lvl_pages * VTD_PAGE_SIZE; - sg_res -= lvl_pages; /* If the next PTE would be the first in a new page, then we - need to flush the cache on the entries we've just written. - And then we'll need to recalculate 'pte', so clear it and - let it get set again in the if (!pte) block above. - - If we're done (!nr_pages) we need to flush the cache too. - - Also if we've been setting superpages, we may need to - recalculate 'pte' and switch back to smaller pages for the - end of the mapping, if the trailing size is not enough to - use another superpage (i.e. sg_res < lvl_pages). */ + * need to flush the cache on the entries we've just written. + * And then we'll need to recalculate 'pte', so clear it and + * let it get set again in the if (!pte) block above. + * + * If we're done (!nr_pages) we need to flush the cache too. + * + * Also if we've been setting superpages, we may need to + * recalculate 'pte' and switch back to smaller pages for the + * end of the mapping, if the trailing size is not enough to + * use another superpage (i.e. nr_pages < lvl_pages). + */ pte++; if (!nr_pages || first_pte_in_page(pte) || - (largepage_lvl > 1 && sg_res < lvl_pages)) { + (largepage_lvl > 1 && nr_pages < lvl_pages)) { domain_flush_cache(domain, first_pte, (void *)pte - (void *)first_pte); pte = NULL; } - - if (!sg_res && nr_pages) - sg = sg_next(sg); } + return 0; } -static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, - struct scatterlist *sg, unsigned long phys_pfn, - unsigned long nr_pages, int prot) +static int +domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, + unsigned long phys_pfn, unsigned long nr_pages, int prot) { int iommu_id, ret; struct intel_iommu *iommu; /* Do the real mapping first */ - ret = __domain_mapping(domain, iov_pfn, sg, phys_pfn, nr_pages, prot); + ret = __domain_mapping(domain, iov_pfn, phys_pfn, nr_pages, prot); if (ret) return ret; @@ -2415,20 +2395,6 @@ static int domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, return 0; } -static inline int domain_sg_mapping(struct dmar_domain *domain, unsigned long iov_pfn, - struct scatterlist *sg, unsigned long nr_pages, - int prot) -{ - return domain_mapping(domain, iov_pfn, sg, 0, nr_pages, prot); -} - -static inline int domain_pfn_mapping(struct dmar_domain *domain, unsigned long iov_pfn, - unsigned long phys_pfn, unsigned long nr_pages, - int prot) -{ - return domain_mapping(domain, iov_pfn, NULL, phys_pfn, nr_pages, prot); -} - static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) { unsigned long flags; @@ -2688,7 +2654,7 @@ static int iommu_domain_identity_map(struct dmar_domain *domain, */ dma_pte_clear_range(domain, first_vpfn, last_vpfn); - return __domain_mapping(domain, first_vpfn, NULL, + return __domain_mapping(domain, first_vpfn, first_vpfn, last_vpfn - first_vpfn + 1, DMA_PTE_READ|DMA_PTE_WRITE); } @@ -4942,8 +4908,8 @@ static int intel_iommu_map(struct iommu_domain *domain, /* Round up size to next multiple of 
PAGE_SIZE, if it and the low bits of hpa would take us onto the next page */ size = aligned_nrpages(hpa, size); - ret = domain_pfn_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT, - hpa >> VTD_PAGE_SHIFT, size, prot); + ret = domain_mapping(dmar_domain, iova >> VTD_PAGE_SHIFT, + hpa >> VTD_PAGE_SHIFT, size, prot); return ret; } From 28b41e2c6aebd3caf99a77a76843c0175876bc72 Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Tue, 24 Nov 2020 21:06:01 +0800 Subject: [PATCH 29/50] iommu: Move def_domain type check for untrusted device into core This way, the vendor iommu drivers are no longer required to provide the def_domain_type callback to always isolate the untrusted devices. Signed-off-by: Lu Baolu Cc: Shameerali Kolothum Thodi Link: https://lore.kernel.org/linux-iommu/243ce89c33fe4b9da4c56ba35acebf81@huawei.com/ Link: https://lore.kernel.org/r/20201124130604.2912899-2-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- drivers/iommu/intel/iommu.c | 7 ------- drivers/iommu/iommu.c | 16 +++++++--------- 2 files changed, 7 insertions(+), 16 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 1b1ca63e6bbe..83674e32e58b 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2916,13 +2916,6 @@ static int device_def_domain_type(struct device *dev) if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); - /* - * Prevent any device marked as untrusted from getting - * placed into the statically identity mapping domain. - */ - if (pdev->untrusted) - return IOMMU_DOMAIN_DMA; - if ((iommu_identity_mapping & IDENTMAP_AZALIA) && IS_AZALIA(pdev)) return IOMMU_DOMAIN_IDENTITY; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index b53446bb8c6b..9a5ec1c7a8ac 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -1460,12 +1460,14 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group); static int iommu_get_def_domain_type(struct device *dev) { const struct iommu_ops *ops = dev->bus->iommu_ops; - unsigned int type = 0; + + if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted) + return IOMMU_DOMAIN_DMA; if (ops->def_domain_type) - type = ops->def_domain_type(dev); + return ops->def_domain_type(dev); - return (type == 0) ? iommu_def_domain_type : type; + return 0; } static int iommu_group_alloc_default_domain(struct bus_type *bus, @@ -1507,7 +1509,7 @@ static int iommu_alloc_default_domain(struct iommu_group *group, if (group->default_domain) return 0; - type = iommu_get_def_domain_type(dev); + type = iommu_get_def_domain_type(dev) ? : iommu_def_domain_type; return iommu_group_alloc_default_domain(dev->bus, group, type); } @@ -1645,12 +1647,8 @@ struct __group_domain_type { static int probe_get_default_domain_type(struct device *dev, void *data) { - const struct iommu_ops *ops = dev->bus->iommu_ops; struct __group_domain_type *gtype = data; - unsigned int type = 0; - - if (ops->def_domain_type) - type = ops->def_domain_type(dev); + unsigned int type = iommu_get_def_domain_type(dev); if (type) { if (gtype->type && gtype->type != type) { From 08a27c1c3ecf5e1da193ce5f8fc97c3be16e75f0 Mon Sep 17 00:00:00 2001 From: Sai Praneeth Prakhya Date: Tue, 24 Nov 2020 21:06:02 +0800 Subject: [PATCH 30/50] iommu: Add support to change default domain of an iommu group Presently, the default domain of an iommu group is allocated at boot time and it cannot be changed later. So, the device would typically be either in identity (also known as pass-through) mode or in DMA mode as long as the machine is up and running.
There is no way to change the default domain type dynamically, i.e. after booting, a device cannot switch between identity mode and DMA mode. But, assume a use case wherein the user trusts the device and believes that the OS is secure enough and hence wants *only* this device to bypass the IOMMU (so that it can be high performing), while all the other devices go through the IOMMU (so that the system is protected). Presently, this use case is not supported. It will be helpful if there is some way to change the default domain of an iommu group dynamically. Hence, add such support. A privileged user could request the kernel to change the default domain type of an iommu group by writing to "/sys/kernel/iommu_groups//type" file. Presently, only three values are supported: 1. identity: all the DMA transactions from the device in this group are *not* translated by the iommu 2. DMA: all the DMA transactions from the device in this group are translated by the iommu 3. auto: change to the type the device was booted with Note: 1. The default domain of an iommu group with two or more devices cannot be changed. 2. The device in the iommu group shouldn't be bound to any driver. 3. The device shouldn't be assigned to a user for direct access. 4. The change request will fail if any device in the group has a mandatory default domain type and the requested one conflicts with that. Please see "Documentation/ABI/testing/sysfs-kernel-iommu_groups" for more information. Signed-off-by: Sai Praneeth Prakhya Signed-off-by: Lu Baolu Cc: Christoph Hellwig Cc: Joerg Roedel Cc: Ashok Raj Cc: Will Deacon Cc: Sohil Mehta Cc: Robin Murphy Cc: Jacob Pan Link: https://lore.kernel.org/r/20201124130604.2912899-3-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- drivers/iommu/iommu.c | 230 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 229 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 9a5ec1c7a8ac..b326389deea4 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -93,6 +93,8 @@ static void __iommu_detach_group(struct iommu_domain *domain, static int iommu_create_device_direct_mappings(struct iommu_group *group, struct device *dev); static struct iommu_group *iommu_group_get_for_dev(struct device *dev); +static ssize_t iommu_group_store_type(struct iommu_group *group, + const char *buf, size_t count); #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ struct iommu_group_attribute iommu_group_attr_##_name = \ @@ -525,7 +527,8 @@ static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL); static IOMMU_GROUP_ATTR(reserved_regions, 0444, iommu_group_show_resv_regions, NULL); -static IOMMU_GROUP_ATTR(type, 0444, iommu_group_show_type, NULL); +static IOMMU_GROUP_ATTR(type, 0644, iommu_group_show_type, + iommu_group_store_type); static void iommu_group_release(struct kobject *kobj) { @@ -3027,3 +3030,228 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle) return ops->sva_get_pasid(handle); } EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); + +/* + * Changes the default domain of an iommu group that has *only* one device + * + * @group: The group for which the default domain should be changed + * @prev_dev: The device in the group (this is used to make sure that the device + * hasn't changed after the caller has called this function) + * @type: The type of the new default domain that gets associated with the group + * + * Returns 0 on success and error code on failure + * + * Note: + * 1.
Presently, this function is called only when user requests to change the + * group's default domain type through /sys/kernel/iommu_groups//type + * Please take a closer look if intended to use for other purposes. + */ +static int iommu_change_dev_def_domain(struct iommu_group *group, + struct device *prev_dev, int type) +{ + struct iommu_domain *prev_dom; + struct group_device *grp_dev; + int ret, dev_def_dom; + struct device *dev; + + if (!group) + return -EINVAL; + + mutex_lock(&group->mutex); + + if (group->default_domain != group->domain) { + dev_err_ratelimited(prev_dev, "Group not assigned to default domain\n"); + ret = -EBUSY; + goto out; + } + + /* + * iommu group wasn't locked while acquiring device lock in + * iommu_group_store_type(). So, make sure that the device count hasn't + * changed while acquiring device lock. + * + * Changing default domain of an iommu group with two or more devices + * isn't supported because there could be a potential deadlock. Consider + * the following scenario. T1 is trying to acquire device locks of all + * the devices in the group and before it could acquire all of them, + * there could be another thread T2 (from different sub-system and use + * case) that has already acquired some of the device locks and might be + * waiting for T1 to release other device locks. + */ + if (iommu_group_device_count(group) != 1) { + dev_err_ratelimited(prev_dev, "Cannot change default domain: Group has more than one device\n"); + ret = -EINVAL; + goto out; + } + + /* Since group has only one device */ + grp_dev = list_first_entry(&group->devices, struct group_device, list); + dev = grp_dev->dev; + + if (prev_dev != dev) { + dev_err_ratelimited(prev_dev, "Cannot change default domain: Device has been changed\n"); + ret = -EBUSY; + goto out; + } + + prev_dom = group->default_domain; + if (!prev_dom) { + ret = -EINVAL; + goto out; + } + + dev_def_dom = iommu_get_def_domain_type(dev); + if (!type) { + /* + * If the user hasn't requested any specific type of domain and + * if the device supports both the domains, then default to the + * domain the device was booted with + */ + type = dev_def_dom ? : iommu_def_domain_type; + } else if (dev_def_dom && type != dev_def_dom) { + dev_err_ratelimited(prev_dev, "Device cannot be in %s domain\n", + iommu_domain_type_str(type)); + ret = -EINVAL; + goto out; + } + + /* + * Switch to a new domain only if the requested domain type is different + * from the existing default domain type + */ + if (prev_dom->type == type) { + ret = 0; + goto out; + } + + /* Sets group->default_domain to the newly allocated domain */ + ret = iommu_group_alloc_default_domain(dev->bus, group, type); + if (ret) + goto out; + + ret = iommu_create_device_direct_mappings(group, dev); + if (ret) + goto free_new_domain; + + ret = __iommu_attach_device(group->default_domain, dev); + if (ret) + goto free_new_domain; + + group->domain = group->default_domain; + + /* + * Release the mutex here because ops->probe_finalize() call-back of + * some vendor IOMMU drivers calls arm_iommu_attach_device() which + * in-turn might call back into IOMMU core code, where it tries to take + * group->mutex, resulting in a deadlock. 
+ */ + mutex_unlock(&group->mutex); + + /* Make sure dma_ops is appropriately set */ + iommu_group_do_probe_finalize(dev, group->default_domain); + iommu_domain_free(prev_dom); + return 0; + +free_new_domain: + iommu_domain_free(group->default_domain); + group->default_domain = prev_dom; + group->domain = prev_dom; + +out: + mutex_unlock(&group->mutex); + + return ret; +} + +/* + * Changing the default domain through sysfs requires the user to unbind the + * drivers from the devices in the iommu group. Return failure if this + * requirement is not met. + * + * We need to consider the race between this and the device release path. + * device_lock(dev) is used here to guarantee that the device release path + * will not be entered at the same time. + */ +static ssize_t iommu_group_store_type(struct iommu_group *group, + const char *buf, size_t count) +{ + struct group_device *grp_dev; + struct device *dev; + int ret, req_type; + + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) + return -EACCES; + + if (WARN_ON(!group)) + return -EINVAL; + + if (sysfs_streq(buf, "identity")) + req_type = IOMMU_DOMAIN_IDENTITY; + else if (sysfs_streq(buf, "DMA")) + req_type = IOMMU_DOMAIN_DMA; + else if (sysfs_streq(buf, "auto")) + req_type = 0; + else + return -EINVAL; + + /* + * Lock/Unlock the group mutex here before device lock to + * 1. Make sure that the iommu group has only one device (this is a + * prerequisite for step 2) + * 2. Get struct *dev which is needed to lock device + */ + mutex_lock(&group->mutex); + if (iommu_group_device_count(group) != 1) { + mutex_unlock(&group->mutex); + pr_err_ratelimited("Cannot change default domain: Group has more than one device\n"); + return -EINVAL; + } + + /* Since group has only one device */ + grp_dev = list_first_entry(&group->devices, struct group_device, list); + dev = grp_dev->dev; + get_device(dev); + + /* + * Don't hold the group mutex because taking group mutex first and then + * the device lock could potentially cause a deadlock as below. Assume + * two threads T1 and T2. T1 is trying to change default domain of an + * iommu group and T2 is trying to hot unplug a device or release [1] VF + * of a PCIe device which is in the same iommu group. T1 takes group + * mutex and before it could take device lock assume T2 has taken device + * lock and is yet to take group mutex. Now, both the threads will be + * waiting for the other thread to release lock. The following lock + * order is therefore suggested.
+ * device_lock(dev); + * mutex_lock(&group->mutex); + * iommu_change_dev_def_domain(); + * mutex_unlock(&group->mutex); + * device_unlock(dev); + * + * [1] Typical device release path + * device_lock() from device/driver core code + * -> bus_notifier() + * -> iommu_bus_notifier() + * -> iommu_release_device() + * -> ops->release_device() vendor driver calls back iommu core code + * -> mutex_lock() from iommu core code + */ + mutex_unlock(&group->mutex); + + /* Check if the device in the group still has a driver bound to it */ + device_lock(dev); + if (device_is_bound(dev)) { + pr_err_ratelimited("Device is still bound to driver\n"); + ret = -EBUSY; + goto out; + } + + ret = iommu_change_dev_def_domain(group, dev, req_type); + ret = ret ?: count; + +out: + device_unlock(dev); + put_device(dev); + + return ret; +} From 0b8a96a3120ffe4d3571d93902693c59f90c3d0c Mon Sep 17 00:00:00 2001 From: Sai Praneeth Prakhya Date: Tue, 24 Nov 2020 21:06:03 +0800 Subject: [PATCH 31/50] iommu: Take lock before reading iommu group default domain type "/sys/kernel/iommu_groups//type" file could be read to find out the default domain type of an iommu group. The default domain of an iommu group doesn't change after booting and hence could be read directly. But, after adding support to dynamically change the iommu group default domain, the above assumption no longer holds. The iommu group default domain type could be changed at any time by writing to "/sys/kernel/iommu_groups//type". So, take the group mutex before reading the iommu group default domain type so that the user doesn't see stale values and iommu_group_show_type() doesn't dereference stale pointers. Signed-off-by: Sai Praneeth Prakhya Signed-off-by: Lu Baolu Cc: Christoph Hellwig Cc: Joerg Roedel Cc: Ashok Raj Cc: Will Deacon Cc: Sohil Mehta Cc: Robin Murphy Cc: Jacob Pan Link: https://lore.kernel.org/r/20201124130604.2912899-4-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- drivers/iommu/iommu.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index b326389deea4..cc06dcadff8f 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -501,6 +501,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group, { char *type = "unknown\n"; + mutex_lock(&group->mutex); if (group->default_domain) { switch (group->default_domain->type) { case IOMMU_DOMAIN_BLOCKED: @@ -517,6 +518,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group, break; } } + mutex_unlock(&group->mutex); strcpy(buf, type); return strlen(type); From 63a816749d8670e4a92da5aecfb91238821a3d97 Mon Sep 17 00:00:00 2001 From: Sai Praneeth Prakhya Date: Tue, 24 Nov 2020 21:06:04 +0800 Subject: [PATCH 32/50] iommu: Document usage of "/sys/kernel/iommu_groups//type" file The default domain type of an iommu group can be changed by writing to the "/sys/kernel/iommu_groups//type" file. Hence, document its usage and, more importantly, spell out its limitations.
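As an illustrative aside (not part of the patch), a privileged user-space tool might exercise the interface as sketched below; the group number 4 is hypothetical, and the device's driver must already be unbound:

	#include <fcntl.h>
	#include <unistd.h>

	int main(void)
	{
		/* Hypothetical group id; valid writes are "identity", "DMA", "auto". */
		int fd = open("/sys/kernel/iommu_groups/4/type", O_WRONLY);
		int ret = 0;

		if (fd < 0)
			return 1;
		if (write(fd, "identity", 8) != 8)
			ret = 1;
		close(fd);
		return ret;
	}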
Signed-off-by: Sai Praneeth Prakhya Signed-off-by: Lu Baolu Cc: Christoph Hellwig Cc: Joerg Roedel Cc: Ashok Raj Cc: Will Deacon Cc: Sohil Mehta Cc: Robin Murphy Cc: Jacob Pan Link: https://lore.kernel.org/r/20201124130604.2912899-5-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- .../ABI/testing/sysfs-kernel-iommu_groups | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups b/Documentation/ABI/testing/sysfs-kernel-iommu_groups index 017f5bc3920c..407b1628d7fd 100644 --- a/Documentation/ABI/testing/sysfs-kernel-iommu_groups +++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups @@ -33,3 +33,32 @@ Description: In case an RMRR is used only by graphics or USB devices it is now exposed as "direct-relaxable" instead of "direct". In device assignment use case, for instance, those RMRR are considered to be relaxable and safe. + +What: /sys/kernel/iommu_groups//type +Date: November 2020 +KernelVersion: v5.11 +Contact: Sai Praneeth Prakhya +Description: /sys/kernel/iommu_groups//type shows the type of default + domain in use by iommu for this group. See include/linux/iommu.h + for possible values. A privileged user could request kernel to + change the group type by writing to this file. Presently, only + three types of request are supported: + 1. DMA: All the DMA transactions from the device in this group + are translated by the iommu. + 2. identity: All the DMA transactions from the device in this + group are *not* translated by the iommu. + 3. auto: Change to the type the device was booted with. + Note: + ----- + The default domain type of a group may be modified only when + 1. The group has *only* one device + 2. The device in the group is not bound to any device driver. + So, the users must unbind the appropriate driver before + changing the default domain type. + Caution: + -------- + Unbinding a device driver will take away the driver's control + over the device and if done on devices that host root file + system could lead to catastrophic effects (the users might + need to reboot the machine to get it to normal state). So, it's + expected that the users understand what they're doing. From c99110a865a3b0e3203a8b3101eae03ae49a1cf2 Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Wed, 25 Nov 2020 12:30:12 +0530 Subject: [PATCH 33/50] iommu/arm-smmu: Add support for pagetable config domain attribute Add support for domain attribute DOMAIN_ATTR_IO_PGTABLE_CFG to get/set pagetable configuration data which initially will be used to set quirks and later can be extended to include other pagetable configuration data. 
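A hedged consumer-side sketch (the consumer is not part of this patch): a client driver could program a pagetable quirk through the new attribute, provided it does so before the domain is attached; IO_PGTABLE_QUIRK_NON_STRICT stands in here for whichever quirk the client actually needs:

	struct io_pgtable_domain_attr pgtbl_cfg = {
		.quirks = IO_PGTABLE_QUIRK_NON_STRICT,	/* example quirk only */
	};
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	int ret;

	/*
	 * Must run before the domain is attached; once smmu_domain->smmu is
	 * set, the set_attr path below returns -EPERM.
	 */
	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);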
Signed-off-by: Sai Prakash Ranjan Link: https://lore.kernel.org/r/2ab52ced2f853115c32461259a075a2877feffa6.1606287059.git.saiprakash.ranjan@codeaurora.org Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu/arm-smmu.c | 20 ++++++++++++++++++++ drivers/iommu/arm/arm-smmu/arm-smmu.h | 1 + 2 files changed, 21 insertions(+) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 0f28a8614da3..4b9b10fe50ed 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -789,6 +789,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, if (smmu_domain->non_strict) pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; + if (smmu_domain->pgtbl_cfg.quirks) + pgtbl_cfg.quirks |= smmu_domain->pgtbl_cfg.quirks; + pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) { ret = -ENOMEM; @@ -1511,6 +1514,12 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, case DOMAIN_ATTR_NESTING: *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); return 0; + case DOMAIN_ATTR_IO_PGTABLE_CFG: { + struct io_pgtable_domain_attr *pgtbl_cfg = data; + *pgtbl_cfg = smmu_domain->pgtbl_cfg; + + return 0; + } default: return -ENODEV; } @@ -1551,6 +1560,17 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, else smmu_domain->stage = ARM_SMMU_DOMAIN_S1; break; + case DOMAIN_ATTR_IO_PGTABLE_CFG: { + struct io_pgtable_domain_attr *pgtbl_cfg = data; + + if (smmu_domain->smmu) { + ret = -EPERM; + goto out_unlock; + } + + smmu_domain->pgtbl_cfg = *pgtbl_cfg; + break; + } default: ret = -ENODEV; } diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h index 04288b6fc619..bb5a419f240f 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.h +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h @@ -364,6 +364,7 @@ enum arm_smmu_domain_stage { struct arm_smmu_domain { struct arm_smmu_device *smmu; struct io_pgtable_ops *pgtbl_ops; + struct io_pgtable_domain_attr pgtbl_cfg; const struct iommu_flush_ops *flush_ops; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; From 12bc36793fd6dbc910a6d7c5bec707274815b3c0 Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Wed, 25 Nov 2020 12:30:13 +0530 Subject: [PATCH 34/50] iommu/arm-smmu: Move non-strict mode to use io_pgtable_domain_attr Now that we have a struct io_pgtable_domain_attr with quirks, use that for non_strict mode as well thereby removing the need for more members of arm_smmu_domain in the future. 
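In effect (a sketch distilled from the hunks below), the dedicated flag becomes a bit in the shared quirks word:

	/* was: smmu_domain->non_strict = true; */
	smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	/* query side */
	bool non_strict = smmu_domain->pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT;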
Signed-off-by: Sai Prakash Ranjan Link: https://lore.kernel.org/r/c191265f3db1f6b3e136d4057ca917666680a066.1606287059.git.saiprakash.ranjan@codeaurora.org Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu/arm-smmu.c | 15 +++++++++------ drivers/iommu/arm/arm-smmu/arm-smmu.h | 1 - 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 4b9b10fe50ed..d8979bb71fc0 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -786,9 +786,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, goto out_clear_smmu; } - if (smmu_domain->non_strict) - pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; - if (smmu_domain->pgtbl_cfg.quirks) pgtbl_cfg.quirks |= smmu_domain->pgtbl_cfg.quirks; @@ -1526,9 +1523,12 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain, break; case IOMMU_DOMAIN_DMA: switch (attr) { - case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - *(int *)data = smmu_domain->non_strict; + case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: { + bool non_strict = smmu_domain->pgtbl_cfg.quirks & + IO_PGTABLE_QUIRK_NON_STRICT; + *(int *)data = non_strict; return 0; + } default: return -ENODEV; } @@ -1578,7 +1578,10 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, case IOMMU_DOMAIN_DMA: switch (attr) { case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - smmu_domain->non_strict = *(int *)data; + if (*(int *)data) + smmu_domain->pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT; + else + smmu_domain->pgtbl_cfg.quirks &= ~IO_PGTABLE_QUIRK_NON_STRICT; break; default: ret = -ENODEV; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h index bb5a419f240f..cb7ca3a444c9 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.h +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h @@ -368,7 +368,6 @@ struct arm_smmu_domain { const struct iommu_flush_ops *flush_ops; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; - bool non_strict; struct mutex init_mutex; /* Protects smmu pointer */ spinlock_t cb_lock; /* Serialises ATS1* ops and TLB syncs */ struct iommu_domain domain; From 00597f9ff5eccd8b90e34cbd963471c6befcad98 Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Wed, 25 Nov 2020 12:30:17 +0530 Subject: [PATCH 35/50] iommu: arm-smmu-impl: Use table to list QCOM implementations Use a table and of_match_node() to match the qcom implementation instead of multiple of_device_is_compatible() calls for each QCOM SMMU implementation.
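The match-table pattern being adopted, reduced to its core (identifiers as in the hunks below):

	static const struct of_device_id qcom_smmu_impl_of_match[] = {
		{ .compatible = "qcom,sdm845-smmu-500" },
		/* ... one entry per supported SoC ... */
		{ }
	};

	if (of_match_node(qcom_smmu_impl_of_match, smmu->dev->of_node))
		return qcom_smmu_create(smmu, &qcom_smmu_impl);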
Signed-off-by: Sai Prakash Ranjan Acked-by: Will Deacon Link: https://lore.kernel.org/r/4e11899bc02102a6e6155db215911e8b5aaba950.1606287059.git.saiprakash.ranjan@codeaurora.org Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu/arm-smmu-impl.c | 9 +-------- drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c | 21 ++++++++++++++++----- drivers/iommu/arm/arm-smmu/arm-smmu.h | 1 - 3 files changed, 17 insertions(+), 14 deletions(-) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c index 7fed89c9d18a..26e2734eb4d7 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c @@ -214,14 +214,7 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) if (of_device_is_compatible(np, "nvidia,tegra194-smmu")) return nvidia_smmu_impl_init(smmu); - if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") || - of_device_is_compatible(np, "qcom,sc7180-smmu-500") || - of_device_is_compatible(np, "qcom,sm8150-smmu-500") || - of_device_is_compatible(np, "qcom,sm8250-smmu-500")) - return qcom_smmu_impl_init(smmu); - - if (of_device_is_compatible(smmu->dev->of_node, "qcom,adreno-smmu")) - return qcom_adreno_smmu_impl_init(smmu); + smmu = qcom_smmu_impl_init(smmu); if (of_device_is_compatible(np, "marvell,ap806-smmu-500")) smmu->impl = &mrvl_mmu500_impl; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index d0636c803a36..add1859b2899 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c @@ -318,12 +318,23 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu, return &qsmmu->smmu; } +static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = { + { .compatible = "qcom,sc7180-smmu-500" }, + { .compatible = "qcom,sdm845-smmu-500" }, + { .compatible = "qcom,sm8150-smmu-500" }, + { .compatible = "qcom,sm8250-smmu-500" }, + { } +}; + struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) { - return qcom_smmu_create(smmu, &qcom_smmu_impl); -} + const struct device_node *np = smmu->dev->of_node; -struct arm_smmu_device *qcom_adreno_smmu_impl_init(struct arm_smmu_device *smmu) -{ - return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl); + if (of_match_node(qcom_smmu_impl_of_match, np)) + return qcom_smmu_create(smmu, &qcom_smmu_impl); + + if (of_device_is_compatible(np, "qcom,adreno-smmu")) + return qcom_smmu_create(smmu, &qcom_adreno_smmu_impl); + + return smmu; } diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h index cb7ca3a444c9..d2a2d1bc58ba 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.h +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h @@ -523,7 +523,6 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu); struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu); struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu); -struct arm_smmu_device *qcom_adreno_smmu_impl_init(struct arm_smmu_device *smmu); void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx); int arm_mmu500_reset(struct arm_smmu_device *smmu); From 7f575a6087f47cf2a320d32a102be6276e9fd5bc Mon Sep 17 00:00:00 2001 From: Sai Prakash Ranjan Date: Wed, 25 Nov 2020 12:30:18 +0530 Subject: [PATCH 36/50] iommu: arm-smmu-impl: Add a space before open parenthesis Fix the checkpatch warning for space 
required before the open parenthesis. Signed-off-by: Sai Prakash Ranjan Acked-by: Will Deacon Link: https://lore.kernel.org/r/0b4c3718a87992f11340a1cdd99fd746c647e485.1606287059.git.saiprakash.ranjan@codeaurora.org Signed-off-by: Will Deacon --- drivers/iommu/arm/arm-smmu/arm-smmu-impl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c index 26e2734eb4d7..136872e77195 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c @@ -12,7 +12,7 @@ static int arm_smmu_gr0_ns(int offset) { - switch(offset) { + switch (offset) { case ARM_SMMU_GR0_sCR0: case ARM_SMMU_GR0_sACR: case ARM_SMMU_GR0_sGFSR: From 62c9917d9c1041ba175ccf1bc4c010efc0188a2e Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Thu, 26 Nov 2020 17:06:03 +0800 Subject: [PATCH 37/50] iommu: Fix htmldocs warnings in sysfs-kernel-iommu_groups Below warnings are fixed: Documentation/ABI/testing/sysfs-kernel-iommu_groups:38: WARNING: Unexpected indentation. Documentation/ABI/testing/sysfs-kernel-iommu_groups:38: WARNING: Block quote ends without a blank line; unexpected unindent. Documentation/ABI/testing/sysfs-kernel-iommu_groups:38: WARNING: Enumerated list ends without a blank line; unexpected unindent. Documentation/ABI/testing/sysfs-kernel-iommu_groups:38: WARNING: Unexpected indentation. Documentation/ABI/testing/sysfs-kernel-iommu_groups:38: WARNING: Block quote ends without a blank line; unexpected unindent. Fixes: 63a816749d86 ("iommu: Document usage of "/sys/kernel/iommu_groups//type" file") Reported-by: Stephen Rothwell Link: https://lore.kernel.org/linux-next/20201126174851.200e0e58@canb.auug.org.au/ Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20201126090603.1511589-1-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- .../ABI/testing/sysfs-kernel-iommu_groups | 33 ++++++++++--------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-kernel-iommu_groups b/Documentation/ABI/testing/sysfs-kernel-iommu_groups index 407b1628d7fd..0fedbb0f94e4 100644 --- a/Documentation/ABI/testing/sysfs-kernel-iommu_groups +++ b/Documentation/ABI/testing/sysfs-kernel-iommu_groups @@ -40,23 +40,24 @@ KernelVersion: v5.11 Contact: Sai Praneeth Prakhya Description: /sys/kernel/iommu_groups//type shows the type of default domain in use by iommu for this group. See include/linux/iommu.h - for possible values. A privileged user could request kernel to - change the group type by writing to this file. Presently, only - three types of request are supported: - 1. DMA: All the DMA transactions from the device in this group - are translated by the iommu. - 2. identity: All the DMA transactions from the device in this - group are *not* translated by the iommu. - 3. auto: Change to the type the device was booted with. - Note: - ----- + for possible read values. A privileged user could request kernel to + change the group type by writing to this file. Valid write values: + + ======== ====================================================== + DMA All the DMA transactions from the device in this group + are translated by the iommu. + identity All the DMA transactions from the device in this group + are not translated by the iommu. + auto Change to the type the device was booted with. + ======== ====================================================== + The default domain type of a group may be modified only when - 1. The group has *only* one device - 2. 
The device in the group is not bound to any device driver. - So, the users must unbind the appropriate driver before - changing the default domain type. - Caution: - -------- + + - The group has only one device. + - The device in the group is not bound to any device driver. + So, the users must unbind the appropriate driver before + changing the default domain type. + Unbinding a device driver will take away the driver's control over the device and if done on devices that host root file system could lead to catastrophic effects (the users might From 058236eef606ea53ea7317afc20e9469cf3c3b91 Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Thu, 26 Nov 2020 21:38:25 +0800 Subject: [PATCH 38/50] iommu: return error code when it can't get group Although iommu_group_get() in iommu_probe_device() will always succeed thanks to __iommu_probe_device() creating the group if it's not present, it's still worth initialising 'ret' to -ENODEV in case this path is reachable in the future. For now, this patch results in no functional change. Reported-by: Hulk Robot Signed-off-by: Yang Yingliang Link: https://lore.kernel.org/r/20201126133825.3643852-1-yangyingliang@huawei.com Signed-off-by: Will Deacon --- drivers/iommu/iommu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 88b0c9192d8c..dd617ed854a3 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -253,8 +253,10 @@ int iommu_probe_device(struct device *dev) goto err_out; group = iommu_group_get(dev); - if (!group) + if (!group) { + ret = -ENODEV; goto err_release; + } /* * Try to allocate a default domain - needs support from the From 405a43cc00471d9f06c58704448f1a43e331826a Mon Sep 17 00:00:00 2001 From: Lu Baolu Date: Fri, 27 Nov 2020 09:33:08 +0800 Subject: [PATCH 39/50] iommu/vt-d: Remove set but not used variable Fixes gcc '-Wunused-but-set-variable' warning: drivers/iommu/intel/iommu.c:5643:27: warning: variable 'last_pfn' set but not used [-Wunused-but-set-variable] 5643 | unsigned long start_pfn, last_pfn; | ^~~~~~~~ This variable is never used, so remove it. Fixes: 2a2b8eaa5b25 ("iommu: Handle freelists when using deferred flushing in iommu drivers") Reported-by: kernel test robot Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20201127013308.1833610-1-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- drivers/iommu/intel/iommu.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 6d519a699ac2..e27eb6fc15f7 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4948,13 +4948,12 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain, struct dmar_domain *dmar_domain = to_dmar_domain(domain); unsigned long iova_pfn = IOVA_PFN(gather->start); size_t size = gather->end - gather->start; - unsigned long start_pfn, last_pfn; + unsigned long start_pfn; unsigned long nrpages; int iommu_id; nrpages = aligned_nrpages(gather->start, size); start_pfn = mm_to_dma_pfn(iova_pfn); - last_pfn = start_pfn + nrpages - 1; for_each_domain_iommu(iommu_id, dmar_domain) iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain, From 33e07157105e472b746b70b3ed4197c57c43ab68 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 1 Dec 2020 09:31:49 +0800 Subject: [PATCH 40/50] iommu/vt-d: Avoid GFP_ATOMIC where it is not needed There is no reason to use GFP_ATOMIC in a 'suspend' function. 
Use GFP_KERNEL instead to give more opportunities to allocate the requested memory. Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/20201030182630.5154-1-christophe.jaillet@wanadoo.fr Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20201201013149.2466272-2-baolu.lu@linux.intel.com Signed-off-by: Will Deacon --- drivers/iommu/intel/iommu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index e27eb6fc15f7..770c53762b61 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -3492,7 +3492,7 @@ static int iommu_suspend(void) for_each_active_iommu(iommu, drhd) { iommu->iommu_state = kcalloc(MAX_SR_DMAR_REGS, sizeof(u32), - GFP_ATOMIC); + GFP_KERNEL); if (!iommu->iommu_state) goto nomem; } From 3a651b3a27a1ee35879499ead3942dc854a20968 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Tue, 17 Nov 2020 18:25:34 +0800 Subject: [PATCH 41/50] iommu: avoid taking iova_rbtree_lock twice Both find_iova() and __free_iova() take iova_rbtree_lock, there is no reason to take and release it twice inside free_iova(). Fold them into one critical section by calling the unlock versions instead. Signed-off-by: Cong Wang Reviewed-by: Robin Murphy Signed-off-by: John Garry Link: https://lore.kernel.org/r/1605608734-84416-5-git-send-email-john.garry@huawei.com Signed-off-by: Will Deacon --- drivers/iommu/iova.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index ea04a88c673d..ff59d8aa3041 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -402,10 +402,14 @@ EXPORT_SYMBOL_GPL(__free_iova); void free_iova(struct iova_domain *iovad, unsigned long pfn) { - struct iova *iova = find_iova(iovad, pfn); + unsigned long flags; + struct iova *iova; + spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); + iova = private_find_iova(iovad, pfn); if (iova) - __free_iova(iovad, iova); + private_free_iova(iovad, iova); + spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); } EXPORT_SYMBOL_GPL(free_iova); From 093b32a849b336b5b48bdde1041fc06f91ae475c Mon Sep 17 00:00:00 2001 From: Yong Wu Date: Mon, 7 Dec 2020 17:35:53 +0800 Subject: [PATCH 42/50] iommu: Improve the performance for direct_mapping Currently direct_mapping always uses the smallest pgsize, normally SZ_4K, for mapping. This is unnecessary: we could gather the size and then call iommu_map(), which can decide how to map better with the just-right pgsize. From the original comment, we should take care of overlap; otherwise iommu_map() may return -EEXIST. In this overlap case, we should map the region before the overlap first, then map the remaining part. Each iommu device calls this direct mapping when its iommu initializes. This patch is effective in improving the boot/initialization time, especially when only level-1 mapping is needed.
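The gathering idea, sketched independently of the driver hunks below: extend a run while consecutive IOVAs are unmapped, then issue a single iommu_map() for the whole run so the pagetable code can pick larger pages:

	size_t map_size = 0;	/* length of the current unmapped run */

	for (addr = start; addr <= end; addr += pg_size) {
		if (addr != end && !iommu_iova_to_phys(domain, addr)) {
			map_size += pg_size;	/* grow the run */
			continue;
		}
		if (map_size) {			/* flush the run in one call */
			ret = iommu_map(domain, addr - map_size, addr - map_size,
					map_size, entry->prot);
			if (ret)
				goto out;
			map_size = 0;
		}
	}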
Signed-off-by: Anan Sun Signed-off-by: Yong Wu Link: https://lore.kernel.org/r/20201207093553.8635-1-yong.wu@mediatek.com Signed-off-by: Will Deacon --- drivers/iommu/iommu.c | 26 +++++++++++++++++++------- 1 file changed, 19 insertions(+), 7 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index dd617ed854a3..ae70a313c090 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -739,6 +739,7 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group, /* We need to consider overlapping regions for different devices */ list_for_each_entry(entry, &mappings, list) { dma_addr_t start, end, addr; + size_t map_size = 0; if (domain->ops->apply_resv_region) domain->ops->apply_resv_region(dev, domain, entry); @@ -750,16 +751,27 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group, entry->type != IOMMU_RESV_DIRECT_RELAXABLE) continue; - for (addr = start; addr < end; addr += pg_size) { + for (addr = start; addr <= end; addr += pg_size) { phys_addr_t phys_addr; - phys_addr = iommu_iova_to_phys(domain, addr); - if (phys_addr) - continue; + if (addr == end) + goto map_end; - ret = iommu_map(domain, addr, addr, pg_size, entry->prot); - if (ret) - goto out; + phys_addr = iommu_iova_to_phys(domain, addr); + if (!phys_addr) { + map_size += pg_size; + continue; + } + +map_end: + if (map_size) { + ret = iommu_map(domain, addr - map_size, + addr - map_size, map_size, + entry->prot); + if (ret) + goto out; + map_size = 0; + } } } From f12e0d22903e8fb653168efa67ae3308712ea97e Mon Sep 17 00:00:00 2001 From: Keqian Zhu Date: Mon, 7 Dec 2020 19:57:58 +0800 Subject: [PATCH 43/50] iommu: Defer the early return in arm_(v7s/lpae)_map Although handling a mapping request with no permissions is a trivial no-op, defer the early return until after the size/range checks so that we are consistent with other mapping requests. 
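Concretely, the v7s variant now orders its checks like this (lifted from the hunk below):

	/* validate the request first, so a bogus iova/paddr still
	 * fails with -ERANGE even when no access flags are set */
	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;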
Signed-off-by: Keqian Zhu Link: https://lore.kernel.org/r/20201207115758.9400-1-zhukeqian1@huawei.com Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm-v7s.c | 8 ++++---- drivers/iommu/io-pgtable-arm.c | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index a688f22cbe3b..359b96b0fa3e 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -522,14 +522,14 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, struct io_pgtable *iop = &data->iop; int ret; - /* If no access, then nothing to do */ - if (!(prot & (IOMMU_READ | IOMMU_WRITE))) - return 0; - if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) || paddr >= (1ULL << data->iop.cfg.oas))) return -ERANGE; + /* If no access, then nothing to do */ + if (!(prot & (IOMMU_READ | IOMMU_WRITE))) + return 0; + ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp); /* * Synchronise all PTE updates for the new mapping before there's diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index a7a9bc08dcd1..8ade72adab31 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -444,10 +444,6 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, arm_lpae_iopte prot; long iaext = (s64)iova >> cfg->ias; - /* If no access, then nothing to do */ - if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) - return 0; - if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size)) return -EINVAL; @@ -456,6 +452,10 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, if (WARN_ON(iaext || paddr >> cfg->oas)) return -ERANGE; + /* If no access, then nothing to do */ + if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) + return 0; + prot = arm_lpae_prot_to_pte(data, iommu_prot); ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp); /* From f37eb48466d2ef4de33207f7389716d1734d9710 Mon Sep 17 00:00:00 2001 From: Kunkun Jiang Date: Mon, 7 Dec 2020 20:01:50 +0800 Subject: [PATCH 44/50] iommu/io-pgtable-arm: Remove unused 'level' parameter from iopte_type() macro The 'level' parameter to the iopte_type() macro is unused, so remove it. 
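The change is purely mechanical; only the macro's parameter list shrinks:

	/* before: the 'l' (level) argument is accepted but never expanded */
	#define iopte_type(pte,l) \
		(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

	/* after: callers simply use iopte_type(pte) */
	#define iopte_type(pte) \
		(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)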
Signed-off-by: Kunkun Jiang Link: https://lore.kernel.org/r/20201207120150.1891-1-jiangkunkun@huawei.com Signed-off-by: Will Deacon --- drivers/iommu/io-pgtable-arm.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 8ade72adab31..f0d1d4c08ca0 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -130,7 +130,7 @@ /* IOPTE accessors */ #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d)) -#define iopte_type(pte,l) \ +#define iopte_type(pte) \ (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) #define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) @@ -151,9 +151,9 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl, enum io_pgtable_fmt fmt) { if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE) - return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE; + return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE; - return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK; + return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK; } static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr, @@ -280,7 +280,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, /* We require an unmap first */ WARN_ON(!selftest_running); return -EEXIST; - } else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { + } else if (iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE) { /* * We need to unmap and free the old table before * overwriting it with a block entry. @@ -548,7 +548,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, * block, but anything else is invalid. We can't misinterpret * a page entry here since we're never at the last level. */ - if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE) + if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE) return 0; tablep = iopte_deref(pte, data); From 2f24dfb71208eeb0174f08dd56ca6d3c17b279a5 Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 4 Dec 2020 02:34:50 +0800 Subject: [PATCH 45/50] iommu: Delete split_and_remove_iova() Function split_and_remove_iova() has not been referenced since commit e70b081c6f37 ("iommu/vt-d: Remove IOVA handling code from the non-dma_ops path"), so delete it. 
Signed-off-by: John Garry Link: https://lore.kernel.org/r/1607020492-189471-2-git-send-email-john.garry@huawei.com Signed-off-by: Will Deacon --- drivers/iommu/iova.c | 41 ----------------------------------------- include/linux/iova.h | 10 ---------- 2 files changed, 51 deletions(-) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index ff59d8aa3041..3331c040b2b0 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -742,47 +742,6 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to) } EXPORT_SYMBOL_GPL(copy_reserved_iova); -struct iova * -split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, - unsigned long pfn_lo, unsigned long pfn_hi) -{ - unsigned long flags; - struct iova *prev = NULL, *next = NULL; - - spin_lock_irqsave(&iovad->iova_rbtree_lock, flags); - if (iova->pfn_lo < pfn_lo) { - prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1); - if (prev == NULL) - goto error; - } - if (iova->pfn_hi > pfn_hi) { - next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi); - if (next == NULL) - goto error; - } - - __cached_rbnode_delete_update(iovad, iova); - rb_erase(&iova->node, &iovad->rbroot); - - if (prev) { - iova_insert_rbtree(&iovad->rbroot, prev, NULL); - iova->pfn_lo = pfn_lo; - } - if (next) { - iova_insert_rbtree(&iovad->rbroot, next, NULL); - iova->pfn_hi = pfn_hi; - } - spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); - - return iova; - -error: - spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); - if (prev) - free_iova_mem(prev); - return NULL; -} - /* * Magazine caches for IOVA ranges. For an introduction to magazines, * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab diff --git a/include/linux/iova.h b/include/linux/iova.h index a0637abffee8..a77add571c50 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -160,8 +160,6 @@ int init_iova_flush_queue(struct iova_domain *iovad, iova_flush_cb flush_cb, iova_entry_dtor entry_dtor); struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); void put_iova_domain(struct iova_domain *iovad); -struct iova *split_and_remove_iova(struct iova_domain *iovad, - struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi); void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad); #else static inline int iova_cache_get(void) @@ -258,14 +256,6 @@ static inline void put_iova_domain(struct iova_domain *iovad) { } -static inline struct iova *split_and_remove_iova(struct iova_domain *iovad, - struct iova *iova, - unsigned long pfn_lo, - unsigned long pfn_hi) -{ - return NULL; -} - static inline void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad) { From 51b70b817b187e48155fc492adb9ce80bdb21b70 Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 4 Dec 2020 02:34:51 +0800 Subject: [PATCH 46/50] iommu: Stop exporting alloc_iova_mem() It is not used outside iova.c Signed-off-by: John Garry Link: https://lore.kernel.org/r/1607020492-189471-3-git-send-email-john.garry@huawei.com Signed-off-by: Will Deacon --- drivers/iommu/iova.c | 3 +-- include/linux/iova.h | 6 ------ 2 files changed, 1 insertion(+), 8 deletions(-) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 3331c040b2b0..bb5cb67ee311 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -243,11 +243,10 @@ static struct kmem_cache *iova_cache; static unsigned int iova_cache_users; static DEFINE_MUTEX(iova_cache_mutex); -struct iova *alloc_iova_mem(void) +static struct iova *alloc_iova_mem(void) { return kmem_cache_zalloc(iova_cache, 
GFP_ATOMIC | __GFP_NOWARN); } -EXPORT_SYMBOL(alloc_iova_mem); void free_iova_mem(struct iova *iova) { diff --git a/include/linux/iova.h b/include/linux/iova.h index a77add571c50..d8c011b6d4ed 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -136,7 +136,6 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) int iova_cache_get(void); void iova_cache_put(void); -struct iova *alloc_iova_mem(void); void free_iova_mem(struct iova *iova); void free_iova(struct iova_domain *iovad, unsigned long pfn); void __free_iova(struct iova_domain *iovad, struct iova *iova); @@ -171,11 +170,6 @@ static inline void iova_cache_put(void) { } -static inline struct iova *alloc_iova_mem(void) -{ - return NULL; -} - static inline void free_iova_mem(struct iova *iova) { } From 176cfc187c24287a363e7612cd2385ba40c2042b Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 4 Dec 2020 02:34:52 +0800 Subject: [PATCH 47/50] iommu: Stop exporting free_iova_mem() It has no user outside iova.c Signed-off-by: John Garry Link: https://lore.kernel.org/r/1607020492-189471-4-git-send-email-john.garry@huawei.com Signed-off-by: Will Deacon --- drivers/iommu/iova.c | 3 +-- include/linux/iova.h | 5 ----- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index bb5cb67ee311..4bb3293ae4d7 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -248,12 +248,11 @@ static struct iova *alloc_iova_mem(void) return kmem_cache_zalloc(iova_cache, GFP_ATOMIC | __GFP_NOWARN); } -void free_iova_mem(struct iova *iova) +static void free_iova_mem(struct iova *iova) { if (iova->pfn_lo != IOVA_ANCHOR) kmem_cache_free(iova_cache, iova); } -EXPORT_SYMBOL(free_iova_mem); int iova_cache_get(void) { diff --git a/include/linux/iova.h b/include/linux/iova.h index d8c011b6d4ed..76e16ae20729 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -136,7 +136,6 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova) int iova_cache_get(void); void iova_cache_put(void); -void free_iova_mem(struct iova *iova); void free_iova(struct iova_domain *iovad, unsigned long pfn); void __free_iova(struct iova_domain *iovad, struct iova *iova); struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, @@ -170,10 +169,6 @@ static inline void iova_cache_put(void) { } -static inline void free_iova_mem(struct iova *iova) -{ -} - static inline void free_iova(struct iova_domain *iovad, unsigned long pfn) { } From fefe8527a1e0e0014946c6b5b3b2e40cb32bb5d3 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 25 Nov 2020 17:29:39 +0000 Subject: [PATCH 48/50] iommu/io-pgtable: Remove tlb_flush_leaf The only user of tlb_flush_leaf is a particularly hairy corner of the Arm short-descriptor code, which wants a synchronous invalidation to minimise the races inherent in trying to split a large page mapping. This is already far enough into "here be dragons" territory that no sensible caller should ever hit it, and thus it really doesn't need optimising. Although using tlb_flush_walk there may technically be more heavyweight than needed, it does the job and saves everyone else having to carry around useless baggage. 
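With the leaf callback gone, struct iommu_flush_ops is down to three hooks (reconstructed from the include/linux/io-pgtable.h hunk at the end of this patch):

	struct iommu_flush_ops {
		void (*tlb_flush_all)(void *cookie);
		void (*tlb_flush_walk)(unsigned long iova, size_t size,
				       size_t granule, void *cookie);
		void (*tlb_add_page)(struct iommu_iotlb_gather *gather,
				     unsigned long iova, size_t granule,
				     void *cookie);
	};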
Signed-off-by: Robin Murphy Reviewed-by: Steven Price Link: https://lore.kernel.org/r/9844ab0c5cb3da8b2f89c6c2da16941910702b41.1606324115.git.robin.murphy@arm.com Signed-off-by: Will Deacon --- drivers/gpu/drm/msm/msm_iommu.c | 1 - drivers/gpu/drm/panfrost/panfrost_mmu.c | 7 ------ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 7 ------ drivers/iommu/arm/arm-smmu/arm-smmu.c | 25 +++------------------ drivers/iommu/arm/arm-smmu/qcom_iommu.c | 8 ------- drivers/iommu/io-pgtable-arm-v7s.c | 3 +-- drivers/iommu/io-pgtable-arm.c | 1 - drivers/iommu/ipmmu-vmsa.c | 1 - drivers/iommu/msm_iommu.c | 7 ------ drivers/iommu/mtk_iommu.c | 1 - include/linux/io-pgtable.h | 11 --------- 11 files changed, 4 insertions(+), 68 deletions(-) diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 22ac7c692a81..50d881794758 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -139,7 +139,6 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, static const struct iommu_flush_ops null_tlb_ops = { .tlb_flush_all = msm_iommu_tlb_flush_all, .tlb_flush_walk = msm_iommu_tlb_flush_walk, - .tlb_flush_leaf = msm_iommu_tlb_flush_walk, .tlb_add_page = msm_iommu_tlb_add_page, }; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 776448c527ea..c186914cc4f9 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -347,16 +347,9 @@ static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, mmu_tlb_sync_context(cookie); } -static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule, - void *cookie) -{ - mmu_tlb_sync_context(cookie); -} - static const struct iommu_flush_ops mmu_tlb_ops = { .tlb_flush_all = mmu_tlb_inv_context_s1, .tlb_flush_walk = mmu_tlb_flush_walk, - .tlb_flush_leaf = mmu_tlb_flush_leaf, }; int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 2ddf5ecc75f8..8ca7415d785d 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1760,16 +1760,9 @@ static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size, arm_smmu_tlb_inv_range(iova, size, granule, false, cookie); } -static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ - arm_smmu_tlb_inv_range(iova, size, granule, true, cookie); -} - static const struct iommu_flush_ops arm_smmu_flush_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context, .tlb_flush_walk = arm_smmu_tlb_inv_walk, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf, .tlb_add_page = arm_smmu_tlb_inv_page_nosync, }; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index d8979bb71fc0..d8c6bfde6a61 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -333,14 +333,6 @@ static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size, arm_smmu_tlb_sync_context(cookie); } -static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ - arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, - ARM_SMMU_CB_S1_TLBIVAL); - arm_smmu_tlb_sync_context(cookie); -} - static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) @@ -357,14 +349,6 @@ static void arm_smmu_tlb_inv_walk_s2(unsigned long 
iova, size_t size, arm_smmu_tlb_sync_context(cookie); } -static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ - arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, - ARM_SMMU_CB_S2_TLBIIPAS2L); - arm_smmu_tlb_sync_context(cookie); -} - static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) @@ -373,8 +357,8 @@ static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather, ARM_SMMU_CB_S2_TLBIIPAS2L); } -static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size, - size_t granule, void *cookie) +static void arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size, + size_t granule, void *cookie) { arm_smmu_tlb_inv_context_s2(cookie); } @@ -401,21 +385,18 @@ static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather, static const struct iommu_flush_ops arm_smmu_s1_tlb_ops = { .tlb_flush_all = arm_smmu_tlb_inv_context_s1, .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1, .tlb_add_page = arm_smmu_tlb_add_page_s1, }; static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v2 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2, - .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2, .tlb_add_page = arm_smmu_tlb_add_page_s2, }; static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { .tlb_flush_all = arm_smmu_tlb_inv_context_s2, - .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1, - .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1, + .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2_v1, .tlb_add_page = arm_smmu_tlb_add_page_s2_v1, }; diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index b30d6c966e2c..7f280c8d5c53 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -185,13 +185,6 @@ static void qcom_iommu_tlb_flush_walk(unsigned long iova, size_t size, qcom_iommu_tlb_sync(cookie); } -static void qcom_iommu_tlb_flush_leaf(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ - qcom_iommu_tlb_inv_range_nosync(iova, size, granule, true, cookie); - qcom_iommu_tlb_sync(cookie); -} - static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) @@ -202,7 +195,6 @@ static void qcom_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, static const struct iommu_flush_ops qcom_flush_ops = { .tlb_flush_all = qcom_iommu_tlb_inv_context, .tlb_flush_walk = qcom_iommu_tlb_flush_walk, - .tlb_flush_leaf = qcom_iommu_tlb_flush_leaf, .tlb_add_page = qcom_iommu_tlb_add_page, }; diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 359b96b0fa3e..1d92ac948db7 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -584,7 +584,7 @@ static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data, __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg); size *= ARM_V7S_CONT_PAGES; - io_pgtable_tlb_flush_leaf(iop, iova, size, size); + io_pgtable_tlb_flush_walk(iop, iova, size, size); return pte; } @@ -866,7 +866,6 @@ static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather, static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_walk = dummy_tlb_flush, - .tlb_flush_leaf = dummy_tlb_flush, .tlb_add_page = dummy_tlb_add_page, }; diff --git a/drivers/iommu/io-pgtable-arm.c 
b/drivers/iommu/io-pgtable-arm.c index 135f57b37bbd..49ad8d2a82df 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -1085,7 +1085,6 @@ static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather, static const struct iommu_flush_ops dummy_tlb_ops __initconst = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_flush_walk = dummy_tlb_flush, - .tlb_flush_leaf = dummy_tlb_flush, .tlb_add_page = dummy_tlb_add_page, }; diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 0f18abda0e20..d71f10257f15 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -325,7 +325,6 @@ static void ipmmu_tlb_flush(unsigned long iova, size_t size, static const struct iommu_flush_ops ipmmu_flush_ops = { .tlb_flush_all = ipmmu_tlb_flush_all, .tlb_flush_walk = ipmmu_tlb_flush, - .tlb_flush_leaf = ipmmu_tlb_flush, }; /* ----------------------------------------------------------------------------- diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c index 3615cd6241c4..040e85f70861 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -174,12 +174,6 @@ static void __flush_iotlb_walk(unsigned long iova, size_t size, __flush_iotlb_range(iova, size, granule, false, cookie); } -static void __flush_iotlb_leaf(unsigned long iova, size_t size, - size_t granule, void *cookie) -{ - __flush_iotlb_range(iova, size, granule, true, cookie); -} - static void __flush_iotlb_page(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) { @@ -189,7 +183,6 @@ static void __flush_iotlb_page(struct iommu_iotlb_gather *gather, static const struct iommu_flush_ops msm_iommu_flush_ops = { .tlb_flush_all = __flush_iotlb, .tlb_flush_walk = __flush_iotlb_walk, - .tlb_flush_leaf = __flush_iotlb_leaf, .tlb_add_page = __flush_iotlb_page, }; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index c072cee532c2..8e56cec532e7 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -240,7 +240,6 @@ static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather, static const struct iommu_flush_ops mtk_iommu_flush_ops = { .tlb_flush_all = mtk_iommu_tlb_flush_all, .tlb_flush_walk = mtk_iommu_tlb_flush_range_sync, - .tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync, .tlb_add_page = mtk_iommu_tlb_flush_page_nosync, }; diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index fb4d5a763e0c..ea727eb1a1a9 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -25,8 +25,6 @@ enum io_pgtable_fmt { * @tlb_flush_walk: Synchronously invalidate all intermediate TLB state * (sometimes referred to as the "walk cache") for a virtual * address range. - * @tlb_flush_leaf: Synchronously invalidate all leaf TLB state for a virtual - * address range. * @tlb_add_page: Optional callback to queue up leaf TLB invalidation for a * single page. 
IOMMUs that cannot batch TLB invalidation * operations efficiently will typically issue them here, but @@ -40,8 +38,6 @@ struct iommu_flush_ops { void (*tlb_flush_all)(void *cookie); void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule, void *cookie); - void (*tlb_flush_leaf)(unsigned long iova, size_t size, size_t granule, - void *cookie); void (*tlb_add_page)(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie); }; @@ -228,13 +224,6 @@ io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova, iop->cfg.tlb->tlb_flush_walk(iova, size, granule, iop->cookie); } -static inline void -io_pgtable_tlb_flush_leaf(struct io_pgtable *iop, unsigned long iova, - size_t size, size_t granule) -{ - iop->cfg.tlb->tlb_flush_leaf(iova, size, granule, iop->cookie); -} - static inline void io_pgtable_tlb_add_page(struct io_pgtable *iop, struct iommu_iotlb_gather * gather, unsigned long iova, From 71fe89ceb55bc0a85121b757cc8d3fb6ff457263 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 9 Dec 2020 12:20:19 +0100 Subject: [PATCH 49/50] dma-iommu: remove __iommu_dma_mmap The function has a single caller, so open code it there and take advantage of the precalculated page count variable. Signed-off-by: Christoph Hellwig Link: https://lore.kernel.org/r/20201209112019.2625029-1-hch@lst.de Signed-off-by: Will Deacon --- drivers/iommu/dma-iommu.c | 17 +---------------- 1 file changed, 1 insertion(+), 16 deletions(-) diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 5f49ed653f98..f0305e6aac1b 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -752,21 +752,6 @@ out_free_pages: return NULL; } -/** - * __iommu_dma_mmap - Map a buffer into provided user VMA - * @pages: Array representing buffer from __iommu_dma_alloc() - * @size: Size of buffer in bytes - * @vma: VMA describing requested userspace mapping - * - * Maps the pages of the buffer in @pages into @vma. The caller is responsible - * for verifying the correct size and protection of @vma beforehand. - */ -static int __iommu_dma_mmap(struct page **pages, size_t size, - struct vm_area_struct *vma) -{ - return vm_map_pages(vma, pages, PAGE_ALIGN(size) >> PAGE_SHIFT); -} - static void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir) { @@ -1287,7 +1272,7 @@ static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, struct page **pages = dma_common_find_pages(cpu_addr); if (pages) - return __iommu_dma_mmap(pages, size, vma); + return vm_map_pages(vma, pages, nr_pages); pfn = vmalloc_to_pfn(cpu_addr); } else { pfn = page_to_pfn(virt_to_page(cpu_addr)); From 5ae9a046a452d60b6a6c076f6df7e3f8e34f918f Mon Sep 17 00:00:00 2001 From: Suravee Suthikulpanit Date: Thu, 10 Dec 2020 10:24:36 -0600 Subject: [PATCH 50/50] iommu/amd: Add sanity check for interrupt remapping table length macros Currently, macros related to the interrupt remapping table length are defined separately. This has resulted in an oversight in which one of the macros was missed when changing the length. To prevent this, redefine the macros to add a built-in sanity check. Also, rename the macros to use the name of the DTE[IntTabLen] field as specified in the AMD IOMMU specification. There is no functional change.
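The resulting definitions derive everything from a single DTE_INTTABLEN_VALUE (taken from the amd_iommu_types.h hunk below; the DTE[IntTabLen] field encodes the table length as a power of two, so the value 9 yields 2^9 = 512 entries):

	#define DTE_INTTABLEN_VALUE	9ULL
	#define DTE_INTTABLEN		(DTE_INTTABLEN_VALUE << 1)
	#define DTE_INTTABLEN_MASK	(0xfULL << 1)
	/* table size and DTE encoding can no longer drift apart */
	#define MAX_IRQS_PER_TABLE	(1 << DTE_INTTABLEN_VALUE)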
Suggested-by: Linus Torvalds Reviewed-by: Tom Lendacky Signed-off-by: Suravee Suthikulpanit Cc: Will Deacon Cc: Jerry Snitselaar Cc: Joerg Roedel Reviewed-by: Jerry Snitselaar Link: https://lore.kernel.org/r/20201210162436.126321-1-suravee.suthikulpanit@amd.com Signed-off-by: Will Deacon --- drivers/iommu/amd/amd_iommu_types.h | 19 ++++++++++--------- drivers/iommu/amd/init.c | 6 +++--- drivers/iommu/amd/iommu.c | 2 +- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 494b42a31b7a..899ce62df3f0 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -255,11 +255,19 @@ /* Bit value definition for dte irq remapping fields*/ #define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6) #define DTE_IRQ_REMAP_INTCTL_MASK (0x3ULL << 60) -#define DTE_IRQ_TABLE_LEN_MASK (0xfULL << 1) #define DTE_IRQ_REMAP_INTCTL (2ULL << 60) -#define DTE_IRQ_TABLE_LEN (9ULL << 1) #define DTE_IRQ_REMAP_ENABLE 1ULL +/* + * AMD IOMMU hardware only support 512 IRTEs despite + * the architectural limitation of 2048 entries. + */ +#define DTE_INTTAB_ALIGNMENT 128 +#define DTE_INTTABLEN_VALUE 9ULL +#define DTE_INTTABLEN (DTE_INTTABLEN_VALUE << 1) +#define DTE_INTTABLEN_MASK (0xfULL << 1) +#define MAX_IRQS_PER_TABLE (1 << DTE_INTTABLEN_VALUE) + #define PAGE_MODE_NONE 0x00 #define PAGE_MODE_1_LEVEL 0x01 #define PAGE_MODE_2_LEVEL 0x02 @@ -409,13 +417,6 @@ extern bool amd_iommu_np_cache; /* Only true if all IOMMUs support device IOTLBs */ extern bool amd_iommu_iotlb_sup; -/* - * AMD IOMMU hardware only support 512 IRTEs despite - * the architectural limitation of 2048 entries. - */ -#define MAX_IRQS_PER_TABLE 512 -#define IRQ_TABLE_ALIGNMENT 128 - struct irq_remap_table { raw_spinlock_t lock; unsigned min_index; diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 23a790f8f550..6bec8913d064 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -989,10 +989,10 @@ static bool copy_device_table(void) irq_v = old_devtb[devid].data[2] & DTE_IRQ_REMAP_ENABLE; int_ctl = old_devtb[devid].data[2] & DTE_IRQ_REMAP_INTCTL_MASK; - int_tab_len = old_devtb[devid].data[2] & DTE_IRQ_TABLE_LEN_MASK; + int_tab_len = old_devtb[devid].data[2] & DTE_INTTABLEN_MASK; if (irq_v && (int_ctl || int_tab_len)) { if ((int_ctl != DTE_IRQ_REMAP_INTCTL) || - (int_tab_len != DTE_IRQ_TABLE_LEN)) { + (int_tab_len != DTE_INTTABLEN)) { pr_err("Wrong old irq remapping flag: %#x\n", devid); return false; } @@ -2674,7 +2674,7 @@ static int __init early_amd_iommu_init(void) remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2); amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", remap_cache_sz, - IRQ_TABLE_ALIGNMENT, + DTE_INTTAB_ALIGNMENT, 0, NULL); if (!amd_iommu_irq_cache) goto out; diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index b9cf59443843..f7abf16d1e3a 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -3191,7 +3191,7 @@ static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table) dte &= ~DTE_IRQ_PHYS_ADDR_MASK; dte |= iommu_virt_to_phys(table->table); dte |= DTE_IRQ_REMAP_INTCTL; - dte |= DTE_IRQ_TABLE_LEN; + dte |= DTE_INTTABLEN; dte |= DTE_IRQ_REMAP_ENABLE; amd_iommu_dev_table[devid].data[2] = dte;