Merge branches 'arm/mediatek', 'arm/msm', 'arm/renesas', 'arm/rockchip', 'arm/smmu', 'x86/vt-d' and 'x86/amd' into next

Joerg Roedel 2022-03-08 12:21:31 +01:00
41 changed files with 689 additions and 2278 deletions


@ -44,6 +44,10 @@ properties:
- renesas,ipmmu-r8a77990 # R-Car E3
- renesas,ipmmu-r8a77995 # R-Car D3
- renesas,ipmmu-r8a779a0 # R-Car V3U
- items:
- enum:
- renesas,ipmmu-r8a779f0 # R-Car S4-8
- const: renesas,rcar-gen4-ipmmu-vmsa # R-Car Gen4
reg:
maxItems: 1


@ -133,7 +133,7 @@ nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
* or equal to the system's PAGE_SIZE, with a preference if
* both are equal.
*/
pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
pgsize_bitmap = tdev->iommu.domain->pgsize_bitmap;
if (pgsize_bitmap & PAGE_SIZE) {
tdev->iommu.pgshift = PAGE_SHIFT;
} else {


@ -15,7 +15,6 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(u16 devid);
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
@ -117,8 +116,7 @@ void amd_iommu_domain_clr_pt_root(struct protection_domain *domain)
extern bool translation_pre_enabled(struct amd_iommu *iommu);
extern bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev);
extern bool amd_iommu_is_attach_deferred(struct device *dev);
extern int __init add_special_device(u8 type, u8 id, u16 *devid,
bool cmd_line);


@ -671,7 +671,7 @@ void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
* This function resets the command buffer if the IOMMU stopped fetching
* commands from it.
*/
void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
static void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
{
iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
@ -990,6 +990,7 @@ static bool copy_device_table(void)
get_order(dev_table_size));
if (old_dev_tbl_cpy == NULL) {
pr_err("Failed to allocate memory for copying old device table!\n");
memunmap(old_devtb);
return false;
}
@ -1020,6 +1021,7 @@ static bool copy_device_table(void)
if ((int_ctl != DTE_IRQ_REMAP_INTCTL) ||
(int_tab_len != DTE_INTTABLEN)) {
pr_err("Wrong old irq remapping flag: %#x\n", devid);
memunmap(old_devtb);
return false;
}
@ -1953,9 +1955,11 @@ static int __init amd_iommu_init_pci(void)
for_each_iommu(iommu) {
ret = iommu_init_pci(iommu);
if (ret)
break;
if (ret) {
pr_err("IOMMU%d: Failed to initialize IOMMU Hardware (error=%d)!\n",
iommu->index, ret);
goto out;
}
/* Need to setup range after PCI init */
iommu_set_cwwb_range(iommu);
}
@ -1971,6 +1975,11 @@ static int __init amd_iommu_init_pci(void)
* active.
*/
ret = amd_iommu_init_api();
if (ret) {
pr_err("IOMMU: Failed to initialize IOMMU-API interface (error=%d)!\n",
ret);
goto out;
}
init_device_table_dma();
@ -1980,6 +1989,7 @@ static int __init amd_iommu_init_pci(void)
if (!ret)
print_iommu_info();
out:
return ret;
}


@ -2221,8 +2221,7 @@ static void amd_iommu_get_resv_regions(struct device *dev,
list_add_tail(&region->list, head);
}
bool amd_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev)
bool amd_iommu_is_attach_deferred(struct device *dev)
{
struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
@ -2275,13 +2274,6 @@ static int amd_iommu_def_domain_type(struct device *dev)
const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
.domain_free = amd_iommu_domain_free,
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
.iotlb_sync_map = amd_iommu_iotlb_sync_map,
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
.probe_device = amd_iommu_probe_device,
.release_device = amd_iommu_release_device,
.probe_finalize = amd_iommu_probe_finalize,
@ -2290,9 +2282,18 @@ const struct iommu_ops amd_iommu_ops = {
.put_resv_regions = generic_iommu_put_resv_regions,
.is_attach_deferred = amd_iommu_is_attach_deferred,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
.iotlb_sync = amd_iommu_iotlb_sync,
.def_domain_type = amd_iommu_def_domain_type,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = amd_iommu_attach_device,
.detach_dev = amd_iommu_detach_device,
.map = amd_iommu_map,
.unmap = amd_iommu_unmap,
.iotlb_sync_map = amd_iommu_iotlb_sync_map,
.iova_to_phys = amd_iommu_iova_to_phys,
.flush_iotlb_all = amd_iommu_flush_iotlb_all,
.iotlb_sync = amd_iommu_iotlb_sync,
.free = amd_iommu_domain_free,
}
};
/*****************************************************************************

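The AMD conversion above is one instance of a pattern repeated throughout this merge (Apple DART, Arm SMMU v2/v3, QCOM, Exynos, FSL PAMU and IPMMU follow below): callbacks that act on a domain move out of struct iommu_ops into a struct iommu_domain_ops reachable through .default_domain_ops, and .domain_free becomes the domain op .free. A minimal sketch of the resulting driver-side shape, using a hypothetical "foo" driver and only fields that appear in this diff:

static const struct iommu_ops foo_iommu_ops = {
	/* device/instance-level callbacks stay in iommu_ops */
	.domain_alloc	= foo_domain_alloc,
	.probe_device	= foo_probe_device,
	.release_device	= foo_release_device,
	.device_group	= generic_device_group,
	.pgsize_bitmap	= SZ_4K,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		/* per-domain callbacks move into iommu_domain_ops */
		.attach_dev	= foo_attach_dev,
		.map		= foo_map,
		.unmap		= foo_unmap,
		.iova_to_phys	= foo_iova_to_phys,
		.free		= foo_domain_free,	/* was .domain_free */
	}
};

The core side is visible in the iommu.c hunks further down: __iommu_domain_alloc() falls back to bus->iommu_ops->default_domain_ops when the driver has not set domain->ops, and iommu_domain_free() now calls domain->ops->free().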

@ -24,7 +24,6 @@
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Joerg Roedel <jroedel@suse.de>");
#define MAX_DEVICES 0x10000
#define PRI_QUEUE_SIZE 512
struct pri_queue {
@ -71,7 +70,6 @@ struct fault {
struct pasid_state *state;
struct mm_struct *mm;
u64 address;
u16 devid;
u32 pasid;
u16 tag;
u16 finish;
@ -125,6 +123,15 @@ static void free_device_state(struct device_state *dev_state)
{
struct iommu_group *group;
/* Get rid of any remaining pasid states */
free_pasid_states(dev_state);
/*
* Wait until the last reference is dropped before freeing
* the device state.
*/
wait_event(dev_state->wq, !atomic_read(&dev_state->count));
/*
* First detach device from domain - No more PRI requests will arrive
* from that device after it is unbound from the IOMMUv2 domain.
@ -537,7 +544,7 @@ static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
ret = NOTIFY_DONE;
/* In kdump kernel pci dev is not initialized yet -> send INVALID */
if (amd_iommu_is_attach_deferred(NULL, &pdev->dev)) {
if (amd_iommu_is_attach_deferred(&pdev->dev)) {
amd_iommu_complete_ppr(pdev, iommu_fault->pasid,
PPR_INVALID, tag);
goto out;
@ -850,15 +857,7 @@ void amd_iommu_free_device(struct pci_dev *pdev)
spin_unlock_irqrestore(&state_lock, flags);
/* Get rid of any remaining pasid states */
free_pasid_states(dev_state);
put_device_state(dev_state);
/*
* Wait until the last reference is dropped before freeing
* the device state.
*/
wait_event(dev_state->wq, !atomic_read(&dev_state->count));
free_device_state(dev_state);
}
EXPORT_SYMBOL(amd_iommu_free_device);
@ -955,8 +954,8 @@ out:
static void __exit amd_iommu_v2_exit(void)
{
struct device_state *dev_state;
int i;
struct device_state *dev_state, *next;
unsigned long flags;
if (!amd_iommu_v2_supported())
return;
@ -969,18 +968,18 @@ static void __exit amd_iommu_v2_exit(void)
* The loop below might call flush_workqueue(), so call
* destroy_workqueue() after it
*/
for (i = 0; i < MAX_DEVICES; ++i) {
dev_state = get_device_state(i);
if (dev_state == NULL)
continue;
spin_lock_irqsave(&state_lock, flags);
list_for_each_entry_safe(dev_state, next, &state_list, list) {
WARN_ON_ONCE(1);
put_device_state(dev_state);
amd_iommu_free_device(dev_state->pdev);
list_del(&dev_state->list);
free_device_state(dev_state);
}
spin_unlock_irqrestore(&state_lock, flags);
destroy_workqueue(iommu_wq);
}


@ -765,15 +765,6 @@ static void apple_dart_get_resv_regions(struct device *dev,
static const struct iommu_ops apple_dart_iommu_ops = {
.domain_alloc = apple_dart_domain_alloc,
.domain_free = apple_dart_domain_free,
.attach_dev = apple_dart_attach_dev,
.detach_dev = apple_dart_detach_dev,
.map_pages = apple_dart_map_pages,
.unmap_pages = apple_dart_unmap_pages,
.flush_iotlb_all = apple_dart_flush_iotlb_all,
.iotlb_sync = apple_dart_iotlb_sync,
.iotlb_sync_map = apple_dart_iotlb_sync_map,
.iova_to_phys = apple_dart_iova_to_phys,
.probe_device = apple_dart_probe_device,
.release_device = apple_dart_release_device,
.device_group = apple_dart_device_group,
@ -782,6 +773,17 @@ static const struct iommu_ops apple_dart_iommu_ops = {
.get_resv_regions = apple_dart_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during dart probe */
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = apple_dart_attach_dev,
.detach_dev = apple_dart_detach_dev,
.map_pages = apple_dart_map_pages,
.unmap_pages = apple_dart_unmap_pages,
.flush_iotlb_all = apple_dart_flush_iotlb_all,
.iotlb_sync = apple_dart_iotlb_sync,
.iotlb_sync_map = apple_dart_iotlb_sync_map,
.iova_to_phys = apple_dart_iova_to_phys,
.free = apple_dart_domain_free,
}
};
static irqreturn_t apple_dart_irq(int irq, void *dev)


@ -1558,6 +1558,7 @@ static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
dev_info(smmu->dev, "\t0x%016llx\n",
(unsigned long long)evt[i]);
cond_resched();
}
/*
@ -2841,17 +2842,9 @@ static int arm_smmu_dev_disable_feature(struct device *dev,
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
.domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
.map_pages = arm_smmu_map_pages,
.unmap_pages = arm_smmu_unmap_pages,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group,
.enable_nesting = arm_smmu_enable_nesting,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
@ -2865,6 +2858,16 @@ static struct iommu_ops arm_smmu_ops = {
.page_response = arm_smmu_page_response,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = arm_smmu_attach_dev,
.map_pages = arm_smmu_map_pages,
.unmap_pages = arm_smmu_unmap_pages,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.enable_nesting = arm_smmu_enable_nesting,
.free = arm_smmu_domain_free,
}
};
/* Probing and initialisation functions */
@ -2911,32 +2914,20 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
return 0;
}
static void arm_smmu_cmdq_free_bitmap(void *data)
{
unsigned long *bitmap = data;
bitmap_free(bitmap);
}
static int arm_smmu_cmdq_init(struct arm_smmu_device *smmu)
{
int ret = 0;
struct arm_smmu_cmdq *cmdq = &smmu->cmdq;
unsigned int nents = 1 << cmdq->q.llq.max_n_shift;
atomic_long_t *bitmap;
atomic_set(&cmdq->owner_prod, 0);
atomic_set(&cmdq->lock, 0);
bitmap = (atomic_long_t *)bitmap_zalloc(nents, GFP_KERNEL);
if (!bitmap) {
dev_err(smmu->dev, "failed to allocate cmdq bitmap\n");
ret = -ENOMEM;
} else {
cmdq->valid_map = bitmap;
devm_add_action(smmu->dev, arm_smmu_cmdq_free_bitmap, bitmap);
}
cmdq->valid_map = (atomic_long_t *)devm_bitmap_zalloc(smmu->dev, nents,
GFP_KERNEL);
if (!cmdq->valid_map)
return -ENOMEM;
return ret;
return 0;
}
static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
@ -2981,10 +2972,10 @@ static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
unsigned int i;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
void *strtab = smmu->strtab_cfg.strtab;
cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents,
sizeof(*cfg->l1_desc), GFP_KERNEL);
if (!cfg->l1_desc)
return -ENOMEM;

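Two of the arm-smmu-v3 hunks above are straight conversions to device-managed allocators: the command-queue bitmap no longer needs an explicit devm_add_action() free callback, and the open-coded size multiplication becomes an overflow-checked devm_kcalloc(). A short sketch of the pattern, with "foo" names as placeholders:

unsigned long *valid_map;
struct foo_desc *descs;

/* freed automatically when the device is unbound; no devm_add_action() */
valid_map = devm_bitmap_zalloc(dev, nents, GFP_KERNEL);
if (!valid_map)
	return -ENOMEM;

/* devm_kcalloc() checks the n * size multiplication for overflow */
descs = devm_kcalloc(dev, num_descs, sizeof(*descs), GFP_KERNEL);
if (!descs)
	return -ENOMEM;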

@ -807,7 +807,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
* Request context fault interrupt. Do this last to avoid the
* handler seeing a half-initialised domain state.
*/
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
irq = smmu->irqs[cfg->irptndx];
if (smmu->impl && smmu->impl->context_fault)
context_fault = smmu->impl->context_fault;
@ -858,7 +858,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
arm_smmu_write_context_bank(smmu, cfg->cbndx);
if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) {
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
irq = smmu->irqs[cfg->irptndx];
devm_free_irq(smmu->dev, irq, domain);
}
@ -1583,25 +1583,27 @@ static int arm_smmu_def_domain_type(struct device *dev)
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
.domain_free = arm_smmu_domain_free,
.attach_dev = arm_smmu_attach_dev,
.map_pages = arm_smmu_map_pages,
.unmap_pages = arm_smmu_unmap_pages,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device,
.probe_finalize = arm_smmu_probe_finalize,
.device_group = arm_smmu_device_group,
.enable_nesting = arm_smmu_enable_nesting,
.set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
.of_xlate = arm_smmu_of_xlate,
.get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = generic_iommu_put_resv_regions,
.def_domain_type = arm_smmu_def_domain_type,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = arm_smmu_attach_dev,
.map_pages = arm_smmu_map_pages,
.unmap_pages = arm_smmu_unmap_pages,
.flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.enable_nesting = arm_smmu_enable_nesting,
.set_pgtable_quirks = arm_smmu_set_pgtable_quirks,
.free = arm_smmu_domain_free,
}
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
@ -1951,8 +1953,8 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
return ret;
}
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
struct arm_smmu_device *smmu)
static int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
u32 *global_irqs, u32 *pmu_irqs)
{
struct device *dev = smmu->dev;
struct acpi_iort_node *node =
@ -1968,7 +1970,8 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
return ret;
/* Ignore the configuration access interrupt */
smmu->num_global_irqs = 1;
*global_irqs = 1;
*pmu_irqs = 0;
if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
@ -1976,25 +1979,24 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
struct arm_smmu_device *smmu)
static inline int arm_smmu_device_acpi_probe(struct arm_smmu_device *smmu,
u32 *global_irqs, u32 *pmu_irqs)
{
return -ENODEV;
}
#endif
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
struct arm_smmu_device *smmu)
static int arm_smmu_device_dt_probe(struct arm_smmu_device *smmu,
u32 *global_irqs, u32 *pmu_irqs)
{
const struct arm_smmu_match_data *data;
struct device *dev = &pdev->dev;
struct device *dev = smmu->dev;
bool legacy_binding;
if (of_property_read_u32(dev->of_node, "#global-interrupts",
&smmu->num_global_irqs)) {
dev_err(dev, "missing #global-interrupts property\n");
return -ENODEV;
}
if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs))
return dev_err_probe(dev, -ENODEV,
"missing #global-interrupts property\n");
*pmu_irqs = 0;
data = of_device_get_match_data(dev);
smmu->version = data->version;
@ -2073,6 +2075,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
u32 global_irqs, pmu_irqs;
irqreturn_t (*global_fault)(int irq, void *dev);
smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
@ -2083,10 +2086,9 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
smmu->dev = dev;
if (dev->of_node)
err = arm_smmu_device_dt_probe(pdev, smmu);
err = arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs);
else
err = arm_smmu_device_acpi_probe(pdev, smmu);
err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs);
if (err)
return err;
@ -2105,31 +2107,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
if (IS_ERR(smmu))
return PTR_ERR(smmu);
num_irqs = 0;
while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
num_irqs++;
if (num_irqs > smmu->num_global_irqs)
smmu->num_context_irqs++;
}
num_irqs = platform_irq_count(pdev);
if (!smmu->num_context_irqs) {
dev_err(dev, "found %d interrupts but expected at least %d\n",
num_irqs, smmu->num_global_irqs + 1);
return -ENODEV;
}
smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;
if (smmu->num_context_irqs <= 0)
return dev_err_probe(dev, -ENODEV,
"found %d interrupts but expected at least %d\n",
num_irqs, global_irqs + pmu_irqs + 1);
smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
GFP_KERNEL);
if (!smmu->irqs) {
dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
return -ENOMEM;
}
smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs,
sizeof(*smmu->irqs), GFP_KERNEL);
if (!smmu->irqs)
return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n",
smmu->num_context_irqs);
for (i = 0; i < num_irqs; ++i) {
int irq = platform_get_irq(pdev, i);
for (i = 0; i < smmu->num_context_irqs; i++) {
int irq = platform_get_irq(pdev, global_irqs + pmu_irqs + i);
if (irq < 0)
return -ENODEV;
return irq;
smmu->irqs[i] = irq;
}
@ -2165,17 +2161,18 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
else
global_fault = arm_smmu_global_fault;
for (i = 0; i < smmu->num_global_irqs; ++i) {
err = devm_request_irq(smmu->dev, smmu->irqs[i],
global_fault,
IRQF_SHARED,
"arm-smmu global fault",
smmu);
if (err) {
dev_err(dev, "failed to request global IRQ %d (%u)\n",
i, smmu->irqs[i]);
return err;
}
for (i = 0; i < global_irqs; i++) {
int irq = platform_get_irq(pdev, i);
if (irq < 0)
return irq;
err = devm_request_irq(dev, irq, global_fault, IRQF_SHARED,
"arm-smmu global fault", smmu);
if (err)
return dev_err_probe(dev, err,
"failed to request global IRQ %d (%u)\n",
i, irq);
}
err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,

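The probe rework in the arm-smmu hunks above stops counting interrupt resources by hand. The DT or ACPI probe now reports how many leading interrupts are global ones (plus a PMU count, zero in the hunks shown), platform_irq_count() supplies the total, and the remainder are context interrupts fetched by index. A condensed sketch of the assumed flow, error handling elided and names taken from the diff:

/* firmware description is assumed to order IRQs as [global][pmu][context] */
if (dev->of_node)
	err = arm_smmu_device_dt_probe(smmu, &global_irqs, &pmu_irqs);
else
	err = arm_smmu_device_acpi_probe(smmu, &global_irqs, &pmu_irqs);

num_irqs = platform_irq_count(pdev);
smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs;

for (i = 0; i < smmu->num_context_irqs; i++)	/* context IRQs follow the others */
	smmu->irqs[i] = platform_get_irq(pdev, global_irqs + pmu_irqs + i);

for (i = 0; i < global_irqs; i++)		/* global fault IRQs come first */
	devm_request_irq(dev, platform_get_irq(pdev, i), global_fault,
			 IRQF_SHARED, "arm-smmu global fault", smmu);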

@ -318,11 +318,10 @@ struct arm_smmu_device {
unsigned long pa_size;
unsigned long pgsize_bitmap;
u32 num_global_irqs;
u32 num_context_irqs;
int num_context_irqs;
int num_clks;
unsigned int *irqs;
struct clk_bulk_data *clks;
int num_clks;
spinlock_t global_sync_lock;


@ -590,19 +590,21 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops qcom_iommu_ops = {
.capable = qcom_iommu_capable,
.domain_alloc = qcom_iommu_domain_alloc,
.domain_free = qcom_iommu_domain_free,
.attach_dev = qcom_iommu_attach_dev,
.detach_dev = qcom_iommu_detach_dev,
.map = qcom_iommu_map,
.unmap = qcom_iommu_unmap,
.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
.iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
.probe_device = qcom_iommu_probe_device,
.release_device = qcom_iommu_release_device,
.device_group = generic_device_group,
.of_xlate = qcom_iommu_of_xlate,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = qcom_iommu_attach_dev,
.detach_dev = qcom_iommu_detach_dev,
.map = qcom_iommu_map,
.unmap = qcom_iommu_unmap,
.flush_iotlb_all = qcom_iommu_flush_iotlb_all,
.iotlb_sync = qcom_iommu_iotlb_sync,
.iova_to_phys = qcom_iommu_iova_to_phys,
.free = qcom_iommu_domain_free,
}
};
static int qcom_iommu_sec_ptbl_init(struct device *dev)
@ -827,20 +829,20 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
ret = devm_of_platform_populate(dev);
if (ret) {
dev_err(dev, "Failed to populate iommu contexts\n");
return ret;
goto err_pm_disable;
}
ret = iommu_device_sysfs_add(&qcom_iommu->iommu, dev, NULL,
dev_name(dev));
if (ret) {
dev_err(dev, "Failed to register iommu in sysfs\n");
return ret;
goto err_pm_disable;
}
ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev);
if (ret) {
dev_err(dev, "Failed to register iommu\n");
return ret;
goto err_pm_disable;
}
bus_set_iommu(&platform_bus_type, &qcom_iommu_ops);
@ -852,6 +854,10 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
}
return 0;
err_pm_disable:
pm_runtime_disable(dev);
return ret;
}
static int qcom_iommu_device_remove(struct platform_device *pdev)


@ -525,6 +525,7 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
struct iommu_dma_cookie *cookie = domain->iova_cookie;
unsigned long order, base_pfn;
struct iova_domain *iovad;
int ret;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@ -559,6 +560,9 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
init_iova_domain(iovad, 1UL << order, base_pfn);
ret = iova_domain_init_rcaches(iovad);
if (ret)
return ret;
/* If the FQ fails we can simply fall back to strict mode */
if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))

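With this change the IOVA range caches are no longer created inside init_iova_domain(); a caller that wants the per-CPU rcaches must opt in with iova_domain_init_rcaches() and handle its failure, which is exactly what the dma-iommu hunk above adds. A hedged sketch of the new call sequence for a hypothetical iova_domain user:

struct iova_domain iovad;
int ret;

init_iova_domain(&iovad, SZ_4K, base_pfn);	/* no longer allocates rcaches */

ret = iova_domain_init_rcaches(&iovad);		/* opt-in, may fail with -ENOMEM */
if (ret)
	return ret;

/* ... allocate and free IOVAs ... */

put_iova_domain(&iovad);	/* frees the rcaches only if they were initialized */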

@ -1309,17 +1309,19 @@ static int exynos_iommu_of_xlate(struct device *dev,
static const struct iommu_ops exynos_iommu_ops = {
.domain_alloc = exynos_iommu_domain_alloc,
.domain_free = exynos_iommu_domain_free,
.attach_dev = exynos_iommu_attach_device,
.detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,
.unmap = exynos_iommu_unmap,
.iova_to_phys = exynos_iommu_iova_to_phys,
.device_group = generic_device_group,
.probe_device = exynos_iommu_probe_device,
.release_device = exynos_iommu_release_device,
.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
.of_xlate = exynos_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = exynos_iommu_attach_device,
.detach_dev = exynos_iommu_detach_device,
.map = exynos_iommu_map,
.unmap = exynos_iommu_unmap,
.iova_to_phys = exynos_iommu_iova_to_phys,
.free = exynos_iommu_domain_free,
}
};
static int __init exynos_iommu_init(void)


@ -453,13 +453,15 @@ static void fsl_pamu_release_device(struct device *dev)
static const struct iommu_ops fsl_pamu_ops = {
.capable = fsl_pamu_capable,
.domain_alloc = fsl_pamu_domain_alloc,
.domain_free = fsl_pamu_domain_free,
.attach_dev = fsl_pamu_attach_device,
.detach_dev = fsl_pamu_detach_device,
.iova_to_phys = fsl_pamu_iova_to_phys,
.probe_device = fsl_pamu_probe_device,
.release_device = fsl_pamu_release_device,
.device_group = fsl_pamu_device_group,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = fsl_pamu_attach_device,
.detach_dev = fsl_pamu_detach_device,
.iova_to_phys = fsl_pamu_iova_to_phys,
.free = fsl_pamu_domain_free,
}
};
int __init pamu_domain_init(void)


@ -344,15 +344,15 @@ static void pgtable_walk_level(struct seq_file *m, struct dma_pte *pde,
static int show_device_domain_translation(struct device *dev, void *data)
{
struct dmar_domain *domain = find_domain(dev);
struct device_domain_info *info = dev_iommu_priv_get(dev);
struct dmar_domain *domain = info->domain;
struct seq_file *m = data;
u64 path[6] = { 0 };
if (!domain)
return 0;
seq_printf(m, "Device %s with pasid %d @0x%llx\n",
dev_name(dev), domain->default_pasid,
seq_printf(m, "Device %s @0x%llx\n", dev_name(dev),
(u64)virt_to_phys(domain->pgd));
seq_puts(m, "IOVA_PFN\t\tPML5E\t\t\tPML4E\t\t\tPDPE\t\t\tPDE\t\t\tPTE\n");


@ -66,8 +66,6 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
extern const struct iommu_ops intel_iommu_ops;
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
@ -789,7 +787,8 @@ static int __init dmar_acpi_dev_scope_init(void)
andd->device_name);
continue;
}
if (acpi_bus_get_device(h, &adev)) {
adev = acpi_fetch_acpi_dev(h);
if (!adev) {
pr_err("Failed to get device for ACPI object %s\n",
andd->device_name);
continue;

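The dmar.c hunk above also switches the ACPI device lookup from the output-parameter style acpi_bus_get_device() to acpi_fetch_acpi_dev(), which returns the struct acpi_device pointer directly (NULL on failure). A minimal sketch of the two styles for a valid acpi_handle h:

struct acpi_device *adev;

/* old: status return plus output parameter */
if (acpi_bus_get_device(h, &adev))
	return -ENODEV;

/* new: direct return, NULL when no device is attached to the handle */
adev = acpi_fetch_acpi_dev(h);
if (!adev)
	return -ENODEV;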
File diff suppressed because it is too large.


@ -150,7 +150,7 @@ int intel_pasid_alloc_table(struct device *dev)
int size;
might_sleep();
info = get_domain_info(dev);
info = dev_iommu_priv_get(dev);
if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
return -EINVAL;
@ -197,7 +197,7 @@ void intel_pasid_free_table(struct device *dev)
struct pasid_entry *table;
int i, max_pde;
info = get_domain_info(dev);
info = dev_iommu_priv_get(dev);
if (!info || !dev_is_pci(dev) || !info->pasid_table)
return;
@ -223,7 +223,7 @@ struct pasid_table *intel_pasid_get_table(struct device *dev)
{
struct device_domain_info *info;
info = get_domain_info(dev);
info = dev_iommu_priv_get(dev);
if (!info)
return NULL;
@ -234,7 +234,7 @@ static int intel_pasid_get_dev_max_id(struct device *dev)
{
struct device_domain_info *info;
info = get_domain_info(dev);
info = dev_iommu_priv_get(dev);
if (!info || !info->pasid_table)
return 0;
@ -254,7 +254,7 @@ static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
return NULL;
dir = pasid_table->table;
info = get_domain_info(dev);
info = dev_iommu_priv_get(dev);
dir_index = pasid >> PASID_PDE_SHIFT;
index = pasid & PASID_PTE_MASK;
@ -487,7 +487,7 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
struct device_domain_info *info;
u16 sid, qdep, pfsid;
info = get_domain_info(dev);
info = dev_iommu_priv_get(dev);
if (!info || !info->ats_enabled)
return;
@ -762,164 +762,3 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
return 0;
}
static int
intel_pasid_setup_bind_data(struct intel_iommu *iommu, struct pasid_entry *pte,
struct iommu_gpasid_bind_data_vtd *pasid_data)
{
/*
* Not all guest PASID table entry fields are passed down during bind,
* here we only set up the ones that are dependent on guest settings.
* Execution related bits such as NXE, SMEP are not supported.
* Other fields, such as snoop related, are set based on host needs
* regardless of guest settings.
*/
if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_SRE) {
if (!ecap_srs(iommu->ecap)) {
pr_err_ratelimited("No supervisor request support on %s\n",
iommu->name);
return -EINVAL;
}
pasid_set_sre(pte);
/* Enable write protect WP if guest requested */
if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_WPE)
pasid_set_wpe(pte);
}
if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_EAFE) {
if (!ecap_eafs(iommu->ecap)) {
pr_err_ratelimited("No extended access flag support on %s\n",
iommu->name);
return -EINVAL;
}
pasid_set_eafe(pte);
}
/*
* Memory type is only applicable to devices inside processor coherent
* domain. Will add MTS support once coherent devices are available.
*/
if (pasid_data->flags & IOMMU_SVA_VTD_GPASID_MTS_MASK) {
pr_warn_ratelimited("No memory type support %s\n",
iommu->name);
return -EINVAL;
}
return 0;
}
/**
* intel_pasid_setup_nested() - Set up PASID entry for nested translation.
* This could be used for guest shared virtual address. In this case, the
* first level page tables are used for GVA-GPA translation in the guest,
* second level page tables are used for GPA-HPA translation.
*
* @iommu: IOMMU which the device belong to
* @dev: Device to be set up for translation
* @gpgd: FLPTPTR: First Level Page translation pointer in GPA
* @pasid: PASID to be programmed in the device PASID table
* @pasid_data: Additional PASID info from the guest bind request
* @domain: Domain info for setting up second level page tables
* @addr_width: Address width of the first level (guest)
*/
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
pgd_t *gpgd, u32 pasid,
struct iommu_gpasid_bind_data_vtd *pasid_data,
struct dmar_domain *domain, int addr_width)
{
struct pasid_entry *pte;
struct dma_pte *pgd;
int ret = 0;
u64 pgd_val;
int agaw;
u16 did;
if (!ecap_nest(iommu->ecap)) {
pr_err_ratelimited("IOMMU: %s: No nested translation support\n",
iommu->name);
return -EINVAL;
}
if (!(domain->flags & DOMAIN_FLAG_NESTING_MODE)) {
pr_err_ratelimited("Domain is not in nesting mode, %x\n",
domain->flags);
return -EINVAL;
}
pte = intel_pasid_get_entry(dev, pasid);
if (WARN_ON(!pte))
return -EINVAL;
/*
* Caller must ensure PASID entry is not in use, i.e. not bind the
* same PASID to the same device twice.
*/
if (pasid_pte_is_present(pte))
return -EBUSY;
pasid_clear_entry(pte);
/* Sanity checking performed by caller to make sure address
* width matching in two dimensions:
* 1. CPU vs. IOMMU
* 2. Guest vs. Host.
*/
switch (addr_width) {
#ifdef CONFIG_X86
case ADDR_WIDTH_5LEVEL:
if (!cpu_feature_enabled(X86_FEATURE_LA57) ||
!cap_5lp_support(iommu->cap)) {
dev_err_ratelimited(dev,
"5-level paging not supported\n");
return -EINVAL;
}
pasid_set_flpm(pte, 1);
break;
#endif
case ADDR_WIDTH_4LEVEL:
pasid_set_flpm(pte, 0);
break;
default:
dev_err_ratelimited(dev, "Invalid guest address width %d\n",
addr_width);
return -EINVAL;
}
/* First level PGD is in GPA, must be supported by the second level */
if ((uintptr_t)gpgd > domain->max_addr) {
dev_err_ratelimited(dev,
"Guest PGD %lx not supported, max %llx\n",
(uintptr_t)gpgd, domain->max_addr);
return -EINVAL;
}
pasid_set_flptr(pte, (uintptr_t)gpgd);
ret = intel_pasid_setup_bind_data(iommu, pte, pasid_data);
if (ret)
return ret;
/* Setup the second level based on the given domain */
pgd = domain->pgd;
agaw = iommu_skip_agaw(domain, iommu, &pgd);
if (agaw < 0) {
dev_err_ratelimited(dev, "Invalid domain page table\n");
return -EINVAL;
}
pgd_val = virt_to_phys(pgd);
pasid_set_slptr(pte, pgd_val);
pasid_set_fault_enable(pte);
did = domain->iommu_did[iommu->seq_id];
pasid_set_domain_id(pte, did);
pasid_set_address_width(pte, agaw);
pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
pasid_set_translation_type(pte, PASID_ENTRY_PGTT_NESTED);
pasid_set_present(pte);
pasid_flush_caches(iommu, pte, pasid, did);
return ret;
}


@ -118,10 +118,6 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
struct dmar_domain *domain,
struct device *dev, u32 pasid);
int intel_pasid_setup_nested(struct intel_iommu *iommu,
struct device *dev, pgd_t *pgd, u32 pasid,
struct iommu_gpasid_bind_data_vtd *pasid_data,
struct dmar_domain *domain, int addr_width);
void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
struct device *dev, u32 pasid,
bool fault_ignore);


@ -168,11 +168,6 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
return 0;
}
static inline bool intel_svm_capable(struct intel_iommu *iommu)
{
return iommu->flags & VTD_FLAG_SVM_CAPABLE;
}
void intel_svm_check(struct intel_iommu *iommu)
{
if (!pasid_supported(iommu))
@ -200,7 +195,7 @@ static void __flush_svm_range_dev(struct intel_svm *svm,
unsigned long address,
unsigned long pages, int ih)
{
struct device_domain_info *info = get_domain_info(sdev->dev);
struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);
if (WARN_ON(!pages))
return;
@ -318,193 +313,6 @@ out:
return 0;
}
int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct iommu_gpasid_bind_data *data)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev = NULL;
struct dmar_domain *dmar_domain;
struct device_domain_info *info;
struct intel_svm *svm = NULL;
unsigned long iflags;
int ret = 0;
if (WARN_ON(!iommu) || !data)
return -EINVAL;
if (data->format != IOMMU_PASID_FORMAT_INTEL_VTD)
return -EINVAL;
/* IOMMU core ensures argsz is more than the start of the union */
if (data->argsz < offsetofend(struct iommu_gpasid_bind_data, vendor.vtd))
return -EINVAL;
/* Make sure no undefined flags are used in vendor data */
if (data->vendor.vtd.flags & ~(IOMMU_SVA_VTD_GPASID_LAST - 1))
return -EINVAL;
if (!dev_is_pci(dev))
return -ENOTSUPP;
/* VT-d supports devices with full 20 bit PASIDs only */
if (pci_max_pasids(to_pci_dev(dev)) != PASID_MAX)
return -EINVAL;
/*
* We only check host PASID range, we have no knowledge to check
* guest PASID range.
*/
if (data->hpasid <= 0 || data->hpasid >= PASID_MAX)
return -EINVAL;
info = get_domain_info(dev);
if (!info)
return -EINVAL;
dmar_domain = to_dmar_domain(domain);
mutex_lock(&pasid_mutex);
ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev);
if (ret)
goto out;
if (sdev) {
/*
* Do not allow multiple bindings of the same device-PASID since
* there is only one SL page tables per PASID. We may revisit
* once sharing PGD across domains are supported.
*/
dev_warn_ratelimited(dev, "Already bound with PASID %u\n",
svm->pasid);
ret = -EBUSY;
goto out;
}
if (!svm) {
/* We come here when PASID has never been bond to a device. */
svm = kzalloc(sizeof(*svm), GFP_KERNEL);
if (!svm) {
ret = -ENOMEM;
goto out;
}
/* REVISIT: upper layer/VFIO can track host process that bind
* the PASID. ioasid_set = mm might be sufficient for vfio to
* check pasid VMM ownership. We can drop the following line
* once VFIO and IOASID set check is in place.
*/
svm->mm = get_task_mm(current);
svm->pasid = data->hpasid;
if (data->flags & IOMMU_SVA_GPASID_VAL) {
svm->gpasid = data->gpasid;
svm->flags |= SVM_FLAG_GUEST_PASID;
}
pasid_private_add(data->hpasid, svm);
INIT_LIST_HEAD_RCU(&svm->devs);
mmput(svm->mm);
}
sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
if (!sdev) {
ret = -ENOMEM;
goto out;
}
sdev->dev = dev;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
sdev->iommu = iommu;
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
sdev->users = 1;
/* Set up device context entry for PASID if not enabled already */
ret = intel_iommu_enable_pasid(iommu, sdev->dev);
if (ret) {
dev_err_ratelimited(dev, "Failed to enable PASID capability\n");
kfree(sdev);
goto out;
}
/*
* PASID table is per device for better security. Therefore, for
* each bind of a new device even with an existing PASID, we need to
* call the nested mode setup function here.
*/
spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
data->hpasid, ret);
/*
* PASID entry should be in cleared state if nested mode
* set up failed. So we only need to clear IOASID tracking
* data such that free call will succeed.
*/
kfree(sdev);
goto out;
}
svm->flags |= SVM_FLAG_GUEST_MODE;
init_rcu_head(&sdev->rcu);
list_add_rcu(&sdev->list, &svm->devs);
out:
if (!IS_ERR_OR_NULL(svm) && list_empty(&svm->devs)) {
pasid_private_remove(data->hpasid);
kfree(svm);
}
mutex_unlock(&pasid_mutex);
return ret;
}
int intel_svm_unbind_gpasid(struct device *dev, u32 pasid)
{
struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL);
struct intel_svm_dev *sdev;
struct intel_svm *svm;
int ret;
if (WARN_ON(!iommu))
return -EINVAL;
mutex_lock(&pasid_mutex);
ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev);
if (ret)
goto out;
if (sdev) {
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
sdev->users--;
if (!sdev->users) {
list_del_rcu(&sdev->list);
intel_pasid_tear_down_entry(iommu, dev,
svm->pasid, false);
intel_svm_drain_prq(dev, svm->pasid);
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
/*
* We do not free the IOASID here in that
* IOMMU driver did not allocate it.
* Unlike native SVM, IOASID for guest use was
* allocated prior to the bind call.
* In any case, if the free call comes before
* the unbind, IOMMU driver will get notified
* and perform cleanup.
*/
pasid_private_remove(pasid);
kfree(svm);
}
}
}
out:
mutex_unlock(&pasid_mutex);
return ret;
}
static int intel_svm_alloc_pasid(struct device *dev, struct mm_struct *mm,
unsigned int flags)
{
@ -524,7 +332,7 @@ static struct iommu_sva *intel_svm_bind_mm(struct intel_iommu *iommu,
struct mm_struct *mm,
unsigned int flags)
{
struct device_domain_info *info = get_domain_info(dev);
struct device_domain_info *info = dev_iommu_priv_get(dev);
unsigned long iflags, sflags;
struct intel_svm_dev *sdev;
struct intel_svm *svm;
@ -732,7 +540,7 @@ static void intel_svm_drain_prq(struct device *dev, u32 pasid)
u16 sid, did;
int qdep;
info = get_domain_info(dev);
info = dev_iommu_priv_get(dev);
if (WARN_ON(!info || !dev_is_pci(dev)))
return;
@ -1125,28 +933,6 @@ int intel_svm_page_response(struct device *dev,
goto out;
}
/*
* For responses from userspace, need to make sure that the
* pasid has been bound to its mm.
*/
if (svm->flags & SVM_FLAG_GUEST_MODE) {
struct mm_struct *mm;
mm = get_task_mm(current);
if (!mm) {
ret = -EINVAL;
goto out;
}
if (mm != svm->mm) {
ret = -ENODEV;
mmput(mm);
goto out;
}
mmput(mm);
}
/*
* Per VT-d spec. v3.0 ch7.7, system software must respond
* with page group response if private data is present (PDP)


@ -323,13 +323,14 @@ err_out:
void iommu_release_device(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
const struct iommu_ops *ops;
if (!dev->iommu)
return;
iommu_device_unlink(dev->iommu->iommu_dev, dev);
ops = dev_iommu_ops(dev);
ops->release_device(dev);
iommu_group_remove_device(dev);
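Several hunks below replace dev->bus->iommu_ops and domain->ops lookups with dev_iommu_ops(dev). The helper itself is not part of this diff; presumably it resolves the ops through the per-device IOMMU data set up at probe time, roughly along these lines (a sketch, not the exact definition):

static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
	/*
	 * Sketch: valid ops are assumed to be installed once
	 * iommu_probe_device() has succeeded for this device.
	 */
	return dev->iommu->iommu_dev->ops;
}

That assumption is presumably also why the !ops checks disappear in the resv-region and SVA helpers below: a device reaching those paths already has a registered IOMMU instance.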
@ -790,9 +791,6 @@ static int iommu_create_device_direct_mappings(struct iommu_group *group,
dma_addr_t start, end, addr;
size_t map_size = 0;
if (domain->ops->apply_resv_region)
domain->ops->apply_resv_region(dev, domain, entry);
start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);
@ -833,11 +831,12 @@ out:
return ret;
}
static bool iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev)
static bool iommu_is_attach_deferred(struct device *dev)
{
if (domain->ops->is_attach_deferred)
return domain->ops->is_attach_deferred(domain, dev);
const struct iommu_ops *ops = dev_iommu_ops(dev);
if (ops->is_attach_deferred)
return ops->is_attach_deferred(dev);
return false;
}
@ -894,7 +893,7 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain && !iommu_is_attach_deferred(group->domain, dev))
if (group->domain && !iommu_is_attach_deferred(dev))
ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
if (ret)
@ -1255,10 +1254,10 @@ int iommu_page_response(struct device *dev,
struct iommu_fault_event *evt;
struct iommu_fault_page_request *prm;
struct dev_iommu *param = dev->iommu;
const struct iommu_ops *ops = dev_iommu_ops(dev);
bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
if (!domain || !domain->ops->page_response)
if (!ops->page_response)
return -ENODEV;
if (!param || !param->fault_param)
@ -1299,7 +1298,7 @@ int iommu_page_response(struct device *dev,
msg->pasid = 0;
}
ret = domain->ops->page_response(dev, evt, msg);
ret = ops->page_response(dev, evt, msg);
list_del(&evt->list);
kfree(evt);
break;
@ -1524,7 +1523,7 @@ EXPORT_SYMBOL_GPL(fsl_mc_device_group);
static int iommu_get_def_domain_type(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
const struct iommu_ops *ops = dev_iommu_ops(dev);
if (dev_is_pci(dev) && to_pci_dev(dev)->untrusted)
return IOMMU_DOMAIN_DMA;
@ -1583,7 +1582,7 @@ static int iommu_alloc_default_domain(struct iommu_group *group,
*/
static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
const struct iommu_ops *ops = dev_iommu_ops(dev);
struct iommu_group *group;
int ret;
@ -1591,9 +1590,6 @@ static struct iommu_group *iommu_group_get_for_dev(struct device *dev)
if (group)
return group;
if (!ops)
return ERR_PTR(-EINVAL);
group = ops->device_group(dev);
if (WARN_ON_ONCE(group == NULL))
return ERR_PTR(-EINVAL);
@ -1748,7 +1744,7 @@ static int iommu_group_do_dma_attach(struct device *dev, void *data)
struct iommu_domain *domain = data;
int ret = 0;
if (!iommu_is_attach_deferred(domain, dev))
if (!iommu_is_attach_deferred(dev))
ret = __iommu_attach_device(domain, dev);
return ret;
@ -1762,10 +1758,10 @@ static int __iommu_group_dma_attach(struct iommu_group *group)
static int iommu_group_do_probe_finalize(struct device *dev, void *data)
{
struct iommu_domain *domain = data;
const struct iommu_ops *ops = dev_iommu_ops(dev);
if (domain->ops->probe_finalize)
domain->ops->probe_finalize(dev);
if (ops->probe_finalize)
ops->probe_finalize(dev);
return 0;
}
@ -1954,10 +1950,11 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
if (!domain)
return NULL;
domain->ops = bus->iommu_ops;
domain->type = type;
/* Assume all sizes by default; the driver may override this later */
domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
domain->pgsize_bitmap = bus->iommu_ops->pgsize_bitmap;
if (!domain->ops)
domain->ops = bus->iommu_ops->default_domain_ops;
if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
iommu_domain_free(domain);
@ -1975,7 +1972,7 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
iommu_put_dma_cookie(domain);
domain->ops->domain_free(domain);
domain->ops->free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
@ -2023,228 +2020,16 @@ EXPORT_SYMBOL_GPL(iommu_attach_device);
int iommu_deferred_attach(struct device *dev, struct iommu_domain *domain)
{
const struct iommu_ops *ops = domain->ops;
if (ops->is_attach_deferred && ops->is_attach_deferred(domain, dev))
if (iommu_is_attach_deferred(dev))
return __iommu_attach_device(domain, dev);
return 0;
}
/*
* Check flags and other user provided data for valid combinations. We also
* make sure no reserved fields or unused flags are set. This is to ensure
* not breaking userspace in the future when these fields or flags are used.
*/
static int iommu_check_cache_invl_data(struct iommu_cache_invalidate_info *info)
{
u32 mask;
int i;
if (info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
return -EINVAL;
mask = (1 << IOMMU_CACHE_INV_TYPE_NR) - 1;
if (info->cache & ~mask)
return -EINVAL;
if (info->granularity >= IOMMU_INV_GRANU_NR)
return -EINVAL;
switch (info->granularity) {
case IOMMU_INV_GRANU_ADDR:
if (info->cache & IOMMU_CACHE_INV_TYPE_PASID)
return -EINVAL;
mask = IOMMU_INV_ADDR_FLAGS_PASID |
IOMMU_INV_ADDR_FLAGS_ARCHID |
IOMMU_INV_ADDR_FLAGS_LEAF;
if (info->granu.addr_info.flags & ~mask)
return -EINVAL;
break;
case IOMMU_INV_GRANU_PASID:
mask = IOMMU_INV_PASID_FLAGS_PASID |
IOMMU_INV_PASID_FLAGS_ARCHID;
if (info->granu.pasid_info.flags & ~mask)
return -EINVAL;
break;
case IOMMU_INV_GRANU_DOMAIN:
if (info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB)
return -EINVAL;
break;
default:
return -EINVAL;
}
/* Check reserved padding fields */
for (i = 0; i < sizeof(info->padding); i++) {
if (info->padding[i])
return -EINVAL;
}
return 0;
}
int iommu_uapi_cache_invalidate(struct iommu_domain *domain, struct device *dev,
void __user *uinfo)
{
struct iommu_cache_invalidate_info inv_info = { 0 };
u32 minsz;
int ret;
if (unlikely(!domain->ops->cache_invalidate))
return -ENODEV;
/*
* No new spaces can be added before the variable sized union, the
* minimum size is the offset to the union.
*/
minsz = offsetof(struct iommu_cache_invalidate_info, granu);
/* Copy minsz from user to get flags and argsz */
if (copy_from_user(&inv_info, uinfo, minsz))
return -EFAULT;
/* Fields before the variable size union are mandatory */
if (inv_info.argsz < minsz)
return -EINVAL;
/* PASID and address granu require additional info beyond minsz */
if (inv_info.granularity == IOMMU_INV_GRANU_PASID &&
inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.pasid_info))
return -EINVAL;
if (inv_info.granularity == IOMMU_INV_GRANU_ADDR &&
inv_info.argsz < offsetofend(struct iommu_cache_invalidate_info, granu.addr_info))
return -EINVAL;
/*
* User might be using a newer UAPI header which has a larger data
* size, we shall support the existing flags within the current
* size. Copy the remaining user data _after_ minsz but not more
* than the current kernel supported size.
*/
if (copy_from_user((void *)&inv_info + minsz, uinfo + minsz,
min_t(u32, inv_info.argsz, sizeof(inv_info)) - minsz))
return -EFAULT;
/* Now the argsz is validated, check the content */
ret = iommu_check_cache_invl_data(&inv_info);
if (ret)
return ret;
return domain->ops->cache_invalidate(domain, dev, &inv_info);
}
EXPORT_SYMBOL_GPL(iommu_uapi_cache_invalidate);
static int iommu_check_bind_data(struct iommu_gpasid_bind_data *data)
{
u64 mask;
int i;
if (data->version != IOMMU_GPASID_BIND_VERSION_1)
return -EINVAL;
/* Check the range of supported formats */
if (data->format >= IOMMU_PASID_FORMAT_LAST)
return -EINVAL;
/* Check all flags */
mask = IOMMU_SVA_GPASID_VAL;
if (data->flags & ~mask)
return -EINVAL;
/* Check reserved padding fields */
for (i = 0; i < sizeof(data->padding); i++) {
if (data->padding[i])
return -EINVAL;
}
return 0;
}
static int iommu_sva_prepare_bind_data(void __user *udata,
struct iommu_gpasid_bind_data *data)
{
u32 minsz;
/*
* No new spaces can be added before the variable sized union, the
* minimum size is the offset to the union.
*/
minsz = offsetof(struct iommu_gpasid_bind_data, vendor);
/* Copy minsz from user to get flags and argsz */
if (copy_from_user(data, udata, minsz))
return -EFAULT;
/* Fields before the variable size union are mandatory */
if (data->argsz < minsz)
return -EINVAL;
/*
* User might be using a newer UAPI header, we shall let IOMMU vendor
* driver decide on what size it needs. Since the guest PASID bind data
* can be vendor specific, larger argsz could be the result of extension
* for one vendor but it should not affect another vendor.
* Copy the remaining user data _after_ minsz
*/
if (copy_from_user((void *)data + minsz, udata + minsz,
min_t(u32, data->argsz, sizeof(*data)) - minsz))
return -EFAULT;
return iommu_check_bind_data(data);
}
int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain, struct device *dev,
void __user *udata)
{
struct iommu_gpasid_bind_data data = { 0 };
int ret;
if (unlikely(!domain->ops->sva_bind_gpasid))
return -ENODEV;
ret = iommu_sva_prepare_bind_data(udata, &data);
if (ret)
return ret;
return domain->ops->sva_bind_gpasid(domain, dev, &data);
}
EXPORT_SYMBOL_GPL(iommu_uapi_sva_bind_gpasid);
int iommu_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
ioasid_t pasid)
{
if (unlikely(!domain->ops->sva_unbind_gpasid))
return -ENODEV;
return domain->ops->sva_unbind_gpasid(dev, pasid);
}
EXPORT_SYMBOL_GPL(iommu_sva_unbind_gpasid);
int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain, struct device *dev,
void __user *udata)
{
struct iommu_gpasid_bind_data data = { 0 };
int ret;
if (unlikely(!domain->ops->sva_bind_gpasid))
return -ENODEV;
ret = iommu_sva_prepare_bind_data(udata, &data);
if (ret)
return ret;
return iommu_sva_unbind_gpasid(domain, dev, data.hpasid);
}
EXPORT_SYMBOL_GPL(iommu_uapi_sva_unbind_gpasid);
static void __iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
if (iommu_is_attach_deferred(domain, dev))
if (iommu_is_attach_deferred(dev))
return;
if (unlikely(domain->ops->detach_dev == NULL))
@ -2458,7 +2243,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot,
gfp_t gfp, size_t *mapped)
{
const struct iommu_ops *ops = domain->ops;
const struct iommu_domain_ops *ops = domain->ops;
size_t pgsize, count;
int ret;
@ -2481,7 +2266,7 @@ static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
const struct iommu_domain_ops *ops = domain->ops;
unsigned long orig_iova = iova;
unsigned int min_pagesz;
size_t orig_size = size;
@ -2541,7 +2326,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
static int _iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
const struct iommu_domain_ops *ops = domain->ops;
int ret;
ret = __iommu_map(domain, iova, paddr, size, prot, gfp);
@ -2570,7 +2355,7 @@ static size_t __iommu_unmap_pages(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
{
const struct iommu_ops *ops = domain->ops;
const struct iommu_domain_ops *ops = domain->ops;
size_t pgsize, count;
pgsize = iommu_pgsize(domain, iova, iova, size, &count);
@ -2583,7 +2368,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size,
struct iommu_iotlb_gather *iotlb_gather)
{
const struct iommu_ops *ops = domain->ops;
const struct iommu_domain_ops *ops = domain->ops;
size_t unmapped_page, unmapped = 0;
unsigned long orig_iova = iova;
unsigned int min_pagesz;
@ -2659,7 +2444,7 @@ static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot,
gfp_t gfp)
{
const struct iommu_ops *ops = domain->ops;
const struct iommu_domain_ops *ops = domain->ops;
size_t len = 0, mapped = 0;
phys_addr_t start;
unsigned int i = 0;
@ -2792,17 +2577,17 @@ EXPORT_SYMBOL_GPL(iommu_set_pgtable_quirks);
void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
const struct iommu_ops *ops = dev_iommu_ops(dev);
if (ops && ops->get_resv_regions)
if (ops->get_resv_regions)
ops->get_resv_regions(dev, list);
}
void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
const struct iommu_ops *ops = dev_iommu_ops(dev);
if (ops && ops->put_resv_regions)
if (ops->put_resv_regions)
ops->put_resv_regions(dev, list);
}
@ -2959,8 +2744,6 @@ EXPORT_SYMBOL_GPL(iommu_dev_enable_feature);
/*
* The device drivers should do the necessary cleanups before calling this.
* For example, before disabling the aux-domain feature, the device driver
* should detach all aux-domains. Otherwise, this will return -EBUSY.
*/
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
{
@ -2988,50 +2771,6 @@ bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features feat)
}
EXPORT_SYMBOL_GPL(iommu_dev_feature_enabled);
/*
* Aux-domain specific attach/detach.
*
* Only works if iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX) returns
* true. Also, as long as domains are attached to a device through this
* interface, any tries to call iommu_attach_device() should fail
* (iommu_detach_device() can't fail, so we fail when trying to re-attach).
* This should make us safe against a device being attached to a guest as a
* whole while there are still pasid users on it (aux and sva).
*/
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
int ret = -ENODEV;
if (domain->ops->aux_attach_dev)
ret = domain->ops->aux_attach_dev(domain, dev);
if (!ret)
trace_attach_device_to_domain(dev);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_attach_device);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
if (domain->ops->aux_detach_dev) {
domain->ops->aux_detach_dev(domain, dev);
trace_detach_device_from_domain(dev);
}
}
EXPORT_SYMBOL_GPL(iommu_aux_detach_device);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
int ret = -ENODEV;
if (domain->ops->aux_get_pasid)
ret = domain->ops->aux_get_pasid(domain, dev);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
/**
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
@ -3053,9 +2792,9 @@ iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
struct iommu_group *group;
struct iommu_sva *handle = ERR_PTR(-EINVAL);
const struct iommu_ops *ops = dev->bus->iommu_ops;
const struct iommu_ops *ops = dev_iommu_ops(dev);
if (!ops || !ops->sva_bind)
if (!ops->sva_bind)
return ERR_PTR(-ENODEV);
group = iommu_group_get(dev);
@ -3096,9 +2835,9 @@ void iommu_sva_unbind_device(struct iommu_sva *handle)
{
struct iommu_group *group;
struct device *dev = handle->dev;
const struct iommu_ops *ops = dev->bus->iommu_ops;
const struct iommu_ops *ops = dev_iommu_ops(dev);
if (!ops || !ops->sva_unbind)
if (!ops->sva_unbind)
return;
group = iommu_group_get(dev);
@ -3115,9 +2854,9 @@ EXPORT_SYMBOL_GPL(iommu_sva_unbind_device);
u32 iommu_sva_get_pasid(struct iommu_sva *handle)
{
const struct iommu_ops *ops = handle->dev->bus->iommu_ops;
const struct iommu_ops *ops = dev_iommu_ops(handle->dev);
if (!ops || !ops->sva_get_pasid)
if (!ops->sva_get_pasid)
return IOMMU_PASID_INVALID;
return ops->sva_get_pasid(handle);


@ -15,13 +15,14 @@
/* The anchor node sits above the top of the usable address space */
#define IOVA_ANCHOR ~0UL
#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
static bool iova_rcache_insert(struct iova_domain *iovad,
unsigned long pfn,
unsigned long size);
static unsigned long iova_rcache_get(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn);
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
@ -64,8 +65,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD, &iovad->cpuhp_dead);
init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@ -95,10 +94,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
cached_iova = to_iova(iovad->cached32_node);
if (free == cached_iova ||
(free->pfn_hi < iovad->dma_32bit_pfn &&
free->pfn_lo >= cached_iova->pfn_lo)) {
free->pfn_lo >= cached_iova->pfn_lo))
iovad->cached32_node = rb_next(&free->node);
if (free->pfn_lo < iovad->dma_32bit_pfn)
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
}
cached_iova = to_iova(iovad->cached_node);
if (free->pfn_lo >= cached_iova->pfn_lo)
@ -488,6 +488,13 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
}
EXPORT_SYMBOL_GPL(free_iova_fast);
static void iova_domain_free_rcaches(struct iova_domain *iovad)
{
cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
&iovad->cpuhp_dead);
free_iova_rcaches(iovad);
}
/**
* put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
@ -497,9 +504,9 @@ void put_iova_domain(struct iova_domain *iovad)
{
struct iova *iova, *tmp;
cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
&iovad->cpuhp_dead);
free_iova_rcaches(iovad);
if (iovad->rcaches)
iova_domain_free_rcaches(iovad);
rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
free_iova_mem(iova);
}
@ -608,6 +615,7 @@ EXPORT_SYMBOL_GPL(reserve_iova);
*/
#define IOVA_MAG_SIZE 128
#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
struct iova_magazine {
unsigned long size;
@ -620,6 +628,13 @@ struct iova_cpu_rcache {
struct iova_magazine *prev;
};
struct iova_rcache {
spinlock_t lock;
unsigned long depot_size;
struct iova_magazine *depot[MAX_GLOBAL_MAGS];
struct iova_cpu_rcache __percpu *cpu_rcaches;
};
static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
{
return kzalloc(sizeof(struct iova_magazine), flags);
@ -693,28 +708,54 @@ static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
mag->pfns[mag->size++] = pfn;
}
static void init_iova_rcaches(struct iova_domain *iovad)
int iova_domain_init_rcaches(struct iova_domain *iovad)
{
struct iova_cpu_rcache *cpu_rcache;
struct iova_rcache *rcache;
unsigned int cpu;
int i;
int i, ret;
iovad->rcaches = kcalloc(IOVA_RANGE_CACHE_MAX_SIZE,
sizeof(struct iova_rcache),
GFP_KERNEL);
if (!iovad->rcaches)
return -ENOMEM;
for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
struct iova_cpu_rcache *cpu_rcache;
struct iova_rcache *rcache;
rcache = &iovad->rcaches[i];
spin_lock_init(&rcache->lock);
rcache->depot_size = 0;
rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
if (WARN_ON(!rcache->cpu_rcaches))
continue;
rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache),
cache_line_size());
if (!rcache->cpu_rcaches) {
ret = -ENOMEM;
goto out_err;
}
for_each_possible_cpu(cpu) {
cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
spin_lock_init(&cpu_rcache->lock);
cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
if (!cpu_rcache->loaded || !cpu_rcache->prev) {
ret = -ENOMEM;
goto out_err;
}
}
}
ret = cpuhp_state_add_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
&iovad->cpuhp_dead);
if (ret)
goto out_err;
return 0;
out_err:
free_iova_rcaches(iovad);
return ret;
}
EXPORT_SYMBOL_GPL(iova_domain_init_rcaches);
/*
* Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
@ -831,7 +872,7 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
{
unsigned int log_size = order_base_2(size);
if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE || !iovad->rcaches)
return 0;
return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn - size);
@ -849,6 +890,8 @@ static void free_iova_rcaches(struct iova_domain *iovad)
for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
rcache = &iovad->rcaches[i];
if (!rcache->cpu_rcaches)
break;
for_each_possible_cpu(cpu) {
cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
iova_magazine_free(cpu_rcache->loaded);
@ -858,6 +901,9 @@ static void free_iova_rcaches(struct iova_domain *iovad)
for (j = 0; j < rcache->depot_size; ++j)
iova_magazine_free(rcache->depot[j]);
}
kfree(iovad->rcaches);
iovad->rcaches = NULL;
}
/*
View File
@ -719,6 +719,7 @@ static int ipmmu_init_platform_device(struct device *dev,
static const struct soc_device_attribute soc_needs_opt_in[] = {
{ .family = "R-Car Gen3", },
{ .family = "R-Car Gen4", },
{ .family = "RZ/G2", },
{ /* sentinel */ }
};
@ -743,7 +744,7 @@ static bool ipmmu_device_is_allowed(struct device *dev)
unsigned int i;
/*
* R-Car Gen3 and RZ/G2 use the allow list to opt-in devices.
* R-Car Gen3/4 and RZ/G2 use the allow list to opt devices in.
* For other SoCs, this returns true anyway.
*/
if (!soc_device_match(soc_needs_opt_in))
@ -868,14 +869,6 @@ static struct iommu_group *ipmmu_find_group(struct device *dev)
static const struct iommu_ops ipmmu_ops = {
.domain_alloc = ipmmu_domain_alloc,
.domain_free = ipmmu_domain_free,
.attach_dev = ipmmu_attach_device,
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
.unmap = ipmmu_unmap,
.flush_iotlb_all = ipmmu_flush_iotlb_all,
.iotlb_sync = ipmmu_iotlb_sync,
.iova_to_phys = ipmmu_iova_to_phys,
.probe_device = ipmmu_probe_device,
.release_device = ipmmu_release_device,
.probe_finalize = ipmmu_probe_finalize,
@ -883,6 +876,16 @@ static const struct iommu_ops ipmmu_ops = {
? generic_device_group : ipmmu_find_group,
.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
.of_xlate = ipmmu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = ipmmu_attach_device,
.detach_dev = ipmmu_detach_device,
.map = ipmmu_map,
.unmap = ipmmu_unmap,
.flush_iotlb_all = ipmmu_flush_iotlb_all,
.iotlb_sync = ipmmu_iotlb_sync,
.iova_to_phys = ipmmu_iova_to_phys,
.free = ipmmu_domain_free,
}
};
/* -----------------------------------------------------------------------------
@ -926,7 +929,7 @@ static const struct ipmmu_features ipmmu_features_rcar_gen3 = {
.utlb_offset_base = 0,
};
static const struct ipmmu_features ipmmu_features_r8a779a0 = {
static const struct ipmmu_features ipmmu_features_rcar_gen4 = {
.use_ns_alias_offset = false,
.has_cache_leaf_nodes = true,
.number_of_contexts = 16,
@ -982,7 +985,10 @@ static const struct of_device_id ipmmu_of_ids[] = {
.data = &ipmmu_features_rcar_gen3,
}, {
.compatible = "renesas,ipmmu-r8a779a0",
.data = &ipmmu_features_r8a779a0,
.data = &ipmmu_features_rcar_gen4,
}, {
.compatible = "renesas,rcar-gen4-ipmmu",
.data = &ipmmu_features_rcar_gen4,
}, {
/* Terminator */
},
@ -1006,7 +1012,9 @@ static int ipmmu_probe(struct platform_device *pdev)
bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);
mmu->features = of_device_get_match_data(&pdev->dev);
memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs);
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
if (ret)
return ret;
/* Map I/O memory and request IRQ. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
View File
@ -558,11 +558,6 @@ fail:
return ret;
}
static bool msm_iommu_capable(enum iommu_cap cap)
{
return false;
}
static void print_ctx_regs(void __iomem *base, int ctx)
{
unsigned int fsr = GET_FSR(base, ctx);
@ -672,27 +667,28 @@ fail:
}
static struct iommu_ops msm_iommu_ops = {
.capable = msm_iommu_capable,
.domain_alloc = msm_iommu_domain_alloc,
.domain_free = msm_iommu_domain_free,
.attach_dev = msm_iommu_attach_dev,
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
/*
* Nothing is needed here, the barrier to guarantee
* completion of the tlb sync operation is implicitly
* taken care when the iommu client does a writel before
* kick starting the other master.
*/
.iotlb_sync = NULL,
.iotlb_sync_map = msm_iommu_sync_map,
.iova_to_phys = msm_iommu_iova_to_phys,
.probe_device = msm_iommu_probe_device,
.release_device = msm_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = msm_iommu_attach_dev,
.detach_dev = msm_iommu_detach_dev,
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
/*
* Nothing is needed here, the barrier to guarantee
* completion of the tlb sync operation is implicitly
* taken care of when the IOMMU client does a writel before
* kick-starting the other master.
*/
.iotlb_sync = NULL,
.iotlb_sync_map = msm_iommu_sync_map,
.iova_to_phys = msm_iommu_iova_to_phys,
.free = msm_iommu_domain_free,
}
};
static int msm_iommu_probe(struct platform_device *pdev)
@ -710,36 +706,32 @@ static int msm_iommu_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&iommu->ctx_list);
iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
if (IS_ERR(iommu->pclk)) {
dev_err(iommu->dev, "could not get smmu_pclk\n");
return PTR_ERR(iommu->pclk);
}
if (IS_ERR(iommu->pclk))
return dev_err_probe(iommu->dev, PTR_ERR(iommu->pclk),
"could not get smmu_pclk\n");
ret = clk_prepare(iommu->pclk);
if (ret) {
dev_err(iommu->dev, "could not prepare smmu_pclk\n");
return ret;
}
if (ret)
return dev_err_probe(iommu->dev, ret,
"could not prepare smmu_pclk\n");
iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
if (IS_ERR(iommu->clk)) {
dev_err(iommu->dev, "could not get iommu_clk\n");
clk_unprepare(iommu->pclk);
return PTR_ERR(iommu->clk);
return dev_err_probe(iommu->dev, PTR_ERR(iommu->clk),
"could not get iommu_clk\n");
}
ret = clk_prepare(iommu->clk);
if (ret) {
dev_err(iommu->dev, "could not prepare iommu_clk\n");
clk_unprepare(iommu->pclk);
return ret;
return dev_err_probe(iommu->dev, ret, "could not prepare iommu_clk\n");
}
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iommu->base = devm_ioremap_resource(iommu->dev, r);
if (IS_ERR(iommu->base)) {
dev_err(iommu->dev, "could not get iommu base\n");
ret = PTR_ERR(iommu->base);
ret = dev_err_probe(iommu->dev, PTR_ERR(iommu->base), "could not get iommu base\n");
goto fail;
}
ioaddr = r->start;
@ -831,16 +823,4 @@ static struct platform_driver msm_iommu_driver = {
.probe = msm_iommu_probe,
.remove = msm_iommu_remove,
};
static int __init msm_iommu_driver_init(void)
{
int ret;
ret = platform_driver_register(&msm_iommu_driver);
if (ret != 0)
pr_err("Failed to register IOMMU driver\n");
return ret;
}
subsys_initcall(msm_iommu_driver_init);
builtin_platform_driver(msm_iommu_driver);
View File
@ -210,33 +210,27 @@ static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
static void mtk_iommu_tlb_flush_all(struct mtk_iommu_data *data)
{
for_each_m4u(data) {
if (pm_runtime_get_if_in_use(data->dev) <= 0)
continue;
unsigned long flags;
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + data->plat_data->inv_sel_reg);
writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
wmb(); /* Make sure the tlb flush all done */
pm_runtime_put(data->dev);
}
spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
data->base + data->plat_data->inv_sel_reg);
writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
wmb(); /* Make sure the tlb flush all done */
spin_unlock_irqrestore(&data->tlb_lock, flags);
}
static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
size_t granule,
struct mtk_iommu_data *data)
{
bool has_pm = !!data->dev->pm_domain;
unsigned long flags;
int ret;
u32 tmp;
for_each_m4u(data) {
if (has_pm) {
if (pm_runtime_get_if_in_use(data->dev) <= 0)
continue;
}
if (pm_runtime_get_if_in_use(data->dev) <= 0)
continue;
spin_lock_irqsave(&data->tlb_lock, flags);
writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
@ -252,17 +246,18 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
/* tlb sync */
ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
tmp, tmp != 0, 10, 1000);
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
spin_unlock_irqrestore(&data->tlb_lock, flags);
if (ret) {
dev_warn(data->dev,
"Partial TLB flush timed out, falling back to full flush\n");
mtk_iommu_tlb_flush_all(data);
}
/* Clear the CPE status */
writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
spin_unlock_irqrestore(&data->tlb_lock, flags);
if (has_pm)
pm_runtime_put(data->dev);
pm_runtime_put(data->dev);
}
}
@ -658,15 +653,6 @@ static void mtk_iommu_get_resv_regions(struct device *dev,
static const struct iommu_ops mtk_iommu_ops = {
.domain_alloc = mtk_iommu_domain_alloc,
.domain_free = mtk_iommu_domain_free,
.attach_dev = mtk_iommu_attach_device,
.detach_dev = mtk_iommu_detach_device,
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
.iotlb_sync = mtk_iommu_iotlb_sync,
.iotlb_sync_map = mtk_iommu_sync_map,
.iova_to_phys = mtk_iommu_iova_to_phys,
.probe_device = mtk_iommu_probe_device,
.release_device = mtk_iommu_release_device,
.device_group = mtk_iommu_device_group,
@ -675,6 +661,17 @@ static const struct iommu_ops mtk_iommu_ops = {
.put_resv_regions = generic_iommu_put_resv_regions,
.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = mtk_iommu_attach_device,
.detach_dev = mtk_iommu_detach_device,
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
.iotlb_sync = mtk_iommu_iotlb_sync,
.iotlb_sync_map = mtk_iommu_sync_map,
.iova_to_phys = mtk_iommu_iova_to_phys,
.free = mtk_iommu_domain_free,
}
};
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
@ -980,6 +977,13 @@ static int __maybe_unused mtk_iommu_runtime_resume(struct device *dev)
writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK, base + REG_MMU_PT_BASE_ADDR);
/*
* Users may allocate a DMA buffer before they call pm_runtime_get(),
* in which case the mapping will lack the necessary TLB flush.
* Thus, make sure to update the TLB after each PM resume.
*/
mtk_iommu_tlb_flush_all(data);
return 0;
}
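The comment above describes a consumer that maps a DMA buffer while the IOMMU may still be runtime-suspended; a hedged sketch of that ordering, with example_alloc_before_resume() purely hypothetical:

/* Hypothetical consumer: the buffer is mapped through the IOMMU before the
 * power domain resumes, so the TLB is only brought up to date by the
 * flush-all in the resume path above. */
static void *example_alloc_before_resume(struct device *dev, size_t size,
					 dma_addr_t *dma)
{
	void *cpu = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	if (cpu)
		pm_runtime_get_sync(dev);

	return cpu;
}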
View File
@ -514,12 +514,6 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
static const struct iommu_ops mtk_iommu_ops = {
.domain_alloc = mtk_iommu_domain_alloc,
.domain_free = mtk_iommu_domain_free,
.attach_dev = mtk_iommu_attach_device,
.detach_dev = mtk_iommu_detach_device,
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.iova_to_phys = mtk_iommu_iova_to_phys,
.probe_device = mtk_iommu_probe_device,
.probe_finalize = mtk_iommu_probe_finalize,
.release_device = mtk_iommu_release_device,
@ -527,6 +521,14 @@ static const struct iommu_ops mtk_iommu_ops = {
.device_group = generic_device_group,
.pgsize_bitmap = ~0UL << MT2701_IOMMU_PAGE_SHIFT,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = mtk_iommu_attach_device,
.detach_dev = mtk_iommu_detach_device,
.map = mtk_iommu_map,
.unmap = mtk_iommu_unmap,
.iova_to_phys = mtk_iommu_iova_to_phys,
.free = mtk_iommu_domain_free,
}
};
static const struct of_device_id mtk_iommu_of_ids[] = {
View File
@ -1734,16 +1734,18 @@ static struct iommu_group *omap_iommu_device_group(struct device *dev)
static const struct iommu_ops omap_iommu_ops = {
.domain_alloc = omap_iommu_domain_alloc,
.domain_free = omap_iommu_domain_free,
.attach_dev = omap_iommu_attach_dev,
.detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
.probe_device = omap_iommu_probe_device,
.release_device = omap_iommu_release_device,
.device_group = omap_iommu_device_group,
.pgsize_bitmap = OMAP_IOMMU_PGSIZES,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = omap_iommu_attach_dev,
.detach_dev = omap_iommu_detach_dev,
.map = omap_iommu_map,
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
.free = omap_iommu_domain_free,
}
};
static int __init omap_iommu_init(void)
View File
@ -1187,17 +1187,19 @@ static int rk_iommu_of_xlate(struct device *dev,
static const struct iommu_ops rk_iommu_ops = {
.domain_alloc = rk_iommu_domain_alloc,
.domain_free = rk_iommu_domain_free,
.attach_dev = rk_iommu_attach_device,
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
.probe_device = rk_iommu_probe_device,
.release_device = rk_iommu_release_device,
.iova_to_phys = rk_iommu_iova_to_phys,
.device_group = rk_iommu_device_group,
.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
.of_xlate = rk_iommu_of_xlate,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = rk_iommu_attach_device,
.detach_dev = rk_iommu_detach_device,
.map = rk_iommu_map,
.unmap = rk_iommu_unmap,
.iova_to_phys = rk_iommu_iova_to_phys,
.free = rk_iommu_domain_free,
}
};
static int rk_iommu_probe(struct platform_device *pdev)
@ -1407,9 +1409,4 @@ static struct platform_driver rk_iommu_driver = {
.suppress_bind_attrs = true,
},
};
static int __init rk_iommu_init(void)
{
return platform_driver_register(&rk_iommu_driver);
}
subsys_initcall(rk_iommu_init);
builtin_platform_driver(rk_iommu_driver);
View File
@ -363,16 +363,18 @@ void zpci_destroy_iommu(struct zpci_dev *zdev)
static const struct iommu_ops s390_iommu_ops = {
.capable = s390_iommu_capable,
.domain_alloc = s390_domain_alloc,
.domain_free = s390_domain_free,
.attach_dev = s390_iommu_attach_device,
.detach_dev = s390_iommu_detach_device,
.map = s390_iommu_map,
.unmap = s390_iommu_unmap,
.iova_to_phys = s390_iommu_iova_to_phys,
.probe_device = s390_iommu_probe_device,
.release_device = s390_iommu_release_device,
.device_group = generic_device_group,
.pgsize_bitmap = S390_IOMMU_PGSIZES,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = s390_iommu_attach_device,
.detach_dev = s390_iommu_detach_device,
.map = s390_iommu_map,
.unmap = s390_iommu_unmap,
.iova_to_phys = s390_iommu_iova_to_phys,
.free = s390_domain_free,
}
};
static int __init s390_iommu_init(void)
View File
@ -416,20 +416,22 @@ static int sprd_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static const struct iommu_ops sprd_iommu_ops = {
.domain_alloc = sprd_iommu_domain_alloc,
.domain_free = sprd_iommu_domain_free,
.attach_dev = sprd_iommu_attach_device,
.detach_dev = sprd_iommu_detach_device,
.map = sprd_iommu_map,
.unmap = sprd_iommu_unmap,
.iotlb_sync_map = sprd_iommu_sync_map,
.iotlb_sync = sprd_iommu_sync,
.iova_to_phys = sprd_iommu_iova_to_phys,
.probe_device = sprd_iommu_probe_device,
.release_device = sprd_iommu_release_device,
.device_group = sprd_iommu_device_group,
.of_xlate = sprd_iommu_of_xlate,
.pgsize_bitmap = ~0UL << SPRD_IOMMU_PAGE_SHIFT,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sprd_iommu_attach_device,
.detach_dev = sprd_iommu_detach_device,
.map = sprd_iommu_map,
.unmap = sprd_iommu_unmap,
.iotlb_sync_map = sprd_iommu_sync_map,
.iotlb_sync = sprd_iommu_sync,
.iova_to_phys = sprd_iommu_iova_to_phys,
.free = sprd_iommu_domain_free,
}
};
static const struct of_device_id sprd_iommu_of_match[] = {
View File
@ -760,19 +760,21 @@ static int sun50i_iommu_of_xlate(struct device *dev,
static const struct iommu_ops sun50i_iommu_ops = {
.pgsize_bitmap = SZ_4K,
.attach_dev = sun50i_iommu_attach_device,
.detach_dev = sun50i_iommu_detach_device,
.device_group = sun50i_iommu_device_group,
.domain_alloc = sun50i_iommu_domain_alloc,
.domain_free = sun50i_iommu_domain_free,
.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
.iotlb_sync = sun50i_iommu_iotlb_sync,
.iova_to_phys = sun50i_iommu_iova_to_phys,
.map = sun50i_iommu_map,
.of_xlate = sun50i_iommu_of_xlate,
.probe_device = sun50i_iommu_probe_device,
.release_device = sun50i_iommu_release_device,
.unmap = sun50i_iommu_unmap,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = sun50i_iommu_attach_device,
.detach_dev = sun50i_iommu_detach_device,
.flush_iotlb_all = sun50i_iommu_flush_iotlb_all,
.iotlb_sync = sun50i_iommu_iotlb_sync,
.iova_to_phys = sun50i_iommu_iova_to_phys,
.map = sun50i_iommu_map,
.unmap = sun50i_iommu_unmap,
.free = sun50i_iommu_domain_free,
}
};
static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu,
View File
@ -238,11 +238,6 @@ static phys_addr_t gart_iommu_iova_to_phys(struct iommu_domain *domain,
return pte & GART_PAGE_MASK;
}
static bool gart_iommu_capable(enum iommu_cap cap)
{
return false;
}
static struct iommu_device *gart_iommu_probe_device(struct device *dev)
{
if (!dev_iommu_fwspec_get(dev))
@ -276,21 +271,22 @@ static void gart_iommu_sync(struct iommu_domain *domain,
}
static const struct iommu_ops gart_iommu_ops = {
.capable = gart_iommu_capable,
.domain_alloc = gart_iommu_domain_alloc,
.domain_free = gart_iommu_domain_free,
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.probe_device = gart_iommu_probe_device,
.release_device = gart_iommu_release_device,
.device_group = generic_device_group,
.map = gart_iommu_map,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
.of_xlate = gart_iommu_of_xlate,
.iotlb_sync_map = gart_iommu_sync_map,
.iotlb_sync = gart_iommu_sync,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.iotlb_sync_map = gart_iommu_sync_map,
.iotlb_sync = gart_iommu_sync,
.free = gart_iommu_domain_free,
}
};
int tegra_gart_suspend(struct gart_device *gart)
View File
@ -272,11 +272,6 @@ static void tegra_smmu_free_asid(struct tegra_smmu *smmu, unsigned int id)
clear_bit(id, smmu->asids);
}
static bool tegra_smmu_capable(enum iommu_cap cap)
{
return false;
}
static struct iommu_domain *tegra_smmu_domain_alloc(unsigned type)
{
struct tegra_smmu_as *as;
@ -969,19 +964,20 @@ static int tegra_smmu_of_xlate(struct device *dev,
}
static const struct iommu_ops tegra_smmu_ops = {
.capable = tegra_smmu_capable,
.domain_alloc = tegra_smmu_domain_alloc,
.domain_free = tegra_smmu_domain_free,
.attach_dev = tegra_smmu_attach_dev,
.detach_dev = tegra_smmu_detach_dev,
.probe_device = tegra_smmu_probe_device,
.release_device = tegra_smmu_release_device,
.device_group = tegra_smmu_device_group,
.map = tegra_smmu_map,
.unmap = tegra_smmu_unmap,
.iova_to_phys = tegra_smmu_iova_to_phys,
.of_xlate = tegra_smmu_of_xlate,
.pgsize_bitmap = SZ_4K,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = tegra_smmu_attach_dev,
.detach_dev = tegra_smmu_detach_dev,
.map = tegra_smmu_map,
.unmap = tegra_smmu_unmap,
.iova_to_phys = tegra_smmu_iova_to_phys,
.free = tegra_smmu_domain_free,
}
};
static void tegra_smmu_ahb_enable(void)
View File
@ -1008,12 +1008,6 @@ static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
static struct iommu_ops viommu_ops = {
.domain_alloc = viommu_domain_alloc,
.domain_free = viommu_domain_free,
.attach_dev = viommu_attach_dev,
.map = viommu_map,
.unmap = viommu_unmap,
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
.probe_device = viommu_probe_device,
.probe_finalize = viommu_probe_finalize,
.release_device = viommu_release_device,
@ -1022,6 +1016,14 @@ static struct iommu_ops viommu_ops = {
.put_resv_regions = generic_iommu_put_resv_regions,
.of_xlate = viommu_of_xlate,
.owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = viommu_attach_dev,
.map = viommu_map,
.unmap = viommu_unmap,
.iova_to_phys = viommu_iova_to_phys,
.iotlb_sync = viommu_iotlb_sync,
.free = viommu_domain_free,
}
};
static int viommu_init_vqs(struct viommu_dev *viommu)
View File
@ -654,6 +654,7 @@ static int smmu_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
{
struct smmu_pmu *smmu_pmu = data;
DECLARE_BITMAP(ovs, BITS_PER_TYPE(u64));
u64 ovsr;
unsigned int idx;
@ -663,7 +664,8 @@ static irqreturn_t smmu_pmu_handle_irq(int irq_num, void *data)
writeq(ovsr, smmu_pmu->reloc_base + SMMU_PMCG_OVSCLR0);
for_each_set_bit(idx, (unsigned long *)&ovsr, smmu_pmu->num_counters) {
bitmap_from_u64(ovs, ovsr);
for_each_set_bit(idx, ovs, smmu_pmu->num_counters) {
struct perf_event *event = smmu_pmu->events[idx];
struct hw_perf_event *hwc;
View File
@ -480,6 +480,7 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
struct file *file;
struct vduse_bounce_map *map;
unsigned long pfn, bounce_pfns;
int ret;
bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
if (iova_limit <= bounce_size)
@ -513,10 +514,20 @@ vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
spin_lock_init(&domain->iotlb_lock);
init_iova_domain(&domain->stream_iovad,
PAGE_SIZE, IOVA_START_PFN);
ret = iova_domain_init_rcaches(&domain->stream_iovad);
if (ret)
goto err_iovad_stream;
init_iova_domain(&domain->consistent_iovad,
PAGE_SIZE, bounce_pfns);
ret = iova_domain_init_rcaches(&domain->consistent_iovad);
if (ret)
goto err_iovad_consistent;
return domain;
err_iovad_consistent:
put_iova_domain(&domain->stream_iovad);
err_iovad_stream:
fput(file);
err_file:
vfree(domain->bounce_maps);
err_map:
View File
@ -525,12 +525,6 @@ struct context_entry {
*/
#define DOMAIN_FLAG_USE_FIRST_LEVEL BIT(1)
/*
* Domain represents a virtual machine which demands iommu nested
* translation mode support.
*/
#define DOMAIN_FLAG_NESTING_MODE BIT(2)
struct dmar_domain {
int nid; /* node id */
@ -548,7 +542,6 @@ struct dmar_domain {
u8 iommu_snooping: 1; /* indicate snooping control feature */
struct list_head devices; /* all devices' list */
struct list_head subdevices; /* all subdevices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
struct dma_pte *pgd; /* virtual address */
@ -563,11 +556,6 @@ struct dmar_domain {
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
u64 max_addr; /* maximum mapped address */
u32 default_pasid; /*
* The default pasid used for non-SVM
* traffic on mediated devices.
*/
struct iommu_domain domain; /* generic domain data structure for
iommu core */
};
@ -590,7 +578,6 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU
unsigned long *domain_ids; /* bitmap of domains */
struct dmar_domain ***domains; /* ptr to domains */
spinlock_t lock; /* protect context, domain ids */
struct root_entry *root_entry; /* virtual address */
@ -620,21 +607,11 @@ struct intel_iommu {
void *perf_statistic;
};
/* Per subdevice private data */
struct subdev_domain_info {
struct list_head link_phys; /* link to phys device siblings */
struct list_head link_domain; /* link to domain siblings */
struct device *pdev; /* physical device derived from */
struct dmar_domain *domain; /* aux-domain */
int users; /* user count */
};
/* PCI domain-device relationship */
struct device_domain_info {
struct list_head link; /* link to domain siblings */
struct list_head global; /* link to global list */
struct list_head table; /* link to pasid table */
struct list_head subdevices; /* subdevices sibling */
u32 segment; /* PCI segment number */
u8 bus; /* PCI bus number */
u8 devfn; /* PCI devfn number */
@ -645,7 +622,6 @@ struct device_domain_info {
u8 pri_enabled:1;
u8 ats_supported:1;
u8 ats_enabled:1;
u8 auxd_enabled:1; /* Multiple domains per device */
u8 ats_qdep;
struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
struct intel_iommu *iommu; /* IOMMU used by this device */
@ -717,7 +693,6 @@ static inline int nr_pte_to_next_page(struct dma_pte *pte)
}
extern struct dmar_drhd_unit * dmar_find_matched_drhd_unit(struct pci_dev *dev);
extern int dmar_find_matched_atsr_unit(struct pci_dev *dev);
extern int dmar_enable_qi(struct intel_iommu *iommu);
extern void dmar_disable_qi(struct intel_iommu *iommu);
@ -757,17 +732,12 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info,
void *data), void *data);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
struct dmar_domain *find_domain(struct device *dev);
struct device_domain_info *get_domain_info(struct device *dev);
struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn);
#ifdef CONFIG_INTEL_IOMMU_SVM
extern void intel_svm_check(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct iommu_gpasid_bind_data *data);
int intel_svm_unbind_gpasid(struct device *dev, u32 pasid);
struct iommu_sva *intel_svm_bind(struct device *dev, struct mm_struct *mm,
void *drvdata);
void intel_svm_unbind(struct iommu_sva *handle);
@ -795,7 +765,6 @@ struct intel_svm {
unsigned int flags;
u32 pasid;
int gpasid; /* In case that guest PASID is different from host PASID */
struct list_head devs;
};
#else
@ -813,6 +782,8 @@ bool context_present(struct context_entry *context);
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
u8 devfn, int alloc);
extern const struct iommu_ops intel_iommu_ops;
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
View File
@ -25,17 +25,5 @@
* do such IOTLB flushes automatically.
*/
#define SVM_FLAG_SUPERVISOR_MODE BIT(0)
/*
* The SVM_FLAG_GUEST_MODE flag is used when a PASID bind is for guest
* processes. Compared to the host bind, the primary differences are:
* 1. mm life cycle management
* 2. fault reporting
*/
#define SVM_FLAG_GUEST_MODE BIT(1)
/*
* The SVM_FLAG_GUEST_PASID flag is used when a guest has its own PASID space,
* which requires guest and host PASID translation at both directions.
*/
#define SVM_FLAG_GUEST_PASID BIT(2)
#endif /* __INTEL_SVM_H__ */
View File
@ -37,6 +37,7 @@ struct iommu_group;
struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
@ -88,7 +89,7 @@ struct iommu_domain_geometry {
struct iommu_domain {
unsigned type;
const struct iommu_ops *ops;
const struct iommu_domain_ops *ops;
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
iommu_fault_handler_t handler;
void *handler_token;
@ -144,7 +145,6 @@ struct iommu_resv_region {
/**
* enum iommu_dev_features - Per device IOMMU features
* @IOMMU_DEV_FEAT_AUX: Auxiliary domain feature
* @IOMMU_DEV_FEAT_SVA: Shared Virtual Addresses
* @IOMMU_DEV_FEAT_IOPF: I/O Page Faults such as PRI or Stall. Generally
* enabling %IOMMU_DEV_FEAT_SVA requires
@ -157,7 +157,6 @@ struct iommu_resv_region {
* iommu_dev_has_feature(), and enable it using iommu_dev_enable_feature().
*/
enum iommu_dev_features {
IOMMU_DEV_FEAT_AUX,
IOMMU_DEV_FEAT_SVA,
IOMMU_DEV_FEAT_IOPF,
};
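The remaining per-device features are toggled through iommu_dev_enable_feature()/iommu_dev_disable_feature(); a sketch of a driver opting into SVA, assuming (hypothetically) that the device relies on the IOMMU for I/O page faults and so enables IOPF first:

/* Hypothetical driver code; error handling kept minimal. */
static int example_enable_sva(struct device *dev)
{
	int ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);

	return ret;
}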
@ -194,9 +193,75 @@ struct iommu_iotlb_gather {
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
* @domain_alloc: allocate iommu domain
* @domain_free: free iommu domain
* @attach_dev: attach device to an iommu domain
* @detach_dev: detach device from an iommu domain
* @probe_device: Add device to iommu driver handling
* @release_device: Remove device from iommu driver handling
* @probe_finalize: Do final setup work after the device is added to an IOMMU
* group and attached to the group's domain
* @device_group: find iommu group for a particular device
* @get_resv_regions: Request list of reserved regions for a device
* @put_resv_regions: Free list of reserved regions for a device
* @of_xlate: add OF master IDs to iommu grouping
* @is_attach_deferred: Check if domain attach should be deferred from iommu
* driver init to device driver init (default no)
* @dev_has/enable/disable_feat: per device entries to check/enable/disable
* iommu specific features.
* @dev_feat_enabled: check enabled feature
* @sva_bind: Bind process address space to device
* @sva_unbind: Unbind process address space from device
* @sva_get_pasid: Get PASID associated to a SVA handle
* @page_response: handle page request response
* @def_domain_type: device default domain type, return value:
* - IOMMU_DOMAIN_IDENTITY: must use an identity domain
* - IOMMU_DOMAIN_DMA: must use a dma domain
* - 0: use the default setting
* @default_domain_ops: the default ops for domains
* @pgsize_bitmap: bitmap of all possible supported page sizes
* @owner: Driver module providing these ops
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
/* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
struct iommu_device *(*probe_device)(struct device *dev);
void (*release_device)(struct device *dev);
void (*probe_finalize)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
/* Request/Free a list of reserved regions for a device */
void (*get_resv_regions)(struct device *dev, struct list_head *list);
void (*put_resv_regions)(struct device *dev, struct list_head *list);
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
bool (*is_attach_deferred)(struct device *dev);
/* Per device IOMMU features */
bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
void *drvdata);
void (*sva_unbind)(struct iommu_sva *handle);
u32 (*sva_get_pasid)(struct iommu_sva *handle);
int (*page_response)(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg);
int (*def_domain_type)(struct device *dev);
const struct iommu_domain_ops *default_domain_ops;
unsigned long pgsize_bitmap;
struct module *owner;
};
/**
* struct iommu_domain_ops - domain specific operations
* @attach_dev: attach an iommu domain to a device
* @detach_dev: detach an iommu domain from a device
* @map: map a physically contiguous memory region to an iommu domain
* @map_pages: map a physically contiguous set of pages of the same size to
* an iommu domain.
@ -207,111 +272,39 @@ struct iommu_iotlb_gather {
* @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
* queue
* @iova_to_phys: translate iova to physical address
* @probe_device: Add device to iommu driver handling
* @release_device: Remove device from iommu driver handling
* @probe_finalize: Do final setup work after the device is added to an IOMMU
* group and attached to the groups domain
* @device_group: find iommu group for a particular device
* @enable_nesting: Enable nesting
* @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*)
* @get_resv_regions: Request list of reserved regions for a device
* @put_resv_regions: Free list of reserved regions for a device
* @apply_resv_region: Temporary helper call-back for iova reserved ranges
* @of_xlate: add OF master IDs to iommu grouping
* @is_attach_deferred: Check if domain attach should be deferred from iommu
* driver init to device driver init (default no)
* @dev_has/enable/disable_feat: per device entries to check/enable/disable
* iommu specific features.
* @dev_feat_enabled: check enabled feature
* @aux_attach/detach_dev: aux-domain specific attach/detach entries.
* @aux_get_pasid: get the pasid given an aux-domain
* @sva_bind: Bind process address space to device
* @sva_unbind: Unbind process address space from device
* @sva_get_pasid: Get PASID associated to a SVA handle
* @page_response: handle page request response
* @cache_invalidate: invalidate translation caches
* @sva_bind_gpasid: bind guest pasid and mm
* @sva_unbind_gpasid: unbind guest pasid and mm
* @def_domain_type: device default domain type, return value:
* - IOMMU_DOMAIN_IDENTITY: must use an identity domain
* - IOMMU_DOMAIN_DMA: must use a dma domain
* - 0: use the default setting
* @pgsize_bitmap: bitmap of all possible supported page sizes
* @owner: Driver module providing these ops
* @free: Release the domain after use.
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
/* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
void (*domain_free)(struct iommu_domain *);
struct iommu_domain_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *iotlb_gather);
size_t size, struct iommu_iotlb_gather *iotlb_gather);
size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
size_t pgsize, size_t pgcount,
struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain);
void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova,
size_t size);
void (*iotlb_sync)(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
struct iommu_device *(*probe_device)(struct device *dev);
void (*release_device)(struct device *dev);
void (*probe_finalize)(struct device *dev);
struct iommu_group *(*device_group)(struct device *dev);
phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
dma_addr_t iova);
int (*enable_nesting)(struct iommu_domain *domain);
int (*set_pgtable_quirks)(struct iommu_domain *domain,
unsigned long quirks);
/* Request/Free a list of reserved regions for a device */
void (*get_resv_regions)(struct device *dev, struct list_head *list);
void (*put_resv_regions)(struct device *dev, struct list_head *list);
void (*apply_resv_region)(struct device *dev,
struct iommu_domain *domain,
struct iommu_resv_region *region);
int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);
/* Per device IOMMU features */
bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);
/* Aux-domain specific attach/detach entries */
int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);
struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
void *drvdata);
void (*sva_unbind)(struct iommu_sva *handle);
u32 (*sva_get_pasid)(struct iommu_sva *handle);
int (*page_response)(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg);
int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
struct iommu_cache_invalidate_info *inv_info);
int (*sva_bind_gpasid)(struct iommu_domain *domain,
struct device *dev, struct iommu_gpasid_bind_data *data);
int (*sva_unbind_gpasid)(struct device *dev, u32 pasid);
int (*def_domain_type)(struct device *dev);
unsigned long pgsize_bitmap;
struct module *owner;
void (*free)(struct iommu_domain *domain);
};
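With the split, each domain carries its own ops; a sketch of how core code could dispatch a map request through them, with example_domain_map() purely hypothetical:

static int example_domain_map(struct iommu_domain *domain, unsigned long iova,
			      phys_addr_t paddr, size_t size, int prot)
{
	const struct iommu_domain_ops *ops = domain->ops;

	/* Hypothetical dispatch helper; mirrors the op signature above. */
	if (!ops || !ops->map)
		return -ENODEV;

	return ops->map(domain, iova, paddr, size, prot, GFP_KERNEL);
}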
/**
@ -403,6 +396,17 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
};
}
static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
{
/*
* Assume that valid ops must be installed if iommu_probe_device()
* has succeeded. The device ops are essentially for internal use
* within the IOMMU subsystem itself, so we should be able to trust
* ourselves not to misuse the helper.
*/
return dev->iommu->iommu_dev->ops;
}
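dev_iommu_ops() gives core code a direct path to the driver's ops once probing has succeeded; a hedged sketch of a possible caller, with example_attach_is_deferred() purely hypothetical:

static bool example_attach_is_deferred(struct device *dev)
{
	const struct iommu_ops *ops = dev_iommu_ops(dev);

	/* Hypothetical helper: honour the driver's deferral choice, if any. */
	if (ops->is_attach_deferred)
		return ops->is_attach_deferred(dev);

	return false;
}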
#define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
#define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
#define IOMMU_GROUP_NOTIFY_BIND_DRIVER 3 /* Pre Driver bind */
@ -421,14 +425,6 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
extern int iommu_uapi_cache_invalidate(struct iommu_domain *domain,
struct device *dev,
void __user *uinfo);
extern int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
struct device *dev, void __user *udata);
extern int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, void __user *udata);
extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
@ -672,9 +668,6 @@ void iommu_release_device(struct device *dev);
int iommu_dev_enable_feature(struct device *dev, enum iommu_dev_features f);
int iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features f);
bool iommu_dev_feature_enabled(struct device *dev, enum iommu_dev_features f);
int iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev);
void iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev);
int iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev);
struct iommu_sva *iommu_sva_bind_device(struct device *dev,
struct mm_struct *mm,
@ -1019,23 +1012,6 @@ iommu_dev_disable_feature(struct device *dev, enum iommu_dev_features feat)
return -ENODEV;
}
static inline int
iommu_aux_attach_device(struct iommu_domain *domain, struct device *dev)
{
return -ENODEV;
}
static inline void
iommu_aux_detach_device(struct iommu_domain *domain, struct device *dev)
{
}
static inline int
iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
{
return -ENODEV;
}
static inline struct iommu_sva *
iommu_sva_bind_device(struct device *dev, struct mm_struct *mm, void *drvdata)
{
@ -1051,33 +1027,6 @@ static inline u32 iommu_sva_get_pasid(struct iommu_sva *handle)
return IOMMU_PASID_INVALID;
}
static inline int
iommu_uapi_cache_invalidate(struct iommu_domain *domain,
struct device *dev,
struct iommu_cache_invalidate_info *inv_info)
{
return -ENODEV;
}
static inline int iommu_uapi_sva_bind_gpasid(struct iommu_domain *domain,
struct device *dev, void __user *udata)
{
return -ENODEV;
}
static inline int iommu_uapi_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev, void __user *udata)
{
return -ENODEV;
}
static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
struct device *dev,
ioasid_t pasid)
{
return -ENODEV;
}
static inline struct iommu_fwspec *dev_iommu_fwspec_get(struct device *dev)
{
return NULL;
View File
@ -21,18 +21,8 @@ struct iova {
unsigned long pfn_lo; /* Lowest allocated pfn */
};
struct iova_magazine;
struct iova_cpu_rcache;
#define IOVA_RANGE_CACHE_MAX_SIZE 6 /* log of max cached IOVA range size (in pages) */
#define MAX_GLOBAL_MAGS 32 /* magazines per bin */
struct iova_rcache {
spinlock_t lock;
unsigned long depot_size;
struct iova_magazine *depot[MAX_GLOBAL_MAGS];
struct iova_cpu_rcache __percpu *cpu_rcaches;
};
struct iova_rcache;
/* holds all the iova translations for a domain */
struct iova_domain {
@ -46,7 +36,7 @@ struct iova_domain {
unsigned long max32_alloc_size; /* Size of last failed allocation */
struct iova anchor; /* rbtree lookup anchor */
struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
struct iova_rcache *rcaches;
struct hlist_node cpuhp_dead;
};
@ -102,6 +92,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
int iova_domain_init_rcaches(struct iova_domain *iovad);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
#else
View File
@ -158,185 +158,4 @@ struct iommu_page_response {
__u32 code;
};
/* defines the granularity of the invalidation */
enum iommu_inv_granularity {
IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */
IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */
IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */
IOMMU_INV_GRANU_NR, /* number of invalidation granularities */
};
/**
* struct iommu_inv_addr_info - Address Selective Invalidation Structure
*
* @flags: indicates the granularity of the address-selective invalidation
* - If the PASID bit is set, the @pasid field is populated and the invalidation
* relates to cache entries tagged with this PASID and matching the address
* range.
* - If ARCHID bit is set, @archid is populated and the invalidation relates
* to cache entries tagged with this architecture specific ID and matching
* the address range.
* - Both PASID and ARCHID can be set as they may tag different caches.
* - If neither PASID or ARCHID is set, global addr invalidation applies.
* - The LEAF flag indicates whether only the leaf PTE caching needs to be
* invalidated and other paging structure caches can be preserved.
* @pasid: process address space ID
* @archid: architecture-specific ID
* @addr: first stage/level input address
* @granule_size: page/block size of the mapping in bytes
* @nb_granules: number of contiguous granules to be invalidated
*/
struct iommu_inv_addr_info {
#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0)
#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1)
#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2)
__u32 flags;
__u32 archid;
__u64 pasid;
__u64 addr;
__u64 granule_size;
__u64 nb_granules;
};
/**
* struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
*
* @flags: indicates the granularity of the PASID-selective invalidation
* - If the PASID bit is set, the @pasid field is populated and the invalidation
* relates to cache entries tagged with this PASID and matching the address
* range.
* - If the ARCHID bit is set, the @archid is populated and the invalidation
* relates to cache entries tagged with this architecture specific ID and
* matching the address range.
* - Both PASID and ARCHID can be set as they may tag different caches.
* - At least one of PASID or ARCHID must be set.
* @pasid: process address space ID
* @archid: architecture-specific ID
*/
struct iommu_inv_pasid_info {
#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0)
#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1)
__u32 flags;
__u32 archid;
__u64 pasid;
};
/**
* struct iommu_cache_invalidate_info - First level/stage invalidation
* information
* @argsz: User filled size of this data
* @version: API version of this structure
* @cache: bitfield that allows to select which caches to invalidate
* @granularity: defines the lowest granularity used for the invalidation:
* domain > PASID > addr
* @padding: reserved for future use (should be zero)
* @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
* @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
*
* Not all the combinations of cache/granularity are valid:
*
* +--------------+---------------+---------------+---------------+
* | type / | DEV_IOTLB | IOTLB | PASID |
* | granularity | | | cache |
* +==============+===============+===============+===============+
* | DOMAIN | N/A | Y | Y |
* +--------------+---------------+---------------+---------------+
* | PASID | Y | Y | Y |
* +--------------+---------------+---------------+---------------+
* | ADDR | Y | Y | N/A |
* +--------------+---------------+---------------+---------------+
*
* Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
* @version and @cache.
*
* If multiple cache types are invalidated simultaneously, they all
* must support the used granularity.
*/
struct iommu_cache_invalidate_info {
__u32 argsz;
#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
__u32 version;
/* IOMMU paging structure cache */
#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */
#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */
#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */
#define IOMMU_CACHE_INV_TYPE_NR (3)
__u8 cache;
__u8 granularity;
__u8 padding[6];
union {
struct iommu_inv_pasid_info pasid_info;
struct iommu_inv_addr_info addr_info;
} granu;
};
/**
* struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest
* SVA binding.
*
* @flags: VT-d PASID table entry attributes
* @pat: Page attribute table data to compute effective memory type
* @emt: Extended memory type
*
* Only guest vIOMMU selectable and effective options are passed down to
* the host IOMMU.
*/
struct iommu_gpasid_bind_data_vtd {
#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */
#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */
#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */
#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */
#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */
#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */
#define IOMMU_SVA_VTD_GPASID_WPE (1 << 6) /* Write protect enable */
#define IOMMU_SVA_VTD_GPASID_LAST (1 << 7)
__u64 flags;
__u32 pat;
__u32 emt;
};
#define IOMMU_SVA_VTD_GPASID_MTS_MASK (IOMMU_SVA_VTD_GPASID_CD | \
IOMMU_SVA_VTD_GPASID_EMTE | \
IOMMU_SVA_VTD_GPASID_PCD | \
IOMMU_SVA_VTD_GPASID_PWT)
/**
* struct iommu_gpasid_bind_data - Information about device and guest PASID binding
* @argsz: User filled size of this data
* @version: Version of this data structure
* @format: PASID table entry format
* @flags: Additional information on guest bind request
* @gpgd: Guest page directory base of the guest mm to bind
* @hpasid: Process address space ID used for the guest mm in host IOMMU
* @gpasid: Process address space ID used for the guest mm in guest IOMMU
* @addr_width: Guest virtual address width
* @padding: Reserved for future use (should be zero)
* @vtd: Intel VT-d specific data
*
* Guest to host PASID mapping can be an identity or non-identity, where guest
* has its own PASID space. For non-identify mapping, guest to host PASID lookup
* is needed when VM programs guest PASID into an assigned device. VMM may
* trap such PASID programming then request host IOMMU driver to convert guest
* PASID to host PASID based on this bind data.
*/
struct iommu_gpasid_bind_data {
__u32 argsz;
#define IOMMU_GPASID_BIND_VERSION_1 1
__u32 version;
#define IOMMU_PASID_FORMAT_INTEL_VTD 1
#define IOMMU_PASID_FORMAT_LAST 2
__u32 format;
__u32 addr_width;
#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */
__u64 flags;
__u64 gpgd;
__u64 hpasid;
__u64 gpasid;
__u8 padding[8];
/* Vendor specific data */
union {
struct iommu_gpasid_bind_data_vtd vtd;
} vendor;
};
#endif /* _UAPI_IOMMU_H */