// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Intel Corporation.
 *
 * Authors: David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
#include <linux/xarray.h>
#include <asm/page.h>
#include <asm/fpu/api.h>

#include "iommu.h"
#include "pasid.h"
#include "perf.h"
#include "../iommu-sva.h"
#include "trace.h"

static irqreturn_t prq_event_thread(int irq, void *d);
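
/*
 * Per-PASID private data (the struct intel_svm bound to that PASID) is
 * kept in an xarray keyed by PASID value; xa_alloc() with a one-entry
 * limit stores the pointer at exactly that index.
 */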
static DEFINE_XARRAY_ALLOC(pasid_private_array);
static int pasid_private_add(ioasid_t pasid, void *priv)
{
	return xa_alloc(&pasid_private_array, &pasid, priv,
			XA_LIMIT(pasid, pasid), GFP_ATOMIC);
}

static void pasid_private_remove(ioasid_t pasid)
{
	xa_erase(&pasid_private_array, pasid);
}

static void *pasid_private_find(ioasid_t pasid)
{
	return xa_load(&pasid_private_array, pasid);
}
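
/*
 * An SVM may be bound to several devices. Walk the RCU-protected device
 * list and return the entry for @dev, or NULL if the device is not
 * bound to this SVM.
 */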
static struct intel_svm_dev *
svm_lookup_device_by_dev(struct intel_svm *svm, struct device *dev)
{
	struct intel_svm_dev *sdev = NULL, *t;

	rcu_read_lock();
	list_for_each_entry_rcu(t, &svm->devs, list) {
		if (t->dev == dev) {
			sdev = t;
			break;
		}
	}
	rcu_read_unlock();

	return sdev;
}
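
/*
 * Set up the page request queue (PRQ) for one IOMMU: allocate the queue
 * pages, wire up the PRQ interrupt and the I/O page fault queue, then
 * program the queue head/tail/address registers. The error labels
 * unwind in the reverse order of setup.
 */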
int intel_svm_enable_prq(struct intel_iommu *iommu)
{
	struct iopf_queue *iopfq;
	struct page *pages;
	int irq, ret;

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
	if (!pages) {
		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
			iommu->name);
		return -ENOMEM;
	}
	iommu->prq = page_address(pages);

	irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu);
	if (irq <= 0) {
		pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
		       iommu->name);
		ret = -EINVAL;
		goto free_prq;
	}
	iommu->pr_irq = irq;

	snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
		 "dmar%d-iopfq", iommu->seq_id);
	iopfq = iopf_queue_alloc(iommu->iopfq_name);
	if (!iopfq) {
		pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
		ret = -ENOMEM;
		goto free_hwirq;
	}
	iommu->iopf_queue = iopfq;

	snprintf(iommu->prq_name, sizeof(iommu->prq_name), "dmar%d-prq", iommu->seq_id);

	ret = request_threaded_irq(irq, NULL, prq_event_thread, IRQF_ONESHOT,
				   iommu->prq_name, iommu);
	if (ret) {
		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
		       iommu->name);
		goto free_iopfq;
	}
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER);

	init_completion(&iommu->prq_complete);

	return 0;

free_iopfq:
	iopf_queue_free(iommu->iopf_queue);
	iommu->iopf_queue = NULL;
free_hwirq:
	dmar_free_hwirq(irq);
	iommu->pr_irq = 0;
free_prq:
	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return ret;
}
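
/*
 * Tear down the page request queue: quiesce the hardware registers
 * first, then release the interrupt, the iopf queue and the queue pages.
 */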
int intel_svm_finish_prq(struct intel_iommu *iommu)
{
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);

	if (iommu->pr_irq) {
		free_irq(iommu->pr_irq, iommu);
		dmar_free_hwirq(iommu->pr_irq);
		iommu->pr_irq = 0;
	}

	if (iommu->iopf_queue) {
		iopf_queue_free(iommu->iopf_queue);
		iommu->iopf_queue = NULL;
	}

	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
	iommu->prq = NULL;

	return 0;
}
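
/*
 * SVM shares the CPU page tables with the IOMMU, so the IOMMU's
 * first-level translation must understand everything the CPU may put
 * in them: if the CPU can use 1GB pages or 5-level paging but the
 * IOMMU cannot, leave SVM disabled.
 */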
void intel_svm_check(struct intel_iommu *iommu)
{
	if (!pasid_supported(iommu))
		return;

	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
	    !cap_fl1gp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible 1GB page capability\n",
		       iommu->name);
		return;
	}

	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
	    !cap_fl5lp_support(iommu->cap)) {
		pr_err("%s SVM disabled, incompatible paging mode\n",
		       iommu->name);
		return;
	}

	iommu->flags |= VTD_FLAG_SVM_CAPABLE;
}
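
/*
 * Flush one naturally aligned power-of-two range on one device: the
 * PASID-based IOTLB always, plus the device TLB (and the extra quirk
 * flush) when ATS is enabled.
 */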
static void __flush_svm_range_dev(struct intel_svm *svm,
				  struct intel_svm_dev *sdev,
				  unsigned long address,
				  unsigned long pages, int ih)
{
	struct device_domain_info *info = dev_iommu_priv_get(sdev->dev);

	if (WARN_ON(!pages))
		return;

	qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, address, pages, ih);
	if (info->ats_enabled) {
		qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
					 svm->pasid, sdev->qdep, address,
					 order_base_2(pages));
		quirk_extra_dev_tlb_flush(info, address, order_base_2(pages),
					  svm->pasid, sdev->qdep);
	}
}
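
/*
 * The invalidation descriptors encode the target range as an address
 * plus a power-of-two size, so an arbitrary range is rounded up to the
 * covering alignment and flushed in aligned chunks of that size.
 */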
static void intel_flush_svm_range_dev(struct intel_svm *svm,
				      struct intel_svm_dev *sdev,
				      unsigned long address,
				      unsigned long pages, int ih)
{
	unsigned long shift = ilog2(__roundup_pow_of_two(pages));
	unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
	unsigned long start = ALIGN_DOWN(address, align);
	unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);

	while (start < end) {
		__flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
		start += align;
	}
}

static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
				  unsigned long pages, int ih)
{
	struct intel_svm_dev *sdev;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
	rcu_read_unlock();
}
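
/*
 * Flush everything for the PASID: a global PASID-based IOTLB flush per
 * device, plus a full-address-range device TLB flush where ATS is on.
 */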
static void intel_flush_svm_all(struct intel_svm *svm)
{
	struct device_domain_info *info;
	struct intel_svm_dev *sdev;

	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list) {
		info = dev_iommu_priv_get(sdev->dev);

		qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
		if (info->ats_enabled) {
			qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
						 svm->pasid, sdev->qdep,
						 0, 64 - VTD_PAGE_SHIFT);
			quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
						  svm->pasid, sdev->qdep);
		}
	}
	rcu_read_unlock();
}

/* Pages have been freed at this point */
static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);

	if (start == 0 && end == -1UL) {
		intel_flush_svm_all(svm);
		return;
	}

	intel_flush_svm_range(svm, start,
			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
	struct intel_svm_dev *sdev;

	/* This might end up being called from exit_mmap(), *before* the page
	 * tables are cleared. And __mmu_notifier_release() will delete us from
	 * the list of notifiers so that our invalidate_range() callback doesn't
	 * get called when the page tables are cleared. So we need to protect
	 * against hardware accessing those page tables.
	 *
	 * We do it by clearing the entry in the PASID table and then flushing
	 * the IOTLB and the PASID table caches. This might upset hardware;
	 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
	 * page) so that we end up taking a fault that the hardware really
	 * *has* to handle gracefully without affecting other processes.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(sdev, &svm->devs, list)
		intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
					    svm->pasid, true);
	rcu_read_unlock();
}

static const struct mmu_notifier_ops intel_mmuops = {
	.release = intel_mm_release,
	.arch_invalidate_secondary_tlbs = intel_arch_invalidate_secondary_tlbs,
};
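
/*
 * Resolve a (device, PASID) pair to its intel_svm and intel_svm_dev.
 * Returns -EINVAL for an invalid PASID. *rsvm may be NULL when nothing
 * is bound to the PASID, and *rsdev may be NULL when the SVM exists but
 * @dev is not on its device list.
 */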
static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid,
			     struct intel_svm **rsvm,
			     struct intel_svm_dev **rsdev)
{
	struct intel_svm_dev *sdev = NULL;
	struct intel_svm *svm;

	if (pasid == IOMMU_PASID_INVALID || pasid >= PASID_MAX)
		return -EINVAL;

	svm = pasid_private_find(pasid);
	if (IS_ERR(svm))
		return PTR_ERR(svm);

	if (!svm)
		goto out;

	/*
	 * If we found svm for the PASID, there must be at least one device
	 * bond.
	 */
	if (WARN_ON(list_empty(&svm->devs)))
		return -EINVAL;
	sdev = svm_lookup_device_by_dev(svm, dev);

out:
	*rsvm = svm;
	*rsdev = sdev;

	return 0;
}
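
/*
 * Bind @domain->mm to @pasid on @dev: allocate (or reuse) the intel_svm
 * for the PASID, register an MMU notifier on the mm so CPU TLB
 * invalidations are mirrored to the IOMMU, and program the first-level
 * PASID table entry with the mm's PGD.
 */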
static int intel_svm_bind_mm(struct intel_iommu *iommu, struct device *dev,
			     struct iommu_domain *domain, ioasid_t pasid)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct mm_struct *mm = domain->mm;
	struct intel_svm_dev *sdev;
	struct intel_svm *svm;
	unsigned long sflags;
	int ret = 0;

	svm = pasid_private_find(pasid);
	if (!svm) {
		svm = kzalloc(sizeof(*svm), GFP_KERNEL);
		if (!svm)
			return -ENOMEM;

		svm->pasid = pasid;
		svm->mm = mm;
		INIT_LIST_HEAD_RCU(&svm->devs);

		svm->notifier.ops = &intel_mmuops;
		ret = mmu_notifier_register(&svm->notifier, mm);
		if (ret) {
			kfree(svm);
			return ret;
		}

		ret = pasid_private_add(svm->pasid, svm);
		if (ret) {
			mmu_notifier_unregister(&svm->notifier, mm);
			kfree(svm);
			return ret;
		}
	}

	sdev = kzalloc(sizeof(*sdev), GFP_KERNEL);
	if (!sdev) {
		ret = -ENOMEM;
		goto free_svm;
	}

	sdev->dev = dev;
	sdev->iommu = iommu;
	sdev->did = FLPT_DEFAULT_DID;
	sdev->sid = PCI_DEVID(info->bus, info->devfn);
	init_rcu_head(&sdev->rcu);
	if (info->ats_enabled) {
		sdev->qdep = info->ats_qdep;
		if (sdev->qdep >= QI_DEV_EIOTLB_MAX_INVS)
			sdev->qdep = 0;
	}

	/* Setup the pasid table: */
	sflags = cpu_feature_enabled(X86_FEATURE_LA57) ? PASID_FLAG_FL5LP : 0;
	ret = intel_pasid_setup_first_level(iommu, dev, mm->pgd, pasid,
					    FLPT_DEFAULT_DID, sflags);
	if (ret)
		goto free_sdev;

	list_add_rcu(&sdev->list, &svm->devs);

	return 0;

free_sdev:
	kfree(sdev);
free_svm:
	if (list_empty(&svm->devs)) {
		mmu_notifier_unregister(&svm->notifier, mm);
		pasid_private_remove(pasid);
		kfree(svm);
	}

	return ret;
}
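
/*
 * Undo intel_svm_bind_mm() for one device: unlink the device from the
 * SVM's list and, when the last device goes, unregister the MMU
 * notifier and free the SVM itself.
 */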
void intel_svm_remove_dev_pasid(struct device *dev, u32 pasid)
{
	struct intel_svm_dev *sdev;
	struct intel_svm *svm;
	struct mm_struct *mm;

	if (pasid_to_svm_sdev(dev, pasid, &svm, &sdev))
		return;
	mm = svm->mm;

	if (sdev) {
		list_del_rcu(&sdev->list);
		kfree_rcu(sdev, rcu);

		if (list_empty(&svm->devs)) {
			if (svm->notifier.ops)
				mmu_notifier_unregister(&svm->notifier, mm);
			pasid_private_remove(svm->pasid);
			/*
			 * We mandate that no page faults may be outstanding
			 * for the PASID when intel_svm_unbind_mm() is called.
			 * If that is not obeyed, subtle errors will happen.
			 * Let's make them less subtle...
			 */
			memset(svm, 0x6b, sizeof(*svm));
			kfree(svm);
		}
	}
}

/* Page request queue descriptor */
struct page_req_dsc {
	union {
		struct {
			u64 type:8;
			u64 pasid_present:1;
			u64 priv_data_present:1;
			u64 rsvd:6;
			u64 rid:16;
			u64 pasid:20;
			u64 exe_req:1;
			u64 pm_req:1;
			u64 rsvd2:10;
		};
		u64 qw_0;
	};
	union {
		struct {
			u64 rd_req:1;
			u64 wr_req:1;
			u64 lpig:1;
			u64 prg_index:9;
			u64 addr:52;
		};
		u64 qw_1;
	};
	u64 priv_data[2];
};
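
/*
 * A canonical address has bits [63:__VIRTUAL_MASK_SHIFT] all equal to
 * bit __VIRTUAL_MASK_SHIFT; sign-extending through the shift pair and
 * comparing against the original checks exactly that.
 */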
static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long) addr;

	return (((saddr << shift) >> shift) == saddr);
}

/**
 * intel_drain_pasid_prq - Drain page requests and responses for a pasid
 * @dev: target device
 * @pasid: pasid for draining
 *
 * Drain all pending page requests and responses related to @pasid in both
 * software and hardware. This is supposed to be called after the device
 * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
 * and DevTLB have been invalidated.
 *
 * It waits until all pending page requests for @pasid in the page fault
 * queue are completed by the prq handling thread. Then follow the steps
 * described in VT-d spec CH7.10 to drain all page requests and page
 * responses pending in the hardware.
 */
void intel_drain_pasid_prq(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct dmar_domain *domain;
	struct intel_iommu *iommu;
	struct qi_desc desc[3];
	struct pci_dev *pdev;
	int head, tail;
	u16 sid, did;
	int qdep;

	info = dev_iommu_priv_get(dev);
	if (WARN_ON(!info || !dev_is_pci(dev)))
		return;

	if (!info->pri_enabled)
		return;

	iommu = info->iommu;
	domain = info->domain;
	pdev = to_pci_dev(dev);
	sid = PCI_DEVID(info->bus, info->devfn);
	did = domain_id_iommu(domain, iommu);
	qdep = pci_ats_queue_depth(pdev);

	/*
	 * Check and wait until all pending page requests in the queue are
	 * handled by the prq handling thread.
	 */
prq_retry:
	reinit_completion(&iommu->prq_complete);
	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	while (head != tail) {
		struct page_req_dsc *req;

		req = &iommu->prq[head / sizeof(*req)];
		if (!req->pasid_present || req->pasid != pasid) {
			head = (head + sizeof(*req)) & PRQ_RING_MASK;
			continue;
		}

		wait_for_completion(&iommu->prq_complete);
		goto prq_retry;
	}

	iopf_queue_flush_dev(dev);

	/*
	 * Perform steps described in VT-d spec CH7.10 to drain page
	 * requests and responses in hardware.
	 */
	memset(desc, 0, sizeof(desc));
	desc[0].qw0 = QI_IWD_STATUS_DATA(QI_DONE) |
			QI_IWD_FENCE |
			QI_IWD_TYPE;
	desc[1].qw0 = QI_EIOTLB_PASID(pasid) |
			QI_EIOTLB_DID(did) |
			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
			QI_EIOTLB_TYPE;
	desc[2].qw0 = QI_DEV_EIOTLB_PASID(pasid) |
			QI_DEV_EIOTLB_SID(sid) |
			QI_DEV_EIOTLB_QDEP(qdep) |
			QI_DEIOTLB_TYPE |
			QI_DEV_IOTLB_PFSID(info->pfsid);
qi_retry:
	reinit_completion(&iommu->prq_complete);
	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		wait_for_completion(&iommu->prq_complete);
		goto qi_retry;
	}
}
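
/* Translate request permission bits into generic IOMMU fault permissions. */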
static int prq_to_iommu_prot(struct page_req_dsc *req)
{
	int prot = 0;

	if (req->rd_req)
		prot |= IOMMU_FAULT_PERM_READ;
	if (req->wr_req)
		prot |= IOMMU_FAULT_PERM_WRITE;
	if (req->exe_req)
		prot |= IOMMU_FAULT_PERM_EXEC;
	if (req->pm_req)
		prot |= IOMMU_FAULT_PERM_PRIV;

	return prot;
}
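
/*
 * Convert a hardware page request descriptor into a generic
 * iommu_fault_event and hand it to the IOMMU fault reporting core.
 */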
static int intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
				struct page_req_dsc *desc)
{
	struct iommu_fault_event event;

	if (!dev || !dev_is_pci(dev))
		return -ENODEV;

	/* Fill in event data for device specific processing */
	memset(&event, 0, sizeof(struct iommu_fault_event));
	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
	event.fault.prm.pasid = desc->pasid;
	event.fault.prm.grpid = desc->prg_index;
	event.fault.prm.perm = prq_to_iommu_prot(desc);

	if (desc->lpig)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
	if (desc->pasid_present) {
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
	}
	if (desc->priv_data_present) {
		/*
		 * Set last page in group bit if private data is present,
		 * page response is required as it does for LPIG.
		 * iommu_report_device_fault() doesn't understand this vendor
		 * specific requirement thus we set last_page as a workaround.
		 */
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
		event.fault.prm.private_data[0] = desc->priv_data[0];
		event.fault.prm.private_data[1] = desc->priv_data[1];
	} else if (dmar_latency_enabled(iommu, DMAR_LATENCY_PRQ)) {
		/*
		 * If the private data fields are not used by hardware, use it
		 * to monitor the prq handle latency.
		 */
		event.fault.prm.private_data[0] = ktime_to_ns(ktime_get());
	}

	return iommu_report_device_fault(dev, &event);
}
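
/*
 * Reject a malformed page request. A page group response is sent only
 * if the request demands one (LPIG or private data present).
 */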
static void handle_bad_prq_event(struct intel_iommu *iommu,
				 struct page_req_dsc *req, int result)
{
	struct qi_desc desc;

	pr_err("%s: Invalid page request: %08llx %08llx\n",
	       iommu->name, ((unsigned long long *)req)[0],
	       ((unsigned long long *)req)[1]);

	/*
	 * Per VT-d spec. v3.0 ch7.7, system software must
	 * respond with page group response if private data
	 * is present (PDP) or last page in group (LPIG) bit
	 * is set. This is an additional VT-d feature beyond
	 * PCI ATS spec.
	 */
	if (!req->lpig && !req->priv_data_present)
		return;

	desc.qw0 = QI_PGRP_PASID(req->pasid) |
			QI_PGRP_DID(req->rid) |
			QI_PGRP_PASID_P(req->pasid_present) |
			QI_PGRP_PDP(req->priv_data_present) |
			QI_PGRP_RESP_CODE(result) |
			QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(req->prg_index) |
			QI_PGRP_LPIG(req->lpig);

	if (req->priv_data_present) {
		desc.qw2 = req->priv_data[0];
		desc.qw3 = req->priv_data[1];
	} else {
		desc.qw2 = 0;
		desc.qw3 = 0;
	}

	qi_submit_sync(iommu, &desc, 1, 0);
}
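
/*
 * Threaded handler for the PRQ interrupt: pop each descriptor, sanity
 * check it, report it as an I/O page fault, then advance the head
 * pointer and deal with any queue overflow.
 */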
static irqreturn_t prq_event_thread(int irq, void *d)
{
	struct intel_iommu *iommu = d;
	struct page_req_dsc *req;
	int head, tail, handled;
	struct pci_dev *pdev;
	u64 address;

	/*
	 * Clear PPR bit before reading head/tail registers, to ensure that
	 * we get a new interrupt if needed.
	 */
	writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
	handled = (head != tail);
	while (head != tail) {
		req = &iommu->prq[head / sizeof(*req)];
		address = (u64)req->addr << VTD_PAGE_SHIFT;

		if (unlikely(!req->pasid_present)) {
			pr_err("IOMMU: %s: Page request without PASID\n",
			       iommu->name);
bad_req:
			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
			goto prq_advance;
		}

		if (unlikely(!is_canonical_address(address))) {
			pr_err("IOMMU: %s: Address is not canonical\n",
			       iommu->name);
			goto bad_req;
		}

		if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) {
			pr_err("IOMMU: %s: Page request in Privilege Mode\n",
			       iommu->name);
			goto bad_req;
		}

		if (unlikely(req->exe_req && req->rd_req)) {
			pr_err("IOMMU: %s: Execution request not supported\n",
			       iommu->name);
			goto bad_req;
		}

		/* Drop Stop Marker message. No need for a response. */
		if (unlikely(req->lpig && !req->rd_req && !req->wr_req))
			goto prq_advance;

		pdev = pci_get_domain_bus_and_slot(iommu->segment,
						   PCI_BUS_NUM(req->rid),
						   req->rid & 0xff);
		/*
		 * If prq is to be handled outside iommu driver via receiver of
		 * the fault notifiers, we skip the page response here.
		 */
		if (!pdev)
			goto bad_req;

		if (intel_svm_prq_report(iommu, &pdev->dev, req))
			handle_bad_prq_event(iommu, req, QI_RESP_INVALID);
		else
			trace_prq_report(iommu, &pdev->dev, req->qw_0, req->qw_1,
					 req->priv_data[0], req->priv_data[1],
					 iommu->prq_seq_number++);
		pci_dev_put(pdev);
prq_advance:
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);

	/*
	 * Clear the page request overflow bit and wake up all threads that
	 * are waiting for the completion of this handling.
	 */
	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
		pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
				    iommu->name);
		head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
		tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
		if (head == tail) {
			iopf_queue_discard_partial(iommu->iopf_queue);
			writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
			pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
					    iommu->name);
		}
	}

	if (!completion_done(&iommu->prq_complete))
		complete(&iommu->prq_complete);

	return IRQ_RETVAL(handled);
}
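
/*
 * Send the page group response for a previously reported page request,
 * as VT-d requires whenever LPIG or private data was present.
 */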
int intel_svm_page_response(struct device *dev,
			    struct iommu_fault_event *evt,
			    struct iommu_page_response *msg)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	u8 bus = info->bus, devfn = info->devfn;
	struct iommu_fault_page_request *prm;
	bool private_present;
	bool pasid_present;
	bool last_page;
	int ret = 0;
	u16 sid;

	prm = &evt->fault.prm;
	sid = PCI_DEVID(bus, devfn);
	pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
	private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA;
	last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;

	if (!pasid_present) {
		ret = -EINVAL;
		goto out;
	}

	if (prm->pasid == 0 || prm->pasid >= PASID_MAX) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Per VT-d spec. v3.0 ch7.7, system software must respond
	 * with page group response if private data is present (PDP)
	 * or last page in group (LPIG) bit is set. This is an
	 * additional VT-d requirement beyond PCI ATS spec.
	 */
	if (last_page || private_present) {
		struct qi_desc desc;

		desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) |
				QI_PGRP_PASID_P(pasid_present) |
				QI_PGRP_PDP(private_present) |
				QI_PGRP_RESP_CODE(msg->code) |
				QI_PGRP_RESP_TYPE;
		desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page);
		desc.qw2 = 0;
		desc.qw3 = 0;

		if (private_present) {
			desc.qw2 = prm->private_data[0];
			desc.qw3 = prm->private_data[1];
		} else if (prm->private_data[0]) {
			dmar_latency_update(iommu, DMAR_LATENCY_PRQ,
				ktime_to_ns(ktime_get()) - prm->private_data[0]);
		}

		qi_submit_sync(iommu, &desc, 1, 0);
	}
out:
	return ret;
}
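
/*
 * SVA domain support: an SVA domain is a thin dmar_domain wrapper whose
 * set_dev_pasid callback simply binds the domain's mm to the PASID on
 * the device.
 */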
static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;

	return intel_svm_bind_mm(iommu, dev, domain, pasid);
}

static void intel_svm_domain_free(struct iommu_domain *domain)
{
	kfree(to_dmar_domain(domain));
}

static const struct iommu_domain_ops intel_svm_domain_ops = {
	.set_dev_pasid = intel_svm_set_dev_pasid,
	.free = intel_svm_domain_free
};

struct iommu_domain *intel_svm_domain_alloc(void)
{
	struct dmar_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;
	domain->domain.ops = &intel_svm_domain_ops;

	return &domain->domain;
}