iommu: Separate SVA and IOPF
Add CONFIG_IOMMU_IOPF for the page fault handling framework and select
it from its real consumers. Move the iopf function declarations from
iommu-sva.h to iommu.h and remove iommu-sva.h, as it is empty now.

Consolidate all SVA-related code into iommu-sva.c:
- Move iommu_sva_domain_alloc() from iommu.c to iommu-sva.c.
- Move the SVA iopf handling code from io-pgfault.c to iommu-sva.c.

Consolidate iommu_report_device_fault() and iommu_page_response() into
io-pgfault.c.

Export iopf_free_group() and iopf_group_response() for iopf handlers
implemented in modules. Some functions are renamed with more meaningful
names. No other intentional functionality changes.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tested-by: Yan Zhao <yan.y.zhao@intel.com>
Tested-by: Longfang Liu <liulongfang@huawei.com>
Link: https://lore.kernel.org/r/20240212012227.119381-11-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 17c51a0ea3
parent 351ffcb11c
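This patch exports iopf_free_group() and iopf_group_response() precisely so that page fault handlers can live in loadable modules. As a rough sketch of the pattern that enables, the following hypothetical handler mirrors the iommu_sva_iopf_handler()/iommu_sva_handle_iopf() pair added in the diff below; everything prefixed my_, including the my_resolve_fault() service routine, is an illustrative stand-in for driver-specific logic and is not part of this commit.

/* Hypothetical modular iopf handler; my_* names are illustrative only. */
#include <linux/iommu.h>
#include <linux/workqueue.h>

/* Driver-specific fault service routine, assumed to exist elsewhere. */
static enum iommu_page_response_code my_resolve_fault(struct iommu_fault *fault);

static void my_iopf_work(struct work_struct *work)
{
	struct iopf_group *group = container_of(work, struct iopf_group, work);
	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
	struct iopf_fault *iopf;

	/* Errors are sticky: stop servicing the group on the first failure. */
	list_for_each_entry(iopf, &group->faults, list) {
		if (status != IOMMU_PAGE_RESP_SUCCESS)
			break;
		status = my_resolve_fault(&iopf->fault);
	}

	iopf_group_response(group, status);	/* exported by this patch */
	iopf_free_group(group);			/* exported by this patch */
}

/* Installed as domain->iopf_handler; queues the group to the iopf workqueue. */
static int my_iopf_handler(struct iopf_group *group)
{
	struct iommu_fault_param *fault_param = group->dev->iommu->fault_param;

	INIT_WORK(&group->work, my_iopf_work);
	if (!queue_work(fault_param->queue->wq, &group->work))
		return -EBUSY;

	return 0;
}

Deferring the actual fault service to a workqueue keeps domain->iopf_handler cheap enough to call from a driver's threaded IRQ path, which is exactly the design the in-tree SVA handler follows in iommu-sva.c below.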
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
@@ -163,6 +163,9 @@ config IOMMU_SVA
 	select IOMMU_MM_DATA
 	bool

+config IOMMU_IOPF
+	bool
+
 config FSL_PAMU
 	bool "Freescale IOMMU support"
 	depends on PCI
@@ -398,6 +401,7 @@ config ARM_SMMU_V3_SVA
 	bool "Shared Virtual Addressing support for the ARM SMMUv3"
 	depends on ARM_SMMU_V3
 	select IOMMU_SVA
+	select IOMMU_IOPF
 	select MMU_NOTIFIER
 	help
 	  Support for sharing process address spaces with devices using the
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o
 obj-$(CONFIG_S390_IOMMU) += s390-iommu.o
 obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o
 obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o
-obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o io-pgfault.o
+obj-$(CONFIG_IOMMU_SVA) += iommu-sva.o
+obj-$(CONFIG_IOMMU_IOPF) += io-pgfault.o
 obj-$(CONFIG_SPRD_IOMMU) += sprd-iommu.o
 obj-$(CONFIG_APPLE_DART) += apple-dart.o
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -10,7 +10,6 @@
 #include <linux/slab.h>

 #include "arm-smmu-v3.h"
-#include "../../iommu-sva.h"
 #include "../../io-pgtable-arm.h"

 struct arm_smmu_mmu_notifier {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -29,7 +29,6 @@

 #include "arm-smmu-v3.h"
 #include "../../dma-iommu.h"
-#include "../../iommu-sva.h"

 static bool disable_bypass = true;
 module_param(disable_bypass, bool, 0444);
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
@@ -51,6 +51,7 @@ config INTEL_IOMMU_SVM
 	depends on X86_64
 	select MMU_NOTIFIER
 	select IOMMU_SVA
+	select IOMMU_IOPF
 	help
 	  Shared Virtual Memory (SVM) provides a facility for devices
 	  to access DMA resources through process address space by
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
@@ -27,7 +27,6 @@
 #include "iommu.h"
 #include "../dma-iommu.h"
 #include "../irq_remapping.h"
-#include "../iommu-sva.h"
 #include "pasid.h"
 #include "cap_audit.h"
 #include "perfmon.h"
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
@@ -22,7 +22,6 @@
 #include "iommu.h"
 #include "pasid.h"
 #include "perf.h"
-#include "../iommu-sva.h"
 #include "trace.h"

 static irqreturn_t prq_event_thread(int irq, void *d);
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
@@ -11,12 +11,9 @@
 #include <linux/slab.h>
 #include <linux/workqueue.h>

-#include "iommu-sva.h"
+#include "iommu-priv.h"

-enum iommu_page_response_code
-iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm);
-
-static void iopf_free_group(struct iopf_group *group)
+void iopf_free_group(struct iopf_group *group)
 {
 	struct iopf_fault *iopf, *next;

@@ -27,44 +24,7 @@ static void iopf_free_group(struct iopf_group *group)

 	kfree(group);
 }
-
-static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf,
-			       enum iommu_page_response_code status)
-{
-	struct iommu_page_response resp = {
-		.pasid	= iopf->fault.prm.pasid,
-		.grpid	= iopf->fault.prm.grpid,
-		.code	= status,
-	};
-
-	if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
-	    (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
-		resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
-
-	return iommu_page_response(dev, &resp);
-}
-
-static void iopf_handler(struct work_struct *work)
-{
-	struct iopf_fault *iopf;
-	struct iopf_group *group;
-	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;
-
-	group = container_of(work, struct iopf_group, work);
-	list_for_each_entry(iopf, &group->faults, list) {
-		/*
-		 * For the moment, errors are sticky: don't handle subsequent
-		 * faults in the group if there is an error.
-		 */
-		if (status != IOMMU_PAGE_RESP_SUCCESS)
-			break;
-
-		status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
-	}
-
-	iopf_complete_group(group->dev, &group->last_fault, status);
-	iopf_free_group(group);
-}
+EXPORT_SYMBOL_GPL(iopf_free_group);

 static struct iommu_domain *get_domain_for_iopf(struct device *dev,
 						struct iommu_fault *fault)
@@ -91,7 +51,7 @@ static struct iommu_domain *get_domain_for_iopf(struct device *dev,
 }

 /**
- * iommu_queue_iopf - IO Page Fault handler
+ * iommu_handle_iopf - IO Page Fault handler
  * @fault: fault event
  * @dev: struct device.
  *
@@ -130,7 +90,7 @@ static struct iommu_domain *get_domain_for_iopf(struct device *dev,
  *
  * Return: 0 on success and <0 on error.
  */
-int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
+static int iommu_handle_iopf(struct iommu_fault *fault, struct device *dev)
 {
 	int ret;
 	struct iopf_group *group;
@@ -212,18 +172,117 @@ cleanup_partial:
 	}
 	return ret;
 }
-EXPORT_SYMBOL_GPL(iommu_queue_iopf);

-int iommu_sva_handle_iopf(struct iopf_group *group)
+/**
+ * iommu_report_device_fault() - Report fault event to device driver
+ * @dev: the device
+ * @evt: fault event data
+ *
+ * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
+ * handler. When this function fails and the fault is recoverable, it is the
+ * caller's responsibility to complete the fault.
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
 {
-	struct iommu_fault_param *fault_param = group->dev->iommu->fault_param;
+	struct dev_iommu *param = dev->iommu;
+	struct iopf_fault *evt_pending = NULL;
+	struct iommu_fault_param *fparam;
+	int ret = 0;

-	INIT_WORK(&group->work, iopf_handler);
-	if (!queue_work(fault_param->queue->wq, &group->work))
-		return -EBUSY;
+	if (!param || !evt)
+		return -EINVAL;

-	return 0;
+	/* we only report device fault if there is a handler registered */
+	mutex_lock(&param->lock);
+	fparam = param->fault_param;
+
+	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
+	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
+		evt_pending = kmemdup(evt, sizeof(struct iopf_fault),
+				      GFP_KERNEL);
+		if (!evt_pending) {
+			ret = -ENOMEM;
+			goto done_unlock;
+		}
+		mutex_lock(&fparam->lock);
+		list_add_tail(&evt_pending->list, &fparam->faults);
+		mutex_unlock(&fparam->lock);
+	}
+
+	ret = iommu_handle_iopf(&evt->fault, dev);
+	if (ret && evt_pending) {
+		mutex_lock(&fparam->lock);
+		list_del(&evt_pending->list);
+		mutex_unlock(&fparam->lock);
+		kfree(evt_pending);
+	}
+done_unlock:
+	mutex_unlock(&param->lock);
+	return ret;
 }
+EXPORT_SYMBOL_GPL(iommu_report_device_fault);
+
+int iommu_page_response(struct device *dev,
+			struct iommu_page_response *msg)
+{
+	bool needs_pasid;
+	int ret = -EINVAL;
+	struct iopf_fault *evt;
+	struct iommu_fault_page_request *prm;
+	struct dev_iommu *param = dev->iommu;
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
+	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
+
+	if (!ops->page_response)
+		return -ENODEV;
+
+	if (!param || !param->fault_param)
+		return -EINVAL;
+
+	/* Only send response if there is a fault report pending */
+	mutex_lock(&param->fault_param->lock);
+	if (list_empty(&param->fault_param->faults)) {
+		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
+		goto done_unlock;
+	}
+	/*
+	 * Check if we have a matching page request pending to respond,
+	 * otherwise return -EINVAL
+	 */
+	list_for_each_entry(evt, &param->fault_param->faults, list) {
+		prm = &evt->fault.prm;
+		if (prm->grpid != msg->grpid)
+			continue;
+
+		/*
+		 * If the PASID is required, the corresponding request is
+		 * matched using the group ID, the PASID valid bit and the PASID
+		 * value. Otherwise only the group ID matches request and
+		 * response.
+		 */
+		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
+		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
+			continue;
+
+		if (!needs_pasid && has_pasid) {
+			/* No big deal, just clear it. */
+			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
+			msg->pasid = 0;
+		}
+
+		ret = ops->page_response(dev, evt, msg);
+		list_del(&evt->list);
+		kfree(evt);
+		break;
+	}
+
+done_unlock:
+	mutex_unlock(&param->fault_param->lock);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_page_response);

 /**
  * iopf_queue_flush_dev - Ensure that all queued faults have been processed
@@ -258,6 +317,31 @@ int iopf_queue_flush_dev(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);

+/**
+ * iopf_group_response - Respond a group of page faults
+ * @group: the group of faults with the same group id
+ * @status: the response code
+ *
+ * Return 0 on success and <0 on error.
+ */
+int iopf_group_response(struct iopf_group *group,
+			enum iommu_page_response_code status)
+{
+	struct iopf_fault *iopf = &group->last_fault;
+	struct iommu_page_response resp = {
+		.pasid	= iopf->fault.prm.pasid,
+		.grpid	= iopf->fault.prm.grpid,
+		.code	= status,
+	};
+
+	if ((iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) &&
+	    (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID))
+		resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
+
+	return iommu_page_response(group->dev, &resp);
+}
+EXPORT_SYMBOL_GPL(iopf_group_response);
+
 /**
  * iopf_queue_discard_partial - Remove all pending partial fault
  * @queue: the queue whose partial faults need to be discarded
diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
@@ -7,7 +7,7 @@
 #include <linux/sched/mm.h>
 #include <linux/iommu.h>

-#include "iommu-sva.h"
+#include "iommu-priv.h"

 static DEFINE_MUTEX(iommu_sva_lock);

@@ -159,10 +159,21 @@ u32 iommu_sva_get_pasid(struct iommu_sva *handle)
 }
 EXPORT_SYMBOL_GPL(iommu_sva_get_pasid);

+void mm_pasid_drop(struct mm_struct *mm)
+{
+	struct iommu_mm_data *iommu_mm = mm->iommu_mm;
+
+	if (!iommu_mm)
+		return;
+
+	iommu_free_global_pasid(iommu_mm->pasid);
+	kfree(iommu_mm);
+}
+
 /*
  * I/O page fault handler for SVA
  */
-enum iommu_page_response_code
+static enum iommu_page_response_code
 iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm)
 {
 	vm_fault_t ret;
@@ -216,13 +227,54 @@ out_put_mm:
 	return status;
 }

-void mm_pasid_drop(struct mm_struct *mm)
+static void iommu_sva_handle_iopf(struct work_struct *work)
 {
-	struct iommu_mm_data *iommu_mm = mm->iommu_mm;
+	struct iopf_fault *iopf;
+	struct iopf_group *group;
+	enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS;

-	if (!iommu_mm)
-		return;
+	group = container_of(work, struct iopf_group, work);
+	list_for_each_entry(iopf, &group->faults, list) {
+		/*
+		 * For the moment, errors are sticky: don't handle subsequent
+		 * faults in the group if there is an error.
+		 */
+		if (status != IOMMU_PAGE_RESP_SUCCESS)
+			break;

-	iommu_free_global_pasid(iommu_mm->pasid);
-	kfree(iommu_mm);
+		status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm);
+	}
+
+	iopf_group_response(group, status);
+	iopf_free_group(group);
+}
+
+static int iommu_sva_iopf_handler(struct iopf_group *group)
+{
+	struct iommu_fault_param *fault_param = group->dev->iommu->fault_param;
+
+	INIT_WORK(&group->work, iommu_sva_handle_iopf);
+	if (!queue_work(fault_param->queue->wq, &group->work))
+		return -EBUSY;
+
+	return 0;
+}
+
+struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
+					    struct mm_struct *mm)
+{
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
+	struct iommu_domain *domain;
+
+	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
+	if (!domain)
+		return NULL;
+
+	domain->type = IOMMU_DOMAIN_SVA;
+	mmgrab(mm);
+	domain->mm = mm;
+	domain->owner = ops;
+	domain->iopf_handler = iommu_sva_iopf_handler;
+
+	return domain;
+}
diff --git a/drivers/iommu/iommu-sva.h b/drivers/iommu/iommu-sva.h
deleted file mode 100644
@@ -1,69 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * SVA library for IOMMU drivers
- */
-#ifndef _IOMMU_SVA_H
-#define _IOMMU_SVA_H
-
-#include <linux/mm_types.h>
-
-/* I/O Page fault */
-struct device;
-struct iommu_fault;
-struct iopf_queue;
-
-#ifdef CONFIG_IOMMU_SVA
-int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev);
-
-int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
-int iopf_queue_remove_device(struct iopf_queue *queue,
-			     struct device *dev);
-int iopf_queue_flush_dev(struct device *dev);
-struct iopf_queue *iopf_queue_alloc(const char *name);
-void iopf_queue_free(struct iopf_queue *queue);
-int iopf_queue_discard_partial(struct iopf_queue *queue);
-int iommu_sva_handle_iopf(struct iopf_group *group);
-
-#else /* CONFIG_IOMMU_SVA */
-static inline int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev)
-{
-	return -ENODEV;
-}
-
-static inline int iopf_queue_add_device(struct iopf_queue *queue,
-					struct device *dev)
-{
-	return -ENODEV;
-}
-
-static inline int iopf_queue_remove_device(struct iopf_queue *queue,
-					   struct device *dev)
-{
-	return -ENODEV;
-}
-
-static inline int iopf_queue_flush_dev(struct device *dev)
-{
-	return -ENODEV;
-}
-
-static inline struct iopf_queue *iopf_queue_alloc(const char *name)
-{
-	return NULL;
-}
-
-static inline void iopf_queue_free(struct iopf_queue *queue)
-{
-}
-
-static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
-{
-	return -ENODEV;
-}
-
-static inline int iommu_sva_handle_iopf(struct iopf_group *group)
-{
-	return IOMMU_PAGE_RESP_INVALID;
-}
-#endif /* CONFIG_IOMMU_SVA */
-#endif /* _IOMMU_SVA_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
@@ -36,8 +36,6 @@
 #include "dma-iommu.h"
 #include "iommu-priv.h"

-#include "iommu-sva.h"
-
 static struct kset *iommu_group_kset;
 static DEFINE_IDA(iommu_group_ida);
 static DEFINE_IDA(iommu_global_pasid_ida);
@@ -1330,117 +1328,6 @@ void iommu_group_put(struct iommu_group *group)
 }
 EXPORT_SYMBOL_GPL(iommu_group_put);

-/**
- * iommu_report_device_fault() - Report fault event to device driver
- * @dev: the device
- * @evt: fault event data
- *
- * Called by IOMMU drivers when a fault is detected, typically in a threaded IRQ
- * handler. When this function fails and the fault is recoverable, it is the
- * caller's responsibility to complete the fault.
- *
- * Return 0 on success, or an error.
- */
-int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
-{
-	struct dev_iommu *param = dev->iommu;
-	struct iopf_fault *evt_pending = NULL;
-	struct iommu_fault_param *fparam;
-	int ret = 0;
-
-	if (!param || !evt)
-		return -EINVAL;
-
-	/* we only report device fault if there is a handler registered */
-	mutex_lock(&param->lock);
-	fparam = param->fault_param;
-
-	if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
-	    (evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
-		evt_pending = kmemdup(evt, sizeof(struct iopf_fault),
-				      GFP_KERNEL);
-		if (!evt_pending) {
-			ret = -ENOMEM;
-			goto done_unlock;
-		}
-		mutex_lock(&fparam->lock);
-		list_add_tail(&evt_pending->list, &fparam->faults);
-		mutex_unlock(&fparam->lock);
-	}
-
-	ret = iommu_queue_iopf(&evt->fault, dev);
-	if (ret && evt_pending) {
-		mutex_lock(&fparam->lock);
-		list_del(&evt_pending->list);
-		mutex_unlock(&fparam->lock);
-		kfree(evt_pending);
-	}
-done_unlock:
-	mutex_unlock(&param->lock);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_report_device_fault);
-
-int iommu_page_response(struct device *dev,
-			struct iommu_page_response *msg)
-{
-	bool needs_pasid;
-	int ret = -EINVAL;
-	struct iopf_fault *evt;
-	struct iommu_fault_page_request *prm;
-	struct dev_iommu *param = dev->iommu;
-	const struct iommu_ops *ops = dev_iommu_ops(dev);
-	bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID;
-
-	if (!ops->page_response)
-		return -ENODEV;
-
-	if (!param || !param->fault_param)
-		return -EINVAL;
-
-	/* Only send response if there is a fault report pending */
-	mutex_lock(&param->fault_param->lock);
-	if (list_empty(&param->fault_param->faults)) {
-		dev_warn_ratelimited(dev, "no pending PRQ, drop response\n");
-		goto done_unlock;
-	}
-	/*
-	 * Check if we have a matching page request pending to respond,
-	 * otherwise return -EINVAL
-	 */
-	list_for_each_entry(evt, &param->fault_param->faults, list) {
-		prm = &evt->fault.prm;
-		if (prm->grpid != msg->grpid)
-			continue;
-
-		/*
-		 * If the PASID is required, the corresponding request is
-		 * matched using the group ID, the PASID valid bit and the PASID
-		 * value. Otherwise only the group ID matches request and
-		 * response.
-		 */
-		needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
-		if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid))
-			continue;
-
-		if (!needs_pasid && has_pasid) {
-			/* No big deal, just clear it. */
-			msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID;
-			msg->pasid = 0;
-		}
-
-		ret = ops->page_response(dev, evt, msg);
-		list_del(&evt->list);
-		kfree(evt);
-		break;
-	}
-
-done_unlock:
-	mutex_unlock(&param->fault_param->lock);
-	return ret;
-}
-EXPORT_SYMBOL_GPL(iommu_page_response);
-
 /**
  * iommu_group_id - Return ID for a group
  * @group: the group to ID
@@ -3523,26 +3410,6 @@ struct iommu_domain *iommu_get_domain_for_dev_pasid(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(iommu_get_domain_for_dev_pasid);

-struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
-					    struct mm_struct *mm)
-{
-	const struct iommu_ops *ops = dev_iommu_ops(dev);
-	struct iommu_domain *domain;
-
-	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
-	if (!domain)
-		return NULL;
-
-	domain->type = IOMMU_DOMAIN_SVA;
-	mmgrab(mm);
-	domain->mm = mm;
-	domain->owner = ops;
-	domain->iopf_handler = iommu_sva_handle_iopf;
-	domain->fault_data = mm;
-
-	return domain;
-}
-
 ioasid_t iommu_alloc_global_pasid(struct device *dev)
 {
 	int ret;
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
@@ -810,10 +810,6 @@ extern struct iommu_group *iommu_group_get(struct device *dev);
 extern struct iommu_group *iommu_group_ref_get(struct iommu_group *group);
 extern void iommu_group_put(struct iommu_group *group);

-extern int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
-extern int iommu_page_response(struct device *dev,
-			       struct iommu_page_response *msg);
-
 extern int iommu_group_id(struct iommu_group *group);
 extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *);

@@ -1029,8 +1025,6 @@ bool iommu_group_dma_owner_claimed(struct iommu_group *group);
 int iommu_device_claim_dma_owner(struct device *dev, void *owner);
 void iommu_device_release_dma_owner(struct device *dev);

-struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
-					    struct mm_struct *mm);
 int iommu_attach_device_pasid(struct iommu_domain *domain,
 			      struct device *dev, ioasid_t pasid);
 void iommu_detach_device_pasid(struct iommu_domain *domain,
@@ -1219,18 +1213,6 @@ static inline void iommu_group_put(struct iommu_group *group)
 {
 }

-static inline
-int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
-{
-	return -ENODEV;
-}
-
-static inline int iommu_page_response(struct device *dev,
-				      struct iommu_page_response *msg)
-{
-	return -ENODEV;
-}
-
 static inline int iommu_group_id(struct iommu_group *group)
 {
 	return -ENODEV;
@@ -1379,12 +1361,6 @@ static inline int iommu_device_claim_dma_owner(struct device *dev, void *owner)
 	return -ENODEV;
 }

-static inline struct iommu_domain *
-iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
-{
-	return NULL;
-}
-
 static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
 					    struct device *dev, ioasid_t pasid)
 {
@@ -1524,6 +1500,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev,
 					struct mm_struct *mm);
 void iommu_sva_unbind_device(struct iommu_sva *handle);
 u32 iommu_sva_get_pasid(struct iommu_sva *handle);
+struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
+					    struct mm_struct *mm);
 #else
 static inline struct iommu_sva *
 iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
@@ -1548,6 +1526,78 @@ static inline u32 mm_get_enqcmd_pasid(struct mm_struct *mm)
 }

 static inline void mm_pasid_drop(struct mm_struct *mm) {}
+
+static inline struct iommu_domain *
+iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
+{
+	return NULL;
+}
 #endif /* CONFIG_IOMMU_SVA */

+#ifdef CONFIG_IOMMU_IOPF
+int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
+int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
+int iopf_queue_flush_dev(struct device *dev);
+struct iopf_queue *iopf_queue_alloc(const char *name);
+void iopf_queue_free(struct iopf_queue *queue);
+int iopf_queue_discard_partial(struct iopf_queue *queue);
+void iopf_free_group(struct iopf_group *group);
+int iommu_report_device_fault(struct device *dev, struct iopf_fault *evt);
+int iommu_page_response(struct device *dev, struct iommu_page_response *msg);
+int iopf_group_response(struct iopf_group *group,
+			enum iommu_page_response_code status);
+#else
+static inline int
+iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
+{
+	return -ENODEV;
+}
+
+static inline int
+iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+{
+	return -ENODEV;
+}
+
+static inline int iopf_queue_flush_dev(struct device *dev)
+{
+	return -ENODEV;
+}
+
+static inline struct iopf_queue *iopf_queue_alloc(const char *name)
+{
+	return NULL;
+}
+
+static inline void iopf_queue_free(struct iopf_queue *queue)
+{
+}
+
+static inline int iopf_queue_discard_partial(struct iopf_queue *queue)
+{
+	return -ENODEV;
+}
+
+static inline void iopf_free_group(struct iopf_group *group)
+{
+}
+
+static inline int
+iommu_report_device_fault(struct device *dev, struct iopf_fault *evt)
+{
+	return -ENODEV;
+}
+
+static inline int
+iommu_page_response(struct device *dev, struct iommu_page_response *msg)
+{
+	return -ENODEV;
+}
+
+static inline int iopf_group_response(struct iopf_group *group,
+				      enum iommu_page_response_code status)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_IOMMU_IOPF */
 #endif /* __LINUX_IOMMU_H */