// SPDX-License-Identifier: GPL-2.0-only
/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/amba/bus.h>
#include <linux/fsl/mc.h>

#include "arm-smmu.h"

/*
 * Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
 * global register space are still, in fact, using a hypervisor to mediate it
 * by trapping and emulating register accesses. Sadly, some deployed versions
 * of said trapping code have bugs wherein they go horribly wrong for stores
 * using r31 (i.e. XZR/WZR) as the source register.
 */
#define QCOM_DUMMY_VAL -1

#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
#define TLB_SPIN_COUNT			10
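
/*
 * A fixed software-defined IOVA window which the driver advertises to the
 * IOMMU core (elsewhere in this file) as the region reserved for remapping
 * MSI doorbell writes.
 */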
#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

static int force_stage;
/*
 * not really modular, but the easiest way to keep compat with existing
 * bootargs behaviour is to continue using module_param() here.
 */
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass =
	IS_ENABLED(CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT);
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_cb {
	u64				ttbr[2];
	u32				tcr[2];
	u32				mair[2];
	struct arm_smmu_cfg		*cfg;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
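
/*
 * Note the comma operator in the for_each_cfg_sme() condition: idx is
 * assigned from fwspec_smendx() before the i < fw->num_ids test, so it is
 * always initialised inside the loop body, and the bounds check within
 * fwspec_smendx() keeps the final (terminating) evaluation safe.
 */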

static bool using_legacy_binding, using_generic_binding;

static inline int arm_smmu_rpm_get(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		return pm_runtime_get_sync(smmu->dev);

	return 0;
}

static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
{
	if (pm_runtime_enabled(smmu->dev))
		pm_runtime_put(smmu->dev);
}
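
/*
 * The wrappers above deliberately no-op when runtime PM is not enabled for
 * the SMMU, so the paths that take a reference around register access still
 * work on platforms without controllable clocks or power domains.
 */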

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", -1)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
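
/*
 * The allocator above is lock-free: if another CPU claims the bit between
 * find_next_zero_bit() and test_and_set_bit(), the latter returns nonzero
 * and we simply retry the search.
 */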

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu, int page,
				int sync, int status)
{
	unsigned int spin_cnt, delay;
	u32 reg;

	arm_smmu_writel(smmu, page, sync, QCOM_DUMMY_VAL);
	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
			reg = arm_smmu_readl(smmu, page, status);
			if (!(reg & sTLBGSTATUS_GSACTIVE))
				return;
			cpu_relax();
		}
		udelay(delay);
	}
	dev_err_ratelimited(smmu->dev,
			    "TLB sync timed out -- SMMU may be deadlocked\n");
}
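
/*
 * The poll above spins TLB_SPIN_COUNT times between exponentially growing
 * udelay()s, so fast syncs are caught quickly while the total wait stays
 * bounded at roughly TLB_LOOP_TIMEOUT microseconds (~1s).
 */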

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
	unsigned long flags;

	spin_lock_irqsave(&smmu->global_sync_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_GR0, ARM_SMMU_GR0_sTLBGSYNC,
			    ARM_SMMU_GR0_sTLBGSTATUS);
	spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	unsigned long flags;

	spin_lock_irqsave(&smmu_domain->cb_lock, flags);
	__arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx),
			    ARM_SMMU_CB_TLBSYNC, ARM_SMMU_CB_TLBSTATUS);
	spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	/*
	 * The TLBI write may be relaxed, so ensure that PTEs cleared by the
	 * current CPU are visible beforehand.
	 */
	wmb();
	arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx,
			  ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid);
	arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* See above */
	wmb();
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
	arm_smmu_tlb_sync_global(smmu);
}

static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int reg, idx = cfg->cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
		iova = (iova >> 12) << 12;
		iova |= cfg->asid;
		do {
			arm_smmu_cb_write(smmu, idx, reg, iova);
			iova += granule;
		} while (size -= granule);
	} else {
		iova >>= 12;
		iova |= (u64)cfg->asid << 48;
		do {
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
			iova += granule >> 12;
		} while (size -= granule);
	}
}
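
/*
 * The two encodings above follow the SMMUv2 TLBIVA(L) layouts, as far as we
 * understand them: 32-bit formats take VA[31:12] with the ASID in the low
 * bits, while the AArch64 format takes VA >> 12 in the low bits with the
 * ASID in bits [63:48], hence the 64-bit variant stepping the address in
 * units of granule >> 12.
 */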

static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
				      size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int reg, idx = smmu_domain->cfg.cbndx;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	reg = leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
	iova >>= 12;
	do {
		if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64)
			arm_smmu_cb_writeq(smmu, idx, reg, iova);
		else
			arm_smmu_cb_write(smmu, idx, reg, iova);
		iova += granule >> 12;
	} while (size -= granule);
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
					 size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		wmb();

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
}

static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, size, granule, false, cookie);
	ops->tlb_sync(cookie);
}

static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, size, granule, true, cookie);
	ops->tlb_sync(cookie);
}

static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
				  unsigned long iova, size_t granule,
				  void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;

	ops->tlb_inv_range(iova, granule, granule, true, cookie);
}
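
/*
 * Note that .tlb_add_page above omits the sync on purpose: those
 * invalidations are batched via the core's iommu_iotlb_gather and completed
 * later by arm_smmu_iotlb_sync(), whereas the walk/leaf flushes must sync
 * immediately before freed page-table memory can be reused.
 */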

static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_s1,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_range_s2,
	.tlb_sync		= arm_smmu_tlb_sync_context,
};

static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
	.tlb = {
		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
		.tlb_add_page	= arm_smmu_tlb_add_page,
	},
	.tlb_inv_range		= arm_smmu_tlb_inv_vmid_nosync,
	.tlb_sync		= arm_smmu_tlb_sync_vmid,
};
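
/*
 * The v1 stage-2 ops fall back to TLBIVMID plus a global sync since the
 * per-context-bank TLBSYNC/TLBSTATUS registers used by the other ops only
 * appeared with SMMUv2.
 */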

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr, cbfrsynra;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	int idx = smmu_domain->cfg.cbndx;

	fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
	iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
	cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cbfrsynra, idx);

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
	return IRQ_HANDLED;
}
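
/*
 * Note: the FSR bits are write-one-to-clear, so writing back the value just
 * read acknowledges exactly the fault(s) reported above.
 */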

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;

	gfsr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSR);
	gfsynr0 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sGFSR, gfsr);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx];
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	cb->cfg = cfg;

	/* TCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr;
		} else {
			cb->tcr[0] = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			cb->tcr[1] = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			cb->tcr[1] |= FIELD_PREP(TCR2_SEP, TCR2_SEP_UPSTREAM);
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				cb->tcr[1] |= TCR2_AS;
		}
	} else {
		cb->tcr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}

	/* TTBRs */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			cb->ttbr[1] = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
		} else {
			cb->ttbr[0] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			cb->ttbr[0] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
			cb->ttbr[1] = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			cb->ttbr[1] |= FIELD_PREP(TTBRn_ASID, cfg->asid);
		}
	} else {
		cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr;
			cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
	}
}
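
/*
 * Note the split: arm_smmu_init_context_bank() above only computes register
 * values into the shadow struct arm_smmu_cb, while
 * arm_smmu_write_context_bank() below pushes them to hardware. Keeping the
 * shadow copy means the whole state can be replayed, e.g. across a reset.
 */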

static void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cb *cb = &smmu->cbs[idx];
	struct arm_smmu_cfg *cfg = cb->cfg;

	/* Unassigned context banks only need disabling */
	if (!cfg) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, 0);
		return;
	}

	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;

	/* CBA2R */
	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_VA64;
		else
			reg = 0;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= FIELD_PREP(CBA2R_VMID16, cfg->vmid);

		arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBA2R(idx), reg);
	}

	/* CBAR */
	reg = FIELD_PREP(CBAR_TYPE, cfg->cbar);
	if (smmu->version < ARM_SMMU_V2)
		reg |= FIELD_PREP(CBAR_IRPTNDX, cfg->irptndx);

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= FIELD_PREP(CBAR_S1_BPSHCFG, CBAR_S1_BPSHCFG_NSH) |
			FIELD_PREP(CBAR_S1_MEMATTR, CBAR_S1_MEMATTR_WB);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= FIELD_PREP(CBAR_VMID, cfg->vmid);
	}
	arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(idx), reg);

	/*
	 * TCR
	 * We must write this before the TTBRs, since it determines the
	 * access behaviour of some fields (in particular, ASID[15:8]).
	 */
	if (stage1 && smmu->version > ARM_SMMU_V1)
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]);
	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]);

	/* TTBRs */
	if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]);
	} else {
		arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]);
		if (stage1)
			arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR1,
					   cb->ttbr[1]);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]);
		arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		reg |= SCTLR_E;

	arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, reg);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
		smmu_domain->smmu = smmu;
		goto out_unlock;
	}

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
		cfg->vmid = cfg->cbndx + 1;
	else
		cfg->asid = cfg->cbndx;

	smmu_domain->smmu = smmu;
	if (smmu->impl && smmu->impl->init_context) {
		ret = smmu->impl->init_context(smmu_domain);
		if (ret)
			goto out_unlock;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
		.tlb		= &smmu_domain->flush_ops->tlb,
		.iommu_dev	= smmu->dev,
	};

	if (smmu_domain->non_strict)
		pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	int ret, irq;

	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
		return;

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	smmu->cbs[cfg->cbndx].cfg = NULL;
	arm_smmu_write_context_bank(smmu, cfg->cbndx);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);

	arm_smmu_rpm_put(smmu);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->cb_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = FIELD_PREP(SMR_ID, smr->id) | FIELD_PREP(SMR_MASK, smr->mask);

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(idx), reg);
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = FIELD_PREP(S2CR_TYPE, s2cr->type) |
		  FIELD_PREP(S2CR_CBNDX, s2cr->cbndx) |
		  FIELD_PREP(S2CR_PRIVCFG, s2cr->privcfg);

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_S2CR(idx), reg);
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}
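
/*
 * With Extended Stream IDs (EXIDS), the valid bit moves out of the SMR
 * (freeing bit 31 for a full 16-bit ID/mask) and into the S2CR as
 * S2CR_EXIDVALID, which is why the two helpers above test the feature in
 * opposite senses.
 */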

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = FIELD_PREP(SMR_ID, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = FIELD_GET(SMR_ID, smr);

	smr = FIELD_PREP(SMR_MASK, smmu->streamid_mask);
	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_SMR(0), smr);
	smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = FIELD_GET(SMR_MASK, smr);
}
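
/*
 * The write/read-back dance above probes which ID and MASK bits are
 * actually implemented: unimplemented bits read back as zero, leaving
 * streamid_mask and smr_mask_mask describing the usable field widths.
 */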

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}
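
/*
 * Worked example (illustrative values only): an existing entry
 * {id = 0x400, mask = 0xff} matches stream IDs 0x400-0x4ff. A new
 * {id = 0x420, mask = 0x0f} lies entirely within it (its mask is a subset
 * and the IDs agree outside 0xff), so that index is reused; a new
 * {id = 0x4c0, mask = 0x1ff} merely overlaps it, so -EINVAL is returned.
 */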

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = FIELD_GET(SMR_ID, fwspec->ids[i]);
		u16 mask = FIELD_GET(SMR_MASK, fwspec->ids[i]);

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}
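
/*
 * Allocation above is two-phase: every stream is first reserved in the
 * software tables, and the hardware SME registers are only written once all
 * of them have a viable index, so a failure part-way through rolls back
 * without ever touching the SMMU.
 */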

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	u8 cbndx = smmu_domain->cfg.cbndx;
	enum arm_smmu_s2cr_type type;
	int i, idx;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
		type = S2CR_TYPE_BYPASS;
	else
		type = S2CR_TYPE_TRANS;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);

	ret = arm_smmu_rpm_get(smmu);
	if (ret < 0)
		return ret;

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		goto rpm_put;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		ret = -EINVAL;
		goto rpm_put;
	}

	/* Looks ok, so add the device to the domain */
	ret = arm_smmu_domain_add_master(smmu_domain, fwspec);

rpm_put:
	arm_smmu_rpm_put(smmu);
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	int ret;

	if (!ops)
		return -ENODEV;

	arm_smmu_rpm_get(smmu);
	ret = ops->map(ops, iova, paddr, size, prot);
	arm_smmu_rpm_put(smmu);

	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size, struct iommu_iotlb_gather *gather)
{
	struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
	struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu;
	size_t ret;

	if (!ops)
		return 0;

	arm_smmu_rpm_get(smmu);
	ret = ops->unmap(ops, iova, size, gather);
	arm_smmu_rpm_put(smmu);

	return ret;
}
2018-09-21 00:10:27 +08:00
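/*
 * Explicit TLB maintenance only makes sense once the domain has been
 * attached and flush_ops installed; take a runtime PM reference around
 * the operation so the SMMU is powered while invalidations are issued.
 */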
static void arm_smmu_flush_iotlb_all ( struct iommu_domain * domain )
{
struct arm_smmu_domain * smmu_domain = to_smmu_domain ( domain ) ;
2018-12-04 14:22:10 +08:00
struct arm_smmu_device * smmu = smmu_domain - > smmu ;
2018-09-21 00:10:27 +08:00
2019-07-02 23:44:41 +08:00
if ( smmu_domain - > flush_ops ) {
2018-12-04 14:22:10 +08:00
arm_smmu_rpm_get ( smmu ) ;
2019-07-02 23:44:41 +08:00
smmu_domain - > flush_ops - > tlb . tlb_flush_all ( smmu_domain ) ;
2018-12-04 14:22:10 +08:00
arm_smmu_rpm_put ( smmu ) ;
}
2018-09-21 00:10:27 +08:00
}
2019-07-02 23:44:06 +08:00
static void arm_smmu_iotlb_sync ( struct iommu_domain * domain ,
struct iommu_iotlb_gather * gather )
2017-09-28 22:55:01 +08:00
{
struct arm_smmu_domain * smmu_domain = to_smmu_domain ( domain ) ;
2018-12-04 14:22:10 +08:00
struct arm_smmu_device * smmu = smmu_domain - > smmu ;
2017-09-28 22:55:01 +08:00
2019-07-02 23:44:41 +08:00
if ( smmu_domain - > flush_ops ) {
2018-12-04 14:22:10 +08:00
arm_smmu_rpm_get ( smmu ) ;
2019-07-02 23:44:50 +08:00
smmu_domain - > flush_ops - > tlb_sync ( smmu_domain ) ;
2018-12-04 14:22:10 +08:00
arm_smmu_rpm_put ( smmu ) ;
}
2017-09-28 22:55:01 +08:00
}
2014-10-30 05:13:40 +08:00
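/*
 * Resolve an IOVA via the hardware's stage 1 address translation
 * operation: write the page-aligned VA to ATS1PR, poll ATSR until the
 * walk completes, then read the result out of PAR. On timeout we fall
 * back to a software table walk; a reported translation fault yields 0.
 */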
static phys_addr_t arm_smmu_iova_to_phys_hard ( struct iommu_domain * domain ,
dma_addr_t iova )
{
2015-03-26 20:43:10 +08:00
struct arm_smmu_domain * smmu_domain = to_smmu_domain ( domain ) ;
2014-10-30 05:13:40 +08:00
struct arm_smmu_device * smmu = smmu_domain - > smmu ;
struct arm_smmu_cfg * cfg = & smmu_domain - > cfg ;
struct io_pgtable_ops * ops = smmu_domain - > pgtbl_ops ;
struct device * dev = smmu - > dev ;
2019-08-16 02:37:30 +08:00
void __iomem * reg ;
2014-10-30 05:13:40 +08:00
u32 tmp ;
u64 phys ;
2017-06-22 23:53:56 +08:00
unsigned long va , flags ;
2019-08-16 02:37:30 +08:00
int ret , idx = cfg - > cbndx ;
2018-12-04 14:22:10 +08:00
ret = arm_smmu_rpm_get ( smmu ) ;
if ( ret < 0 )
return 0 ;
2014-10-30 05:13:40 +08:00
2017-06-22 23:53:56 +08:00
spin_lock_irqsave ( & smmu_domain - > cb_lock , flags ) ;
2015-05-28 00:09:34 +08:00
va = iova & ~ 0xfffUL ;
2019-08-16 02:37:28 +08:00
if ( cfg - > fmt = = ARM_SMMU_CTX_FMT_AARCH64 )
2019-08-16 02:37:30 +08:00
arm_smmu_cb_writeq ( smmu , idx , ARM_SMMU_CB_ATS1PR , va ) ;
2019-08-16 02:37:28 +08:00
else
2019-08-16 02:37:30 +08:00
arm_smmu_cb_write ( smmu , idx , ARM_SMMU_CB_ATS1PR , va ) ;
2014-10-30 05:13:40 +08:00
2019-08-16 02:37:30 +08:00
reg = arm_smmu_page ( smmu , ARM_SMMU_CB ( smmu , idx ) ) + ARM_SMMU_CB_ATSR ;
if ( readl_poll_timeout_atomic ( reg , tmp , ! ( tmp & ATSR_ACTIVE ) , 5 , 50 ) ) {
2017-06-22 23:53:56 +08:00
spin_unlock_irqrestore ( & smmu_domain - > cb_lock , flags ) ;
2014-10-30 05:13:40 +08:00
dev_err ( dev ,
2015-08-19 00:12:24 +08:00
" iova to phys timed out on %pad. Falling back to software table walk. \n " ,
2014-10-30 05:13:40 +08:00
& iova ) ;
/* Drop the runtime PM reference taken above before falling back */
arm_smmu_rpm_put ( smmu ) ;
return ops - > iova_to_phys ( ops , iova ) ;
}
2019-08-16 02:37:30 +08:00
phys = arm_smmu_cb_readq ( smmu , idx , ARM_SMMU_CB_PAR ) ;
2017-06-22 23:53:56 +08:00
spin_unlock_irqrestore ( & smmu_domain - > cb_lock , flags ) ;
2014-10-30 05:13:40 +08:00
if ( phys & CB_PAR_F ) {
dev_err ( dev , " translation fault! \n " ) ;
dev_err ( dev , " PAR = 0x%llx \n " , phys ) ;
arm_smmu_rpm_put ( smmu ) ;
return 0 ;
}
2018-12-04 14:22:10 +08:00
arm_smmu_rpm_put ( smmu ) ;
2014-10-30 05:13:40 +08:00
return ( phys & GENMASK_ULL ( 39 , 12 ) ) | ( iova & 0xfff ) ;
}
2013-06-25 01:31:25 +08:00
static phys_addr_t arm_smmu_iova_to_phys ( struct iommu_domain * domain ,
2014-10-30 05:13:40 +08:00
dma_addr_t iova )
2013-06-25 01:31:25 +08:00
{
2015-03-26 20:43:10 +08:00
struct arm_smmu_domain * smmu_domain = to_smmu_domain ( domain ) ;
2017-06-22 23:53:56 +08:00
struct io_pgtable_ops * ops = smmu_domain - > pgtbl_ops ;
2013-06-25 01:31:25 +08:00
2017-04-25 17:57:52 +08:00
if ( domain - > type = = IOMMU_DOMAIN_IDENTITY )
return iova ;
2014-11-15 01:17:54 +08:00
if ( ! ops )
2013-11-08 02:47:50 +08:00
return 0 ;
2013-06-25 01:31:25 +08:00
2015-03-04 23:51:06 +08:00
if ( smmu_domain - > smmu - > features & ARM_SMMU_FEAT_TRANS_OPS & &
2017-06-22 23:53:56 +08:00
smmu_domain - > stage = = ARM_SMMU_DOMAIN_S1 )
return arm_smmu_iova_to_phys_hard ( domain , iova ) ;
2014-10-30 05:13:40 +08:00
2017-06-22 23:53:56 +08:00
return ops - > iova_to_phys ( ops , iova ) ;
2013-06-25 01:31:25 +08:00
}
2014-09-05 16:49:34 +08:00
static bool arm_smmu_capable ( enum iommu_cap cap )
2013-06-25 01:31:25 +08:00
{
2014-06-25 00:30:10 +08:00
switch ( cap ) {
case IOMMU_CAP_CACHE_COHERENCY :
2014-09-05 16:49:34 +08:00
/*
* Return true here as the SMMU can always send out coherent
* requests .
*/
return true ;
2014-10-13 21:06:18 +08:00
case IOMMU_CAP_NOEXEC :
return true ;
2014-06-25 00:30:10 +08:00
default :
2014-09-05 16:49:34 +08:00
return false ;
2014-06-25 00:30:10 +08:00
}
2013-06-25 01:31:25 +08:00
}
2016-11-21 18:01:37 +08:00
static
struct arm_smmu_device * arm_smmu_get_by_fwnode ( struct fwnode_handle * fwnode )
2016-09-14 22:26:46 +08:00
{
2019-07-24 06:18:34 +08:00
struct device * dev = driver_find_device_by_fwnode ( & arm_smmu_driver . driver ,
fwnode ) ;
2016-09-14 22:26:46 +08:00
put_device ( dev ) ;
return dev ? dev_get_drvdata ( dev ) : NULL ;
}
2016-09-14 22:21:39 +08:00
static int arm_smmu_add_device ( struct device * dev )
2013-06-25 01:31:25 +08:00
{
2016-09-13 00:13:55 +08:00
struct arm_smmu_device * smmu ;
2015-01-19 22:27:33 +08:00
struct arm_smmu_master_cfg * cfg ;
2018-11-29 21:01:00 +08:00
struct iommu_fwspec * fwspec = dev_iommu_fwspec_get ( dev ) ;
2016-09-14 22:21:39 +08:00
int i , ret ;
2014-07-15 18:27:08 +08:00
2016-09-14 22:26:46 +08:00
if ( using_legacy_binding ) {
ret = arm_smmu_register_legacy_master ( dev , & smmu ) ;
2017-08-08 18:26:02 +08:00
/*
* If dev - > iommu_fwspec is initially NULL , arm_smmu_register_legacy_master ( )
* will allocate / initialise a new one . Thus we need to update fwspec for
* later use .
*/
2018-11-29 21:01:00 +08:00
fwspec = dev_iommu_fwspec_get ( dev ) ;
2016-09-14 22:26:46 +08:00
if ( ret )
goto out_free ;
2016-11-03 01:31:32 +08:00
} else if ( fwspec & & fwspec - > ops = = & arm_smmu_ops ) {
2016-11-21 18:01:37 +08:00
smmu = arm_smmu_get_by_fwnode ( fwspec - > iommu_fwnode ) ;
2016-09-14 22:26:46 +08:00
} else {
return - ENODEV ;
}
2014-05-02 01:05:08 +08:00
2016-09-14 22:21:39 +08:00
ret = - EINVAL ;
2016-09-13 00:13:55 +08:00
for ( i = 0 ; i < fwspec - > num_ids ; i + + ) {
2019-08-16 02:37:23 +08:00
u16 sid = FIELD_GET ( SMR_ID , fwspec - > ids [ i ] ) ;
u16 mask = FIELD_GET ( SMR_MASK , fwspec - > ids [ i ] ) ;
2015-01-19 22:27:33 +08:00
2016-09-13 00:13:55 +08:00
if ( sid & ~ smmu - > streamid_mask ) {
2016-09-14 22:21:39 +08:00
dev_err ( dev , " stream ID 0x%x out of range for SMMU (0x%x) \n " ,
2016-09-14 22:26:46 +08:00
sid , smmu - > streamid_mask ) ;
goto out_free ;
}
if ( mask & ~ smmu - > smr_mask_mask ) {
dev_err ( dev , " SMR mask 0x%x out of range for SMMU (0x%x) \n " ,
2017-04-21 17:03:36 +08:00
mask , smmu - > smr_mask_mask ) ;
2016-09-14 22:21:39 +08:00
goto out_free ;
}
2016-09-13 00:13:49 +08:00
}
2013-10-18 23:08:29 +08:00
2016-09-13 00:13:55 +08:00
ret = - ENOMEM ;
cfg = kzalloc ( offsetof ( struct arm_smmu_master_cfg , smendx [ i ] ) ,
GFP_KERNEL ) ;
if ( ! cfg )
goto out_free ;
cfg - > smmu = smmu ;
fwspec - > iommu_priv = cfg ;
while ( i - - )
cfg - > smendx [ i ] = INVALID_SMENDX ;
2018-12-04 14:22:10 +08:00
ret = arm_smmu_rpm_get ( smmu ) ;
if ( ret < 0 )
goto out_cfg_free ;
2016-09-13 00:13:54 +08:00
ret = arm_smmu_master_alloc_smes ( dev ) ;
2018-12-04 14:22:10 +08:00
arm_smmu_rpm_put ( smmu ) ;
2016-09-13 00:13:55 +08:00
if ( ret )
2017-07-06 17:37:00 +08:00
goto out_cfg_free ;
2016-09-13 00:13:55 +08:00
2017-02-02 01:11:36 +08:00
iommu_device_link ( & smmu - > iommu , dev ) ;
2018-12-04 14:22:11 +08:00
device_link_add ( dev , smmu - > dev ,
DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_SUPPLIER ) ;
2016-09-13 00:13:55 +08:00
return 0 ;
2016-09-14 22:21:39 +08:00
2017-07-06 17:37:00 +08:00
out_cfg_free :
kfree ( cfg ) ;
2016-09-14 22:21:39 +08:00
out_free :
2016-09-13 00:13:55 +08:00
iommu_fwspec_free ( dev ) ;
2016-09-14 22:21:39 +08:00
return ret ;
2015-01-19 22:27:33 +08:00
}
2013-06-25 01:31:25 +08:00
static void arm_smmu_remove_device ( struct device * dev )
{
2018-11-29 21:01:00 +08:00
struct iommu_fwspec * fwspec = dev_iommu_fwspec_get ( dev ) ;
2017-02-02 01:11:36 +08:00
struct arm_smmu_master_cfg * cfg ;
struct arm_smmu_device * smmu ;
2018-12-04 14:22:10 +08:00
int ret ;
2016-09-13 00:13:50 +08:00
2016-09-13 00:13:55 +08:00
if ( ! fwspec | | fwspec - > ops ! = & arm_smmu_ops )
2016-09-14 22:21:39 +08:00
return ;
2016-09-13 00:13:50 +08:00
2017-02-02 01:11:36 +08:00
cfg = fwspec - > iommu_priv ;
smmu = cfg - > smmu ;
2018-12-04 14:22:10 +08:00
ret = arm_smmu_rpm_get ( smmu ) ;
if ( ret < 0 )
return ;
2017-02-02 01:11:36 +08:00
iommu_device_unlink ( & smmu - > iommu , dev ) ;
2016-09-13 00:13:55 +08:00
arm_smmu_master_free_smes ( fwspec ) ;
2018-12-04 14:22:10 +08:00
arm_smmu_rpm_put ( smmu ) ;
2013-10-18 23:08:29 +08:00
iommu_group_remove_device ( dev ) ;
2016-09-13 00:13:55 +08:00
kfree ( fwspec - > iommu_priv ) ;
iommu_fwspec_free ( dev ) ;
2013-06-25 01:31:25 +08:00
}
2015-10-22 05:51:41 +08:00
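/*
 * Devices whose stream IDs alias onto the same stream mapping entries
 * (e.g. via overlapping SMR masks) are not isolated from one another, so
 * they must share an IOMMU group. Reuse any group an existing SME owner
 * already belongs to, and reject configurations spanning two groups.
 */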
static struct iommu_group * arm_smmu_device_group ( struct device * dev )
{
2018-11-29 21:01:00 +08:00
struct iommu_fwspec * fwspec = dev_iommu_fwspec_get ( dev ) ;
2016-09-13 00:13:55 +08:00
struct arm_smmu_device * smmu = fwspec_smmu ( fwspec ) ;
2016-09-13 00:13:54 +08:00
struct iommu_group * group = NULL ;
int i , idx ;
2016-09-13 00:13:55 +08:00
for_each_cfg_sme ( fwspec , i , idx ) {
2016-09-13 00:13:54 +08:00
if ( group & & smmu - > s2crs [ idx ] . group & &
group ! = smmu - > s2crs [ idx ] . group )
return ERR_PTR ( - EINVAL ) ;
group = smmu - > s2crs [ idx ] . group ;
}
if ( group )
2016-11-12 01:59:22 +08:00
return iommu_group_ref_get ( group ) ;
2015-10-22 05:51:41 +08:00
if ( dev_is_pci ( dev ) )
group = pci_device_group ( dev ) ;
2018-09-10 21:49:18 +08:00
else if ( dev_is_fsl_mc ( dev ) )
group = fsl_mc_device_group ( dev ) ;
2015-10-22 05:51:41 +08:00
else
group = generic_device_group ( dev ) ;
return group ;
}
2014-06-26 05:46:31 +08:00
static int arm_smmu_domain_get_attr ( struct iommu_domain * domain ,
enum iommu_attr attr , void * data )
{
2015-03-26 20:43:10 +08:00
struct arm_smmu_domain * smmu_domain = to_smmu_domain ( domain ) ;
2014-06-26 05:46:31 +08:00
2018-09-21 00:10:27 +08:00
switch ( domain - > type ) {
case IOMMU_DOMAIN_UNMANAGED :
switch ( attr ) {
case DOMAIN_ATTR_NESTING :
* ( int * ) data = ( smmu_domain - > stage = = ARM_SMMU_DOMAIN_NESTED ) ;
return 0 ;
default :
return - ENODEV ;
}
break ;
case IOMMU_DOMAIN_DMA :
switch ( attr ) {
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE :
* ( int * ) data = smmu_domain - > non_strict ;
return 0 ;
default :
return - ENODEV ;
}
break ;
2014-06-26 05:46:31 +08:00
default :
2018-09-21 00:10:27 +08:00
return - EINVAL ;
2014-06-26 05:46:31 +08:00
}
}
static int arm_smmu_domain_set_attr ( struct iommu_domain * domain ,
enum iommu_attr attr , void * data )
{
2014-11-15 01:17:54 +08:00
int ret = 0 ;
2015-03-26 20:43:10 +08:00
struct arm_smmu_domain * smmu_domain = to_smmu_domain ( domain ) ;
2014-06-26 05:46:31 +08:00
2014-11-15 01:17:54 +08:00
mutex_lock ( & smmu_domain - > init_mutex ) ;
2018-09-21 00:10:27 +08:00
switch ( domain - > type ) {
case IOMMU_DOMAIN_UNMANAGED :
switch ( attr ) {
case DOMAIN_ATTR_NESTING :
if ( smmu_domain - > smmu ) {
ret = - EPERM ;
goto out_unlock ;
}
if ( * ( int * ) data )
smmu_domain - > stage = ARM_SMMU_DOMAIN_NESTED ;
else
smmu_domain - > stage = ARM_SMMU_DOMAIN_S1 ;
break ;
default :
ret = - ENODEV ;
}
break ;
case IOMMU_DOMAIN_DMA :
switch ( attr ) {
case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE :
smmu_domain - > non_strict = * ( int * ) data ;
break ;
default :
ret = - ENODEV ;
2014-11-15 01:17:54 +08:00
}
break ;
2014-06-26 05:46:31 +08:00
default :
2018-09-21 00:10:27 +08:00
ret = - EINVAL ;
2014-06-26 05:46:31 +08:00
}
2014-11-15 01:17:54 +08:00
out_unlock :
mutex_unlock ( & smmu_domain - > init_mutex ) ;
return ret ;
2014-06-26 05:46:31 +08:00
}
2016-09-14 22:26:46 +08:00
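/*
 * Pack a DT "iommus" specifier into a single 32-bit firmware ID: the
 * stream ID goes in the SMR_ID field and the optional second cell (or a
 * global "stream-match-mask" property) in SMR_MASK. As a purely
 * illustrative example, a hypothetical master using stream ID 0x100 with
 * mask 0x7 might be described as:
 *
 *	iommus = <&smmu 0x100 0x7>;
 */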
static int arm_smmu_of_xlate ( struct device * dev , struct of_phandle_args * args )
{
2017-03-31 19:03:33 +08:00
u32 mask , fwid = 0 ;
2016-09-14 22:26:46 +08:00
if ( args - > args_count > 0 )
2019-08-16 02:37:23 +08:00
fwid | = FIELD_PREP ( SMR_ID , args - > args [ 0 ] ) ;
2016-09-14 22:26:46 +08:00
if ( args - > args_count > 1 )
2019-08-16 02:37:23 +08:00
fwid | = FIELD_PREP ( SMR_MASK , args - > args [ 1 ] ) ;
2017-03-31 19:03:33 +08:00
else if ( ! of_property_read_u32 ( args - > np , " stream-match-mask " , & mask ) )
2019-08-16 02:37:23 +08:00
fwid | = FIELD_PREP ( SMR_MASK , mask ) ;
2016-09-14 22:26:46 +08:00
return iommu_fwspec_add_ids ( dev , & fwid , 1 ) ;
}
2017-01-20 04:57:55 +08:00
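/*
 * Advertise a software-managed MSI window (MSI_IOVA_LENGTH bytes at
 * MSI_IOVA_BASE) so that MSI doorbells can be mapped, along with any
 * regions reserved by iommu-dma.
 */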
static void arm_smmu_get_resv_regions ( struct device * dev ,
struct list_head * head )
{
struct iommu_resv_region * region ;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO ;
region = iommu_alloc_resv_region ( MSI_IOVA_BASE , MSI_IOVA_LENGTH ,
2017-03-17 01:00:16 +08:00
prot , IOMMU_RESV_SW_MSI ) ;
2017-01-20 04:57:55 +08:00
if ( ! region )
return ;
list_add_tail ( & region - > list , head ) ;
2017-03-17 01:00:19 +08:00
iommu_dma_get_resv_regions ( dev , head ) ;
2017-01-20 04:57:55 +08:00
}
static void arm_smmu_put_resv_regions ( struct device * dev ,
struct list_head * head )
{
struct iommu_resv_region * entry , * next ;
list_for_each_entry_safe ( entry , next , head , list )
kfree ( entry ) ;
}
2014-11-15 01:17:54 +08:00
static struct iommu_ops arm_smmu_ops = {
2014-06-26 05:46:31 +08:00
. capable = arm_smmu_capable ,
2015-03-26 20:43:10 +08:00
. domain_alloc = arm_smmu_domain_alloc ,
. domain_free = arm_smmu_domain_free ,
2014-06-26 05:46:31 +08:00
. attach_dev = arm_smmu_attach_dev ,
. map = arm_smmu_map ,
. unmap = arm_smmu_unmap ,
2018-09-21 00:10:27 +08:00
. flush_iotlb_all = arm_smmu_flush_iotlb_all ,
2017-09-28 22:55:01 +08:00
. iotlb_sync = arm_smmu_iotlb_sync ,
2014-06-26 05:46:31 +08:00
. iova_to_phys = arm_smmu_iova_to_phys ,
. add_device = arm_smmu_add_device ,
. remove_device = arm_smmu_remove_device ,
2015-10-22 05:51:41 +08:00
. device_group = arm_smmu_device_group ,
2014-06-26 05:46:31 +08:00
. domain_get_attr = arm_smmu_domain_get_attr ,
. domain_set_attr = arm_smmu_domain_set_attr ,
2016-09-14 22:26:46 +08:00
. of_xlate = arm_smmu_of_xlate ,
2017-01-20 04:57:55 +08:00
. get_resv_regions = arm_smmu_get_resv_regions ,
. put_resv_regions = arm_smmu_put_resv_regions ,
2014-11-15 01:17:54 +08:00
. pgsize_bitmap = - 1UL , /* Restricted during device attach */
2013-06-25 01:31:25 +08:00
} ;
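/*
 * Bring the SMMU to a known state: clear recorded faults, reinitialise
 * every stream mapping entry and context bank, invalidate the TLBs, and
 * finally program sCR0 according to the detected features and the
 * disable_bypass policy.
 */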
static void arm_smmu_device_reset ( struct arm_smmu_device * smmu )
{
2016-09-13 00:13:49 +08:00
int i ;
2019-08-16 02:37:36 +08:00
u32 reg ;
2013-10-01 20:39:09 +08:00
2014-01-31 02:18:04 +08:00
/* clear global FSR */
2019-08-16 02:37:31 +08:00
reg = arm_smmu_gr0_read ( smmu , ARM_SMMU_GR0_sGFSR ) ;
arm_smmu_gr0_write ( smmu , ARM_SMMU_GR0_sGFSR , reg ) ;
2013-06-25 01:31:25 +08:00
2016-09-13 00:13:49 +08:00
/*
* Reset stream mapping groups : Initial values mark all SMRn as
* invalid and all S2CRn as bypass unless overridden .
*/
2016-09-13 00:13:50 +08:00
for ( i = 0 ; i < smmu - > num_mapping_groups ; + + i )
arm_smmu_write_sme ( smmu , i ) ;
2013-06-25 01:31:25 +08:00
2013-10-01 20:39:09 +08:00
/* Make sure all context banks are disabled and clear CB_FSR */
for ( i = 0 ; i < smmu - > num_context_banks ; + + i ) {
2017-08-08 21:56:14 +08:00
arm_smmu_write_context_bank ( smmu , i ) ;
2019-08-16 02:37:30 +08:00
arm_smmu_cb_write ( smmu , i , ARM_SMMU_CB_FSR , FSR_FAULT ) ;
2013-10-01 20:39:09 +08:00
}
2013-08-01 02:21:27 +08:00
2013-06-25 01:31:25 +08:00
/* Invalidate the TLB, just in case */
2019-08-16 02:37:31 +08:00
arm_smmu_gr0_write ( smmu , ARM_SMMU_GR0_TLBIALLH , QCOM_DUMMY_VAL ) ;
arm_smmu_gr0_write ( smmu , ARM_SMMU_GR0_TLBIALLNSNH , QCOM_DUMMY_VAL ) ;
2013-06-25 01:31:25 +08:00
2019-08-16 02:37:31 +08:00
reg = arm_smmu_gr0_read ( smmu , ARM_SMMU_GR0_sCR0 ) ;
2013-10-01 20:39:09 +08:00
2013-06-25 01:31:25 +08:00
/* Enable fault reporting */
2013-10-01 20:39:09 +08:00
reg | = ( sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE ) ;
2013-06-25 01:31:25 +08:00
/* Disable TLB broadcasting. */
2013-10-01 20:39:09 +08:00
reg | = ( sCR0_VMIDPNE | sCR0_PTM ) ;
2013-06-25 01:31:25 +08:00
2016-02-10 22:25:33 +08:00
/* Enable client access, handling unmatched streams as appropriate */
reg & = ~ sCR0_CLIENTPD ;
if ( disable_bypass )
reg | = sCR0_USFCFG ;
else
reg & = ~ sCR0_USFCFG ;
2013-06-25 01:31:25 +08:00
/* Disable forced broadcasting */
2013-10-01 20:39:09 +08:00
reg & = ~ sCR0_FB ;
2013-06-25 01:31:25 +08:00
/* Don't upgrade barriers */
2019-08-16 02:37:23 +08:00
reg & = ~ ( sCR0_BSU ) ;
2013-06-25 01:31:25 +08:00
2016-02-24 02:19:00 +08:00
if ( smmu - > features & ARM_SMMU_FEAT_VMID16 )
reg | = sCR0_VMID16EN ;
2017-01-19 22:36:36 +08:00
if ( smmu - > features & ARM_SMMU_FEAT_EXIDS )
reg | = sCR0_EXIDENABLE ;
2019-08-16 02:37:36 +08:00
if ( smmu - > impl & & smmu - > impl - > reset )
smmu - > impl - > reset ( smmu ) ;
2013-06-25 01:31:25 +08:00
/* Push the button */
2017-03-31 00:56:31 +08:00
arm_smmu_tlb_sync_global ( smmu ) ;
2019-08-16 02:37:31 +08:00
arm_smmu_gr0_write ( smmu , ARM_SMMU_GR0_sCR0 , reg ) ;
2013-06-25 01:31:25 +08:00
}
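/*
 * The ID registers describe address sizes with a 3-bit encoding rather
 * than a raw width: 0 => 32 bits up to 5 => 48 bits, with reserved
 * values treated as the 48-bit maximum.
 */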
static int arm_smmu_id_size_to_bits ( int size )
{
switch ( size ) {
case 0 :
return 32 ;
case 1 :
return 36 ;
case 2 :
return 40 ;
case 3 :
return 42 ;
case 4 :
return 44 ;
case 5 :
default :
return 48 ;
}
}
static int arm_smmu_device_cfg_probe ( struct arm_smmu_device * smmu )
{
2019-08-16 02:37:26 +08:00
unsigned int size ;
2013-06-25 01:31:25 +08:00
u32 id ;
2016-11-21 18:01:44 +08:00
bool cttw_reg , cttw_fw = smmu - > features & ARM_SMMU_FEAT_COHERENT_WALK ;
2016-09-13 00:13:50 +08:00
int i ;
2013-06-25 01:31:25 +08:00
dev_notice ( smmu - > dev , " probing hardware configuration... \n " ) ;
2016-04-14 01:13:03 +08:00
dev_notice ( smmu - > dev , " SMMUv%d with: \n " ,
smmu - > version = = ARM_SMMU_V2 ? 2 : 1 ) ;
2013-06-25 01:31:25 +08:00
/* ID0 */
2019-08-16 02:37:31 +08:00
id = arm_smmu_gr0_read ( smmu , ARM_SMMU_GR0_ID0 ) ;
2014-07-15 02:47:39 +08:00
/* Restrict available stages based on module parameter */
if ( force_stage = = 1 )
id & = ~ ( ID0_S2TS | ID0_NTS ) ;
else if ( force_stage = = 2 )
id & = ~ ( ID0_S1TS | ID0_NTS ) ;
2013-06-25 01:31:25 +08:00
if ( id & ID0_S1TS ) {
smmu - > features | = ARM_SMMU_FEAT_TRANS_S1 ;
dev_notice ( smmu - > dev , " \t stage 1 translation \n " ) ;
}
if ( id & ID0_S2TS ) {
smmu - > features | = ARM_SMMU_FEAT_TRANS_S2 ;
dev_notice ( smmu - > dev , " \t stage 2 translation \n " ) ;
}
if ( id & ID0_NTS ) {
smmu - > features | = ARM_SMMU_FEAT_TRANS_NESTED ;
dev_notice ( smmu - > dev , " \t nested translation \n " ) ;
}
if ( ! ( smmu - > features &
2014-07-15 02:47:39 +08:00
( ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 ) ) ) {
2013-06-25 01:31:25 +08:00
dev_err ( smmu - > dev , " \t no translation support! \n " ) ;
return - ENODEV ;
}
2016-04-14 01:13:03 +08:00
if ( ( id & ID0_S1TS ) & &
( ( smmu - > version < ARM_SMMU_V2 ) | | ! ( id & ID0_ATOSNS ) ) ) {
2014-10-30 05:13:40 +08:00
smmu - > features | = ARM_SMMU_FEAT_TRANS_OPS ;
dev_notice ( smmu - > dev , " \t address translation ops \n " ) ;
}
2015-07-30 02:46:05 +08:00
/*
* In order for DMA API calls to work properly , we must defer to what
2016-11-21 18:01:44 +08:00
* the FW says about coherency , regardless of what the hardware claims .
2015-07-30 02:46:05 +08:00
* Fortunately , this also opens up a workaround for systems where the
* ID register value has ended up configured incorrectly .
*/
cttw_reg = ! ! ( id & ID0_CTTW ) ;
2016-11-21 18:01:44 +08:00
if ( cttw_fw | | cttw_reg )
2015-07-30 02:46:05 +08:00
dev_notice ( smmu - > dev , " \t %scoherent table walk \n " ,
2016-11-21 18:01:44 +08:00
cttw_fw ? " " : " non- " ) ;
if ( cttw_fw ! = cttw_reg )
2015-07-30 02:46:05 +08:00
dev_notice ( smmu - > dev ,
2016-11-21 18:01:44 +08:00
" \t (IDR0.CTTW overridden by FW configuration) \n " ) ;
2013-06-25 01:31:25 +08:00
2016-09-13 00:13:48 +08:00
/* Max. number of entries we have for stream matching/indexing */
2017-01-19 22:36:36 +08:00
if ( smmu - > version = = ARM_SMMU_V2 & & id & ID0_EXIDS ) {
smmu - > features | = ARM_SMMU_FEAT_EXIDS ;
size = 1 < < 16 ;
} else {
2019-08-16 02:37:23 +08:00
size = 1 < < FIELD_GET ( ID0_NUMSIDB , id ) ;
2017-01-19 22:36:36 +08:00
}
2016-09-13 00:13:48 +08:00
smmu - > streamid_mask = size - 1 ;
2013-06-25 01:31:25 +08:00
if ( id & ID0_SMS ) {
smmu - > features | = ARM_SMMU_FEAT_STREAM_MATCH ;
2019-08-16 02:37:23 +08:00
size = FIELD_GET ( ID0_NUMSMRG , id ) ;
2016-09-13 00:13:48 +08:00
if ( size = = 0 ) {
2013-06-25 01:31:25 +08:00
dev_err ( smmu - > dev ,
" stream-matching supported, but no SMRs present! \n " ) ;
return - ENODEV ;
}
2016-09-13 00:13:49 +08:00
/* Zero-initialised to mark as invalid */
smmu - > smrs = devm_kcalloc ( smmu - > dev , size , sizeof ( * smmu - > smrs ) ,
GFP_KERNEL ) ;
if ( ! smmu - > smrs )
return - ENOMEM ;
2013-06-25 01:31:25 +08:00
dev_notice ( smmu - > dev ,
2019-08-16 02:37:26 +08:00
" \t stream matching with %u register groups " , size ) ;
2013-06-25 01:31:25 +08:00
}
2016-09-13 00:13:50 +08:00
/* s2cr->type == 0 means translation, so initialise explicitly */
smmu - > s2crs = devm_kmalloc_array ( smmu - > dev , size , sizeof ( * smmu - > s2crs ) ,
GFP_KERNEL ) ;
if ( ! smmu - > s2crs )
return - ENOMEM ;
for ( i = 0 ; i < size ; i + + )
smmu - > s2crs [ i ] = s2cr_init_val ;
2016-09-13 00:13:48 +08:00
smmu - > num_mapping_groups = size ;
2016-09-13 00:13:54 +08:00
mutex_init ( & smmu - > stream_map_mutex ) ;
2017-07-06 22:55:48 +08:00
spin_lock_init ( & smmu - > global_sync_lock ) ;
2013-06-25 01:31:25 +08:00
2016-04-29 00:12:09 +08:00
if ( smmu - > version < ARM_SMMU_V2 | | ! ( id & ID0_PTFS_NO_AARCH32 ) ) {
smmu - > features | = ARM_SMMU_FEAT_FMT_AARCH32_L ;
if ( ! ( id & ID0_PTFS_NO_AARCH32S ) )
smmu - > features | = ARM_SMMU_FEAT_FMT_AARCH32_S ;
}
2013-06-25 01:31:25 +08:00
/* ID1 */
2019-08-16 02:37:31 +08:00
id = arm_smmu_gr0_read ( smmu , ARM_SMMU_GR0_ID1 ) ;
2014-07-30 18:33:25 +08:00
smmu - > pgshift = ( id & ID1_PAGESIZE ) ? 16 : 12 ;
2013-06-25 01:31:25 +08:00
2013-10-01 20:39:06 +08:00
/* Check for size mismatch of SMMU address space from mapped region */
2019-08-16 02:37:23 +08:00
size = 1 < < ( FIELD_GET ( ID1_NUMPAGENDXB , id ) + 1 ) ;
2019-08-16 02:37:26 +08:00
if ( smmu - > numpage ! = 2 * size < < smmu - > pgshift )
2014-07-09 00:52:18 +08:00
dev_warn ( smmu - > dev ,
2019-08-16 02:37:26 +08:00
" SMMU address space size (0x%x) differs from mapped region size (0x%x)! \n " ,
2 * size < < smmu - > pgshift , smmu - > numpage ) ;
/* Now properly encode NUMPAGE to subsequently derive SMMU_CB_BASE */
smmu - > numpage = size ;
2013-06-25 01:31:25 +08:00
2019-08-16 02:37:23 +08:00
smmu - > num_s2_context_banks = FIELD_GET ( ID1_NUMS2CB , id ) ;
smmu - > num_context_banks = FIELD_GET ( ID1_NUMCB , id ) ;
2013-06-25 01:31:25 +08:00
if ( smmu - > num_s2_context_banks > smmu - > num_context_banks ) {
dev_err ( smmu - > dev , " impossible number of S2 context banks! \n " ) ;
return - ENODEV ;
}
dev_notice ( smmu - > dev , " \t %u context banks (%u stage-2 only) \n " ,
smmu - > num_context_banks , smmu - > num_s2_context_banks ) ;
2017-08-08 21:56:14 +08:00
smmu - > cbs = devm_kcalloc ( smmu - > dev , smmu - > num_context_banks ,
sizeof ( * smmu - > cbs ) , GFP_KERNEL ) ;
if ( ! smmu - > cbs )
return - ENOMEM ;
2013-06-25 01:31:25 +08:00
/* ID2 */
2019-08-16 02:37:31 +08:00
id = arm_smmu_gr0_read ( smmu , ARM_SMMU_GR0_ID2 ) ;
2019-08-16 02:37:23 +08:00
size = arm_smmu_id_size_to_bits ( FIELD_GET ( ID2_IAS , id ) ) ;
2014-11-15 01:17:54 +08:00
smmu - > ipa_size = size ;
2013-06-25 01:31:25 +08:00
2014-11-15 01:17:54 +08:00
/* The output mask is also applied for bypass */
2019-08-16 02:37:23 +08:00
size = arm_smmu_id_size_to_bits ( FIELD_GET ( ID2_OAS , id ) ) ;
2014-11-15 01:17:54 +08:00
smmu - > pa_size = size ;
2013-06-25 01:31:25 +08:00
2016-02-24 02:19:00 +08:00
if ( id & ID2_VMID16 )
smmu - > features | = ARM_SMMU_FEAT_VMID16 ;
2015-03-05 00:41:05 +08:00
/*
* What the page table walker can address actually depends on which
* descriptor format is in use , but since a ) we don ' t know that yet ,
* and b ) it can vary per context bank , this will have to do . . .
*/
if ( dma_set_mask_and_coherent ( smmu - > dev , DMA_BIT_MASK ( size ) ) )
dev_warn ( smmu - > dev ,
" failed to set DMA mask for table walker \n " ) ;
2016-04-14 01:13:03 +08:00
if ( smmu - > version < ARM_SMMU_V2 ) {
2014-11-15 01:17:54 +08:00
smmu - > va_size = smmu - > ipa_size ;
2016-04-14 01:13:03 +08:00
if ( smmu - > version = = ARM_SMMU_V1_64K )
smmu - > features | = ARM_SMMU_FEAT_FMT_AARCH64_64K ;
2013-06-25 01:31:25 +08:00
} else {
2019-08-16 02:37:23 +08:00
size = FIELD_GET ( ID2_UBS , id ) ;
2014-11-15 01:17:54 +08:00
smmu - > va_size = arm_smmu_id_size_to_bits ( size ) ;
if ( id & ID2_PTFS_4K )
2016-04-29 00:12:09 +08:00
smmu - > features | = ARM_SMMU_FEAT_FMT_AARCH64_4K ;
2014-11-15 01:17:54 +08:00
if ( id & ID2_PTFS_16K )
2016-04-29 00:12:09 +08:00
smmu - > features | = ARM_SMMU_FEAT_FMT_AARCH64_16K ;
2014-11-15 01:17:54 +08:00
if ( id & ID2_PTFS_64K )
2016-04-29 00:12:09 +08:00
smmu - > features | = ARM_SMMU_FEAT_FMT_AARCH64_64K ;
2013-06-25 01:31:25 +08:00
}
2016-04-29 00:12:09 +08:00
/* Now we've corralled the various formats, what'll it do? */
if ( smmu - > features & ARM_SMMU_FEAT_FMT_AARCH32_S )
2016-05-10 00:20:09 +08:00
smmu - > pgsize_bitmap | = SZ_4K | SZ_64K | SZ_1M | SZ_16M ;
2016-04-29 00:12:09 +08:00
if ( smmu - > features &
( ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K ) )
2016-05-10 00:20:09 +08:00
smmu - > pgsize_bitmap | = SZ_4K | SZ_2M | SZ_1G ;
2016-04-29 00:12:09 +08:00
if ( smmu - > features & ARM_SMMU_FEAT_FMT_AARCH64_16K )
2016-05-10 00:20:09 +08:00
smmu - > pgsize_bitmap | = SZ_16K | SZ_32M ;
2016-04-29 00:12:09 +08:00
if ( smmu - > features & ARM_SMMU_FEAT_FMT_AARCH64_64K )
2016-05-10 00:20:09 +08:00
smmu - > pgsize_bitmap | = SZ_64K | SZ_512M ;
if ( arm_smmu_ops . pgsize_bitmap = = - 1UL )
arm_smmu_ops . pgsize_bitmap = smmu - > pgsize_bitmap ;
else
arm_smmu_ops . pgsize_bitmap | = smmu - > pgsize_bitmap ;
dev_notice ( smmu - > dev , " \t Supported page sizes: 0x%08lx \n " ,
smmu - > pgsize_bitmap ) ;
2016-04-29 00:12:09 +08:00
2014-11-15 01:17:54 +08:00
2014-09-01 23:24:48 +08:00
if ( smmu - > features & ARM_SMMU_FEAT_TRANS_S1 )
dev_notice ( smmu - > dev , " \t Stage-1: %lu-bit VA -> %lu-bit IPA \n " ,
2014-11-15 01:17:54 +08:00
smmu - > va_size , smmu - > ipa_size ) ;
2014-09-01 23:24:48 +08:00
if ( smmu - > features & ARM_SMMU_FEAT_TRANS_S2 )
dev_notice ( smmu - > dev , " \t Stage-2: %lu-bit IPA -> %lu-bit PA \n " ,
2014-11-15 01:17:54 +08:00
smmu - > ipa_size , smmu - > pa_size ) ;
2014-09-01 23:24:48 +08:00
2019-08-16 02:37:35 +08:00
if ( smmu - > impl & & smmu - > impl - > cfg_probe )
return smmu - > impl - > cfg_probe ( smmu ) ;
2013-06-25 01:31:25 +08:00
return 0 ;
}
2016-04-14 01:12:57 +08:00
struct arm_smmu_match_data {
enum arm_smmu_arch_version version ;
enum arm_smmu_implementation model ;
} ;
# define ARM_SMMU_MATCH_DATA(name, ver, imp) \
2018-12-04 14:22:09 +08:00
static const struct arm_smmu_match_data name = { . version = ver , . model = imp }
2016-04-14 01:12:57 +08:00
ARM_SMMU_MATCH_DATA ( smmu_generic_v1 , ARM_SMMU_V1 , GENERIC_SMMU ) ;
ARM_SMMU_MATCH_DATA ( smmu_generic_v2 , ARM_SMMU_V2 , GENERIC_SMMU ) ;
2016-04-14 01:13:03 +08:00
ARM_SMMU_MATCH_DATA ( arm_mmu401 , ARM_SMMU_V1_64K , GENERIC_SMMU ) ;
2016-04-14 01:12:59 +08:00
ARM_SMMU_MATCH_DATA ( arm_mmu500 , ARM_SMMU_V2 , ARM_MMU500 ) ;
2016-04-14 01:12:58 +08:00
ARM_SMMU_MATCH_DATA ( cavium_smmuv2 , ARM_SMMU_V2 , CAVIUM_SMMUV2 ) ;
2018-12-04 14:22:13 +08:00
ARM_SMMU_MATCH_DATA ( qcom_smmuv2 , ARM_SMMU_V2 , QCOM_SMMUV2 ) ;
2016-04-14 01:12:57 +08:00
2014-10-02 18:24:45 +08:00
static const struct of_device_id arm_smmu_of_match [ ] = {
2016-04-14 01:12:57 +08:00
{ . compatible = " arm,smmu-v1 " , . data = & smmu_generic_v1 } ,
{ . compatible = " arm,smmu-v2 " , . data = & smmu_generic_v2 } ,
{ . compatible = " arm,mmu-400 " , . data = & smmu_generic_v1 } ,
2016-04-14 01:13:03 +08:00
{ . compatible = " arm,mmu-401 " , . data = & arm_mmu401 } ,
2016-04-14 01:12:59 +08:00
{ . compatible = " arm,mmu-500 " , . data = & arm_mmu500 } ,
2016-04-14 01:12:58 +08:00
{ . compatible = " cavium,smmu-v2 " , . data = & cavium_smmuv2 } ,
2018-12-04 14:22:13 +08:00
{ . compatible = " qcom,smmu-v2 " , . data = & qcom_smmuv2 } ,
2014-08-29 00:51:59 +08:00
{ } ,
} ;
2016-11-21 18:01:45 +08:00
# ifdef CONFIG_ACPI
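/*
 * Map the IORT node's model field onto the driver's version/implementation
 * pair; unrecognised models are rejected rather than guessed at.
 */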
static int acpi_smmu_get_data ( u32 model , struct arm_smmu_device * smmu )
{
int ret = 0 ;
switch ( model ) {
case ACPI_IORT_SMMU_V1 :
case ACPI_IORT_SMMU_CORELINK_MMU400 :
smmu - > version = ARM_SMMU_V1 ;
smmu - > model = GENERIC_SMMU ;
break ;
2017-06-19 23:41:56 +08:00
case ACPI_IORT_SMMU_CORELINK_MMU401 :
smmu - > version = ARM_SMMU_V1_64K ;
smmu - > model = GENERIC_SMMU ;
break ;
2016-11-21 18:01:45 +08:00
case ACPI_IORT_SMMU_V2 :
smmu - > version = ARM_SMMU_V2 ;
smmu - > model = GENERIC_SMMU ;
break ;
case ACPI_IORT_SMMU_CORELINK_MMU500 :
smmu - > version = ARM_SMMU_V2 ;
smmu - > model = ARM_MMU500 ;
break ;
2017-06-19 23:41:56 +08:00
case ACPI_IORT_SMMU_CAVIUM_THUNDERX :
smmu - > version = ARM_SMMU_V2 ;
smmu - > model = CAVIUM_SMMUV2 ;
break ;
2016-11-21 18:01:45 +08:00
default :
ret = - ENODEV ;
}
return ret ;
}
static int arm_smmu_device_acpi_probe ( struct platform_device * pdev ,
struct arm_smmu_device * smmu )
{
struct device * dev = smmu - > dev ;
struct acpi_iort_node * node =
* ( struct acpi_iort_node * * ) dev_get_platdata ( dev ) ;
struct acpi_iort_smmu * iort_smmu ;
int ret ;
/* Retrieve SMMU1/2 specific data */
iort_smmu = ( struct acpi_iort_smmu * ) node - > node_data ;
ret = acpi_smmu_get_data ( iort_smmu - > model , smmu ) ;
if ( ret < 0 )
return ret ;
/* Ignore the configuration access interrupt */
smmu - > num_global_irqs = 1 ;
if ( iort_smmu - > flags & ACPI_IORT_SMMU_COHERENT_WALK )
smmu - > features | = ARM_SMMU_FEAT_COHERENT_WALK ;
return 0 ;
}
# else
static inline int arm_smmu_device_acpi_probe ( struct platform_device * pdev ,
struct arm_smmu_device * smmu )
{
return - ENODEV ;
}
# endif
2016-11-21 18:01:44 +08:00
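/*
 * The legacy "mmu-masters" binding and the generic "iommus" binding are
 * mutually exclusive system-wide: once one style has been seen, SMMUs
 * described the other way refuse to probe.
 */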
static int arm_smmu_device_dt_probe ( struct platform_device * pdev ,
struct arm_smmu_device * smmu )
2013-06-25 01:31:25 +08:00
{
2016-04-14 01:12:57 +08:00
const struct arm_smmu_match_data * data ;
2013-06-25 01:31:25 +08:00
struct device * dev = & pdev - > dev ;
2016-09-14 22:26:46 +08:00
bool legacy_binding ;
2016-11-21 18:01:44 +08:00
if ( of_property_read_u32 ( dev - > of_node , " #global-interrupts " ,
& smmu - > num_global_irqs ) ) {
dev_err ( dev , " missing #global-interrupts property \n " ) ;
return - ENODEV ;
}
data = of_device_get_match_data ( dev ) ;
smmu - > version = data - > version ;
smmu - > model = data - > model ;
2016-09-14 22:26:46 +08:00
legacy_binding = of_find_property ( dev - > of_node , " mmu-masters " , NULL ) ;
if ( legacy_binding & & ! using_generic_binding ) {
if ( ! using_legacy_binding )
pr_notice ( " deprecated \" mmu-masters \" DT property in use; DMA API support unavailable \n " ) ;
using_legacy_binding = true ;
} else if ( ! legacy_binding & & ! using_legacy_binding ) {
using_generic_binding = true ;
} else {
dev_err ( dev , " not probing due to mismatched DT properties \n " ) ;
return - ENODEV ;
}
2013-06-25 01:31:25 +08:00
2016-11-21 18:01:44 +08:00
if ( of_dma_is_coherent ( dev - > of_node ) )
smmu - > features | = ARM_SMMU_FEAT_COHERENT_WALK ;
return 0 ;
}
2017-04-10 19:21:05 +08:00
static void arm_smmu_bus_init ( void )
{
/* Oh, for a proper bus abstraction */
if ( ! iommu_present ( & platform_bus_type ) )
bus_set_iommu ( & platform_bus_type , & arm_smmu_ops ) ;
# ifdef CONFIG_ARM_AMBA
if ( ! iommu_present ( & amba_bustype ) )
bus_set_iommu ( & amba_bustype , & arm_smmu_ops ) ;
# endif
# ifdef CONFIG_PCI
if ( ! iommu_present ( & pci_bus_type ) ) {
pci_request_acs ( ) ;
bus_set_iommu ( & pci_bus_type , & arm_smmu_ops ) ;
}
# endif
2018-09-10 21:49:18 +08:00
# ifdef CONFIG_FSL_MC_BUS
if ( ! iommu_present ( & fsl_mc_bus_type ) )
bus_set_iommu ( & fsl_mc_bus_type , & arm_smmu_ops ) ;
# endif
2017-04-10 19:21:05 +08:00
}
2016-11-21 18:01:44 +08:00
static int arm_smmu_device_probe ( struct platform_device * pdev )
{
struct resource * res ;
2017-02-02 01:11:36 +08:00
resource_size_t ioaddr ;
2016-11-21 18:01:44 +08:00
struct arm_smmu_device * smmu ;
struct device * dev = & pdev - > dev ;
int num_irqs , i , err ;
2013-06-25 01:31:25 +08:00
smmu = devm_kzalloc ( dev , sizeof ( * smmu ) , GFP_KERNEL ) ;
if ( ! smmu ) {
dev_err ( dev , " failed to allocate arm_smmu_device \n " ) ;
return - ENOMEM ;
}
smmu - > dev = dev ;
2016-11-21 18:01:45 +08:00
if ( dev - > of_node )
err = arm_smmu_device_dt_probe ( pdev , smmu ) ;
else
err = arm_smmu_device_acpi_probe ( pdev , smmu ) ;
2016-11-21 18:01:44 +08:00
if ( err )
return err ;
2014-08-29 00:51:59 +08:00
2019-08-16 02:37:33 +08:00
smmu = arm_smmu_impl_init ( smmu ) ;
if ( IS_ERR ( smmu ) )
return PTR_ERR ( smmu ) ;
2013-06-25 01:31:25 +08:00
res = platform_get_resource ( pdev , IORESOURCE_MEM , 0 ) ;
2017-02-02 01:11:36 +08:00
ioaddr = res - > start ;
2013-08-19 19:20:37 +08:00
smmu - > base = devm_ioremap_resource ( dev , res ) ;
if ( IS_ERR ( smmu - > base ) )
return PTR_ERR ( smmu - > base ) ;
2019-08-16 02:37:26 +08:00
/*
* The resource size should effectively match the value of SMMU_TOP ;
* stash that temporarily until we know PAGESIZE to validate it with .
*/
smmu - > numpage = resource_size ( res ) ;
2013-06-25 01:31:25 +08:00
num_irqs = 0 ;
while ( ( res = platform_get_resource ( pdev , IORESOURCE_IRQ , num_irqs ) ) ) {
num_irqs + + ;
if ( num_irqs > smmu - > num_global_irqs )
smmu - > num_context_irqs + + ;
}
2013-10-01 20:39:07 +08:00
if ( ! smmu - > num_context_irqs ) {
dev_err ( dev , " found %d interrupts but expected at least %d \n " ,
num_irqs , smmu - > num_global_irqs + 1 ) ;
return - ENODEV ;
2013-06-25 01:31:25 +08:00
}
2018-06-13 05:07:58 +08:00
smmu - > irqs = devm_kcalloc ( dev , num_irqs , sizeof ( * smmu - > irqs ) ,
2013-06-25 01:31:25 +08:00
GFP_KERNEL ) ;
if ( ! smmu - > irqs ) {
dev_err ( dev , " failed to allocate %d irqs \n " , num_irqs ) ;
return - ENOMEM ;
}
for ( i = 0 ; i < num_irqs ; + + i ) {
int irq = platform_get_irq ( pdev , i ) ;
2014-07-09 00:52:18 +08:00
2013-06-25 01:31:25 +08:00
if ( irq < 0 ) {
dev_err ( dev , " failed to get irq index %d \n " , i ) ;
return - ENODEV ;
}
smmu - > irqs [ i ] = irq ;
}
2018-12-04 14:22:09 +08:00
err = devm_clk_bulk_get_all ( dev , & smmu - > clks ) ;
if ( err < 0 ) {
dev_err ( dev , " failed to get clocks %d \n " , err ) ;
return err ;
}
smmu - > num_clks = err ;
err = clk_bulk_prepare_enable ( smmu - > num_clks , smmu - > clks ) ;
if ( err )
return err ;
2014-08-23 08:12:32 +08:00
err = arm_smmu_device_cfg_probe ( smmu ) ;
if ( err )
return err ;
2018-07-20 01:53:56 +08:00
if ( smmu - > version = = ARM_SMMU_V2 ) {
if ( smmu - > num_context_banks > smmu - > num_context_irqs ) {
dev_err ( dev ,
" found only %d context irq(s) but %d required \n " ,
smmu - > num_context_irqs , smmu - > num_context_banks ) ;
return - ENODEV ;
}
/* Ignore superfluous interrupts */
smmu - > num_context_irqs = smmu - > num_context_banks ;
2013-06-25 01:31:25 +08:00
}
for ( i = 0 ; i < smmu - > num_global_irqs ; + + i ) {
2016-07-04 17:38:22 +08:00
err = devm_request_irq ( smmu - > dev , smmu - > irqs [ i ] ,
arm_smmu_global_fault ,
IRQF_SHARED ,
" arm-smmu global fault " ,
smmu ) ;
2013-06-25 01:31:25 +08:00
if ( err ) {
dev_err ( dev , " failed to request global IRQ %d (%u) \n " ,
i , smmu - > irqs [ i ] ) ;
2016-09-14 22:21:39 +08:00
return err ;
2013-06-25 01:31:25 +08:00
}
}
2017-02-02 01:11:36 +08:00
err = iommu_device_sysfs_add ( & smmu - > iommu , smmu - > dev , NULL ,
" smmu.%pa " , & ioaddr ) ;
if ( err ) {
dev_err ( dev , " Failed to register iommu in sysfs \n " ) ;
return err ;
}
iommu_device_set_ops ( & smmu - > iommu , & arm_smmu_ops ) ;
iommu_device_set_fwnode ( & smmu - > iommu , dev - > fwnode ) ;
err = iommu_device_register ( & smmu - > iommu ) ;
if ( err ) {
dev_err ( dev , " Failed to register iommu \n " ) ;
return err ;
}
2016-09-13 00:13:52 +08:00
platform_set_drvdata ( pdev , smmu ) ;
2013-08-21 20:56:34 +08:00
arm_smmu_device_reset ( smmu ) ;
2017-01-19 22:36:36 +08:00
arm_smmu_test_smr_masks ( smmu ) ;
2016-09-14 22:26:46 +08:00
2018-12-04 14:22:10 +08:00
/*
* We want to avoid touching dev - > power . lock in fastpaths unless
* it ' s really going to do something useful - pm_runtime_enabled ( )
* can serve as an ideal proxy for that decision . So , conditionally
* enable pm_runtime .
*/
if ( dev - > pm_domain ) {
pm_runtime_set_active ( dev ) ;
pm_runtime_enable ( dev ) ;
}
2017-04-10 19:21:05 +08:00
/*
* For ACPI and generic DT bindings , an SMMU will be probed before
* any device which might need it , so we want the bus ops in place
* ready to handle default domain setup as soon as any SMMU exists .
*/
if ( ! using_legacy_binding )
arm_smmu_bus_init ( ) ;
2013-06-25 01:31:25 +08:00
return 0 ;
}
2017-04-10 19:21:05 +08:00
/*
* With the legacy DT binding in play , though , we have no guarantees about
* probe order , but then we ' re also not doing default domains , so we can
* delay setting bus ops until we ' re sure every possible SMMU is ready ,
* and that way ensure that no add_device ( ) calls get missed .
*/
static int arm_smmu_legacy_bus_init ( void )
{
if ( using_legacy_binding )
arm_smmu_bus_init ( ) ;
2013-06-25 01:31:25 +08:00
return 0 ;
}
2017-04-10 19:21:05 +08:00
device_initcall_sync ( arm_smmu_legacy_bus_init ) ;
2013-06-25 01:31:25 +08:00
2018-12-02 03:19:16 +08:00
static void arm_smmu_device_shutdown ( struct platform_device * pdev )
2013-06-25 01:31:25 +08:00
{
2016-09-13 00:13:52 +08:00
struct arm_smmu_device * smmu = platform_get_drvdata ( pdev ) ;
2013-06-25 01:31:25 +08:00
if ( ! smmu )
2018-12-02 03:19:16 +08:00
return ;
2013-06-25 01:31:25 +08:00
2013-08-01 02:21:28 +08:00
if ( ! bitmap_empty ( smmu - > context_map , ARM_SMMU_MAX_CBS ) )
2016-09-13 00:13:52 +08:00
dev_err ( & pdev - > dev , " removing device with active domains! \n " ) ;
2013-06-25 01:31:25 +08:00
2018-12-04 14:22:10 +08:00
arm_smmu_rpm_get ( smmu ) ;
2013-06-25 01:31:25 +08:00
/* Turn the thing off */
2019-08-16 02:37:31 +08:00
arm_smmu_gr0_write ( smmu , ARM_SMMU_GR0_sCR0 , sCR0_CLIENTPD ) ;
2018-12-04 14:22:10 +08:00
arm_smmu_rpm_put ( smmu ) ;
if ( pm_runtime_enabled ( smmu - > dev ) )
pm_runtime_force_suspend ( smmu - > dev ) ;
else
clk_bulk_disable ( smmu - > num_clks , smmu - > clks ) ;
2018-12-04 14:22:09 +08:00
2018-12-04 14:22:10 +08:00
clk_bulk_unprepare ( smmu - > num_clks , smmu - > clks ) ;
2013-06-25 01:31:25 +08:00
}
2018-12-04 14:22:09 +08:00
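/*
 * Runtime resume re-enables the interface clocks and re-runs the reset
 * sequence, on the assumption that register state may have been lost
 * while the SMMU was powered down.
 */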
static int __maybe_unused arm_smmu_runtime_resume ( struct device * dev )
2017-06-30 06:18:15 +08:00
{
2017-08-08 21:56:15 +08:00
struct arm_smmu_device * smmu = dev_get_drvdata ( dev ) ;
2018-12-04 14:22:09 +08:00
int ret ;
ret = clk_bulk_enable ( smmu - > num_clks , smmu - > clks ) ;
if ( ret )
return ret ;
2017-08-08 21:56:15 +08:00
arm_smmu_device_reset ( smmu ) ;
2018-12-04 14:22:09 +08:00
2017-08-08 21:56:15 +08:00
return 0 ;
2017-06-30 06:18:15 +08:00
}
2018-12-04 14:22:09 +08:00
static int __maybe_unused arm_smmu_runtime_suspend ( struct device * dev )
2017-08-08 21:56:15 +08:00
{
struct arm_smmu_device * smmu = dev_get_drvdata ( dev ) ;
2018-12-04 14:22:09 +08:00
clk_bulk_disable ( smmu - > num_clks , smmu - > clks ) ;
2017-08-08 21:56:15 +08:00
return 0 ;
}
2018-12-04 14:22:09 +08:00
static int __maybe_unused arm_smmu_pm_resume ( struct device * dev )
{
if ( pm_runtime_suspended ( dev ) )
return 0 ;
return arm_smmu_runtime_resume ( dev ) ;
}
static int __maybe_unused arm_smmu_pm_suspend ( struct device * dev )
{
if ( pm_runtime_suspended ( dev ) )
return 0 ;
return arm_smmu_runtime_suspend ( dev ) ;
}
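/*
 * System sleep reuses the runtime PM callbacks, skipping devices that
 * are already runtime-suspended and so have nothing further to save.
 */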
static const struct dev_pm_ops arm_smmu_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS ( arm_smmu_pm_suspend , arm_smmu_pm_resume )
SET_RUNTIME_PM_OPS ( arm_smmu_runtime_suspend ,
arm_smmu_runtime_resume , NULL )
} ;
2017-08-08 21:56:15 +08:00
2013-06-25 01:31:25 +08:00
static struct platform_driver arm_smmu_driver = {
. driver = {
2018-12-02 03:19:16 +08:00
. name = " arm-smmu " ,
. of_match_table = of_match_ptr ( arm_smmu_of_match ) ,
. pm = & arm_smmu_pm_ops ,
. suppress_bind_attrs = true ,
2013-06-25 01:31:25 +08:00
} ,
2016-11-21 18:01:44 +08:00
. probe = arm_smmu_device_probe ,
2017-06-30 06:18:15 +08:00
. shutdown = arm_smmu_device_shutdown ,
2013-06-25 01:31:25 +08:00
} ;
2018-12-02 03:19:16 +08:00
builtin_platform_driver ( arm_smmu_driver ) ;