Merge tag 'mhi-for-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi into char-misc-next

Manivannan writes:

MHI changes for v5.12

Loic improved the MHI PCI generic controller by adding support for the DIAG
channel, PCI error handling, suspend/recovery/resume, and a health check.
Loic also added support for resetting the MHI device as per the MHI
specification: the default case writes to a dedicated register, and a
controller-specific callback is used when one is provided.

Along with this, Loic also added a new API which returns the number of free
TREs (Transfer Ring Elements) from the MHI core. Client drivers can make use
of this API; the current consumer is the "mhi-net" driver. For taking both
the "mhi-net" driver change and the API change, we created the
"mhi-net-immutable" branch for this patch and merged it into net-next and
mhi-next.

Carl added a patch which lets the controller driver pass custom IRQ flags
for the BHI and MHI event interrupts to the MHI core. The current consumer
of this feature is the ath11k MHI controller driver. For taking both
changes, we created the "mhi-ath11k-immutable" branch for this patch and
merged it into ath11k-next and mhi-next.

Finally, Loic cleaned up the MHI queue APIs and fixed the shared MSI vector
support.

-----BEGIN PGP SIGNATURE-----

iQEzBAABCgAdFiEEZ6VDKoFIy9ikWCeXVZ8R5v6RzvUFAmAdQIMACgkQVZ8R5v6R
zvWtsQgApNckrY1W+CAOi/CkYrwvO7ADNw+nZIEu1FkhnDo5qCTwDYfNogf3K9lq
LbtuQLaBBE4ilvrB+o4ubLYBaRN0Bt4B+ImShvPmJw6/HDuuzW2AmdDn66bMMRHz
wZ0cXgisnrJn8tSIZUfx05wyxT6AV+A4f0cZv/FzU0rzIgrHsJVdDtrGcxOnoK2w
tbUoGuIwCIN8uJp+XXpBufujKz6S+2GSHKP6ELdNhoNhgag7u+fau5IUZVewTkSM
ayAYwlzHOR5TCnI+/clOEh7RHodORZ0xvsj5IqEuaFz0PwF/fT/37LMWfNP3Cc11
FjM/ZU++gKh6aa/PEJlGTxSlqVK+NQ==
=tdTN
-----END PGP SIGNATURE-----

* tag 'mhi-for-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi:
  bus: mhi: pci_generic: Increase num of elements in hw event ring
  mhi: pci_generic: Print warning in case of firmware crash
  bus: mhi: core: Add helper API to return number of free TREs
  mhi: core: Factorize mhi queuing
  mhi: use irq_flags if controller driver configures it
  mhi: pci_generic: Fix shared MSI vector support
  mhi: unconstify mhi_event_config
  bus: mhi: Ensure correct ring update ordering with memory barrier
  mhi: pci_generic: Set irq moderation value to 1ms for hw channels
  mhi: pci_generic: Add diag channels
  mhi: pci_generic: Increase controller timeout value
  mhi: pci_generic: Add health-check
  mhi: pci_generic: Add PCI error handlers
  mhi: pci_generic: Add suspend/resume/recovery procedure
  mhi: pci_generic: Add support for reset
  mhi: pci_generic: Enable burst mode for hardware channels
  mhi: pci-generic: Increase number of hardware events
  bus: mhi: core: Add device hardware reset support
This commit is contained in:
commit 37f1cda438
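Usage note: a minimal, hypothetical client-side sketch (not part of this series) of how a driver might use the new mhi_get_free_desc_count() API for transmit flow control, in the spirit of the "mhi-net" consumer mentioned above. The function example_tx() and its back-off policy are illustrative assumptions; only the two MHI calls come from the diff below.

/* Hypothetical client-side sketch: back off while the uplink transfer
 * ring has no free TREs instead of letting mhi_queue_skb() fail with
 * -ENOMEM. example_tx() is an illustrative name, not kernel code.
 */
#include <linux/dma-direction.h>
#include <linux/mhi.h>
#include <linux/skbuff.h>

static int example_tx(struct mhi_device *mhi_dev, struct sk_buff *skb)
{
	/* New in this series: ask the MHI core how many TREs are free */
	if (!mhi_get_free_desc_count(mhi_dev, DMA_TO_DEVICE))
		return -EAGAIN;	/* ring full, caller should retry later */

	return mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
}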
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -151,12 +151,17 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
 {
 	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
 	int i, ret;
 
+	/* if controller driver has set irq_flags, use it */
+	if (mhi_cntrl->irq_flags)
+		irq_flags = mhi_cntrl->irq_flags;
+
 	/* Setup BHI_INTVEC IRQ */
 	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
 				   mhi_intvec_threaded_handler,
-				   IRQF_SHARED | IRQF_NO_SUSPEND,
+				   irq_flags,
 				   "bhi", mhi_cntrl);
 	if (ret)
 		return ret;
@@ -174,7 +179,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
 
 		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
 				  mhi_irq_handler,
-				  IRQF_SHARED | IRQF_NO_SUSPEND,
+				  irq_flags,
 				  "mhi", mhi_event);
 		if (ret) {
 			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -111,7 +111,14 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
 	dma_addr_t db;
 
 	db = ring->iommu_base + (ring->wp - ring->base);
+
+	/*
+	 * Writes to the new ring element must be visible to the hardware
+	 * before letting h/w know there is new element to fetch.
+	 */
+	dma_wmb();
 	*ring->ctxt_wp = db;
+
 	mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
 				    ring->db_addr, db);
 }
@@ -135,6 +142,19 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
 }
 EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
 
+void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
+{
+	if (mhi_cntrl->reset) {
+		mhi_cntrl->reset(mhi_cntrl);
+		return;
+	}
+
+	/* Generic MHI SoC reset */
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
+		      MHI_SOC_RESET_REQ);
+}
+EXPORT_SYMBOL_GPL(mhi_soc_reset);
+
 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
 			 struct mhi_buf_info *buf_info)
 {
@@ -260,6 +280,18 @@ int mhi_destroy_device(struct device *dev, void *data)
 	return 0;
 }
 
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+			    enum dma_data_direction dir)
+{
+	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
+		mhi_dev->ul_chan : mhi_dev->dl_chan;
+	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
+
+	return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
+}
+EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
+
 void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
 {
 	struct mhi_driver *mhi_drv;
@@ -947,118 +979,88 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
 	return (tmp == ring->rp);
 }
 
-int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
-		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+		     enum dma_data_direction dir, enum mhi_flags mflags)
 {
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
 							     mhi_dev->dl_chan;
 	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
-	struct mhi_buf_info buf_info = { };
+	unsigned long flags;
 	int ret;
 
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
-		return -EINVAL;
-
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
-
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
 		return -EIO;
+
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+	if (unlikely(ret)) {
+		ret = -ENOMEM;
+		goto exit_unlock;
 	}
 
-	/* we're in M3 or transitioning to M3 */
+	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+	if (unlikely(ret))
+		goto exit_unlock;
+
+	/* trigger M3 exit if necessary */
 	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
 		mhi_trigger_resume(mhi_cntrl);
 
-	/* Toggle wake to exit out of M2 */
+	/* Assert dev_wake (to exit/prevent M1/M2)*/
 	mhi_cntrl->wake_toggle(mhi_cntrl);
 
 	if (mhi_chan->dir == DMA_TO_DEVICE)
 		atomic_inc(&mhi_cntrl->pending_pkts);
 
+	if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+		ret = -EIO;
+		goto exit_unlock;
+	}
+
+	mhi_ring_chan_db(mhi_cntrl, mhi_chan);
+
+exit_unlock:
+	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+	return ret;
+}
+
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+{
+	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+							     mhi_dev->dl_chan;
+	struct mhi_buf_info buf_info = { };
+
 	buf_info.v_addr = skb->data;
 	buf_info.cb_buf = skb;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
-	}
-
-	read_unlock_bh(&mhi_cntrl->pm_lock);
-
-	return 0;
+	if (unlikely(mhi_chan->pre_alloc))
+		return -EINVAL;
+
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
 int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
 		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
 							     mhi_dev->dl_chan;
-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
 	struct mhi_buf_info buf_info = { };
-	int ret;
-
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
-		return -EINVAL;
-
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
-
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		dev_err(dev, "MHI is not in activate state, PM state: %s\n",
-			to_mhi_pm_state_str(mhi_cntrl->pm_state));
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-
-		return -EIO;
-	}
-
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
-
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
 
 	buf_info.p_addr = mhi_buf->dma_addr;
 	buf_info.cb_buf = mhi_buf;
 	buf_info.pre_mapped = true;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
-	}
-
-	read_unlock_bh(&mhi_cntrl->pm_lock);
-
-	return 0;
+	if (unlikely(mhi_chan->pre_alloc))
+		return -EINVAL;
+
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_dma);
 
@@ -1112,57 +1114,13 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
 		  void *buf, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
-							     mhi_dev->dl_chan;
-	struct mhi_ring *tre_ring;
 	struct mhi_buf_info buf_info = { };
-	unsigned long flags;
-	int ret;
-
-	/*
-	 * this check here only as a guard, it's always
-	 * possible mhi can enter error while executing rest of function,
-	 * which is not fatal so we do not need to hold pm_lock
-	 */
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
-		return -EIO;
-
-	tre_ring = &mhi_chan->tre_ring;
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
 
 	buf_info.v_addr = buf;
 	buf_info.cb_buf = buf;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret))
-		return ret;
-
-	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
-
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		unsigned long flags;
-
-		read_lock_irqsave(&mhi_chan->lock, flags);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_irqrestore(&mhi_chan->lock, flags);
-	}
-
-	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
-
-	return 0;
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_buf);
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -8,13 +8,21 @@
  * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
  */
 
+#include <linux/aer.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/mhi.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
 
 #define MHI_PCI_DEFAULT_BAR_NUM 0
 
+#define MHI_POST_RESET_DELAY_MS 500
+
+#define HEALTH_CHECK_PERIOD (HZ * 2)
+
 /**
  * struct mhi_pci_dev_info - MHI PCI device specific information
  * @config: MHI controller configuration
@@ -76,6 +84,36 @@ struct mhi_pci_dev_info {
 		.offload_channel = false,	\
 	}
 
+#define MHI_CHANNEL_CONFIG_HW_UL(ch_num, ch_name, el_count, ev_ring) \
+	{						\
+		.num = ch_num,				\
+		.name = ch_name,			\
+		.num_elements = el_count,		\
+		.event_ring = ev_ring,			\
+		.dir = DMA_TO_DEVICE,			\
+		.ee_mask = BIT(MHI_EE_AMSS),		\
+		.pollcfg = 0,				\
+		.doorbell = MHI_DB_BRST_ENABLE,		\
+		.lpm_notify = false,			\
+		.offload_channel = false,		\
+		.doorbell_mode_switch = true,		\
+	} \
+
+#define MHI_CHANNEL_CONFIG_HW_DL(ch_num, ch_name, el_count, ev_ring) \
+	{						\
+		.num = ch_num,				\
+		.name = ch_name,			\
+		.num_elements = el_count,		\
+		.event_ring = ev_ring,			\
+		.dir = DMA_FROM_DEVICE,			\
+		.ee_mask = BIT(MHI_EE_AMSS),		\
+		.pollcfg = 0,				\
+		.doorbell = MHI_DB_BRST_ENABLE,		\
+		.lpm_notify = false,			\
+		.offload_channel = false,		\
+		.doorbell_mode_switch = true,		\
+	}
+
 #define MHI_EVENT_CONFIG_DATA(ev_ring)	\
 	{					\
 		.num_elements = 128,		\
@@ -91,8 +129,8 @@ struct mhi_pci_dev_info {
 
 #define MHI_EVENT_CONFIG_HW_DATA(ev_ring, ch_num) \
 	{					\
-		.num_elements = 128,		\
-		.irq_moderation_ms = 5,		\
+		.num_elements = 2048,		\
+		.irq_moderation_ms = 1,		\
 		.irq = (ev_ring) + 1,		\
 		.priority = 1,			\
 		.mode = MHI_DB_BRST_DISABLE,	\
@@ -104,27 +142,31 @@
 	}
 
 static const struct mhi_channel_config modem_qcom_v1_mhi_channels[] = {
+	MHI_CHANNEL_CONFIG_UL(4, "DIAG", 16, 1),
+	MHI_CHANNEL_CONFIG_DL(5, "DIAG", 16, 1),
 	MHI_CHANNEL_CONFIG_UL(12, "MBIM", 4, 0),
 	MHI_CHANNEL_CONFIG_DL(13, "MBIM", 4, 0),
 	MHI_CHANNEL_CONFIG_UL(14, "QMI", 4, 0),
 	MHI_CHANNEL_CONFIG_DL(15, "QMI", 4, 0),
 	MHI_CHANNEL_CONFIG_UL(20, "IPCR", 8, 0),
 	MHI_CHANNEL_CONFIG_DL(21, "IPCR", 8, 0),
-	MHI_CHANNEL_CONFIG_UL(100, "IP_HW0", 128, 1),
-	MHI_CHANNEL_CONFIG_DL(101, "IP_HW0", 128, 2),
+	MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 2),
+	MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 3),
 };
 
-static const struct mhi_event_config modem_qcom_v1_mhi_events[] = {
+static struct mhi_event_config modem_qcom_v1_mhi_events[] = {
 	/* first ring is control+data ring */
 	MHI_EVENT_CONFIG_CTRL(0),
+	/* DIAG dedicated event ring */
+	MHI_EVENT_CONFIG_DATA(1),
 	/* Hardware channels request dedicated hardware event rings */
-	MHI_EVENT_CONFIG_HW_DATA(1, 100),
-	MHI_EVENT_CONFIG_HW_DATA(2, 101)
+	MHI_EVENT_CONFIG_HW_DATA(2, 100),
+	MHI_EVENT_CONFIG_HW_DATA(3, 101)
 };
 
-static const struct mhi_controller_config modem_qcom_v1_mhiv_config = {
+static struct mhi_controller_config modem_qcom_v1_mhiv_config = {
 	.max_channels = 128,
-	.timeout_ms = 5000,
+	.timeout_ms = 8000,
 	.num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels),
 	.ch_cfg = modem_qcom_v1_mhi_channels,
 	.num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events),
@@ -147,6 +189,18 @@ static const struct pci_device_id mhi_pci_id_table[] = {
 };
 MODULE_DEVICE_TABLE(pci, mhi_pci_id_table);
 
+enum mhi_pci_device_status {
+	MHI_PCI_DEV_STARTED,
+};
+
+struct mhi_pci_device {
+	struct mhi_controller mhi_cntrl;
+	struct pci_saved_state *pci_state;
+	struct work_struct recovery_work;
+	struct timer_list health_check_timer;
+	unsigned long status;
+};
+
 static int mhi_pci_read_reg(struct mhi_controller *mhi_cntrl,
 			    void __iomem *addr, u32 *out)
 {
@@ -163,7 +217,31 @@ static void mhi_pci_write_reg(struct mhi_controller *mhi_cntrl,
 static void mhi_pci_status_cb(struct mhi_controller *mhi_cntrl,
 			      enum mhi_callback cb)
 {
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+
 	/* Nothing to do for now */
+	switch (cb) {
+	case MHI_CB_FATAL_ERROR:
+	case MHI_CB_SYS_ERROR:
+		dev_warn(&pdev->dev, "firmware crashed (%u)\n", cb);
+		break;
+	default:
+		break;
+	}
+}
+
+static bool mhi_pci_is_alive(struct mhi_controller *mhi_cntrl)
+{
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+	u16 vendor = 0;
+
+	if (pci_read_config_word(pdev, PCI_VENDOR_ID, &vendor))
+		return false;
+
+	if (vendor == (u16) ~0 || vendor == 0)
+		return false;
+
+	return true;
 }
 
 static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
@@ -227,8 +305,12 @@ static int mhi_pci_get_irqs(struct mhi_controller *mhi_cntrl,
 	}
 
 	if (nr_vectors < mhi_cntrl->nr_irqs) {
-		dev_warn(&pdev->dev, "Not enough MSI vectors (%d/%d), use shared MSI\n",
-			 nr_vectors, mhi_cntrl_config->num_events);
+		dev_warn(&pdev->dev, "using shared MSI\n");
+
+		/* Patch msi vectors, use only one (shared) */
+		for (i = 0; i < mhi_cntrl_config->num_events; i++)
+			mhi_cntrl_config->event_cfg[i].irq = 0;
+		mhi_cntrl->nr_irqs = 1;
 	}
 
 	irq = devm_kcalloc(&pdev->dev, mhi_cntrl->nr_irqs, sizeof(int), GFP_KERNEL);
@@ -257,20 +339,89 @@ static void mhi_pci_runtime_put(struct mhi_controller *mhi_cntrl)
 	/* no PM for now */
 }
 
+static void mhi_pci_recovery_work(struct work_struct *work)
+{
+	struct mhi_pci_device *mhi_pdev = container_of(work, struct mhi_pci_device,
+						       recovery_work);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+	int err;
+
+	dev_warn(&pdev->dev, "device recovery started\n");
+
+	del_timer(&mhi_pdev->health_check_timer);
+
+	/* Clean up MHI state */
+	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+		mhi_power_down(mhi_cntrl, false);
+		mhi_unprepare_after_power_down(mhi_cntrl);
+	}
+
+	/* Check if we can recover without full reset */
+	pci_set_power_state(pdev, PCI_D0);
+	pci_load_saved_state(pdev, mhi_pdev->pci_state);
+	pci_restore_state(pdev);
+
+	if (!mhi_pci_is_alive(mhi_cntrl))
+		goto err_try_reset;
+
+	err = mhi_prepare_for_power_up(mhi_cntrl);
+	if (err)
+		goto err_try_reset;
+
+	err = mhi_sync_power_up(mhi_cntrl);
+	if (err)
+		goto err_unprepare;
+
+	dev_dbg(&pdev->dev, "Recovery completed\n");
+
+	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+	return;
+
+err_unprepare:
+	mhi_unprepare_after_power_down(mhi_cntrl);
+err_try_reset:
+	if (pci_reset_function(pdev))
+		dev_err(&pdev->dev, "Recovery failed\n");
+}
+
+static void health_check(struct timer_list *t)
+{
+	struct mhi_pci_device *mhi_pdev = from_timer(mhi_pdev, t, health_check_timer);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	if (!mhi_pci_is_alive(mhi_cntrl)) {
+		dev_err(mhi_cntrl->cntrl_dev, "Device died\n");
+		queue_work(system_long_wq, &mhi_pdev->recovery_work);
+		return;
+	}
+
+	/* reschedule in two seconds */
+	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
 static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	const struct mhi_pci_dev_info *info = (struct mhi_pci_dev_info *) id->driver_data;
 	const struct mhi_controller_config *mhi_cntrl_config;
+	struct mhi_pci_device *mhi_pdev;
 	struct mhi_controller *mhi_cntrl;
 	int err;
 
 	dev_dbg(&pdev->dev, "MHI PCI device found: %s\n", info->name);
 
-	mhi_cntrl = mhi_alloc_controller();
-	if (!mhi_cntrl)
+	/* mhi_pdev.mhi_cntrl must be zero-initialized */
+	mhi_pdev = devm_kzalloc(&pdev->dev, sizeof(*mhi_pdev), GFP_KERNEL);
+	if (!mhi_pdev)
 		return -ENOMEM;
 
+	INIT_WORK(&mhi_pdev->recovery_work, mhi_pci_recovery_work);
+	timer_setup(&mhi_pdev->health_check_timer, health_check, 0);
+
 	mhi_cntrl_config = info->config;
+	mhi_cntrl = &mhi_pdev->mhi_cntrl;
 
 	mhi_cntrl->cntrl_dev = &pdev->dev;
 	mhi_cntrl->iova_start = 0;
 	mhi_cntrl->iova_stop = (dma_addr_t)DMA_BIT_MASK(info->dma_data_width);
@@ -285,17 +436,23 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	err = mhi_pci_claim(mhi_cntrl, info->bar_num, DMA_BIT_MASK(info->dma_data_width));
 	if (err)
-		goto err_release;
+		return err;
 
 	err = mhi_pci_get_irqs(mhi_cntrl, mhi_cntrl_config);
 	if (err)
-		goto err_release;
+		return err;
 
-	pci_set_drvdata(pdev, mhi_cntrl);
+	pci_set_drvdata(pdev, mhi_pdev);
+
+	/* Have stored pci confspace at hand for restore in sudden PCI error */
+	pci_save_state(pdev);
+	mhi_pdev->pci_state = pci_store_saved_state(pdev);
+
+	pci_enable_pcie_error_reporting(pdev);
 
 	err = mhi_register_controller(mhi_cntrl, mhi_cntrl_config);
 	if (err)
-		goto err_release;
+		return err;
 
 	/* MHI bus does not power up the controller by default */
 	err = mhi_prepare_for_power_up(mhi_cntrl);
@@ -310,33 +467,209 @@ static int mhi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto err_unprepare;
 	}
 
+	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+
+	/* start health check */
+	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
 	return 0;
 
 err_unprepare:
 	mhi_unprepare_after_power_down(mhi_cntrl);
 err_unregister:
 	mhi_unregister_controller(mhi_cntrl);
-err_release:
-	mhi_free_controller(mhi_cntrl);
 
 	return err;
 }
 
 static void mhi_pci_remove(struct pci_dev *pdev)
 {
-	struct mhi_controller *mhi_cntrl = pci_get_drvdata(pdev);
+	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	del_timer(&mhi_pdev->health_check_timer);
+	cancel_work_sync(&mhi_pdev->recovery_work);
+
+	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+		mhi_power_down(mhi_cntrl, true);
+		mhi_unprepare_after_power_down(mhi_cntrl);
+	}
 
-	mhi_power_down(mhi_cntrl, true);
-	mhi_unprepare_after_power_down(mhi_cntrl);
 	mhi_unregister_controller(mhi_cntrl);
-	mhi_free_controller(mhi_cntrl);
+}
+
+static void mhi_pci_reset_prepare(struct pci_dev *pdev)
+{
+	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	dev_info(&pdev->dev, "reset\n");
+
+	del_timer(&mhi_pdev->health_check_timer);
+
+	/* Clean up MHI state */
+	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+		mhi_power_down(mhi_cntrl, false);
+		mhi_unprepare_after_power_down(mhi_cntrl);
+	}
+
+	/* cause internal device reset */
+	mhi_soc_reset(mhi_cntrl);
+
+	/* Be sure device reset has been executed */
+	msleep(MHI_POST_RESET_DELAY_MS);
+}
+
+static void mhi_pci_reset_done(struct pci_dev *pdev)
+{
+	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+	int err;
+
+	/* Restore initial known working PCI state */
+	pci_load_saved_state(pdev, mhi_pdev->pci_state);
+	pci_restore_state(pdev);
+
+	/* Is device status available ? */
+	if (!mhi_pci_is_alive(mhi_cntrl)) {
+		dev_err(&pdev->dev, "reset failed\n");
+		return;
+	}
+
+	err = mhi_prepare_for_power_up(mhi_cntrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to prepare MHI controller\n");
+		return;
+	}
+
+	err = mhi_sync_power_up(mhi_cntrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to power up MHI controller\n");
+		mhi_unprepare_after_power_down(mhi_cntrl);
+		return;
+	}
+
+	set_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status);
+	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+}
+
+static pci_ers_result_t mhi_pci_error_detected(struct pci_dev *pdev,
+					       pci_channel_state_t state)
+{
+	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	dev_err(&pdev->dev, "PCI error detected, state = %u\n", state);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	/* Clean up MHI state */
+	if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
+		mhi_power_down(mhi_cntrl, false);
+		mhi_unprepare_after_power_down(mhi_cntrl);
+	} else {
+		/* Nothing to do */
+		return PCI_ERS_RESULT_RECOVERED;
+	}
+
+	pci_disable_device(pdev);
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t mhi_pci_slot_reset(struct pci_dev *pdev)
+{
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev, "Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void mhi_pci_io_resume(struct pci_dev *pdev)
+{
+	struct mhi_pci_device *mhi_pdev = pci_get_drvdata(pdev);
+
+	dev_err(&pdev->dev, "PCI slot reset done\n");
+
+	queue_work(system_long_wq, &mhi_pdev->recovery_work);
+}
+
+static const struct pci_error_handlers mhi_pci_err_handler = {
+	.error_detected = mhi_pci_error_detected,
+	.slot_reset = mhi_pci_slot_reset,
+	.resume = mhi_pci_io_resume,
+	.reset_prepare = mhi_pci_reset_prepare,
+	.reset_done = mhi_pci_reset_done,
+};
+
+static int __maybe_unused mhi_pci_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+
+	del_timer(&mhi_pdev->health_check_timer);
+	cancel_work_sync(&mhi_pdev->recovery_work);
+
+	/* Transition to M3 state */
+	mhi_pm_suspend(mhi_cntrl);
+
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_wake_from_d3(pdev, true);
+	pci_set_power_state(pdev, PCI_D3hot);
+
+	return 0;
+}
+
+static int __maybe_unused mhi_pci_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct mhi_pci_device *mhi_pdev = dev_get_drvdata(dev);
+	struct mhi_controller *mhi_cntrl = &mhi_pdev->mhi_cntrl;
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_set_master(pdev);
+
+	err = pci_enable_device(pdev);
+	if (err)
+		goto err_recovery;
+
+	/* Exit M3, transition to M0 state */
+	err = mhi_pm_resume(mhi_cntrl);
+	if (err) {
+		dev_err(&pdev->dev, "failed to resume device: %d\n", err);
+		goto err_recovery;
+	}
+
+	/* Resume health check */
+	mod_timer(&mhi_pdev->health_check_timer, jiffies + HEALTH_CHECK_PERIOD);
+
+	return 0;
+
+err_recovery:
+	/* The device may have loose power or crashed, try recovering it */
+	queue_work(system_long_wq, &mhi_pdev->recovery_work);
+
+	return err;
 }
 
+static const struct dev_pm_ops mhi_pci_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
+};
+
 static struct pci_driver mhi_pci_driver = {
 	.name		= "mhi-pci-generic",
 	.id_table	= mhi_pci_id_table,
 	.probe		= mhi_pci_probe,
-	.remove		= mhi_pci_remove
+	.remove		= mhi_pci_remove,
+	.err_handler	= &mhi_pci_err_handler,
+	.driver.pm	= &mhi_pci_pm_ops
 };
 
 module_pci_driver(mhi_pci_driver);
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -279,7 +279,7 @@ struct mhi_controller_config {
 	u32 num_channels;
 	const struct mhi_channel_config *ch_cfg;
 	u32 num_events;
-	const struct mhi_event_config *event_cfg;
+	struct mhi_event_config *event_cfg;
 	bool use_bounce_buf;
 	bool m2_no_db;
 };
@@ -347,12 +347,14 @@ struct mhi_controller_config {
  * @unmap_single: CB function to destroy TRE buffer
  * @read_reg: Read a MHI register via the physical link (required)
  * @write_reg: Write a MHI register via the physical link (required)
+ * @reset: Controller specific reset function (optional)
  * @buffer_len: Bounce buffer length
  * @index: Index of the MHI controller instance
  * @bounce_buf: Use of bounce buffer
 * @fbc_download: MHI host needs to do complete image transfer (optional)
 * @pre_init: MHI host needs to do pre-initialization before power up
 * @wake_set: Device wakeup set flag
+ * @irq_flags: irq flags passed to request_irq (optional)
 *
 * Fields marked as (required) need to be populated by the controller driver
 * before calling mhi_register_controller(). For the fields marked as (optional)
@@ -437,6 +439,7 @@
 			u32 *out);
 	void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
 			  u32 val);
+	void (*reset)(struct mhi_controller *mhi_cntrl);
 
 	size_t buffer_len;
 	int index;
@@ -444,6 +447,7 @@
 	bool fbc_download;
 	bool pre_init;
 	bool wake_set;
+	unsigned long irq_flags;
 };
 
 /**
@@ -598,6 +602,15 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
  */
 void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);
 
+/**
+ * mhi_get_free_desc_count - Get transfer ring length
+ *			     Get # of TD available to queue buffers
+ * @mhi_dev: Device associated with the channels
+ * @dir: Direction of the channel
+ */
+int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
+				enum dma_data_direction dir);
+
 /**
  * mhi_prepare_for_power_up - Do pre-initialization before power up.
  *			      This is optional, call this before power up if
@@ -672,6 +685,13 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
  */
 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
 
+/**
+ * mhi_soc_reset - Trigger a device reset. This can be used as a last resort
+ *		   to reset and recover a device.
+ * @mhi_cntrl: MHI controller
+ */
+void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
+
 /**
  * mhi_device_get - Disable device low power mode
  * @mhi_dev: Device associated with the channel
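To illustrate the two optional controller-side hooks declared above (irq_flags and the reset callback), here is a hedged, hypothetical sketch of a controller driver opting into both. The names example_reset() and example_register(), and the chosen flag value, are assumptions for illustration only; ath11k, the current irq_flags consumer, picks its own flags.

/* Hypothetical controller-side sketch showing the new optional fields. */
#include <linux/interrupt.h>
#include <linux/mhi.h>

static void example_reset(struct mhi_controller *mhi_cntrl)
{
	/* A device-specific reset sequence would go here. When this
	 * callback is left unset, mhi_soc_reset() falls back to writing
	 * MHI_SOC_RESET_REQ to the MHI_SOC_RESET_REQ_OFFSET register. */
}

static int example_register(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	/* Optional: override the default IRQF_SHARED | IRQF_NO_SUSPEND */
	mhi_cntrl->irq_flags = IRQF_SHARED;

	/* Optional: controller specific reset used by mhi_soc_reset() */
	mhi_cntrl->reset = example_reset;

	return mhi_register_controller(mhi_cntrl, config);
}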