Merge branch 'octeontx2-pf-mbox-fixes'

Subbaraya Sundeep says:

====================
octeontx2-pf: RVU Mailbox fixes

This patchset fixes problems related to the RVU mailbox.
During long-run tests, VF commands like setting the MTU or
toggling the interface sometimes fail because the VF mailbox
times out waiting for a response from the PF.

Below are the fixes:

Patch 1: There are two types of messages in the RVU mailbox, namely up
and down messages. Down messages are synchronous: a PF/VF sends a
message to the AF and the AF replies with a response. Up messages are
asynchronous notifications, such as the AF sending link events to a PF.
When a VF sends a down message, the PF forwards it to the AF and sends
the response from the AF back to the VF. The PF has to forward VF
messages since there is no hardware path for a VF to reach the AF
directly. There is a single mailbox interrupt from the AF to the PF
which, when raised, can mean one of two scenarios: either the AF is
replying to a down message sent by the PF, or the AF is asynchronously
sending an up message because the link changed for that PF. Receiving
the up-message interrupt while the PF is in the middle of forwarding a
down message causes mailbox errors. Fix this by having the receiver
detect the type of message from the mbox data register set by the
sender.
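
As a rough sketch, the receiver-side dispatch then looks like this
(mirroring the VF interrupt handler later in this series; register and
field names are those of the otx2 driver):

	mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);

	if (mbox_data & MBOX_DOWN_MSG) {
		/* Ack the DOWN bit, then handle the reply to our request */
		mbox_data &= ~MBOX_DOWN_MSG;
		otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
		queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
	}
	if (mbox_data & MBOX_UP_MSG) {
		/* Ack the UP bit, then handle the async notification */
		mbox_data &= ~MBOX_UP_MSG;
		otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
		queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
	}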

Patch 2:
During VF driver removal, the VF has to wait until the last message is
completed and only then turn off mailbox interrupts from the PF.
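
Concretely, otx2vf_remove() must send its final detach message before
masking the mailbox interrupt (sketch of the reordered teardown from
the patch):

	otx2_detach_resources(&vf->mbox);	/* last mbox message to AF */
	otx2vf_disable_mbox_intr(vf);		/* mask IRQ only afterwards */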

Patch 3:
Do not use an ordered workqueue for message processing, since multiple
works are queued simultaneously by all the VFs and by PF link-up
messages.
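
That is, replace the ordered workqueue with a plain unbound one so
works queued by different VFs can be processed in parallel:

	pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
					   WQ_UNBOUND | WQ_HIGHPRI |
					   WQ_MEM_RECLAIM, 0);
	if (!pf->mbox_pfvf_wq)
		return -ENOMEM;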

Patch 4:
When the PF sends a link event to a VF, check whether the VF is really
up to receive the message.
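
In otx2_vf_link_event_task() this amounts to an early return while the
VF interface is down:

	if (config->intf_down)
		return;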

Patch 5:
In the AF driver, use separate interrupt handlers for the AF-VF and
AF-PF interrupts. Sometimes both interrupts are raised on two CPUs at
the same time and both CPUs execute the same handler function
concurrently, corrupting the data.
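
With separate handlers, the AF-PF vector is requested with its own
routine so the PF and VF paths no longer race through shared state
(sketch of the registration):

	ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
			  rvu_mbox_pf_intr_handler, 0,
			  &rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE],
			  rvu);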

v2 changes:
	Added a missing mutex unlock in the error path in patch 1
	Refactored the if/else logic in patch 1 as suggested by Paolo Abeni
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9c6a59543a
David S. Miller <davem@davemloft.net>  2024-03-20 10:49:08 +00:00
10 files changed, 225 insertions(+), 88 deletions(-)

View File

@ -214,11 +214,12 @@ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
void *hw_mbase = mdev->hwbase;
u64 intr_val;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@ -254,14 +255,52 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
spin_unlock(&mdev->mbox_lock);
/* Check if interrupt pending */
intr_val = readq((void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
intr_val |= data;
/* The interrupt should be fired after num_msgs is written
* to the shared memory
*/
writeq(1, (void __iomem *)mbox->reg_base +
writeq(intr_val, (void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
}
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
{
otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
}
EXPORT_SYMBOL(otx2_mbox_msg_send);
void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
{
otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
}
EXPORT_SYMBOL(otx2_mbox_msg_send_up);
bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
{
u64 data;
data = readq((void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
/* If data is non-zero wait for ~1ms and return to caller
* whether data has changed to zero or not after the wait.
*/
if (!data)
return true;
usleep_range(950, 1000);
data = readq((void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
return data == 0;
}
EXPORT_SYMBOL(otx2_mbox_wait_for_zero);
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
int size, int size_rsp)
{

View File

@ -16,6 +16,9 @@
#define MBOX_SIZE SZ_64K
#define MBOX_DOWN_MSG 1
#define MBOX_UP_MSG 2
/* AF/PF: PF initiated, PF/VF VF initiated */
#define MBOX_DOWN_RX_START 0
#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
@ -101,6 +104,7 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs, unsigned long *bmap);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
@ -118,6 +122,8 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
}
bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid);
/* Mailbox message types */
#define MBOX_MSG_MASK 0xFFFF
#define MBOX_MSG_INVALID 0xFFFE

View File

@ -121,13 +121,17 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
{
struct mcs_intr_info *req;
int err, pf;
int pf;
pf = rvu_get_pf(event->pcifunc);
mutex_lock(&rvu->mbox_lock);
req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
if (!req)
if (!req) {
mutex_unlock(&rvu->mbox_lock);
return -ENOMEM;
}
req->mcs_id = event->mcs_id;
req->intr_mask = event->intr_mask;
@ -135,10 +139,11 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
req->hdr.pcifunc = event->pcifunc;
req->lmac_id = event->lmac_id;
otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
if (err)
dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
mutex_unlock(&rvu->mbox_lock);
return 0;
}

View File

@ -2119,7 +2119,7 @@ bad_message:
}
}
static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
{
struct rvu *rvu = mwork->rvu;
int offset, err, id, devid;
@ -2186,6 +2186,9 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
}
mw->mbox_wrk[devid].num_msgs = 0;
if (poll)
otx2_mbox_wait_for_zero(mbox, devid);
/* Send mbox responses to VF/PF */
otx2_mbox_msg_send(mbox, devid);
}
@ -2193,15 +2196,18 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
struct rvu *rvu = mwork->rvu;
__rvu_mbox_handler(mwork, TYPE_AFPF);
mutex_lock(&rvu->mbox_lock);
__rvu_mbox_handler(mwork, TYPE_AFPF, true);
mutex_unlock(&rvu->mbox_lock);
}
static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
__rvu_mbox_handler(mwork, TYPE_AFVF);
__rvu_mbox_handler(mwork, TYPE_AFVF, false);
}
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
@ -2376,6 +2382,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
}
mutex_init(&rvu->mbox_lock);
mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
if (!mbox_regions) {
err = -ENOMEM;
@ -2525,10 +2533,9 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
}
}
static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
{
struct rvu *rvu = (struct rvu *)rvu_irq;
int vfs = rvu->vfs;
u64 intr;
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
@ -2542,6 +2549,18 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
return IRQ_HANDLED;
}
static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
{
struct rvu *rvu = (struct rvu *)rvu_irq;
int vfs = rvu->vfs;
u64 intr;
/* Sync with mbox memory region */
rmb();
/* Handle VF interrupts */
if (vfs > 64) {
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
@ -2886,7 +2905,7 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Register mailbox interrupt handler */
sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
rvu_mbox_intr_handler, 0,
rvu_mbox_pf_intr_handler, 0,
&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
if (ret) {
dev_err(rvu->dev,

View File

@ -591,6 +591,8 @@ struct rvu {
spinlock_t mcs_intrq_lock;
/* CPT interrupt lock */
spinlock_t cpt_intr_lock;
struct mutex mbox_lock; /* Serialize mbox up and down msgs */
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)

View File

@ -232,7 +232,7 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
struct cgx_link_user_info *linfo;
struct cgx_link_info_msg *msg;
unsigned long pfmap;
int err, pfid;
int pfid;
linfo = &event->link_uinfo;
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
@ -255,16 +255,22 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
continue;
}
mutex_lock(&rvu->mbox_lock);
/* Send mbox message to PF */
msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
if (!msg)
if (!msg) {
mutex_unlock(&rvu->mbox_lock);
continue;
}
msg->link_info = *linfo;
otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
if (err)
dev_warn(rvu->dev, "notification to pf %d failed\n",
pfid);
otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);
otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}

View File

@ -1592,7 +1592,7 @@ int otx2_detach_resources(struct mbox *mbox)
detach->partial = false;
/* Send detach request to AF */
otx2_mbox_msg_send(&mbox->mbox, 0);
otx2_sync_mbox_msg(mbox);
mutex_unlock(&mbox->lock);
return 0;
}

View File

@ -815,7 +815,7 @@ static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
return 0;
otx2_mbox_msg_send(&mbox->mbox_up, devid);
otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
if (err)
return err;

View File

@ -292,8 +292,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
return 0;
}
static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
int first, int mdevs, u64 intr, int type)
static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
int first, int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@ -307,40 +307,26 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
mbox = &mw->mbox;
mdev = &mbox->dev[i];
if (type == TYPE_PFAF)
otx2_sync_mbox_bbuf(mbox, i);
hdr = mdev->mbase + mbox->rx_start;
/* The hdr->num_msgs is set to zero immediately in the interrupt
* handler to ensure that it holds a correct value next time
* when the interrupt handler is called.
* pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
* pf>mbox.up_num_msgs holds the data for use in
* pfaf_mbox_up_handler.
* handler to ensure that it holds a correct value next time
* when the interrupt handler is called. pf->mw[i].num_msgs
* holds the data for use in otx2_pfvf_mbox_handler and
* pf->mw[i].up_num_msgs holds the data for use in
* otx2_pfvf_mbox_up_handler.
*/
if (hdr->num_msgs) {
mw[i].num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
if (type == TYPE_PFAF)
memset(mbox->hwbase + mbox->rx_start, 0,
ALIGN(sizeof(struct mbox_hdr),
sizeof(u64)));
queue_work(mbox_wq, &mw[i].mbox_wrk);
}
mbox = &mw->mbox_up;
mdev = &mbox->dev[i];
if (type == TYPE_PFAF)
otx2_sync_mbox_bbuf(mbox, i);
hdr = mdev->mbase + mbox->rx_start;
if (hdr->num_msgs) {
mw[i].up_num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
if (type == TYPE_PFAF)
memset(mbox->hwbase + mbox->rx_start, 0,
ALIGN(sizeof(struct mbox_hdr),
sizeof(u64)));
queue_work(mbox_wq, &mw[i].mbox_up_wrk);
}
}
@ -356,8 +342,10 @@ static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
/* Msgs are already copied, trigger VF's mbox irq */
smp_wmb();
otx2_mbox_wait_for_zero(pfvf_mbox, devid);
offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);
/* Restore VF's mbox bounce buffer region address */
src_mdev->mbase = bbuf_base;
@ -547,7 +535,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
end:
offset = mbox->rx_start + msg->next_msgoff;
if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
__otx2_mbox_reset(mbox, 0);
__otx2_mbox_reset(mbox, vf_idx);
mdev->msgs_acked++;
}
}
@ -564,8 +552,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
if (vfs > 64) {
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
TYPE_PFVF);
otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
vfs = 64;
@ -574,7 +561,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
@ -597,8 +584,9 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf)
return -ENOMEM;
pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox",
WQ_HIGHPRI | WQ_MEM_RECLAIM);
pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
WQ_UNBOUND | WQ_HIGHPRI |
WQ_MEM_RECLAIM, 0);
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
@ -821,20 +809,22 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
struct mbox *af_mbox;
struct otx2_nic *pf;
int offset, id;
u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
num_msgs = rsp_hdr->num_msgs;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
pf = af_mbox->pfvf;
for (id = 0; id < af_mbox->num_msgs; id++) {
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2_process_pfaf_mbox_msg(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
if (mdev->msgs_acked == (num_msgs - 1))
__otx2_mbox_reset(mbox, 0);
mdev->msgs_acked++;
}
@ -945,12 +935,14 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
int offset, id, devid = 0;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
u16 num_msgs;
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
num_msgs = rsp_hdr->num_msgs;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < af_mbox->up_num_msgs; id++) {
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
@ -959,10 +951,11 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_process_mbox_msg_up(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
}
if (devid) {
/* Forward to VF iff VFs are really present */
if (devid && pci_num_vf(pf->pdev)) {
otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
MBOX_DIR_PFVF_UP, devid - 1,
af_mbox->up_num_msgs);
num_msgs);
return;
}
@ -972,16 +965,49 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
struct mbox *mbox;
struct mbox *mw = &pf->mbox;
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
u64 mbox_data;
/* Clear the IRQ */
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
mbox = &pf->mbox;
trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);
otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
if (mbox_data & MBOX_UP_MSG) {
mbox_data &= ~MBOX_UP_MSG;
otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
mbox = &mw->mbox_up;
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs)
queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
BIT_ULL(0));
}
if (mbox_data & MBOX_DOWN_MSG) {
mbox_data &= ~MBOX_DOWN_MSG;
otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
mbox = &mw->mbox;
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs)
queue_work(pf->mbox_wq, &mw->mbox_wrk);
trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
BIT_ULL(0));
}
return IRQ_HANDLED;
}
@ -3087,6 +3113,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
struct otx2_vf_config *config;
struct cgx_link_info_msg *req;
struct mbox_msghdr *msghdr;
struct delayed_work *dwork;
struct otx2_nic *pf;
int vf_idx;
@ -3095,10 +3122,24 @@ static void otx2_vf_link_event_task(struct work_struct *work)
vf_idx = config - config->pf->vf_configs;
pf = config->pf;
if (config->intf_down)
return;
mutex_lock(&pf->mbox.lock);
dwork = &config->link_event_work;
if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
schedule_delayed_work(dwork, msecs_to_jiffies(100));
mutex_unlock(&pf->mbox.lock);
return;
}
msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
sizeof(*req), sizeof(struct msg_rsp));
if (!msghdr) {
dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
mutex_unlock(&pf->mbox.lock);
return;
}
@ -3107,7 +3148,11 @@ static void otx2_vf_link_event_task(struct work_struct *work)
req->hdr.sig = OTX2_MBOX_REQ_SIG;
memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
mutex_unlock(&pf->mbox.lock);
}
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)

View File

@ -89,16 +89,20 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
struct otx2_mbox *mbox;
struct mbox *af_mbox;
int offset, id;
u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (af_mbox->num_msgs == 0)
num_msgs = rsp_hdr->num_msgs;
if (num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < af_mbox->num_msgs; id++) {
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@ -151,6 +155,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
struct mbox *vf_mbox;
struct otx2_nic *vf;
int offset, id;
u16 num_msgs;
vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
vf = vf_mbox->pfvf;
@ -158,12 +163,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (vf_mbox->up_num_msgs == 0)
num_msgs = rsp_hdr->num_msgs;
if (num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < vf_mbox->up_num_msgs; id++) {
for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_mbox_msg_up(vf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@ -178,40 +185,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
u64 mbox_data;
/* Clear the IRQ */
otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);
/* Read latest mbox data */
smp_rmb();
/* Check for PF => VF response messages */
mbox = &vf->mbox.mbox;
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
if (mbox_data & MBOX_DOWN_MSG) {
mbox_data &= ~MBOX_DOWN_MSG;
otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
/* Check for PF => VF response messages */
mbox = &vf->mbox.mbox;
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs) {
vf->mbox.num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
memset(mbox->hwbase + mbox->rx_start, 0,
ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs)
queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
BIT_ULL(0));
}
/* Check for PF => VF notification messages */
mbox = &vf->mbox.mbox_up;
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs) {
vf->mbox.up_num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
memset(mbox->hwbase + mbox->rx_start, 0,
ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
if (mbox_data & MBOX_UP_MSG) {
mbox_data &= ~MBOX_UP_MSG;
otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
/* Check for PF => VF notification messages */
mbox = &vf->mbox.mbox_up;
mdev = &mbox->dev[0];
otx2_sync_mbox_bbuf(mbox, 0);
hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
if (hdr->num_msgs)
queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
BIT_ULL(0));
}
return IRQ_HANDLED;
@ -760,8 +775,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
otx2_shutdown_qos(vf);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
otx2vf_disable_mbox_intr(vf);
free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);