linux/drivers/firmware/arm_scmi/mailbox.c
Cristian Marussi 59172b212e firmware: arm_scmi: Make tx_prepare time out eventually
SCMI transports based on shared memory have to wait, at the start of a
transmission, for the shared Tx channel area to be freed by the SCMI
platform before accessing the channel. The channel is owned by the SCMI
platform until the platform itself marks it as free and, as such, cannot
be used by the agent until it is relinquished.

As a consequence, a badly misbehaving SCMI platform firmware could lock
the channel indefinitely and make the kernel-side SCMI stack loop
forever waiting for the channel to be freed, possibly hanging the whole
boot sequence.

Add a timeout to the existing Tx waiting spin-loop so that, when the
system ends up in this situation, the SCMI stack can at least bail out,
noisily warn the user, and abort the transmission.

Reported-by: YaxiongTian <iambestgod@outlook.com>
Suggested-by: YaxiongTian <iambestgod@outlook.com>
Cc: Vincent Guittot <vincent.guittot@linaro.org>
Cc: Etienne Carriere <etienne.carriere@linaro.org>
Cc: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
Link: https://lore.kernel.org/r/20221028140833.280091-3-cristian.marussi@arm.com
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
2022-11-01 11:33:24 +00:00
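
The change itself lands in shmem_tx_prepare(), the shared-memory helper that
this file's tx_prepare() hands the channel info to; below is only a rough,
illustrative sketch of the kind of bounded busy-wait the commit message
describes. The helper name, the channel_status field, the
SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE flag and the cinfo->rx_timeout_ms budget are
assumptions for the sketch, not the literal patch.

/* Sketch only: in the real driver this logic lives in shmem.c. */
#include <linux/device.h>
#include <linux/io.h>
#include <linux/ktime.h>
#include <linux/processor.h>	/* spin_until_cond() */

static bool shmem_wait_channel_free(struct scmi_shared_mem __iomem *shmem,
				    struct scmi_chan_info *cinfo)
{
	/* Assumed budget: give up after twice the expected channel timeout. */
	ktime_t stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms);

	/* Spin until the platform marks the channel free or time runs out. */
	spin_until_cond((ioread32(&shmem->channel_status) &
			 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) ||
			ktime_after(ktime_get(), stop));

	if (!(ioread32(&shmem->channel_status) &
	      SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) {
		/* Misbehaving platform: warn noisily, let the caller abort. */
		WARN_ON_ONCE(1);
		dev_err(cinfo->dev,
			"Timeout waiting for a free TX channel !\n");
		return false;
	}

	return true;
}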

// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Mailbox Transport
 * driver.
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include "common.h"

/**
 * struct scmi_mailbox - Structure representing a SCMI mailbox transport
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox channel
 * @cinfo: SCMI channel info
 * @shmem: Transmit/Receive shared memory area
 */
struct scmi_mailbox {
	struct mbox_client cl;
	struct mbox_chan *chan;
	struct scmi_chan_info *cinfo;
	struct scmi_shared_mem __iomem *shmem;
};

#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
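
/*
 * Mailbox client tx_prepare() hook: fill the shared memory area with the
 * outgoing SCMI message right before the mailbox framework starts the
 * transmission; shmem_tx_prepare() is also where the (now bounded) wait
 * for the platform to release the Tx channel takes place.
 */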
static void tx_prepare(struct mbox_client *cl, void *m)
{
	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

	shmem_tx_prepare(smbox->shmem, m, smbox->cinfo);
}
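
/*
 * Mailbox client rx_callback() hook: notify the SCMI core that a message
 * (response or notification) is available in the shared memory area,
 * passing along the message header just read from it.
 */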
static void rx_callback(struct mbox_client *cl, void *m)
{
	struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

	scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
}
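
/*
 * A channel is considered available when the device node carries a valid
 * "mboxes" phandle entry at the requested index.
 */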
static bool mailbox_chan_available(struct device *dev, int idx)
{
	return !of_parse_phandle_with_args(dev->of_node, "mboxes",
					   "#mbox-cells", idx, NULL);
}
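
/*
 * Set up one SCMI channel: map the "shmem" area referenced by the device
 * tree, register as a mailbox client and request the corresponding Tx or
 * Rx mailbox channel (index 0 for Tx, 1 for Rx).
 */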
static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
			      bool tx)
{
	const char *desc = tx ? "Tx" : "Rx";
	struct device *cdev = cinfo->dev;
	struct scmi_mailbox *smbox;
	struct device_node *shmem;
	int ret, idx = tx ? 0 : 1;
	struct mbox_client *cl;
	resource_size_t size;
	struct resource res;

	smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
	if (!smbox)
		return -ENOMEM;

	shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
	if (!of_device_is_compatible(shmem, "arm,scmi-shmem"))
		return -ENXIO;

	ret = of_address_to_resource(shmem, 0, &res);
	of_node_put(shmem);
	if (ret) {
		dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
		return ret;
	}

	size = resource_size(&res);
	smbox->shmem = devm_ioremap(dev, res.start, size);
	if (!smbox->shmem) {
		dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
		return -EADDRNOTAVAIL;
	}

	cl = &smbox->cl;
	cl->dev = cdev;
	cl->tx_prepare = tx ? tx_prepare : NULL;
	cl->rx_callback = rx_callback;
	cl->tx_block = false;
	cl->knows_txdone = tx;

	smbox->chan = mbox_request_channel(cl, tx ? 0 : 1);
	if (IS_ERR(smbox->chan)) {
		ret = PTR_ERR(smbox->chan);
		if (ret != -EPROBE_DEFER)
			dev_err(cdev, "failed to request SCMI %s mailbox\n",
				tx ? "Tx" : "Rx");
		return ret;
	}

	cinfo->transport_info = smbox;
	smbox->cinfo = cinfo;

	return 0;
}
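
/*
 * Tear down a previously set up channel: release the mailbox channel and
 * drop the transport-specific references before handing the generic
 * channel info back to the core.
 */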
static int mailbox_chan_free(int id, void *p, void *data)
{
	struct scmi_chan_info *cinfo = p;
	struct scmi_mailbox *smbox = cinfo->transport_info;

	if (smbox && !IS_ERR(smbox->chan)) {
		mbox_free_channel(smbox->chan);
		cinfo->transport_info = NULL;
		smbox->chan = NULL;
		smbox->cinfo = NULL;
	}

	scmi_free_channel(cinfo, data, id);

	return 0;
}
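
/*
 * Queue the xfer on the mailbox channel; the payload itself is written to
 * the shared memory by tx_prepare() once the channel is actually started.
 */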
static int mailbox_send_message(struct scmi_chan_info *cinfo,
				struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;
	int ret;

	ret = mbox_send_message(smbox->chan, xfer);

	/* mbox_send_message returns non-negative value on success, so reset */
	if (ret > 0)
		ret = 0;

	return ret;
}

static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
				struct scmi_xfer *__unused)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	/*
	 * NOTE: we might prefer not to need the mailbox ticker to manage the
	 * transfer queueing since the protocol layer queues things by itself.
	 * Unfortunately, we have to kick the mailbox framework after we have
	 * received our message.
	 */
	mbox_client_txdone(smbox->chan, ret);
}
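
/*
 * The remaining channel operations are thin wrappers around the generic
 * shared memory helpers in shmem.c.
 */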
static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
				   struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	shmem_fetch_response(smbox->shmem, xfer);
}

static void mailbox_fetch_notification(struct scmi_chan_info *cinfo,
				       size_t max_len, struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	shmem_fetch_notification(smbox->shmem, max_len, xfer);
}

static void mailbox_clear_channel(struct scmi_chan_info *cinfo)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	shmem_clear_channel(smbox->shmem);
}

static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
	struct scmi_mailbox *smbox = cinfo->transport_info;

	return shmem_poll_done(smbox->shmem, xfer);
}

static const struct scmi_transport_ops scmi_mailbox_ops = {
	.chan_available = mailbox_chan_available,
	.chan_setup = mailbox_chan_setup,
	.chan_free = mailbox_chan_free,
	.send_message = mailbox_send_message,
	.mark_txdone = mailbox_mark_txdone,
	.fetch_response = mailbox_fetch_response,
	.fetch_notification = mailbox_fetch_notification,
	.clear_channel = mailbox_clear_channel,
	.poll_done = mailbox_poll_done,
};

const struct scmi_desc scmi_mailbox_desc = {
	.ops = &scmi_mailbox_ops,
	.max_rx_timeout_ms = 30, /* We may increase this if required */
	.max_msg = 20, /* Limited by MBOX_TX_QUEUE_LEN */
	.max_msg_size = 128,
};