drivers: firmware: scmi: Extend SCMI transport layer by trace events

The SCMI transport layer communicates with firmware running on a
microcontroller via mailboxes and shared memory. How long it takes to
pass an SCMI message is platform specific. The most latency-sensitive
requests come from the CPUFreq subsystem, which may be invoked by the
scheduler. Thus, there is a need to measure these delays and capture
anomalies. This change introduces trace events wrapped around the
transfer code.
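
The events themselves are declared in include/trace/events/scmi.h,
which is added by a companion patch and is not part of this diff. As a
rough sketch only, assuming a field list that simply mirrors the
trace_scmi_xfer_begin() call site below (field names here are an
assumption, not copied from the real header), such a declaration could
look like:

/* Sketch of a tracepoint declaration; fields are assumed from the
 * call site in scmi_do_xfer(), not taken from the real header. */
TRACE_EVENT(scmi_xfer_begin,
	TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
		 bool poll),
	TP_ARGS(transfer_id, msg_id, protocol_id, seq, poll),

	TP_STRUCT__entry(
		__field(int, transfer_id)
		__field(u8, msg_id)
		__field(u8, protocol_id)
		__field(u16, seq)
		__field(bool, poll)
	),

	TP_fast_assign(
		__entry->transfer_id = transfer_id;
		__entry->msg_id = msg_id;
		__entry->protocol_id = protocol_id;
		__entry->seq = seq;
		__entry->poll = poll;
	),

	TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u poll=%u",
		  __entry->transfer_id, __entry->msg_id,
		  __entry->protocol_id, __entry->seq, __entry->poll)
);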

Following Jim's suggestion, a unique transfer_id is used to distinguish
similar entries which might share the same message id, protocol id and
sequence number. This can happen when some transfers time out.

Suggested-by: Jim Quinlan <james.quinlan@broadcom.com>
Signed-off-by: Lukasz Luba <lukasz.luba@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Author: Lukasz Luba, 2019-12-17 13:43:45 +00:00 (committed by Sudeep Holla)
parent 257d0e20ec
commit 729d3530a5
2 changed files with 20 additions and 0 deletions

drivers/firmware/arm_scmi/common.h

@@ -81,6 +81,7 @@ struct scmi_msg {
/**
* struct scmi_xfer - Structure representing a message flow
*
* @transfer_id: Unique ID for debug & profiling purpose
* @hdr: Transmit message header
* @tx: Transmit message
* @rx: Receive message, the buffer should be pre-allocated to store
@@ -90,6 +91,7 @@ struct scmi_msg {
* @async: pointer to delayed response message received event completion
*/
struct scmi_xfer {
int transfer_id;
struct scmi_msg_hdr hdr;
struct scmi_msg tx;
struct scmi_msg rx;

drivers/firmware/arm_scmi/driver.c

@@ -29,6 +29,9 @@
#include "common.h"
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>
#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
@@ -61,6 +64,8 @@ enum scmi_error_codes {
static LIST_HEAD(scmi_list);
/* Protection for the entire list */
static DEFINE_MUTEX(scmi_list_mutex);
/* Track the unique id for the transfers for debug & profiling purpose */
static atomic_t transfer_last_id;
/**
* struct scmi_xfers_info - Structure to manage transfer information
@@ -304,6 +309,7 @@ static struct scmi_xfer *scmi_xfer_get(const struct scmi_handle *handle,
xfer = &minfo->xfer_block[xfer_id];
xfer->hdr.seq = xfer_id;
reinit_completion(&xfer->done);
xfer->transfer_id = atomic_inc_return(&transfer_last_id);
return xfer;
}
@@ -374,6 +380,10 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m)
scmi_fetch_response(xfer, mem);
trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
msg_type);
if (msg_type == MSG_TYPE_DELAYED_RESP)
complete(xfer->async_done);
else
@@ -439,6 +449,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
if (unlikely(!cinfo))
return -EINVAL;
trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.poll_completion);
ret = mbox_send_message(cinfo->chan, xfer);
if (ret < 0) {
dev_dbg(dev, "mbox send fail %d\n", ret);
@@ -478,6 +492,10 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
*/
mbox_client_txdone(cinfo->chan, ret);
trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
xfer->hdr.protocol_id, xfer->hdr.seq,
xfer->hdr.status);
return ret;
}