soc: qcom: import rpmh and cmd-db drivers from Linux

Import the RPMh and cmd-db frameworks from Linux 6.10-rc6.

Acked-by: Sumit Garg <sumit.garg@linaro.org>
Signed-off-by: Caleb Connolly <caleb.connolly@linaro.org>
Caleb Connolly committed on 2024-07-15 12:08:02 +02:00
parent 555047df6a
commit 6716e2211f
7 changed files with 2381 additions and 0 deletions

drivers/soc/qcom/cmd-db.c (new file, 393 lines)

@@ -0,0 +1,393 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved.
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <soc/qcom/cmd-db.h>
#define NUM_PRIORITY 2
#define MAX_SLV_ID 8
#define SLAVE_ID_MASK 0x7
#define SLAVE_ID_SHIFT 16
#define SLAVE_ID(addr) FIELD_GET(GENMASK(19, 16), addr)
#define VRM_ADDR(addr) FIELD_GET(GENMASK(19, 4), addr)
/**
* struct entry_header: header for each entry in cmddb
*
* @id: resource's identifier
* @priority: unused
* @addr: the address of the resource
* @len: length of the data
* @offset: offset from @data_offset to the start of the data
*/
struct entry_header {
u8 id[8];
__le32 priority[NUM_PRIORITY];
__le32 addr;
__le16 len;
__le16 offset;
};
/**
* struct rsc_hdr: resource header information
*
* @slv_id: id for the resource
* @header_offset: entry's header at offset from the end of the cmd_db_header
* @data_offset: entry's data at offset from the end of the cmd_db_header
* @cnt: number of entries for HW type
* @version: MSB is major, LSB is minor
* @reserved: reserved for future use.
*/
struct rsc_hdr {
__le16 slv_id;
__le16 header_offset;
__le16 data_offset;
__le16 cnt;
__le16 version;
__le16 reserved[3];
};
/**
* struct cmd_db_header: The DB header information
*
* @version: The cmd db version
* @magic: constant expected in the database
* @header: array of resources
* @checksum: checksum for the header. Unused.
* @reserved: reserved memory
* @data: driver specific data
*/
struct cmd_db_header {
__le32 version;
u8 magic[4];
struct rsc_hdr header[MAX_SLV_ID];
__le32 checksum;
__le32 reserved;
u8 data[];
};
/**
* DOC: Description of the Command DB database.
*
* At the start of the command DB memory is the cmd_db_header structure.
* The cmd_db_header holds the version, checksum and magic key, as well as an
* array of headers, one per slave (described by struct rsc_hdr). Each h/w
* based accelerator is a 'slave' (shared resource) and has a slave id indicating
* the type of accelerator. The rsc_header is the header for such individual
* slaves of a given type. The entries for each of these slaves begin at the
* rsc_hdr.header_offset. In addition, each slave could have auxiliary data
* that may be needed by the driver. The data for a slave starts at
* entry_header.offset from the location pointed to by rsc_hdr.data_offset.
*
* Drivers have a stringified key to a slave/resource. They can query the slave
* information and get the slave id and the auxiliary data and the length of the
* data. Using this information, they can format the request to be sent to the
* h/w accelerator and request a resource state.
*/
static const u8 CMD_DB_MAGIC[] = { 0xdb, 0x30, 0x03, 0x0c };
static bool cmd_db_magic_matches(const struct cmd_db_header *header)
{
const u8 *magic = header->magic;
return memcmp(magic, CMD_DB_MAGIC, ARRAY_SIZE(CMD_DB_MAGIC)) == 0;
}
static struct cmd_db_header *cmd_db_header;
static inline const void *rsc_to_entry_header(const struct rsc_hdr *hdr)
{
u16 offset = le16_to_cpu(hdr->header_offset);
return cmd_db_header->data + offset;
}
static inline void *
rsc_offset(const struct rsc_hdr *hdr, const struct entry_header *ent)
{
u16 offset = le16_to_cpu(hdr->data_offset);
u16 loffset = le16_to_cpu(ent->offset);
return cmd_db_header->data + offset + loffset;
}
/**
* cmd_db_ready - Indicates if command DB is available
*
* Return: 0 on success, errno otherwise
*/
int cmd_db_ready(void)
{
if (cmd_db_header == NULL)
return -EPROBE_DEFER;
else if (!cmd_db_magic_matches(cmd_db_header))
return -EINVAL;
return 0;
}
EXPORT_SYMBOL_GPL(cmd_db_ready);
static int cmd_db_get_header(const char *id, const struct entry_header **eh,
const struct rsc_hdr **rh)
{
const struct rsc_hdr *rsc_hdr;
const struct entry_header *ent;
int ret, i, j;
u8 query[sizeof(ent->id)] __nonstring;
ret = cmd_db_ready();
if (ret)
return ret;
strtomem_pad(query, id, 0);
for (i = 0; i < MAX_SLV_ID; i++) {
rsc_hdr = &cmd_db_header->header[i];
if (!rsc_hdr->slv_id)
break;
ent = rsc_to_entry_header(rsc_hdr);
for (j = 0; j < le16_to_cpu(rsc_hdr->cnt); j++, ent++) {
if (memcmp(ent->id, query, sizeof(ent->id)) == 0) {
if (eh)
*eh = ent;
if (rh)
*rh = rsc_hdr;
return 0;
}
}
}
return -ENODEV;
}
/**
* cmd_db_read_addr() - Query command db for resource id address.
*
* @id: resource id to query for address
*
* Return: resource address on success, 0 on error
*
* This is used to retrieve resource address based on resource
* id.
*/
u32 cmd_db_read_addr(const char *id)
{
int ret;
const struct entry_header *ent;
ret = cmd_db_get_header(id, &ent, NULL);
return ret < 0 ? 0 : le32_to_cpu(ent->addr);
}
EXPORT_SYMBOL_GPL(cmd_db_read_addr);
/**
* cmd_db_read_aux_data() - Query command db for aux data.
*
* @id: Resource to retrieve AUX Data on
* @len: size of data buffer returned
*
* Return: pointer to data on success, error pointer otherwise
*/
const void *cmd_db_read_aux_data(const char *id, size_t *len)
{
int ret;
const struct entry_header *ent;
const struct rsc_hdr *rsc_hdr;
ret = cmd_db_get_header(id, &ent, &rsc_hdr);
if (ret)
return ERR_PTR(ret);
if (len)
*len = le16_to_cpu(ent->len);
return rsc_offset(rsc_hdr, ent);
}
EXPORT_SYMBOL_GPL(cmd_db_read_aux_data);
/**
* cmd_db_match_resource_addr() - Check whether two resource addresses refer to the same resource
*
* @addr1: Resource address to compare
* @addr2: Resource address to compare
*
* Return: true if two addresses refer to the same resource, false otherwise
*/
bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
{
/*
* Each RPMh VRM accelerator resource has 3 or 4 contiguous 4-byte
* aligned addresses associated with it. Ignore the offset to check
* for VRM requests.
*/
if (addr1 == addr2)
return true;
else if (SLAVE_ID(addr1) == CMD_DB_HW_VRM && VRM_ADDR(addr1) == VRM_ADDR(addr2))
return true;
return false;
}
EXPORT_SYMBOL_GPL(cmd_db_match_resource_addr);
/**
* cmd_db_read_slave_id - Get the slave ID for a given resource address
*
* @id: Resource id to query the DB for the slave id
*
* Return: cmd_db_hw_type enum on success, CMD_DB_HW_INVALID on error
*/
enum cmd_db_hw_type cmd_db_read_slave_id(const char *id)
{
int ret;
const struct entry_header *ent;
u32 addr;
ret = cmd_db_get_header(id, &ent, NULL);
if (ret < 0)
return CMD_DB_HW_INVALID;
addr = le32_to_cpu(ent->addr);
return (addr >> SLAVE_ID_SHIFT) & SLAVE_ID_MASK;
}
EXPORT_SYMBOL_GPL(cmd_db_read_slave_id);
#ifdef CONFIG_DEBUG_FS
static int cmd_db_debugfs_dump(struct seq_file *seq, void *p)
{
int i, j;
const struct rsc_hdr *rsc;
const struct entry_header *ent;
const char *name;
u16 len, version;
u8 major, minor;
seq_puts(seq, "Command DB DUMP\n");
for (i = 0; i < MAX_SLV_ID; i++) {
rsc = &cmd_db_header->header[i];
if (!rsc->slv_id)
break;
switch (le16_to_cpu(rsc->slv_id)) {
case CMD_DB_HW_ARC:
name = "ARC";
break;
case CMD_DB_HW_VRM:
name = "VRM";
break;
case CMD_DB_HW_BCM:
name = "BCM";
break;
default:
name = "Unknown";
break;
}
version = le16_to_cpu(rsc->version);
major = version >> 8;
minor = version;
seq_printf(seq, "Slave %s (v%u.%u)\n", name, major, minor);
seq_puts(seq, "-------------------------\n");
ent = rsc_to_entry_header(rsc);
for (j = 0; j < le16_to_cpu(rsc->cnt); j++, ent++) {
seq_printf(seq, "0x%05x: %*pEp", le32_to_cpu(ent->addr),
(int)strnlen(ent->id, sizeof(ent->id)), ent->id);
len = le16_to_cpu(ent->len);
if (len) {
seq_printf(seq, " [%*ph]",
len, rsc_offset(rsc, ent));
}
seq_putc(seq, '\n');
}
}
return 0;
}
static int open_cmd_db_debugfs(struct inode *inode, struct file *file)
{
return single_open(file, cmd_db_debugfs_dump, inode->i_private);
}
#endif
static const struct file_operations cmd_db_debugfs_ops = {
#ifdef CONFIG_DEBUG_FS
.open = open_cmd_db_debugfs,
#endif
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int cmd_db_dev_probe(struct platform_device *pdev)
{
struct reserved_mem *rmem;
int ret = 0;
rmem = of_reserved_mem_lookup(pdev->dev.of_node);
if (!rmem) {
dev_err(&pdev->dev, "failed to acquire memory region\n");
return -EINVAL;
}
cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
if (!cmd_db_header) {
ret = -ENOMEM;
cmd_db_header = NULL;
return ret;
}
if (!cmd_db_magic_matches(cmd_db_header)) {
dev_err(&pdev->dev, "Invalid Command DB Magic\n");
return -EINVAL;
}
debugfs_create_file("cmd-db", 0400, NULL, NULL, &cmd_db_debugfs_ops);
device_set_pm_not_required(&pdev->dev);
return 0;
}
static const struct of_device_id cmd_db_match_table[] = {
{ .compatible = "qcom,cmd-db" },
{ }
};
MODULE_DEVICE_TABLE(of, cmd_db_match_table);
static struct platform_driver cmd_db_dev_driver = {
.probe = cmd_db_dev_probe,
.driver = {
.name = "cmd-db",
.of_match_table = cmd_db_match_table,
.suppress_bind_attrs = true,
},
};
static int __init cmd_db_device_init(void)
{
return platform_driver_register(&cmd_db_dev_driver);
}
core_initcall(cmd_db_device_init);
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Command DB Driver");
MODULE_LICENSE("GPL v2");
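For reference, a minimal client sketch of the lookup API exported above; the example_lookup() helper and the resource name "ldoa1" are hypothetical, invented for illustration and not part of this import:

/* Hypothetical sketch, not part of the import: resolve an RPMh resource
 * via Command DB. Assumes <soc/qcom/cmd-db.h> and a probed cmd-db device;
 * the resource name "ldoa1" is invented (real names come from firmware).
 */
static int example_lookup(struct device *dev)
{
	u32 addr;
	int ret;

	ret = cmd_db_ready();
	if (ret)
		return ret; /* -EPROBE_DEFER until the DB is memremapped */

	addr = cmd_db_read_addr("ldoa1");
	if (!addr)
		return -ENODEV; /* 0 means the id was not found */

	if (cmd_db_read_slave_id("ldoa1") != CMD_DB_HW_VRM)
		dev_warn(dev, "ldoa1 is not a VRM resource\n");

	dev_dbg(dev, "ldoa1 TCS address: %#x\n", addr);
	return 0;
}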

drivers/soc/qcom/rpmh-internal.h (new file, 148 lines)

@@ -0,0 +1,148 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*/
#ifndef __RPM_INTERNAL_H__
#define __RPM_INTERNAL_H__
#include <linux/bitmap.h>
#include <linux/wait.h>
#include <soc/qcom/tcs.h>
#define TCS_TYPE_NR 4
#define MAX_CMDS_PER_TCS 16
#define MAX_TCS_PER_TYPE 3
#define MAX_TCS_NR (MAX_TCS_PER_TYPE * TCS_TYPE_NR)
#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
struct rsc_drv;
/**
* struct tcs_group: group of Trigger Command Sets (TCS) to send state requests
* to the controller
*
* @drv: The controller.
* @type: Type of the TCS in this group - active, sleep, wake.
* @mask: Mask of the TCSes relative to all the TCSes in the RSC.
* @offset: Start of the TCS group relative to the TCSes in the RSC.
* @num_tcs: Number of TCSes in this type.
* @ncpt: Number of commands in each TCS.
* @req: Requests that are sent from the TCS; only used for ACTIVE_ONLY
* transfers (could be on a wake/sleep TCS if we are borrowing for
* an ACTIVE_ONLY transfer).
* Start: grab drv->lock, set req, set tcs_in_use, drop drv->lock,
* trigger
* End: get irq, access req,
* grab drv->lock, clear tcs_in_use, drop drv->lock
* @slots: Indicates which of @cmd_addr are occupied; only used for
* SLEEP / WAKE TCSs. Things are tightly packed in the
* case that (ncpt < MAX_CMDS_PER_TCS). That is if ncpt = 2 and
* MAX_CMDS_PER_TCS = 16 then bit[2] = the first bit in 2nd TCS.
*/
struct tcs_group {
struct rsc_drv *drv;
int type;
u32 mask;
u32 offset;
int num_tcs;
int ncpt;
const struct tcs_request *req[MAX_TCS_PER_TYPE];
DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
};
/**
* struct rpmh_request: the message to be sent to rpmh-rsc
*
* @msg: the request
* @cmd: the payload that will be part of the @msg
* @completion: triggered when request is done
* @dev: the device making the request
* @needs_free: check to free dynamically allocated request object
*/
struct rpmh_request {
struct tcs_request msg;
struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
struct completion *completion;
const struct device *dev;
bool needs_free;
};
/**
* struct rpmh_ctrlr: our representation of the controller
*
* @cache: the list of cached requests
* @cache_lock: synchronize access to the cache data
* @dirty: was the cache updated since flush
* @batch_cache: Cache sleep and wake requests sent as batch
*/
struct rpmh_ctrlr {
struct list_head cache;
spinlock_t cache_lock;
bool dirty;
struct list_head batch_cache;
};
struct rsc_ver {
u32 major;
u32 minor;
};
/**
* struct rsc_drv: the Direct Resource Voter (DRV) of the
* Resource State Coordinator controller (RSC)
*
* @name: Controller identifier.
* @base: Start address of the DRV registers in this controller.
* @tcs_base: Start address of the TCS registers in this controller.
* @id: Instance id in the controller (Direct Resource Voter).
* @num_tcs: Number of TCSes in this DRV.
* @rsc_pm: CPU PM notifier for controller.
* Used when solver mode is not present.
* @cpus_in_pm: Number of CPUs not in idle power collapse.
* Used when solver mode and "power-domains" is not present.
* @genpd_nb: PM Domain notifier for cluster genpd notifications.
* @tcs: TCS groups.
* @tcs_in_use: S/W state of the TCS; only set for ACTIVE_ONLY
* transfers, but might show a sleep/wake TCS in use if
* it was borrowed for an active_only transfer. You
* must hold the lock in this struct (AKA drv->lock) in
* order to update this.
* @lock: Synchronize state of the controller. If RPMH's cache
* lock will also be held, the order is: drv->lock then
* cache_lock.
* @tcs_wait: Wait queue used to wait for @tcs_in_use to free up a
* slot
* @client: Handle to the DRV's client.
* @dev: RSC device.
*/
struct rsc_drv {
const char *name;
void __iomem *base;
void __iomem *tcs_base;
int id;
int num_tcs;
struct notifier_block rsc_pm;
struct notifier_block genpd_nb;
atomic_t cpus_in_pm;
struct tcs_group tcs[TCS_TYPE_NR];
DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
spinlock_t lock;
wait_queue_head_t tcs_wait;
struct rpmh_ctrlr client;
struct device *dev;
struct rsc_ver ver;
u32 *regs;
};
int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
const struct tcs_request *msg);
void rpmh_rsc_invalidate(struct rsc_drv *drv);
void rpmh_rsc_write_next_wakeup(struct rsc_drv *drv);
void rpmh_tx_done(const struct tcs_request *msg);
int rpmh_flush(struct rpmh_ctrlr *ctrlr);
#endif /* __RPM_INTERNAL_H__ */
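As a worked illustration of the @slots tight-packing described in the tcs_group kerneldoc above (illustrative helper only, not part of the import):

/* Illustrative only: decode a @slots bit index under tight packing.
 * With ncpt = 2, slot 2 decodes to TCS 1, command 0 -- the same
 * example given in the tcs_group kerneldoc above.
 */
static inline void slot_to_tcs_cmd(int slot, int ncpt, int *tcs_id, int *cmd_id)
{
	*tcs_id = slot / ncpt; /* which sleep/wake TCS in the group */
	*cmd_id = slot % ncpt; /* which command register within that TCS */
}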

drivers/soc/qcom/rpmh-rsc.c (new file, 1162 lines)

(Diff suppressed because the file is too large.)

drivers/soc/qcom/rpmh.c (new file, 502 lines)

@@ -0,0 +1,502 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <soc/qcom/rpmh.h>
#include "rpmh-internal.h"
#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
struct rpmh_request name = { \
.msg = { \
.state = s, \
.cmds = name.cmd, \
.num_cmds = 0, \
.wait_for_compl = true, \
}, \
.cmd = { { 0 } }, \
.completion = q, \
.dev = device, \
.needs_free = false, \
}
#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)
/**
* struct cache_req: the request object for caching
*
* @addr: the address of the resource
* @sleep_val: the sleep vote
* @wake_val: the wake vote
* @list: linked list obj
*/
struct cache_req {
u32 addr;
u32 sleep_val;
u32 wake_val;
struct list_head list;
};
/**
* struct batch_cache_req - An entry in our batch cache
*
* @list: linked list obj
* @count: number of messages
* @rpm_msgs: the messages
*/
struct batch_cache_req {
struct list_head list;
int count;
struct rpmh_request rpm_msgs[];
};
static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
{
struct rsc_drv *drv = dev_get_drvdata(dev->parent);
return &drv->client;
}
void rpmh_tx_done(const struct tcs_request *msg)
{
struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
msg);
struct completion *compl = rpm_msg->completion;
bool free = rpm_msg->needs_free;
if (!compl)
goto exit;
/* Signal the blocking thread we are done */
complete(compl);
exit:
if (free)
kfree(rpm_msg);
}
static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
{
struct cache_req *p, *req = NULL;
list_for_each_entry(p, &ctrlr->cache, list) {
if (p->addr == addr) {
req = p;
break;
}
}
return req;
}
static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
enum rpmh_state state,
struct tcs_cmd *cmd)
{
struct cache_req *req;
unsigned long flags;
u32 old_sleep_val, old_wake_val;
spin_lock_irqsave(&ctrlr->cache_lock, flags);
req = __find_req(ctrlr, cmd->addr);
if (req)
goto existing;
req = kzalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
req = ERR_PTR(-ENOMEM);
goto unlock;
}
req->addr = cmd->addr;
req->sleep_val = req->wake_val = UINT_MAX;
list_add_tail(&req->list, &ctrlr->cache);
existing:
old_sleep_val = req->sleep_val;
old_wake_val = req->wake_val;
switch (state) {
case RPMH_ACTIVE_ONLY_STATE:
case RPMH_WAKE_ONLY_STATE:
req->wake_val = cmd->data;
break;
case RPMH_SLEEP_STATE:
req->sleep_val = cmd->data;
break;
}
ctrlr->dirty |= (req->sleep_val != old_sleep_val ||
req->wake_val != old_wake_val) &&
req->sleep_val != UINT_MAX &&
req->wake_val != UINT_MAX;
unlock:
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
return req;
}
/**
* __rpmh_write: Cache and send the RPMH request
*
* @dev: The device making the request
* @state: Active/Sleep request type
* @rpm_msg: The data that needs to be sent (cmds).
*
* Cache the RPMH request and send if the state is ACTIVE_ONLY.
* SLEEP/WAKE_ONLY requests are not sent to the controller at
* this time. Use rpmh_flush() to send them to the controller.
*/
static int __rpmh_write(const struct device *dev, enum rpmh_state state,
struct rpmh_request *rpm_msg)
{
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
int ret = -EINVAL;
struct cache_req *req;
int i;
/* Cache the request in our store and link the payload */
for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
if (IS_ERR(req))
return PTR_ERR(req);
}
if (state == RPMH_ACTIVE_ONLY_STATE) {
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
} else {
/* Clean up our call by spoofing tx_done */
ret = 0;
rpmh_tx_done(&rpm_msg->msg);
}
return ret;
}
static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 n)
{
if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
return -EINVAL;
memcpy(req->cmd, cmd, n * sizeof(*cmd));
req->msg.state = state;
req->msg.cmds = req->cmd;
req->msg.num_cmds = n;
return 0;
}
/**
* rpmh_write_async: Write a set of RPMH commands
*
* @dev: The device making the request
* @state: Active/sleep set
* @cmd: The payload data
* @n: The number of elements in payload
*
* Write a set of RPMH commands; command order is maintained and the
* set is sent as a single shot.
*/
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 n)
{
struct rpmh_request *rpm_msg;
int ret;
rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
if (!rpm_msg)
return -ENOMEM;
rpm_msg->needs_free = true;
ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
if (ret) {
kfree(rpm_msg);
return ret;
}
return __rpmh_write(dev, state, rpm_msg);
}
EXPORT_SYMBOL_GPL(rpmh_write_async);
/**
* rpmh_write: Write a set of RPMH commands and block until response
*
* @dev: The device making the request
* @state: Active/sleep set
* @cmd: The payload data
* @n: The number of elements in @cmd
*
* May sleep. Do not call from atomic contexts.
*/
int rpmh_write(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 n)
{
DECLARE_COMPLETION_ONSTACK(compl);
DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
int ret;
ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n);
if (ret)
return ret;
ret = __rpmh_write(dev, state, &rpm_msg);
if (ret)
return ret;
ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
WARN_ON(!ret);
return (ret > 0) ? 0 : -ETIMEDOUT;
}
EXPORT_SYMBOL_GPL(rpmh_write);
static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
unsigned long flags;
spin_lock_irqsave(&ctrlr->cache_lock, flags);
list_add_tail(&req->list, &ctrlr->batch_cache);
ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
struct batch_cache_req *req;
const struct rpmh_request *rpm_msg;
int ret = 0;
int i;
/* Send Sleep/Wake requests to the controller, expect no response */
list_for_each_entry(req, &ctrlr->batch_cache, list) {
for (i = 0; i < req->count; i++) {
rpm_msg = req->rpm_msgs + i;
ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
&rpm_msg->msg);
if (ret)
break;
}
}
return ret;
}
/**
* rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
* batch to finish.
*
* @dev: the device making the request
* @state: Active/sleep set
* @cmd: The payload data
* @n: The array of counts of elements in each batch, 0-terminated.
*
* Write a request to the RSC controller without caching. If the request
* state is ACTIVE, then the requests are treated as completion requests
* and sent to the controller immediately. The function waits until all the
* commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
* request is sent as fire-n-forget and no ack is expected.
*
* May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
*/
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 *n)
{
struct batch_cache_req *req;
struct rpmh_request *rpm_msgs;
struct completion *compls;
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
unsigned long time_left;
int count = 0;
int ret, i;
void *ptr;
if (!cmd || !n)
return -EINVAL;
while (n[count] > 0)
count++;
if (!count)
return -EINVAL;
ptr = kzalloc(sizeof(*req) +
count * (sizeof(req->rpm_msgs[0]) + sizeof(*compls)),
GFP_ATOMIC);
if (!ptr)
return -ENOMEM;
req = ptr;
compls = ptr + sizeof(*req) + count * sizeof(*rpm_msgs);
req->count = count;
rpm_msgs = req->rpm_msgs;
for (i = 0; i < count; i++) {
__fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
cmd += n[i];
}
if (state != RPMH_ACTIVE_ONLY_STATE) {
cache_batch(ctrlr, req);
return 0;
}
for (i = 0; i < count; i++) {
struct completion *compl = &compls[i];
init_completion(compl);
rpm_msgs[i].completion = compl;
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
if (ret) {
pr_err("Error(%d) sending RPMH message addr=%#x\n",
ret, rpm_msgs[i].msg.cmds[0].addr);
break;
}
}
time_left = RPMH_TIMEOUT_MS;
while (i--) {
time_left = wait_for_completion_timeout(&compls[i], time_left);
if (!time_left) {
/*
* Better hope they never finish because they'll signal
* the completion that we're going to free once
* we've returned from this function.
*/
WARN_ON(1);
ret = -ETIMEDOUT;
goto exit;
}
}
exit:
kfree(ptr);
return ret;
}
EXPORT_SYMBOL_GPL(rpmh_write_batch);
static int is_req_valid(struct cache_req *req)
{
return (req->sleep_val != UINT_MAX &&
req->wake_val != UINT_MAX &&
req->sleep_val != req->wake_val);
}
static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
u32 addr, u32 data)
{
DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);
/* Wake sets are always complete and sleep sets are not */
rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
rpm_msg.cmd[0].addr = addr;
rpm_msg.cmd[0].data = data;
rpm_msg.msg.num_cmds = 1;
return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
}
/**
* rpmh_flush() - Flushes the buffered sleep and wake sets to TCSes
*
* @ctrlr: Controller making request to flush cached data
*
* Return:
* * 0 - Success
* * Error code - Otherwise
*/
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
struct cache_req *p;
int ret = 0;
lockdep_assert_irqs_disabled();
/*
* Currently rpmh_flush() is only called when we think we're running
* on the last processor. If the lock is busy it means another
* processor is up and it's better to abort than spin.
*/
if (!spin_trylock(&ctrlr->cache_lock))
return -EBUSY;
if (!ctrlr->dirty) {
pr_debug("Skipping flush, TCS has latest data.\n");
goto write_next_wakeup;
}
/* Invalidate the TCSes first to avoid stale data */
rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
/* First flush the cached batch requests */
ret = flush_batch(ctrlr);
if (ret)
goto exit;
list_for_each_entry(p, &ctrlr->cache, list) {
if (!is_req_valid(p)) {
pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x",
__func__, p->addr, p->sleep_val, p->wake_val);
continue;
}
ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
p->sleep_val);
if (ret)
goto exit;
ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
p->wake_val);
if (ret)
goto exit;
}
ctrlr->dirty = false;
write_next_wakeup:
rpmh_rsc_write_next_wakeup(ctrlr_to_drv(ctrlr));
exit:
spin_unlock(&ctrlr->cache_lock);
return ret;
}
/**
* rpmh_invalidate: Invalidate sleep and wake sets in batch_cache
*
* @dev: The device making the request
*
* Invalidate the sleep and wake values in batch_cache.
*/
void rpmh_invalidate(const struct device *dev)
{
struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
struct batch_cache_req *req, *tmp;
unsigned long flags;
spin_lock_irqsave(&ctrlr->cache_lock, flags);
list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
kfree(req);
INIT_LIST_HEAD(&ctrlr->batch_cache);
ctrlr->dirty = true;
spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
}
EXPORT_SYMBOL_GPL(rpmh_invalidate);
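As a usage sketch (hypothetical consumer, not part of this import): an active vote sent to the hardware immediately, followed by a sleep vote that is only cached until rpmh_flush() runs:

/* Hypothetical sketch of the rpmh_write() flow above. The address and
 * data values are invented; @dev must be a child of the RSC device so
 * get_rpmh_ctrlr() can reach the DRV through dev->parent drvdata.
 */
static int example_vote(const struct device *dev)
{
	struct tcs_cmd cmd = {
		.addr = 0x30010, /* invented resource address */
		.data = 0x1,     /* requested resource state */
	};
	int ret;

	/* ACTIVE_ONLY: sent to the hardware now, blocks for the ACK */
	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
	if (ret)
		return ret;

	/* SLEEP: cached only; written out by rpmh_flush() on idle entry */
	cmd.data = 0x0;
	return rpmh_write(dev, RPMH_SLEEP_STATE, &cmd, 1);
}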

include/soc/qcom/cmd-db.h (new file, 48 lines)

@@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef __QCOM_COMMAND_DB_H__
#define __QCOM_COMMAND_DB_H__
#include <linux/err.h>
enum cmd_db_hw_type {
CMD_DB_HW_INVALID = 0,
CMD_DB_HW_MIN = 3,
CMD_DB_HW_ARC = CMD_DB_HW_MIN,
CMD_DB_HW_VRM = 4,
CMD_DB_HW_BCM = 5,
CMD_DB_HW_MAX = CMD_DB_HW_BCM,
CMD_DB_HW_ALL = 0xff,
};
#if IS_ENABLED(CONFIG_QCOM_COMMAND_DB)
u32 cmd_db_read_addr(const char *resource_id);
const void *cmd_db_read_aux_data(const char *resource_id, size_t *len);
bool cmd_db_match_resource_addr(u32 addr1, u32 addr2);
enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id);
int cmd_db_ready(void);
#else
static inline u32 cmd_db_read_addr(const char *resource_id)
{ return 0; }
static inline const void *cmd_db_read_aux_data(const char *resource_id, size_t *len)
{ return ERR_PTR(-ENODEV); }
static inline bool cmd_db_match_resource_addr(u32 addr1, u32 addr2)
{ return false; }
static inline enum cmd_db_hw_type cmd_db_read_slave_id(const char *resource_id)
{ return -ENODEV; }
static inline int cmd_db_ready(void)
{ return -ENODEV; }
#endif /* CONFIG_QCOM_COMMAND_DB */
#endif /* __QCOM_COMMAND_DB_H__ */

include/soc/qcom/rpmh.h (new file, 47 lines)

@@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*/
#ifndef __SOC_QCOM_RPMH_H__
#define __SOC_QCOM_RPMH_H__
#include <soc/qcom/tcs.h>
#include <linux/platform_device.h>
#if IS_ENABLED(CONFIG_QCOM_RPMH)
int rpmh_write(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 n);
int rpmh_write_async(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 n);
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 *n);
void rpmh_invalidate(const struct device *dev);
#else
static inline int rpmh_write(const struct device *dev, enum rpmh_state state,
const struct tcs_cmd *cmd, u32 n)
{ return -ENODEV; }
static inline int rpmh_write_async(const struct device *dev,
enum rpmh_state state,
const struct tcs_cmd *cmd, u32 n)
{ return -ENODEV; }
static inline int rpmh_write_batch(const struct device *dev,
enum rpmh_state state,
const struct tcs_cmd *cmd, u32 *n)
{ return -ENODEV; }
static inline void rpmh_invalidate(const struct device *dev)
{
}
#endif /* CONFIG_QCOM_RPMH */
#endif /* __SOC_QCOM_RPMH_H__ */
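A hedged sketch of the batch interface (invented values, not part of this import): @n is a zero-terminated array of per-batch command counts, so two batches of one command each look like:

/* Hypothetical sketch for rpmh_write_batch(): cmds[] holds two commands,
 * split into two one-command batches by the zero-terminated n[] array.
 */
static int example_batch(const struct device *dev, struct tcs_cmd *cmds)
{
	u32 n[] = { 1, 1, 0 }; /* batch sizes, terminated by 0 */

	return rpmh_write_batch(dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
}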

include/soc/qcom/tcs.h (new file, 81 lines)

@@ -0,0 +1,81 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SOC_QCOM_TCS_H__
#define __SOC_QCOM_TCS_H__
#define MAX_RPMH_PAYLOAD 16
/**
* rpmh_state: state for the request
*
* RPMH_SLEEP_STATE: State of the resource when the processor subsystem
* is powered down. There is no client using the
* resource actively.
* RPMH_WAKE_ONLY_STATE: Resume resource state to the value previously
* requested before the processor was powered down.
* RPMH_ACTIVE_ONLY_STATE: Active or AMC mode requests. Resource state
* is aggregated immediately.
*/
enum rpmh_state {
RPMH_SLEEP_STATE,
RPMH_WAKE_ONLY_STATE,
RPMH_ACTIVE_ONLY_STATE,
};
/**
* struct tcs_cmd: an individual request to RPMH.
*
* @addr: the address of the resource slv_id:18:16 | offset:0:15
* @data: the resource state request
* @wait: ensure that this command is complete before returning.
* Setting "wait" here only makes sense during rpmh_write_batch() for
* active-only transfers, this is because:
* rpmh_write() - Always waits.
* (DEFINE_RPMH_MSG_ONSTACK will set .wait_for_compl)
* rpmh_write_async() - Never waits.
* (There's no request completion callback)
*/
struct tcs_cmd {
u32 addr;
u32 data;
u32 wait;
};
/**
* struct tcs_request: A set of tcs_cmds sent together in a TCS
*
* @state: state for the request.
* @wait_for_compl: wait until we get a response from the h/w accelerator
* (same as setting cmd->wait for all commands in the request)
* @num_cmds: the number of @cmds in this request
* @cmds: an array of tcs_cmds
*/
struct tcs_request {
enum rpmh_state state;
u32 wait_for_compl;
u32 num_cmds;
struct tcs_cmd *cmds;
};
#define BCM_TCS_CMD_COMMIT_SHFT 30
#define BCM_TCS_CMD_COMMIT_MASK 0x40000000
#define BCM_TCS_CMD_VALID_SHFT 29
#define BCM_TCS_CMD_VALID_MASK 0x20000000
#define BCM_TCS_CMD_VOTE_X_SHFT 14
#define BCM_TCS_CMD_VOTE_MASK 0x3fff
#define BCM_TCS_CMD_VOTE_Y_SHFT 0
#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000
/* Construct a Bus Clock Manager (BCM) specific TCS command */
#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
(((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \
((valid) << BCM_TCS_CMD_VALID_SHFT) | \
((cpu_to_le32(vote_x) & \
BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \
((cpu_to_le32(vote_y) & \
BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
#endif /* __SOC_QCOM_TCS_H__ */
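To illustrate BCM_TCS_CMD(), a sketch with invented vote values; in practice the address would come from cmd_db_read_addr():

/* Illustrative only, not part of the import: build a single committed,
 * valid BCM vote. 0x40/0x80 are invented x/y bandwidth votes in BCM units.
 */
static void example_bcm_vote(struct tcs_cmd *cmd, u32 bcm_addr)
{
	cmd->addr = bcm_addr;
	cmd->data = BCM_TCS_CMD(1, 1, 0x40, 0x80);
	cmd->wait = 0; /* no per-command completion required */
}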