ARM: driver updates
These are the usual updates for SoC specific device drivers and related
subsystems that don't have their own top-level maintainers:

 - ARM SCMI/SCPI updates to allow pluggable transport layers
 - TEE subsystem cleanups
 - A new driver for the Amlogic secure power domain controller
 - Various driver updates for the NXP Layerscape DPAA2, NXP i.MX SCU and
   TI OMAP2+ sysc drivers.
 - Qualcomm SoC driver updates, including a new library module for
   "protection domain" notifications
 - Lots of smaller bugfixes and cleanups in other drivers

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-----BEGIN PGP SIGNATURE-----

iQIzBAABCgAdFiEEo6/YBQwIrVS28WGKmmx57+YAGNkFAl6EaKsACgkQmmx57+YA
GNng5RAAnSi3jLBG3PsX4XCydurlhbYDnakzp2x7kNi0aeN0mR7YT6nbmm8Iax0A
tYpUNt6mhsSnE7thAX6kIm1CIPw7oKzg5tz9TaNRJT6q1i0+MA0bWAX0KOVzruj5
xllUIoV0WsYHRjqjxWOJht7zYKTX5PoKr3weRVqHYR60hjkNFT4Myx3HpXn5nQ46
sEgic+S8WCsbffPqs5HUqkNx9R6D7RIJ72BFSF7o1wy2Brj+g0BxfxAjty+kaWwT
LtsJm0naGGRag17iij7wBnZ+odWNi80qhtthUncx/c5s517J8Z7Nq2QZJa7XhhjV
9+/1av7wKyE+V54wnCP4ACZjyE+xE3ghzVxOuzjZfbhkmNCELAx2jmQJbt9gr/t4
Uek8iANq3bo0epy4iJglfeEJuZ8rZal3oC1gU5rCun/VsqPe5OWhFCzvhu7zVgnD
Npk/IhCrp7117v5DG2Pvhd9YxigZ4ju3NW2gWukh0TemejRIzyoZyNWux+JD/jCn
V3ANtT4aCqTJ3ZOL7IaDcX5Kze4KmZZvNKvSSCndcen7u95Z8eW9sIKkSrlp3P5f
cZAdyULX22im6jXzNz4DJYFRrYgRdXfRLyrS555rGoYZGB1FB1Jhl7SUr385XpBZ
XD18oPIPm0C3Dn/nhbKC8K44tpKazET8aMCg5lO4bzI5EwqingI=
=qbsP
-----END PGP SIGNATURE-----

Merge tag 'arm-drivers-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc

Pull ARM driver updates from Arnd Bergmann:
 "These are the usual updates for SoC specific device drivers and
  related subsystems that don't have their own top-level maintainers:

   - ARM SCMI/SCPI updates to allow pluggable transport layers

   - TEE subsystem cleanups

   - A new driver for the Amlogic secure power domain controller

   - Various driver updates for the NXP Layerscape DPAA2, NXP i.MX SCU
     and TI OMAP2+ sysc drivers.

   - Qualcomm SoC driver updates, including a new library module for
     "protection domain" notifications

   - Lots of smaller bugfixes and cleanups in other drivers"

* tag 'arm-drivers-5.7' of git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc: (70 commits)
  soc: fsl: qe: fix sparse warnings for ucc_slow.c
  soc: fsl: qe: ucc_slow: remove 0 assignment for kzalloc'ed structure
  soc: fsl: qe: fix sparse warnings for ucc_fast.c
  soc: fsl: qe: fix sparse warnings for qe_ic.c
  soc: fsl: qe: fix sparse warnings for ucc.c
  soc: fsl: qe: fix sparse warning for qe_common.c
  soc: fsl: qe: fix sparse warnings for qe.c
  soc: qcom: Fix QCOM_APR dependencies
  soc: qcom: pdr: Avoid uninitialized use of found in pdr_indication_cb
  soc: imx: drop COMPILE_TEST for IMX_SCU_SOC
  firmware: imx: add COMPILE_TEST for IMX_SCU driver
  soc: imx: gpc: fix power up sequencing
  soc: imx: increase build coverage for imx8m soc driver
  soc: qcom: apr: Add avs/audio tracking functionality
  dt-bindings: soc: qcom: apr: Add protection domain bindings
  soc: qcom: Introduce Protection Domain Restart helpers
  devicetree: bindings: firmware: add ipq806x to qcom_scm
  memory: tegra: Correct debugfs clk rate-range on Tegra124
  memory: tegra: Correct debugfs clk rate-range on Tegra30
  memory: tegra: Correct debugfs clk rate-range on Tegra20
  ...
commit d18292dc07
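One item above, the new "protection domain" notification library
(soc: qcom: Introduce Protection Domain Restart helpers), is also what the
qcom,apr binding changes further down rely on. As a rough, hypothetical
consumer sketch (the example module is invented; the helper and state names
follow the include/linux/soc/qcom/pdr.h header added by that series):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/soc/qcom/pdr.h>

/* Invoked by the PDR helpers when the tracked service changes state. */
static void example_pdr_status(int state, char *service_path, void *priv)
{
    if (state == SERVREG_SERVICE_STATE_UP)
        pr_info("pd %s is up\n", service_path);
    else if (state == SERVREG_SERVICE_STATE_DOWN)
        pr_info("pd %s went down\n", service_path);
}

static int example_track_audio_pd(void)
{
    struct pdr_handle *pdr;
    struct pdr_service *pds;

    pdr = pdr_handle_alloc(example_pdr_status, NULL);
    if (IS_ERR(pdr))
        return PTR_ERR(pdr);

    /* Same name/path strings as in the apr binding examples below */
    pds = pdr_add_lookup(pdr, "avs/audio", "msm/adsp/audio_pd");
    if (IS_ERR(pds)) {
        pdr_handle_release(pdr);
        return PTR_ERR(pds);
    }

    return 0;
}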
Documentation/devicetree/bindings/bus/ti-sysc.txt

@@ -38,6 +38,7 @@ Required standard properties:
		"ti,sysc-dra7-mcasp"
		"ti,sysc-usb-host-fs"
		"ti,sysc-dra7-mcan"
		"ti,sysc-pruss"

- reg shall have register areas implemented for the interconnect
  target module in question such as revision, sysc and syss
Documentation/devicetree/bindings/firmware/qcom,scm.txt

@@ -10,6 +10,7 @@ Required properties:
 * "qcom,scm-apq8064"
 * "qcom,scm-apq8084"
 * "qcom,scm-ipq4019"
 * "qcom,scm-ipq806x"
 * "qcom,scm-msm8660"
 * "qcom,scm-msm8916"
 * "qcom,scm-msm8960"
Documentation/devicetree/bindings/power/amlogic,meson-sec-pwrc.yaml (new file)

@@ -0,0 +1,40 @@
# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
# Copyright (c) 2019 Amlogic, Inc
# Author: Jianxin Pan <jianxin.pan@amlogic.com>
%YAML 1.2
---
$id: "http://devicetree.org/schemas/power/amlogic,meson-sec-pwrc.yaml#"
$schema: "http://devicetree.org/meta-schemas/core.yaml#"

title: Amlogic Meson Secure Power Domains

maintainers:
  - Jianxin Pan <jianxin.pan@amlogic.com>

description: |+
  Secure Power Domains used in Meson A1/C1 SoCs, and should be the child node
  of secure-monitor.

properties:
  compatible:
    enum:
      - amlogic,meson-a1-pwrc

  "#power-domain-cells":
    const: 1

required:
  - compatible
  - "#power-domain-cells"

examples:
  - |
    secure-monitor {
        compatible = "amlogic,meson-gxbb-sm";

        pwrc: power-controller {
            compatible = "amlogic,meson-a1-pwrc";
            #power-domain-cells = <1>;
        };
    };
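The binding above only describes the DT contract. As an illustration of how a
provider driver can back "#power-domain-cells = <1>", here is a minimal genpd
one-cell provider sketch; it is not the actual Amlogic secure power controller
driver, and the domain bookkeeping and secure-monitor calls are placeholders:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static int example_sec_pwrc_probe(struct platform_device *pdev)
{
    struct genpd_onecell_data *data;
    struct generic_pm_domain *pd;
    int ret;

    data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
    pd = devm_kzalloc(&pdev->dev, sizeof(*pd), GFP_KERNEL);
    if (!data || !pd)
        return -ENOMEM;

    pd->name = "example-secure-domain";
    /* .power_on/.power_off hooks (secure-monitor calls) omitted here */
    ret = pm_genpd_init(pd, NULL, true);
    if (ret)
        return ret;

    data->domains = devm_kcalloc(&pdev->dev, 1, sizeof(*data->domains),
                                 GFP_KERNEL);
    if (!data->domains)
        return -ENOMEM;
    data->domains[0] = pd;
    data->num_domains = 1;

    /* The single cell in "#power-domain-cells" indexes data->domains[] */
    return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
}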
Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt

@@ -45,6 +45,18 @@ by the individual bindings for the specific service
	12 - Ultrasound stream manager.
	13 - Listen stream manager.

- qcom,protection-domain
	Usage: optional
	Value type: <stringlist>
	Definition: Must list the protection domain service name and path
		    that the particular apr service has a dependency on.
		    Possible values are :
		    "avs/audio", "msm/adsp/audio_pd".
		    "kernel/elf_loader", "msm/modem/wlan_pd".
		    "tms/servreg", "msm/adsp/audio_pd".
		    "tms/servreg", "msm/modem/wlan_pd".
		    "tms/servreg", "msm/slpi/sensor_pd".

= EXAMPLE
The following example represents a QDSP based sound card on a MSM8996 device
which uses apr as communication between Apps and QDSP.

@@ -82,3 +94,41 @@ which uses apr as communication between Apps and QDSP.
		...
	};
};

= EXAMPLE 2
The following example represents a QDSP based sound card with protection domain
dependencies specified. Here some of the apr services are dependent on services
running on protection domain hosted on ADSP/SLPI remote processors while others
have no such dependency.

apr {
	compatible = "qcom,apr-v2";
	qcom,glink-channels = "apr_audio_svc";
	qcom,apr-domain = <APR_DOMAIN_ADSP>;

	q6core {
		compatible = "qcom,q6core";
		reg = <APR_SVC_ADSP_CORE>;
	};

	q6afe: q6afe {
		compatible = "qcom,q6afe";
		reg = <APR_SVC_AFE>;
		qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
		...
	};

	q6asm: q6asm {
		compatible = "qcom,q6asm";
		reg = <APR_SVC_ASM>;
		qcom,protection-domain = "tms/servreg", "msm/slpi/sensor_pd";
		...
	};

	q6adm: q6adm {
		compatible = "qcom,q6adm";
		reg = <APR_SVC_ADM>;
		qcom,protection-domain = "avs/audio", "msm/adsp/audio_pd";
		...
	};
};
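A driver consuming this property has to split the stringlist into a
service-name/path pair before handing it to the PDR helpers. A small
illustrative sketch (the function is invented; of_property_count_strings()
and of_property_read_string_index() are the standard OF accessors):

#include <linux/of.h>

static int example_read_protection_domain(struct device_node *np,
                                          const char **service,
                                          const char **path)
{
    int count;

    count = of_property_count_strings(np, "qcom,protection-domain");
    if (count < 0)
        return count;
    if (count != 2)
        return -EINVAL; /* binding requires exactly one name/path pair */

    of_property_read_string_index(np, "qcom,protection-domain", 0, service);
    return of_property_read_string_index(np, "qcom,protection-domain", 1,
                                         path);
}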
arch/arm/mach-omap2/pdata-quirks.c

@@ -397,10 +397,16 @@ static int ti_sysc_shutdown_module(struct device *dev,
    return omap_hwmod_shutdown(cookie->data);
}

static bool ti_sysc_soc_type_gp(void)
{
    return omap_type() == OMAP2_DEVICE_TYPE_GP;
}

static struct of_dev_auxdata omap_auxdata_lookup[];

static struct ti_sysc_platform_data ti_sysc_pdata = {
    .auxdata = omap_auxdata_lookup,
    .soc_type_gp = ti_sysc_soc_type_gp,
    .init_clockdomain = ti_sysc_clkdm_init,
    .clkdm_deny_idle = ti_sysc_clkdm_deny_idle,
    .clkdm_allow_idle = ti_sysc_clkdm_allow_idle,
drivers/bus/hisi_lpc.c

@@ -357,6 +357,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
    return 0;
}

/*
 * Released firmware describes the IO port max address as 0x3fff, which is
 * the max host bus address. Fixup to a proper range. This will probably
 * never be fixed in firmware.
 */
static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
                                               struct resource *r)
{
    if (r->end != 0x3fff)
        return;

    if (r->start == 0xe4)
        r->end = 0xe4 + 0x04 - 1;
    else if (r->start == 0x2f8)
        r->end = 0x2f8 + 0x08 - 1;
    else
        dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
                 r);
}

/*
 * hisi_lpc_acpi_set_io_res - set the resources for a child
 * @child: the device node to be updated the I/O resource

@@ -418,8 +438,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
        return -ENOMEM;
    }
    count = 0;
    list_for_each_entry(rentry, &resource_list, node)
        resources[count++] = *rentry->res;
    list_for_each_entry(rentry, &resource_list, node) {
        resources[count] = *rentry->res;
        hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
        count++;
    }

    acpi_dev_free_resource_list(&resource_list);
drivers/bus/ti-sysc.c

@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
@@ -15,15 +16,47 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/iopoll.h>

#include <linux/platform_data/ti-sysc.h>

#include <dt-bindings/bus/ti-sysc.h>

#define DIS_ISP BIT(2)
#define DIS_IVA BIT(1)
#define DIS_SGX BIT(0)

#define SOC_FLAG(match, flag) { .machine = match, .data = (void *)(flag), }

#define MAX_MODULE_SOFTRESET_WAIT 10000

static const char * const reg_names[] = { "rev", "sysc", "syss", };
enum sysc_soc {
    SOC_UNKNOWN,
    SOC_2420,
    SOC_2430,
    SOC_3430,
    SOC_3630,
    SOC_4430,
    SOC_4460,
    SOC_4470,
    SOC_5430,
    SOC_AM3,
    SOC_AM4,
    SOC_DRA7,
};

struct sysc_address {
    unsigned long base;
    struct list_head node;
};

struct sysc_soc_info {
    unsigned long general_purpose:1;
    enum sysc_soc soc;
    struct mutex list_lock; /* disabled modules list lock */
    struct list_head disabled_modules;
};

enum sysc_clocks {
    SYSC_FCK,
@@ -39,6 +72,8 @@ enum sysc_clocks {
    SYSC_MAX_CLOCKS,
};

static struct sysc_soc_info *sysc_soc;
static const char * const reg_names[] = { "rev", "sysc", "syss", };
static const char * const clock_names[SYSC_MAX_CLOCKS] = {
    "fck", "ick", "opt0", "opt1", "opt2", "opt3", "opt4",
    "opt5", "opt6", "opt7",
@@ -70,11 +105,13 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
 * @child_needs_resume: runtime resume needed for child on resume from suspend
 * @disable_on_idle: status flag used for disabling modules with resets
 * @idle_work: work structure used to perform delayed idle on a module
 * @clk_enable_quirk: module specific clock enable quirk
 * @clk_disable_quirk: module specific clock disable quirk
 * @pre_reset_quirk: module specific pre-reset quirk
 * @post_reset_quirk: module specific post-reset quirk
 * @reset_done_quirk: module specific reset done quirk
 * @module_enable_quirk: module specific enable quirk
 * @module_disable_quirk: module specific disable quirk
 * @module_unlock_quirk: module specific sysconfig unlock quirk
 * @module_lock_quirk: module specific sysconfig lock quirk
 */
struct sysc {
    struct device *dev;
@@ -97,11 +134,13 @@ struct sysc {
    unsigned int needs_resume:1;
    unsigned int child_needs_resume:1;
    struct delayed_work idle_work;
    void (*clk_enable_quirk)(struct sysc *sysc);
    void (*clk_disable_quirk)(struct sysc *sysc);
    void (*pre_reset_quirk)(struct sysc *sysc);
    void (*post_reset_quirk)(struct sysc *sysc);
    void (*reset_done_quirk)(struct sysc *sysc);
    void (*module_enable_quirk)(struct sysc *sysc);
    void (*module_disable_quirk)(struct sysc *sysc);
    void (*module_unlock_quirk)(struct sysc *sysc);
    void (*module_lock_quirk)(struct sysc *sysc);
};

static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -624,7 +663,7 @@ static void sysc_check_one_child(struct sysc *ddata,
    const char *name;

    name = of_get_property(np, "ti,hwmods", NULL);
    if (name)
    if (name && !of_device_is_compatible(np, "ti,sysc"))
        dev_warn(ddata->dev, "really a child ti,hwmods property?");

    sysc_check_quirk_stdout(ddata, np);
@@ -861,6 +900,22 @@ static void sysc_show_registers(struct sysc *ddata)
            buf);
}

/**
 * sysc_write_sysconfig - handle sysconfig quirks for register write
 * @ddata: device driver data
 * @value: register value
 */
static void sysc_write_sysconfig(struct sysc *ddata, u32 value)
{
    if (ddata->module_unlock_quirk)
        ddata->module_unlock_quirk(ddata);

    sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], value);

    if (ddata->module_lock_quirk)
        ddata->module_lock_quirk(ddata);
}

#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
#define SYSC_CLOCACT_ICK 2
@@ -907,7 +962,7 @@ static int sysc_enable_module(struct device *dev)

    reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
    reg |= best_mode << regbits->sidle_shift;
    sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
    sysc_write_sysconfig(ddata, reg);

set_midle:
    /* Set MIDLE mode */
@@ -926,14 +981,14 @@ set_midle:

    reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
    reg |= best_mode << regbits->midle_shift;
    sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
    sysc_write_sysconfig(ddata, reg);

set_autoidle:
    /* Autoidle bit must enabled separately if available */
    if (regbits->autoidle_shift >= 0 &&
        ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
        reg |= 1 << regbits->autoidle_shift;
        sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
        sysc_write_sysconfig(ddata, reg);
    }

    if (ddata->module_enable_quirk)
@@ -991,7 +1046,7 @@ static int sysc_disable_module(struct device *dev)

    reg &= ~(SYSC_IDLE_MASK << regbits->midle_shift);
    reg |= best_mode << regbits->midle_shift;
    sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
    sysc_write_sysconfig(ddata, reg);

set_sidle:
    /* Set SIDLE mode */
@@ -1014,7 +1069,7 @@ set_sidle:
    if (regbits->autoidle_shift >= 0 &&
        ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
        reg |= 1 << regbits->autoidle_shift;
    sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
    sysc_write_sysconfig(ddata, reg);

    return 0;
}
@@ -1216,16 +1271,16 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
           SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
    SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
           SYSC_QUIRK_LEGACY_IDLE),
    SYSC_QUIRK("smartreflex", 0, -1, 0x24, -1, 0x00000000, 0xffffffff,
    SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff,
           SYSC_QUIRK_LEGACY_IDLE),
    SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
    SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x38, -ENODEV, 0x00000000, 0xffffffff,
           SYSC_QUIRK_LEGACY_IDLE),
    SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
           0),
    /* Some timers on omap4 and later */
    SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff,
    SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x50002100, 0xffffffff,
           0),
    SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
    SYSC_QUIRK("timer", 0, 0, 0x10, -ENODEV, 0x4fff1301, 0xffff00ff,
           0),
    SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
           SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
@@ -1238,19 +1293,27 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
           SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),

    /* Quirks that need to be set based on the module address */
    SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -1, 0x50000800, 0xffffffff,
    SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
           SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
           SYSC_QUIRK_SWSUP_SIDLE),

    /* Quirks that need to be set based on detected module */
    SYSC_QUIRK("aess", 0, 0, 0x10, -1, 0x40000000, 0xffffffff,
    SYSC_QUIRK("aess", 0, 0, 0x10, -ENODEV, 0x40000000, 0xffffffff,
           SYSC_MODULE_QUIRK_AESS),
    SYSC_QUIRK("dcan", 0x48480000, 0x20, -1, -1, 0xa3170504, 0xffffffff,
    SYSC_QUIRK("dcan", 0x48480000, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff,
           SYSC_QUIRK_CLKDM_NOAUTO),
    SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
    SYSC_QUIRK("dss", 0x4832a000, 0, 0x10, 0x14, 0x00000020, 0xffffffff,
           SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
    SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000040, 0xffffffff,
           SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
    SYSC_QUIRK("dss", 0x58000000, 0, -ENODEV, 0x14, 0x00000061, 0xffffffff,
           SYSC_QUIRK_OPT_CLKS_IN_RESET | SYSC_MODULE_QUIRK_DSS_RESET),
    SYSC_QUIRK("dwc3", 0x48880000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
           SYSC_QUIRK_CLKDM_NOAUTO),
    SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -1, 0x500a0200, 0xffffffff,
    SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
           SYSC_QUIRK_CLKDM_NOAUTO),
    SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
           SYSC_QUIRK_OPT_CLKS_NEEDED),
    SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
           SYSC_MODULE_QUIRK_HDQ1W),
    SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
@@ -1263,72 +1326,92 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
           SYSC_MODULE_QUIRK_I2C),
    SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
           SYSC_MODULE_QUIRK_I2C),
    SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0),
    SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff,
    SYSC_QUIRK("gpu", 0x50000000, 0x14, -ENODEV, -ENODEV, 0x00010201, 0xffffffff, 0),
    SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff,
           SYSC_MODULE_QUIRK_SGX),
    SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff,
    SYSC_QUIRK("lcdc", 0, 0, 0x54, -ENODEV, 0x4f201000, 0xffffffff,
           SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
    SYSC_QUIRK("rtc", 0, 0x74, 0x78, -ENODEV, 0x4eb01908, 0xffff00f0,
           SYSC_MODULE_QUIRK_RTC_UNLOCK),
    SYSC_QUIRK("tptc", 0, 0, 0x10, -ENODEV, 0x40006c00, 0xffffefff,
           SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
    SYSC_QUIRK("tptc", 0, 0, -ENODEV, -ENODEV, 0x40007c00, 0xffffffff,
           SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
    SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
           0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
    SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -1, 0x4ea2080d, 0xffffffff,
    SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
           SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
    SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
           SYSC_MODULE_QUIRK_WDT),
    /* PRUSS on am3, am4 and am5 */
    SYSC_QUIRK("pruss", 0, 0x26000, 0x26004, -ENODEV, 0x47000000, 0xff000000,
           SYSC_MODULE_QUIRK_PRUSS),
    /* Watchdog on am3 and am4 */
    SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
           SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),

#ifdef DEBUG
    SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
    SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0),
    SYSC_QUIRK("cm", 0, 0, -1, -1, 0x40000301, 0xffffffff, 0),
    SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
    SYSC_QUIRK("adc", 0, 0, 0x10, -ENODEV, 0x47300001, 0xffffffff, 0),
    SYSC_QUIRK("atl", 0, 0, -ENODEV, -ENODEV, 0x0a070100, 0xffffffff, 0),
    SYSC_QUIRK("cm", 0, 0, -ENODEV, -ENODEV, 0x40000301, 0xffffffff, 0),
    SYSC_QUIRK("control", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
    SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
           0xffff00f0, 0),
    SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
    SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
    SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
    SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
    SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0xa3170504, 0xffffffff, 0),
    SYSC_QUIRK("dcan", 0, 0x20, -ENODEV, -ENODEV, 0x4edb1902, 0xffffffff, 0),
    SYSC_QUIRK("dispc", 0x4832a400, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
    SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
    SYSC_QUIRK("dispc", 0x58001000, 0, 0x10, 0x14, 0x00000051, 0xffffffff, 0),
    SYSC_QUIRK("dmic", 0, 0, 0x10, -ENODEV, 0x50010000, 0xffffffff, 0),
    SYSC_QUIRK("dsi", 0x58004000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
    SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000030, 0xffffffff, 0),
    SYSC_QUIRK("dsi", 0x58005000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
    SYSC_QUIRK("dsi", 0x58009000, 0, 0x10, 0x14, 0x00000040, 0xffffffff, 0),
    SYSC_QUIRK("dwc3", 0, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff, 0),
    SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
    SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
    SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
    SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
    SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0),
    SYSC_QUIRK("epwmss", 0, 0, 0x4, -ENODEV, 0x47400001, 0xffffffff, 0),
    SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -ENODEV, 0, 0, 0),
    SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -ENODEV, 0x40000000 , 0xffffffff, 0),
    SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50031d00, 0xffffffff, 0),
    SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
    SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0),
    SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0),
    SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0),
    SYSC_QUIRK("mcbsp", 0, -1, 0x8c, -1, 0, 0, 0),
    SYSC_QUIRK("mcspi", 0, 0, 0x10, -1, 0x40300a0b, 0xffff00ff, 0),
    SYSC_QUIRK("iss", 0, 0, 0x10, -ENODEV, 0x40000101, 0xffffffff, 0),
    SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44306302, 0xffffffff, 0),
    SYSC_QUIRK("mcasp", 0, 0, 0x4, -ENODEV, 0x44307b02, 0xffffffff, 0),
    SYSC_QUIRK("mcbsp", 0, -ENODEV, 0x8c, -ENODEV, 0, 0, 0),
    SYSC_QUIRK("mcspi", 0, 0, 0x10, -ENODEV, 0x40300a0b, 0xffff00ff, 0),
    SYSC_QUIRK("mcspi", 0, 0, 0x110, 0x114, 0x40300a0b, 0xffffffff, 0),
    SYSC_QUIRK("mailbox", 0, 0, 0x10, -1, 0x00000400, 0xffffffff, 0),
    SYSC_QUIRK("m3", 0, 0, -1, -1, 0x5f580105, 0x0fff0f00, 0),
    SYSC_QUIRK("mailbox", 0, 0, 0x10, -ENODEV, 0x00000400, 0xffffffff, 0),
    SYSC_QUIRK("m3", 0, 0, -ENODEV, -ENODEV, 0x5f580105, 0x0fff0f00, 0),
    SYSC_QUIRK("ocp2scp", 0, 0, 0x10, 0x14, 0x50060005, 0xfffffff0, 0),
    SYSC_QUIRK("ocp2scp", 0, 0, -1, -1, 0x50060007, 0xffffffff, 0),
    SYSC_QUIRK("padconf", 0, 0, 0x10, -1, 0x4fff0800, 0xffffffff, 0),
    SYSC_QUIRK("padconf", 0, 0, -1, -1, 0x40001100, 0xffffffff, 0),
    SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000100, 0xffffffff, 0),
    SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x00004102, 0xffffffff, 0),
    SYSC_QUIRK("prcm", 0, 0, -1, -1, 0x40000400, 0xffffffff, 0),
    SYSC_QUIRK("scm", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
    SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4e8b0100, 0xffffffff, 0),
    SYSC_QUIRK("scm", 0, 0, -1, -1, 0x4f000100, 0xffffffff, 0),
    SYSC_QUIRK("scm", 0, 0, -1, -1, 0x40000900, 0xffffffff, 0),
    SYSC_QUIRK("scrm", 0, 0, -1, -1, 0x00000010, 0xffffffff, 0),
    SYSC_QUIRK("sdio", 0, 0, 0x10, -1, 0x40202301, 0xffff0ff0, 0),
    SYSC_QUIRK("ocp2scp", 0, 0, -ENODEV, -ENODEV, 0x50060007, 0xffffffff, 0),
    SYSC_QUIRK("padconf", 0, 0, 0x10, -ENODEV, 0x4fff0800, 0xffffffff, 0),
    SYSC_QUIRK("padconf", 0, 0, -ENODEV, -ENODEV, 0x40001100, 0xffffffff, 0),
    SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000100, 0xffffffff, 0),
    SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x00004102, 0xffffffff, 0),
    SYSC_QUIRK("prcm", 0, 0, -ENODEV, -ENODEV, 0x40000400, 0xffffffff, 0),
    SYSC_QUIRK("rfbi", 0x4832a800, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
    SYSC_QUIRK("rfbi", 0x58002000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
    SYSC_QUIRK("scm", 0, 0, 0x10, -ENODEV, 0x40000900, 0xffffffff, 0),
    SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4e8b0100, 0xffffffff, 0),
    SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x4f000100, 0xffffffff, 0),
    SYSC_QUIRK("scm", 0, 0, -ENODEV, -ENODEV, 0x40000900, 0xffffffff, 0),
    SYSC_QUIRK("scrm", 0, 0, -ENODEV, -ENODEV, 0x00000010, 0xffffffff, 0),
    SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
    SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
    SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
    SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40000902, 0xffffffff, 0),
    SYSC_QUIRK("slimbus", 0, 0, 0x10, -1, 0x40002903, 0xffffffff, 0),
    SYSC_QUIRK("spinlock", 0, 0, 0x10, -1, 0x50020000, 0xffffffff, 0),
    SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -1, 0x00000020, 0xffffffff, 0),
    SYSC_QUIRK("rtc", 0, 0x74, 0x78, -1, 0x4eb01908, 0xffff00f0, 0),
    SYSC_QUIRK("timer32k", 0, 0, 0x4, -1, 0x00000060, 0xffffffff, 0),
    SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
    SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
    SYSC_QUIRK("spinlock", 0, 0, 0x10, -ENODEV, 0x50020000, 0xffffffff, 0),
    SYSC_QUIRK("rng", 0, 0x1fe0, 0x1fe4, -ENODEV, 0x00000020, 0xffffffff, 0),
    SYSC_QUIRK("timer32k", 0, 0, 0x4, -ENODEV, 0x00000060, 0xffffffff, 0),
    SYSC_QUIRK("tpcc", 0, 0, -ENODEV, -ENODEV, 0x40014c00, 0xffffffff, 0),
    SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000004, 0xffffffff, 0),
    SYSC_QUIRK("usbhstll", 0, 0, 0x10, 0x14, 0x00000008, 0xffffffff, 0),
    SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, 0x14, 0x50700100, 0xffffffff, 0),
    SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0),
    SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0),
    SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff, 0),
    SYSC_QUIRK("venc", 0x58003000, 0, -ENODEV, -ENODEV, 0x00000002, 0xffffffff, 0),
    SYSC_QUIRK("vfpe", 0, 0, 0x104, -ENODEV, 0x4d001200, 0xffffffff, 0),
#endif
};
@@ -1350,16 +1433,13 @@ static void sysc_init_early_quirks(struct sysc *ddata)
        if (q->base != ddata->module_pa)
            continue;

        if (q->rev_offset >= 0 &&
            q->rev_offset != ddata->offsets[SYSC_REVISION])
        if (q->rev_offset != ddata->offsets[SYSC_REVISION])
            continue;

        if (q->sysc_offset >= 0 &&
            q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
        if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
            continue;

        if (q->syss_offset >= 0 &&
            q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
        if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
            continue;

        ddata->name = q->name;
@@ -1379,16 +1459,13 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
        if (q->base && q->base != ddata->module_pa)
            continue;

        if (q->rev_offset >= 0 &&
            q->rev_offset != ddata->offsets[SYSC_REVISION])
        if (q->rev_offset != ddata->offsets[SYSC_REVISION])
            continue;

        if (q->sysc_offset >= 0 &&
            q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
        if (q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG])
            continue;

        if (q->syss_offset >= 0 &&
            q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
        if (q->syss_offset != ddata->offsets[SYSC_SYSSTATUS])
            continue;

        if (q->revision == ddata->revision ||
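The two hunks above show the point of the -1 to -ENODEV conversion in the
quirk table: once both the quirk entries and the probed ddata->offsets[]
use -ENODEV for a missing register, a plain equality compare also covers the
"register absent on both sides" case and the separate ">= 0" validity guards
can be dropped. A toy illustration, not driver code:

/* Both sides use -ENODEV as the "register not present" sentinel, so one
 * equality test replaces the old "offset valid && offset mismatch" pair
 * of checks.
 */
static bool quirk_regs_match(int quirk_offset, int probed_offset)
{
    return quirk_offset == probed_offset;
}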
@@ -1400,6 +1477,128 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
    }
}

/*
 * DSS needs dispc outputs disabled to reset modules. Returns mask of
 * enabled DSS interrupts. Eventually we may be able to do this on
 * dispc init rather than top-level DSS init.
 */
static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
                            bool disable)
{
    bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
    const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
    int manager_count;
    bool framedonetv_irq;
    u32 val, irq_mask = 0;

    switch (sysc_soc->soc) {
    case SOC_2420 ... SOC_3630:
        manager_count = 2;
        framedonetv_irq = false;
        break;
    case SOC_4430 ... SOC_4470:
        manager_count = 3;
        break;
    case SOC_5430:
    case SOC_DRA7:
        manager_count = 4;
        break;
    case SOC_AM4:
        manager_count = 1;
        break;
    case SOC_UNKNOWN:
    default:
        return 0;
    };

    /* Remap the whole module range to be able to reset dispc outputs */
    devm_iounmap(ddata->dev, ddata->module_va);
    ddata->module_va = devm_ioremap(ddata->dev,
                                    ddata->module_pa,
                                    ddata->module_size);
    if (!ddata->module_va)
        return -EIO;

    /* DISP_CONTROL */
    val = sysc_read(ddata, dispc_offset + 0x40);
    lcd_en = val & lcd_en_mask;
    digit_en = val & digit_en_mask;
    if (lcd_en)
        irq_mask |= BIT(0); /* FRAMEDONE */
    if (digit_en) {
        if (framedonetv_irq)
            irq_mask |= BIT(24); /* FRAMEDONETV */
        else
            irq_mask |= BIT(2) | BIT(3); /* EVSYNC bits */
    }
    if (disable & (lcd_en | digit_en))
        sysc_write(ddata, dispc_offset + 0x40,
                   val & ~(lcd_en_mask | digit_en_mask));

    if (manager_count <= 2)
        return irq_mask;

    /* DISPC_CONTROL2 */
    val = sysc_read(ddata, dispc_offset + 0x238);
    lcd2_en = val & lcd_en_mask;
    if (lcd2_en)
        irq_mask |= BIT(22); /* FRAMEDONE2 */
    if (disable && lcd2_en)
        sysc_write(ddata, dispc_offset + 0x238,
                   val & ~lcd_en_mask);

    if (manager_count <= 3)
        return irq_mask;

    /* DISPC_CONTROL3 */
    val = sysc_read(ddata, dispc_offset + 0x848);
    lcd3_en = val & lcd_en_mask;
    if (lcd3_en)
        irq_mask |= BIT(30); /* FRAMEDONE3 */
    if (disable && lcd3_en)
        sysc_write(ddata, dispc_offset + 0x848,
                   val & ~lcd_en_mask);

    return irq_mask;
}

/* DSS needs child outputs disabled and SDI registers cleared for reset */
static void sysc_pre_reset_quirk_dss(struct sysc *ddata)
{
    const int dispc_offset = 0x1000;
    int error;
    u32 irq_mask, val;

    /* Get enabled outputs */
    irq_mask = sysc_quirk_dispc(ddata, dispc_offset, false);
    if (!irq_mask)
        return;

    /* Clear IRQSTATUS */
    sysc_write(ddata, dispc_offset + 0x18, irq_mask);

    /* Disable outputs */
    val = sysc_quirk_dispc(ddata, dispc_offset, true);

    /* Poll IRQSTATUS */
    error = readl_poll_timeout(ddata->module_va + dispc_offset + 0x18,
                               val, val != irq_mask, 100, 50);
    if (error)
        dev_warn(ddata->dev, "%s: timed out %08x !+ %08x\n",
                 __func__, val, irq_mask);

    if (sysc_soc->soc == SOC_3430) {
        /* Clear DSS_SDI_CONTROL */
        sysc_write(ddata, 0x44, 0);

        /* Clear DSS_PLL_CONTROL */
        sysc_write(ddata, 0x48, 0);
    }

    /* Clear DSS_CONTROL to switch DSS clock sources to PRCM if not */
    sysc_write(ddata, 0x40, 0);
}

/* 1-wire needs module's internal clocks enabled for reset */
static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
{
@@ -1419,7 +1618,7 @@ static void sysc_module_enable_quirk_aess(struct sysc *ddata)
    sysc_write(ddata, offset, 1);
}

/* I2C needs extra enable bit toggling for reset */
/* I2C needs to be disabled for reset */
static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
{
    int offset;

@@ -1440,14 +1639,48 @@ static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
    sysc_write(ddata, offset, val);
}

static void sysc_clk_enable_quirk_i2c(struct sysc *ddata)
static void sysc_pre_reset_quirk_i2c(struct sysc *ddata)
{
    sysc_clk_quirk_i2c(ddata, false);
}

static void sysc_post_reset_quirk_i2c(struct sysc *ddata)
{
    sysc_clk_quirk_i2c(ddata, true);
}

static void sysc_clk_disable_quirk_i2c(struct sysc *ddata)
/* RTC on am3 and 4 needs to be unlocked and locked for sysconfig */
static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
{
    sysc_clk_quirk_i2c(ddata, false);
    u32 val, kick0_val = 0, kick1_val = 0;
    unsigned long flags;
    int error;

    if (!lock) {
        kick0_val = 0x83e70b13;
        kick1_val = 0x95a4f1e0;
    }

    local_irq_save(flags);
    /* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
    error = readl_poll_timeout(ddata->module_va + 0x44, val,
                               !(val & BIT(0)), 100, 50);
    if (error)
        dev_warn(ddata->dev, "rtc busy timeout\n");
    /* Now we have ~15 microseconds to read/write various registers */
    sysc_write(ddata, 0x6c, kick0_val);
    sysc_write(ddata, 0x70, kick1_val);
    local_irq_restore(flags);
}

static void sysc_module_unlock_quirk_rtc(struct sysc *ddata)
{
    sysc_quirk_rtc(ddata, false);
}

static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
{
    sysc_quirk_rtc(ddata, true);
}

/* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */
@@ -1483,20 +1716,30 @@ static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
        dev_warn(ddata->dev, "wdt disable step2 failed\n");
}

/* PRUSS needs to set MSTANDBY_INIT inorder to idle properly */
static void sysc_module_disable_quirk_pruss(struct sysc *ddata)
{
    u32 reg;

    reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
    reg |= SYSC_PRUSS_STANDBY_INIT;
    sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
}

static void sysc_init_module_quirks(struct sysc *ddata)
{
    if (ddata->legacy_mode || !ddata->name)
        return;

    if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
        ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w;
        ddata->pre_reset_quirk = sysc_pre_reset_quirk_hdq1w;

        return;
    }

    if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
        ddata->clk_enable_quirk = sysc_clk_enable_quirk_i2c;
        ddata->clk_disable_quirk = sysc_clk_disable_quirk_i2c;
        ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
        ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;

        return;
    }

@@ -1504,6 +1747,16 @@ static void sysc_init_module_quirks(struct sysc *ddata)
    if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_AESS)
        ddata->module_enable_quirk = sysc_module_enable_quirk_aess;

    if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_DSS_RESET)
        ddata->pre_reset_quirk = sysc_pre_reset_quirk_dss;

    if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_RTC_UNLOCK) {
        ddata->module_unlock_quirk = sysc_module_unlock_quirk_rtc;
        ddata->module_lock_quirk = sysc_module_lock_quirk_rtc;

        return;
    }

    if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
        ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
@@ -1511,6 +1764,9 @@ static void sysc_init_module_quirks(struct sysc *ddata)
        ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
        ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
    }

    if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_PRUSS)
        ddata->module_disable_quirk = sysc_module_disable_quirk_pruss;
}

static int sysc_clockdomain_init(struct sysc *ddata)
@@ -1572,7 +1828,7 @@ static int sysc_reset(struct sysc *ddata)
    sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
    syss_offset = ddata->offsets[SYSC_SYSSTATUS];

    if (ddata->legacy_mode || sysc_offset < 0 ||
    if (ddata->legacy_mode ||
        ddata->cap->regbits->srst_shift < 0 ||
        ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
        return 0;

@@ -1584,19 +1840,21 @@ static int sysc_reset(struct sysc *ddata)
    else
        syss_done = ddata->cfg.syss_mask;

    if (ddata->clk_disable_quirk)
        ddata->clk_disable_quirk(ddata);
    if (ddata->pre_reset_quirk)
        ddata->pre_reset_quirk(ddata);

    sysc_val = sysc_read_sysconfig(ddata);
    sysc_val |= sysc_mask;
    sysc_write(ddata, sysc_offset, sysc_val);
    if (sysc_offset >= 0) {
        sysc_val = sysc_read_sysconfig(ddata);
        sysc_val |= sysc_mask;
        sysc_write(ddata, sysc_offset, sysc_val);
    }

    if (ddata->cfg.srst_udelay)
        usleep_range(ddata->cfg.srst_udelay,
                     ddata->cfg.srst_udelay * 2);

    if (ddata->clk_enable_quirk)
        ddata->clk_enable_quirk(ddata);
    if (ddata->post_reset_quirk)
        ddata->post_reset_quirk(ddata);

    /* Poll on reset status */
    if (syss_offset >= 0) {
@@ -2314,6 +2572,16 @@ static const struct sysc_capabilities sysc_dra7_mcan = {
    .mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
};

/*
 * PRUSS found on some AM33xx, AM437x and AM57xx SoCs
 */
static const struct sysc_capabilities sysc_pruss = {
    .type = TI_SYSC_PRUSS,
    .sysc_mask = SYSC_PRUSS_STANDBY_INIT | SYSC_PRUSS_SUB_MWAIT,
    .regbits = &sysc_regbits_omap4_simple,
    .mod_quirks = SYSC_MODULE_QUIRK_PRUSS,
};

static int sysc_init_pdata(struct sysc *ddata)
{
    struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
@@ -2387,6 +2655,154 @@ static void ti_sysc_idle(struct work_struct *work)
    pm_runtime_put_sync(ddata->dev);
}

/*
 * SoC model and features detection. Only needed for SoCs that need
 * special handling for quirks, no need to list others.
 */
static const struct soc_device_attribute sysc_soc_match[] = {
    SOC_FLAG("OMAP242*", SOC_2420),
    SOC_FLAG("OMAP243*", SOC_2430),
    SOC_FLAG("OMAP3[45]*", SOC_3430),
    SOC_FLAG("OMAP3[67]*", SOC_3630),
    SOC_FLAG("OMAP443*", SOC_4430),
    SOC_FLAG("OMAP446*", SOC_4460),
    SOC_FLAG("OMAP447*", SOC_4470),
    SOC_FLAG("OMAP54*", SOC_5430),
    SOC_FLAG("AM433", SOC_AM3),
    SOC_FLAG("AM43*", SOC_AM4),
    SOC_FLAG("DRA7*", SOC_DRA7),

    { /* sentinel */ },
};

/*
 * List of SoCs variants with disabled features. By default we assume all
 * devices in the device tree are available so no need to list those SoCs.
 */
static const struct soc_device_attribute sysc_soc_feat_match[] = {
    /* OMAP3430/3530 and AM3517 variants with some accelerators disabled */
    SOC_FLAG("AM3505", DIS_SGX),
    SOC_FLAG("OMAP3525", DIS_SGX),
    SOC_FLAG("OMAP3515", DIS_IVA | DIS_SGX),
    SOC_FLAG("OMAP3503", DIS_ISP | DIS_IVA | DIS_SGX),

    /* OMAP3630/DM3730 variants with some accelerators disabled */
    SOC_FLAG("AM3703", DIS_IVA | DIS_SGX),
    SOC_FLAG("DM3725", DIS_SGX),
    SOC_FLAG("OMAP3611", DIS_ISP | DIS_IVA | DIS_SGX),
    SOC_FLAG("OMAP3615/AM3715", DIS_IVA),
    SOC_FLAG("OMAP3621", DIS_ISP),

    { /* sentinel */ },
};

static int sysc_add_disabled(unsigned long base)
{
    struct sysc_address *disabled_module;

    disabled_module = kzalloc(sizeof(*disabled_module), GFP_KERNEL);
    if (!disabled_module)
        return -ENOMEM;

    disabled_module->base = base;

    mutex_lock(&sysc_soc->list_lock);
    list_add(&disabled_module->node, &sysc_soc->disabled_modules);
    mutex_unlock(&sysc_soc->list_lock);

    return 0;
}

/*
 * One time init to detect the booted SoC and disable unavailable features.
 * Note that we initialize static data shared across all ti-sysc instances
 * so ddata is only used for SoC type. This can be called from module_init
 * once we no longer need to rely on platform data.
 */
static int sysc_init_soc(struct sysc *ddata)
{
    const struct soc_device_attribute *match;
    struct ti_sysc_platform_data *pdata;
    unsigned long features = 0;

    if (sysc_soc)
        return 0;

    sysc_soc = kzalloc(sizeof(*sysc_soc), GFP_KERNEL);
    if (!sysc_soc)
        return -ENOMEM;

    mutex_init(&sysc_soc->list_lock);
    INIT_LIST_HEAD(&sysc_soc->disabled_modules);
    sysc_soc->general_purpose = true;

    pdata = dev_get_platdata(ddata->dev);
    if (pdata && pdata->soc_type_gp)
        sysc_soc->general_purpose = pdata->soc_type_gp();

    match = soc_device_match(sysc_soc_match);
    if (match && match->data)
        sysc_soc->soc = (int)match->data;

    match = soc_device_match(sysc_soc_feat_match);
    if (!match)
        return 0;

    if (match->data)
        features = (unsigned long)match->data;

    /*
     * Add disabled devices to the list based on the module base.
     * Note that this must be done before we attempt to access the
     * device and have module revision checks working.
     */
    if (features & DIS_ISP)
        sysc_add_disabled(0x480bd400);
    if (features & DIS_IVA)
        sysc_add_disabled(0x5d000000);
    if (features & DIS_SGX)
        sysc_add_disabled(0x50000000);

    return 0;
}

static void sysc_cleanup_soc(void)
{
    struct sysc_address *disabled_module;
    struct list_head *pos, *tmp;

    if (!sysc_soc)
        return;

    mutex_lock(&sysc_soc->list_lock);
    list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
        disabled_module = list_entry(pos, struct sysc_address, node);
        list_del(pos);
        kfree(disabled_module);
    }
    mutex_unlock(&sysc_soc->list_lock);
}

static int sysc_check_disabled_devices(struct sysc *ddata)
{
    struct sysc_address *disabled_module;
    struct list_head *pos;
    int error = 0;

    mutex_lock(&sysc_soc->list_lock);
    list_for_each(pos, &sysc_soc->disabled_modules) {
        disabled_module = list_entry(pos, struct sysc_address, node);
        if (ddata->module_pa == disabled_module->base) {
            dev_dbg(ddata->dev, "module disabled for this SoC\n");
            error = -ENODEV;
            break;
        }
    }
    mutex_unlock(&sysc_soc->list_lock);

    return error;
}

static const struct of_device_id sysc_match_table[] = {
    { .compatible = "simple-bus", },
    { /* sentinel */ },
@@ -2405,6 +2821,10 @@ static int sysc_probe(struct platform_device *pdev)
    ddata->dev = &pdev->dev;
    platform_set_drvdata(pdev, ddata);

    error = sysc_init_soc(ddata);
    if (error)
        return error;

    error = sysc_init_match(ddata);
    if (error)
        return error;

@@ -2435,6 +2855,10 @@ static int sysc_probe(struct platform_device *pdev)

    sysc_init_early_quirks(ddata);

    error = sysc_check_disabled_devices(ddata);
    if (error)
        return error;

    error = sysc_get_clocks(ddata);
    if (error)
        return error;

@@ -2539,6 +2963,7 @@ static const struct of_device_id sysc_match[] = {
    { .compatible = "ti,sysc-usb-host-fs",
      .data = &sysc_omap4_usb_host_fs, },
    { .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
    { .compatible = "ti,sysc-pruss", .data = &sysc_pruss, },
    { },
};
MODULE_DEVICE_TABLE(of, sysc_match);

@@ -2565,6 +2990,7 @@ static void __exit sysc_exit(void)
{
    bus_unregister_notifier(&platform_bus_type, &sysc_nb);
    platform_driver_unregister(&sysc_driver);
    sysc_cleanup_soc();
}
module_exit(sysc_exit);
drivers/firmware/arm_scmi/Makefile

@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o
obj-y = scmi-bus.o scmi-driver.o scmi-protocols.o scmi-transport.o
scmi-bus-y = bus.o
scmi-driver-y = driver.o
scmi-transport-y = mailbox.o shmem.o
scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o
obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o
drivers/firmware/arm_scmi/common.h

@@ -33,8 +33,8 @@ enum scmi_common_cmd {
/**
 * struct scmi_msg_resp_prot_version - Response for a message
 *
 * @major_version: Major version of the ABI that firmware supports
 * @minor_version: Minor version of the ABI that firmware supports
 * @major_version: Major version of the ABI that firmware supports
 *
 * In general, ABI version changes follow the rule that minor version increments
 * are backward compatible. Major revision changes in ABI may not be

@@ -47,6 +47,19 @@ struct scmi_msg_resp_prot_version {
    __le16 major_version;
};

#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND 0
#define MSG_TYPE_DELAYED_RESP 2
#define MSG_TYPE_NOTIFICATION 3
#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)

/**
 * struct scmi_msg_hdr - Message(Tx/Rx) header
 *

@@ -67,6 +80,33 @@ struct scmi_msg_hdr {
    bool poll_completion;
};

/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 * protocol id and sequence id.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
    return FIELD_PREP(MSG_ID_MASK, hdr->id) |
        FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
        FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}

/**
 * unpack_scmi_header() - unpacks and records message and protocol id
 *
 * @msg_hdr: 32-bit packed message header sent from the platform
 * @hdr: pointer to header to fetch message and protocol id.
 */
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
    hdr->id = MSG_XTRACT_ID(msg_hdr);
    hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
}

/**
 * struct scmi_msg - Message(Tx/Rx) structure
 *

@@ -88,7 +128,7 @@ struct scmi_msg {
 * message. If request-ACK protocol is used, we can reuse the same
 * buffer for the rx path as we use for the tx path.
 * @done: command message transmit completion event
 * @async: pointer to delayed response message received event completion
 * @async_done: pointer to delayed response message received event completion
 */
struct scmi_xfer {
    int transfer_id;

@@ -113,3 +153,74 @@ void scmi_setup_protocol_implemented(const struct scmi_handle *handle,
                                     u8 *prot_imp);

int scmi_base_protocol_init(struct scmi_handle *h);

/* SCMI Transport */
/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *       channel
 * @handle: Pointer to SCMI entity handle
 * @transport_info: Transport layer related information
 */
struct scmi_chan_info {
    struct device *dev;
    struct scmi_handle *handle;
    void *transport_info;
};

/**
 * struct scmi_transport_ops - Structure representing a SCMI transport ops
 *
 * @chan_available: Callback to check if channel is available or not
 * @chan_setup: Callback to allocate and setup a channel
 * @chan_free: Callback to free a channel
 * @send_message: Callback to send a message
 * @mark_txdone: Callback to mark tx as done
 * @fetch_response: Callback to fetch response
 * @poll_done: Callback to poll transfer status
 */
struct scmi_transport_ops {
    bool (*chan_available)(struct device *dev, int idx);
    int (*chan_setup)(struct scmi_chan_info *cinfo, struct device *dev,
                      bool tx);
    int (*chan_free)(int id, void *p, void *data);
    int (*send_message)(struct scmi_chan_info *cinfo,
                        struct scmi_xfer *xfer);
    void (*mark_txdone)(struct scmi_chan_info *cinfo, int ret);
    void (*fetch_response)(struct scmi_chan_info *cinfo,
                           struct scmi_xfer *xfer);
    bool (*poll_done)(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer);
};

/**
 * struct scmi_desc - Description of SoC integration
 *
 * @ops: Pointer to the transport specific ops structure
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages that can be pending
 *      simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct scmi_desc {
    struct scmi_transport_ops *ops;
    int max_rx_timeout_ms;
    int max_msg;
    int max_msg_size;
};

extern const struct scmi_desc scmi_mailbox_desc;

void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr);
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id);

/* shmem related declarations */
struct scmi_shared_mem;

void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
                      struct scmi_xfer *xfer);
u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem);
void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
                          struct scmi_xfer *xfer);
bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
                     struct scmi_xfer *xfer);
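With struct scmi_chan_info, scmi_transport_ops and scmi_desc now declared
here instead of privately in driver.c, a transport backend only needs to fill
in the ops table and export a descriptor, as the mailbox transport does with
scmi_mailbox_desc. A hypothetical skeleton (all dummy_* names are invented;
only the structures declared above are real):

#include "common.h"

static bool dummy_chan_available(struct device *dev, int idx)
{
    return idx == 0; /* pretend only a tx channel exists */
}

static int dummy_send_message(struct scmi_chan_info *cinfo,
                              struct scmi_xfer *xfer)
{
    /*
     * A real transport would write pack_scmi_header(&xfer->hdr) and
     * xfer->tx into its shared area here, signal the platform, and
     * later call scmi_rx_callback() when the reply arrives.
     */
    return 0;
}

static struct scmi_transport_ops dummy_ops = {
    .chan_available = dummy_chan_available,
    .send_message = dummy_send_message,
};

const struct scmi_desc scmi_dummy_desc = {
    .ops = &dummy_ops,
    .max_rx_timeout_ms = 30,
    .max_msg = 20,
    .max_msg_size = 128,
};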
drivers/firmware/arm_scmi/driver.c

@@ -19,12 +19,10 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/processor.h>
#include <linux/semaphore.h>
#include <linux/slab.h>

#include "common.h"

@@ -32,19 +30,6 @@
#define CREATE_TRACE_POINTS
#include <trace/events/scmi.h>

#define MSG_ID_MASK GENMASK(7, 0)
#define MSG_XTRACT_ID(hdr) FIELD_GET(MSG_ID_MASK, (hdr))
#define MSG_TYPE_MASK GENMASK(9, 8)
#define MSG_XTRACT_TYPE(hdr) FIELD_GET(MSG_TYPE_MASK, (hdr))
#define MSG_TYPE_COMMAND 0
#define MSG_TYPE_DELAYED_RESP 2
#define MSG_TYPE_NOTIFICATION 3
#define MSG_PROTOCOL_ID_MASK GENMASK(17, 10)
#define MSG_XTRACT_PROT_ID(hdr) FIELD_GET(MSG_PROTOCOL_ID_MASK, (hdr))
#define MSG_TOKEN_ID_MASK GENMASK(27, 18)
#define MSG_XTRACT_TOKEN(hdr) FIELD_GET(MSG_TOKEN_ID_MASK, (hdr))
#define MSG_TOKEN_MAX (MSG_XTRACT_TOKEN(MSG_TOKEN_ID_MASK) + 1)

enum scmi_error_codes {
    SCMI_SUCCESS = 0, /* Success */
    SCMI_ERR_SUPPORT = -1, /* Not supported */

@@ -82,46 +67,14 @@ struct scmi_xfers_info {
    spinlock_t xfer_lock;
};

/**
 * struct scmi_desc - Description of SoC integration
 *
 * @max_rx_timeout_ms: Timeout for communication with SoC (in Milliseconds)
 * @max_msg: Maximum number of messages that can be pending
 *      simultaneously in the system
 * @max_msg_size: Maximum size of data per message that can be handled.
 */
struct scmi_desc {
    int max_rx_timeout_ms;
    int max_msg;
    int max_msg_size;
};

/**
 * struct scmi_chan_info - Structure representing a SCMI channel information
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox channel
 * @payload: Transmit/Receive mailbox channel payload area
 * @dev: Reference to device in the SCMI hierarchy corresponding to this
 *       channel
 * @handle: Pointer to SCMI entity handle
 */
struct scmi_chan_info {
    struct mbox_client cl;
    struct mbox_chan *chan;
    void __iomem *payload;
    struct device *dev;
    struct scmi_handle *handle;
};

/**
 * struct scmi_info - Structure representing a SCMI instance
 *
 * @dev: Device pointer
 * @desc: SoC description for this instance
 * @handle: Instance of SCMI handle to send to clients
 * @version: SCMI revision information containing protocol version,
 *      implementation version and (sub-)vendor identification.
 * @handle: Instance of SCMI handle to send to clients
 * @tx_minfo: Universal Transmit Message management info
 * @tx_idr: IDR object to map protocol id to Tx channel info pointer
 * @rx_idr: IDR object to map protocol id to Rx channel info pointer

@@ -143,27 +96,8 @@ struct scmi_info {
    int users;
};

#define client_to_scmi_chan_info(c) container_of(c, struct scmi_chan_info, cl)
#define handle_to_scmi_info(h) container_of(h, struct scmi_info, handle)

/*
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
    __le32 reserved;
    __le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE BIT(0)
    __le32 reserved1[2];
    __le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED BIT(0)
    __le32 length;
    __le32 msg_header;
    u8 msg_payload[0];
};

static const int scmi_linux_errmap[] = {
    /* better than switch case as long as return value is continuous */
    0, /* SCMI_SUCCESS */
@ -199,77 +133,6 @@ static inline void scmi_dump_header_dbg(struct device *dev,
|
||||
hdr->id, hdr->seq, hdr->protocol_id);
|
||||
}
|
||||
|
||||
static void scmi_fetch_response(struct scmi_xfer *xfer,
|
||||
struct scmi_shared_mem __iomem *mem)
|
||||
{
|
||||
xfer->hdr.status = ioread32(mem->msg_payload);
|
||||
/* Skip the length of header and status in payload area i.e 8 bytes */
|
||||
xfer->rx.len = min_t(size_t, xfer->rx.len, ioread32(&mem->length) - 8);
|
||||
|
||||
/* Take a copy to the rx buffer.. */
|
||||
memcpy_fromio(xfer->rx.buf, mem->msg_payload + 4, xfer->rx.len);
|
||||
}
|
||||
|
||||
/**
 * pack_scmi_header() - packs and returns 32-bit header
 *
 * @hdr: pointer to header containing all the information on message id,
 *      protocol id and sequence id.
 *
 * Return: 32-bit packed message header to be sent to the platform.
 */
static inline u32 pack_scmi_header(struct scmi_msg_hdr *hdr)
{
        return FIELD_PREP(MSG_ID_MASK, hdr->id) |
                FIELD_PREP(MSG_TOKEN_ID_MASK, hdr->seq) |
                FIELD_PREP(MSG_PROTOCOL_ID_MASK, hdr->protocol_id);
}

/**
 * unpack_scmi_header() - unpacks and records message and protocol id
 *
 * @msg_hdr: 32-bit packed message header sent from the platform
 * @hdr: pointer to header to fetch message and protocol id.
 */
static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr)
{
        hdr->id = MSG_XTRACT_ID(msg_hdr);
        hdr->protocol_id = MSG_XTRACT_PROT_ID(msg_hdr);
}

/**
 * scmi_tx_prepare() - mailbox client callback to prepare for the transfer
 *
 * @cl: client pointer
 * @m: mailbox message
 *
 * This function prepares the shared memory which contains the header and the
 * payload.
 */
static void scmi_tx_prepare(struct mbox_client *cl, void *m)
{
        struct scmi_xfer *t = m;
        struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
        struct scmi_shared_mem __iomem *mem = cinfo->payload;

        /*
         * Ideally channel must be free by now unless OS timeout last
         * request and platform continued to process the same, wait
         * until it releases the shared memory, otherwise we may endup
         * overwriting its response with new message payload or vice-versa
         */
        spin_until_cond(ioread32(&mem->channel_status) &
                        SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
        /* Mark channel busy + clear error */
        iowrite32(0x0, &mem->channel_status);
        iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
                  &mem->flags);
        iowrite32(sizeof(mem->msg_header) + t->tx.len, &mem->length);
        iowrite32(pack_scmi_header(&t->hdr), &mem->msg_header);
        if (t->tx.buf)
                memcpy_toio(mem->msg_payload, t->tx.buf, t->tx.len);
}

/**
 * scmi_xfer_get() - Allocate one message
 *
@@ -338,10 +201,10 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
}

/**
 * scmi_rx_callback() - mailbox client callback for receive messages
 * scmi_rx_callback() - callback for receiving messages
 *
 * @cl: client pointer
 * @m: mailbox message
 * @cinfo: SCMI channel info
 * @msg_hdr: Message header
 *
 * Processes one received message to appropriate transfer information and
 * signals completion of the transfer.
@@ -349,21 +212,14 @@ __scmi_xfer_put(struct scmi_xfers_info *minfo, struct scmi_xfer *xfer)
 * NOTE: This function will be invoked in IRQ context, hence should be
 * as optimal as possible.
 */
static void scmi_rx_callback(struct mbox_client *cl, void *m)
void scmi_rx_callback(struct scmi_chan_info *cinfo, u32 msg_hdr)
{
        u8 msg_type;
        u32 msg_hdr;
        u16 xfer_id;
        struct scmi_xfer *xfer;
        struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
        struct device *dev = cinfo->dev;
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);
        struct scmi_xfers_info *minfo = &info->tx_minfo;
        struct scmi_shared_mem __iomem *mem = cinfo->payload;

        msg_hdr = ioread32(&mem->msg_header);
        msg_type = MSG_XTRACT_TYPE(msg_hdr);
        xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
        u16 xfer_id = MSG_XTRACT_TOKEN(msg_hdr);
        u8 msg_type = MSG_XTRACT_TYPE(msg_hdr);
        struct device *dev = cinfo->dev;
        struct scmi_xfer *xfer;

        if (msg_type == MSG_TYPE_NOTIFICATION)
                return; /* Notifications not yet supported */
@@ -378,7 +234,7 @@ static void scmi_rx_callback(struct mbox_client *cl, void *m)

        scmi_dump_header_dbg(dev, &xfer->hdr);

        scmi_fetch_response(xfer, mem);
        info->desc->ops->fetch_response(cinfo, xfer);

        trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id,
                           xfer->hdr.protocol_id, xfer->hdr.seq,
@@ -403,28 +259,15 @@ void scmi_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
        __scmi_xfer_put(&info->tx_minfo, xfer);
}
static bool
scmi_xfer_poll_done(const struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
        struct scmi_shared_mem __iomem *mem = cinfo->payload;
        u16 xfer_id = MSG_XTRACT_TOKEN(ioread32(&mem->msg_header));

        if (xfer->hdr.seq != xfer_id)
                return false;

        return ioread32(&mem->channel_status) &
                (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
                 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}

#define SCMI_MAX_POLL_TO_NS     (100 * NSEC_PER_USEC)

static bool scmi_xfer_done_no_timeout(const struct scmi_chan_info *cinfo,
static bool scmi_xfer_done_no_timeout(struct scmi_chan_info *cinfo,
                                      struct scmi_xfer *xfer, ktime_t stop)
{
        ktime_t __cur = ktime_get();
        struct scmi_info *info = handle_to_scmi_info(cinfo->handle);

        return scmi_xfer_poll_done(cinfo, xfer) || ktime_after(__cur, stop);
        return info->desc->ops->poll_done(cinfo, xfer) ||
               ktime_after(ktime_get(), stop);
}

/**
@@ -453,29 +296,26 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
                xfer->hdr.protocol_id, xfer->hdr.seq,
                xfer->hdr.poll_completion);

        ret = mbox_send_message(cinfo->chan, xfer);
        ret = info->desc->ops->send_message(cinfo, xfer);
        if (ret < 0) {
                dev_dbg(dev, "mbox send fail %d\n", ret);
                dev_dbg(dev, "Failed to send message %d\n", ret);
                return ret;
        }

        /* mbox_send_message returns non-negative value on success, so reset */
        ret = 0;

        if (xfer->hdr.poll_completion) {
                ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);

                spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop));

                if (ktime_before(ktime_get(), stop))
                        scmi_fetch_response(xfer, cinfo->payload);
                        info->desc->ops->fetch_response(cinfo, xfer);
                else
                        ret = -ETIMEDOUT;
        } else {
                /* And we wait for the response. */
                timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
                if (!wait_for_completion_timeout(&xfer->done, timeout)) {
                        dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
                        dev_err(dev, "timed out in resp(caller: %pS)\n",
                                (void *)_RET_IP_);
                        ret = -ETIMEDOUT;
                }
@@ -484,13 +324,8 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
        if (!ret && xfer->hdr.status)
                ret = scmi_to_linux_errno(xfer->hdr.status);

        /*
         * NOTE: we might prefer not to need the mailbox ticker to manage the
         * transfer queueing since the protocol layer queues things by itself.
         * Unfortunately, we have to kick the mailbox framework after we have
         * received our message.
         */
        mbox_client_txdone(cinfo->chan, ret);
        if (info->desc->ops->mark_txdone)
                info->desc->ops->mark_txdone(cinfo, ret);

        trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
                            xfer->hdr.protocol_id, xfer->hdr.seq,
@@ -731,23 +566,12 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
        return 0;
}
static int scmi_mailbox_check(struct device_node *np, int idx)
{
        return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells",
                                          idx, NULL);
}

static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
                                int prot_id, bool tx)
static int scmi_chan_setup(struct scmi_info *info, struct device *dev,
                           int prot_id, bool tx)
{
        int ret, idx;
        struct resource res;
        resource_size_t size;
        struct device_node *shmem, *np = dev->of_node;
        struct scmi_chan_info *cinfo;
        struct mbox_client *cl;
        struct idr *idr;
        const char *desc = tx ? "Tx" : "Rx";

        /* Transmit channel is first entry i.e. index 0 */
        idx = tx ? 0 : 1;
@@ -758,7 +582,7 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,
        if (cinfo)
                return 0;

        if (scmi_mailbox_check(np, idx)) {
        if (!info->desc->ops->chan_available(dev, idx)) {
                cinfo = idr_find(idr, SCMI_PROTOCOL_BASE);
                if (unlikely(!cinfo)) /* Possible only if platform has no Rx */
                        return -EINVAL;
@@ -771,36 +595,9 @@ static int scmi_mbox_chan_setup(struct scmi_info *info, struct device *dev,

        cinfo->dev = dev;

        cl = &cinfo->cl;
        cl->dev = dev;
        cl->rx_callback = scmi_rx_callback;
        cl->tx_prepare = tx ? scmi_tx_prepare : NULL;
        cl->tx_block = false;
        cl->knows_txdone = tx;

        shmem = of_parse_phandle(np, "shmem", idx);
        ret = of_address_to_resource(shmem, 0, &res);
        of_node_put(shmem);
        if (ret) {
                dev_err(dev, "failed to get SCMI %s payload memory\n", desc);
        ret = info->desc->ops->chan_setup(cinfo, info->dev, tx);
        if (ret)
                return ret;
        }

        size = resource_size(&res);
        cinfo->payload = devm_ioremap(info->dev, res.start, size);
        if (!cinfo->payload) {
                dev_err(dev, "failed to ioremap SCMI %s payload\n", desc);
                return -EADDRNOTAVAIL;
        }

        cinfo->chan = mbox_request_channel(cl, idx);
        if (IS_ERR(cinfo->chan)) {
                ret = PTR_ERR(cinfo->chan);
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "failed to request SCMI %s mailbox\n",
                                desc);
                return ret;
        }

idr_alloc:
        ret = idr_alloc(idr, cinfo, prot_id, prot_id + 1, GFP_KERNEL);
@@ -814,12 +611,12 @@ idr_alloc:
}

static inline int
scmi_mbox_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id)
{
        int ret = scmi_mbox_chan_setup(info, dev, prot_id, true);
        int ret = scmi_chan_setup(info, dev, prot_id, true);

        if (!ret) /* Rx is optional, hence no error check */
                scmi_mbox_chan_setup(info, dev, prot_id, false);
                scmi_chan_setup(info, dev, prot_id, false);

        return ret;
}
@@ -837,7 +634,7 @@ scmi_create_protocol_device(struct device_node *np, struct scmi_info *info,
                return;
        }

        if (scmi_mbox_txrx_setup(info, &sdev->dev, prot_id)) {
        if (scmi_txrx_setup(info, &sdev->dev, prot_id)) {
                dev_err(&sdev->dev, "failed to setup transport\n");
                scmi_device_destroy(sdev);
                return;
@@ -890,12 +687,6 @@ static int scmi_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct device_node *child, *np = dev->of_node;

        /* Only mailbox method supported, check for the presence of one */
        if (scmi_mailbox_check(np, 0)) {
                dev_err(dev, "no mailbox found in %pOF\n", np);
                return -EINVAL;
        }

        desc = of_device_get_match_data(dev);
        if (!desc)
                return -EINVAL;
@@ -920,7 +711,7 @@ static int scmi_probe(struct platform_device *pdev)
        handle->dev = info->dev;
        handle->version = &info->version;

        ret = scmi_mbox_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
        ret = scmi_txrx_setup(info, dev, SCMI_PROTOCOL_BASE);
        if (ret)
                return ret;

@@ -955,19 +746,9 @@ static int scmi_probe(struct platform_device *pdev)
        return 0;
}
static int scmi_mbox_free_channel(int id, void *p, void *data)
void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id)
{
        struct scmi_chan_info *cinfo = p;
        struct idr *idr = data;

        if (!IS_ERR_OR_NULL(cinfo->chan)) {
                mbox_free_channel(cinfo->chan);
                cinfo->chan = NULL;
        }

        idr_remove(idr, id);

        return 0;
}

static int scmi_remove(struct platform_device *pdev)
@@ -987,11 +768,11 @@ static int scmi_remove(struct platform_device *pdev)
                return ret;

        /* Safe to free channels since no more users */
        ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
        ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
        idr_destroy(&info->tx_idr);

        idr = &info->rx_idr;
        ret = idr_for_each(idr, scmi_mbox_free_channel, idr);
        ret = idr_for_each(idr, info->desc->ops->chan_free, idr);
        idr_destroy(&info->rx_idr);

        return ret;
@@ -1043,15 +824,9 @@ static struct attribute *versions_attrs[] = {
};
ATTRIBUTE_GROUPS(versions);

static const struct scmi_desc scmi_generic_desc = {
        .max_rx_timeout_ms = 30,        /* We may increase this if required */
        .max_msg = 20,          /* Limited by MBOX_TX_QUEUE_LEN */
        .max_msg_size = 128,
};

/* Each compatible listed below must have descriptor associated with it */
static const struct of_device_id scmi_of_match[] = {
        { .compatible = "arm,scmi", .data = &scmi_generic_desc },
        { .compatible = "arm,scmi", .data = &scmi_mailbox_desc },
        { /* Sentinel */ },
};

drivers/firmware/arm_scmi/mailbox.c (new file, 184 lines)
@@ -0,0 +1,184 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Message Mailbox Transport
 * driver.
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/err.h>
#include <linux/device.h>
#include <linux/mailbox_client.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include "common.h"

/**
 * struct scmi_mailbox - Structure representing a SCMI mailbox transport
 *
 * @cl: Mailbox Client
 * @chan: Transmit/Receive mailbox channel
 * @cinfo: SCMI channel info
 * @shmem: Transmit/Receive shared memory area
 */
struct scmi_mailbox {
        struct mbox_client cl;
        struct mbox_chan *chan;
        struct scmi_chan_info *cinfo;
        struct scmi_shared_mem __iomem *shmem;
};

#define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)

static void tx_prepare(struct mbox_client *cl, void *m)
{
        struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

        shmem_tx_prepare(smbox->shmem, m);
}

static void rx_callback(struct mbox_client *cl, void *m)
{
        struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);

        scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem));
}

static bool mailbox_chan_available(struct device *dev, int idx)
{
        return !of_parse_phandle_with_args(dev->of_node, "mboxes",
                                           "#mbox-cells", idx, NULL);
}

static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
                              bool tx)
{
        const char *desc = tx ? "Tx" : "Rx";
        struct device *cdev = cinfo->dev;
        struct scmi_mailbox *smbox;
        struct device_node *shmem;
        int ret, idx = tx ? 0 : 1;
        struct mbox_client *cl;
        resource_size_t size;
        struct resource res;

        smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
        if (!smbox)
                return -ENOMEM;

        shmem = of_parse_phandle(cdev->of_node, "shmem", idx);
        ret = of_address_to_resource(shmem, 0, &res);
        of_node_put(shmem);
        if (ret) {
                dev_err(cdev, "failed to get SCMI %s shared memory\n", desc);
                return ret;
        }

        size = resource_size(&res);
        smbox->shmem = devm_ioremap(dev, res.start, size);
        if (!smbox->shmem) {
                dev_err(dev, "failed to ioremap SCMI %s shared memory\n", desc);
                return -EADDRNOTAVAIL;
        }

        cl = &smbox->cl;
        cl->dev = cdev;
        cl->tx_prepare = tx ? tx_prepare : NULL;
        cl->rx_callback = rx_callback;
        cl->tx_block = false;
        cl->knows_txdone = tx;

        smbox->chan = mbox_request_channel(cl, tx ? 0 : 1);
        if (IS_ERR(smbox->chan)) {
                ret = PTR_ERR(smbox->chan);
                if (ret != -EPROBE_DEFER)
                        dev_err(cdev, "failed to request SCMI %s mailbox\n",
                                tx ? "Tx" : "Rx");
                return ret;
        }

        cinfo->transport_info = smbox;
        smbox->cinfo = cinfo;

        return 0;
}

static int mailbox_chan_free(int id, void *p, void *data)
{
        struct scmi_chan_info *cinfo = p;
        struct scmi_mailbox *smbox = cinfo->transport_info;

        if (!IS_ERR(smbox->chan)) {
                mbox_free_channel(smbox->chan);
                cinfo->transport_info = NULL;
                smbox->chan = NULL;
                smbox->cinfo = NULL;
        }

        scmi_free_channel(cinfo, data, id);

        return 0;
}

static int mailbox_send_message(struct scmi_chan_info *cinfo,
                                struct scmi_xfer *xfer)
{
        struct scmi_mailbox *smbox = cinfo->transport_info;
        int ret;

        ret = mbox_send_message(smbox->chan, xfer);

        /* mbox_send_message returns non-negative value on success, so reset */
        if (ret > 0)
                ret = 0;

        return ret;
}

static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret)
{
        struct scmi_mailbox *smbox = cinfo->transport_info;

        /*
         * NOTE: we might prefer not to need the mailbox ticker to manage the
         * transfer queueing since the protocol layer queues things by itself.
         * Unfortunately, we have to kick the mailbox framework after we have
         * received our message.
         */
        mbox_client_txdone(smbox->chan, ret);
}

static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
                                   struct scmi_xfer *xfer)
{
        struct scmi_mailbox *smbox = cinfo->transport_info;

        shmem_fetch_response(smbox->shmem, xfer);
}

static bool
mailbox_poll_done(struct scmi_chan_info *cinfo, struct scmi_xfer *xfer)
{
        struct scmi_mailbox *smbox = cinfo->transport_info;

        return shmem_poll_done(smbox->shmem, xfer);
}

static struct scmi_transport_ops scmi_mailbox_ops = {
        .chan_available = mailbox_chan_available,
        .chan_setup = mailbox_chan_setup,
        .chan_free = mailbox_chan_free,
        .send_message = mailbox_send_message,
        .mark_txdone = mailbox_mark_txdone,
        .fetch_response = mailbox_fetch_response,
        .poll_done = mailbox_poll_done,
};

const struct scmi_desc scmi_mailbox_desc = {
        .ops = &scmi_mailbox_ops,
        .max_rx_timeout_ms = 30,        /* We may increase this if required */
        .max_msg = 20,          /* Limited by MBOX_TX_QUEUE_LEN */
        .max_msg_size = 128,
};
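The scmi_mailbox_ops/scmi_mailbox_desc pair above is the entire contract between the SCMI core and a transport after this split. As a rough sketch of what the pluggable-transport layering enables, an alternative transport would only need to provide the same seven hooks and its own descriptor; every "example_" name below is hypothetical and meant only to show the shape of the interface:

/* Hypothetical transport skeleton (illustration only, not in this series) */
static struct scmi_transport_ops scmi_example_ops = {
        .chan_available = example_chan_available,
        .chan_setup     = example_chan_setup,
        .chan_free      = example_chan_free,
        .send_message   = example_send_message,
        .mark_txdone    = example_mark_txdone, /* optional, core checks NULL */
        .fetch_response = example_fetch_response,
        .poll_done      = example_poll_done,
};

const struct scmi_desc scmi_example_desc = {
        .ops = &scmi_example_ops,
        .max_rx_timeout_ms = 30,
        .max_msg = 20,
        .max_msg_size = 128,
};

A new compatible entry in scmi_of_match pointing at such a descriptor is then all the core needs in order to drive it.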
@@ -89,7 +89,7 @@ struct scmi_msg_resp_perf_describe_levels {
                __le32 power;
                __le16 transition_latency_us;
                __le16 reserved;
        } opp[0];
        } opp[];
};

struct scmi_perf_get_fc_info {
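The opp[0] to opp[] change above, like the scpi payload[] change further down, replaces the old GNU zero-length-array idiom with a C99 flexible array member. The layout is identical; the standard form simply lets the compiler diagnose invalid uses. A standalone illustration of the sizing rule:

#include <stdio.h>
#include <stdlib.h>

struct msg {
        unsigned int len;
        unsigned char payload[]; /* flexible array member */
};

int main(void)
{
        /* The flexible member adds nothing to sizeof; trailing storage is
         * allocated explicitly, exactly as with the old payload[0] form. */
        struct msg *m = malloc(sizeof(*m) + 16);

        if (!m)
                return 1;
        printf("sizeof(struct msg) = %zu\n", sizeof(struct msg));
        free(m);
        return 0;
}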
drivers/firmware/arm_scmi/shmem.c (new file, 83 lines)
@@ -0,0 +1,83 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * For transport using shared mem structure.
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/io.h>
#include <linux/processor.h>
#include <linux/types.h>

#include "common.h"

/*
 * SCMI specification requires all parameters, message headers, return
 * arguments or any protocol data to be expressed in little endian
 * format only.
 */
struct scmi_shared_mem {
        __le32 reserved;
        __le32 channel_status;
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR      BIT(1)
#define SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE       BIT(0)
        __le32 reserved1[2];
        __le32 flags;
#define SCMI_SHMEM_FLAG_INTR_ENABLED    BIT(0)
        __le32 length;
        __le32 msg_header;
        u8 msg_payload[];
};

void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem,
                      struct scmi_xfer *xfer)
{
        /*
         * Ideally channel must be free by now unless OS timeout last
         * request and platform continued to process the same, wait
         * until it releases the shared memory, otherwise we may endup
         * overwriting its response with new message payload or vice-versa
         */
        spin_until_cond(ioread32(&shmem->channel_status) &
                        SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
        /* Mark channel busy + clear error */
        iowrite32(0x0, &shmem->channel_status);
        iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
                  &shmem->flags);
        iowrite32(sizeof(shmem->msg_header) + xfer->tx.len, &shmem->length);
        iowrite32(pack_scmi_header(&xfer->hdr), &shmem->msg_header);
        if (xfer->tx.buf)
                memcpy_toio(shmem->msg_payload, xfer->tx.buf, xfer->tx.len);
}

u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
{
        return ioread32(&shmem->msg_header);
}

void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
                          struct scmi_xfer *xfer)
{
        xfer->hdr.status = ioread32(shmem->msg_payload);
        /* Skip the length of header and status in shmem area i.e 8 bytes */
        xfer->rx.len = min_t(size_t, xfer->rx.len,
                             ioread32(&shmem->length) - 8);

        /* Take a copy to the rx buffer.. */
        memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
}

bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
                     struct scmi_xfer *xfer)
{
        u16 xfer_id;

        xfer_id = MSG_XTRACT_TOKEN(ioread32(&shmem->msg_header));

        if (xfer->hdr.seq != xfer_id)
                return false;

        return ioread32(&shmem->channel_status) &
               (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
                SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
}
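The magic numbers in shmem_fetch_response() fall straight out of the layout declared above: length counts the 4-byte msg_header plus the payload, and the first 4 payload bytes carry the status word, hence the "- 8" and "+ 4". A plain-C mirror of the struct (for offset arithmetic only; the real driver accesses it through __iomem accessors) makes this visible:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Plain-C mirror of struct scmi_shared_mem, for offset checking only */
struct shmem_mirror {
        uint32_t reserved;
        uint32_t channel_status;
        uint32_t reserved1[2];
        uint32_t flags;
        uint32_t length;
        uint32_t msg_header;
        uint8_t  msg_payload[];
};

int main(void)
{
        /* length = sizeof(msg_header) + tx.len, so after the 4-byte status
         * word the response data spans length - 8 bytes. */
        printf("msg_header at %zu, msg_payload at %zu\n",
               offsetof(struct shmem_mirror, msg_header),
               offsetof(struct shmem_mirror, msg_payload));
        return 0;
}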
@@ -262,12 +262,12 @@ struct scpi_drvinfo {
struct scpi_shared_mem {
        __le32 command;
        __le32 status;
        u8 payload[0];
        u8 payload[];
} __packed;

struct legacy_scpi_shared_mem {
        __le32 status;
        u8 payload[0];
        u8 payload[];
} __packed;

struct scp_capabilities {
|
||||
{ "kpp", IMX_SC_R_KPP, 1, false, 0 },
|
||||
{ "fspi", IMX_SC_R_FSPI_0, 2, true, 0 },
|
||||
{ "mu_a", IMX_SC_R_MU_0A, 14, true, 0 },
|
||||
{ "mu_b", IMX_SC_R_MU_13B, 1, true, 13 },
|
||||
{ "mu_b", IMX_SC_R_MU_5B, 9, true, 5 },
|
||||
|
||||
/* CONN SS */
|
||||
{ "usb", IMX_SC_R_USB_0, 2, true, 0 },
|
||||
@ -109,6 +109,7 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
|
||||
{ "audio-pll0", IMX_SC_R_AUDIO_PLL_0, 1, false, 0 },
|
||||
{ "audio-pll1", IMX_SC_R_AUDIO_PLL_1, 1, false, 0 },
|
||||
{ "audio-clk-0", IMX_SC_R_AUDIO_CLK_0, 1, false, 0 },
|
||||
{ "audio-clk-1", IMX_SC_R_AUDIO_CLK_1, 1, false, 0 },
|
||||
{ "dma0-ch", IMX_SC_R_DMA_0_CH0, 16, true, 0 },
|
||||
{ "dma1-ch", IMX_SC_R_DMA_1_CH0, 16, true, 0 },
|
||||
{ "dma2-ch", IMX_SC_R_DMA_2_CH0, 5, true, 0 },
|
||||
@ -116,7 +117,13 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
|
||||
{ "asrc1", IMX_SC_R_ASRC_1, 1, false, 0 },
|
||||
{ "esai0", IMX_SC_R_ESAI_0, 1, false, 0 },
|
||||
{ "spdif0", IMX_SC_R_SPDIF_0, 1, false, 0 },
|
||||
{ "spdif1", IMX_SC_R_SPDIF_1, 1, false, 0 },
|
||||
{ "sai", IMX_SC_R_SAI_0, 3, true, 0 },
|
||||
{ "sai3", IMX_SC_R_SAI_3, 1, false, 0 },
|
||||
{ "sai4", IMX_SC_R_SAI_4, 1, false, 0 },
|
||||
{ "sai5", IMX_SC_R_SAI_5, 1, false, 0 },
|
||||
{ "sai6", IMX_SC_R_SAI_6, 1, false, 0 },
|
||||
{ "sai7", IMX_SC_R_SAI_7, 1, false, 0 },
|
||||
{ "amix", IMX_SC_R_AMIX, 1, false, 0 },
|
||||
{ "mqs0", IMX_SC_R_MQS_0, 1, false, 0 },
|
||||
{ "dsp", IMX_SC_R_DSP, 1, false, 0 },
|
||||
@ -158,6 +165,10 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
|
||||
/* DC SS */
|
||||
{ "dc0", IMX_SC_R_DC_0, 1, false, 0 },
|
||||
{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
|
||||
|
||||
/* CM40 SS */
|
||||
{ "cm40_i2c", IMX_SC_R_M4_0_I2C, 1, 0 },
|
||||
{ "cm40_intmux", IMX_SC_R_M4_0_INTMUX, 1, 0 },
|
||||
};
|
||||
|
||||
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
|
||||
|
@@ -44,6 +44,8 @@ static const struct meson_sm_chip gxbb_chip = {
                CMD(SM_EFUSE_WRITE, 0x82000031),
                CMD(SM_EFUSE_USER_MAX, 0x82000033),
                CMD(SM_GET_CHIP_ID, 0x82000044),
                CMD(SM_A1_PWRC_SET, 0x82000093),
                CMD(SM_A1_PWRC_GET, 0x82000095),
                { /* sentinel */ },
        },
};
@@ -7,7 +7,7 @@ config TEGRA_IVC
        help
          IVC (Inter-VM Communication) protocol is part of the IPC
          (Inter Processor Communication) framework on Tegra. It maintains the
          data and the different commuication channels in SysRAM or RAM and
          data and the different communication channels in SysRAM or RAM and
          keeps the content is synchronization between host CPU and remote
          processors.
@@ -1348,9 +1348,15 @@ static int dss_component_compare(struct device *dev, void *data)
        return dev == child;
}

struct dss_component_match_data {
        struct device *dev;
        struct component_match **match;
};

static int dss_add_child_component(struct device *dev, void *data)
{
        struct component_match **match = data;
        struct dss_component_match_data *cmatch = data;
        struct component_match **match = cmatch->match;

        /*
         * HACK
@@ -1361,7 +1367,17 @@ static int dss_add_child_component(struct device *dev, void *data)
        if (strstr(dev_name(dev), "rfbi"))
                return 0;

        component_match_add(dev->parent, match, dss_component_compare, dev);
        /*
         * Handle possible interconnect target modules defined within the DSS.
         * The DSS components can be children of an interconnect target module
         * after the device tree has been updated for the module data.
         * See also omapdss_boot_init() for compatible fixup.
         */
        if (strstr(dev_name(dev), "target-module"))
                return device_for_each_child(dev, cmatch,
                                             dss_add_child_component);

        component_match_add(cmatch->dev, match, dss_component_compare, dev);

        return 0;
}
@@ -1404,6 +1420,7 @@ static int dss_probe_hardware(struct dss_device *dss)
static int dss_probe(struct platform_device *pdev)
{
        const struct soc_device_attribute *soc;
        struct dss_component_match_data cmatch;
        struct component_match *match = NULL;
        struct resource *dss_mem;
        struct dss_device *dss;
@@ -1481,7 +1498,9 @@ static int dss_probe(struct platform_device *pdev)

        omapdss_gather_components(&pdev->dev);

        device_for_each_child(&pdev->dev, &match, dss_add_child_component);
        cmatch.dev = &pdev->dev;
        cmatch.match = &match;
        device_for_each_child(&pdev->dev, &cmatch, dss_add_child_component);

        r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
        if (r)
@@ -178,9 +178,24 @@ static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
        {},
};

static void __init omapdss_find_children(struct device_node *np)
{
        struct device_node *child;

        for_each_available_child_of_node(np, child) {
                if (!of_find_property(child, "compatible", NULL))
                        continue;

                omapdss_walk_device(child, true);

                if (of_device_is_compatible(child, "ti,sysc"))
                        omapdss_find_children(child);
        }
}

static int __init omapdss_boot_init(void)
{
        struct device_node *dss, *child;
        struct device_node *dss;

        INIT_LIST_HEAD(&dss_conv_list);

@@ -190,13 +205,7 @@ static int __init omapdss_boot_init(void)
                goto put_node;

        omapdss_walk_device(dss, true);

        for_each_available_child_of_node(dss, child) {
                if (!of_find_property(child, "compatible", NULL))
                        continue;

                omapdss_walk_device(child, true);
        }
        omapdss_find_children(dss);

        while (!list_empty(&dss_conv_list)) {
                struct dss_conv_node *n;
|
||||
emc->debugfs.max_rate = emc->timings[i].rate;
|
||||
}
|
||||
|
||||
if (!emc->num_timings) {
|
||||
emc->debugfs.min_rate = clk_get_rate(emc->clk);
|
||||
emc->debugfs.max_rate = emc->debugfs.min_rate;
|
||||
}
|
||||
|
||||
err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
|
||||
emc->debugfs.max_rate);
|
||||
if (err < 0) {
|
||||
|
@@ -628,6 +628,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
                        emc->debugfs.max_rate = emc->timings[i].rate;
        }

        if (!emc->num_timings) {
                emc->debugfs.min_rate = clk_get_rate(emc->clk);
                emc->debugfs.max_rate = emc->debugfs.min_rate;
        }

        err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
                                 emc->debugfs.max_rate);
        if (err < 0) {
@@ -1256,6 +1256,11 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
                        emc->debugfs.max_rate = emc->timings[i].rate;
        }

        if (!emc->num_timings) {
                emc->debugfs.min_rate = clk_get_rate(emc->clk);
                emc->debugfs.max_rate = emc->debugfs.min_rate;
        }

        err = clk_set_rate_range(emc->clk, emc->debugfs.min_rate,
                                 emc->debugfs.max_rate);
        if (err < 0) {
@@ -11,7 +11,7 @@ obj-$(CONFIG_ARCH_DOVE) += dove/
obj-$(CONFIG_MACH_DOVE) += dove/
obj-y += fsl/
obj-$(CONFIG_ARCH_GEMINI) += gemini/
obj-$(CONFIG_ARCH_MXC) += imx/
obj-y += imx/
obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
obj-y += mediatek/
@@ -48,6 +48,19 @@ config MESON_EE_PM_DOMAINS
          Say yes to expose Amlogic Meson Everything-Else Power Domains as
          Generic Power Domains.

config MESON_SECURE_PM_DOMAINS
        bool "Amlogic Meson Secure Power Domains driver"
        depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
        depends on PM && OF
        depends on HAVE_ARM_SMCCC
        default ARCH_MESON
        select PM_GENERIC_DOMAINS
        select PM_GENERIC_DOMAINS_OF
        help
          Support for the power controller on Amlogic A1/C1 series.
          Say yes to expose Amlogic Meson Secure Power Domains as Generic
          Power Domains.

config MESON_MX_SOCINFO
        bool "Amlogic Meson MX SoC Information driver"
        depends on ARCH_MESON || COMPILE_TEST
@@ -5,3 +5,4 @@ obj-$(CONFIG_MESON_GX_SOCINFO) += meson-gx-socinfo.o
obj-$(CONFIG_MESON_GX_PM_DOMAINS) += meson-gx-pwrc-vpu.o
obj-$(CONFIG_MESON_MX_SOCINFO) += meson-mx-socinfo.o
obj-$(CONFIG_MESON_EE_PM_DOMAINS) += meson-ee-pwrc.o
obj-$(CONFIG_MESON_SECURE_PM_DOMAINS) += meson-secure-pwrc.o

drivers/soc/amlogic/meson-secure-pwrc.c (new file, 204 lines)
@@ -0,0 +1,204 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2019 Amlogic, Inc.
 * Author: Jianxin Pan <jianxin.pan@amlogic.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <dt-bindings/power/meson-a1-power.h>
#include <linux/arm-smccc.h>
#include <linux/firmware/meson/meson_sm.h>

#define PWRC_ON         1
#define PWRC_OFF        0

struct meson_secure_pwrc_domain {
        struct generic_pm_domain base;
        unsigned int index;
        struct meson_secure_pwrc *pwrc;
};

struct meson_secure_pwrc {
        struct meson_secure_pwrc_domain *domains;
        struct genpd_onecell_data xlate;
        struct meson_sm_firmware *fw;
};

struct meson_secure_pwrc_domain_desc {
        unsigned int index;
        unsigned int flags;
        char *name;
        bool (*is_off)(struct meson_secure_pwrc_domain *pwrc_domain);
};

struct meson_secure_pwrc_domain_data {
        unsigned int count;
        struct meson_secure_pwrc_domain_desc *domains;
};

static bool pwrc_secure_is_off(struct meson_secure_pwrc_domain *pwrc_domain)
{
        int is_off = 1;

        if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_GET, &is_off,
                          pwrc_domain->index, 0, 0, 0, 0) < 0)
                pr_err("failed to get power domain status\n");

        return is_off;
}

static int meson_secure_pwrc_off(struct generic_pm_domain *domain)
{
        int ret = 0;
        struct meson_secure_pwrc_domain *pwrc_domain =
                container_of(domain, struct meson_secure_pwrc_domain, base);

        if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
                          pwrc_domain->index, PWRC_OFF, 0, 0, 0) < 0) {
                pr_err("failed to set power domain off\n");
                ret = -EINVAL;
        }

        return ret;
}

static int meson_secure_pwrc_on(struct generic_pm_domain *domain)
{
        int ret = 0;
        struct meson_secure_pwrc_domain *pwrc_domain =
                container_of(domain, struct meson_secure_pwrc_domain, base);

        if (meson_sm_call(pwrc_domain->pwrc->fw, SM_A1_PWRC_SET, NULL,
                          pwrc_domain->index, PWRC_ON, 0, 0, 0) < 0) {
                pr_err("failed to set power domain on\n");
                ret = -EINVAL;
        }

        return ret;
}

#define SEC_PD(__name, __flag)                  \
[PWRC_##__name##_ID] =                          \
{                                               \
        .name = #__name,                        \
        .index = PWRC_##__name##_ID,            \
        .is_off = pwrc_secure_is_off,           \
        .flags = __flag,                        \
}

static struct meson_secure_pwrc_domain_desc a1_pwrc_domains[] = {
        SEC_PD(DSPA, 0),
        SEC_PD(DSPB, 0),
        /* UART should keep working in ATF after suspend and before resume */
        SEC_PD(UART, GENPD_FLAG_ALWAYS_ON),
        /* DMC is for DDR PHY ana/dig and DMC, and should be always on */
        SEC_PD(DMC, GENPD_FLAG_ALWAYS_ON),
        SEC_PD(I2C, 0),
        SEC_PD(PSRAM, 0),
        SEC_PD(ACODEC, 0),
        SEC_PD(AUDIO, 0),
        SEC_PD(OTP, 0),
        SEC_PD(DMA, 0),
        SEC_PD(SD_EMMC, 0),
        SEC_PD(RAMA, 0),
        /* SRAMB is used as ATF runtime memory, and should be always on */
        SEC_PD(RAMB, GENPD_FLAG_ALWAYS_ON),
        SEC_PD(IR, 0),
        SEC_PD(SPICC, 0),
        SEC_PD(SPIFC, 0),
        SEC_PD(USB, 0),
        /* NIC is for the Arm NIC-400 interconnect, and should be always on */
        SEC_PD(NIC, GENPD_FLAG_ALWAYS_ON),
        SEC_PD(PDMIN, 0),
        SEC_PD(RSA, 0),
};

static int meson_secure_pwrc_probe(struct platform_device *pdev)
{
        int i;
        struct device_node *sm_np;
        struct meson_secure_pwrc *pwrc;
        const struct meson_secure_pwrc_domain_data *match;

        match = of_device_get_match_data(&pdev->dev);
        if (!match) {
                dev_err(&pdev->dev, "failed to get match data\n");
                return -ENODEV;
        }

        sm_np = of_find_compatible_node(NULL, NULL, "amlogic,meson-gxbb-sm");
        if (!sm_np) {
                dev_err(&pdev->dev, "no secure-monitor node\n");
                return -ENODEV;
        }

        pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
        if (!pwrc)
                return -ENOMEM;

        pwrc->fw = meson_sm_get(sm_np);
        of_node_put(sm_np);
        if (!pwrc->fw)
                return -EPROBE_DEFER;

        pwrc->xlate.domains = devm_kcalloc(&pdev->dev, match->count,
                                           sizeof(*pwrc->xlate.domains),
                                           GFP_KERNEL);
        if (!pwrc->xlate.domains)
                return -ENOMEM;

        pwrc->domains = devm_kcalloc(&pdev->dev, match->count,
                                     sizeof(*pwrc->domains), GFP_KERNEL);
        if (!pwrc->domains)
                return -ENOMEM;

        pwrc->xlate.num_domains = match->count;
        platform_set_drvdata(pdev, pwrc);

        for (i = 0 ; i < match->count ; ++i) {
                struct meson_secure_pwrc_domain *dom = &pwrc->domains[i];

                if (!match->domains[i].index)
                        continue;

                dom->pwrc = pwrc;
                dom->index = match->domains[i].index;
                dom->base.name = match->domains[i].name;
                dom->base.flags = match->domains[i].flags;
                dom->base.power_on = meson_secure_pwrc_on;
                dom->base.power_off = meson_secure_pwrc_off;

                pm_genpd_init(&dom->base, NULL, match->domains[i].is_off(dom));

                pwrc->xlate.domains[i] = &dom->base;
        }

        return of_genpd_add_provider_onecell(pdev->dev.of_node, &pwrc->xlate);
}

static struct meson_secure_pwrc_domain_data meson_secure_a1_pwrc_data = {
        .domains = a1_pwrc_domains,
        .count = ARRAY_SIZE(a1_pwrc_domains),
};

static const struct of_device_id meson_secure_pwrc_match_table[] = {
        {
                .compatible = "amlogic,meson-a1-pwrc",
                .data = &meson_secure_a1_pwrc_data,
        },
        { /* sentinel */ }
};

static struct platform_driver meson_secure_pwrc_driver = {
        .probe = meson_secure_pwrc_probe,
        .driver = {
                .name = "meson_secure_pwrc",
                .of_match_table = meson_secure_pwrc_match_table,
        },
};
builtin_platform_driver(meson_secure_pwrc_driver);
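For reference, SEC_PD() expands each table entry into a designated initializer keyed by the dt-binding power-domain index; SEC_PD(DSPA, 0), for instance, is equivalent to:

[PWRC_DSPA_ID] = {
        .name = "DSPA",
        .index = PWRC_DSPA_ID,
        .is_off = pwrc_secure_is_off,
        .flags = 0,
},

which is why the probe loop above can skip unpopulated slots of the sparse array with the !match->domains[i].index check.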
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016 NXP
 * Copyright 2016-2019 NXP
 *
 */
#include <linux/types.h>
@@ -432,6 +432,69 @@ int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d,
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq);

/**
 * dpaa2_io_service_enqueue_multiple_fq() - Enqueue multiple frames
 * to a frame queue using one fqid.
 * @d: the given DPIO service.
 * @fqid: the given frame queue id.
 * @fd: the frame descriptor which is enqueued.
 * @nb: number of frames to be enqueud
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
                                         u32 fqid,
                                         const struct dpaa2_fd *fd,
                                         int nb)
{
        struct qbman_eq_desc ed;

        d = service_select(d);
        if (!d)
                return -ENODEV;

        qbman_eq_desc_clear(&ed);
        qbman_eq_desc_set_no_orp(&ed, 0);
        qbman_eq_desc_set_fq(&ed, fqid);

        return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
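A caller batching frames towards one FQID would use the new helper along these lines. This is a sketch only: the dpaa2_io object, FQID and frame descriptors are assumed to come from the usual DPIO/DPNI setup paths, 'd' may be NULL to let the service pick an instance (see service_select() above), and the return convention is hedged both ways since the kernel-doc above says 0/-EBUSY while the portal-level multiple-enqueue ops below document "number of fd enqueued":

/* Sketch: push a burst of prepared frame descriptors to a single FQ */
static int example_tx_burst(struct dpaa2_io *d, u32 fqid,
                            const struct dpaa2_fd *fds, int nb)
{
        int done = 0;

        while (done < nb) {
                int ret = dpaa2_io_service_enqueue_multiple_fq(d, fqid,
                                                               fds + done,
                                                               nb - done);
                if (ret < 0)
                        return ret;     /* e.g. -ENODEV or ring not ready */
                if (!ret)
                        break;          /* portal full, give up for now */
                done += ret;            /* frames accepted this call */
        }

        return done;
}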
/**
 * dpaa2_io_service_enqueue_multiple_desc_fq() - Enqueue multiple frames
 * to different frame queue using a list of fqids.
 * @d: the given DPIO service.
 * @fqid: the given list of frame queue ids.
 * @fd: the frame descriptor which is enqueued.
 * @nb: number of frames to be enqueud
 *
 * Return 0 for successful enqueue, -EBUSY if the enqueue ring is not ready,
 * or -ENODEV if there is no dpio service.
 */
int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
                                              u32 *fqid,
                                              const struct dpaa2_fd *fd,
                                              int nb)
{
        int i;
        struct qbman_eq_desc ed[32];

        d = service_select(d);
        if (!d)
                return -ENODEV;

        for (i = 0; i < nb; i++) {
                qbman_eq_desc_clear(&ed[i]);
                qbman_eq_desc_set_no_orp(&ed[i], 0);
                qbman_eq_desc_set_fq(&ed[i], fqid[i]);
        }

        return qbman_swp_enqueue_multiple_desc(d->swp, &ed[0], fd, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_desc_fq);

/**
 * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
 * @d: the given DPIO service.
@@ -526,7 +589,7 @@ EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);

/**
 * dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
 * @max_frames: the maximum number of dequeued result for frames, must be <= 16.
 * @max_frames: the maximum number of dequeued result for frames, must be <= 32.
 * @dev: the device to allow mapping/unmapping the DMAable region.
 *
 * The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
@@ -541,7 +604,7 @@ struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
        struct dpaa2_io_store *ret;
        size_t size;

        if (!max_frames || (max_frames > 16))
        if (!max_frames || (max_frames > 32))
                return NULL;

        ret = kmalloc(sizeof(*ret), GFP_KERNEL);
@@ -1,24 +1,18 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
 * Copyright 2016 NXP
 * Copyright 2016-2019 NXP
 *
 */

#include <asm/cacheflush.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <soc/fsl/dpaa2-global.h>

#include "qbman-portal.h"

#define QMAN_REV_4000 0x04000000
#define QMAN_REV_4100 0x04010000
#define QMAN_REV_4101 0x04010001
#define QMAN_REV_5000 0x05000000

#define QMAN_REV_MASK 0xffff0000

/* All QBMan command and result structures use this "valid bit" encoding */
#define QB_VALID_BIT ((u32)0x80)

@@ -28,6 +22,7 @@

/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
#define QBMAN_CINH_SWP_EQCR_CI 0x840
#define QBMAN_CINH_SWP_EQAR    0x8c0
#define QBMAN_CINH_SWP_CR_RT   0x900
#define QBMAN_CINH_SWP_VDQCR_RT 0x940
@@ -51,6 +46,8 @@
#define QBMAN_CENA_SWP_CR      0x600
#define QBMAN_CENA_SWP_RR(vb)  (0x700 + ((u32)(vb) >> 1))
#define QBMAN_CENA_SWP_VDQCR   0x780
#define QBMAN_CENA_SWP_EQCR_CI 0x840
#define QBMAN_CENA_SWP_EQCR_CI_MEMBACK 0x1840

/* CENA register offsets in memory-backed mode */
#define QBMAN_CENA_SWP_DQRR_MEM(n)  (0x800 + ((u32)(n) << 6))
@@ -78,6 +75,12 @@
/* opaque token for static dequeues */
#define QMAN_SDQCR_TOKEN    0xbb

#define QBMAN_EQCR_DCA_IDXMASK         0x0f
#define QBMAN_ENQUEUE_FLAG_DCA         (1ULL << 31)

#define EQ_DESC_SIZE_WITHOUT_FD 29
#define EQ_DESC_SIZE_FD_START 32

enum qbman_sdqcr_dct {
        qbman_sdqcr_dct_null = 0,
        qbman_sdqcr_dct_prio_ics,
@@ -90,6 +93,82 @@ enum qbman_sdqcr_fc {
        qbman_sdqcr_fc_up_to_3 = 1
};

/* Internal Function declaration */
static int qbman_swp_enqueue_direct(struct qbman_swp *s,
                                    const struct qbman_eq_desc *d,
                                    const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct dpaa2_fd *fd);
static int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd,
                                             uint32_t *flags,
                                             int num_frames);
static int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
                                               const struct qbman_eq_desc *d,
                                               const struct dpaa2_fd *fd,
                                               uint32_t *flags,
                                               int num_frames);
static int
qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct dpaa2_fd *fd,
                                       int num_frames);
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
                                             const struct qbman_eq_desc *d,
                                             const struct dpaa2_fd *fd,
                                             int num_frames);
static int qbman_swp_pull_direct(struct qbman_swp *s,
                                 struct qbman_pull_desc *d);
static int qbman_swp_pull_mem_back(struct qbman_swp *s,
                                   struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s);
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s);

static int qbman_swp_release_direct(struct qbman_swp *s,
                                    const struct qbman_release_desc *d,
                                    const u64 *buffers,
                                    unsigned int num_buffers);
static int qbman_swp_release_mem_back(struct qbman_swp *s,
                                      const struct qbman_release_desc *d,
                                      const u64 *buffers,
                                      unsigned int num_buffers);

/* Function pointers */
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
                             const struct qbman_eq_desc *d,
                             const struct dpaa2_fd *fd)
        = qbman_swp_enqueue_direct;

int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
                                      const struct qbman_eq_desc *d,
                                      const struct dpaa2_fd *fd,
                                      uint32_t *flags,
                                      int num_frames)
        = qbman_swp_enqueue_multiple_direct;

int
(*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
                                       const struct qbman_eq_desc *d,
                                       const struct dpaa2_fd *fd,
                                       int num_frames)
        = qbman_swp_enqueue_multiple_desc_direct;

int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d)
        = qbman_swp_pull_direct;

const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s)
        = qbman_swp_dqrr_next_direct;

int (*qbman_swp_release_ptr)(struct qbman_swp *s,
                             const struct qbman_release_desc *d,
                             const u64 *buffers,
                             unsigned int num_buffers)
        = qbman_swp_release_direct;

/* Portal Access */

static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
@@ -146,6 +225,15 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,

#define QMAN_RT_MODE   0x00000100

static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
        /* 'first' is included, 'last' is excluded */
        if (first <= last)
                return last - first;
        else
                return (2 * ringsize) - (first - last);
}
|
||||
* qbman_swp_init() - Create a functional object representing the given
|
||||
* QBMan portal descriptor.
|
||||
@ -156,11 +244,16 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
|
||||
*/
|
||||
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
|
||||
{
|
||||
struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL);
|
||||
struct qbman_swp *p = kzalloc(sizeof(*p), GFP_KERNEL);
|
||||
u32 reg;
|
||||
u32 mask_size;
|
||||
u32 eqcr_pi;
|
||||
|
||||
if (!p)
|
||||
return NULL;
|
||||
|
||||
spin_lock_init(&p->access_spinlock);
|
||||
|
||||
p->desc = d;
|
||||
p->mc.valid_bit = QB_VALID_BIT;
|
||||
p->sdq = 0;
|
||||
@ -186,25 +279,38 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
|
||||
p->addr_cena = d->cena_bar;
|
||||
p->addr_cinh = d->cinh_bar;
|
||||
|
||||
if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
|
||||
memset(p->addr_cena, 0, 64 * 1024);
|
||||
if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
|
||||
|
||||
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
|
||||
1, /* Writes Non-cacheable */
|
||||
0, /* EQCR_CI stashing threshold */
|
||||
3, /* RPM: Valid bit mode, RCR in array mode */
|
||||
2, /* DCM: Discrete consumption ack mode */
|
||||
3, /* EPM: Valid bit mode, EQCR in array mode */
|
||||
1, /* mem stashing drop enable == TRUE */
|
||||
1, /* mem stashing priority == TRUE */
|
||||
1, /* mem stashing enable == TRUE */
|
||||
1, /* dequeue stashing priority == TRUE */
|
||||
0, /* dequeue stashing enable == FALSE */
|
||||
0); /* EQCR_CI stashing priority == FALSE */
|
||||
if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
|
||||
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
|
||||
1, /* Writes Non-cacheable */
|
||||
0, /* EQCR_CI stashing threshold */
|
||||
3, /* RPM: RCR in array mode */
|
||||
2, /* DCM: Discrete consumption ack */
|
||||
2, /* EPM: EQCR in ring mode */
|
||||
1, /* mem stashing drop enable enable */
|
||||
1, /* mem stashing priority enable */
|
||||
1, /* mem stashing enable */
|
||||
1, /* dequeue stashing priority enable */
|
||||
0, /* dequeue stashing enable enable */
|
||||
0); /* EQCR_CI stashing priority enable */
|
||||
} else {
|
||||
memset(p->addr_cena, 0, 64 * 1024);
|
||||
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
|
||||
1, /* Writes Non-cacheable */
|
||||
1, /* EQCR_CI stashing threshold */
|
||||
3, /* RPM: RCR in array mode */
|
||||
2, /* DCM: Discrete consumption ack */
|
||||
0, /* EPM: EQCR in ring mode */
|
||||
1, /* mem stashing drop enable */
|
||||
1, /* mem stashing priority enable */
|
||||
1, /* mem stashing enable */
|
||||
1, /* dequeue stashing priority enable */
|
||||
0, /* dequeue stashing enable */
|
||||
0); /* EQCR_CI stashing priority enable */
|
||||
reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
|
||||
1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
|
||||
1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
|
||||
}
|
||||
|
||||
qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
|
||||
reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
|
||||
@ -225,6 +331,30 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
|
||||
* applied when dequeues from a specific channel are enabled.
|
||||
*/
|
||||
qbman_write_register(p, QBMAN_CINH_SWP_SDQCR, 0);
|
||||
|
||||
p->eqcr.pi_ring_size = 8;
|
||||
if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
|
||||
p->eqcr.pi_ring_size = 32;
|
||||
qbman_swp_enqueue_ptr =
|
||||
qbman_swp_enqueue_mem_back;
|
||||
qbman_swp_enqueue_multiple_ptr =
|
||||
qbman_swp_enqueue_multiple_mem_back;
|
||||
qbman_swp_enqueue_multiple_desc_ptr =
|
||||
qbman_swp_enqueue_multiple_desc_mem_back;
|
||||
qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
|
||||
qbman_swp_dqrr_next_ptr = qbman_swp_dqrr_next_mem_back;
|
||||
qbman_swp_release_ptr = qbman_swp_release_mem_back;
|
||||
}
|
||||
|
||||
for (mask_size = p->eqcr.pi_ring_size; mask_size > 0; mask_size >>= 1)
|
||||
p->eqcr.pi_ci_mask = (p->eqcr.pi_ci_mask << 1) + 1;
|
||||
eqcr_pi = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_PI);
|
||||
p->eqcr.pi = eqcr_pi & p->eqcr.pi_ci_mask;
|
||||
p->eqcr.pi_vb = eqcr_pi & QB_VALID_BIT;
|
||||
p->eqcr.ci = qbman_read_register(p, QBMAN_CINH_SWP_EQCR_CI)
|
||||
& p->eqcr.pi_ci_mask;
|
||||
p->eqcr.available = p->eqcr.pi_ring_size;
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
@ -378,6 +508,7 @@ enum qb_enqueue_commands {
|
||||
#define QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT 2
|
||||
#define QB_ENQUEUE_CMD_IRQ_ON_DISPATCH_SHIFT 3
|
||||
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
|
||||
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
|
||||
|
||||
/**
|
||||
* qbman_eq_desc_clear() - Clear the contents of a descriptor to
|
||||
@ -453,8 +584,9 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
|
||||
QMAN_RT_MODE);
|
||||
}
|
||||
|
||||
#define QB_RT_BIT ((u32)0x100)
|
||||
/**
|
||||
* qbman_swp_enqueue() - Issue an enqueue command
|
||||
* qbman_swp_enqueue_direct() - Issue an enqueue command
|
||||
* @s: the software portal used for enqueue
|
||||
* @d: the enqueue descriptor
|
||||
* @fd: the frame descriptor to be enqueued
|
||||
@ -464,30 +596,351 @@ static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
|
||||
*
|
||||
* Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
|
||||
*/
|
||||
int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
|
||||
const struct dpaa2_fd *fd)
|
||||
static
|
||||
int qbman_swp_enqueue_direct(struct qbman_swp *s,
|
||||
const struct qbman_eq_desc *d,
|
||||
const struct dpaa2_fd *fd)
|
{
	struct qbman_eq_desc *p;
	u32 eqar = qbman_read_register(s, QBMAN_CINH_SWP_EQAR);
	int flags = 0;
	int ret = qbman_swp_enqueue_multiple_direct(s, d, fd, &flags, 1);

	if (!EQAR_SUCCESS(eqar))
		return -EBUSY;
	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)));
	memcpy(&p->dca, &d->dca, 31);
	memcpy(&p->fd, fd, sizeof(*fd));
/**
 * qbman_swp_enqueue_mem_back() - Issue an enqueue command
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Please note that 'fd' should only be NULL if the "action" of the
 * descriptor is "orp_hole" or "orp_nesn".
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static
int qbman_swp_enqueue_mem_back(struct qbman_swp *s,
			       const struct qbman_eq_desc *d,
			       const struct dpaa2_fd *fd)
{
	int flags = 0;
	int ret = qbman_swp_enqueue_multiple_mem_back(s, d, fd, &flags, 1);

	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		/* Set the verb byte, have to substitute in the valid-bit */
		dma_wmb();
		p->verb = d->verb | EQAR_VB(eqar);
	} else {
		p->verb = d->verb | EQAR_VB(eqar);
		dma_wmb();
		qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
	if (ret >= 0)
		ret = 0;
	else
		ret = -EBUSY;
	return ret;
	}

/**
 * qbman_swp_enqueue_multiple_direct() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)d;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	spin_lock(&s->access_spinlock);
	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;

	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);

		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	return 0;
	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (size_t)s->addr_cena;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}
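The available-entry math above relies on qm_cyc_diff() to compute the cyclic distance between the cached consumer index and its freshly read value. The helper itself is not part of this hunk; a minimal sketch, assuming the (ringsize, first, last) semantics implied by the callers above:

/* Hypothetical illustration of the cyclic-difference helper used above. */
static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
{
	/* 'first' is included, 'last' is excluded */
	if (first <= last)
		return last - first;
	/* the index wrapped between the two snapshots */
	return ringsize + last - first;
}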

/**
 * qbman_swp_enqueue_multiple_mem_back() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
					const struct qbman_eq_desc *d,
					const struct dpaa2_fd *fd,
					uint32_t *flags,
					int num_frames)
{
	uint32_t *p = NULL;
	const uint32_t *cl = (uint32_t *)(d);
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	unsigned long irq_flags;

	spin_lock(&s->access_spinlock);
	local_irq_save(irq_flags);

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = __raw_readl(p) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available) {
			local_irq_restore(irq_flags);
			spin_unlock(&s->access_spinlock);
			return 0;
		}
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		p[0] = cl[0] | s->eqcr.pi_vb;
		if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
			struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;

			d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
				((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
		}
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}
	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);
	local_irq_restore(irq_flags);
	spin_unlock(&s->access_spinlock);

	return num_enqueued;
}
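Both multi-enqueue variants publish entries with the same valid-bit protocol: the verb word is written last, carrying s->eqcr.pi_vb, and the bit's polarity flips each time the producer index wraps, so hardware can tell fresh commands from stale ring contents. A toy model of the producer side, under the assumption of a power-of-two ring (names are illustrative, not from this driver):

#include <stdint.h>

#define TOY_VALID_BIT ((uint32_t)0x80)

/* Toy producer: write one command word per slot, flipping the valid
 * bit on every wrap so stale slots are never mistaken for new ones. */
static void toy_publish(uint32_t *ring, uint32_t ring_mask,
			uint32_t *pi, uint32_t *vb, uint32_t verb)
{
	ring[*pi & ring_mask] = verb | *vb;	/* verb carries current polarity */
	(*pi)++;
	if (!(*pi & ring_mask))			/* wrapped: flip expected polarity */
		*vb ^= TOY_VALID_BIT;
}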

/**
 * qbman_swp_enqueue_multiple_desc_direct() - Issue a multi enqueue command
 * using multiple enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;
	uint64_t addr_cena;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI;
		s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	dma_wmb();

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	/* Flush all the cacheline without load/store in between */
	eqcr_pi = s->eqcr.pi;
	addr_cena = (uint64_t)s->addr_cena;
	for (i = 0; i < num_enqueued; i++)
		eqcr_pi++;
	s->eqcr.pi = eqcr_pi & full_mask;

	return num_enqueued;
}

/**
 * qbman_swp_enqueue_multiple_desc_mem_back() - Issue a multi enqueue command
 * using multiple enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static
int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
					     const struct qbman_eq_desc *d,
					     const struct dpaa2_fd *fd,
					     int num_frames)
{
	uint32_t *p;
	const uint32_t *cl;
	uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
	int i, num_enqueued = 0;

	half_mask = (s->eqcr.pi_ci_mask>>1);
	full_mask = s->eqcr.pi_ci_mask;
	if (!s->eqcr.available) {
		eqcr_ci = s->eqcr.ci;
		p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
		s->eqcr.ci = __raw_readl(p) & full_mask;
		s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
						eqcr_ci, s->eqcr.ci);
		if (!s->eqcr.available)
			return 0;
	}

	eqcr_pi = s->eqcr.pi;
	num_enqueued = (s->eqcr.available < num_frames) ?
			s->eqcr.available : num_frames;
	s->eqcr.available -= num_enqueued;
	/* Fill in the EQCR ring */
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		/* Skip copying the verb */
		memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
		memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
		       &fd[i], sizeof(*fd));
		eqcr_pi++;
	}

	/* Set the verb byte, have to substitute in the valid-bit */
	eqcr_pi = s->eqcr.pi;
	for (i = 0; i < num_enqueued; i++) {
		p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
		cl = (uint32_t *)(&d[i]);
		p[0] = cl[0] | s->eqcr.pi_vb;
		eqcr_pi++;
		if (!(eqcr_pi & half_mask))
			s->eqcr.pi_vb ^= QB_VALID_BIT;
	}

	s->eqcr.pi = eqcr_pi & full_mask;

	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_EQCR_PI,
			     (QB_RT_BIT)|(s->eqcr.pi)|s->eqcr.pi_vb);

	return num_enqueued;
}

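For callers, the multi-descriptor path pairs one minimal descriptor with each frame. A hedged usage sketch — portal setup elided, and qbman_eq_desc_clear()/qbman_eq_desc_set_fq() assumed from the driver's descriptor API (only the latter is declared in this excerpt):

/* Hedged sketch: enqueue a small batch, one descriptor per frame. */
static int example_batch_enqueue(struct qbman_swp *swp, u32 fqid,
				 const struct dpaa2_fd *fds, int n)
{
	struct qbman_eq_desc eqd[8];
	int i, sent = 0;

	if (n > 8)
		n = 8;
	for (i = 0; i < n; i++) {
		qbman_eq_desc_clear(&eqd[i]);		/* assumed helper */
		qbman_eq_desc_set_fq(&eqd[i], fqid);
	}
	/* The call may accept fewer than requested when the EQCR is short
	 * on space, so loop until the whole batch is in. */
	while (sent < n) {
		int ret = qbman_swp_enqueue_multiple_desc(swp, eqd + sent,
							  fds + sent, n - sent);
		if (ret < 0)
			return ret;
		sent += ret;
	}
	return sent;
}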
/* Static (push) dequeue */
@ -645,7 +1098,7 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
}

/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * qbman_swp_pull_direct() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 * the set of qbman_pull_desc_set_*() calls
@ -653,7 +1106,44 @@ void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
static
int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

	if (!atomic_dec_and_test(&s->vdq.available)) {
		atomic_inc(&s->vdq.available);
		return -EBUSY;
	}
	s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
	p->numf = d->numf;
	p->tok = QMAN_DQ_TOKEN_VALID;
	p->dq_src = d->dq_src;
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;
	dma_wmb();
	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;

	return 0;
}

/**
 * qbman_swp_pull_mem_back() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 * the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static
int qbman_swp_pull_mem_back(struct qbman_swp *s, struct qbman_pull_desc *d)
{
	struct qbman_pull_desc *p;

@ -672,17 +1162,11 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
	p->rsp_addr = d->rsp_addr;
	p->rsp_addr_virt = d->rsp_addr_virt;

	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb();
		/* Set the verb byte, have to substitute in the valid-bit */
		p->verb = d->verb | s->vdq.valid_bit;
		s->vdq.valid_bit ^= QB_VALID_BIT;
	} else {
		p->verb = d->verb | s->vdq.valid_bit;
		s->vdq.valid_bit ^= QB_VALID_BIT;
		dma_wmb();
		qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
	}
	/* Set the verb byte, have to substitute in the valid-bit */
	p->verb = d->verb | s->vdq.valid_bit;
	s->vdq.valid_bit ^= QB_VALID_BIT;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);

	return 0;
}
@ -690,14 +1174,14 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
#define QMAN_DQRR_PI_MASK 0xf

/**
 * qbman_swp_dqrr_next() - Get an valid DQRR entry
 * qbman_swp_dqrr_next_direct() - Get an valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
@ -740,10 +1224,99 @@ const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
	 * If the valid-bit isn't of the expected polarity, nothing there. Note,
	 * in the DQRR reset bug workaround, we shouldn't need to skip these
	 * check, because we've already determined that a new entry is available
	 * and we've invalidated the cacheline before reading it, so the
	 * valid-bit behaviour is repaired and should tell us what we already
	 * knew from reading PI.
	 */
	if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
		return NULL;
	}
	/*
	 * There's something there. Move "next_idx" attention to the next ring
	 * entry (and prefetch it) before returning what we found.
	 */
	s->dqrr.next_idx++;
	s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */
	if (!s->dqrr.next_idx)
		s->dqrr.valid_bit ^= QB_VALID_BIT;

	/*
	 * If this is the final response to a volatile dequeue command
	 * indicate that the vdq is available
	 */
	flags = p->dq.stat;
	response_verb = verb & QBMAN_RESULT_MASK;
	if ((response_verb == QBMAN_RESULT_DQ) &&
	    (flags & DPAA2_DQ_STAT_VOLATILE) &&
	    (flags & DPAA2_DQ_STAT_EXPIRED))
		atomic_inc(&s->vdq.available);

	prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));

	return p;
}
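An entry returned here must eventually be handed back with qbman_swp_dqrr_consume(), declared in the header below. A minimal polling sketch under that assumption:

/* Hedged sketch: drain whatever the DQRR currently holds. */
static void example_drain_dqrr(struct qbman_swp *swp)
{
	const struct dpaa2_dq *dq;

	while ((dq = qbman_swp_dqrr_next(swp)) != NULL) {
		/* ... inspect the dequeue result here ... */
		qbman_swp_dqrr_consume(swp, dq);	/* hand the entry back */
	}
}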

/**
 * qbman_swp_dqrr_next_mem_back() - Get an valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
{
	u32 verb;
	u32 response_verb;
	u32 flags;
	struct dpaa2_dq *p;

	/* Before using valid-bit to detect if something is there, we have to
	 * handle the case of the DQRR reset bug...
	 */
	if (unlikely(s->dqrr.reset_bug)) {
		/*
		 * We pick up new entries by cache-inhibited producer index,
		 * which means that a non-coherent mapping would require us to
		 * invalidate and read *only* once that PI has indicated that
		 * there's an entry here. The first trip around the DQRR ring
		 * will be much less efficient than all subsequent trips around
		 * it...
		 */
		u8 pi = qbman_read_register(s, QBMAN_CINH_SWP_DQPI) &
			QMAN_DQRR_PI_MASK;

		/* there are new entries if pi != next_idx */
		if (pi == s->dqrr.next_idx)
			return NULL;

		/*
		 * if next_idx is/was the last ring index, and 'pi' is
		 * different, we can disable the workaround as all the ring
		 * entries have now been DMA'd to so valid-bit checking is
		 * repaired. Note: this logic needs to be based on next_idx
		 * (which increments one at a time), rather than on pi (which
		 * can burst and wrap-around between our snapshots of it).
		 */
		if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) {
			pr_debug("next_idx=%d, pi=%d, clear reset bug\n",
				 s->dqrr.next_idx, pi);
			s->dqrr.reset_bug = 0;
		}
		prefetch(qbman_get_cmd(s,
				       QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
	}

	p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
	verb = p->dq.verb;

	/*
@ -872,7 +1445,7 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
#define RAR_SUCCESS(rar) ((rar) & 0x100)

/**
 * qbman_swp_release() - Issue a buffer release command
 * qbman_swp_release_direct() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: a pointer pointing to the buffer address to be released
@ -880,8 +1453,9 @@ void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const u64 *buffers, unsigned int num_buffers)
int qbman_swp_release_direct(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
@ -895,28 +1469,59 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		return -EBUSY;

	/* Start the release command */
	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
	else
		p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		/*
		 * Set the verb byte, have to substitute in the valid-bit
		 * and the number of buffers.
		 */
		dma_wmb();
		p->verb = d->verb | RAR_VB(rar) | num_buffers;
	} else {
		p->verb = d->verb | RAR_VB(rar) | num_buffers;
		dma_wmb();
		qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
				     RAR_IDX(rar) * 4, QMAN_RT_MODE);
	}
	/*
	 * Set the verb byte, have to substitute in the valid-bit
	 * and the number of buffers.
	 */
	dma_wmb();
	p->verb = d->verb | RAR_VB(rar) | num_buffers;

	return 0;
}

/**
 * qbman_swp_release_mem_back() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
int qbman_swp_release_mem_back(struct qbman_swp *s,
			       const struct qbman_release_desc *d,
			       const u64 *buffers, unsigned int num_buffers)
{
	int i;
	struct qbman_release_desc *p;
	u32 rar;

	if (!num_buffers || (num_buffers > 7))
		return -EINVAL;

	rar = qbman_read_register(s, QBMAN_CINH_SWP_RAR);
	if (!RAR_SUCCESS(rar))
		return -EBUSY;

	/* Start the release command */
	p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));

	/* Copy the caller's buffer pointers to the command */
	for (i = 0; i < num_buffers; i++)
		p->buf[i] = cpu_to_le64(buffers[i]);
	p->bpid = d->bpid;

	p->verb = d->verb | RAR_VB(rar) | num_buffers;
	dma_wmb();
	qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
			     RAR_IDX(rar) * 4, QMAN_RT_MODE);

	return 0;
}

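Both release variants return -EBUSY when the RCR has no free slot, so callers typically retry. A hedged sketch of that pattern, using only qbman_swp_release() and the descriptor setters declared in the header below:

/* Hedged sketch: release up to 7 buffers, retrying while the ring is busy. */
static int example_release(struct qbman_swp *swp, u16 bpid,
			   const u64 *buffers, unsigned int num)
{
	struct qbman_release_desc rd;
	int ret;

	qbman_release_desc_clear(&rd);
	qbman_release_desc_set_bpid(&rd, bpid);
	do {
		ret = qbman_swp_release(swp, &rd, buffers, num);
	} while (ret == -EBUSY);
	return ret;
}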
@ -9,6 +9,13 @@

#include <soc/fsl/dpaa2-fd.h>

#define QMAN_REV_4000	0x04000000
#define QMAN_REV_4100	0x04010000
#define QMAN_REV_4101	0x04010001
#define QMAN_REV_5000	0x05000000

#define QMAN_REV_MASK	0xffff0000

struct dpaa2_dq;
struct qbman_swp;

@ -81,6 +88,10 @@ struct qbman_eq_desc {
	u8 wae;
	u8 rspid;
	__le64 rsp_addr;
};

struct qbman_eq_desc_with_fd {
	struct qbman_eq_desc desc;
	u8 fd[32];
};

@ -132,8 +143,48 @@ struct qbman_swp {
		u8 dqrr_size;
		int reset_bug; /* indicates dqrr reset workaround is needed */
	} dqrr;

	struct {
		u32 pi;
		u32 pi_vb;
		u32 pi_ring_size;
		u32 pi_ci_mask;
		u32 ci;
		int available;
		u32 pend;
		u32 no_pfdr;
	} eqcr;

	spinlock_t access_spinlock;
};

/* Function pointers */
extern
int (*qbman_swp_enqueue_ptr)(struct qbman_swp *s,
			     const struct qbman_eq_desc *d,
			     const struct dpaa2_fd *fd);
extern
int (*qbman_swp_enqueue_multiple_ptr)(struct qbman_swp *s,
				      const struct qbman_eq_desc *d,
				      const struct dpaa2_fd *fd,
				      uint32_t *flags,
				      int num_frames);
extern
int (*qbman_swp_enqueue_multiple_desc_ptr)(struct qbman_swp *s,
					   const struct qbman_eq_desc *d,
					   const struct dpaa2_fd *fd,
					   int num_frames);
extern
int (*qbman_swp_pull_ptr)(struct qbman_swp *s, struct qbman_pull_desc *d);
extern
const struct dpaa2_dq *(*qbman_swp_dqrr_next_ptr)(struct qbman_swp *s);
extern
int (*qbman_swp_release_ptr)(struct qbman_swp *s,
			     const struct qbman_release_desc *d,
			     const u64 *buffers,
			     unsigned int num_buffers);

/* Functions */
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
@ -158,9 +209,6 @@ void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
				 enum qbman_pull_type_e dct);

int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);

const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);

int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
@ -172,15 +220,11 @@ void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
			  u32 qd_bin, u32 qd_prio);

int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
		      const struct dpaa2_fd *fd);

void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);

int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
		      const u64 *buffers, unsigned int num_buffers);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
		      unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
@ -193,6 +237,61 @@ void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);

/**
 * qbman_swp_enqueue() - Issue an enqueue command
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: the frame descriptor to be enqueued
 *
 * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready.
 */
static inline int
qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
		  const struct dpaa2_fd *fd)
{
	return qbman_swp_enqueue_ptr(s, d, fd);
}

/**
 * qbman_swp_enqueue_multiple() - Issue a multi enqueue command
 * using one enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: the enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @flags: table pointer of QBMAN_ENQUEUE_FLAG_DCA flags, not used if NULL
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple(struct qbman_swp *s,
			   const struct qbman_eq_desc *d,
			   const struct dpaa2_fd *fd,
			   uint32_t *flags,
			   int num_frames)
{
	return qbman_swp_enqueue_multiple_ptr(s, d, fd, flags, num_frames);
}

/**
 * qbman_swp_enqueue_multiple_desc() - Issue a multi enqueue command
 * using multiple enqueue descriptor
 * @s: the software portal used for enqueue
 * @d: table of minimal enqueue descriptor
 * @fd: table pointer of frame descriptor table to be enqueued
 * @num_frames: number of fd to be enqueued
 *
 * Return the number of fd enqueued, or a negative error number.
 */
static inline int
qbman_swp_enqueue_multiple_desc(struct qbman_swp *s,
				const struct qbman_eq_desc *d,
				const struct dpaa2_fd *fd,
				int num_frames)
{
	return qbman_swp_enqueue_multiple_desc_ptr(s, d, fd, num_frames);
}

/**
 * qbman_result_is_DQ() - check if the dequeue result is a dequeue response
 * @dq: the dequeue result to be checked
@ -504,4 +603,49 @@ int qbman_bp_query(struct qbman_swp *s, u16 bpid,

u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a);

/**
 * qbman_swp_release() - Issue a buffer release command
 * @s: the software portal object
 * @d: the release descriptor
 * @buffers: a pointer pointing to the buffer address to be released
 * @num_buffers: number of buffers to be released, must be less than 8
 *
 * Return 0 for success, -EBUSY if the release command ring is not ready.
 */
static inline int qbman_swp_release(struct qbman_swp *s,
				    const struct qbman_release_desc *d,
				    const u64 *buffers,
				    unsigned int num_buffers)
{
	return qbman_swp_release_ptr(s, d, buffers, num_buffers);
}

/**
 * qbman_swp_pull() - Issue the pull dequeue command
 * @s: the software portal object
 * @d: the software portal descriptor which has been configured with
 * the set of qbman_pull_desc_set_*() calls
 *
 * Return 0 for success, and -EBUSY if the software portal is not ready
 * to do pull dequeue.
 */
static inline int qbman_swp_pull(struct qbman_swp *s,
				 struct qbman_pull_desc *d)
{
	return qbman_swp_pull_ptr(s, d);
}

/**
 * qbman_swp_dqrr_next() - Get an valid DQRR entry
 * @s: the software portal object
 *
 * Return NULL if there are no unconsumed DQRR entries. Return a DQRR entry
 * only once, so repeated calls can return a sequence of DQRR entries, without
 * requiring they be consumed immediately or in any particular order.
 */
static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
{
	return qbman_swp_dqrr_next_ptr(s);
}

#endif /* __FSL_QBMAN_PORTAL_H */
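The *_ptr indirection above lets the portal code pick either the _direct or _mem_back implementation once, rather than testing qman_version on every I/O. A hedged sketch of how such a one-time binding could look — the actual assignment site (presumably qbman_swp_init()) is not part of this excerpt:

/* Hedged sketch of one-time dispatch binding, keyed off the QMan revision. */
static void example_bind_ops(u32 qman_version)
{
	if ((qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
		qbman_swp_enqueue_ptr = qbman_swp_enqueue_mem_back;
		qbman_swp_pull_ptr = qbman_swp_pull_mem_back;
		/* ...and likewise for the multiple/dqrr/release pointers */
	} else {
		qbman_swp_enqueue_ptr = qbman_swp_enqueue_direct;
		qbman_swp_pull_ptr = qbman_swp_pull_direct;
	}
}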
@ -423,7 +423,7 @@ static void qe_upload_microcode(const void *base,
		qe_iowrite32be(be32_to_cpu(code[i]), &qe_immr->iram.idata);

	/* Set I-RAM Ready Register */
	qe_iowrite32be(be32_to_cpu(QE_IRAM_READY), &qe_immr->iram.iready);
	qe_iowrite32be(QE_IRAM_READY, &qe_immr->iram.iready);
}

/*
@ -525,7 +525,7 @@ int qe_upload_firmware(const struct qe_firmware *firmware)
	 */
	memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
	strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
	qe_firmware_info.extended_modes = firmware->extended_modes;
	qe_firmware_info.extended_modes = be64_to_cpu(firmware->extended_modes);
	memcpy(qe_firmware_info.vtraps, firmware->vtraps,
	       sizeof(firmware->vtraps));

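The qe.c fixes follow one rule: endianness conversion belongs on exactly one side of the I/O accessor. qe_iowrite32be() already swaps to big-endian, so feeding it be32_to_cpu(...) double-converted; conversely, a __be64 struct field must be converted before landing in a CPU-endian variable. A hedged sketch of the distinction:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_fw {
	__be64 extended_modes;	/* big-endian, as stored in the firmware blob */
};

static u64 example_read_modes(const struct example_fw *fw)
{
	/* Convert once, at the CPU boundary; never before a *be() writer. */
	return be64_to_cpu(fw->extended_modes);
}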
@ -46,7 +46,7 @@ int cpm_muram_init(void)
{
	struct device_node *np;
	struct resource r;
	u32 zero[OF_MAX_ADDR_CELLS] = {};
	__be32 zero[OF_MAX_ADDR_CELLS] = {};
	resource_size_t max = 0;
	int i = 0;
	int ret = 0;

@ -44,7 +44,7 @@

struct qe_ic {
	/* Control registers offset */
	u32 __iomem *regs;
	__be32 __iomem *regs;

	/* The remapper for this QEIC */
	struct irq_domain *irqhost;

@ -632,7 +632,7 @@ int ucc_set_tdm_rxtx_sync(u32 tdm_num, enum qe_clock clock,
{
	int source;
	u32 shift;
	struct qe_mux *qe_mux_reg;
	struct qe_mux __iomem *qe_mux_reg;

	qe_mux_reg = &qe_immr->qmx;


@ -72,7 +72,7 @@ EXPORT_SYMBOL(ucc_slow_restart_tx);

void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
	struct ucc_slow *us_regs;
	struct ucc_slow __iomem *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;
@ -93,7 +93,7 @@ EXPORT_SYMBOL(ucc_slow_enable);

void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
	struct ucc_slow *us_regs;
	struct ucc_slow __iomem *us_regs;
	u32 gumr_l;

	us_regs = uccs->us_regs;
@ -122,7 +122,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
	u32 i;
	struct ucc_slow __iomem *us_regs;
	u32 gumr;
	struct qe_bd *bd;
	struct qe_bd __iomem *bd;
	u32 id;
	u32 command;
	int ret = 0;
@ -168,16 +168,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
		return -ENOMEM;
	}

	uccs->saved_uccm = 0;
	uccs->p_rx_frame = 0;
	us_regs = uccs->us_regs;
	uccs->p_ucce = (u16 *) & (us_regs->ucce);
	uccs->p_uccm = (u16 *) & (us_regs->uccm);
#ifdef STATISTICS
	uccs->rx_frames = 0;
	uccs->tx_frames = 0;
	uccs->rx_discarded = 0;
#endif /* STATISTICS */
	uccs->p_ucce = &us_regs->ucce;
	uccs->p_uccm = &us_regs->uccm;

	/* Get PRAM base */
	uccs->us_pram_offset =
@ -231,24 +224,24 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
		/* clear bd buffer */
		qe_iowrite32be(0, &bd->buf);
		/* set bd status and length */
		qe_iowrite32be(0, (u32 *)bd);
		qe_iowrite32be(0, (u32 __iomem *)bd);
		bd++;
	}
	/* for last BD set Wrap bit */
	qe_iowrite32be(0, &bd->buf);
	qe_iowrite32be(cpu_to_be32(T_W), (u32 *)bd);
	qe_iowrite32be(T_W, (u32 __iomem *)bd);

	/* Init Rx bds */
	bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
	for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
		/* set bd status and length */
		qe_iowrite32be(0, (u32 *)bd);
		qe_iowrite32be(0, (u32 __iomem *)bd);
		/* clear bd buffer */
		qe_iowrite32be(0, &bd->buf);
		bd++;
	}
	/* for last BD set Wrap bit */
	qe_iowrite32be(cpu_to_be32(R_W), (u32 *)bd);
	qe_iowrite32be(R_W, (u32 __iomem *)bd);
	qe_iowrite32be(0, &bd->buf);

	/* Set GUMR (For more details see the hardware spec.). */
@ -273,8 +266,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
	qe_iowrite32be(gumr, &us_regs->gumr_h);

	/* gumr_l */
	gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
		us_info->diag | us_info->mode;
	gumr = (u32)us_info->tdcr | (u32)us_info->rdcr | (u32)us_info->tenc |
	       (u32)us_info->renc | (u32)us_info->diag | (u32)us_info->mode;
	if (us_info->tci)
		gumr |= UCC_SLOW_GUMR_L_TCI;
	if (us_info->rinv)
@ -289,8 +282,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc

	/* if the data is in cachable memory, the 'global' */
	/* in the function code should be set. */
	uccs->us_pram->tbmr = UCC_BMR_BO_BE;
	uccs->us_pram->rbmr = UCC_BMR_BO_BE;
	qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->tbmr);
	qe_iowrite8(UCC_BMR_BO_BE, &uccs->us_pram->rbmr);

	/* rbase, tbase are offsets from MURAM base */
	qe_iowrite16be(uccs->rx_base_offset, &uccs->us_pram->rbase);

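The ucc/qe_ic changes are sparse-driven: pointers into device registers gain the __iomem annotation, plain C dereferences of them are replaced with accessors, and casts preserve the annotation. A hedged sketch of the before/after shape, using the standard iowrite32be() accessor:

#include <linux/io.h>

struct example_regs {
	__be32 ctrl;
};

/* Wrong: a direct store through an __iomem pointer (sparse complains):
 *	regs->ctrl = 0;
 * Right: keep the __iomem annotation and go through an accessor. */
static void example_clear_ctrl(struct example_regs __iomem *regs)
{
	iowrite32be(0, &regs->ctrl);
}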
@ -10,11 +10,20 @@ config IMX_GPCV2_PM_DOMAINS

config IMX_SCU_SOC
	bool "i.MX System Controller Unit SoC info support"
	depends on IMX_SCU || COMPILE_TEST
	depends on IMX_SCU
	select SOC_BUS
	help
	  If you say yes here you get support for the NXP i.MX System
	  Controller Unit SoC info module, it will provide the SoC info
	  like SoC family, ID and revision etc.

config SOC_IMX8M
	bool "i.MX8M SoC family support"
	depends on ARCH_MXC || COMPILE_TEST
	default ARCH_MXC && ARM64
	help
	  If you say yes here you get support for the NXP i.MX8M family
	  support, it will provide the SoC info like SoC family,
	  ID and revision etc.

endmenu

@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_ARCH_MXC) += soc-imx8.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o
@ -87,8 +87,8 @@ static int imx6_pm_domain_power_off(struct generic_pm_domain *genpd)
static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
{
	struct imx_pm_domain *pd = to_imx_pm_domain(genpd);
	int i, ret, sw, sw2iso;
	u32 val;
	int i, ret;
	u32 val, req;

	if (pd->supply) {
		ret = regulator_enable(pd->supply);
@ -107,17 +107,18 @@ static int imx6_pm_domain_power_on(struct generic_pm_domain *genpd)
	regmap_update_bits(pd->regmap, pd->reg_offs + GPC_PGC_CTRL_OFFS,
			   0x1, 0x1);

	/* Read ISO and ISO2SW power up delays */
	regmap_read(pd->regmap, pd->reg_offs + GPC_PGC_PUPSCR_OFFS, &val);
	sw = val & 0x3f;
	sw2iso = (val >> 8) & 0x3f;

	/* Request GPC to power up domain */
	val = BIT(pd->cntr_pdn_bit + 1);
	regmap_update_bits(pd->regmap, GPC_CNTR, val, val);
	req = BIT(pd->cntr_pdn_bit + 1);
	regmap_update_bits(pd->regmap, GPC_CNTR, req, req);

	/* Wait ISO + ISO2SW IPG clock cycles */
	udelay(DIV_ROUND_UP(sw + sw2iso, pd->ipg_rate_mhz));
	/* Wait for the PGC to handle the request */
	ret = regmap_read_poll_timeout(pd->regmap, GPC_CNTR, val, !(val & req),
				       1, 50);
	if (ret)
		pr_err("powerup request on domain %s timed out\n", genpd->name);

	/* Wait for reset to propagate through peripherals */
	usleep_range(5, 10);

	/* Disable reset clocks for all devices in the domain */
	for (i = 0; i < pd->num_clks; i++)
@ -343,6 +344,7 @@ static const struct regmap_config imx_gpc_regmap_config = {
	.rd_table = &access_table,
	.wr_table = &access_table,
	.max_register = 0x2ac,
	.fast_io = true,
};

static struct generic_pm_domain *imx_gpc_onecell_domains[] = {

@ -14,6 +14,7 @@
#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/sizes.h>
#include <dt-bindings/power/imx7-power.h>
#include <dt-bindings/power/imx8mq-power.h>

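The gpc.c change above swaps an open-coded delay for regmap_read_poll_timeout(), which re-reads a register until a condition holds or a timeout expires. A hedged, minimal usage sketch of that helper (register and bit names are illustrative):

#include <linux/regmap.h>

#define EXAMPLE_STATUS_REG	0x10	/* illustrative */
#define EXAMPLE_READY_BIT	BIT(0)	/* illustrative */

static int example_wait_ready(struct regmap *map)
{
	u32 val;

	/* Poll every 1 us, give up after 50 us; returns 0 or -ETIMEDOUT. */
	return regmap_read_poll_timeout(map, EXAMPLE_STATUS_REG, val,
					val & EXAMPLE_READY_BIT, 1, 50);
}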
@ -76,6 +76,10 @@ config QCOM_OCMEM
	  requirements. This is typically used by the GPU, camera/video, and
	  audio components on some Snapdragon SoCs.

config QCOM_PDR_HELPERS
	tristate
	select QCOM_QMI_HELPERS

config QCOM_PM
	bool "Qualcomm Power Management"
	depends on ARCH_QCOM && !ARM64
@ -88,7 +92,6 @@ config QCOM_PM

config QCOM_QMI_HELPERS
	tristate
	depends on ARCH_QCOM || COMPILE_TEST
	depends on NET

config QCOM_RMTFS_MEM
@ -197,6 +200,8 @@ config QCOM_APR
	tristate "Qualcomm APR Bus (Asynchronous Packet Router)"
	depends on ARCH_QCOM || COMPILE_TEST
	depends on RPMSG
	depends on NET
	select QCOM_PDR_HELPERS
	help
	  Enable APR IPC protocol support between
	  application processor and QDSP6. APR is

@ -7,6 +7,7 @@ obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_MDT_LOADER) += mdt_loader.o
obj-$(CONFIG_QCOM_OCMEM) += ocmem.o
obj-$(CONFIG_QCOM_PDR_HELPERS) += pdr_interface.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
@ -11,6 +11,7 @@
#include <linux/workqueue.h>
#include <linux/of_device.h>
#include <linux/soc/qcom/apr.h>
#include <linux/soc/qcom/pdr.h>
#include <linux/rpmsg.h>
#include <linux/of.h>

@ -21,6 +22,7 @@ struct apr {
	spinlock_t rx_lock;
	struct idr svcs_idr;
	int dest_domain_id;
	struct pdr_handle *pdr;
	struct workqueue_struct *rxwq;
	struct work_struct rx_work;
	struct list_head rx_list;
@ -289,6 +291,9 @@ static int apr_add_device(struct device *dev, struct device_node *np,
			  id->svc_id + 1, GFP_ATOMIC);
	spin_unlock(&apr->svcs_lock);

	of_property_read_string_index(np, "qcom,protection-domain",
				      1, &adev->service_path);

	dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));

	ret = device_register(&adev->dev);
@ -300,14 +305,75 @@ static int apr_add_device(struct device *dev, struct device_node *np,
	return ret;
}

static void of_register_apr_devices(struct device *dev)
static int of_apr_add_pd_lookups(struct device *dev)
{
	const char *service_name, *service_path;
	struct apr *apr = dev_get_drvdata(dev);
	struct device_node *node;
	struct pdr_service *pds;
	int ret;

	for_each_child_of_node(dev->of_node, node) {
		ret = of_property_read_string_index(node, "qcom,protection-domain",
						    0, &service_name);
		if (ret < 0)
			continue;

		ret = of_property_read_string_index(node, "qcom,protection-domain",
						    1, &service_path);
		if (ret < 0) {
			dev_err(dev, "pdr service path missing: %d\n", ret);
			return ret;
		}

		pds = pdr_add_lookup(apr->pdr, service_name, service_path);
		if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
			dev_err(dev, "pdr add lookup failed: %d\n", ret);
			return PTR_ERR(pds);
		}
	}

	return 0;
}

static void of_register_apr_devices(struct device *dev, const char *svc_path)
{
	struct apr *apr = dev_get_drvdata(dev);
	struct device_node *node;
	const char *service_path;
	int ret;

	for_each_child_of_node(dev->of_node, node) {
		struct apr_device_id id = { {0} };

		/*
		 * This function is called with svc_path NULL during
		 * apr_probe(), in which case we register any apr devices
		 * without a qcom,protection-domain specified.
		 *
		 * Then as the protection domains becomes available
		 * (if applicable) this function is again called, but with
		 * svc_path representing the service becoming available. In
		 * this case we register any apr devices with a matching
		 * qcom,protection-domain.
		 */

		ret = of_property_read_string_index(node, "qcom,protection-domain",
						    1, &service_path);
		if (svc_path) {
			/* skip APR services that are PD independent */
			if (ret)
				continue;

			/* skip APR services whose PD paths don't match */
			if (strcmp(service_path, svc_path))
				continue;
		} else {
			/* skip APR services whose PD lookups are registered */
			if (ret == 0)
				continue;
		}

		if (of_property_read_u32(node, "reg", &id.svc_id))
			continue;

@ -318,6 +384,34 @@ static void of_register_apr_devices(struct device *dev)
	}
}

static int apr_remove_device(struct device *dev, void *svc_path)
{
	struct apr_device *adev = to_apr_device(dev);

	if (svc_path && adev->service_path) {
		if (!strcmp(adev->service_path, (char *)svc_path))
			device_unregister(&adev->dev);
	} else {
		device_unregister(&adev->dev);
	}

	return 0;
}

static void apr_pd_status(int state, char *svc_path, void *priv)
{
	struct apr *apr = (struct apr *)priv;

	switch (state) {
	case SERVREG_SERVICE_STATE_UP:
		of_register_apr_devices(apr->dev, svc_path);
		break;
	case SERVREG_SERVICE_STATE_DOWN:
		device_for_each_child(apr->dev, svc_path, apr_remove_device);
		break;
	}
}

static int apr_probe(struct rpmsg_device *rpdev)
{
	struct device *dev = &rpdev->dev;
@ -343,28 +437,39 @@ static int apr_probe(struct rpmsg_device *rpdev)
		return -ENOMEM;
	}
	INIT_WORK(&apr->rx_work, apr_rxwq);

	apr->pdr = pdr_handle_alloc(apr_pd_status, apr);
	if (IS_ERR(apr->pdr)) {
		dev_err(dev, "Failed to init PDR handle\n");
		ret = PTR_ERR(apr->pdr);
		goto destroy_wq;
	}

	INIT_LIST_HEAD(&apr->rx_list);
	spin_lock_init(&apr->rx_lock);
	spin_lock_init(&apr->svcs_lock);
	idr_init(&apr->svcs_idr);
	of_register_apr_devices(dev);

	ret = of_apr_add_pd_lookups(dev);
	if (ret)
		goto handle_release;

	of_register_apr_devices(dev, NULL);

	return 0;
}

static int apr_remove_device(struct device *dev, void *null)
{
	struct apr_device *adev = to_apr_device(dev);

	device_unregister(&adev->dev);

	return 0;
handle_release:
	pdr_handle_release(apr->pdr);
destroy_wq:
	destroy_workqueue(apr->rxwq);
	return ret;
}

static void apr_remove(struct rpmsg_device *rpdev)
{
	struct apr *apr = dev_get_drvdata(&rpdev->dev);

	pdr_handle_release(apr->pdr);
	device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
	flush_workqueue(apr->rxwq);
	destroy_workqueue(apr->rxwq);

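Taken together, the apr.c changes show the intended client pattern for the new PDR helpers: allocate a handle with a status callback, add one lookup per protection domain of interest, and release the handle on teardown. A hedged standalone sketch (the service name/path strings are illustrative, not prescribed by this patch):

static void example_pd_status(int state, char *svc_path, void *priv)
{
	pr_info("PD %s is now %s\n", svc_path,
		state == SERVREG_SERVICE_STATE_UP ? "up" : "down");
}

static int example_track_pd(void)
{
	struct pdr_handle *pdr;
	struct pdr_service *pds;

	pdr = pdr_handle_alloc(example_pd_status, NULL);
	if (IS_ERR(pdr))
		return PTR_ERR(pdr);

	/* Illustrative names; real users take them from qcom,protection-domain */
	pds = pdr_add_lookup(pdr, "avs/audio", "msm/adsp/audio_pd");
	if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
		pdr_handle_release(pdr);
		return PTR_ERR(pds);
	}

	/* ... later, on teardown, drop the handle and its lookups ... */
	pdr_handle_release(pdr);
	return 0;
}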
757
drivers/soc/qcom/pdr_interface.c
Normal file
@ -0,0 +1,757 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020 The Linux Foundation. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/workqueue.h>

#include "pdr_internal.h"

struct pdr_service {
	char service_name[SERVREG_NAME_LENGTH + 1];
	char service_path[SERVREG_NAME_LENGTH + 1];

	struct sockaddr_qrtr addr;

	unsigned int instance;
	unsigned int service;
	u8 service_data_valid;
	u32 service_data;
	int state;

	bool need_notifier_register;
	bool need_notifier_remove;
	bool need_locator_lookup;
	bool service_connected;

	struct list_head node;
};

struct pdr_handle {
	struct qmi_handle locator_hdl;
	struct qmi_handle notifier_hdl;

	struct sockaddr_qrtr locator_addr;

	struct list_head lookups;
	struct list_head indack_list;

	/* control access to pdr lookup/indack lists */
	struct mutex list_lock;

	/* serialize pd status invocation */
	struct mutex status_lock;

	/* control access to the locator state */
	struct mutex lock;

	bool locator_init_complete;

	struct work_struct locator_work;
	struct work_struct notifier_work;
	struct work_struct indack_work;

	struct workqueue_struct *notifier_wq;
	struct workqueue_struct *indack_wq;

	void (*status)(int state, char *service_path, void *priv);
	void *priv;
};

struct pdr_list_node {
	enum servreg_service_state curr_state;
	u16 transaction_id;
	struct pdr_service *pds;
	struct list_head node;
};

static int pdr_locator_new_server(struct qmi_handle *qmi,
				  struct qmi_service *svc)
{
	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
					      locator_hdl);
	struct pdr_service *pds;

	/* Create a local client port for QMI communication */
	pdr->locator_addr.sq_family = AF_QIPCRTR;
	pdr->locator_addr.sq_node = svc->node;
	pdr->locator_addr.sq_port = svc->port;

	mutex_lock(&pdr->lock);
	pdr->locator_init_complete = true;
	mutex_unlock(&pdr->lock);

	/* Service pending lookup requests */
	mutex_lock(&pdr->list_lock);
	list_for_each_entry(pds, &pdr->lookups, node) {
		if (pds->need_locator_lookup)
			schedule_work(&pdr->locator_work);
	}
	mutex_unlock(&pdr->list_lock);

	return 0;
}

static void pdr_locator_del_server(struct qmi_handle *qmi,
				   struct qmi_service *svc)
{
	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
					      locator_hdl);

	mutex_lock(&pdr->lock);
	pdr->locator_init_complete = false;
	mutex_unlock(&pdr->lock);

	pdr->locator_addr.sq_node = 0;
	pdr->locator_addr.sq_port = 0;
}

static struct qmi_ops pdr_locator_ops = {
	.new_server = pdr_locator_new_server,
	.del_server = pdr_locator_del_server,
};

static int pdr_register_listener(struct pdr_handle *pdr,
				 struct pdr_service *pds,
				 bool enable)
{
	struct servreg_register_listener_resp resp;
	struct servreg_register_listener_req req;
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
			   servreg_register_listener_resp_ei,
			   &resp);
	if (ret < 0)
		return ret;

	req.enable = enable;
	strcpy(req.service_path, pds->service_path);

	ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
			       &txn, SERVREG_REGISTER_LISTENER_REQ,
			       SERVREG_REGISTER_LISTENER_REQ_LEN,
			       servreg_register_listener_req_ei,
			       &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	ret = qmi_txn_wait(&txn, 5 * HZ);
	if (ret < 0) {
		pr_err("PDR: %s register listener txn wait failed: %d\n",
		       pds->service_path, ret);
		return ret;
	}

	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("PDR: %s register listener failed: 0x%x\n",
		       pds->service_path, resp.resp.error);
		return ret;
	}

	if ((int)resp.curr_state < INT_MIN || (int)resp.curr_state > INT_MAX)
		pr_err("PDR: %s notification state invalid: 0x%x\n",
		       pds->service_path, resp.curr_state);

	pds->state = resp.curr_state;

	return 0;
}

static void pdr_notifier_work(struct work_struct *work)
{
	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
					      notifier_work);
	struct pdr_service *pds;
	int ret;

	mutex_lock(&pdr->list_lock);
	list_for_each_entry(pds, &pdr->lookups, node) {
		if (pds->service_connected) {
			if (!pds->need_notifier_register)
				continue;

			pds->need_notifier_register = false;
			ret = pdr_register_listener(pdr, pds, true);
			if (ret < 0)
				pds->state = SERVREG_SERVICE_STATE_DOWN;
		} else {
			if (!pds->need_notifier_remove)
				continue;

			pds->need_notifier_remove = false;
			pds->state = SERVREG_SERVICE_STATE_DOWN;
		}

		mutex_lock(&pdr->status_lock);
		pdr->status(pds->state, pds->service_path, pdr->priv);
		mutex_unlock(&pdr->status_lock);
	}
	mutex_unlock(&pdr->list_lock);
}

static int pdr_notifier_new_server(struct qmi_handle *qmi,
				   struct qmi_service *svc)
{
	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
					      notifier_hdl);
	struct pdr_service *pds;

	mutex_lock(&pdr->list_lock);
	list_for_each_entry(pds, &pdr->lookups, node) {
		if (pds->service == svc->service &&
		    pds->instance == svc->instance) {
			pds->service_connected = true;
			pds->need_notifier_register = true;
			pds->addr.sq_family = AF_QIPCRTR;
			pds->addr.sq_node = svc->node;
			pds->addr.sq_port = svc->port;
			queue_work(pdr->notifier_wq, &pdr->notifier_work);
		}
	}
	mutex_unlock(&pdr->list_lock);

	return 0;
}

static void pdr_notifier_del_server(struct qmi_handle *qmi,
				    struct qmi_service *svc)
{
	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
					      notifier_hdl);
	struct pdr_service *pds;

	mutex_lock(&pdr->list_lock);
	list_for_each_entry(pds, &pdr->lookups, node) {
		if (pds->service == svc->service &&
		    pds->instance == svc->instance) {
			pds->service_connected = false;
			pds->need_notifier_remove = true;
			pds->addr.sq_node = 0;
			pds->addr.sq_port = 0;
			queue_work(pdr->notifier_wq, &pdr->notifier_work);
		}
	}
	mutex_unlock(&pdr->list_lock);
}

static struct qmi_ops pdr_notifier_ops = {
	.new_server = pdr_notifier_new_server,
	.del_server = pdr_notifier_del_server,
};

static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds,
			       u16 tid)
{
	struct servreg_set_ack_resp resp;
	struct servreg_set_ack_req req;
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&pdr->notifier_hdl, &txn, servreg_set_ack_resp_ei,
			   &resp);
	if (ret < 0)
		return ret;

	req.transaction_id = tid;
	strcpy(req.service_path, pds->service_path);

	ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
			       &txn, SERVREG_SET_ACK_REQ,
			       SERVREG_SET_ACK_REQ_LEN,
			       servreg_set_ack_req_ei,
			       &req);

	/* Skip waiting for response */
	qmi_txn_cancel(&txn);
	return ret;
}

static void pdr_indack_work(struct work_struct *work)
{
	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
					      indack_work);
	struct pdr_list_node *ind, *tmp;
	struct pdr_service *pds;

	list_for_each_entry_safe(ind, tmp, &pdr->indack_list, node) {
		pds = ind->pds;
		pdr_send_indack_msg(pdr, pds, ind->transaction_id);

		mutex_lock(&pdr->status_lock);
		pds->state = ind->curr_state;
		pdr->status(pds->state, pds->service_path, pdr->priv);
		mutex_unlock(&pdr->status_lock);

		mutex_lock(&pdr->list_lock);
		list_del(&ind->node);
		mutex_unlock(&pdr->list_lock);

		kfree(ind);
	}
}

static void pdr_indication_cb(struct qmi_handle *qmi,
			      struct sockaddr_qrtr *sq,
			      struct qmi_txn *txn, const void *data)
{
	struct pdr_handle *pdr = container_of(qmi, struct pdr_handle,
					      notifier_hdl);
	const struct servreg_state_updated_ind *ind_msg = data;
	struct pdr_list_node *ind;
	struct pdr_service *pds;
	bool found = false;

	if (!ind_msg || !ind_msg->service_path[0] ||
	    strlen(ind_msg->service_path) > SERVREG_NAME_LENGTH)
		return;

	mutex_lock(&pdr->list_lock);
	list_for_each_entry(pds, &pdr->lookups, node) {
		if (strcmp(pds->service_path, ind_msg->service_path))
			continue;

		found = true;
		break;
	}
	mutex_unlock(&pdr->list_lock);

	if (!found)
		return;

	pr_info("PDR: Indication received from %s, state: 0x%x, trans-id: %d\n",
		ind_msg->service_path, ind_msg->curr_state,
		ind_msg->transaction_id);

	ind = kzalloc(sizeof(*ind), GFP_KERNEL);
	if (!ind)
		return;

	ind->transaction_id = ind_msg->transaction_id;
	ind->curr_state = ind_msg->curr_state;
	ind->pds = pds;

	mutex_lock(&pdr->list_lock);
	list_add_tail(&ind->node, &pdr->indack_list);
	mutex_unlock(&pdr->list_lock);

	queue_work(pdr->indack_wq, &pdr->indack_work);
}

static struct qmi_msg_handler qmi_indication_handler[] = {
	{
		.type = QMI_INDICATION,
		.msg_id = SERVREG_STATE_UPDATED_IND_ID,
		.ei = servreg_state_updated_ind_ei,
		.decoded_size = sizeof(struct servreg_state_updated_ind),
		.fn = pdr_indication_cb,
	},
	{}
};

static int pdr_get_domain_list(struct servreg_get_domain_list_req *req,
			       struct servreg_get_domain_list_resp *resp,
			       struct pdr_handle *pdr)
{
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(&pdr->locator_hdl, &txn,
			   servreg_get_domain_list_resp_ei, resp);
	if (ret < 0)
		return ret;

	ret = qmi_send_request(&pdr->locator_hdl,
			       &pdr->locator_addr,
			       &txn, SERVREG_GET_DOMAIN_LIST_REQ,
			       SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN,
			       servreg_get_domain_list_req_ei,
			       req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	ret = qmi_txn_wait(&txn, 5 * HZ);
	if (ret < 0) {
		pr_err("PDR: %s get domain list txn wait failed: %d\n",
		       req->service_name, ret);
		return ret;
	}

	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
		pr_err("PDR: %s get domain list failed: 0x%x\n",
		       req->service_name, resp->resp.error);
		return -EREMOTEIO;
	}

	return 0;
}

static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
{
	struct servreg_get_domain_list_resp *resp;
	struct servreg_get_domain_list_req req;
	struct servreg_location_entry *entry;
	int domains_read = 0;
	int ret, i;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/* Prepare req message */
	strcpy(req.service_name, pds->service_name);
	req.domain_offset_valid = true;
	req.domain_offset = 0;

	do {
		req.domain_offset = domains_read;
		ret = pdr_get_domain_list(&req, resp, pdr);
		if (ret < 0)
			goto out;

		for (i = domains_read; i < resp->domain_list_len; i++) {
			entry = &resp->domain_list[i];

			if (strnlen(entry->name, sizeof(entry->name)) == sizeof(entry->name))
				continue;

			if (!strcmp(entry->name, pds->service_path)) {
				pds->service_data_valid = entry->service_data_valid;
				pds->service_data = entry->service_data;
				pds->instance = entry->instance;
				goto out;
			}
		}

		/* Update ret to indicate that the service is not yet found */
		ret = -ENXIO;

		/* Always read total_domains from the response msg */
		if (resp->domain_list_len > resp->total_domains)
			resp->domain_list_len = resp->total_domains;

		domains_read += resp->domain_list_len;
	} while (domains_read < resp->total_domains);
out:
	kfree(resp);
	return ret;
}

static void pdr_notify_lookup_failure(struct pdr_handle *pdr,
				      struct pdr_service *pds,
				      int err)
{
	pr_err("PDR: service lookup for %s failed: %d\n",
	       pds->service_name, err);

	if (err == -ENXIO)
		return;

	list_del(&pds->node);
	pds->state = SERVREG_LOCATOR_ERR;
	mutex_lock(&pdr->status_lock);
	pdr->status(pds->state, pds->service_path, pdr->priv);
	mutex_unlock(&pdr->status_lock);
	kfree(pds);
}

static void pdr_locator_work(struct work_struct *work)
{
	struct pdr_handle *pdr = container_of(work, struct pdr_handle,
					      locator_work);
	struct pdr_service *pds, *tmp;
	int ret = 0;

	/* Bail out early if the SERVREG LOCATOR QMI service is not up */
	mutex_lock(&pdr->lock);
	if (!pdr->locator_init_complete) {
		mutex_unlock(&pdr->lock);
		pr_debug("PDR: SERVICE LOCATOR service not available\n");
		return;
	}
	mutex_unlock(&pdr->lock);

	mutex_lock(&pdr->list_lock);
	list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
		if (!pds->need_locator_lookup)
			continue;

		ret = pdr_locate_service(pdr, pds);
		if (ret < 0) {
			pdr_notify_lookup_failure(pdr, pds, ret);
			continue;
		}

		ret = qmi_add_lookup(&pdr->notifier_hdl, pds->service, 1,
				     pds->instance);
		if (ret < 0) {
			pdr_notify_lookup_failure(pdr, pds, ret);
			continue;
		}

		pds->need_locator_lookup = false;
	}
	mutex_unlock(&pdr->list_lock);
}

/**
 * pdr_add_lookup() - register a tracking request for a PD
 * @pdr: PDR client handle
 * @service_name: service name of the tracking request
 * @service_path: service path of the tracking request
 *
 * Registering a pdr lookup allows for tracking the life cycle of the PD.
 *
 * Return: pdr_service object on success, ERR_PTR on failure. -EALREADY is
 * returned if a lookup is already in progress for the given service path.
 */
struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
				   const char *service_name,
				   const char *service_path)
{
	struct pdr_service *pds, *tmp;
	int ret;

	if (IS_ERR_OR_NULL(pdr))
		return ERR_PTR(-EINVAL);

	if (!service_name || strlen(service_name) > SERVREG_NAME_LENGTH ||
	    !service_path || strlen(service_path) > SERVREG_NAME_LENGTH)
		return ERR_PTR(-EINVAL);

	pds = kzalloc(sizeof(*pds), GFP_KERNEL);
	if (!pds)
		return ERR_PTR(-ENOMEM);

	pds->service = SERVREG_NOTIFIER_SERVICE;
	strcpy(pds->service_name, service_name);
	strcpy(pds->service_path, service_path);
	pds->need_locator_lookup = true;

	mutex_lock(&pdr->list_lock);
	list_for_each_entry(tmp, &pdr->lookups, node) {
		if (strcmp(tmp->service_path, service_path))
			continue;

		mutex_unlock(&pdr->list_lock);
		ret = -EALREADY;
		goto err;
	}

	list_add(&pds->node, &pdr->lookups);
	mutex_unlock(&pdr->list_lock);

	schedule_work(&pdr->locator_work);

	return pds;
err:
	kfree(pds);
	return ERR_PTR(ret);
|
||||
}
|
||||
EXPORT_SYMBOL(pdr_add_lookup);
|
||||
|
||||
/**
|
||||
* pdr_restart_pd() - restart PD
|
||||
* @pdr: PDR client handle
|
||||
* @pds: PD service handle
|
||||
*
|
||||
* Restarts the PD tracked by the PDR client handle for a given service path.
|
||||
*
|
||||
* Return: 0 on success, negative errno on failure.
|
||||
*/
|
||||
int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
|
||||
{
|
||||
struct servreg_restart_pd_resp resp;
|
||||
struct servreg_restart_pd_req req;
|
||||
struct sockaddr_qrtr addr;
|
||||
struct pdr_service *tmp;
|
||||
struct qmi_txn txn;
|
||||
int ret;
|
||||
|
||||
if (IS_ERR_OR_NULL(pdr) || IS_ERR_OR_NULL(pds))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&pdr->list_lock);
|
||||
list_for_each_entry(tmp, &pdr->lookups, node) {
|
||||
if (tmp != pds)
|
||||
continue;
|
||||
|
||||
if (!pds->service_connected)
|
||||
break;
|
||||
|
||||
/* Prepare req message */
|
||||
strcpy(req.service_path, pds->service_path);
|
||||
addr = pds->addr;
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&pdr->list_lock);
|
||||
|
||||
if (!req.service_path[0])
|
||||
return -EINVAL;
|
||||
|
||||
ret = qmi_txn_init(&pdr->notifier_hdl, &txn,
|
||||
servreg_restart_pd_resp_ei,
|
||||
&resp);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = qmi_send_request(&pdr->notifier_hdl, &addr,
|
||||
&txn, SERVREG_RESTART_PD_REQ,
|
||||
SERVREG_RESTART_PD_REQ_MAX_LEN,
|
||||
servreg_restart_pd_req_ei, &req);
|
||||
if (ret < 0) {
|
||||
qmi_txn_cancel(&txn);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = qmi_txn_wait(&txn, 5 * HZ);
|
||||
if (ret < 0) {
|
||||
pr_err("PDR: %s PD restart txn wait failed: %d\n",
|
||||
req.service_path, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Check response if PDR is disabled */
|
||||
if (resp.resp.result == QMI_RESULT_FAILURE_V01 &&
|
||||
resp.resp.error == QMI_ERR_DISABLED_V01) {
|
||||
pr_err("PDR: %s PD restart is disabled: 0x%x\n",
|
||||
req.service_path, resp.resp.error);
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
/* Check the response for other error case*/
|
||||
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
|
||||
pr_err("PDR: %s request for PD restart failed: 0x%x\n",
|
||||
req.service_path, resp.resp.error);
|
||||
return -EREMOTEIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(pdr_restart_pd);
|
||||
|
||||
/**
|
||||
* pdr_handle_alloc() - initialize the PDR client handle
|
||||
* @status: function to be called on PD state change
|
||||
* @priv: handle for client's use
|
||||
*
|
||||
* Initializes the PDR client handle to allow for tracking/restart of PDs.
|
||||
*
|
||||
* Return: pdr_handle object on success, ERR_PTR on failure.
|
||||
*/
|
||||
struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
|
||||
char *service_path,
|
||||
void *priv), void *priv)
|
||||
{
|
||||
struct pdr_handle *pdr;
|
||||
int ret;
|
||||
|
||||
if (!status)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
pdr = kzalloc(sizeof(*pdr), GFP_KERNEL);
|
||||
if (!pdr)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
pdr->status = status;
|
||||
pdr->priv = priv;
|
||||
|
||||
mutex_init(&pdr->status_lock);
|
||||
mutex_init(&pdr->list_lock);
|
||||
mutex_init(&pdr->lock);
|
||||
|
||||
INIT_LIST_HEAD(&pdr->lookups);
|
||||
INIT_LIST_HEAD(&pdr->indack_list);
|
||||
|
||||
INIT_WORK(&pdr->locator_work, pdr_locator_work);
|
||||
INIT_WORK(&pdr->notifier_work, pdr_notifier_work);
|
||||
INIT_WORK(&pdr->indack_work, pdr_indack_work);
|
||||
|
||||
pdr->notifier_wq = create_singlethread_workqueue("pdr_notifier_wq");
|
||||
if (!pdr->notifier_wq) {
|
||||
ret = -ENOMEM;
|
||||
goto free_pdr_handle;
|
||||
}
|
||||
|
||||
pdr->indack_wq = alloc_ordered_workqueue("pdr_indack_wq", WQ_HIGHPRI);
|
||||
if (!pdr->indack_wq) {
|
||||
ret = -ENOMEM;
|
||||
goto destroy_notifier;
|
||||
}
|
||||
|
||||
ret = qmi_handle_init(&pdr->locator_hdl,
|
||||
SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN,
|
||||
&pdr_locator_ops, NULL);
|
||||
if (ret < 0)
|
||||
goto destroy_indack;
|
||||
|
||||
ret = qmi_add_lookup(&pdr->locator_hdl, SERVREG_LOCATOR_SERVICE, 1, 1);
|
||||
if (ret < 0)
|
||||
goto release_qmi_handle;
|
||||
|
||||
ret = qmi_handle_init(&pdr->notifier_hdl,
|
||||
SERVREG_STATE_UPDATED_IND_MAX_LEN,
|
||||
&pdr_notifier_ops,
|
||||
qmi_indication_handler);
|
||||
if (ret < 0)
|
||||
goto release_qmi_handle;
|
||||
|
||||
return pdr;
|
||||
|
||||
release_qmi_handle:
|
||||
qmi_handle_release(&pdr->locator_hdl);
|
||||
destroy_indack:
|
||||
destroy_workqueue(pdr->indack_wq);
|
||||
destroy_notifier:
|
||||
destroy_workqueue(pdr->notifier_wq);
|
||||
free_pdr_handle:
|
||||
kfree(pdr);
|
||||
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
EXPORT_SYMBOL(pdr_handle_alloc);
|
||||
|
||||
/**
|
||||
* pdr_handle_release() - release the PDR client handle
|
||||
* @pdr: PDR client handle
|
||||
*
|
||||
* Cleans up pending tracking requests and releases the underlying qmi handles.
|
||||
*/
|
||||
void pdr_handle_release(struct pdr_handle *pdr)
|
||||
{
|
||||
struct pdr_service *pds, *tmp;
|
||||
|
||||
if (IS_ERR_OR_NULL(pdr))
|
||||
return;
|
||||
|
||||
mutex_lock(&pdr->list_lock);
|
||||
list_for_each_entry_safe(pds, tmp, &pdr->lookups, node) {
|
||||
list_del(&pds->node);
|
||||
kfree(pds);
|
||||
}
|
||||
mutex_unlock(&pdr->list_lock);
|
||||
|
||||
cancel_work_sync(&pdr->locator_work);
|
||||
cancel_work_sync(&pdr->notifier_work);
|
||||
cancel_work_sync(&pdr->indack_work);
|
||||
|
||||
destroy_workqueue(pdr->notifier_wq);
|
||||
destroy_workqueue(pdr->indack_wq);
|
||||
|
||||
qmi_handle_release(&pdr->locator_hdl);
|
||||
qmi_handle_release(&pdr->notifier_hdl);
|
||||
|
||||
kfree(pdr);
|
||||
}
|
||||
EXPORT_SYMBOL(pdr_handle_release);
|
||||
|
||||
MODULE_LICENSE("GPL v2");
|
||||
MODULE_DESCRIPTION("Qualcomm Protection Domain Restart helpers");
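Taken together, pdr_handle_alloc(), pdr_add_lookup(), pdr_restart_pd() and pdr_handle_release() above form the whole client-facing surface of this module. A minimal usage sketch follows; the callback body, function names and the service name/path strings are illustrative assumptions, not part of this patch.

/*
 * Sketch only: how a hypothetical client driver could use the PDR
 * helpers above. The "avs/audio" / "msm/adsp/audio_pd" strings are
 * example values, not taken from this series.
 */
#include <linux/err.h>
#include <linux/soc/qcom/pdr.h>

static void example_pd_status(int state, char *service_path, void *priv)
{
	/* Invoked by the PDR helpers on every tracked PD state change */
	pr_info("PD %s changed state: 0x%x\n", service_path, state);
}

static int example_client_init(void)
{
	struct pdr_handle *pdr;
	struct pdr_service *pds;

	pdr = pdr_handle_alloc(example_pd_status, NULL);
	if (IS_ERR(pdr))
		return PTR_ERR(pdr);

	/* Lookup and notification then proceed asynchronously via workqueues */
	pds = pdr_add_lookup(pdr, "avs/audio", "msm/adsp/audio_pd");
	if (IS_ERR(pds)) {
		pdr_handle_release(pdr);
		return PTR_ERR(pds);
	}

	return 0;
}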
drivers/soc/qcom/pdr_internal.h (new file)
@ -0,0 +1,379 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __QCOM_PDR_HELPER_INTERNAL__
#define __QCOM_PDR_HELPER_INTERNAL__

#include <linux/soc/qcom/pdr.h>

#define SERVREG_LOCATOR_SERVICE	0x40
#define SERVREG_NOTIFIER_SERVICE	0x42

#define SERVREG_REGISTER_LISTENER_REQ	0x20
#define SERVREG_GET_DOMAIN_LIST_REQ	0x21
#define SERVREG_STATE_UPDATED_IND_ID	0x22
#define SERVREG_SET_ACK_REQ	0x23
#define SERVREG_RESTART_PD_REQ	0x24

#define SERVREG_DOMAIN_LIST_LENGTH	32
#define SERVREG_RESTART_PD_REQ_MAX_LEN	67
#define SERVREG_REGISTER_LISTENER_REQ_LEN	71
#define SERVREG_SET_ACK_REQ_LEN	72
#define SERVREG_GET_DOMAIN_LIST_REQ_MAX_LEN	74
#define SERVREG_STATE_UPDATED_IND_MAX_LEN	79
#define SERVREG_GET_DOMAIN_LIST_RESP_MAX_LEN	2389

struct servreg_location_entry {
	char name[SERVREG_NAME_LENGTH + 1];
	u8 service_data_valid;
	u32 service_data;
	u32 instance;
};

struct qmi_elem_info servreg_location_entry_ei[] = {
	{
		.data_type = QMI_STRING,
		.elem_len = SERVREG_NAME_LENGTH + 1,
		.elem_size = sizeof(char),
		.array_type = NO_ARRAY,
		.tlv_type = 0,
		.offset = offsetof(struct servreg_location_entry,
				   name),
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = 0,
		.offset = offsetof(struct servreg_location_entry,
				   instance),
	},
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0,
		.offset = offsetof(struct servreg_location_entry,
				   service_data_valid),
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = 0,
		.offset = offsetof(struct servreg_location_entry,
				   service_data),
	},
	{}
};

struct servreg_get_domain_list_req {
	char service_name[SERVREG_NAME_LENGTH + 1];
	u8 domain_offset_valid;
	u32 domain_offset;
};

struct qmi_elem_info servreg_get_domain_list_req_ei[] = {
	{
		.data_type = QMI_STRING,
		.elem_len = SERVREG_NAME_LENGTH + 1,
		.elem_size = sizeof(char),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct servreg_get_domain_list_req,
				   service_name),
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct servreg_get_domain_list_req,
				   domain_offset_valid),
	},
	{
		.data_type = QMI_UNSIGNED_4_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct servreg_get_domain_list_req,
				   domain_offset),
	},
	{}
};

struct servreg_get_domain_list_resp {
	struct qmi_response_type_v01 resp;
	u8 total_domains_valid;
	u16 total_domains;
	u8 db_rev_count_valid;
	u16 db_rev_count;
	u8 domain_list_valid;
	u32 domain_list_len;
	struct servreg_location_entry domain_list[SERVREG_DOMAIN_LIST_LENGTH];
};

struct qmi_elem_info servreg_get_domain_list_resp_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct servreg_get_domain_list_resp,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct servreg_get_domain_list_resp,
				   total_domains_valid),
	},
	{
		.data_type = QMI_UNSIGNED_2_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u16),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct servreg_get_domain_list_resp,
				   total_domains),
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct servreg_get_domain_list_resp,
				   db_rev_count_valid),
	},
	{
		.data_type = QMI_UNSIGNED_2_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u16),
		.array_type = NO_ARRAY,
		.tlv_type = 0x11,
		.offset = offsetof(struct servreg_get_domain_list_resp,
				   db_rev_count),
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct servreg_get_domain_list_resp,
				   domain_list_valid),
	},
	{
		.data_type = QMI_DATA_LEN,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct servreg_get_domain_list_resp,
				   domain_list_len),
	},
	{
		.data_type = QMI_STRUCT,
		.elem_len = SERVREG_DOMAIN_LIST_LENGTH,
		.elem_size = sizeof(struct servreg_location_entry),
		.array_type = NO_ARRAY,
		.tlv_type = 0x12,
		.offset = offsetof(struct servreg_get_domain_list_resp,
				   domain_list),
		.ei_array = servreg_location_entry_ei,
	},
	{}
};

struct servreg_register_listener_req {
	u8 enable;
	char service_path[SERVREG_NAME_LENGTH + 1];
};

struct qmi_elem_info servreg_register_listener_req_ei[] = {
	{
		.data_type = QMI_UNSIGNED_1_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct servreg_register_listener_req,
				   enable),
	},
	{
		.data_type = QMI_STRING,
		.elem_len = SERVREG_NAME_LENGTH + 1,
		.elem_size = sizeof(char),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct servreg_register_listener_req,
				   service_path),
	},
	{}
};

struct servreg_register_listener_resp {
	struct qmi_response_type_v01 resp;
	u8 curr_state_valid;
	enum servreg_service_state curr_state;
};

struct qmi_elem_info servreg_register_listener_resp_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct servreg_register_listener_resp,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{
		.data_type = QMI_OPT_FLAG,
		.elem_len = 1,
		.elem_size = sizeof(u8),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct servreg_register_listener_resp,
				   curr_state_valid),
	},
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(enum servreg_service_state),
		.array_type = NO_ARRAY,
		.tlv_type = 0x10,
		.offset = offsetof(struct servreg_register_listener_resp,
				   curr_state),
	},
	{}
};

struct servreg_restart_pd_req {
	char service_path[SERVREG_NAME_LENGTH + 1];
};

struct qmi_elem_info servreg_restart_pd_req_ei[] = {
	{
		.data_type = QMI_STRING,
		.elem_len = SERVREG_NAME_LENGTH + 1,
		.elem_size = sizeof(char),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct servreg_restart_pd_req,
				   service_path),
	},
	{}
};

struct servreg_restart_pd_resp {
	struct qmi_response_type_v01 resp;
};

struct qmi_elem_info servreg_restart_pd_resp_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct servreg_restart_pd_resp,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{}
};

struct servreg_state_updated_ind {
	enum servreg_service_state curr_state;
	char service_path[SERVREG_NAME_LENGTH + 1];
	u16 transaction_id;
};

struct qmi_elem_info servreg_state_updated_ind_ei[] = {
	{
		.data_type = QMI_SIGNED_4_BYTE_ENUM,
		.elem_len = 1,
		.elem_size = sizeof(u32),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct servreg_state_updated_ind,
				   curr_state),
	},
	{
		.data_type = QMI_STRING,
		.elem_len = SERVREG_NAME_LENGTH + 1,
		.elem_size = sizeof(char),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct servreg_state_updated_ind,
				   service_path),
	},
	{
		.data_type = QMI_UNSIGNED_2_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u16),
		.array_type = NO_ARRAY,
		.tlv_type = 0x03,
		.offset = offsetof(struct servreg_state_updated_ind,
				   transaction_id),
	},
	{}
};

struct servreg_set_ack_req {
	char service_path[SERVREG_NAME_LENGTH + 1];
	u16 transaction_id;
};

struct qmi_elem_info servreg_set_ack_req_ei[] = {
	{
		.data_type = QMI_STRING,
		.elem_len = SERVREG_NAME_LENGTH + 1,
		.elem_size = sizeof(char),
		.array_type = NO_ARRAY,
		.tlv_type = 0x01,
		.offset = offsetof(struct servreg_set_ack_req,
				   service_path),
	},
	{
		.data_type = QMI_UNSIGNED_2_BYTE,
		.elem_len = 1,
		.elem_size = sizeof(u16),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct servreg_set_ack_req,
				   transaction_id),
	},
	{}
};

struct servreg_set_ack_resp {
	struct qmi_response_type_v01 resp;
};

struct qmi_elem_info servreg_set_ack_resp_ei[] = {
	{
		.data_type = QMI_STRUCT,
		.elem_len = 1,
		.elem_size = sizeof(struct qmi_response_type_v01),
		.array_type = NO_ARRAY,
		.tlv_type = 0x02,
		.offset = offsetof(struct servreg_set_ack_resp,
				   resp),
		.ei_array = qmi_response_type_v01_ei,
	},
	{}
};

#endif
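For context on how these element tables are consumed, the sketch below shows how a state-updated indication could be acknowledged with SERVREG_SET_ACK_REQ, mirroring the qmi_txn_init()/qmi_send_request()/qmi_txn_wait() pattern used twice in pdr.c above. The function name and its argument list are assumptions for illustration; only the message ID, length macro and ei tables come from this header.

/*
 * Sketch only: acknowledging a SERVREG_STATE_UPDATED_IND using the
 * tables above. "example_send_indack" is a hypothetical helper name.
 */
static int example_send_indack(struct qmi_handle *notifier_hdl,
			       struct sockaddr_qrtr *addr,
			       const char *service_path, u16 transaction_id)
{
	struct servreg_set_ack_resp resp;
	struct servreg_set_ack_req req;
	struct qmi_txn txn;
	int ret;

	ret = qmi_txn_init(notifier_hdl, &txn, servreg_set_ack_resp_ei, &resp);
	if (ret < 0)
		return ret;

	/* Echo back the path and transaction id from the indication */
	strcpy(req.service_path, service_path);
	req.transaction_id = transaction_id;

	ret = qmi_send_request(notifier_hdl, addr, &txn,
			       SERVREG_SET_ACK_REQ,
			       SERVREG_SET_ACK_REQ_LEN,
			       servreg_set_ack_req_ei, &req);
	if (ret < 0) {
		qmi_txn_cancel(&txn);
		return ret;
	}

	return qmi_txn_wait(&txn, 5 * HZ);
}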
@ -200,7 +200,7 @@ static irqreturn_t qmp_intr(int irq, void *data)
{
	struct qmp *qmp = data;

	wake_up_interruptible_all(&qmp->event);
	wake_up_all(&qmp->event);

	return IRQ_HANDLED;
}

@ -225,6 +225,7 @@ static bool qmp_message_empty(struct qmp *qmp)
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
	long time_left;
	size_t tlen;
	int ret;

	if (WARN_ON(len + sizeof(u32) > qmp->size))
@ -239,6 +240,9 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len)
	__iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
			 data, len / sizeof(u32));
	writel(len, qmp->msgram + qmp->offset);

	/* Read back len to confirm data written in message RAM */
	tlen = readl(qmp->msgram + qmp->offset);
	qmp_kick(qmp);

	time_left = wait_event_interruptible_timeout(qmp->event,

@ -110,5 +110,6 @@ int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
int rpmh_rsc_invalidate(struct rsc_drv *drv);

void rpmh_tx_done(const struct tcs_request *msg, int r);
int rpmh_flush(struct rpmh_ctrlr *ctrlr);

#endif /* __RPM_INTERNAL_H__ */

@ -277,7 +277,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
		write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
		write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
		write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
		trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
		trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
	}

	write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);

@ -23,7 +23,7 @@

#define RPMH_TIMEOUT_MS	msecs_to_jiffies(10000)

#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
#define DEFINE_RPMH_MSG_ONSTACK(device, s, q, name) \
	struct rpmh_request name = { \
		.msg = { \
			.state = s, \
@ -33,7 +33,7 @@
		}, \
		.cmd = { { 0 } }, \
		.completion = q, \
		.dev = dev, \
		.dev = device, \
		.needs_free = false, \
	}

@ -427,11 +427,10 @@ static int is_req_valid(struct cache_req *req)
			req->sleep_val != req->wake_val);
}

static int send_single(const struct device *dev, enum rpmh_state state,
static int send_single(struct rpmh_ctrlr *ctrlr, enum rpmh_state state,
		       u32 addr, u32 data)
{
	DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	DEFINE_RPMH_MSG_ONSTACK(NULL, state, NULL, rpm_msg);

	/* Wake sets are always complete and sleep sets are not */
	rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
@ -445,7 +444,7 @@ static int send_single(const struct device *dev, enum rpmh_state state,
/**
 * rpmh_flush: Flushes the buffered active and sleep sets to TCS
 *
 * @dev: The device making the request
 * @ctrlr: controller making request to flush cached data
 *
 * Return: -EBUSY if the controller is busy, probably waiting on a response
 * to a RPMH request sent earlier.
@ -454,10 +453,9 @@ static int send_single(const struct device *dev, enum rpmh_state state,
 * that is powering down the entire system. Since no other RPMH API would be
 * executing at this time, it is safe to run lockless.
 */
int rpmh_flush(const struct device *dev)
int rpmh_flush(struct rpmh_ctrlr *ctrlr)
{
	struct cache_req *p;
	struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
	int ret;

	if (!ctrlr->dirty) {
@ -480,11 +478,12 @@ int rpmh_flush(const struct device *dev)
			  __func__, p->addr, p->sleep_val, p->wake_val);
			continue;
		}
		ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
		ret = send_single(ctrlr, RPMH_SLEEP_STATE, p->addr,
				  p->sleep_val);
		if (ret)
			return ret;
		ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
				  p->addr, p->wake_val);
		ret = send_single(ctrlr, RPMH_WAKE_ONLY_STATE, p->addr,
				  p->wake_val);
		if (ret)
			return ret;
	}
@ -493,7 +492,6 @@ int rpmh_flush(const struct device *dev)

	return 0;
}
EXPORT_SYMBOL(rpmh_flush);

/**
 * rpmh_invalidate: Invalidate all sleep and active sets

@ -277,7 +277,7 @@ static int show_image_##type(struct seq_file *seq, void *p) \
{ \
	struct smem_image_version *image_version = seq->private; \
	seq_puts(seq, image_version->type); \
	seq_puts(seq, "\n"); \
	seq_putc(seq, '\n'); \
	return 0; \
} \
static int open_image_##type(struct inode *inode, struct file *file) \

@ -195,19 +195,19 @@ config ARCH_R8A774C0
	  This enables support for the Renesas RZ/G2E SoC.

config ARCH_R8A77950
	bool

config ARCH_R8A77951
	bool

config ARCH_R8A7795
	bool "Renesas R-Car H3 SoC Platform"
	select ARCH_R8A77950
	select ARCH_R8A77951
	bool "Renesas R-Car H3 ES1.x SoC Platform"
	select ARCH_RCAR_GEN3
	select SYSC_R8A7795
	help
	  This enables support for the Renesas R-Car H3 SoC.
	  This enables support for the Renesas R-Car H3 SoC (revision 1.x).

config ARCH_R8A77951
	bool "Renesas R-Car H3 ES2.0+ SoC Platform"
	select ARCH_RCAR_GEN3
	select SYSC_R8A7795
	help
	  This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and
	  later).

config ARCH_R8A77960
	bool "Renesas R-Car M3-W SoC Platform"

@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0
 *
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Renesas R-Car System Controller
 *
 * Copyright (C) 2016 Glider bvba

@ -259,7 +259,7 @@ static const struct of_device_id renesas_socs[] __initconst = {
#ifdef CONFIG_ARCH_R8A7794
	{ .compatible = "renesas,r8a7794", .data = &soc_rcar_e2 },
#endif
#ifdef CONFIG_ARCH_R8A7795
#if defined(CONFIG_ARCH_R8A77950) || defined(CONFIG_ARCH_R8A77951)
	{ .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
#endif
#ifdef CONFIG_ARCH_R8A77960

@ -44,7 +44,6 @@ static struct tee_context *teedev_open(struct tee_device *teedev)

	kref_init(&ctx->refcount);
	ctx->teedev = teedev;
	INIT_LIST_HEAD(&ctx->list_shm);
	rc = teedev->desc->ops->open(ctx);
	if (rc)
		goto err;

@ -37,7 +37,8 @@ struct tee_shm_pool {
 * @num_users: number of active users of this device
 * @c_no_user: completion used when unregistering the device
 * @mutex: mutex protecting @num_users and @idr
 * @idr: register of shared memory object allocated on this device
 * @idr: register of user space shared memory objects allocated or
 *       registered on this device
 * @pool: shared memory pool
 */
struct tee_device {

@ -13,13 +13,13 @@

static void tee_shm_release(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->teedev;
	struct tee_device *teedev = shm->ctx->teedev;

	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	if (shm->ctx)
		list_del(&shm->link);
	mutex_unlock(&teedev->mutex);
	if (shm->flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, shm->id);
		mutex_unlock(&teedev->mutex);
	}

	if (shm->flags & TEE_SHM_POOL) {
		struct tee_shm_pool_mgr *poolm;
@ -44,8 +44,7 @@ static void tee_shm_release(struct tee_shm *shm)
		kfree(shm->pages);
	}

	if (shm->ctx)
		teedev_ctx_put(shm->ctx);
	teedev_ctx_put(shm->ctx);

	kfree(shm);

@ -77,7 +76,7 @@ static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
	size_t size = vma->vm_end - vma->vm_start;

	/* Refuse sharing shared memory provided by application */
	if (shm->flags & TEE_SHM_REGISTER)
	if (shm->flags & TEE_SHM_USER_MAPPED)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
@ -91,20 +90,14 @@ static const struct dma_buf_ops tee_shm_dma_buf_ops = {
	.mmap = tee_shm_op_mmap,
};

static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
				       struct tee_device *teedev,
				       size_t size, u32 flags)
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (ctx && ctx->teedev != teedev) {
		dev_err(teedev->dev.parent, "ctx and teedev mismatch\n");
		return ERR_PTR(-EINVAL);
	}

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
@ -132,7 +125,6 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
	}

	shm->flags = flags | TEE_SHM_POOL;
	shm->teedev = teedev;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = teedev->pool->dma_buf_mgr;
@ -145,17 +137,18 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
		goto err_kfree;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err_pool_free;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		mutex_lock(&teedev->mutex);
		shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
		mutex_unlock(&teedev->mutex);
		if (shm->id < 0) {
			ret = ERR_PTR(shm->id);
			goto err_pool_free;
		}

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
@ -168,18 +161,16 @@ static struct tee_shm *__tee_shm_alloc(struct tee_context *ctx,
		}
	}

	if (ctx) {
	if (ctx)
		teedev_ctx_get(ctx);
		mutex_lock(&teedev->mutex);
		list_add_tail(&shm->link, &ctx->list_shm);
		mutex_unlock(&teedev->mutex);
	}

	return shm;
err_rem:
	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	mutex_unlock(&teedev->mutex);
	if (flags & TEE_SHM_DMA_BUF) {
		mutex_lock(&teedev->mutex);
		idr_remove(&teedev->idr, shm->id);
		mutex_unlock(&teedev->mutex);
	}
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
@ -188,31 +179,8 @@ err_dev_put:
	tee_device_put(teedev);
	return ret;
}

/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx: Context that allocates the shared memory
 * @size: Requested size of shared memory
 * @flags: Flags setting properties for the requested shared memory.
 *
 * Memory allocated as global shared memory is automatically freed when the
 * TEE file pointer is closed. The @flags field uses the bits defined by
 * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
 * set. If TEE_SHM_DMA_BUF global shared memory will be allocated and
 * associated with a dma-buf handle, else driver private memory.
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	return __tee_shm_alloc(ctx, ctx->teedev, size, flags);
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);

struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size)
{
	return __tee_shm_alloc(NULL, teedev, size, TEE_SHM_MAPPED);
}
EXPORT_SYMBOL_GPL(tee_shm_priv_alloc);

struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
				 size_t length, u32 flags)
{
@ -245,7 +213,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
	}

	shm->flags = flags | TEE_SHM_REGISTER;
	shm->teedev = teedev;
	shm->ctx = ctx;
	shm->id = -1;
	addr = untagged_addr(addr);
@ -301,10 +268,6 @@ struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr,
		}
	}

	mutex_lock(&teedev->mutex);
	list_add_tail(&shm->link, &ctx->list_shm);
	mutex_unlock(&teedev->mutex);

	return shm;
err:
	if (shm) {

@ -18,6 +18,10 @@

#define SYSC_DRA7_MCAN_ENAWAKEUP	(1 << 4)

/* PRUSS sysc found on AM33xx/AM43xx/AM57xx */
#define SYSC_PRUSS_SUB_MWAIT	(1 << 5)
#define SYSC_PRUSS_STANDBY_INIT	(1 << 4)

/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */
#define SYSC_IDLE_FORCE	0
#define SYSC_IDLE_NO	1
include/dt-bindings/power/meson-a1-power.h (new file)
@ -0,0 +1,32 @@
/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
/*
 * Copyright (c) 2019 Amlogic, Inc.
 * Author: Jianxin Pan <jianxin.pan@amlogic.com>
 */

#ifndef _DT_BINDINGS_MESON_A1_POWER_H
#define _DT_BINDINGS_MESON_A1_POWER_H

#define PWRC_DSPA_ID	8
#define PWRC_DSPB_ID	9
#define PWRC_UART_ID	10
#define PWRC_DMC_ID	11
#define PWRC_I2C_ID	12
#define PWRC_PSRAM_ID	13
#define PWRC_ACODEC_ID	14
#define PWRC_AUDIO_ID	15
#define PWRC_OTP_ID	16
#define PWRC_DMA_ID	17
#define PWRC_SD_EMMC_ID	18
#define PWRC_RAMA_ID	19
#define PWRC_RAMB_ID	20
#define PWRC_IR_ID	21
#define PWRC_SPICC_ID	22
#define PWRC_SPIFC_ID	23
#define PWRC_USB_ID	24
#define PWRC_NIC_ID	25
#define PWRC_PDMIN_ID	26
#define PWRC_RSA_ID	27
#define PWRC_MAX_ID	28

#endif

@ -25,7 +25,6 @@ enum imx_sc_rpc_svc {
	IMX_SC_RPC_SVC_PAD = 6,
	IMX_SC_RPC_SVC_MISC = 7,
	IMX_SC_RPC_SVC_IRQ = 8,
	IMX_SC_RPC_SVC_ABORT = 9
};

struct imx_sc_rpc_msg {

@ -12,6 +12,8 @@ enum {
	SM_EFUSE_WRITE,
	SM_EFUSE_USER_MAX,
	SM_GET_CHIP_ID,
	SM_A1_PWRC_SET,
	SM_A1_PWRC_GET,
};

struct meson_sm_firmware;

@ -17,6 +17,7 @@ enum ti_sysc_module_type {
	TI_SYSC_OMAP4_MCASP,
	TI_SYSC_OMAP4_USB_HOST_FS,
	TI_SYSC_DRA7_MCAN,
	TI_SYSC_PRUSS,
};

struct ti_sysc_cookie {
@ -49,6 +50,9 @@ struct sysc_regbits {
	s8 emufree_shift;
};

#define SYSC_MODULE_QUIRK_PRUSS	BIT(24)
#define SYSC_MODULE_QUIRK_DSS_RESET	BIT(23)
#define SYSC_MODULE_QUIRK_RTC_UNLOCK	BIT(22)
#define SYSC_QUIRK_CLKDM_NOAUTO	BIT(21)
#define SYSC_QUIRK_FORCE_MSTANDBY	BIT(20)
#define SYSC_MODULE_QUIRK_AESS	BIT(19)
@ -141,6 +145,7 @@ struct clk;

struct ti_sysc_platform_data {
	struct of_dev_auxdata *auxdata;
	bool (*soc_type_gp)(void);
	int (*init_clockdomain)(struct device *dev, struct clk *fck,
				struct clk *ick, struct ti_sysc_cookie *cookie);
	void (*clkdm_deny_idle)(struct device *dev,

@ -85,6 +85,7 @@ struct apr_device {
	uint16_t domain_id;
	uint32_t version;
	char name[APR_NAME_SIZE];
	const char *service_path;
	spinlock_t lock;
	struct list_head node;
};
include/linux/soc/qcom/pdr.h (new file)
@ -0,0 +1,29 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __QCOM_PDR_HELPER__
#define __QCOM_PDR_HELPER__

#include <linux/soc/qcom/qmi.h>

#define SERVREG_NAME_LENGTH	64

struct pdr_service;
struct pdr_handle;

enum servreg_service_state {
	SERVREG_LOCATOR_ERR = 0x1,
	SERVREG_SERVICE_STATE_DOWN = 0x0FFFFFFF,
	SERVREG_SERVICE_STATE_UP = 0x1FFFFFFF,
	SERVREG_SERVICE_STATE_EARLY_DOWN = 0x2FFFFFFF,
	SERVREG_SERVICE_STATE_UNINIT = 0x7FFFFFFF,
};

struct pdr_handle *pdr_handle_alloc(void (*status)(int state,
						   char *service_path,
						   void *priv), void *priv);
struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
				   const char *service_name,
				   const char *service_path);
int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds);
void pdr_handle_release(struct pdr_handle *pdr);

#endif
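As a worked example of how a client consumes the servreg_service_state values declared above, a status callback might dispatch roughly as follows; the handler name and the reactions described in the comments are illustrative assumptions, not part of the patch.

/* Illustrative only: dispatching on servreg_service_state in a client. */
static void example_status_cb(int state, char *service_path, void *priv)
{
	switch (state) {
	case SERVREG_SERVICE_STATE_UP:
		/* PD is up: (re)register resources with the remote service */
		break;
	case SERVREG_SERVICE_STATE_EARLY_DOWN:
	case SERVREG_SERVICE_STATE_DOWN:
		/* PD went down: stop issuing requests, drop cached state */
		break;
	case SERVREG_LOCATOR_ERR:
		/* Lookup failed permanently: disable PD-dependent features */
		break;
	default:
		break;
	}
}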
@ -88,6 +88,7 @@ struct qmi_elem_info {
#define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01	5
#define QMI_ERR_INVALID_ID_V01	41
#define QMI_ERR_ENCODING_V01	58
#define QMI_ERR_DISABLED_V01	69
#define QMI_ERR_INCOMPATIBLE_STATE_V01	90
#define QMI_ERR_NOT_SUPPORTED_V01	94

@ -49,7 +49,6 @@ struct tee_shm_pool;
 */
struct tee_context {
	struct tee_device *teedev;
	struct list_head list_shm;
	void *data;
	struct kref refcount;
	bool releasing;
@ -168,9 +167,7 @@ void tee_device_unregister(struct tee_device *teedev);

/**
 * struct tee_shm - shared memory object
 * @teedev: device used to allocate the object
 * @ctx: context using the object, if NULL the context is gone
 * @link link element
 * @ctx: context using the object
 * @paddr: physical address of the shared memory
 * @kaddr: virtual address of the shared memory
 * @size: size of shared memory
@ -185,9 +182,7 @@ void tee_device_unregister(struct tee_device *teedev);
 * subsystem and from drivers that implements their own shm pool manager.
 */
struct tee_shm {
	struct tee_device *teedev;
	struct tee_context *ctx;
	struct list_head link;
	phys_addr_t paddr;
	void *kaddr;
	size_t size;
@ -318,18 +313,6 @@ void *tee_get_drvdata(struct tee_device *teedev);
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);

/**
 * tee_shm_priv_alloc() - Allocate shared memory privately
 * @dev: Device that allocates the shared memory
 * @size: Requested size of shared memory
 *
 * Allocates shared memory buffer that is not associated with any client
 * context. Such buffers are owned by TEE driver and used for internal calls.
 *
 * @returns a pointer to 'struct tee_shm'
 */
struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size);

/**
 * tee_shm_register() - Register shared memory buffer
 * @ctx: Context that registers the shared memory

@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
 * Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright NXP
 * Copyright 2017-2019 NXP
 *
 */
#ifndef __FSL_DPAA2_IO_H
@ -109,6 +109,10 @@ int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,

int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, u32 fqid,
				const struct dpaa2_fd *fd);
int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d, u32 fqid,
				const struct dpaa2_fd *fd, int number_of_frame);
int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d, u32 *fqid,
				const struct dpaa2_fd *fd, int number_of_frame);
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
				u16 qdbin, const struct dpaa2_fd *fd);
int dpaa2_io_service_release(struct dpaa2_io *d, u16 bpid,

@ -178,10 +178,10 @@ struct ucc_fast_info {
struct ucc_fast_private {
	struct ucc_fast_info *uf_info;
	struct ucc_fast __iomem *uf_regs; /* a pointer to the UCC regs. */
	u32 __iomem *p_ucce;	/* a pointer to the event register in memory. */
	u32 __iomem *p_uccm;	/* a pointer to the mask register in memory. */
	__be32 __iomem *p_ucce;	/* a pointer to the event register in memory. */
	__be32 __iomem *p_uccm;	/* a pointer to the mask register in memory. */
#ifdef CONFIG_UGETH_TX_ON_DEMAND
	u16 __iomem *p_utodr;	/* pointer to the transmit on demand register */
	__be16 __iomem *p_utodr;/* pointer to the transmit on demand register */
#endif
	int enabled_tx;	/* Whether channel is enabled for Tx (ENT) */
	int enabled_rx;	/* Whether channel is enabled for Rx (ENR) */

@ -184,7 +184,7 @@ struct ucc_slow_info {
struct ucc_slow_private {
	struct ucc_slow_info *us_info;
	struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
	struct ucc_slow_pram *us_pram;	/* a pointer to the parameter RAM */
	struct ucc_slow_pram __iomem *us_pram;	/* a pointer to the parameter RAM */
	s32 us_pram_offset;
	int enabled_tx;	/* Whether channel is enabled for Tx (ENT) */
	int enabled_rx;	/* Whether channel is enabled for Rx (ENR) */
@ -196,13 +196,12 @@ struct ucc_slow_private {
				   and length for first BD in a frame */
	s32 tx_base_offset;	/* first BD in Tx BD table offset (In MURAM) */
	s32 rx_base_offset;	/* first BD in Rx BD table offset (In MURAM) */
	struct qe_bd *confBd;	/* next BD for confirm after Tx */
	struct qe_bd *tx_bd;	/* next BD for new Tx request */
	struct qe_bd *rx_bd;	/* next BD to collect after Rx */
	struct qe_bd __iomem *confBd;	/* next BD for confirm after Tx */
	struct qe_bd __iomem *tx_bd;	/* next BD for new Tx request */
	struct qe_bd __iomem *rx_bd;	/* next BD to collect after Rx */
	void *p_rx_frame;	/* accumulating receive frame */
	u16 *p_ucce;	/* a pointer to the event register in memory. */
	u16 *p_uccm;	/* a pointer to the mask register in memory */
	__be16 __iomem *p_ucce;	/* a pointer to the event register in memory */
	__be16 __iomem *p_uccm;	/* a pointer to the mask register in memory */
	u16 saved_uccm;	/* a saved mask for the RX Interrupt bits */
#ifdef STATISTICS
	u32 tx_frames;	/* Transmitted frames counters */
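The __be16/__be32 plus __iomem annotations in these two hunks are what let sparse verify that the big-endian QE registers are only touched through I/O accessors rather than plain loads and stores. A minimal sketch of the access pattern they imply follows; the helper name and the write-one-to-clear semantics are assumptions for illustration, using only the standard iowrite16be() accessor.

/*
 * Sketch only: with p_ucce declared as __be16 __iomem *, sparse can
 * check that accesses go through byte-swapping I/O accessors.
 */
#include <linux/io.h>

static void example_clear_ucce(struct ucc_slow_private *uccs, u16 bits)
{
	/* write-one-to-clear semantics assumed for illustration */
	iowrite16be(bits, uccs->p_ucce);
}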
@ -20,8 +20,6 @@ int rpmh_write_async(const struct device *dev, enum rpmh_state state,
int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
		     const struct tcs_cmd *cmd, u32 *n);

int rpmh_flush(const struct device *dev);

int rpmh_invalidate(const struct device *dev);

#else
@ -40,9 +38,6 @@ static inline int rpmh_write_batch(const struct device *dev,
				   const struct tcs_cmd *cmd, u32 *n)
{ return -ENODEV; }

static inline int rpmh_flush(const struct device *dev)
{ return -ENODEV; }

static inline int rpmh_invalidate(const struct device *dev)
{ return -ENODEV; }