Merge branches 'acpi-video', 'acpi-prm', 'acpi-apei' and 'acpi-pcc'

Merge ACPI backlight driver updates, ACPI APEI updates, ACPI PRM updates
and changes related to ACPI PCC for 6.7-rc1:

 - Add acpi_backlight=vendor quirk for Toshiba Portégé R100 (Ondrej
   Zary).

 - Add "vendor" backlight quirks for 3 Lenovo x86 Android tablets (Hans
   de Goede).

 - Move Xiaomi Mi Pad 2 backlight quirk to its own section (Hans de
   Goede).

 - Annotate struct prm_module_info with __counted_by (Kees Cook).

 - Fix AER info corruption in aer_recover_queue() when error status data
   has multiple sections (Shiju Jose).

 - Make APEI use ERST max execution time value for slow devices (Jeshua
   Smith).

 - Add support for platform notification handling to the PCC mailbox
   driver and modify it to support shared interrupts for multiple
   subspaces (Huisong Li).

 - Define common macros to use when referring to various bitfields in the
   PCC generic communications channel command and status fields and use
   them in some drivers (Sudeep Holla).

* acpi-video:
  ACPI: video: Add acpi_backlight=vendor quirk for Toshiba Portégé R100
  ACPI: video: Add "vendor" quirks for 3 Lenovo x86 Android tablets
  ACPI: video: Move Xiaomi Mi Pad 2 quirk to its own section

* acpi-prm:
  ACPI: PRM: Annotate struct prm_module_info with __counted_by

* acpi-apei:
  ACPI: APEI: Use ERST timeout for slow devices
  ACPI: APEI: Fix AER info corruption when error status data has multiple sections

* acpi-pcc:
  soc: kunpeng_hccs: Migrate to use generic PCC shmem related macros
  hwmon: (xgene) Migrate to use generic PCC shmem related macros
  i2c: xgene-slimpro: Migrate to use generic PCC shmem related macros
  ACPI: PCC: Add PCC shared memory region command and status bitfields
  mailbox: pcc: Support shared interrupt for multiple subspaces
  mailbox: pcc: Add support for platform notification handling
Rafael J. Wysocki  2023-10-26 14:58:20 +02:00
11 changed files, 254 insertions(+), 54 deletions(-)

diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c

@@ -59,6 +59,10 @@ static struct acpi_table_erst *erst_tab;
 #define ERST_RANGE_NVRAM	0x0002
 #define ERST_RANGE_SLOW		0x0004
 
+/* ERST Exec max timings */
+#define ERST_EXEC_TIMING_MAX_MASK	0xFFFFFFFF00000000
+#define ERST_EXEC_TIMING_MAX_SHIFT	32
+
 /*
  * ERST Error Log Address Range, used as buffer for reading/writing
  * error records.
@@ -68,6 +72,7 @@ static struct erst_erange {
 	u64 size;
 	void __iomem *vaddr;
 	u32 attr;
+	u64 timings;
 } erst_erange;
 
 /*
@@ -97,6 +102,19 @@ static inline int erst_errno(int command_status)
 	}
 }
 
+static inline u64 erst_get_timeout(void)
+{
+	u64 timeout = FIRMWARE_TIMEOUT;
+
+	if (erst_erange.attr & ERST_RANGE_SLOW) {
+		timeout = ((erst_erange.timings & ERST_EXEC_TIMING_MAX_MASK) >>
+			   ERST_EXEC_TIMING_MAX_SHIFT) * NSEC_PER_MSEC;
+		if (timeout < FIRMWARE_TIMEOUT)
+			timeout = FIRMWARE_TIMEOUT;
+	}
+
+	return timeout;
+}
+
 static int erst_timedout(u64 *t, u64 spin_unit)
 {
 	if ((s64)*t < spin_unit) {
@@ -191,9 +209,11 @@ static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
 {
 	int rc;
 	u64 val;
-	u64 timeout = FIRMWARE_TIMEOUT;
+	u64 timeout;
 	u64 stall_time;
 
+	timeout = erst_get_timeout();
+
 	if (ctx->var1 > FIRMWARE_MAX_STALL) {
 		if (!in_nmi())
 			pr_warn(FW_WARN
@@ -389,6 +409,13 @@ static int erst_get_erange(struct erst_erange *range)
 	if (rc)
 		return rc;
 	range->attr = apei_exec_ctx_get_output(&ctx);
+	rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_TIMINGS);
+	if (rc == 0)
+		range->timings = apei_exec_ctx_get_output(&ctx);
+	else if (rc == -ENOENT)
+		range->timings = 0;
+	else
+		return rc;
 
 	return 0;
 }
@@ -621,10 +648,12 @@ EXPORT_SYMBOL_GPL(erst_get_record_id_end);
 static int __erst_write_to_storage(u64 offset)
 {
 	struct apei_exec_context ctx;
-	u64 timeout = FIRMWARE_TIMEOUT;
+	u64 timeout;
 	u64 val;
 	int rc;
 
+	timeout = erst_get_timeout();
+
 	erst_exec_ctx_init(&ctx);
 	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE);
 	if (rc)
@@ -660,10 +689,12 @@ static int __erst_write_to_storage(u64 offset)
 static int __erst_read_from_storage(u64 record_id, u64 offset)
 {
 	struct apei_exec_context ctx;
-	u64 timeout = FIRMWARE_TIMEOUT;
+	u64 timeout;
 	u64 val;
 	int rc;
 
+	timeout = erst_get_timeout();
+
 	erst_exec_ctx_init(&ctx);
 	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ);
 	if (rc)
@@ -703,10 +734,12 @@ static int __erst_read_from_storage(u64 record_id, u64 offset)
 static int __erst_clear_from_storage(u64 record_id)
 {
 	struct apei_exec_context ctx;
-	u64 timeout = FIRMWARE_TIMEOUT;
+	u64 timeout;
 	u64 val;
 	int rc;
 
+	timeout = erst_get_timeout();
+
 	erst_exec_ctx_init(&ctx);
 	rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR);
 	if (rc)
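As a side note on the arithmetic above: ACPI_ERST_EXECUTE_TIMINGS returns a
64-bit value whose upper 32 bits hold the maximum expected execution time in
milliseconds, which erst_get_timeout() scales to nanoseconds and clamps to the
default firmware timeout. A standalone sketch of that decode; the sample
timings value and the 1 s FIRMWARE_TIMEOUT floor are assumptions for
illustration:

#include <stdint.h>
#include <stdio.h>

#define ERST_EXEC_TIMING_MAX_MASK	0xFFFFFFFF00000000ULL
#define ERST_EXEC_TIMING_MAX_SHIFT	32
#define NSEC_PER_MSEC			1000000ULL
#define FIRMWARE_TIMEOUT		(1000 * NSEC_PER_MSEC)	/* assumed 1 s floor */

/* Mirror of erst_get_timeout(): scale the firmware maximum to nanoseconds,
 * never dropping below the default firmware timeout. */
static uint64_t decode_erst_timeout(uint64_t timings)
{
	uint64_t max_ms = (timings & ERST_EXEC_TIMING_MAX_MASK) >>
			  ERST_EXEC_TIMING_MAX_SHIFT;
	uint64_t timeout = max_ms * NSEC_PER_MSEC;

	return timeout < FIRMWARE_TIMEOUT ? FIRMWARE_TIMEOUT : timeout;
}

int main(void)
{
	/* Hypothetical ERST device reporting a 5000 ms maximum. */
	uint64_t timings = 5000ULL << ERST_EXEC_TIMING_MAX_SHIFT;

	printf("timeout = %llu ns\n",
	       (unsigned long long)decode_erst_timeout(timings));	/* 5000000000 */
	return 0;
}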

diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c

@@ -209,6 +209,20 @@ err_pool_alloc:
 	return -ENOMEM;
 }
 
+/**
+ * ghes_estatus_pool_region_free - free previously allocated memory
+ * from the ghes_estatus_pool.
+ * @addr: address of memory to free.
+ * @size: size of memory to free.
+ *
+ * Returns none.
+ */
+void ghes_estatus_pool_region_free(unsigned long addr, u32 size)
+{
+	gen_pool_free(ghes_estatus_pool, addr, size);
+}
+EXPORT_SYMBOL_GPL(ghes_estatus_pool_region_free);
+
 static int map_gen_v2(struct ghes *ghes)
 {
 	return apei_map_generic_address(&ghes->generic_v2->read_ack_register);
@@ -564,6 +578,7 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
 	    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
 		unsigned int devfn;
 		int aer_severity;
+		u8 *aer_info;
 
 		devfn = PCI_DEVFN(pcie_err->device_id.device,
 				  pcie_err->device_id.function);
@@ -577,11 +592,17 @@ static void ghes_handle_aer(struct acpi_hest_generic_data *gdata)
 		if (gdata->flags & CPER_SEC_RESET)
 			aer_severity = AER_FATAL;
 
+		aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
+						  sizeof(struct aer_capability_regs));
+		if (!aer_info)
+			return;
+		memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
+
 		aer_recover_queue(pcie_err->device_id.segment,
 				  pcie_err->device_id.bus,
 				  devfn, aer_severity,
-				  (struct aer_capability_regs *)
-				  pcie_err->aer_info);
+				  aer_info);
 	}
 #endif
 }

diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c

@@ -69,7 +69,7 @@ struct prm_module_info {
 	bool updatable;
 
 	struct list_head module_list;
-	struct prm_handler_info handlers[];
+	struct prm_handler_info handlers[] __counted_by(handler_count);
 };
 
 static u64 efi_pa_va_lookup(u64 pa)
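For context, __counted_by(handler_count) tells the compiler, and the
FORTIFY_SOURCE/UBSAN_BOUNDS runtime checks, that the flexible array's valid
length is whatever the handler_count member currently holds, so out-of-bounds
indexing can be trapped at the exact array. A minimal sketch of the allocation
pattern the annotation expects, with hypothetical names:

#include <linux/overflow.h>	/* struct_size() */
#include <linux/slab.h>

struct demo_handler {
	void (*fn)(void);
};

struct demo_module {
	u32 handler_count;	/* must describe handlers[] at all times */
	struct demo_handler handlers[] __counted_by(handler_count);
};

static struct demo_module *demo_module_alloc(u32 count)
{
	struct demo_module *mod;

	/* struct_size() computes sizeof(*mod) + count * sizeof(handler) safely. */
	mod = kzalloc(struct_size(mod, handlers, count), GFP_KERNEL);
	if (!mod)
		return NULL;

	/* Set the counter before touching the array: bounds checks key off it. */
	mod->handler_count = count;
	return mod;
}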

diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c

@@ -130,6 +130,16 @@ static int video_detect_force_native(const struct dmi_system_id *d)
 	return 0;
 }
 
+static int video_detect_portege_r100(const struct dmi_system_id *d)
+{
+	struct pci_dev *dev;
+
+	/* Search for Trident CyberBlade XP4m32 to confirm Portégé R100 */
+	dev = pci_get_device(PCI_VENDOR_ID_TRIDENT, 0x2100, NULL);
+	if (dev)
+		acpi_backlight_dmi = acpi_backlight_vendor;
+	return 0;
+}
+
 static const struct dmi_system_id video_detect_dmi_table[] = {
 	/*
 	 * Models which should use the vendor backlight interface,
@@ -229,14 +239,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
 		},
 	},
-	{
-		.callback = video_detect_force_vendor,
-		/* Xiaomi Mi Pad 2 */
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
-		},
-	},
 
 	/*
 	 * Models which should use the vendor backlight interface,
@@ -270,6 +272,22 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		},
 	},
 
+	/*
+	 * The Toshiba Portégé R100 has both acpi_video and the toshiba_acpi
+	 * vendor driver working, but neither gets activated because its VGA
+	 * chip (Trident CyberBlade XP4m32) has no kernel driver.
+	 * The DMI strings are generic, so check for the VGA chip in the
+	 * callback.
+	 */
+	{
+		.callback = video_detect_portege_r100,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"),
+			DMI_MATCH(DMI_BOARD_NAME, "Portable PC")
+		},
+	},
+
 	/*
 	 * Models which need acpi_video backlight control where the GPU drivers
 	 * do not call acpi_video_register_backlight() because no internal panel
@@ -799,6 +817,56 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"),
 		},
 	},
+
+	/*
+	 * x86 android tablets which directly control the backlight through
+	 * an external backlight controller, typically TI's LP8557.
+	 * The backlight is directly controlled by the lp855x driver on these.
+	 * This setup means that neither i915's native nor acpi_video backlight
+	 * control works. Add a "vendor" quirk to disable both. Note these
+	 * devices do not use vendor control in the typical meaning of
+	 * vendor specific SMBIOS or ACPI calls being used.
+	 */
+	{
+		.callback = video_detect_force_vendor,
+		/* Lenovo Yoga Book X90F / X90L */
+		.matches = {
+			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
+		},
+	},
+	{
+		.callback = video_detect_force_vendor,
+		/*
+		 * Lenovo Yoga Tablet 2 830F/L or 1050F/L (The 8" and 10"
+		 * Lenovo Yoga Tablet 2 use the same mainboard)
+		 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
+			DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
+			/* Partial match on beginning of BIOS version */
+			DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
+		},
+	},
+	{
+		.callback = video_detect_force_vendor,
+		/* Lenovo Yoga Tab 3 Pro YT3-X90F */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
+		},
+	},
+	{
+		.callback = video_detect_force_vendor,
+		/* Xiaomi Mi Pad 2 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
+		},
+	},
 	{ },
 };
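All of these quirks feed one decision point: the table callbacks set
acpi_backlight_dmi, which the backlight-type selection consults after the
acpi_backlight= command-line override but before auto-detection. A simplified
sketch of that precedence (the real __acpi_video_get_backlight_type() also
weighs ACPI capabilities, native GPU backlight support and vendor driver
registration):

/* Simplified precedence sketch -- not the complete upstream logic. */
static enum acpi_backlight_type demo_get_backlight_type(void)
{
	/* 1. acpi_backlight= on the kernel command line always wins. */
	if (acpi_backlight_cmdline != acpi_backlight_undef)
		return acpi_backlight_cmdline;

	/*
	 * 2. A DMI quirk, e.g. the Portégé R100 callback above setting
	 *    acpi_backlight_dmi = acpi_backlight_vendor.
	 */
	if (acpi_backlight_dmi != acpi_backlight_undef)
		return acpi_backlight_dmi;

	/*
	 * 3. Otherwise fall back to heuristics (ACPI tables, native GPU
	 *    backlight support, vendor driver registration, ...).
	 */
	return acpi_backlight_native;
}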

diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c

@@ -57,12 +57,6 @@
 	(MSG_TYPE_SET(MSG_TYPE_PWRMGMT) | \
 	MSG_SUBTYPE_SET(hndl) | TPC_CMD_SET(cmd) | type)
 
-/* PCC defines */
-#define PCC_SIGNATURE_MASK		0x50424300
-#define PCCC_GENERATE_DB_INT		BIT(15)
-#define PCCS_CMD_COMPLETE		BIT(0)
-#define PCCS_SCI_DOORBEL		BIT(1)
-#define PCCS_PLATFORM_NOTIFICATION	BIT(3)
 /*
  * Arbitrary retries in case the remote processor is slow to respond
  * to PCC commands
@@ -142,15 +136,15 @@ static int xgene_hwmon_pcc_rd(struct xgene_hwmon_dev *ctx, u32 *msg)
 	/* Write signature for subspace */
 	WRITE_ONCE(generic_comm_base->signature,
-		   cpu_to_le32(PCC_SIGNATURE_MASK | ctx->mbox_idx));
+		   cpu_to_le32(PCC_SIGNATURE | ctx->mbox_idx));
 
 	/* Write to the shared command region */
 	WRITE_ONCE(generic_comm_base->command,
-		   cpu_to_le16(MSG_TYPE(msg[0]) | PCCC_GENERATE_DB_INT));
+		   cpu_to_le16(MSG_TYPE(msg[0]) | PCC_CMD_GENERATE_DB_INTR));
 
 	/* Flip CMD COMPLETE bit */
 	val = le16_to_cpu(READ_ONCE(generic_comm_base->status));
-	val &= ~PCCS_CMD_COMPLETE;
+	val &= ~PCC_STATUS_CMD_COMPLETE;
 	WRITE_ONCE(generic_comm_base->status, cpu_to_le16(val));
 
 	/* Copy the message to the PCC comm space */
@@ -544,7 +538,7 @@ static void xgene_hwmon_pcc_rx_cb(struct mbox_client *cl, void *msg)
 	msg = generic_comm_base + 1;
 	/* Check if platform sends interrupt */
 	if (!xgene_word_tst_and_clr(&generic_comm_base->status,
-				    PCCS_SCI_DOORBEL))
+				    PCC_STATUS_SCI_DOORBELL))
 		return;
 
 	/*
@@ -566,7 +560,7 @@ static void xgene_hwmon_pcc_rx_cb(struct mbox_client *cl, void *msg)
 		      TPC_CMD(((u32 *)msg)[0]) == TPC_ALARM))) {
 		/* Check if platform completes command */
 		if (xgene_word_tst_and_clr(&generic_comm_base->status,
-					   PCCS_CMD_COMPLETE)) {
+					   PCC_STATUS_CMD_COMPLETE)) {
 			ctx->sync_msg.msg = ((u32 *)msg)[0];
 			ctx->sync_msg.param1 = ((u32 *)msg)[1];
 			ctx->sync_msg.param2 = ((u32 *)msg)[2];

diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c

@@ -91,14 +91,6 @@
 #define SLIMPRO_IIC_MSG_DWORD_COUNT	3
 
-/* PCC related defines */
-#define PCC_SIGNATURE		0x50424300
-#define PCC_STS_CMD_COMPLETE	BIT(0)
-#define PCC_STS_SCI_DOORBELL	BIT(1)
-#define PCC_STS_ERR		BIT(2)
-#define PCC_STS_PLAT_NOTIFY	BIT(3)
-#define PCC_CMD_GENERATE_DB_INT	BIT(15)
-
 struct slimpro_i2c_dev {
 	struct i2c_adapter adapter;
 	struct device *dev;
@@ -160,11 +152,11 @@ static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
 	/* Check if platform sends interrupt */
 	if (!xgene_word_tst_and_clr(&generic_comm_base->status,
-				    PCC_STS_SCI_DOORBELL))
+				    PCC_STATUS_SCI_DOORBELL))
 		return;
 
 	if (xgene_word_tst_and_clr(&generic_comm_base->status,
-				   PCC_STS_CMD_COMPLETE)) {
+				   PCC_STATUS_CMD_COMPLETE)) {
 		msg = generic_comm_base + 1;
 
 		/* Response message msg[1] contains the return value. */
@@ -186,10 +178,10 @@ static void slimpro_i2c_pcc_tx_prepare(struct slimpro_i2c_dev *ctx, u32 *msg)
 		   cpu_to_le32(PCC_SIGNATURE | ctx->mbox_idx));
 
 	WRITE_ONCE(generic_comm_base->command,
-		   cpu_to_le16(SLIMPRO_MSG_TYPE(msg[0]) | PCC_CMD_GENERATE_DB_INT));
+		   cpu_to_le16(SLIMPRO_MSG_TYPE(msg[0]) | PCC_CMD_GENERATE_DB_INTR));
 
 	status = le16_to_cpu(READ_ONCE(generic_comm_base->status));
-	status &= ~PCC_STS_CMD_COMPLETE;
+	status &= ~PCC_STATUS_CMD_COMPLETE;
 	WRITE_ONCE(generic_comm_base->status, cpu_to_le16(status));
 
 	/* Copy the message to the PCC comm space */

diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c

@@ -91,6 +91,14 @@ struct pcc_chan_reg {
  * @cmd_update: PCC register bundle for the command complete update register
  * @error: PCC register bundle for the error status register
  * @plat_irq: platform interrupt
+ * @type: PCC subspace type
+ * @plat_irq_flags: platform interrupt flags
+ * @chan_in_use: this flag is used just to check if the interrupt needs
+ *		handling when it is shared. Since only one transfer can occur
+ *		at a time and mailbox takes care of locking, this flag can be
+ *		accessed without a lock. Note: only subspace types that
+ *		support communication from OSPM to Platform (like type 3)
+ *		use it; other types ignore it completely.
 */
 struct pcc_chan_info {
 	struct pcc_mbox_chan chan;
@@ -100,12 +108,17 @@ struct pcc_chan_info {
 	struct pcc_chan_reg cmd_update;
 	struct pcc_chan_reg error;
 	int plat_irq;
+	u8 type;
+	unsigned int plat_irq_flags;
+	bool chan_in_use;
 };
 #define to_pcc_chan_info(c) container_of(c, struct pcc_chan_info, chan)
 static struct pcc_chan_info *chan_info;
 static int pcc_chan_count;
 
+static int pcc_send_data(struct mbox_chan *chan, void *data);
+
 /*
  * PCC can be used with perf critical drivers such as CPPC
  * So it makes sense to locally cache the virtual address and
@@ -221,6 +234,41 @@ static int pcc_map_interrupt(u32 interrupt, u32 flags)
 	return acpi_register_gsi(NULL, interrupt, trigger, polarity);
 }
 
+static bool pcc_chan_plat_irq_can_be_shared(struct pcc_chan_info *pchan)
+{
+	return (pchan->plat_irq_flags & ACPI_PCCT_INTERRUPT_MODE) ==
+		ACPI_LEVEL_SENSITIVE;
+}
+
+static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan)
+{
+	u64 val;
+	int ret;
+
+	ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
+	if (ret)
+		return false;
+
+	if (!pchan->cmd_complete.gas)
+		return true;
+
+	/*
+	 * Judge whether the channel should respond to the interrupt based
+	 * on the value of the command complete bit.
+	 */
+	val &= pchan->cmd_complete.status_mask;
+
+	/*
+	 * If this is a PCC slave subspace channel, a cleared command
+	 * complete bit 0 indicates that the Platform is sending a
+	 * notification and OSPM needs to respond to this interrupt to
+	 * process the command.
+	 */
+	if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+		return !val;
+
+	return !!val;
+}
+
 /**
  * pcc_mbox_irq - PCC mailbox interrupt handler
  * @irq: interrupt number
@@ -236,16 +284,12 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
 	int ret;
 
 	pchan = chan->con_priv;
-	ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
-	if (ret)
+	if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE &&
+	    !pchan->chan_in_use)
 		return IRQ_NONE;
 
-	if (val) { /* Ensure GAS exists and value is non-zero */
-		val &= pchan->cmd_complete.status_mask;
-		if (!val)
-			return IRQ_NONE;
-	}
+	if (!pcc_mbox_cmd_complete_check(pchan))
+		return IRQ_NONE;
 
 	ret = pcc_chan_reg_read(&pchan->error, &val);
 	if (ret)
@@ -262,6 +306,16 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
 
 	mbox_chan_received_data(chan, NULL);
 
+	/*
+	 * The PCC slave subspace channel needs to set the command complete bit
+	 * and ring doorbell after processing message.
+	 *
+	 * The PCC master subspace channel clears chan_in_use to free channel.
+	 */
+	if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+		pcc_send_data(chan, NULL);
+	pchan->chan_in_use = false;
+
 	return IRQ_HANDLED;
 }
 
@@ -340,7 +394,11 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
 	if (ret)
 		return ret;
 
-	return pcc_chan_reg_read_modify_write(&pchan->db);
+	ret = pcc_chan_reg_read_modify_write(&pchan->db);
+	if (!ret && pchan->plat_irq > 0)
+		pchan->chan_in_use = true;
+
+	return ret;
 }
 
 /**
@@ -353,11 +411,14 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
 static int pcc_startup(struct mbox_chan *chan)
 {
 	struct pcc_chan_info *pchan = chan->con_priv;
+	unsigned long irqflags;
 	int rc;
 
 	if (pchan->plat_irq > 0) {
-		rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq, 0,
-				      MBOX_IRQ_NAME, chan);
+		irqflags = pcc_chan_plat_irq_can_be_shared(pchan) ?
+			   IRQF_SHARED | IRQF_ONESHOT : 0;
+		rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq,
+				      irqflags, MBOX_IRQ_NAME, chan);
 		if (unlikely(rc)) {
 			dev_err(chan->mbox->dev, "failed to register PCC interrupt %d\n",
 				pchan->plat_irq);
@@ -463,6 +524,7 @@ static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
 		       pcct_ss->platform_interrupt);
 		return -EINVAL;
 	}
+	pchan->plat_irq_flags = pcct_ss->flags;
 
 	if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
 		struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
@@ -484,6 +546,12 @@ static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
 				    "PLAT IRQ ACK");
 	}
 
+	if (pcc_chan_plat_irq_can_be_shared(pchan) &&
+	    !pchan->plat_irq_ack.gas) {
+		pr_err("PCC subspace has level IRQ with no ACK register\n");
+		return -EINVAL;
+	}
+
 	return ret;
 }
 
@@ -698,6 +766,7 @@ static int pcc_mbox_probe(struct platform_device *pdev)
 
 		pcc_parse_subspace_shmem(pchan, pcct_entry);
 
+		pchan->type = pcct_entry->type;
 		pcct_entry = (struct acpi_subtable_header *)
 				((unsigned long) pcct_entry + pcct_entry->length);
 	}
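From a client driver's point of view, platform notifications now arrive
through the normal mailbox receive path. A minimal consumer sketch under
stated assumptions: the subspace index and all my_* names are hypothetical,
and the shared-memory parsing is elided:

#include <acpi/pcc.h>
#include <linux/mailbox_client.h>

#define MY_PCC_SUBSPACE_ID	3	/* hypothetical subspace index */

/* Invoked from the pcc_mbox_irq() path when the platform rings the doorbell. */
static void my_pcc_rx_callback(struct mbox_client *cl, void *msg)
{
	/* msg is NULL here; the payload lives in the shared memory region. */
	pr_info("PCC platform notification received\n");
}

static int my_pcc_client_init(struct device *dev)
{
	struct pcc_mbox_chan *pchan;
	/* Static: the mailbox framework keeps a pointer to the client. */
	static struct mbox_client cl;

	cl.dev = dev;
	cl.rx_callback = my_pcc_rx_callback;
	cl.knows_txdone = true;

	pchan = pcc_mbox_request_channel(&cl, MY_PCC_SUBSPACE_ID);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	/* ... map pchan->shmem_base_addr and talk over the channel ... */
	return 0;
}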

diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c

@@ -29,6 +29,7 @@
 #include <linux/kfifo.h>
 #include <linux/slab.h>
 #include <acpi/apei.h>
+#include <acpi/ghes.h>
 #include <ras/ras_event.h>
 
 #include "../pci.h"
@@ -997,6 +998,15 @@ static void aer_recover_work_func(struct work_struct *work)
 			continue;
 		}
 		cper_print_aer(pdev, entry.severity, entry.regs);
+		/*
+		 * Memory for aer_capability_regs(entry.regs) is being allocated from the
+		 * ghes_estatus_pool to protect it from overwriting when multiple sections
+		 * are present in the error status. Thus free the same after processing
+		 * the data.
+		 */
+		ghes_estatus_pool_region_free((unsigned long)entry.regs,
+					      sizeof(struct aer_capability_regs));
 		if (entry.severity == AER_NONFATAL)
 			pcie_do_recovery(pdev, pci_channel_io_normal,
 					 aer_root_reset);
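Together with the ghes_handle_aer() hunk earlier, this establishes a clear
ownership hand-off: the GHES producer (which may run in NMI context) snapshots
each section's AER registers into its own pool allocation, and this workqueue
consumer frees the snapshot after processing, so multiple sections in one
error status no longer overwrite a single shared buffer. Schematically
(fragments only; ghes_estatus_pool and struct aer_recover_entry are file-local
upstream):

/* Producer (ghes.c, possibly NMI context): one private copy per section. */
aer_info = (void *)gen_pool_alloc(ghes_estatus_pool,
				  sizeof(struct aer_capability_regs));
if (!aer_info)
	return;				/* drop the section, never block */
memcpy(aer_info, pcie_err->aer_info, sizeof(struct aer_capability_regs));
aer_recover_queue(segment, bus, devfn, severity, aer_info);	/* into kfifo */

/* Consumer (aer.c, workqueue): process, then return the copy to the pool. */
cper_print_aer(pdev, entry.severity, entry.regs);
ghes_estatus_pool_region_free((unsigned long)entry.regs,
			      sizeof(struct aer_capability_regs));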

diff --git a/drivers/soc/hisilicon/kunpeng_hccs.c b/drivers/soc/hisilicon/kunpeng_hccs.c

@@ -31,10 +31,6 @@
 
 #include "kunpeng_hccs.h"
 
-/* PCC defines */
-#define HCCS_PCC_SIGNATURE_MASK		0x50434300
-#define HCCS_PCC_STATUS_CMD_COMPLETE	BIT(0)
-
 /*
  * Arbitrary retries in case the remote processor is slow to respond
  * to PCC commands
@@ -187,7 +183,7 @@ static int hccs_check_chan_cmd_complete(struct hccs_dev *hdev)
 	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
	 */
 	ret = readw_poll_timeout(&comm_base->status, status,
-				 status & HCCS_PCC_STATUS_CMD_COMPLETE,
+				 status & PCC_STATUS_CMD_COMPLETE,
 				 HCCS_POLL_STATUS_TIME_INTERVAL_US,
 				 cl_info->deadline_us);
 	if (unlikely(ret))
@@ -208,7 +204,7 @@ static int hccs_pcc_cmd_send(struct hccs_dev *hdev, u8 cmd,
 	int ret;
 
 	/* Write signature for this subspace */
-	tmp.signature = HCCS_PCC_SIGNATURE_MASK | hdev->chan_id;
+	tmp.signature = PCC_SIGNATURE | hdev->chan_id;
 	/* Write to the shared command region */
 	tmp.command = cmd;
 	/* Clear cmd complete bit */

diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h

@@ -73,8 +73,12 @@ int ghes_register_vendor_record_notifier(struct notifier_block *nb);
 void ghes_unregister_vendor_record_notifier(struct notifier_block *nb);
 
 struct list_head *ghes_get_devices(void);
+
+void ghes_estatus_pool_region_free(unsigned long addr, u32 size);
 #else
 static inline struct list_head *ghes_get_devices(void) { return NULL; }
+
+static inline void ghes_estatus_pool_region_free(unsigned long addr, u32 size) { return; }
 #endif
 
 int ghes_estatus_pool_init(unsigned int num_ghes);

diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h

@@ -18,7 +18,20 @@ struct pcc_mbox_chan {
 	u16 min_turnaround_time;
 };
 
+/* Generic Communications Channel Shared Memory Region */
+#define PCC_SIGNATURE			0x50434300
+/* Generic Communications Channel Command Field */
+#define PCC_CMD_GENERATE_DB_INTR	BIT(15)
+/* Generic Communications Channel Status Field */
+#define PCC_STATUS_CMD_COMPLETE		BIT(0)
+#define PCC_STATUS_SCI_DOORBELL		BIT(1)
+#define PCC_STATUS_ERROR		BIT(2)
+#define PCC_STATUS_PLATFORM_NOTIFY	BIT(3)
+/* Initiator Responder Communications Channel Flags */
+#define PCC_CMD_COMPLETION_NOTIFY	BIT(0)
+
 #define MAX_PCC_SUBSPACES	256
 #ifdef CONFIG_PCC
 extern struct pcc_mbox_chan *
 pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id);
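For reference, PCC_SIGNATURE is ASCII "PCC\0" (0x50 0x43 0x43 0x00), to be
ORed with the subspace index. A sketch of the command-ring sequence a migrated
driver performs against the shared memory region, mirroring the xgene/slimpro
hunks above (illustrative only; real drivers add locking, completion waiting
and per-platform endianness handling):

#include <acpi/pcc.h>
#include <linux/io.h>

/* Ring a PCC command using the generic shared-memory definitions. */
static void demo_pcc_ring_command(struct acpi_pcct_shared_memory __iomem *shmem,
				  int subspace_id, u16 cmd)
{
	u16 status;

	/* Identify the subspace: "PCC\0" ORed with the subspace index. */
	writel(PCC_SIGNATURE | subspace_id, &shmem->signature);

	/* Ask the platform to raise a doorbell interrupt on completion. */
	writew(cmd | PCC_CMD_GENERATE_DB_INTR, &shmem->command);

	/* Clear command-complete before ringing the doorbell. */
	status = readw(&shmem->status);
	writew(status & ~PCC_STATUS_CMD_COMPLETE, &shmem->status);
}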