soundwire updates for 6.4-rc1

- Support for AMD soundwire controller
  - Intel driver updates to support future platforms
  - Core API sdw_nread/nwrite_no_pm updates to handle page boundaries
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmRSOS4ACgkQfBQHDyUj
 g0fV/BAAlmWN+6PwWO02Reyj6+JCFej10by7GIjCAOeD8QeAt89cgbuD4/FYufrH
 2P/KXbTGs2dBQPP1J62lvUqTsUoJ2wYZR3R0QkcDHmhso61m+XeQ9g6BTBowp5U7
 1+fEUhZUTVS7+8Zx8Cuy0DyV2oFITT1n6WxpT4XzDc3h5pY3MAp8T1t6JVfQoSPT
 UX6YEniENI/5qFS0pe1MuWXju5zF4gQ/p1sbyzuhh9X5bIA4RUeJJDcIlJv6WGki
 EqLyDOVp6KUxFXIk0W5k1K6jqJASNqrGeaEqKUIYW7ZqJE4lxGvQpcmZ89s++pYU
 SsqN3TM0XkW3BhXSP8tX3KWAkazyUIbsEeu707qPaZ+4hBmZ6eS5+maOpUw0ddML
 MHinR/uw/ZZOyUmfIoEmMFjEtMYUsXKqMhFketZ95GYwt1hRv3kihiUcUl1EasfK
 63UflB4v1TV258FKrZokAUt9M0Bs8a7qEEIzhfmv9910raLUtS0a1oFSDjIB0UAz
 2nYDAXyzs0sU+fcIflou0f2bPu8cDgy7FaMTm5wBnjWJMNtebWO2npFOW3yAkjw5
 auQtT3Ja+4ag9dcJIXMCdxzbbXSX8HK4aQ+d9Z4o8mhfFOJ/YM22JcseG92HKsQM
 jouDxw7oj+tixIdtgc4oYHEZqu9fkW9C8valkKrQ0R0gBeGx4s0=
 =u4rP
 -----END PGP SIGNATURE-----

Merge tag 'soundwire-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/soundwire

Pull soundwire updates from Vinod Koul:
 "This features the AMD SoundWire controller driver, a bunch of Intel
  changes for future platform support, sdw API updates, etc.:

   - Support for AMD soundwire controller

   - Intel driver updates to support future platforms

   - Core API sdw_nread/nwrite_no_pm updates to handle page boundaries"

* tag 'soundwire-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/soundwire: (38 commits)
  soundwire: intel_auxdevice: improve pm_prepare step
  soundwire: bus: Fix unbalanced pm_runtime_put() causing usage count underflow
  soundwire: intel: don't save hw_params for use in prepare
  soundwire: bus: Update sdw_nread/nwrite_no_pm to handle page boundaries
  soundwire: bus: Update kernel doc for no_pm functions
  soundwire: bus: Remove now outdated comments on no_pm IO
  soundwire: stream: uniquify dev_err() logs
  soundwire: stream: remove bus->dev from logs on multiple buses
  soundwire: amd: add pm_prepare callback and pm ops support
  soundwire: amd: handle SoundWire wake enable interrupt
  soundwire: amd: add runtime pm ops for AMD SoundWire manager driver
  soundwire: amd: add SoundWire manager interrupt handling
  soundwire: amd: enable build for AMD SoundWire manager driver
  soundwire: amd: register SoundWire manager dai ops
  soundwire: amd: Add support for AMD Manager driver
  soundwire: export sdw_compute_slave_ports() function
  soundwire: stream: restore cumulative bus bandwidth when compute_params callback failed
  soundwire: bandwidth allocation: Use hweight32() to calculate set bits
  soundwire: qcom: gracefully handle too many ports in DT
  soundwire: qcom: define hardcoded version magic numbers
  ...
Linus Torvalds 2023-05-03 11:21:07 -07:00
commit 4c9818d865
18 changed files with 2192 additions and 441 deletions


@ -18,6 +18,16 @@ if SOUNDWIRE
comment "SoundWire Devices"
config SOUNDWIRE_AMD
tristate "AMD SoundWire Manager driver"
select SOUNDWIRE_GENERIC_ALLOCATION
depends on ACPI && SND_SOC
help
SoundWire AMD Manager driver.
If you have an AMD platform which has a SoundWire Manager then
enable this config option to get the SoundWire support for that
device.
config SOUNDWIRE_CADENCE
tristate
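As a quick illustration of the new SOUNDWIRE_AMD entry above: on a kernel with ACPI and SND_SOC enabled, the AMD manager can be built as a module with a fragment along these lines (hypothetical .config snippet, not part of the patch):

CONFIG_SOUNDWIRE=m
CONFIG_SOUNDWIRE_AMD=m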


@ -15,12 +15,17 @@ ifdef CONFIG_DEBUG_FS
soundwire-bus-y += debugfs.o
endif
#AMD driver
soundwire-amd-y := amd_manager.o
obj-$(CONFIG_SOUNDWIRE_AMD) += soundwire-amd.o
#Cadence Objs
soundwire-cadence-y := cadence_master.o
obj-$(CONFIG_SOUNDWIRE_CADENCE) += soundwire-cadence.o
#Intel driver
soundwire-intel-y := intel.o intel_auxdevice.o intel_init.o dmi-quirks.o
soundwire-intel-y := intel.o intel_auxdevice.o intel_init.o dmi-quirks.o \
intel_bus_common.o
obj-$(CONFIG_SOUNDWIRE_INTEL) += soundwire-intel.o
#Qualcomm driver

File diff suppressed because it is too large


@ -0,0 +1,258 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.
*/
#ifndef __AMD_MANAGER_H
#define __AMD_MANAGER_H
#include <linux/soundwire/sdw_amd.h>
#define SDW_MANAGER_REG_OFFSET 0xc00
#define AMD_SDW_DEFAULT_ROWS 50
#define AMD_SDW_DEFAULT_COLUMNS 10
#define ACP_PAD_PULLDOWN_CTRL 0x0001448
#define ACP_SW_PAD_KEEPER_EN 0x0001454
#define ACP_SW0_WAKE_EN 0x0001458
#define ACP_EXTERNAL_INTR_CNTL0 0x0001a04
#define ACP_EXTERNAL_INTR_STAT0 0x0001a0c
#define ACP_EXTERNAL_INTR_CNTL(i) (ACP_EXTERNAL_INTR_CNTL0 + ((i) * 4))
#define ACP_EXTERNAL_INTR_STAT(i) (ACP_EXTERNAL_INTR_STAT0 + ((i) * 4))
#define ACP_SW_WAKE_EN(i) (ACP_SW0_WAKE_EN + ((i) * 8))
#define ACP_SW_EN 0x0003000
#define ACP_SW_EN_STATUS 0x0003004
#define ACP_SW_FRAMESIZE 0x0003008
#define ACP_SW_SSP_COUNTER 0x000300c
#define ACP_SW_AUDIO0_TX_EN 0x0003010
#define ACP_SW_AUDIO0_TX_EN_STATUS 0x0003014
#define ACP_SW_AUDIO0_TX_FRAME_FORMAT 0x0003018
#define ACP_SW_AUDIO0_TX_SAMPLEINTERVAL 0x000301c
#define ACP_SW_AUDIO0_TX_HCTRL_DP0 0x0003020
#define ACP_SW_AUDIO0_TX_HCTRL_DP1 0x0003024
#define ACP_SW_AUDIO0_TX_HCTRL_DP2 0x0003028
#define ACP_SW_AUDIO0_TX_HCTRL_DP3 0x000302c
#define ACP_SW_AUDIO0_TX_OFFSET_DP0 0x0003030
#define ACP_SW_AUDIO0_TX_OFFSET_DP1 0x0003034
#define ACP_SW_AUDIO0_TX_OFFSET_DP2 0x0003038
#define ACP_SW_AUDIO0_TX_OFFSET_DP3 0x000303c
#define ACP_SW_AUDIO0_TX_CHANNEL_ENABLE_DP0 0x0003040
#define ACP_SW_AUDIO0_TX_CHANNEL_ENABLE_DP1 0x0003044
#define ACP_SW_AUDIO0_TX_CHANNEL_ENABLE_DP2 0x0003048
#define ACP_SW_AUDIO0_TX_CHANNEL_ENABLE_DP3 0x000304c
#define ACP_SW_AUDIO1_TX_EN 0x0003050
#define ACP_SW_AUDIO1_TX_EN_STATUS 0x0003054
#define ACP_SW_AUDIO1_TX_FRAME_FORMAT 0x0003058
#define ACP_SW_AUDIO1_TX_SAMPLEINTERVAL 0x000305c
#define ACP_SW_AUDIO1_TX_HCTRL 0x0003060
#define ACP_SW_AUDIO1_TX_OFFSET 0x0003064
#define ACP_SW_AUDIO1_TX_CHANNEL_ENABLE_DP0 0x0003068
#define ACP_SW_AUDIO2_TX_EN 0x000306c
#define ACP_SW_AUDIO2_TX_EN_STATUS 0x0003070
#define ACP_SW_AUDIO2_TX_FRAME_FORMAT 0x0003074
#define ACP_SW_AUDIO2_TX_SAMPLEINTERVAL 0x0003078
#define ACP_SW_AUDIO2_TX_HCTRL 0x000307c
#define ACP_SW_AUDIO2_TX_OFFSET 0x0003080
#define ACP_SW_AUDIO2_TX_CHANNEL_ENABLE_DP0 0x0003084
#define ACP_SW_AUDIO0_RX_EN 0x0003088
#define ACP_SW_AUDIO0_RX_EN_STATUS 0x000308c
#define ACP_SW_AUDIO0_RX_FRAME_FORMAT 0x0003090
#define ACP_SW_AUDIO0_RX_SAMPLEINTERVAL 0x0003094
#define ACP_SW_AUDIO0_RX_HCTRL_DP0 0x0003098
#define ACP_SW_AUDIO0_RX_HCTRL_DP1 0x000309c
#define ACP_SW_AUDIO0_RX_HCTRL_DP2 0x0003100
#define ACP_SW_AUDIO0_RX_HCTRL_DP3 0x0003104
#define ACP_SW_AUDIO0_RX_OFFSET_DP0 0x0003108
#define ACP_SW_AUDIO0_RX_OFFSET_DP1 0x000310c
#define ACP_SW_AUDIO0_RX_OFFSET_DP2 0x0003110
#define ACP_SW_AUDIO0_RX_OFFSET_DP3 0x0003114
#define ACP_SW_AUDIO0_RX_CHANNEL_ENABLE_DP0 0x0003118
#define ACP_SW_AUDIO0_RX_CHANNEL_ENABLE_DP1 0x000311c
#define ACP_SW_AUDIO0_RX_CHANNEL_ENABLE_DP2 0x0003120
#define ACP_SW_AUDIO0_RX_CHANNEL_ENABLE_DP3 0x0003124
#define ACP_SW_AUDIO1_RX_EN 0x0003128
#define ACP_SW_AUDIO1_RX_EN_STATUS 0x000312c
#define ACP_SW_AUDIO1_RX_FRAME_FORMAT 0x0003130
#define ACP_SW_AUDIO1_RX_SAMPLEINTERVAL 0x0003134
#define ACP_SW_AUDIO1_RX_HCTRL 0x0003138
#define ACP_SW_AUDIO1_RX_OFFSET 0x000313c
#define ACP_SW_AUDIO1_RX_CHANNEL_ENABLE_DP0 0x0003140
#define ACP_SW_AUDIO2_RX_EN 0x0003144
#define ACP_SW_AUDIO2_RX_EN_STATUS 0x0003148
#define ACP_SW_AUDIO2_RX_FRAME_FORMAT 0x000314c
#define ACP_SW_AUDIO2_RX_SAMPLEINTERVAL 0x0003150
#define ACP_SW_AUDIO2_RX_HCTRL 0x0003154
#define ACP_SW_AUDIO2_RX_OFFSET 0x0003158
#define ACP_SW_AUDIO2_RX_CHANNEL_ENABLE_DP0 0x000315c
#define ACP_SW_BPT_PORT_EN 0x0003160
#define ACP_SW_BPT_PORT_EN_STATUS 0x0003164
#define ACP_SW_BPT_PORT_FRAME_FORMAT 0x0003168
#define ACP_SW_BPT_PORT_SAMPLEINTERVAL 0x000316c
#define ACP_SW_BPT_PORT_HCTRL 0x0003170
#define ACP_SW_BPT_PORT_OFFSET 0x0003174
#define ACP_SW_BPT_PORT_CHANNEL_ENABLE 0x0003178
#define ACP_SW_BPT_PORT_FIRST_BYTE_ADDR 0x000317c
#define ACP_SW_CLK_RESUME_CTRL 0x0003180
#define ACP_SW_CLK_RESUME_DELAY_CNTR 0x0003184
#define ACP_SW_BUS_RESET_CTRL 0x0003188
#define ACP_SW_PRBS_ERR_STATUS 0x000318c
#define ACP_SW_IMM_CMD_UPPER_WORD 0x0003230
#define ACP_SW_IMM_CMD_LOWER_QWORD 0x0003234
#define ACP_SW_IMM_RESP_UPPER_WORD 0x0003238
#define ACP_SW_IMM_RESP_LOWER_QWORD 0x000323c
#define ACP_SW_IMM_CMD_STS 0x0003240
#define ACP_SW_BRA_BASE_ADDRESS 0x0003244
#define ACP_SW_BRA_TRANSFER_SIZE 0x0003248
#define ACP_SW_BRA_DMA_BUSY 0x000324c
#define ACP_SW_BRA_RESP 0x0003250
#define ACP_SW_BRA_RESP_FRAME_ADDR 0x0003254
#define ACP_SW_BRA_CURRENT_TRANSFER_SIZE 0x0003258
#define ACP_SW_STATE_CHANGE_STATUS_0TO7 0x000325c
#define ACP_SW_STATE_CHANGE_STATUS_8TO11 0x0003260
#define ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7 0x0003264
#define ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11 0x0003268
#define ACP_SW_CLK_FREQUENCY_CTRL 0x000326c
#define ACP_SW_ERROR_INTR_MASK 0x0003270
#define ACP_SW_PHY_TEST_MODE_DATA_OFF 0x0003274
#define ACP_DELAY_US 10
#define AMD_SDW_TIMEOUT 1000
#define AMD_SDW_DEFAULT_CLK_FREQ 12000000
#define AMD_SDW_MCP_RESP_ACK BIT(0)
#define AMD_SDW_MCP_RESP_NACK BIT(1)
#define AMD_SDW_MCP_RESP_RDATA GENMASK(14, 7)
#define AMD_SDW_MCP_CMD_SSP_TAG BIT(31)
#define AMD_SDW_MCP_CMD_COMMAND GENMASK(14, 12)
#define AMD_SDW_MCP_CMD_DEV_ADDR GENMASK(11, 8)
#define AMD_SDW_MCP_CMD_REG_ADDR_HIGH GENMASK(7, 0)
#define AMD_SDW_MCP_CMD_REG_ADDR_LOW GENMASK(31, 24)
#define AMD_SDW_MCP_CMD_REG_DATA GENMASK(14, 7)
#define AMD_SDW_MCP_SLAVE_STAT_0_3 GENMASK(14, 7)
#define AMD_SDW_MCP_SLAVE_STAT_4_11 GENMASK_ULL(39, 24)
#define AMD_SDW_MCP_SLAVE_STATUS_MASK GENMASK(1, 0)
#define AMD_SDW_MCP_SLAVE_STATUS_BITS GENMASK(3, 2)
#define AMD_SDW_MCP_SLAVE_STATUS_8TO_11 GENMASK_ULL(15, 0)
#define AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(x) BIT(((x) * 4))
#define AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(x) (((x) * 4) + 1)
#define AMD_SDW_MASTER_SUSPEND_DELAY_MS 2000
#define AMD_SDW_QUIRK_MASK_BUS_ENABLE BIT(0)
#define AMD_SDW_IMM_RES_VALID 1
#define AMD_SDW_IMM_CMD_BUSY 2
#define AMD_SDW_ENABLE 1
#define AMD_SDW_DISABLE 0
#define AMD_SDW_BUS_RESET_CLEAR_REQ 0
#define AMD_SDW_BUS_RESET_REQ 1
#define AMD_SDW_BUS_RESET_DONE 2
#define AMD_SDW_BUS_BASE_FREQ 24000000
#define AMD_SDW0_EXT_INTR_MASK 0x200000
#define AMD_SDW1_EXT_INTR_MASK 4
#define AMD_SDW_IRQ_MASK_0TO7 0x77777777
#define AMD_SDW_IRQ_MASK_8TO11 0x000d7777
#define AMD_SDW_IRQ_ERROR_MASK 0xff
#define AMD_SDW_MAX_FREQ_NUM 1
#define AMD_SDW0_MAX_TX_PORTS 3
#define AMD_SDW0_MAX_RX_PORTS 3
#define AMD_SDW1_MAX_TX_PORTS 1
#define AMD_SDW1_MAX_RX_PORTS 1
#define AMD_SDW0_MAX_DAI 6
#define AMD_SDW1_MAX_DAI 2
#define AMD_SDW_SLAVE_0_ATTACHED 5
#define AMD_SDW_SSP_COUNTER_VAL 3
#define AMD_DPN_FRAME_FMT_PFM GENMASK(1, 0)
#define AMD_DPN_FRAME_FMT_PDM GENMASK(3, 2)
#define AMD_DPN_FRAME_FMT_BLK_PKG_MODE BIT(4)
#define AMD_DPN_FRAME_FMT_BLK_GRP_CTRL GENMASK(6, 5)
#define AMD_DPN_FRAME_FMT_WORD_LEN GENMASK(12, 7)
#define AMD_DPN_FRAME_FMT_PCM_OR_PDM BIT(13)
#define AMD_DPN_HCTRL_HSTOP GENMASK(3, 0)
#define AMD_DPN_HCTRL_HSTART GENMASK(7, 4)
#define AMD_DPN_OFFSET_CTRL_1 GENMASK(7, 0)
#define AMD_DPN_OFFSET_CTRL_2 GENMASK(15, 8)
#define AMD_DPN_CH_EN_LCTRL GENMASK(2, 0)
#define AMD_DPN_CH_EN_CHMASK GENMASK(10, 3)
#define AMD_SDW_STAT_MAX_RETRY_COUNT 100
#define AMD_SDW0_PAD_PULLDOWN_CTRL_ENABLE_MASK 0x7f9f
#define AMD_SDW1_PAD_PULLDOWN_CTRL_ENABLE_MASK 0x7ffa
#define AMD_SDW0_PAD_PULLDOWN_CTRL_DISABLE_MASK 0x60
#define AMD_SDW1_PAD_PULLDOWN_CTRL_DISABLE_MASK 5
#define AMD_SDW0_PAD_KEEPER_EN_MASK 1
#define AMD_SDW1_PAD_KEEPER_EN_MASK 0x10
#define AMD_SDW0_PAD_KEEPER_DISABLE_MASK 0x1e
#define AMD_SDW1_PAD_KEEPER_DISABLE_MASK 0xf
#define AMD_SDW_PREQ_INTR_STAT BIT(19)
#define AMD_SDW_CLK_STOP_DONE 1
#define AMD_SDW_CLK_RESUME_REQ 2
#define AMD_SDW_CLK_RESUME_DONE 3
#define AMD_SDW_WAKE_STAT_MASK BIT(16)
static u32 amd_sdw_freq_tbl[AMD_SDW_MAX_FREQ_NUM] = {
AMD_SDW_DEFAULT_CLK_FREQ,
};
struct sdw_manager_dp_reg {
u32 frame_fmt_reg;
u32 sample_int_reg;
u32 hctrl_dp0_reg;
u32 offset_reg;
u32 lane_ctrl_ch_en_reg;
};
/*
* SDW0 Manager instance registers 6 CPU DAI (3 TX & 3 RX Ports)
* whereas SDW1 Manager Instance registers 2 CPU DAI (one TX & one RX port)
* Below is the CPU DAI <->Manager port number mapping
* i.e SDW0 Pin0 -> port number 0 -> AUDIO0 TX
* SDW0 Pin1 -> Port number 1 -> AUDIO1 TX
* SDW0 Pin2 -> Port number 2 -> AUDIO2 TX
* SDW0 Pin3 -> port number 3 -> AUDIO0 RX
* SDW0 Pin4 -> Port number 4 -> AUDIO1 RX
* SDW0 Pin5 -> Port number 5 -> AUDIO2 RX
* Whereas for SDW1 instance
* SDW1 Pin0 -> port number 0 -> AUDIO1 TX
* SDW1 Pin1 -> Port number 1 -> AUDIO1 RX
* Same mapping should be used for programming DMA controller registers in SoundWire DMA driver.
* i.e if AUDIO0 TX channel is selected then we need to use AUDIO0 TX registers for DMA programming
* in SoundWire DMA driver.
*/
static struct sdw_manager_dp_reg sdw0_manager_dp_reg[AMD_SDW0_MAX_DAI] = {
{ACP_SW_AUDIO0_TX_FRAME_FORMAT, ACP_SW_AUDIO0_TX_SAMPLEINTERVAL, ACP_SW_AUDIO0_TX_HCTRL_DP0,
ACP_SW_AUDIO0_TX_OFFSET_DP0, ACP_SW_AUDIO0_TX_CHANNEL_ENABLE_DP0},
{ACP_SW_AUDIO1_TX_FRAME_FORMAT, ACP_SW_AUDIO1_TX_SAMPLEINTERVAL, ACP_SW_AUDIO1_TX_HCTRL,
ACP_SW_AUDIO1_TX_OFFSET, ACP_SW_AUDIO1_TX_CHANNEL_ENABLE_DP0},
{ACP_SW_AUDIO2_TX_FRAME_FORMAT, ACP_SW_AUDIO2_TX_SAMPLEINTERVAL, ACP_SW_AUDIO2_TX_HCTRL,
ACP_SW_AUDIO2_TX_OFFSET, ACP_SW_AUDIO2_TX_CHANNEL_ENABLE_DP0},
{ACP_SW_AUDIO0_RX_FRAME_FORMAT, ACP_SW_AUDIO0_RX_SAMPLEINTERVAL, ACP_SW_AUDIO0_RX_HCTRL_DP0,
ACP_SW_AUDIO0_RX_OFFSET_DP0, ACP_SW_AUDIO0_RX_CHANNEL_ENABLE_DP0},
{ACP_SW_AUDIO1_RX_FRAME_FORMAT, ACP_SW_AUDIO1_RX_SAMPLEINTERVAL, ACP_SW_AUDIO1_RX_HCTRL,
ACP_SW_AUDIO1_RX_OFFSET, ACP_SW_AUDIO1_RX_CHANNEL_ENABLE_DP0},
{ACP_SW_AUDIO2_RX_FRAME_FORMAT, ACP_SW_AUDIO2_RX_SAMPLEINTERVAL, ACP_SW_AUDIO2_RX_HCTRL,
ACP_SW_AUDIO2_RX_OFFSET, ACP_SW_AUDIO2_RX_CHANNEL_ENABLE_DP0},
};
static struct sdw_manager_dp_reg sdw1_manager_dp_reg[AMD_SDW1_MAX_DAI] = {
{ACP_SW_AUDIO1_TX_FRAME_FORMAT, ACP_SW_AUDIO1_TX_SAMPLEINTERVAL, ACP_SW_AUDIO1_TX_HCTRL,
ACP_SW_AUDIO1_TX_OFFSET, ACP_SW_AUDIO1_TX_CHANNEL_ENABLE_DP0},
{ACP_SW_AUDIO1_RX_FRAME_FORMAT, ACP_SW_AUDIO1_RX_SAMPLEINTERVAL, ACP_SW_AUDIO1_RX_HCTRL,
ACP_SW_AUDIO1_RX_OFFSET, ACP_SW_AUDIO1_RX_CHANNEL_ENABLE_DP0}
};
static struct sdw_manager_reg_mask sdw_manager_reg_mask_array[2] = {
{
AMD_SDW0_PAD_KEEPER_EN_MASK,
AMD_SDW0_PAD_PULLDOWN_CTRL_ENABLE_MASK,
AMD_SDW0_EXT_INTR_MASK
},
{
AMD_SDW1_PAD_KEEPER_EN_MASK,
AMD_SDW1_PAD_PULLDOWN_CTRL_ENABLE_MASK,
AMD_SDW1_EXT_INTR_MASK
}
};
#endif
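To make the CPU DAI to data-port mapping described in the comment above concrete, a manager driver would typically pick the per-DAI register set by instance and DAI index. A minimal sketch (hypothetical helper, not taken from the patch; it assumes struct amd_sdw_manager and ACP_SDW0/ACP_SDW1 from sdw_amd.h shown later in this diff):

static struct sdw_manager_dp_reg *amd_sdw_get_dp_reg(struct amd_sdw_manager *amd_manager,
						      int dai_id)
{
	/* SDW0 registers six DAIs: AUDIO0..2 TX (0..2), then AUDIO0..2 RX (3..5);
	 * SDW1 registers two: AUDIO1 TX (0) and AUDIO1 RX (1) */
	if (amd_manager->instance == ACP_SDW0)
		return &sdw0_manager_dp_reg[dai_id];
	return &sdw1_manager_dp_reg[dai_id];
}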


@ -384,45 +384,73 @@ int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
/*
* Read/Write IO functions.
* no_pm versions can only be called by the bus, e.g. while enumerating or
* handling suspend-resume sequences.
* all clients need to use the pm versions
*/
int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
static int sdw_ntransfer_no_pm(struct sdw_slave *slave, u32 addr, u8 flags,
size_t count, u8 *val)
{
struct sdw_msg msg;
size_t size;
int ret;
ret = sdw_fill_msg(&msg, slave, addr, count,
slave->dev_num, SDW_MSG_FLAG_READ, val);
if (ret < 0)
return ret;
while (count) {
// Only handle bytes up to next page boundary
size = min_t(size_t, count, (SDW_REGADDR + 1) - (addr & SDW_REGADDR));
ret = sdw_transfer(slave->bus, &msg);
if (slave->is_mockup_device)
ret = 0;
return ret;
ret = sdw_fill_msg(&msg, slave, addr, size, slave->dev_num, flags, val);
if (ret < 0)
return ret;
ret = sdw_transfer(slave->bus, &msg);
if (ret < 0 && !slave->is_mockup_device)
return ret;
addr += size;
val += size;
count -= size;
}
return 0;
}
/**
* sdw_nread_no_pm() - Read "n" contiguous SDW Slave registers with no PM
* @slave: SDW Slave
* @addr: Register address
* @count: length
* @val: Buffer for values to be read
*
* Note that if the message crosses a page boundary each page will be
* transferred under a separate invocation of the msg_lock.
*/
int sdw_nread_no_pm(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_READ, count, val);
}
EXPORT_SYMBOL(sdw_nread_no_pm);
/**
* sdw_nwrite_no_pm() - Write "n" contiguous SDW Slave registers with no PM
* @slave: SDW Slave
* @addr: Register address
* @count: length
* @val: Buffer for values to be written
*
* Note that if the message crosses a page boundary each page will be
* transferred under a separate invocation of the msg_lock.
*/
int sdw_nwrite_no_pm(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
struct sdw_msg msg;
int ret;
ret = sdw_fill_msg(&msg, slave, addr, count,
slave->dev_num, SDW_MSG_FLAG_WRITE, (u8 *)val);
if (ret < 0)
return ret;
ret = sdw_transfer(slave->bus, &msg);
if (slave->is_mockup_device)
ret = 0;
return ret;
return sdw_ntransfer_no_pm(slave, addr, SDW_MSG_FLAG_WRITE, count, (u8 *)val);
}
EXPORT_SYMBOL(sdw_nwrite_no_pm);
/**
* sdw_write_no_pm() - Write a SDW Slave register with no PM
* @slave: SDW Slave
* @addr: Register address
* @value: Register value
*/
int sdw_write_no_pm(struct sdw_slave *slave, u32 addr, u8 value)
{
return sdw_nwrite_no_pm(slave, addr, 1, &value);
@ -495,6 +523,11 @@ int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 val
}
EXPORT_SYMBOL(sdw_bwrite_no_pm_unlocked);
/**
* sdw_read_no_pm() - Read a SDW Slave register with no PM
* @slave: SDW Slave
* @addr: Register address
*/
int sdw_read_no_pm(struct sdw_slave *slave, u32 addr)
{
u8 buf;
@ -541,14 +574,21 @@ EXPORT_SYMBOL(sdw_update);
* @addr: Register address
* @count: length
* @val: Buffer for values to be read
*
* This version of the function will take a PM reference to the slave
* device.
* Note that if the message crosses a page boundary each page will be
* transferred under a separate invocation of the msg_lock.
*/
int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val)
{
int ret;
ret = pm_runtime_resume_and_get(&slave->dev);
if (ret < 0 && ret != -EACCES)
ret = pm_runtime_get_sync(&slave->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_noidle(&slave->dev);
return ret;
}
ret = sdw_nread_no_pm(slave, addr, count, val);
@ -565,14 +605,21 @@ EXPORT_SYMBOL(sdw_nread);
* @addr: Register address
* @count: length
* @val: Buffer for values to be written
*
* This version of the function will take a PM reference to the slave
* device.
* Note that if the message crosses a page boundary each page will be
* transferred under a separate invocation of the msg_lock.
*/
int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, const u8 *val)
{
int ret;
ret = pm_runtime_resume_and_get(&slave->dev);
if (ret < 0 && ret != -EACCES)
ret = pm_runtime_get_sync(&slave->dev);
if (ret < 0 && ret != -EACCES) {
pm_runtime_put_noidle(&slave->dev);
return ret;
}
ret = sdw_nwrite_no_pm(slave, addr, count, val);
@ -587,6 +634,9 @@ EXPORT_SYMBOL(sdw_nwrite);
* sdw_read() - Read a SDW Slave register
* @slave: SDW Slave
* @addr: Register address
*
* This version of the function will take a PM reference to the slave
* device.
*/
int sdw_read(struct sdw_slave *slave, u32 addr)
{
@ -606,6 +656,9 @@ EXPORT_SYMBOL(sdw_read);
* @slave: SDW Slave
* @addr: Register address
* @value: Register value
*
* This version of the function will take a PM reference to the slave
* device.
*/
int sdw_write(struct sdw_slave *slave, u32 addr, u8 value)
{
@ -1541,9 +1594,10 @@ static int sdw_handle_slave_alerts(struct sdw_slave *slave)
sdw_modify_slave_status(slave, SDW_SLAVE_ALERT);
ret = pm_runtime_resume_and_get(&slave->dev);
ret = pm_runtime_get_sync(&slave->dev);
if (ret < 0 && ret != -EACCES) {
dev_err(&slave->dev, "Failed to resume device: %d\n", ret);
pm_runtime_put_noidle(&slave->dev);
return ret;
}
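For callers, the net effect of the sdw_ntransfer_no_pm() loop above is that reads and writes no longer need to be split manually at the 32 KiB paging boundary (SDW_REGADDR is GENMASK(14, 0)). A minimal usage sketch, assuming a valid 'slave' handle:

	u8 buf[4];
	int ret;

	/* a 4-byte read starting 2 bytes before the page boundary is split by
	 * the core into two messages, addr 0x7ffe/len 2 then addr 0x8000/len 2,
	 * each sent under its own msg_lock acquisition */
	ret = sdw_nread_no_pm(slave, 0x7ffe, sizeof(buf), buf);
	if (ret < 0)
		dev_err(&slave->dev, "page-crossing read failed: %d\n", ret);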


@ -144,6 +144,13 @@ struct sdw_master_runtime {
struct list_head bus_node;
};
struct sdw_transport_data {
int hstart;
int hstop;
int block_offset;
int sub_block_offset;
};
struct sdw_dpn_prop *sdw_get_slave_dpn_prop(struct sdw_slave *slave,
enum sdw_data_direction direction,
unsigned int port_num);
@ -158,17 +165,6 @@ int sdw_transfer_defer(struct sdw_bus *bus, struct sdw_msg *msg);
int sdw_fill_msg(struct sdw_msg *msg, struct sdw_slave *slave,
u32 addr, size_t count, u16 dev_num, u8 flags, u8 *buf);
/* Retrieve and return channel count from channel mask */
static inline int sdw_ch_mask_to_ch(int ch_mask)
{
int c = 0;
for (c = 0; ch_mask; ch_mask >>= 1)
c += ch_mask & 1;
return c;
}
/* Fill transport parameter data structure */
static inline void sdw_fill_xport_params(struct sdw_transport_params *params,
int port_num, bool grp_ctrl_valid,
@ -212,5 +208,7 @@ int sdw_bwrite_no_pm_unlocked(struct sdw_bus *bus, u16 dev_num, u32 addr, u8 val
void sdw_clear_slave_status(struct sdw_bus *bus, u32 request);
int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size);
void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
struct sdw_transport_data *t_data);
#endif /* __SDW_BUS_H */
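The open-coded popcount removed above (sdw_ch_mask_to_ch()) is replaced by hweight32() in the bandwidth-allocation hunk further down; the two are equivalent for counting active channels in a port channel mask. A one-line illustration:

	/* channels 0 and 2 enabled -> 2 active channels */
	unsigned int ch = hweight32(0x5);	/* ch == 2 */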


@ -27,32 +27,36 @@ module_param_named(cnds_mcp_int_mask, interrupt_mask, int, 0444);
MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_MCP_CONFIG 0x0
#define CDNS_MCP_CONFIG_MCMD_RETRY GENMASK(27, 24)
#define CDNS_MCP_CONFIG_MPREQ_DELAY GENMASK(20, 16)
#define CDNS_MCP_CONFIG_MMASTER BIT(7)
#define CDNS_MCP_CONFIG_BUS_REL BIT(6)
#define CDNS_MCP_CONFIG_SNIFFER BIT(5)
#define CDNS_MCP_CONFIG_SSPMOD BIT(4)
#define CDNS_MCP_CONFIG_CMD BIT(3)
#define CDNS_MCP_CONFIG_OP GENMASK(2, 0)
#define CDNS_MCP_CONFIG_OP_NORMAL 0
#define CDNS_IP_MCP_CONFIG 0x0 /* IP offset added at run-time */
#define CDNS_IP_MCP_CONFIG_MCMD_RETRY GENMASK(27, 24)
#define CDNS_IP_MCP_CONFIG_MPREQ_DELAY GENMASK(20, 16)
#define CDNS_IP_MCP_CONFIG_MMASTER BIT(7)
#define CDNS_IP_MCP_CONFIG_SNIFFER BIT(5)
#define CDNS_IP_MCP_CONFIG_CMD BIT(3)
#define CDNS_IP_MCP_CONFIG_OP GENMASK(2, 0)
#define CDNS_IP_MCP_CONFIG_OP_NORMAL 0
#define CDNS_MCP_CONTROL 0x4
#define CDNS_MCP_CONTROL_RST_DELAY GENMASK(10, 8)
#define CDNS_MCP_CONTROL_CMD_RST BIT(7)
#define CDNS_MCP_CONTROL_SOFT_RST BIT(6)
#define CDNS_MCP_CONTROL_SW_RST BIT(5)
#define CDNS_MCP_CONTROL_HW_RST BIT(4)
#define CDNS_MCP_CONTROL_CLK_PAUSE BIT(3)
#define CDNS_MCP_CONTROL_CLK_STOP_CLR BIT(2)
#define CDNS_MCP_CONTROL_CMD_ACCEPT BIT(1)
#define CDNS_MCP_CONTROL_BLOCK_WAKEUP BIT(0)
#define CDNS_MCP_CMDCTRL 0x8
#define CDNS_IP_MCP_CONTROL 0x4 /* IP offset added at run-time */
#define CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR BIT(2)
#define CDNS_IP_MCP_CONTROL_RST_DELAY GENMASK(10, 8)
#define CDNS_IP_MCP_CONTROL_SW_RST BIT(5)
#define CDNS_IP_MCP_CONTROL_CLK_PAUSE BIT(3)
#define CDNS_IP_MCP_CONTROL_CMD_ACCEPT BIT(1)
#define CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP BIT(0)
#define CDNS_IP_MCP_CMDCTRL 0x8 /* IP offset added at run-time */
#define CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR BIT(2)
#define CDNS_MCP_SSPSTAT 0xC
#define CDNS_MCP_FRAME_SHAPE 0x10
@ -125,8 +129,8 @@ MODULE_PARM_DESC(cdns_mcp_int_mask, "Cadence MCP IntMask");
#define CDNS_MCP_FIFOSTAT 0x7C
#define CDNS_MCP_RX_FIFO_AVAIL GENMASK(5, 0)
#define CDNS_MCP_CMD_BASE 0x80
#define CDNS_MCP_RESP_BASE 0x80
#define CDNS_IP_MCP_CMD_BASE 0x80 /* IP offset added at run-time */
#define CDNS_IP_MCP_RESP_BASE 0x80 /* IP offset added at run-time */
/* FIFO can hold 8 commands */
#define CDNS_MCP_CMD_LEN 8
#define CDNS_MCP_CMD_WORD_LEN 0x4
@ -206,6 +210,16 @@ static inline void cdns_writel(struct sdw_cdns *cdns, int offset, u32 value)
writel(value, cdns->registers + offset);
}
static inline u32 cdns_ip_readl(struct sdw_cdns *cdns, int offset)
{
return cdns_readl(cdns, cdns->ip_offset + offset);
}
static inline void cdns_ip_writel(struct sdw_cdns *cdns, int offset, u32 value)
{
return cdns_writel(cdns, cdns->ip_offset + offset, value);
}
static inline void cdns_updatel(struct sdw_cdns *cdns,
int offset, u32 mask, u32 val)
{
@ -216,6 +230,12 @@ static inline void cdns_updatel(struct sdw_cdns *cdns,
cdns_writel(cdns, offset, tmp);
}
static inline void cdns_ip_updatel(struct sdw_cdns *cdns,
int offset, u32 mask, u32 val)
{
cdns_updatel(cdns, cdns->ip_offset + offset, mask, val);
}
static int cdns_set_wait(struct sdw_cdns *cdns, int offset, u32 mask, u32 value)
{
int timeout = 10;
@ -408,9 +428,9 @@ static int cdns_parity_error_injection(void *data, u64 value)
mutex_lock(&bus->bus_lock);
/* program hardware to inject parity error */
cdns_updatel(cdns, CDNS_MCP_CMDCTRL,
CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR,
CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR);
cdns_ip_updatel(cdns, CDNS_IP_MCP_CMDCTRL,
CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR,
CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR);
/* commit changes */
cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
@ -422,9 +442,9 @@ static int cdns_parity_error_injection(void *data, u64 value)
dev_info(cdns->dev, "parity error injection, read: %d\n", ret);
/* program hardware to disable parity error */
cdns_updatel(cdns, CDNS_MCP_CMDCTRL,
CDNS_MCP_CMDCTRL_INSERT_PARITY_ERR,
0);
cdns_ip_updatel(cdns, CDNS_IP_MCP_CMDCTRL,
CDNS_IP_MCP_CMDCTRL_INSERT_PARITY_ERR,
0);
/* commit changes */
cdns_updatel(cdns, CDNS_MCP_CONFIG_UPDATE,
@ -570,10 +590,10 @@ static void cdns_read_response(struct sdw_cdns *cdns)
num_resp = ARRAY_SIZE(cdns->response_buf);
}
cmd_base = CDNS_MCP_CMD_BASE;
cmd_base = CDNS_IP_MCP_CMD_BASE;
for (i = 0; i < num_resp; i++) {
cdns->response_buf[i] = cdns_readl(cdns, cmd_base);
cdns->response_buf[i] = cdns_ip_readl(cdns, cmd_base);
cmd_base += CDNS_MCP_CMD_WORD_LEN;
}
}
@ -592,7 +612,7 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
cdns->msg_count = count;
}
base = CDNS_MCP_CMD_BASE;
base = CDNS_IP_MCP_CMD_BASE;
addr = msg->addr + offset;
for (i = 0; i < count; i++) {
@ -605,7 +625,7 @@ _cdns_xfer_msg(struct sdw_cdns *cdns, struct sdw_msg *msg, int cmd,
data |= msg->buf[i + offset];
data |= FIELD_PREP(CDNS_MCP_CMD_SSP_TAG, msg->ssp_sync);
cdns_writel(cdns, base, data);
cdns_ip_writel(cdns, base, data);
base += CDNS_MCP_CMD_WORD_LEN;
}
@ -653,10 +673,10 @@ cdns_program_scp_addr(struct sdw_cdns *cdns, struct sdw_msg *msg)
data[0] |= msg->addr_page1;
data[1] |= msg->addr_page2;
base = CDNS_MCP_CMD_BASE;
cdns_writel(cdns, base, data[0]);
base = CDNS_IP_MCP_CMD_BASE;
cdns_ip_writel(cdns, base, data[0]);
base += CDNS_MCP_CMD_WORD_LEN;
cdns_writel(cdns, base, data[1]);
cdns_ip_writel(cdns, base, data[1]);
time = wait_for_completion_timeout(&cdns->tx_complete,
msecs_to_jiffies(CDNS_TX_TIMEOUT));
@ -1033,6 +1053,7 @@ update_status:
void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string,
bool initial_delay, int reset_iterations)
{
u32 ip_mcp_control;
u32 mcp_control;
u32 mcp_config_update;
int i;
@ -1040,6 +1061,12 @@ void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string
if (initial_delay)
usleep_range(1000, 1500);
ip_mcp_control = cdns_ip_readl(cdns, CDNS_IP_MCP_CONTROL);
/* the following bits should be cleared immediately */
if (ip_mcp_control & CDNS_IP_MCP_CONTROL_SW_RST)
dev_err(cdns->dev, "%s failed: IP_MCP_CONTROL_SW_RST is not cleared\n", string);
mcp_control = cdns_readl(cdns, CDNS_MCP_CONTROL);
/* the following bits should be cleared immediately */
@ -1047,10 +1074,9 @@ void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string
dev_err(cdns->dev, "%s failed: MCP_CONTROL_CMD_RST is not cleared\n", string);
if (mcp_control & CDNS_MCP_CONTROL_SOFT_RST)
dev_err(cdns->dev, "%s failed: MCP_CONTROL_SOFT_RST is not cleared\n", string);
if (mcp_control & CDNS_MCP_CONTROL_SW_RST)
dev_err(cdns->dev, "%s failed: MCP_CONTROL_SW_RST is not cleared\n", string);
if (mcp_control & CDNS_MCP_CONTROL_CLK_STOP_CLR)
dev_err(cdns->dev, "%s failed: MCP_CONTROL_CLK_STOP_CLR is not cleared\n", string);
mcp_config_update = cdns_readl(cdns, CDNS_MCP_CONFIG_UPDATE);
if (mcp_config_update & CDNS_MCP_CONFIG_UPDATE_BIT)
dev_err(cdns->dev, "%s failed: MCP_CONFIG_UPDATE_BIT is not cleared\n", string);
@ -1327,34 +1353,39 @@ int sdw_cdns_init(struct sdw_cdns *cdns)
CDNS_MCP_CONTROL_CMD_RST);
/* Set cmd accept mode */
cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
CDNS_MCP_CONTROL_CMD_ACCEPT);
cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL, CDNS_IP_MCP_CONTROL_CMD_ACCEPT,
CDNS_IP_MCP_CONTROL_CMD_ACCEPT);
/* Configure mcp config */
val = cdns_readl(cdns, CDNS_MCP_CONFIG);
/* enable bus operations with clock and data */
val &= ~CDNS_MCP_CONFIG_OP;
val |= CDNS_MCP_CONFIG_OP_NORMAL;
/* Set cmd mode for Tx and Rx cmds */
val &= ~CDNS_MCP_CONFIG_CMD;
/* Disable sniffer mode */
val &= ~CDNS_MCP_CONFIG_SNIFFER;
/* Disable auto bus release */
val &= ~CDNS_MCP_CONFIG_BUS_REL;
cdns_writel(cdns, CDNS_MCP_CONFIG, val);
/* Configure IP mcp config */
val = cdns_ip_readl(cdns, CDNS_IP_MCP_CONFIG);
/* enable bus operations with clock and data */
val &= ~CDNS_IP_MCP_CONFIG_OP;
val |= CDNS_IP_MCP_CONFIG_OP_NORMAL;
/* Set cmd mode for Tx and Rx cmds */
val &= ~CDNS_IP_MCP_CONFIG_CMD;
/* Disable sniffer mode */
val &= ~CDNS_IP_MCP_CONFIG_SNIFFER;
if (cdns->bus.multi_link)
/* Set Multi-master mode to take gsync into account */
val |= CDNS_MCP_CONFIG_MMASTER;
val |= CDNS_IP_MCP_CONFIG_MMASTER;
/* leave frame delay to hardware default of 0x1F */
/* leave command retry to hardware default of 0 */
cdns_writel(cdns, CDNS_MCP_CONFIG, val);
cdns_ip_writel(cdns, CDNS_IP_MCP_CONFIG, val);
/* changes will be committed later */
return 0;
@ -1584,9 +1615,9 @@ int sdw_cdns_clock_stop(struct sdw_cdns *cdns, bool block_wake)
* in clock stop state
*/
if (block_wake)
cdns_updatel(cdns, CDNS_MCP_CONTROL,
CDNS_MCP_CONTROL_BLOCK_WAKEUP,
CDNS_MCP_CONTROL_BLOCK_WAKEUP);
cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL,
CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP,
CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP);
list_for_each_entry(slave, &cdns->bus.slaves, node) {
if (slave->status == SDW_SLAVE_ATTACHED ||
@ -1659,18 +1690,18 @@ int sdw_cdns_clock_restart(struct sdw_cdns *cdns, bool bus_reset)
return ret;
}
cdns_updatel(cdns, CDNS_MCP_CONTROL,
CDNS_MCP_CONTROL_BLOCK_WAKEUP, 0);
cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL,
CDNS_IP_MCP_CONTROL_BLOCK_WAKEUP, 0);
cdns_updatel(cdns, CDNS_MCP_CONTROL, CDNS_MCP_CONTROL_CMD_ACCEPT,
CDNS_MCP_CONTROL_CMD_ACCEPT);
cdns_ip_updatel(cdns, CDNS_IP_MCP_CONTROL, CDNS_IP_MCP_CONTROL_CMD_ACCEPT,
CDNS_IP_MCP_CONTROL_CMD_ACCEPT);
if (!bus_reset) {
/* enable bus operations with clock and data */
cdns_updatel(cdns, CDNS_MCP_CONFIG,
CDNS_MCP_CONFIG_OP,
CDNS_MCP_CONFIG_OP_NORMAL);
cdns_ip_updatel(cdns, CDNS_IP_MCP_CONFIG,
CDNS_IP_MCP_CONFIG_OP,
CDNS_IP_MCP_CONFIG_OP_NORMAL);
ret = cdns_config_update(cdns);
if (ret < 0) {


@ -84,7 +84,6 @@ struct sdw_cdns_stream_config {
* @bus: Bus handle
* @stream_type: Stream type
* @link_id: Master link id
* @hw_params: hw_params to be applied in .prepare step
* @suspended: status set when suspended, to be used in .prepare
* @paused: status set in .trigger, to be used in suspend
* @direction: stream direction
@ -96,7 +95,6 @@ struct sdw_cdns_dai_runtime {
struct sdw_bus *bus;
enum sdw_stream_type stream_type;
int link_id;
struct snd_pcm_hw_params *hw_params;
bool suspended;
bool paused;
int direction;
@ -107,6 +105,7 @@ struct sdw_cdns_dai_runtime {
* @dev: Linux device
* @bus: Bus handle
* @instance: instance number
* @ip_offset: version-dependent offset to access IP_MCP registers and fields
* @response_buf: SoundWire response buffer
* @tx_complete: Tx completion
* @ports: Data ports
@ -122,6 +121,8 @@ struct sdw_cdns {
struct sdw_bus bus;
unsigned int instance;
u32 ip_offset;
/*
* The datasheet says the RX FIFO AVAIL can be 2 entries more
* than the FIFO capacity, so allow for this.

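The new ip_offset field documented above is what the cdns_ip_readl()/cdns_ip_writel()/cdns_ip_updatel() helpers in the cadence_master.c hunks add on top of the MMIO base, so the CDNS_IP_* registers can move on future hardware without touching the callers. Purely as an illustration (the offset value below is made up):

	/* with cdns->ip_offset == 0x8000, this reads registers + 0x8000 + 0x0 */
	u32 val = cdns_ip_readl(cdns, CDNS_IP_MCP_CONFIG);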

@ -73,6 +73,23 @@ static const struct adr_remap hp_omen_16[] = {
{}
};
/*
* Intel NUC M15 LAPRC510 and LAPRC710
*/
static const struct adr_remap intel_rooks_county[] = {
/* rt711-sdca on link0 */
{
0x000020025d071100ull,
0x000030025d071101ull
},
/* rt1316-sdca on link2 */
{
0x000120025d071100ull,
0x000230025d131601ull
},
{}
};
static const struct dmi_system_id adr_remap_quirk_table[] = {
/* TGL devices */
{
@ -98,6 +115,14 @@ static const struct dmi_system_id adr_remap_quirk_table[] = {
},
.driver_data = (void *)intel_tgl_bios,
},
{
/* quirk used for NUC15 'Rooks County' LAPRC510 and LAPRC710 skews */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
DMI_MATCH(DMI_PRODUCT_NAME, "LAPRC"),
},
.driver_data = (void *)intel_rooks_county,
},
{
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),


@ -6,6 +6,7 @@
*
*/
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@ -28,15 +29,8 @@ struct sdw_group {
unsigned int *rates;
};
struct sdw_transport_data {
int hstart;
int hstop;
int block_offset;
int sub_block_offset;
};
static void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
struct sdw_transport_data *t_data)
void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
struct sdw_transport_data *t_data)
{
struct sdw_slave_runtime *s_rt = NULL;
struct sdw_port_runtime *p_rt;
@ -54,7 +48,7 @@ static void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
slave_total_ch = 0;
list_for_each_entry(p_rt, &s_rt->port_list, port_node) {
ch = sdw_ch_mask_to_ch(p_rt->ch_mask);
ch = hweight32(p_rt->ch_mask);
sdw_fill_xport_params(&p_rt->transport_params,
p_rt->num, false,
@ -85,6 +79,7 @@ static void sdw_compute_slave_ports(struct sdw_master_runtime *m_rt,
}
}
}
EXPORT_SYMBOL(sdw_compute_slave_ports);
static void sdw_compute_master_ports(struct sdw_master_runtime *m_rt,
struct sdw_group_params *params,


@ -19,38 +19,6 @@
#include "bus.h"
#include "intel.h"
enum intel_pdi_type {
INTEL_PDI_IN = 0,
INTEL_PDI_OUT = 1,
INTEL_PDI_BD = 2,
};
#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
/*
* Read, write helpers for HW registers
*/
static inline int intel_readl(void __iomem *base, int offset)
{
return readl(base + offset);
}
static inline void intel_writel(void __iomem *base, int offset, int value)
{
writel(value, base + offset);
}
static inline u16 intel_readw(void __iomem *base, int offset)
{
return readw(base + offset);
}
static inline void intel_writew(void __iomem *base, int offset, u16 value)
{
writew(value, base + offset);
}
static int intel_wait_bit(void __iomem *base, int offset, u32 mask, u32 target)
{
int timeout = 10;
@ -357,6 +325,15 @@ static void intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
mutex_unlock(sdw->link_res->shim_lock);
}
static bool intel_check_cmdsync_unlocked(struct sdw_intel *sdw)
{
void __iomem *shim = sdw->link_res->shim;
int sync_reg;
sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
return !!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK);
}
static int intel_link_power_up(struct sdw_intel *sdw)
{
unsigned int link_id = sdw->instance;
@ -507,7 +484,6 @@ static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
{
void __iomem *shim = sdw->link_res->shim;
u32 sync_reg;
int ret;
/* Read SYNC register */
sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
@ -519,13 +495,9 @@ static int intel_shim_sync_go_unlocked(struct sdw_intel *sdw)
*/
sync_reg |= SDW_SHIM_SYNC_SYNCGO;
ret = intel_clear_bit(shim, SDW_SHIM_SYNC, sync_reg,
SDW_SHIM_SYNC_SYNCGO);
intel_writel(shim, SDW_SHIM_SYNC, sync_reg);
if (ret < 0)
dev_err(sdw->cdns.dev, "SyncGO clear failed: %d\n", ret);
return ret;
return 0;
}
static int intel_shim_sync_go(struct sdw_intel *sdw)
@ -618,13 +590,6 @@ static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
return 0;
}
static int intel_pdi_ch_update(struct sdw_intel *sdw)
{
intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
return 0;
}
static void
intel_pdi_shim_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
{
@ -717,63 +682,6 @@ static int intel_free_stream(struct sdw_intel *sdw,
return 0;
}
/*
* bank switch routines
*/
static int intel_pre_bank_switch(struct sdw_intel *sdw)
{
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
/* Write to register only for multi-link */
if (!bus->multi_link)
return 0;
intel_shim_sync_arm(sdw);
return 0;
}
static int intel_post_bank_switch(struct sdw_intel *sdw)
{
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
void __iomem *shim = sdw->link_res->shim;
int sync_reg, ret;
/* Write to register only for multi-link */
if (!bus->multi_link)
return 0;
mutex_lock(sdw->link_res->shim_lock);
/* Read SYNC register */
sync_reg = intel_readl(shim, SDW_SHIM_SYNC);
/*
* post_bank_switch() ops is called from the bus in loop for
* all the Masters in the steam with the expectation that
* we trigger the bankswitch for the only first Master in the list
* and do nothing for the other Masters
*
* So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
*/
if (!(sync_reg & SDW_SHIM_SYNC_CMDSYNC_MASK)) {
ret = 0;
goto unlock;
}
ret = intel_shim_sync_go_unlocked(sdw);
unlock:
mutex_unlock(sdw->link_res->shim_lock);
if (ret < 0)
dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
return ret;
}
/*
* DAI routines
*/
@ -817,7 +725,6 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
dai_runtime->paused = false;
dai_runtime->suspended = false;
dai_runtime->pdi = pdi;
dai_runtime->hw_params = params;
/* Inform DSP about PDI stream number */
ret = intel_params_stream(sdw, substream->stream, dai, params,
@ -870,6 +777,11 @@ static int intel_prepare(struct snd_pcm_substream *substream,
}
if (dai_runtime->suspended) {
struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
struct snd_pcm_hw_params *hw_params;
hw_params = &rtd->dpcm[substream->stream].hw_params;
dai_runtime->suspended = false;
/*
@ -881,7 +793,7 @@ static int intel_prepare(struct snd_pcm_substream *substream,
*/
/* configure stream */
ch = params_channels(dai_runtime->hw_params);
ch = params_channels(hw_params);
if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
dir = SDW_DATA_DIR_RX;
else
@ -893,7 +805,7 @@ static int intel_prepare(struct snd_pcm_substream *substream,
/* Inform DSP about PDI stream number */
ret = intel_params_stream(sdw, substream->stream, dai,
dai_runtime->hw_params,
hw_params,
sdw->instance,
dai_runtime->pdi->intel_alh_id);
}
@ -932,7 +844,6 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
return ret;
}
dai_runtime->hw_params = NULL;
dai_runtime->pdi = NULL;
return 0;
@ -1088,7 +999,6 @@ static int intel_create_dai(struct sdw_cdns *cdns,
if (num == 0)
return 0;
/* TODO: Read supported rates/formats from hardware */
for (i = off; i < (off + num); i++) {
dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
"SDW%d Pin%d",
@ -1099,15 +1009,11 @@ static int intel_create_dai(struct sdw_cdns *cdns,
if (type == INTEL_PDI_BD || type == INTEL_PDI_OUT) {
dais[i].playback.channels_min = 1;
dais[i].playback.channels_max = max_ch;
dais[i].playback.rates = SNDRV_PCM_RATE_48000;
dais[i].playback.formats = SNDRV_PCM_FMTBIT_S16_LE;
}
if (type == INTEL_PDI_BD || type == INTEL_PDI_IN) {
dais[i].capture.channels_min = 1;
dais[i].capture.channels_max = max_ch;
dais[i].capture.rates = SNDRV_PCM_RATE_48000;
dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
}
dais[i].ops = &intel_pcm_dai_ops;
@ -1131,7 +1037,7 @@ static int intel_register_dai(struct sdw_intel *sdw)
if (ret)
return ret;
intel_pdi_ch_update(sdw);
intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
/* DAIs are created based on total number of PDIs supported */
num_dai = cdns->pcm.num_pdi;
@ -1171,205 +1077,6 @@ static int intel_register_dai(struct sdw_intel *sdw)
dais, num_dai);
}
static int intel_start_bus(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
int ret;
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
return ret;
}
/*
* follow recommended programming flows to avoid timeouts when
* gsync is enabled
*/
if (bus->multi_link)
intel_shim_sync_arm(sdw);
ret = sdw_cdns_init(cdns);
if (ret < 0) {
dev_err(dev, "%s: unable to initialize Cadence IP: %d\n", __func__, ret);
goto err_interrupt;
}
ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
goto err_interrupt;
}
if (bus->multi_link) {
ret = intel_shim_sync_go(sdw);
if (ret < 0) {
dev_err(dev, "%s: sync go failed: %d\n", __func__, ret);
goto err_interrupt;
}
}
sdw_cdns_check_self_clearing_bits(cdns, __func__,
true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
err_interrupt:
sdw_cdns_enable_interrupt(cdns, false);
return ret;
}
static int intel_start_bus_after_reset(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
bool clock_stop0;
int status;
int ret;
/*
* An exception condition occurs for the CLK_STOP_BUS_RESET
* case if one or more masters remain active. In this condition,
* all the masters are powered on for they are in the same power
* domain. Master can preserve its context for clock stop0, so
* there is no need to clear slave status and reset bus.
*/
clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
if (!clock_stop0) {
/*
* make sure all Slaves are tagged as UNATTACHED and
* provide reason for reinitialization
*/
status = SDW_UNATTACH_REQUEST_MASTER_RESET;
sdw_clear_slave_status(bus, status);
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "cannot enable interrupts during resume\n");
return ret;
}
/*
* follow recommended programming flows to avoid
* timeouts when gsync is enabled
*/
if (bus->multi_link)
intel_shim_sync_arm(sdw);
/*
* Re-initialize the IP since it was powered-off
*/
sdw_cdns_init(&sdw->cdns);
} else {
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "cannot enable interrupts during resume\n");
return ret;
}
}
ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
if (ret < 0) {
dev_err(dev, "unable to restart clock during resume\n");
goto err_interrupt;
}
if (!clock_stop0) {
ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
dev_err(dev, "unable to exit bus reset sequence during resume\n");
goto err_interrupt;
}
if (bus->multi_link) {
ret = intel_shim_sync_go(sdw);
if (ret < 0) {
dev_err(sdw->cdns.dev, "sync go failed during resume\n");
goto err_interrupt;
}
}
}
sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
err_interrupt:
sdw_cdns_enable_interrupt(cdns, false);
return ret;
}
static void intel_check_clock_stop(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
bool clock_stop0;
clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
if (!clock_stop0)
dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
}
static int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
int ret;
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
return ret;
}
ret = sdw_cdns_clock_restart(cdns, false);
if (ret < 0) {
dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
sdw_cdns_enable_interrupt(cdns, false);
return ret;
}
sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
}
static int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
bool wake_enable = false;
int ret;
if (clock_stop) {
ret = sdw_cdns_clock_stop(cdns, true);
if (ret < 0)
dev_err(dev, "%s: cannot stop clock: %d\n", __func__, ret);
else
wake_enable = true;
}
ret = sdw_cdns_enable_interrupt(cdns, false);
if (ret < 0) {
dev_err(dev, "%s: cannot disable interrupts: %d\n", __func__, ret);
return ret;
}
ret = intel_link_power_down(sdw);
if (ret) {
dev_err(dev, "%s: Link power down failed: %d\n", __func__, ret);
return ret;
}
intel_shim_wake(sdw, wake_enable);
return 0;
}
const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = {
.debugfs_init = intel_debugfs_init,
@ -1391,6 +1098,11 @@ const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops = {
.pre_bank_switch = intel_pre_bank_switch,
.post_bank_switch = intel_post_bank_switch,
.sync_arm = intel_shim_sync_arm,
.sync_go_unlocked = intel_shim_sync_go_unlocked,
.sync_go = intel_shim_sync_go,
.sync_check_cmdsync_unlocked = intel_check_cmdsync_unlocked,
};
EXPORT_SYMBOL_NS(sdw_intel_cnl_hw_ops, SOUNDWIRE_INTEL);


@ -50,6 +50,35 @@ struct sdw_intel {
#endif
};
enum intel_pdi_type {
INTEL_PDI_IN = 0,
INTEL_PDI_OUT = 1,
INTEL_PDI_BD = 2,
};
/*
* Read, write helpers for HW registers
*/
static inline int intel_readl(void __iomem *base, int offset)
{
return readl(base + offset);
}
static inline void intel_writel(void __iomem *base, int offset, int value)
{
writel(value, base + offset);
}
static inline u16 intel_readw(void __iomem *base, int offset)
{
return readw(base + offset);
}
static inline void intel_writew(void __iomem *base, int offset, u16 value)
{
writew(value, base + offset);
}
#define cdns_to_intel(_cdns) container_of(_cdns, struct sdw_intel, cdns)
#define INTEL_MASTER_RESET_ITERATIONS 10
@ -138,4 +167,42 @@ static inline void sdw_intel_shim_wake(struct sdw_intel *sdw, bool wake_enable)
SDW_INTEL_OPS(sdw, shim_wake)(sdw, wake_enable);
}
static inline void sdw_intel_sync_arm(struct sdw_intel *sdw)
{
if (SDW_INTEL_CHECK_OPS(sdw, sync_arm))
SDW_INTEL_OPS(sdw, sync_arm)(sdw);
}
static inline int sdw_intel_sync_go_unlocked(struct sdw_intel *sdw)
{
if (SDW_INTEL_CHECK_OPS(sdw, sync_go_unlocked))
return SDW_INTEL_OPS(sdw, sync_go_unlocked)(sdw);
return -ENOTSUPP;
}
static inline int sdw_intel_sync_go(struct sdw_intel *sdw)
{
if (SDW_INTEL_CHECK_OPS(sdw, sync_go))
return SDW_INTEL_OPS(sdw, sync_go)(sdw);
return -ENOTSUPP;
}
static inline bool sdw_intel_sync_check_cmdsync_unlocked(struct sdw_intel *sdw)
{
if (SDW_INTEL_CHECK_OPS(sdw, sync_check_cmdsync_unlocked))
return SDW_INTEL_OPS(sdw, sync_check_cmdsync_unlocked)(sdw);
return false;
}
/* common bus management */
int intel_start_bus(struct sdw_intel *sdw);
int intel_start_bus_after_reset(struct sdw_intel *sdw);
void intel_check_clock_stop(struct sdw_intel *sdw);
int intel_start_bus_after_clock_stop(struct sdw_intel *sdw);
int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop);
/* common bank switch routines */
int intel_pre_bank_switch(struct sdw_intel *sdw);
int intel_post_bank_switch(struct sdw_intel *sdw);
#endif /* __SDW_INTEL_LOCAL_H */


@ -358,10 +358,12 @@ static int intel_resume_child_device(struct device *dev, void *data)
}
ret = pm_request_resume(dev);
if (ret < 0)
if (ret < 0) {
dev_err(dev, "%s: pm_request_resume failed: %d\n", __func__, ret);
return ret;
}
return ret;
return 0;
}
static int __maybe_unused intel_pm_prepare(struct device *dev)


@ -0,0 +1,259 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
// Copyright(c) 2015-2023 Intel Corporation. All rights reserved.
#include <linux/acpi.h>
#include <linux/soundwire/sdw_registers.h>
#include <linux/soundwire/sdw.h>
#include <linux/soundwire/sdw_intel.h>
#include "cadence_master.h"
#include "bus.h"
#include "intel.h"
int intel_start_bus(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
int ret;
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
return ret;
}
/*
* follow recommended programming flows to avoid timeouts when
* gsync is enabled
*/
if (bus->multi_link)
sdw_intel_sync_arm(sdw);
ret = sdw_cdns_init(cdns);
if (ret < 0) {
dev_err(dev, "%s: unable to initialize Cadence IP: %d\n", __func__, ret);
goto err_interrupt;
}
ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
dev_err(dev, "%s: unable to exit bus reset sequence: %d\n", __func__, ret);
goto err_interrupt;
}
if (bus->multi_link) {
ret = sdw_intel_sync_go(sdw);
if (ret < 0) {
dev_err(dev, "%s: sync go failed: %d\n", __func__, ret);
goto err_interrupt;
}
}
sdw_cdns_check_self_clearing_bits(cdns, __func__,
true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
err_interrupt:
sdw_cdns_enable_interrupt(cdns, false);
return ret;
}
int intel_start_bus_after_reset(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
bool clock_stop0;
int status;
int ret;
/*
* An exception condition occurs for the CLK_STOP_BUS_RESET
* case if one or more masters remain active. In this condition,
* all the masters are powered on for they are in the same power
* domain. Master can preserve its context for clock stop0, so
* there is no need to clear slave status and reset bus.
*/
clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
if (!clock_stop0) {
/*
* make sure all Slaves are tagged as UNATTACHED and
* provide reason for reinitialization
*/
status = SDW_UNATTACH_REQUEST_MASTER_RESET;
sdw_clear_slave_status(bus, status);
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "cannot enable interrupts during resume\n");
return ret;
}
/*
* follow recommended programming flows to avoid
* timeouts when gsync is enabled
*/
if (bus->multi_link)
sdw_intel_sync_arm(sdw);
/*
* Re-initialize the IP since it was powered-off
*/
sdw_cdns_init(&sdw->cdns);
} else {
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "cannot enable interrupts during resume\n");
return ret;
}
}
ret = sdw_cdns_clock_restart(cdns, !clock_stop0);
if (ret < 0) {
dev_err(dev, "unable to restart clock during resume\n");
goto err_interrupt;
}
if (!clock_stop0) {
ret = sdw_cdns_exit_reset(cdns);
if (ret < 0) {
dev_err(dev, "unable to exit bus reset sequence during resume\n");
goto err_interrupt;
}
if (bus->multi_link) {
ret = sdw_intel_sync_go(sdw);
if (ret < 0) {
dev_err(sdw->cdns.dev, "sync go failed during resume\n");
goto err_interrupt;
}
}
}
sdw_cdns_check_self_clearing_bits(cdns, __func__, true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
err_interrupt:
sdw_cdns_enable_interrupt(cdns, false);
return ret;
}
void intel_check_clock_stop(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
bool clock_stop0;
clock_stop0 = sdw_cdns_is_clock_stop(&sdw->cdns);
if (!clock_stop0)
dev_err(dev, "%s: invalid configuration, clock was not stopped\n", __func__);
}
int intel_start_bus_after_clock_stop(struct sdw_intel *sdw)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
int ret;
ret = sdw_cdns_enable_interrupt(cdns, true);
if (ret < 0) {
dev_err(dev, "%s: cannot enable interrupts: %d\n", __func__, ret);
return ret;
}
ret = sdw_cdns_clock_restart(cdns, false);
if (ret < 0) {
dev_err(dev, "%s: unable to restart clock: %d\n", __func__, ret);
sdw_cdns_enable_interrupt(cdns, false);
return ret;
}
sdw_cdns_check_self_clearing_bits(cdns, "intel_resume_runtime no_quirks",
true, INTEL_MASTER_RESET_ITERATIONS);
return 0;
}
int intel_stop_bus(struct sdw_intel *sdw, bool clock_stop)
{
struct device *dev = sdw->cdns.dev;
struct sdw_cdns *cdns = &sdw->cdns;
bool wake_enable = false;
int ret;
if (clock_stop) {
ret = sdw_cdns_clock_stop(cdns, true);
if (ret < 0)
dev_err(dev, "%s: cannot stop clock: %d\n", __func__, ret);
else
wake_enable = true;
}
ret = sdw_cdns_enable_interrupt(cdns, false);
if (ret < 0) {
dev_err(dev, "%s: cannot disable interrupts: %d\n", __func__, ret);
return ret;
}
ret = sdw_intel_link_power_down(sdw);
if (ret) {
dev_err(dev, "%s: Link power down failed: %d\n", __func__, ret);
return ret;
}
sdw_intel_shim_wake(sdw, wake_enable);
return 0;
}
/*
* bank switch routines
*/
int intel_pre_bank_switch(struct sdw_intel *sdw)
{
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
/* Write to register only for multi-link */
if (!bus->multi_link)
return 0;
sdw_intel_sync_arm(sdw);
return 0;
}
int intel_post_bank_switch(struct sdw_intel *sdw)
{
struct sdw_cdns *cdns = &sdw->cdns;
struct sdw_bus *bus = &cdns->bus;
int ret = 0;
/* Write to register only for multi-link */
if (!bus->multi_link)
return 0;
mutex_lock(sdw->link_res->shim_lock);
/*
* post_bank_switch() ops is called from the bus in loop for
* all the Masters in the steam with the expectation that
* we trigger the bankswitch for the only first Master in the list
* and do nothing for the other Masters
*
* So, set the SYNCGO bit only if CMDSYNC bit is set for any Master.
*/
if (sdw_intel_sync_check_cmdsync_unlocked(sdw))
ret = sdw_intel_sync_go_unlocked(sdw);
mutex_unlock(sdw->link_res->shim_lock);
if (ret < 0)
dev_err(sdw->cdns.dev, "Post bank switch failed: %d\n", ret);
return ret;
}


@ -28,6 +28,9 @@
#define SWRM_LINK_MANAGER_EE 0x018
#define SWRM_EE_CPU 1
#define SWRM_FRM_GEN_ENABLED BIT(0)
#define SWRM_VERSION_1_3_0 0x01030000
#define SWRM_VERSION_1_5_1 0x01050001
#define SWRM_VERSION_1_7_0 0x01070000
#define SWRM_COMP_HW_VERSION 0x00
#define SWRM_COMP_CFG_ADDR 0x04
#define SWRM_COMP_CFG_IRQ_LEVEL_OR_PULSE_MSK BIT(1)
@ -351,8 +354,7 @@ static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data,
/* Its assumed that write is okay as we do not get any status back */
swrm->reg_write(swrm, SWRM_CMD_FIFO_WR_CMD, val);
/* version 1.3 or less */
if (swrm->version <= 0x01030000)
if (swrm->version <= SWRM_VERSION_1_3_0)
usleep_range(150, 155);
if (cmd_id == SWR_BROADCAST_CMD_ID) {
@ -695,7 +697,7 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
u32p_replace_bits(&val, SWRM_DEF_CMD_NO_PINGS, SWRM_MCP_CFG_MAX_NUM_OF_CMD_NO_PINGS_BMSK);
ctrl->reg_write(ctrl, SWRM_MCP_CFG_ADDR, val);
if (ctrl->version >= 0x01070000) {
if (ctrl->version >= SWRM_VERSION_1_7_0) {
ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL,
SWRM_MCP_BUS_CLK_START << SWRM_EE_CPU);
@ -704,8 +706,7 @@ static int qcom_swrm_init(struct qcom_swrm_ctrl *ctrl)
}
/* Configure number of retries of a read/write cmd */
if (ctrl->version > 0x01050001) {
/* Only for versions >= 1.5.1 */
if (ctrl->version >= SWRM_VERSION_1_5_1) {
ctrl->reg_write(ctrl, SWRM_CMD_FIFO_CFG_ADDR,
SWRM_RD_WR_CMD_RETRIES |
SWRM_CONTINUE_EXEC_ON_CMD_IGNORE);
@ -1217,6 +1218,9 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
ctrl->num_dout_ports = val;
nports = ctrl->num_dout_ports + ctrl->num_din_ports;
if (nports > QCOM_SDW_MAX_PORTS)
return -EINVAL;
/* Valid port numbers are from 1-14, so mask out port 0 explicitly */
set_bit(0, &ctrl->dout_port_mask);
set_bit(0, &ctrl->din_port_mask);
@ -1239,7 +1243,7 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
bp_mode, nports);
if (ret) {
if (ctrl->version <= 0x01030000)
if (ctrl->version <= SWRM_VERSION_1_3_0)
memset(bp_mode, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
else
return ret;
@ -1442,7 +1446,7 @@ static int qcom_swrm_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
/* Clk stop is not supported on WSA Soundwire masters */
if (ctrl->version <= 0x01030000) {
if (ctrl->version <= SWRM_VERSION_1_3_0) {
ctrl->clock_stop_not_supported = true;
} else {
ctrl->reg_read(ctrl, SWRM_COMP_MASTER_ID, &val);
@ -1527,7 +1531,7 @@ static int __maybe_unused swrm_runtime_resume(struct device *dev)
} else {
reset_control_reset(ctrl->audio_cgcr);
if (ctrl->version >= 0x01070000) {
if (ctrl->version >= SWRM_VERSION_1_7_0) {
ctrl->reg_write(ctrl, SWRM_LINK_MANAGER_EE, SWRM_EE_CPU);
ctrl->reg_write(ctrl, SWRM_MCP_BUS_CTRL,
SWRM_MCP_BUS_CLK_START << SWRM_EE_CPU);

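For reference, the version magic numbers defined near the top of the qcom hunks (SWRM_VERSION_1_3_0 = 0x01030000 and friends) pack major/minor/step into bytes, so plain integer comparisons such as ctrl->version >= SWRM_VERSION_1_7_0 order releases correctly. A hypothetical constructor (not part of the patch) makes the encoding explicit:

	/* illustration only: SWRM_VERSION(1, 5, 1) == 0x01050001, i.e. v1.5.1 */
	#define SWRM_VERSION(maj, min, step)	(((maj) << 24) | ((min) << 16) | (step))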

@ -1369,7 +1369,7 @@ static int _sdw_prepare_stream(struct sdw_stream_runtime *stream,
if (ret < 0) {
dev_err(bus->dev, "Compute params failed: %d\n",
ret);
return ret;
goto restore_params;
}
}
@ -1389,7 +1389,7 @@ program_params:
ret = do_bank_switch(stream);
if (ret < 0) {
dev_err(bus->dev, "Bank switch failed: %d\n", ret);
pr_err("%s: do_bank_switch failed: %d\n", __func__, ret);
goto restore_params;
}
@ -1477,7 +1477,7 @@ static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
/* Program params */
ret = sdw_program_params(bus, false);
if (ret < 0) {
dev_err(bus->dev, "Program params failed: %d\n", ret);
dev_err(bus->dev, "%s: Program params failed: %d\n", __func__, ret);
return ret;
}
@ -1497,7 +1497,7 @@ static int _sdw_enable_stream(struct sdw_stream_runtime *stream)
ret = do_bank_switch(stream);
if (ret < 0) {
dev_err(bus->dev, "Bank switch failed: %d\n", ret);
pr_err("%s: do_bank_switch failed: %d\n", __func__, ret);
return ret;
}
@ -1567,14 +1567,14 @@ static int _sdw_disable_stream(struct sdw_stream_runtime *stream)
/* Program params */
ret = sdw_program_params(bus, false);
if (ret < 0) {
dev_err(bus->dev, "Program params failed: %d\n", ret);
dev_err(bus->dev, "%s: Program params failed: %d\n", __func__, ret);
return ret;
}
}
ret = do_bank_switch(stream);
if (ret < 0) {
pr_err("Bank switch failed: %d\n", ret);
pr_err("%s: do_bank_switch failed: %d\n", __func__, ret);
return ret;
}
@ -1664,7 +1664,7 @@ static int _sdw_deprepare_stream(struct sdw_stream_runtime *stream)
/* Program params */
ret = sdw_program_params(bus, false);
if (ret < 0) {
dev_err(bus->dev, "Program params failed: %d\n", ret);
dev_err(bus->dev, "%s: Program params failed: %d\n", __func__, ret);
return ret;
}
}
@ -1893,7 +1893,8 @@ int sdw_stream_add_master(struct sdw_bus *bus,
m_rt = sdw_master_rt_alloc(bus, stream);
if (!m_rt) {
dev_err(bus->dev, "Master runtime alloc failed for stream:%s\n", stream->name);
dev_err(bus->dev, "%s: Master runtime alloc failed for stream:%s\n",
__func__, stream->name);
ret = -ENOMEM;
goto unlock;
}
@ -2012,7 +2013,8 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
*/
m_rt = sdw_master_rt_alloc(slave->bus, stream);
if (!m_rt) {
dev_err(&slave->dev, "Master runtime alloc failed for stream:%s\n", stream->name);
dev_err(&slave->dev, "%s: Master runtime alloc failed for stream:%s\n",
__func__, stream->name);
ret = -ENOMEM;
goto unlock;
}


@ -0,0 +1,109 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2023 Advanced Micro Devices, Inc. All rights reserved.
*/
#ifndef __SDW_AMD_H
#define __SDW_AMD_H
#include <linux/soundwire/sdw.h>
/* AMD pm_runtime quirk definitions */
/*
* Force the clock to stop(ClockStopMode0) when suspend callback
* is invoked.
*/
#define AMD_SDW_CLK_STOP_MODE 1
/*
* Stop the bus when runtime suspend/system level suspend callback
* is invoked. If set, a complete bus reset and re-enumeration will
* be performed when the bus restarts. In-band wake interrupts are
* not supported in this mode.
*/
#define AMD_SDW_POWER_OFF_MODE 2
#define ACP_SDW0 0
#define ACP_SDW1 1
struct acp_sdw_pdata {
u16 instance;
/* mutex to protect acp common register access */
struct mutex *acp_sdw_lock;
};
struct sdw_manager_reg_mask {
u32 sw_pad_enable_mask;
u32 sw_pad_pulldown_mask;
u32 acp_sdw_intr_mask;
};
/**
* struct sdw_amd_dai_runtime: AMD sdw dai runtime data
*
* @name: SoundWire stream name
* @stream: stream runtime
* @bus: Bus handle
* @stream_type: Stream type
*/
struct sdw_amd_dai_runtime {
char *name;
struct sdw_stream_runtime *stream;
struct sdw_bus *bus;
enum sdw_stream_type stream_type;
};
/**
* struct amd_sdw_manager - amd manager driver context
* @bus: bus handle
* @dev: linux device
* @mmio: SoundWire registers mmio base
* @acp_mmio: acp registers mmio base
* @reg_mask: register mask structure per manager instance
* @amd_sdw_irq_thread: SoundWire manager irq workqueue
* @amd_sdw_work: peripheral status work queue
* @probe_work: SoundWire manager probe workqueue
* @acp_sdw_lock: mutex to protect acp share register access
* @status: peripheral devices status array
* @num_din_ports: number of input ports
* @num_dout_ports: number of output ports
* @cols_index: Column index in frame shape
* @rows_index: Rows index in frame shape
* @instance: SoundWire manager instance
* @quirks: SoundWire manager quirks
* @wake_en_mask: wake enable mask per SoundWire manager
* @clk_stopped: flag set to true when clock is stopped
* @power_mode_mask: flag interprets amd SoundWire manager power mode
* @dai_runtime_array: dai runtime array
*/
struct amd_sdw_manager {
struct sdw_bus bus;
struct device *dev;
void __iomem *mmio;
void __iomem *acp_mmio;
struct sdw_manager_reg_mask *reg_mask;
struct work_struct amd_sdw_irq_thread;
struct work_struct amd_sdw_work;
struct work_struct probe_work;
/* mutex to protect acp common register access */
struct mutex *acp_sdw_lock;
enum sdw_slave_status status[SDW_MAX_DEVICES + 1];
int num_din_ports;
int num_dout_ports;
int cols_index;
int rows_index;
u32 instance;
u32 quirks;
u32 wake_en_mask;
u32 power_mode_mask;
bool clk_stopped;
struct sdw_amd_dai_runtime **dai_runtime_array;
};
#endif
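A rough sketch of how the two pm_runtime quirks above would typically be distinguished in a suspend path (illustrative only, written against the fields shown in this header rather than the actual amd_manager.c flow, whose diff is suppressed earlier on this page):

	if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) {
		/* ClockStopMode0: stop the clock, keep bus context, in-band
		 * wake interrupts remain usable */
	} else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) {
		/* power the bus off: resume requires a full bus reset and
		 * re-enumeration, in-band wakes are not supported */
	}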


@ -309,6 +309,12 @@ struct sdw_intel;
* @shim_wake: enable/disable in-band wake management
* @pre_bank_switch: helper for bus management
* @post_bank_switch: helper for bus management
* @sync_arm: helper for multi-link synchronization
* @sync_go_unlocked: helper for multi-link synchronization -
* shim_lock is assumed to be locked at higher level
* @sync_go: helper for multi-link synchronization
* @sync_check_cmdsync_unlocked: helper for multi-link synchronization
* and bank switch - shim_lock is assumed to be locked at higher level
*/
struct sdw_intel_hw_ops {
void (*debugfs_init)(struct sdw_intel *sdw);
@ -330,6 +336,11 @@ struct sdw_intel_hw_ops {
int (*pre_bank_switch)(struct sdw_intel *sdw);
int (*post_bank_switch)(struct sdw_intel *sdw);
void (*sync_arm)(struct sdw_intel *sdw);
int (*sync_go_unlocked)(struct sdw_intel *sdw);
int (*sync_go)(struct sdw_intel *sdw);
bool (*sync_check_cmdsync_unlocked)(struct sdw_intel *sdw);
};
extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops;