Merge 6.7-rc4 into char-misc-linus
We need 6.7-rc4 in here as we need to revert one of the debugfs changes that came in that release through the wireless tree.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 4906f39a13
@@ -59,15 +59,6 @@ Description:
brightness. Reading this file when no hw brightness change
event has happened will return an ENODATA error.

What: /sys/class/leds/<led>/color
Date: June 2023
KernelVersion: 6.5
Description:
Color of the LED.

This is a read-only file. Reading this file returns the color
of the LED as a string (e.g: "red", "green", "multicolor").

What: /sys/class/leds/<led>/trigger
Date: March 2006
KernelVersion: 2.6.17
@@ -9,7 +9,7 @@ title: NXP S32G2 pin controller

maintainers:
- Ghennadi Procopciuc <Ghennadi.Procopciuc@oss.nxp.com>
- Chester Lin <clin@suse.com>
- Chester Lin <chester62515@gmail.com>

description: |
S32G2 pinmux is implemented in SIUL2 (System Integration Unit Lite2),
MAINTAINERS | 10
@@ -5076,7 +5076,6 @@ CLANG CONTROL FLOW INTEGRITY SUPPORT
M: Sami Tolvanen <samitolvanen@google.com>
M: Kees Cook <keescook@chromium.org>
R: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
L: llvm@lists.linux.dev
S: Supported
B: https://github.com/ClangBuiltLinux/linux/issues
@@ -5091,8 +5090,9 @@ F: .clang-format

CLANG/LLVM BUILD SUPPORT
M: Nathan Chancellor <nathan@kernel.org>
M: Nick Desaulniers <ndesaulniers@google.com>
R: Tom Rix <trix@redhat.com>
R: Nick Desaulniers <ndesaulniers@google.com>
R: Bill Wendling <morbo@google.com>
R: Justin Stitt <justinstitt@google.com>
L: llvm@lists.linux.dev
S: Supported
W: https://clangbuiltlinux.github.io/
@@ -5242,7 +5242,6 @@ F: drivers/platform/x86/compal-laptop.c

COMPILER ATTRIBUTES
M: Miguel Ojeda <ojeda@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
S: Maintained
F: include/linux/compiler_attributes.h

@@ -11516,7 +11515,6 @@ F: fs/autofs/
KERNEL BUILD + files below scripts/ (unless maintained elsewhere)
M: Masahiro Yamada <masahiroy@kernel.org>
R: Nathan Chancellor <nathan@kernel.org>
R: Nick Desaulniers <ndesaulniers@google.com>
R: Nicolas Schier <nicolas@fjasle.eu>
L: linux-kbuild@vger.kernel.org
S: Maintained
@@ -17948,6 +17946,8 @@ L: iommu@lists.linux.dev
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: drivers/iommu/arm/arm-smmu/qcom_iommu.c
F: drivers/iommu/arm/arm-smmu/arm-smmu-qcom*
F: drivers/iommu/msm_iommu*

QUALCOMM IPC ROUTER (QRTR) DRIVER
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Makefile | 2
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*
@@ -1839,6 +1839,10 @@ static int __init __kpti_install_ng_mappings(void *__unused)

static void __init kpti_install_ng_mappings(void)
{
/* Check whether KPTI is going to be used */
if (!cpus_have_cap(ARM64_UNMAP_KERNEL_AT_EL0))
return;

/*
* We don't need to rewrite the page-tables if either we've done
* it already or we have KASLR enabled and therefore have not
@@ -23,6 +23,15 @@
#include <asm/feature-fixups.h>

#ifdef CONFIG_VSX
#define __REST_1FPVSR(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
REST_FPR(n,base); \
b 3f; \
2: REST_VSR(n,c,base); \
3:

#define __REST_32FPVSRS(n,c,base) \
BEGIN_FTR_SECTION \
b 2f; \
@@ -41,9 +50,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX); \
2: SAVE_32VSRS(n,c,base); \
3:
#else
#define __REST_1FPVSR(n,b,base) REST_FPR(n, base)
#define __REST_32FPVSRS(n,b,base) REST_32FPRS(n, base)
#define __SAVE_32FPVSRS(n,b,base) SAVE_32FPRS(n, base)
#endif
#define REST_1FPVSR(n,c,base) __REST_1FPVSR(n,__REG_##c,__REG_##base)
#define REST_32FPVSRS(n,c,base) __REST_32FPVSRS(n,__REG_##c,__REG_##base)
#define SAVE_32FPVSRS(n,c,base) __SAVE_32FPVSRS(n,__REG_##c,__REG_##base)

@@ -67,6 +78,7 @@ _GLOBAL(store_fp_state)
SAVE_32FPVSRS(0, R4, R3)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r3)
REST_1FPVSR(0, R4, R3)
blr
EXPORT_SYMBOL(store_fp_state)

@@ -138,4 +150,5 @@ _GLOBAL(save_fpu)
2: SAVE_32FPVSRS(0, R4, R6)
mffs fr0
stfd fr0,FPSTATE_FPSCR(r6)
REST_1FPVSR(0, R4, R6)
blr
@@ -1198,11 +1198,11 @@ void kvmppc_save_user_regs(void)

usermsr = current->thread.regs->msr;

/* Caller has enabled FP/VEC/VSX/TM in MSR */
if (usermsr & MSR_FP)
save_fpu(current);

__giveup_fpu(current);
if (usermsr & MSR_VEC)
save_altivec(current);
__giveup_altivec(current);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (usermsr & MSR_TM) {
@@ -33,6 +33,7 @@ _GLOBAL(store_vr_state)
mfvscr v0
li r4, VRSTATE_VSCR
stvx v0, r4, r3
lvx v0, 0, r3
blr
EXPORT_SYMBOL(store_vr_state)

@@ -109,6 +110,7 @@ _GLOBAL(save_altivec)
mfvscr v0
li r4,VRSTATE_VSCR
stvx v0,r4,r7
lvx v0,0,r7
blr

#ifdef CONFIG_VSX
@@ -33,9 +33,12 @@ EXPORT_SYMBOL_GPL(hypercall_page);
* and xen_vcpu_setup for details. By default it points to share_info->vcpu_info
* but during boot it is switched to point to xen_vcpu_info.
* The pointer is used in xen_evtchn_do_upcall to acknowledge pending events.
* Make sure that xen_vcpu_info doesn't cross a page boundary by making it
* cache-line aligned (the struct is guaranteed to have a size of 64 bytes,
* which matches the cache line size of 64-bit x86 processors).
*/
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);

/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
@@ -160,6 +163,7 @@ void xen_vcpu_setup(int cpu)
int err;
struct vcpu_info *vcpup;

BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES);
BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

/*
@@ -21,7 +21,7 @@ extern void *xen_initial_gdt;
struct trap_info;
void xen_copy_trap_info(struct trap_info *traps);

DECLARE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info);
DECLARE_PER_CPU(unsigned long, xen_cr3);
DECLARE_PER_CPU(unsigned long, xen_current_cr3);

@@ -501,9 +501,17 @@ static inline void bio_check_ro(struct bio *bio)
if (op_is_write(bio_op(bio)) && bdev_read_only(bio->bi_bdev)) {
if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))
return;
pr_warn_ratelimited("Trying to write to read-only block-device %pg\n",
bio->bi_bdev);
/* Older lvm-tools actually trigger this */

if (bio->bi_bdev->bd_ro_warned)
return;

bio->bi_bdev->bd_ro_warned = true;
/*
* Use ioctl to set underlying disk of raid/dm to read-only
* will trigger this.
*/
pr_warn("Trying to write to read-only block-device %pg\n",
bio->bi_bdev);
}
}

@@ -1512,14 +1512,26 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);

static bool blk_is_flush_data_rq(struct request *rq)
{
return (rq->rq_flags & RQF_FLUSH_SEQ) && !is_flush_rq(rq);
}

static bool blk_mq_rq_inflight(struct request *rq, void *priv)
{
/*
* If we find a request that isn't idle we know the queue is busy
* as it's checked in the iter.
* Return false to stop the iteration.
*
* In case of queue quiesce, if one flush data request is completed,
* don't count it as inflight given the flush sequence is suspended,
* and the original flush data request is invisible to driver, just
* like other pending requests because of quiesce
*/
if (blk_mq_request_started(rq)) {
if (blk_mq_request_started(rq) && !(blk_queue_quiesced(rq->q) &&
blk_is_flush_data_rq(rq) &&
blk_mq_request_completed(rq))) {
bool *busy = priv;

*busy = true;
@@ -615,6 +615,7 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif

/* Common attributes for bio-based and request-based queues. */
static struct attribute *queue_attrs[] = {
&queue_ra_entry.attr,
&queue_max_hw_sectors_entry.attr,
@@ -659,6 +660,7 @@ static struct attribute *queue_attrs[] = {
NULL,
};

/* Request-based queue attributes that are not relevant for bio-based queues. */
static struct attribute *blk_mq_queue_attrs[] = {
&queue_requests_entry.attr,
&elv_iosched_entry.attr,
@@ -253,8 +253,7 @@ static const struct backlight_ops acpi_backlight_ops = {
static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
struct acpi_video_device *video = cooling_dev->devdata;

*state = video->brightness->count - ACPI_VIDEO_FIRST_LEVEL - 1;
return 0;
@@ -263,8 +262,7 @@ static int video_get_max_state(struct thermal_cooling_device *cooling_dev,
static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
unsigned long *state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
struct acpi_video_device *video = cooling_dev->devdata;
unsigned long long level;
int offset;

@@ -283,8 +281,7 @@ static int video_get_cur_state(struct thermal_cooling_device *cooling_dev,
static int
video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long state)
{
struct acpi_device *device = cooling_dev->devdata;
struct acpi_video_device *video = acpi_driver_data(device);
struct acpi_video_device *video = cooling_dev->devdata;
int level;

if (state >= video->brightness->count - ACPI_VIDEO_FIRST_LEVEL)
@@ -1125,7 +1122,6 @@ static int acpi_video_bus_get_one_device(struct acpi_device *device, void *arg)

strcpy(acpi_device_name(device), ACPI_VIDEO_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_VIDEO_CLASS);
device->driver_data = data;

data->device_id = device_id;
data->video = video;
@@ -1747,8 +1743,8 @@ static void acpi_video_dev_register_backlight(struct acpi_video_device *device)
device->backlight->props.brightness =
acpi_video_get_brightness(device->backlight);

device->cooling_dev = thermal_cooling_device_register("LCD",
device->dev, &video_cooling_ops);
device->cooling_dev = thermal_cooling_device_register("LCD", device,
&video_cooling_ops);
if (IS_ERR(device->cooling_dev)) {
/*
* Set cooling_dev to NULL so we don't crash trying to free it.
@@ -1568,17 +1568,22 @@ static const struct iommu_ops *acpi_iommu_configure_id(struct device *dev,
int err;
const struct iommu_ops *ops;

/* Serialise to make dev->iommu stable under our potential fwspec */
mutex_lock(&iommu_probe_device_lock);
/*
* If we already translated the fwspec there is nothing left to do,
* return the iommu_ops.
*/
ops = acpi_iommu_fwspec_ops(dev);
if (ops)
if (ops) {
mutex_unlock(&iommu_probe_device_lock);
return ops;
}

err = iort_iommu_configure_id(dev, id_in);
if (err && err != -EPROBE_DEFER)
err = viot_iommu_configure(dev);
mutex_unlock(&iommu_probe_device_lock);

/*
* If we have reason to believe the IOMMU driver missed the initial
@@ -1055,9 +1055,14 @@ int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev)
* Ask the sd driver to issue START STOP UNIT on runtime suspend
* and resume and shutdown only. For system level suspend/resume,
* devices power state is handled directly by libata EH.
* Given that disks are always spun up on system resume, also
* make sure that the sd driver forces runtime suspended disks
* to be resumed to correctly reflect the power state of the
* device.
*/
sdev->manage_runtime_start_stop = true;
sdev->manage_shutdown = true;
sdev->manage_runtime_start_stop = 1;
sdev->manage_shutdown = 1;
sdev->force_runtime_start_on_system_start = 1;
}

/*
@ -307,11 +307,11 @@ static int pstate_init_perf(struct amd_cpudata *cpudata)
|
||||
highest_perf = AMD_CPPC_HIGHEST_PERF(cap1);
|
||||
|
||||
WRITE_ONCE(cpudata->highest_perf, highest_perf);
|
||||
|
||||
WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
|
||||
WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
|
||||
WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
|
||||
WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));
|
||||
|
||||
WRITE_ONCE(cpudata->min_limit_perf, AMD_CPPC_LOWEST_PERF(cap1));
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -329,11 +329,12 @@ static int cppc_init_perf(struct amd_cpudata *cpudata)
|
||||
highest_perf = cppc_perf.highest_perf;
|
||||
|
||||
WRITE_ONCE(cpudata->highest_perf, highest_perf);
|
||||
|
||||
WRITE_ONCE(cpudata->max_limit_perf, highest_perf);
|
||||
WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
|
||||
WRITE_ONCE(cpudata->lowest_nonlinear_perf,
|
||||
cppc_perf.lowest_nonlinear_perf);
|
||||
WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);
|
||||
WRITE_ONCE(cpudata->min_limit_perf, cppc_perf.lowest_perf);
|
||||
|
||||
if (cppc_state == AMD_PSTATE_ACTIVE)
|
||||
return 0;
|
||||
@ -432,6 +433,10 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
|
||||
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
|
||||
u64 value = prev;
|
||||
|
||||
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
|
||||
cpudata->max_limit_perf);
|
||||
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
|
||||
cpudata->max_limit_perf);
|
||||
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
|
||||
|
||||
if ((cppc_state == AMD_PSTATE_GUIDED) && (gov_flags & CPUFREQ_GOV_DYNAMIC_SWITCHING)) {
|
||||
@ -470,6 +475,22 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
|
||||
{
|
||||
u32 max_limit_perf, min_limit_perf;
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
|
||||
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
|
||||
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
|
||||
|
||||
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
|
||||
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
|
||||
WRITE_ONCE(cpudata->max_limit_freq, policy->max);
|
||||
WRITE_ONCE(cpudata->min_limit_freq, policy->min);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int amd_pstate_update_freq(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq, bool fast_switch)
|
||||
{
|
||||
@ -480,6 +501,9 @@ static int amd_pstate_update_freq(struct cpufreq_policy *policy,
|
||||
if (!cpudata->max_freq)
|
||||
return -ENODEV;
|
||||
|
||||
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
|
||||
amd_pstate_update_min_max_limit(policy);
|
||||
|
||||
cap_perf = READ_ONCE(cpudata->highest_perf);
|
||||
min_perf = READ_ONCE(cpudata->lowest_perf);
|
||||
max_perf = cap_perf;
|
||||
@ -518,7 +542,9 @@ static int amd_pstate_target(struct cpufreq_policy *policy,
|
||||
static unsigned int amd_pstate_fast_switch(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq)
|
||||
{
|
||||
return amd_pstate_update_freq(policy, target_freq, true);
|
||||
if (!amd_pstate_update_freq(policy, target_freq, true))
|
||||
return target_freq;
|
||||
return policy->cur;
|
||||
}
|
||||
|
||||
static void amd_pstate_adjust_perf(unsigned int cpu,
|
||||
@ -532,6 +558,10 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
unsigned int target_freq;
|
||||
|
||||
if (policy->min != cpudata->min_limit_freq || policy->max != cpudata->max_limit_freq)
|
||||
amd_pstate_update_min_max_limit(policy);
|
||||
|
||||
|
||||
cap_perf = READ_ONCE(cpudata->highest_perf);
|
||||
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
|
||||
max_freq = READ_ONCE(cpudata->max_freq);
|
||||
@ -745,6 +775,8 @@ static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
|
||||
/* Initial processor data capability frequencies */
|
||||
cpudata->max_freq = max_freq;
|
||||
cpudata->min_freq = min_freq;
|
||||
cpudata->max_limit_freq = max_freq;
|
||||
cpudata->min_limit_freq = min_freq;
|
||||
cpudata->nominal_freq = nominal_freq;
|
||||
cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;
|
||||
|
||||
@ -850,11 +882,16 @@ static ssize_t show_energy_performance_available_preferences(
|
||||
{
|
||||
int i = 0;
|
||||
int offset = 0;
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
|
||||
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
|
||||
return sysfs_emit_at(buf, offset, "%s\n",
|
||||
energy_perf_strings[EPP_INDEX_PERFORMANCE]);
|
||||
|
||||
while (energy_perf_strings[i] != NULL)
|
||||
offset += sysfs_emit_at(buf, offset, "%s ", energy_perf_strings[i++]);
|
||||
|
||||
sysfs_emit_at(buf, offset, "\n");
|
||||
offset += sysfs_emit_at(buf, offset, "\n");
|
||||
|
||||
return offset;
|
||||
}
|
||||
@ -1183,16 +1220,25 @@ static int amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void amd_pstate_epp_init(unsigned int cpu)
|
||||
static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
u32 max_perf, min_perf;
|
||||
u32 max_perf, min_perf, min_limit_perf, max_limit_perf;
|
||||
u64 value;
|
||||
s16 epp;
|
||||
|
||||
max_perf = READ_ONCE(cpudata->highest_perf);
|
||||
min_perf = READ_ONCE(cpudata->lowest_perf);
|
||||
max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
|
||||
min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
|
||||
|
||||
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
|
||||
cpudata->max_limit_perf);
|
||||
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
|
||||
cpudata->max_limit_perf);
|
||||
|
||||
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
|
||||
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
|
||||
|
||||
value = READ_ONCE(cpudata->cppc_req_cached);
|
||||
|
||||
@ -1210,9 +1256,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
|
||||
value &= ~AMD_CPPC_DES_PERF(~0L);
|
||||
value |= AMD_CPPC_DES_PERF(0);
|
||||
|
||||
if (cpudata->epp_policy == cpudata->policy)
|
||||
goto skip_epp;
|
||||
|
||||
cpudata->epp_policy = cpudata->policy;
|
||||
|
||||
/* Get BIOS pre-defined epp value */
|
||||
@ -1222,7 +1265,7 @@ static void amd_pstate_epp_init(unsigned int cpu)
|
||||
* This return value can only be negative for shared_memory
|
||||
* systems where EPP register read/write not supported.
|
||||
*/
|
||||
goto skip_epp;
|
||||
return;
|
||||
}
|
||||
|
||||
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
|
||||
@ -1236,8 +1279,6 @@ static void amd_pstate_epp_init(unsigned int cpu)
|
||||
|
||||
WRITE_ONCE(cpudata->cppc_req_cached, value);
|
||||
amd_pstate_set_epp(cpudata, epp);
|
||||
skip_epp:
|
||||
cpufreq_cpu_put(policy);
|
||||
}
|
||||
|
||||
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
|
||||
@ -1252,7 +1293,7 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
|
||||
|
||||
cpudata->policy = policy->policy;
|
||||
|
||||
amd_pstate_epp_init(policy->cpu);
|
||||
amd_pstate_epp_update_limit(policy);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -327,7 +327,7 @@ static int imx6ul_opp_check_speed_grading(struct device *dev)
|
||||
imx6x_disable_freq_in_opp(dev, 696000000);
|
||||
|
||||
if (of_machine_is_compatible("fsl,imx6ull")) {
|
||||
if (val != OCOTP_CFG3_6ULL_SPEED_792MHZ)
|
||||
if (val < OCOTP_CFG3_6ULL_SPEED_792MHZ)
|
||||
imx6x_disable_freq_in_opp(dev, 792000000);
|
||||
|
||||
if (val != OCOTP_CFG3_6ULL_SPEED_900MHZ)
|
||||
|
@ -23,8 +23,10 @@
|
||||
#include <linux/nvmem-consumer.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm.h>
|
||||
#include <linux/pm_domain.h>
|
||||
#include <linux/pm_opp.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/soc/qcom/smem.h>
|
||||
|
||||
@ -55,6 +57,7 @@ struct qcom_cpufreq_match_data {
|
||||
|
||||
struct qcom_cpufreq_drv_cpu {
|
||||
int opp_token;
|
||||
struct device **virt_devs;
|
||||
};
|
||||
|
||||
struct qcom_cpufreq_drv {
|
||||
@ -424,6 +427,30 @@ static const struct qcom_cpufreq_match_data match_data_ipq8074 = {
|
||||
.get_version = qcom_cpufreq_ipq8074_name_version,
|
||||
};
|
||||
|
||||
static void qcom_cpufreq_suspend_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
|
||||
{
|
||||
const char * const *name = drv->data->genpd_names;
|
||||
int i;
|
||||
|
||||
if (!drv->cpus[cpu].virt_devs)
|
||||
return;
|
||||
|
||||
for (i = 0; *name; i++, name++)
|
||||
device_set_awake_path(drv->cpus[cpu].virt_devs[i]);
|
||||
}
|
||||
|
||||
static void qcom_cpufreq_put_virt_devs(struct qcom_cpufreq_drv *drv, unsigned int cpu)
|
||||
{
|
||||
const char * const *name = drv->data->genpd_names;
|
||||
int i;
|
||||
|
||||
if (!drv->cpus[cpu].virt_devs)
|
||||
return;
|
||||
|
||||
for (i = 0; *name; i++, name++)
|
||||
pm_runtime_put(drv->cpus[cpu].virt_devs[i]);
|
||||
}
|
||||
|
||||
static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct qcom_cpufreq_drv *drv;
|
||||
@ -478,6 +505,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||
of_node_put(np);
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct device **virt_devs = NULL;
|
||||
struct dev_pm_opp_config config = {
|
||||
.supported_hw = NULL,
|
||||
};
|
||||
@ -498,7 +526,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||
|
||||
if (drv->data->genpd_names) {
|
||||
config.genpd_names = drv->data->genpd_names;
|
||||
config.virt_devs = NULL;
|
||||
config.virt_devs = &virt_devs;
|
||||
}
|
||||
|
||||
if (config.supported_hw || config.genpd_names) {
|
||||
@ -509,6 +537,27 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||
goto free_opp;
|
||||
}
|
||||
}
|
||||
|
||||
if (virt_devs) {
|
||||
const char * const *name = config.genpd_names;
|
||||
int i, j;
|
||||
|
||||
for (i = 0; *name; i++, name++) {
|
||||
ret = pm_runtime_resume_and_get(virt_devs[i]);
|
||||
if (ret) {
|
||||
dev_err(cpu_dev, "failed to resume %s: %d\n",
|
||||
*name, ret);
|
||||
|
||||
/* Rollback previous PM runtime calls */
|
||||
name = config.genpd_names;
|
||||
for (j = 0; *name && j < i; j++, name++)
|
||||
pm_runtime_put(virt_devs[j]);
|
||||
|
||||
goto free_opp;
|
||||
}
|
||||
}
|
||||
drv->cpus[cpu].virt_devs = virt_devs;
|
||||
}
|
||||
}
|
||||
|
||||
cpufreq_dt_pdev = platform_device_register_simple("cpufreq-dt", -1,
|
||||
@ -522,8 +571,10 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||
dev_err(cpu_dev, "Failed to register platform device\n");
|
||||
|
||||
free_opp:
|
||||
for_each_possible_cpu(cpu)
|
||||
for_each_possible_cpu(cpu) {
|
||||
qcom_cpufreq_put_virt_devs(drv, cpu);
|
||||
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -534,15 +585,31 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
|
||||
|
||||
platform_device_unregister(cpufreq_dt_pdev);
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
for_each_possible_cpu(cpu) {
|
||||
qcom_cpufreq_put_virt_devs(drv, cpu);
|
||||
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
|
||||
}
|
||||
}
|
||||
|
||||
static int qcom_cpufreq_suspend(struct device *dev)
|
||||
{
|
||||
struct qcom_cpufreq_drv *drv = dev_get_drvdata(dev);
|
||||
unsigned int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
qcom_cpufreq_suspend_virt_devs(drv, cpu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static DEFINE_SIMPLE_DEV_PM_OPS(qcom_cpufreq_pm_ops, qcom_cpufreq_suspend, NULL);
|
||||
|
||||
static struct platform_driver qcom_cpufreq_driver = {
|
||||
.probe = qcom_cpufreq_probe,
|
||||
.remove_new = qcom_cpufreq_remove,
|
||||
.driver = {
|
||||
.name = "qcom-cpufreq-nvmem",
|
||||
.pm = pm_sleep_ptr(&qcom_cpufreq_pm_ops),
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -301,7 +301,7 @@ void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
|
||||
|
||||
dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
|
||||
if ((old->context == fence->context && old_usage >= usage &&
|
||||
dma_fence_is_later(fence, old)) ||
|
||||
dma_fence_is_later_or_same(fence, old)) ||
|
||||
dma_fence_is_signaled(old)) {
|
||||
dma_resv_list_set(fobj, i, fence, usage);
|
||||
dma_fence_put(old);
|
||||
|
@ -717,14 +717,11 @@ static void create_units(struct fw_device *device)
|
||||
fw_unit_attributes,
|
||||
&unit->attribute_group);
|
||||
|
||||
if (device_register(&unit->device) < 0)
|
||||
goto skip_unit;
|
||||
|
||||
fw_device_get(device);
|
||||
continue;
|
||||
|
||||
skip_unit:
|
||||
kfree(unit);
|
||||
if (device_register(&unit->device) < 0) {
|
||||
put_device(&unit->device);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1519,9 +1519,9 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
|
||||
sdev->use_10_for_rw = 1;
|
||||
|
||||
if (sbp2_param_exclusive_login) {
|
||||
sdev->manage_system_start_stop = true;
|
||||
sdev->manage_runtime_start_stop = true;
|
||||
sdev->manage_shutdown = true;
|
||||
sdev->manage_system_start_stop = 1;
|
||||
sdev->manage_runtime_start_stop = 1;
|
||||
sdev->manage_shutdown = 1;
|
||||
}
|
||||
|
||||
if (sdev->type == TYPE_ROM)
|
||||
|
@ -101,7 +101,7 @@ retry:
|
||||
* overlap on physical address level.
|
||||
*/
|
||||
list_for_each_entry(entry, &accepting_list, list) {
|
||||
if (entry->end < range.start)
|
||||
if (entry->end <= range.start)
|
||||
continue;
|
||||
if (entry->start >= range.end)
|
||||
continue;
|
||||
|
@ -547,7 +547,7 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
|
||||
struct amdgpu_device *adev = dst, *peer_adev;
|
||||
int num_links;
|
||||
|
||||
if (adev->asic_type != CHIP_ALDEBARAN)
|
||||
if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 4, 2))
|
||||
return 0;
|
||||
|
||||
if (src)
|
||||
|
@ -638,6 +638,9 @@ static ssize_t amdgpu_debugfs_regs_didt_read(struct file *f, char __user *buf,
|
||||
if (size & 0x3 || *pos & 0x3)
|
||||
return -EINVAL;
|
||||
|
||||
if (!adev->didt_rreg)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
|
||||
if (r < 0) {
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
@ -694,6 +697,9 @@ static ssize_t amdgpu_debugfs_regs_didt_write(struct file *f, const char __user
|
||||
if (size & 0x3 || *pos & 0x3)
|
||||
return -EINVAL;
|
||||
|
||||
if (!adev->didt_wreg)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
|
||||
if (r < 0) {
|
||||
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
|
||||
|
@ -4538,6 +4538,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
|
||||
if (amdgpu_sriov_vf(adev))
|
||||
amdgpu_virt_release_full_gpu(adev, false);
|
||||
|
||||
r = amdgpu_dpm_notify_rlc_state(adev, false);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -340,14 +340,11 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
|
||||
adev->have_disp_power_ref = true;
|
||||
return ret;
|
||||
}
|
||||
/* if we have no active crtcs, then drop the power ref
|
||||
* we got before
|
||||
/* if we have no active crtcs, then go to
|
||||
* drop the power ref we got before
|
||||
*/
|
||||
if (!active && adev->have_disp_power_ref) {
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
if (!active && adev->have_disp_power_ref)
|
||||
adev->have_disp_power_ref = false;
|
||||
}
|
||||
|
||||
out:
|
||||
/* drop the power reference we got coming in here */
|
||||
pm_runtime_put_autosuspend(dev->dev);
|
||||
|
@ -2263,6 +2263,8 @@ retry_init:
|
||||
pm_runtime_mark_last_busy(ddev->dev);
|
||||
pm_runtime_put_autosuspend(ddev->dev);
|
||||
|
||||
pci_wake_from_d3(pdev, TRUE);
|
||||
|
||||
/*
|
||||
* For runpm implemented via BACO, PMFW will handle the
|
||||
* timing for BACO in and out:
|
||||
|
@ -181,6 +181,9 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
|
||||
|
||||
if (!bo->ttm)
|
||||
return AMDGPU_BO_INVALID_OFFSET;
|
||||
|
||||
if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached)
|
||||
return AMDGPU_BO_INVALID_OFFSET;
|
||||
|
||||
|
@ -1527,10 +1527,14 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
|
||||
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
|
||||
{
|
||||
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
|
||||
uint64_t offset;
|
||||
uint64_t offset = AMDGPU_BO_INVALID_OFFSET;
|
||||
|
||||
offset = (bo->tbo.resource->start << PAGE_SHIFT) +
|
||||
amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
|
||||
if (bo->tbo.resource->mem_type == TTM_PL_TT)
|
||||
offset = amdgpu_gmc_agp_addr(&bo->tbo);
|
||||
|
||||
if (offset == AMDGPU_BO_INVALID_OFFSET)
|
||||
offset = (bo->tbo.resource->start << PAGE_SHIFT) +
|
||||
amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
|
||||
|
||||
return amdgpu_gmc_sign_extend(offset);
|
||||
}
|
||||
|
@ -214,6 +214,12 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
|
||||
control->i2c_address = EEPROM_I2C_MADDR_0;
|
||||
return true;
|
||||
case IP_VERSION(13, 0, 0):
|
||||
if (strnstr(atom_ctx->vbios_pn, "D707",
|
||||
sizeof(atom_ctx->vbios_pn)))
|
||||
control->i2c_address = EEPROM_I2C_MADDR_0;
|
||||
else
|
||||
control->i2c_address = EEPROM_I2C_MADDR_4;
|
||||
return true;
|
||||
case IP_VERSION(13, 0, 6):
|
||||
case IP_VERSION(13, 0, 10):
|
||||
control->i2c_address = EEPROM_I2C_MADDR_4;
|
||||
|
@ -959,10 +959,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
|
||||
return 0;
|
||||
|
||||
addr = amdgpu_gmc_agp_addr(bo);
|
||||
if (addr != AMDGPU_BO_INVALID_OFFSET) {
|
||||
bo->resource->start = addr >> PAGE_SHIFT;
|
||||
if (addr != AMDGPU_BO_INVALID_OFFSET)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* allocate GART space */
|
||||
placement.num_placement = 1;
|
||||
|
@ -89,6 +89,10 @@ MODULE_FIRMWARE("amdgpu/gc_11_5_0_me.bin");
|
||||
MODULE_FIRMWARE("amdgpu/gc_11_5_0_mec.bin");
|
||||
MODULE_FIRMWARE("amdgpu/gc_11_5_0_rlc.bin");
|
||||
|
||||
static const struct soc15_reg_golden golden_settings_gc_11_0[] = {
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, regTCP_CNTL, 0x20000000, 0x20000000)
|
||||
};
|
||||
|
||||
static const struct soc15_reg_golden golden_settings_gc_11_0_1[] =
|
||||
{
|
||||
SOC15_REG_GOLDEN_VALUE(GC, 0, regCGTT_GS_NGG_CLK_CTRL, 0x9fff8fff, 0x00000010),
|
||||
@ -304,6 +308,10 @@ static void gfx_v11_0_init_golden_registers(struct amdgpu_device *adev)
|
||||
default:
|
||||
break;
|
||||
}
|
||||
soc15_program_register_sequence(adev,
|
||||
golden_settings_gc_11_0,
|
||||
(const u32)ARRAY_SIZE(golden_settings_gc_11_0));
|
||||
|
||||
}
|
||||
|
||||
static void gfx_v11_0_write_data_to_reg(struct amdgpu_ring *ring, int eng_sel,
|
||||
@ -419,7 +427,7 @@ static int gfx_v11_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
|
||||
cpu_ptr = &adev->wb.wb[index];
|
||||
|
||||
r = amdgpu_ib_get(adev, NULL, 16, AMDGPU_IB_POOL_DIRECT, &ib);
|
||||
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
|
||||
if (r) {
|
||||
DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
|
||||
goto err1;
|
||||
|
@ -883,8 +883,8 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||
gpu_addr = adev->wb.gpu_addr + (index * 4);
|
||||
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
|
||||
memset(&ib, 0, sizeof(ib));
|
||||
r = amdgpu_ib_get(adev, NULL, 16,
|
||||
AMDGPU_IB_POOL_DIRECT, &ib);
|
||||
|
||||
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
|
||||
if (r)
|
||||
goto err1;
|
||||
|
||||
|
@ -1039,8 +1039,8 @@ static int gfx_v9_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||
gpu_addr = adev->wb.gpu_addr + (index * 4);
|
||||
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
|
||||
memset(&ib, 0, sizeof(ib));
|
||||
r = amdgpu_ib_get(adev, NULL, 16,
|
||||
AMDGPU_IB_POOL_DIRECT, &ib);
|
||||
|
||||
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
|
||||
if (r)
|
||||
goto err1;
|
||||
|
||||
|
@ -297,8 +297,8 @@ static int gfx_v9_4_3_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
||||
gpu_addr = adev->wb.gpu_addr + (index * 4);
|
||||
adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
|
||||
memset(&ib, 0, sizeof(ib));
|
||||
r = amdgpu_ib_get(adev, NULL, 16,
|
||||
AMDGPU_IB_POOL_DIRECT, &ib);
|
||||
|
||||
r = amdgpu_ib_get(adev, NULL, 20, AMDGPU_IB_POOL_DIRECT, &ib);
|
||||
if (r)
|
||||
goto err1;
|
||||
|
||||
|
@ -259,17 +259,17 @@ const struct nbio_hdp_flush_reg nbio_v7_11_hdp_flush_reg = {
|
||||
|
||||
static void nbio_v7_11_init_registers(struct amdgpu_device *adev)
|
||||
{
|
||||
/* uint32_t def, data;
|
||||
uint32_t def, data;
|
||||
|
||||
def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3);
|
||||
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
|
||||
CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
|
||||
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
|
||||
CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
|
||||
def = data = RREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3);
|
||||
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
|
||||
CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1);
|
||||
data = REG_SET_FIELD(data, BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3,
|
||||
CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV, 1);
|
||||
|
||||
if (def != data)
|
||||
WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
|
||||
|
||||
if (def != data)
|
||||
WREG32_SOC15(NBIO, 0, regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3, data);
|
||||
*/
|
||||
}
|
||||
|
||||
static void nbio_v7_11_update_medium_grain_clock_gating(struct amdgpu_device *adev,
|
||||
|
@ -611,11 +611,6 @@ static void nbio_v7_9_handle_ras_controller_intr_no_bifring(struct amdgpu_device
|
||||
|
||||
dev_info(adev->dev, "RAS controller interrupt triggered "
|
||||
"by NBIF error\n");
|
||||
|
||||
/* ras_controller_int is dedicated for nbif ras error,
|
||||
* not the global interrupt for sync flood
|
||||
*/
|
||||
amdgpu_ras_reset_gpu(adev);
|
||||
}
|
||||
|
||||
amdgpu_ras_error_data_fini(&err_data);
|
||||
|
@ -1161,6 +1161,11 @@ static int soc15_common_early_init(void *handle)
|
||||
AMD_PG_SUPPORT_VCN_DPG |
|
||||
AMD_PG_SUPPORT_JPEG;
|
||||
adev->external_rev_id = adev->rev_id + 0x46;
|
||||
/* GC 9.4.3 uses MMIO register region hole at a different offset */
|
||||
if (!amdgpu_sriov_vf(adev)) {
|
||||
adev->rmmio_remap.reg_offset = 0x1A000;
|
||||
adev->rmmio_remap.bus_addr = adev->rmmio_base + 0x1A000;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
/* FIXME: not supported yet */
|
||||
|
@ -1128,7 +1128,7 @@ static inline struct kfd_node *kfd_node_by_irq_ids(struct amdgpu_device *adev,
|
||||
struct kfd_dev *dev = adev->kfd.dev;
|
||||
uint32_t i;
|
||||
|
||||
if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 3))
|
||||
if (KFD_GC_VERSION(dev) != IP_VERSION(9, 4, 3))
|
||||
return dev->nodes[0];
|
||||
|
||||
for (i = 0; i < dev->num_nodes; i++)
|
||||
|
@ -169,16 +169,43 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void pqm_clean_queue_resource(struct process_queue_manager *pqm,
|
||||
struct process_queue_node *pqn)
|
||||
{
|
||||
struct kfd_node *dev;
|
||||
struct kfd_process_device *pdd;
|
||||
|
||||
dev = pqn->q->device;
|
||||
|
||||
pdd = kfd_get_process_device_data(dev, pqm->process);
|
||||
if (!pdd) {
|
||||
pr_err("Process device data doesn't exist\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (pqn->q->gws) {
|
||||
if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
|
||||
!dev->kfd->shared_resources.enable_mes)
|
||||
amdgpu_amdkfd_remove_gws_from_process(
|
||||
pqm->process->kgd_process_info, pqn->q->gws);
|
||||
pdd->qpd.num_gws = 0;
|
||||
}
|
||||
|
||||
if (dev->kfd->shared_resources.enable_mes) {
|
||||
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->gang_ctx_bo);
|
||||
if (pqn->q->wptr_bo)
|
||||
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
|
||||
}
|
||||
}
|
||||
|
||||
void pqm_uninit(struct process_queue_manager *pqm)
|
||||
{
|
||||
struct process_queue_node *pqn, *next;
|
||||
|
||||
list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
|
||||
if (pqn->q && pqn->q->gws &&
|
||||
KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
|
||||
!pqn->q->device->kfd->shared_resources.enable_mes)
|
||||
amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
|
||||
pqn->q->gws);
|
||||
if (pqn->q)
|
||||
pqm_clean_queue_resource(pqm, pqn);
|
||||
|
||||
kfd_procfs_del_queue(pqn->q);
|
||||
uninit_queue(pqn->q);
|
||||
list_del(&pqn->process_queue_list);
|
||||
@ -461,22 +488,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
|
||||
goto err_destroy_queue;
|
||||
}
|
||||
|
||||
if (pqn->q->gws) {
|
||||
if (KFD_GC_VERSION(pqn->q->device) != IP_VERSION(9, 4, 3) &&
|
||||
!dev->kfd->shared_resources.enable_mes)
|
||||
amdgpu_amdkfd_remove_gws_from_process(
|
||||
pqm->process->kgd_process_info,
|
||||
pqn->q->gws);
|
||||
pdd->qpd.num_gws = 0;
|
||||
}
|
||||
|
||||
if (dev->kfd->shared_resources.enable_mes) {
|
||||
amdgpu_amdkfd_free_gtt_mem(dev->adev,
|
||||
pqn->q->gang_ctx_bo);
|
||||
if (pqn->q->wptr_bo)
|
||||
amdgpu_amdkfd_free_gtt_mem(dev->adev, pqn->q->wptr_bo);
|
||||
|
||||
}
|
||||
pqm_clean_queue_resource(pqm, pqn);
|
||||
uninit_queue(pqn->q);
|
||||
}
|
||||
|
||||
|
@ -6267,7 +6267,7 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
|
||||
dm_new_state->underscan_enable = val;
|
||||
ret = 0;
|
||||
} else if (property == adev->mode_info.abm_level_property) {
|
||||
dm_new_state->abm_level = val;
|
||||
dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE;
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
@ -6312,7 +6312,8 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
|
||||
*val = dm_state->underscan_enable;
|
||||
ret = 0;
|
||||
} else if (property == adev->mode_info.abm_level_property) {
|
||||
*val = dm_state->abm_level;
|
||||
*val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ?
|
||||
dm_state->abm_level : 0;
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
@ -6385,7 +6386,8 @@ void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
|
||||
state->pbn = 0;
|
||||
|
||||
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
|
||||
state->abm_level = amdgpu_dm_abm_level;
|
||||
state->abm_level = amdgpu_dm_abm_level ?:
|
||||
ABM_LEVEL_IMMEDIATE_DISABLE;
|
||||
|
||||
__drm_atomic_helper_connector_reset(connector, &state->base);
|
||||
}
|
||||
|
@ -334,7 +334,7 @@ static struct wm_table lpddr5_wm_table = {
|
||||
{
|
||||
.wm_inst = WM_A,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.pstate_latency_us = 129.0,
|
||||
.sr_exit_time_us = 11.5,
|
||||
.sr_enter_plus_exit_time_us = 14.5,
|
||||
.valid = true,
|
||||
@ -342,7 +342,7 @@ static struct wm_table lpddr5_wm_table = {
|
||||
{
|
||||
.wm_inst = WM_B,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.pstate_latency_us = 129.0,
|
||||
.sr_exit_time_us = 11.5,
|
||||
.sr_enter_plus_exit_time_us = 14.5,
|
||||
.valid = true,
|
||||
@ -350,7 +350,7 @@ static struct wm_table lpddr5_wm_table = {
|
||||
{
|
||||
.wm_inst = WM_C,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.pstate_latency_us = 129.0,
|
||||
.sr_exit_time_us = 11.5,
|
||||
.sr_enter_plus_exit_time_us = 14.5,
|
||||
.valid = true,
|
||||
@ -358,7 +358,7 @@ static struct wm_table lpddr5_wm_table = {
|
||||
{
|
||||
.wm_inst = WM_D,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.pstate_latency_us = 129.0,
|
||||
.sr_exit_time_us = 11.5,
|
||||
.sr_enter_plus_exit_time_us = 14.5,
|
||||
.valid = true,
|
||||
|
@ -232,6 +232,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
|
||||
if (dc->work_arounds.skip_clock_update)
|
||||
return;
|
||||
|
||||
/* DTBCLK is fixed, so set a default if unspecified. */
|
||||
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
|
||||
new_clocks->ref_dtbclk_khz = 600000;
|
||||
|
||||
/*
|
||||
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
|
||||
* also if safe to lower is false, we just go in the higher state
|
||||
@ -265,8 +269,10 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
|
||||
|
||||
if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
|
||||
dcn35_smu_set_dtbclk(clk_mgr, true);
|
||||
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
|
||||
clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
|
||||
|
||||
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
|
||||
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
|
||||
}
|
||||
|
||||
/* check that we're not already in D0 */
|
||||
@ -314,17 +320,12 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
|
||||
update_dispclk = true;
|
||||
}
|
||||
|
||||
if (!new_clocks->dtbclk_en) {
|
||||
new_clocks->ref_dtbclk_khz = 600000;
|
||||
}
|
||||
|
||||
/* clock limits are received with MHz precision, divide by 1000 to prevent setting clocks at every call */
|
||||
if (!dc->debug.disable_dtb_ref_clk_switch &&
|
||||
should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000, clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
|
||||
/* DCCG requires KHz precision for DTBCLK */
|
||||
dcn35_smu_set_dtbclk(clk_mgr, true);
|
||||
|
||||
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
|
||||
should_set_clock(safe_to_lower, new_clocks->ref_dtbclk_khz / 1000,
|
||||
clk_mgr_base->clks.ref_dtbclk_khz / 1000)) {
|
||||
dcn35_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
|
||||
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
|
||||
}
|
||||
|
||||
if (dpp_clock_lowered) {
|
||||
@ -443,32 +444,32 @@ static struct wm_table ddr5_wm_table = {
|
||||
.wm_inst = WM_A,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.sr_exit_time_us = 14.0,
|
||||
.sr_enter_plus_exit_time_us = 16.0,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_B,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.sr_exit_time_us = 14.0,
|
||||
.sr_enter_plus_exit_time_us = 16.0,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_C,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.sr_exit_time_us = 14.0,
|
||||
.sr_enter_plus_exit_time_us = 16.0,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_D,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.72,
|
||||
.sr_exit_time_us = 9,
|
||||
.sr_enter_plus_exit_time_us = 11,
|
||||
.sr_exit_time_us = 14.0,
|
||||
.sr_enter_plus_exit_time_us = 16.0,
|
||||
.valid = true,
|
||||
},
|
||||
}
|
||||
@ -480,32 +481,32 @@ static struct wm_table lpddr5_wm_table = {
|
||||
.wm_inst = WM_A,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.sr_exit_time_us = 11.5,
|
||||
.sr_enter_plus_exit_time_us = 14.5,
|
||||
.sr_exit_time_us = 14.0,
|
||||
.sr_enter_plus_exit_time_us = 16.0,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_B,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.sr_exit_time_us = 11.5,
|
||||
.sr_enter_plus_exit_time_us = 14.5,
|
||||
.sr_exit_time_us = 14.0,
|
||||
.sr_enter_plus_exit_time_us = 16.0,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_C,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.sr_exit_time_us = 11.5,
|
||||
.sr_enter_plus_exit_time_us = 14.5,
|
||||
.sr_exit_time_us = 14.0,
|
||||
.sr_enter_plus_exit_time_us = 16.0,
|
||||
.valid = true,
|
||||
},
|
||||
{
|
||||
.wm_inst = WM_D,
|
||||
.wm_type = WM_TYPE_PSTATE_CHG,
|
||||
.pstate_latency_us = 11.65333,
|
||||
.sr_exit_time_us = 11.5,
|
||||
.sr_enter_plus_exit_time_us = 14.5,
|
||||
.sr_exit_time_us = 14.0,
|
||||
.sr_enter_plus_exit_time_us = 16.0,
|
||||
.valid = true,
|
||||
},
|
||||
}
|
||||
@ -515,11 +516,6 @@ static DpmClocks_t_dcn35 dummy_clocks;
|
||||
|
||||
static struct dcn35_watermarks dummy_wms = { 0 };
|
||||
|
||||
static struct dcn35_ss_info_table ss_info_table = {
|
||||
.ss_divider = 1000,
|
||||
.ss_percentage = {0, 0, 375, 375, 375}
|
||||
};
|
||||
|
||||
static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
|
||||
{
|
||||
int i, num_valid_sets;
|
||||
@ -653,27 +649,47 @@ static unsigned int convert_wck_ratio(uint8_t wck_ratio)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline uint32_t calc_dram_speed_mts(const MemPstateTable_t *entry)
|
||||
{
|
||||
return entry->UClk * convert_wck_ratio(entry->WckRatio) * 2;
|
||||
}
|
||||
|
||||
static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
|
||||
struct integrated_info *bios_info,
|
||||
DpmClocks_t_dcn35 *clock_table)
|
||||
{
|
||||
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
|
||||
struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
|
||||
uint32_t max_pstate = 0, max_uclk = 0, max_fclk = 0;
|
||||
uint32_t min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
|
||||
uint32_t max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
|
||||
uint32_t max_pstate = 0, max_dram_speed_mts = 0, min_dram_speed_mts = 0;
|
||||
int i;
|
||||
|
||||
/* Determine min/max p-state values. */
|
||||
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
|
||||
if (is_valid_clock_value(clock_table->MemPstateTable[i].UClk) &&
|
||||
clock_table->MemPstateTable[i].UClk > max_uclk) {
|
||||
max_uclk = clock_table->MemPstateTable[i].UClk;
|
||||
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
|
||||
|
||||
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts > max_dram_speed_mts) {
|
||||
max_dram_speed_mts = dram_speed_mts;
|
||||
max_pstate = i;
|
||||
}
|
||||
}
|
||||
|
||||
/* We expect the table to contain at least one valid Uclk entry. */
|
||||
ASSERT(is_valid_clock_value(max_uclk));
|
||||
min_dram_speed_mts = max_dram_speed_mts;
|
||||
min_pstate = max_pstate;
|
||||
|
||||
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
|
||||
uint32_t dram_speed_mts = calc_dram_speed_mts(&clock_table->MemPstateTable[i]);
|
||||
|
||||
if (is_valid_clock_value(dram_speed_mts) && dram_speed_mts < min_dram_speed_mts) {
|
||||
min_dram_speed_mts = dram_speed_mts;
|
||||
min_pstate = i;
|
||||
}
|
||||
}
|
||||
|
||||
/* We expect the table to contain at least one valid P-state entry. */
|
||||
ASSERT(clock_table->NumMemPstatesEnabled &&
|
||||
is_valid_clock_value(max_dram_speed_mts) &&
|
||||
is_valid_clock_value(min_dram_speed_mts));
|
||||
|
||||
/* dispclk and dppclk can be max at any voltage, same number of levels for both */
|
||||
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
|
||||
@ -683,47 +699,46 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
|
||||
max_dppclk = find_max_clk_value(clock_table->DppClocks,
|
||||
clock_table->NumDispClkLevelsEnabled);
|
||||
} else {
|
||||
/* Invalid number of entries in the table from PMFW. */
|
||||
ASSERT(0);
|
||||
}
|
||||
if (clock_table->NumFclkLevelsEnabled <= NUM_FCLK_DPM_LEVELS)
|
||||
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq,
|
||||
clock_table->NumFclkLevelsEnabled);
|
||||
|
||||
for (i = 0; i < clock_table->NumMemPstatesEnabled; i++) {
|
||||
uint32_t min_uclk = clock_table->MemPstateTable[0].UClk;
|
||||
/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
|
||||
ASSERT(clock_table->NumDcfClkLevelsEnabled > 0);
|
||||
|
||||
max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, clock_table->NumFclkLevelsEnabled);
|
||||
|
||||
for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
|
||||
int j;
|
||||
|
||||
for (j = 1; j < clock_table->NumMemPstatesEnabled; j++) {
|
||||
if (is_valid_clock_value(clock_table->MemPstateTable[j].UClk) &&
|
||||
clock_table->MemPstateTable[j].UClk < min_uclk &&
|
||||
clock_table->MemPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
|
||||
min_uclk = clock_table->MemPstateTable[j].UClk;
|
||||
min_pstate = j;
|
||||
}
|
||||
}
|
||||
|
||||
/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
|
||||
for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
|
||||
if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
|
||||
break;
|
||||
break;
|
||||
|
||||
bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
|
||||
bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
|
||||
bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;
|
||||
bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
|
||||
|
||||
/* Now update clocks we do read */
|
||||
bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[min_pstate].MemClk;
|
||||
bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[min_pstate].Voltage;
|
||||
bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
|
||||
bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
|
||||
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
|
||||
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
|
||||
bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
|
||||
clock_table->MemPstateTable[min_pstate].WckRatio);
|
||||
}
|
||||
bw_params->clk_table.entries[i].wck_ratio =
|
||||
convert_wck_ratio(clock_table->MemPstateTable[min_pstate].WckRatio);
|
||||
|
||||
/* Dcfclk and Fclk are tied, but at a different ratio */
|
||||
bw_params->clk_table.entries[i].fclk_mhz = min(max_fclk, 2 * clock_table->DcfClocks[i]);
|
||||
}
|
||||
|
||||
/* Make sure to include at least one entry at highest pstate */
|
||||
if (max_pstate != min_pstate || i == 0) {
|
||||
if (i > MAX_NUM_DPM_LVL - 1)
|
||||
i = MAX_NUM_DPM_LVL - 1;
|
||||
|
||||
bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
|
||||
bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemPstateTable[max_pstate].MemClk;
|
||||
bw_params->clk_table.entries[i].voltage = clock_table->MemPstateTable[max_pstate].Voltage;
|
||||
@ -739,6 +754,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
|
||||
}
|
||||
bw_params->clk_table.num_entries = i--;
|
||||
|
||||
/* Make sure all highest clocks are included*/
|
||||
bw_params->clk_table.entries[i].socclk_mhz =
|
||||
find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
|
||||
bw_params->clk_table.entries[i].dispclk_mhz =
|
||||
@ -757,6 +773,11 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
|
||||
bw_params->clk_table.num_entries_per_clk.num_fclk_levels = clock_table->NumFclkLevelsEnabled;
|
||||
bw_params->clk_table.num_entries_per_clk.num_memclk_levels = clock_table->NumMemPstatesEnabled;
|
||||
bw_params->clk_table.num_entries_per_clk.num_socclk_levels = clock_table->NumSocClkLevelsEnabled;
|
||||
|
||||
/*
|
||||
* Set any 0 clocks to max default setting. Not an issue for
|
||||
* power since we aren't doing switching in such case anyway
|
||||
*/
|
||||
for (i = 0; i < bw_params->clk_table.num_entries; i++) {
|
||||
if (!bw_params->clk_table.entries[i].fclk_mhz) {
|
||||
bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
|
||||
@ -965,21 +986,6 @@ struct clk_mgr_funcs dcn35_fpga_funcs = {
|
||||
.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
|
||||
};
|
||||
|
||||
static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
|
||||
{
|
||||
uint32_t clock_source;
|
||||
struct dc_context *ctx = clk_mgr->base.ctx;
|
||||
|
||||
REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
|
||||
|
||||
clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
|
||||
|
||||
if (clk_mgr->dprefclk_ss_percentage != 0) {
|
||||
clk_mgr->ss_on_dprefclk = true;
|
||||
clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
|
||||
}
|
||||
}
|
||||
|
||||
void dcn35_clk_mgr_construct(
|
||||
struct dc_context *ctx,
|
||||
struct clk_mgr_dcn35 *clk_mgr,
|
||||
@ -1043,17 +1049,11 @@ void dcn35_clk_mgr_construct(
|
||||
dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
|
||||
|
||||
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
|
||||
clk_mgr->base.base.clks.ref_dtbclk_khz = dcn35_smu_get_dtbclk(&clk_mgr->base);
|
||||
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
|
||||
|
||||
if (!clk_mgr->base.base.clks.ref_dtbclk_khz)
|
||||
dcn35_smu_set_dtbclk(&clk_mgr->base, true);
|
||||
|
||||
clk_mgr->base.base.clks.dtbclk_en = true;
|
||||
dce_clock_read_ss_info(&clk_mgr->base);
|
||||
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
|
||||
|
||||
dcn35_read_ss_info_from_lut(&clk_mgr->base);
|
||||
|
||||
clk_mgr->base.base.bw_params = &dcn35_bw_params;
|
||||
|
||||
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
|
||||
@ -1129,7 +1129,6 @@ void dcn35_clk_mgr_construct(
|
||||
ctx->dc->debug.disable_dpp_power_gate = false;
|
||||
ctx->dc->debug.disable_hubp_power_gate = false;
|
||||
ctx->dc->debug.disable_dsc_power_gate = false;
|
||||
ctx->dc->debug.disable_hpo_power_gate = false;
|
||||
} else {
|
||||
/*let's reset the config control flag*/
|
||||
ctx->dc->config.disable_ips = DMUB_IPS_DISABLE_ALL; /*pmfw not support it, disable it all*/
|
||||
|
@ -874,6 +874,7 @@ struct dc_debug_options {
|
||||
unsigned int seamless_boot_odm_combine;
|
||||
unsigned int force_odm_combine_4to1; //bit vector based on otg inst
|
||||
int minimum_z8_residency_time;
|
||||
int minimum_z10_residency_time;
|
||||
bool disable_z9_mpc;
|
||||
unsigned int force_fclk_khz;
|
||||
bool enable_tri_buf;
|
||||
@ -1608,7 +1609,6 @@ struct dc_link {
|
||||
enum edp_revision edp_revision;
|
||||
union dpcd_sink_ext_caps dpcd_sink_ext_caps;
|
||||
|
||||
struct backlight_settings backlight_settings;
|
||||
struct psr_settings psr_settings;
|
||||
|
||||
struct replay_settings replay_settings;
|
||||
|
@ -991,10 +991,6 @@ struct link_mst_stream_allocation_table {
|
||||
struct link_mst_stream_allocation stream_allocations[MAX_CONTROLLER_NUM];
|
||||
};
|
||||
|
||||
struct backlight_settings {
|
||||
uint32_t backlight_millinits;
|
||||
};
|
||||
|
||||
/* PSR feature flags */
|
||||
struct psr_settings {
|
||||
bool psr_feature_enabled; // PSR is supported by sink
|
||||
|
@ -871,7 +871,7 @@ static const struct dc_plane_cap plane_cap = {
|
||||
static const struct dc_debug_options debug_defaults_drv = {
|
||||
.disable_z10 = false,
|
||||
.enable_z9_disable_interface = true,
|
||||
.minimum_z8_residency_time = 2000,
|
||||
.minimum_z8_residency_time = 2100,
|
||||
.psr_skip_crtc_disable = true,
|
||||
.replay_skip_crtc_disabled = true,
|
||||
.disable_dmcu = true,
|
||||
|
@ -261,6 +261,7 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
|
||||
uint32_t power_gate = power_on ? 0 : 1;
|
||||
uint32_t pwr_status = power_on ? 0 : 2;
|
||||
uint32_t org_ip_request_cntl;
|
||||
uint32_t power_forceon;
|
||||
bool block_enabled;
|
||||
|
||||
if (pg_cntl->ctx->dc->debug.ignore_pg ||
|
||||
@ -277,6 +278,10 @@ void pg_cntl35_hpo_pg_control(struct pg_cntl *pg_cntl, bool power_on)
|
||||
return;
|
||||
}
|
||||
|
||||
REG_GET(DOMAIN25_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
|
||||
if (power_forceon)
|
||||
return;
|
||||
|
||||
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
|
||||
if (org_ip_request_cntl == 0)
|
||||
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
|
||||
@ -304,6 +309,7 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
|
||||
uint32_t power_gate = power_on ? 0 : 1;
|
||||
uint32_t pwr_status = power_on ? 0 : 2;
|
||||
uint32_t org_ip_request_cntl;
|
||||
uint32_t power_forceon;
|
||||
bool block_enabled;
|
||||
|
||||
if (pg_cntl->ctx->dc->debug.ignore_pg ||
|
||||
@ -319,6 +325,10 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
|
||||
return;
|
||||
}
|
||||
|
||||
REG_GET(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, &power_forceon);
|
||||
if (power_forceon)
|
||||
return;
|
||||
|
||||
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
|
||||
if (org_ip_request_cntl == 0)
|
||||
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
|
||||
|
@ -1712,6 +1712,13 @@ static bool dcn35_validate_bandwidth(struct dc *dc,
|
||||
|
||||
out = dml2_validate(dc, context, fast_validate);
|
||||
|
||||
if (fast_validate)
|
||||
return out;
|
||||
|
||||
DC_FP_START();
|
||||
dcn35_decide_zstate_support(dc, context);
|
||||
DC_FP_END();
|
||||
|
||||
return out;
|
||||
}
|
||||
|
||||
@ -1857,7 +1864,7 @@ static bool dcn35_resource_construct(
|
||||
|
||||
/* Use pipe context based otg sync logic */
|
||||
dc->config.use_pipe_ctx_sync_logic = true;
|
||||
dc->config.use_default_clock_table = false;
|
||||
|
||||
/* read VBIOS LTTPR caps */
|
||||
{
|
||||
if (ctx->dc_bios->funcs->get_lttpr_caps) {
|
||||
|
@ -36,7 +36,7 @@
|
||||
* Define the maximum amount of states supported by the ASIC. Every ASIC has a
|
||||
* specific number of states; this macro defines the maximum number of states.
|
||||
*/
|
||||
#define DC__VOLTAGE_STATES 20
|
||||
#define DC__VOLTAGE_STATES 40
|
||||
#define DC__NUM_DPP__4 1
|
||||
#define DC__NUM_DPP__0_PRESENT 1
|
||||
#define DC__NUM_DPP__1_PRESENT 1
|
||||
|
@ -950,10 +950,8 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
|
||||
{
|
||||
int plane_count;
|
||||
int i;
|
||||
unsigned int min_dst_y_next_start_us;
|
||||
|
||||
plane_count = 0;
|
||||
min_dst_y_next_start_us = 0;
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (context->res_ctx.pipe_ctx[i].plane_state)
|
||||
plane_count++;
|
||||
@ -975,26 +973,15 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc
|
||||
else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
|
||||
struct dc_link *link = context->streams[0]->sink->link;
|
||||
struct dc_stream_status *stream_status = &context->stream_status[0];
|
||||
struct dc_stream_state *current_stream = context->streams[0];
|
||||
int minmum_z8_residency = dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
|
||||
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
|
||||
bool is_pwrseq0 = link->link_index == 0;
|
||||
bool isFreesyncVideo;
|
||||
|
||||
isFreesyncVideo = current_stream->adjust.v_total_min == current_stream->adjust.v_total_max;
|
||||
isFreesyncVideo = isFreesyncVideo && current_stream->timing.v_total < current_stream->adjust.v_total_min;
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (context->res_ctx.pipe_ctx[i].stream == current_stream && isFreesyncVideo) {
|
||||
min_dst_y_next_start_us = context->res_ctx.pipe_ctx[i].dlg_regs.min_dst_y_next_start_us;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Don't support multi-plane configurations */
|
||||
if (stream_status->plane_count > 1)
|
||||
return DCN_ZSTATE_SUPPORT_DISALLOW;
|
||||
|
||||
if (is_pwrseq0 && (context->bw_ctx.dml.vba.StutterPeriod > 5000.0 || min_dst_y_next_start_us > 5000))
|
||||
if (is_pwrseq0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0)
|
||||
return DCN_ZSTATE_SUPPORT_ALLOW;
|
||||
else if (is_pwrseq0 && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr)
|
||||
return allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
|
||||
|
@ -1192,13 +1192,16 @@ static bool update_pipe_slice_table_with_split_flags(
|
||||
*/
|
||||
struct pipe_ctx *pipe;
|
||||
bool odm;
|
||||
int i;
|
||||
int dc_pipe_idx, dml_pipe_idx = 0;
|
||||
bool updated = false;
|
||||
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
pipe = &context->res_ctx.pipe_ctx[i];
|
||||
for (dc_pipe_idx = 0;
|
||||
dc_pipe_idx < dc->res_pool->pipe_count; dc_pipe_idx++) {
|
||||
pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
|
||||
if (resource_is_pipe_type(pipe, FREE_PIPE))
|
||||
continue;
|
||||
|
||||
if (merge[i]) {
|
||||
if (merge[dc_pipe_idx]) {
|
||||
if (resource_is_pipe_type(pipe, OPP_HEAD))
|
||||
/* merging OPP head means reducing ODM slice
|
||||
* count by 1
|
||||
@ -1213,17 +1216,18 @@ static bool update_pipe_slice_table_with_split_flags(
|
||||
updated = true;
|
||||
}
|
||||
|
||||
if (split[i]) {
|
||||
odm = vba->ODMCombineEnabled[vba->pipe_plane[i]] !=
|
||||
if (split[dc_pipe_idx]) {
|
||||
odm = vba->ODMCombineEnabled[vba->pipe_plane[dml_pipe_idx]] !=
|
||||
dm_odm_combine_mode_disabled;
|
||||
if (odm && resource_is_pipe_type(pipe, OPP_HEAD))
|
||||
update_slice_table_for_stream(
|
||||
table, pipe->stream, split[i] - 1);
|
||||
table, pipe->stream, split[dc_pipe_idx] - 1);
|
||||
else if (!odm && resource_is_pipe_type(pipe, DPP_PIPE))
|
||||
update_slice_table_for_plane(table, pipe,
|
||||
pipe->plane_state, split[i] - 1);
|
||||
pipe->plane_state, split[dc_pipe_idx] - 1);
|
||||
updated = true;
|
||||
}
|
||||
dml_pipe_idx++;
|
||||
}
|
||||
return updated;
|
||||
}
|
||||
@ -2231,6 +2235,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
|
||||
int i, pipe_idx, vlevel_temp = 0;
|
||||
double dcfclk = dcn3_2_soc.clock_limits[0].dcfclk_mhz;
|
||||
double dcfclk_from_validation = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb];
|
||||
double dram_speed_from_validation = context->bw_ctx.dml.vba.DRAMSpeed;
|
||||
double dcfclk_from_fw_based_mclk_switching = dcfclk_from_validation;
|
||||
bool pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] !=
|
||||
dm_dram_clock_change_unsupported;
|
||||
@ -2418,7 +2423,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context,
|
||||
}
|
||||
|
||||
if (dc->clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid) {
|
||||
min_dram_speed_mts = context->bw_ctx.dml.vba.DRAMSpeed;
|
||||
min_dram_speed_mts = dram_speed_from_validation;
|
||||
min_dram_speed_mts_margin = 160;
|
||||
|
||||
context->bw_ctx.dml.soc.dram_clock_change_latency_us =
|
||||
|
@ -164,10 +164,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
|
||||
},
|
||||
},
|
||||
.num_states = 5,
|
||||
.sr_exit_time_us = 9.0,
|
||||
.sr_enter_plus_exit_time_us = 11.0,
|
||||
.sr_exit_z8_time_us = 50.0, /*changed from 442.0*/
|
||||
.sr_enter_plus_exit_z8_time_us = 50.0,/*changed from 560.0*/
|
||||
.sr_exit_time_us = 14.0,
|
||||
.sr_enter_plus_exit_time_us = 16.0,
|
||||
.sr_exit_z8_time_us = 525.0,
|
||||
.sr_enter_plus_exit_z8_time_us = 715.0,
|
||||
.fclk_change_latency_us = 20.0,
|
||||
.usr_retraining_latency_us = 2,
|
||||
.writeback_latency_us = 12.0,
|
||||
@ -329,6 +329,48 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
|
||||
/*temp till dml2 fully work without dml1*/
|
||||
dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip,
|
||||
DML_PROJECT_DCN31);
|
||||
|
||||
/*copy to dml2, before dml2_create*/
|
||||
if (clk_table->num_entries > 2) {
|
||||
|
||||
for (i = 0; i < clk_table->num_entries; i++) {
|
||||
dc->dml2_options.bbox_overrides.clks_table.num_states =
|
||||
clk_table->num_entries;
|
||||
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz =
|
||||
clock_limits[i].dcfclk_mhz;
|
||||
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz =
|
||||
clock_limits[i].fabricclk_mhz;
|
||||
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz =
|
||||
clock_limits[i].dispclk_mhz;
|
||||
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz =
|
||||
clock_limits[i].dppclk_mhz;
|
||||
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz =
|
||||
clock_limits[i].socclk_mhz;
|
||||
dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
|
||||
clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
|
||||
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
|
||||
clk_table->num_entries;
|
||||
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
|
||||
clk_table->num_entries;
|
||||
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels =
|
||||
clk_table->num_entries;
|
||||
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels =
|
||||
clk_table->num_entries;
|
||||
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels =
|
||||
clk_table->num_entries;
|
||||
dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
|
||||
clk_table->num_entries;
|
||||
}
|
||||
}
|
||||
|
||||
/* Update latency values */
|
||||
dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = dcn3_5_soc.dram_clock_change_latency_us;
|
||||
|
||||
dc->dml2_options.bbox_overrides.sr_exit_latency_us = dcn3_5_soc.sr_exit_time_us;
|
||||
dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = dcn3_5_soc.sr_enter_plus_exit_time_us;
|
||||
|
||||
dc->dml2_options.bbox_overrides.sr_exit_z8_time_us = dcn3_5_soc.sr_exit_z8_time_us;
|
||||
dc->dml2_options.bbox_overrides.sr_enter_plus_exit_z8_time_us = dcn3_5_soc.sr_enter_plus_exit_z8_time_us;
|
||||
}
|
||||
|
||||
static bool is_dual_plane(enum surface_pixel_format format)
|
||||
@ -507,3 +549,37 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
|
||||
|
||||
return pipe_cnt;
|
||||
}
|
||||
|
||||
void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context)
|
||||
{
|
||||
enum dcn_zstate_support_state support = DCN_ZSTATE_SUPPORT_DISALLOW;
|
||||
unsigned int i, plane_count = 0;
|
||||
|
||||
for (i = 0; i < dc->res_pool->pipe_count; i++) {
|
||||
if (context->res_ctx.pipe_ctx[i].plane_state)
|
||||
plane_count++;
|
||||
}
|
||||
|
||||
if (plane_count == 0) {
|
||||
support = DCN_ZSTATE_SUPPORT_ALLOW;
|
||||
} else if (plane_count == 1 && context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) {
|
||||
struct dc_link *link = context->streams[0]->sink->link;
|
||||
bool is_pwrseq0 = link && link->link_index == 0;
|
||||
bool is_psr1 = link && link->psr_settings.psr_version == DC_PSR_VERSION_1 && !link->panel_config.psr.disable_psr;
|
||||
int minmum_z8_residency =
|
||||
dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
|
||||
bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
|
||||
int minmum_z10_residency =
|
||||
dc->debug.minimum_z10_residency_time > 0 ? dc->debug.minimum_z10_residency_time : 5000;
|
||||
bool allow_z10 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z10_residency;
|
||||
|
||||
if (is_pwrseq0 && allow_z10)
|
||||
support = DCN_ZSTATE_SUPPORT_ALLOW;
|
||||
else if (is_pwrseq0 && is_psr1)
|
||||
support = allow_z8 ? DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY : DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY;
|
||||
else if (allow_z8)
|
||||
support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
|
||||
}
|
||||
|
||||
context->bw_ctx.bw.dcn.clk.zstate_support = support;
|
||||
}
|
||||
|
@ -39,4 +39,6 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc,
|
||||
display_e2e_pipe_params_st *pipes,
|
||||
bool fast_validate);
|
||||
|
||||
void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context);
|
||||
|
||||
#endif
|
||||
|
@ -6329,7 +6329,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
|
||||
mode_lib->ms.NoOfDPPThisState,
|
||||
mode_lib->ms.dpte_group_bytes,
|
||||
s->HostVMInefficiencyFactor,
|
||||
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
|
||||
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
|
||||
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
|
||||
|
||||
s->NextMaxVStartup = s->MaxVStartupAllPlanes[j];
|
||||
@ -6542,7 +6542,7 @@ static void dml_prefetch_check(struct display_mode_lib_st *mode_lib)
|
||||
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
|
||||
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
|
||||
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
|
||||
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
|
||||
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
|
||||
mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k],
|
||||
mode_lib->ms.MetaRowBytes[j][k],
|
||||
mode_lib->ms.DPTEBytesPerRow[j][k],
|
||||
@ -7687,7 +7687,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
|
||||
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
|
||||
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
|
||||
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
|
||||
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
|
||||
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
|
||||
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
|
||||
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
|
||||
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = mode_lib->ms.PTEBufferSizeNotExceededPerState;
|
||||
@ -7957,7 +7957,7 @@ dml_bool_t dml_core_mode_support(struct display_mode_lib_st *mode_lib)
|
||||
UseMinimumDCFCLK_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
|
||||
UseMinimumDCFCLK_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
|
||||
UseMinimumDCFCLK_params->NumberOfActiveSurfaces = mode_lib->ms.num_active_planes;
|
||||
UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
|
||||
UseMinimumDCFCLK_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
|
||||
UseMinimumDCFCLK_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
|
||||
UseMinimumDCFCLK_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
|
||||
UseMinimumDCFCLK_params->ImmediateFlipRequirement = s->ImmediateFlipRequiredFinal;
|
||||
@ -8699,7 +8699,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
|
||||
CalculateVMRowAndSwath_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
|
||||
CalculateVMRowAndSwath_params->GPUVMMaxPageTableLevels = mode_lib->ms.cache_display_cfg.plane.GPUVMMaxPageTableLevels;
|
||||
CalculateVMRowAndSwath_params->GPUVMMinPageSizeKBytes = mode_lib->ms.cache_display_cfg.plane.GPUVMMinPageSizeKBytes;
|
||||
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
|
||||
CalculateVMRowAndSwath_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
|
||||
CalculateVMRowAndSwath_params->PTEBufferModeOverrideEn = mode_lib->ms.cache_display_cfg.plane.PTEBufferModeOverrideEn;
|
||||
CalculateVMRowAndSwath_params->PTEBufferModeOverrideVal = mode_lib->ms.cache_display_cfg.plane.PTEBufferMode;
|
||||
CalculateVMRowAndSwath_params->PTEBufferSizeNotExceeded = s->dummy_boolean_array[0];
|
||||
@ -8805,7 +8805,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
|
||||
mode_lib->ms.cache_display_cfg.hw.DPPPerSurface,
|
||||
locals->dpte_group_bytes,
|
||||
s->HostVMInefficiencyFactor,
|
||||
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
|
||||
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
|
||||
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels);
|
||||
|
||||
locals->TCalc = 24.0 / locals->DCFCLKDeepSleep;
|
||||
@ -8995,7 +8995,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
|
||||
CalculatePrefetchSchedule_params->GPUVMEnable = mode_lib->ms.cache_display_cfg.plane.GPUVMEnable;
|
||||
CalculatePrefetchSchedule_params->HostVMEnable = mode_lib->ms.cache_display_cfg.plane.HostVMEnable;
|
||||
CalculatePrefetchSchedule_params->HostVMMaxNonCachedPageTableLevels = mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels;
|
||||
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes;
|
||||
CalculatePrefetchSchedule_params->HostVMMinPageSize = mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024;
|
||||
CalculatePrefetchSchedule_params->DynamicMetadataEnable = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataEnable[k];
|
||||
CalculatePrefetchSchedule_params->DynamicMetadataVMEnabled = mode_lib->ms.ip.dynamic_metadata_vm_enabled;
|
||||
CalculatePrefetchSchedule_params->DynamicMetadataLinesBeforeActiveRequired = mode_lib->ms.cache_display_cfg.plane.DynamicMetadataLinesBeforeActiveRequired[k];
|
||||
@ -9240,7 +9240,7 @@ void dml_core_mode_programming(struct display_mode_lib_st *mode_lib, const struc
|
||||
mode_lib->ms.cache_display_cfg.plane.HostVMEnable,
|
||||
mode_lib->ms.cache_display_cfg.plane.HostVMMaxPageTableLevels,
|
||||
mode_lib->ms.cache_display_cfg.plane.GPUVMEnable,
|
||||
mode_lib->ms.soc.hostvm_min_page_size_kbytes,
|
||||
mode_lib->ms.soc.hostvm_min_page_size_kbytes * 1024,
|
||||
locals->PDEAndMetaPTEBytesFrame[k],
|
||||
locals->MetaRowByte[k],
|
||||
locals->PixelPTEBytesPerRow[k],
|
||||
|
@ -341,25 +341,42 @@ void dml2_init_soc_states(struct dml2_context *dml2, const struct dc *in_dc,
|
||||
break;
|
||||
}
|
||||
|
||||
/* Override from passed values, mainly for debugging purposes, if available */
|
||||
if (dml2->config.bbox_overrides.sr_exit_latency_us) {
|
||||
p->in_states->state_array[0].sr_exit_time_us = dml2->config.bbox_overrides.sr_exit_latency_us;
|
||||
}
|
||||
/* Override from passed values, if available */
|
||||
for (i = 0; i < p->in_states->num_states; i++) {
|
||||
if (dml2->config.bbox_overrides.sr_exit_latency_us) {
|
||||
p->in_states->state_array[i].sr_exit_time_us =
|
||||
dml2->config.bbox_overrides.sr_exit_latency_us;
|
||||
}
|
||||
|
||||
if (dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us) {
|
||||
p->in_states->state_array[0].sr_enter_plus_exit_time_us = dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us;
|
||||
}
|
||||
if (dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us) {
|
||||
p->in_states->state_array[i].sr_enter_plus_exit_time_us =
|
||||
dml2->config.bbox_overrides.sr_enter_plus_exit_latency_us;
|
||||
}
|
||||
|
||||
if (dml2->config.bbox_overrides.urgent_latency_us) {
|
||||
p->in_states->state_array[0].urgent_latency_pixel_data_only_us = dml2->config.bbox_overrides.urgent_latency_us;
|
||||
}
|
||||
if (dml2->config.bbox_overrides.sr_exit_z8_time_us) {
|
||||
p->in_states->state_array[i].sr_exit_z8_time_us =
|
||||
dml2->config.bbox_overrides.sr_exit_z8_time_us;
|
||||
}
|
||||
|
||||
if (dml2->config.bbox_overrides.dram_clock_change_latency_us) {
|
||||
p->in_states->state_array[0].dram_clock_change_latency_us = dml2->config.bbox_overrides.dram_clock_change_latency_us;
|
||||
}
|
||||
if (dml2->config.bbox_overrides.sr_enter_plus_exit_z8_time_us) {
|
||||
p->in_states->state_array[i].sr_enter_plus_exit_z8_time_us =
|
||||
dml2->config.bbox_overrides.sr_enter_plus_exit_z8_time_us;
|
||||
}
|
||||
|
||||
if (dml2->config.bbox_overrides.fclk_change_latency_us) {
|
||||
p->in_states->state_array[0].fclk_change_latency_us = dml2->config.bbox_overrides.fclk_change_latency_us;
|
||||
if (dml2->config.bbox_overrides.urgent_latency_us) {
|
||||
p->in_states->state_array[i].urgent_latency_pixel_data_only_us =
|
||||
dml2->config.bbox_overrides.urgent_latency_us;
|
||||
}
|
||||
|
||||
if (dml2->config.bbox_overrides.dram_clock_change_latency_us) {
|
||||
p->in_states->state_array[i].dram_clock_change_latency_us =
|
||||
dml2->config.bbox_overrides.dram_clock_change_latency_us;
|
||||
}
|
||||
|
||||
if (dml2->config.bbox_overrides.fclk_change_latency_us) {
|
||||
p->in_states->state_array[i].fclk_change_latency_us =
|
||||
dml2->config.bbox_overrides.fclk_change_latency_us;
|
||||
}
|
||||
}
|
||||
|
||||
/* DCFCLK stas values are project specific */
|
||||
@ -498,8 +515,8 @@ void dml2_translate_socbb_params(const struct dc *in, struct soc_bounding_box_st
|
||||
out->do_urgent_latency_adjustment = in_soc_params->do_urgent_latency_adjustment;
|
||||
out->dram_channel_width_bytes = (dml_uint_t)in_soc_params->dram_channel_width_bytes;
|
||||
out->fabric_datapath_to_dcn_data_return_bytes = (dml_uint_t)in_soc_params->fabric_datapath_to_dcn_data_return_bytes;
|
||||
out->gpuvm_min_page_size_kbytes = in_soc_params->gpuvm_min_page_size_bytes * 1024;
|
||||
out->hostvm_min_page_size_kbytes = in_soc_params->hostvm_min_page_size_bytes * 1024;
|
||||
out->gpuvm_min_page_size_kbytes = in_soc_params->gpuvm_min_page_size_bytes / 1024;
|
||||
out->hostvm_min_page_size_kbytes = in_soc_params->hostvm_min_page_size_bytes / 1024;
|
||||
out->mall_allocated_for_dcn_mbytes = (dml_uint_t)in_soc_params->mall_allocated_for_dcn_mbytes;
|
||||
out->max_avg_dram_bw_use_normal_percent = in_soc_params->max_avg_dram_bw_use_normal_percent;
|
||||
out->max_avg_fabric_bw_use_normal_percent = in_soc_params->max_avg_fabric_bw_use_normal_percent;
|
||||
@ -1040,9 +1057,12 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
|
||||
}
|
||||
|
||||
//Generally these are set by referencing our latest BB/IP params in dcn32_resource.c file
|
||||
dml_dispcfg->plane.GPUVMEnable = true;
|
||||
dml_dispcfg->plane.GPUVMMaxPageTableLevels = 4;
|
||||
dml_dispcfg->plane.HostVMEnable = false;
|
||||
dml_dispcfg->plane.GPUVMEnable = dml2->v20.dml_core_ctx.ip.gpuvm_enable;
|
||||
dml_dispcfg->plane.GPUVMMaxPageTableLevels = dml2->v20.dml_core_ctx.ip.gpuvm_max_page_table_levels;
|
||||
dml_dispcfg->plane.HostVMEnable = dml2->v20.dml_core_ctx.ip.hostvm_enable;
|
||||
dml_dispcfg->plane.HostVMMaxPageTableLevels = dml2->v20.dml_core_ctx.ip.hostvm_max_page_table_levels;
|
||||
if (dml2->v20.dml_core_ctx.ip.hostvm_enable)
|
||||
dml2->v20.dml_core_ctx.policy.AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter;
|
||||
|
||||
dml2_populate_pipe_to_plane_index_mapping(dml2, context);
|
||||
|
||||
|
@ -139,6 +139,8 @@ struct dml2_soc_bbox_overrides {
|
||||
double urgent_latency_us;
|
||||
double sr_exit_latency_us;
|
||||
double sr_enter_plus_exit_latency_us;
|
||||
double sr_exit_z8_time_us;
|
||||
double sr_enter_plus_exit_z8_time_us;
|
||||
double dram_clock_change_latency_us;
|
||||
double fclk_change_latency_us;
|
||||
unsigned int dram_num_chan;
|
||||
|
@ -487,8 +487,7 @@ bool dcn32_set_mcm_luts(
|
||||
if (plane_state->blend_tf->type == TF_TYPE_HWPWL)
|
||||
lut_params = &plane_state->blend_tf->pwl;
|
||||
else if (plane_state->blend_tf->type == TF_TYPE_DISTRIBUTED_POINTS) {
|
||||
cm_helper_translate_curve_to_hw_format(plane_state->ctx,
|
||||
plane_state->blend_tf,
|
||||
cm3_helper_translate_curve_to_hw_format(plane_state->blend_tf,
|
||||
&dpp_base->regamma_params, false);
|
||||
lut_params = &dpp_base->regamma_params;
|
||||
}
|
||||
@ -503,8 +502,7 @@ bool dcn32_set_mcm_luts(
|
||||
else if (plane_state->in_shaper_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
|
||||
// TODO: dpp_base replace
|
||||
ASSERT(false);
|
||||
cm_helper_translate_curve_to_hw_format(plane_state->ctx,
|
||||
plane_state->in_shaper_func,
|
||||
cm3_helper_translate_curve_to_hw_format(plane_state->in_shaper_func,
|
||||
&dpp_base->shaper_params, true);
|
||||
lut_params = &dpp_base->shaper_params;
|
||||
}
|
||||
|
@ -879,7 +879,7 @@ static bool detect_link_and_local_sink(struct dc_link *link,
|
||||
(link->dpcd_sink_ext_caps.bits.oled == 1)) {
|
||||
dpcd_set_source_specific_data(link);
|
||||
msleep(post_oui_delay);
|
||||
set_cached_brightness_aux(link);
|
||||
set_default_brightness_aux(link);
|
||||
}
|
||||
|
||||
return true;
|
||||
|
@ -2142,8 +2142,7 @@ static enum dc_status enable_link_dp(struct dc_state *state,
|
||||
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
|
||||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
|
||||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
|
||||
set_cached_brightness_aux(link);
|
||||
|
||||
set_default_brightness_aux(link);
|
||||
if (link->dpcd_sink_ext_caps.bits.oled == 1)
|
||||
msleep(bl_oled_enable_delay);
|
||||
edp_backlight_enable_aux(link, true);
|
||||
|
@ -115,7 +115,7 @@ static enum link_training_result perform_fixed_vs_pe_nontransparent_training_seq
|
||||
lt_settings->cr_pattern_time = 16000;
|
||||
|
||||
/* Fixed VS/PE specific: Toggle link rate */
|
||||
apply_toggle_rate_wa = (link->vendor_specific_lttpr_link_rate_wa == target_rate);
|
||||
apply_toggle_rate_wa = ((link->vendor_specific_lttpr_link_rate_wa == target_rate) || (link->vendor_specific_lttpr_link_rate_wa == 0));
|
||||
target_rate = get_dpcd_link_rate(<_settings->link_settings);
|
||||
toggle_rate = (target_rate == 0x6) ? 0xA : 0x6;
|
||||
|
||||
@ -271,7 +271,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
|
||||
/* Vendor specific: Toggle link rate */
|
||||
toggle_rate = (rate == 0x6) ? 0xA : 0x6;
|
||||
|
||||
if (link->vendor_specific_lttpr_link_rate_wa == rate) {
|
||||
if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
|
||||
core_link_write_dpcd(
|
||||
link,
|
||||
DP_LINK_BW_SET,
|
||||
@ -617,7 +617,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
|
||||
/* Vendor specific: Toggle link rate */
|
||||
toggle_rate = (rate == 0x6) ? 0xA : 0x6;
|
||||
|
||||
if (link->vendor_specific_lttpr_link_rate_wa == rate) {
|
||||
if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) {
|
||||
core_link_write_dpcd(
|
||||
link,
|
||||
DP_LINK_BW_SET,
|
||||
|
@ -170,7 +170,6 @@ bool edp_set_backlight_level_nits(struct dc_link *link,
|
||||
*(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
|
||||
*(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
|
||||
|
||||
link->backlight_settings.backlight_millinits = backlight_millinits;
|
||||
|
||||
if (!link->dpcd_caps.panel_luminance_control) {
|
||||
if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
|
||||
@ -288,9 +287,9 @@ bool set_default_brightness_aux(struct dc_link *link)
|
||||
if (link && link->dpcd_sink_ext_caps.bits.oled == 1) {
|
||||
if (!read_default_bl_aux(link, &default_backlight))
|
||||
default_backlight = 150000;
|
||||
// if < 1 nits or > 5000, it might be wrong readback
|
||||
if (default_backlight < 1000 || default_backlight > 5000000)
|
||||
default_backlight = 150000; //
|
||||
// if > 5000, it might be wrong readback
|
||||
if (default_backlight > 5000000)
|
||||
default_backlight = 150000;
|
||||
|
||||
return edp_set_backlight_level_nits(link, true,
|
||||
default_backlight, 0);
|
||||
@ -298,15 +297,6 @@ bool set_default_brightness_aux(struct dc_link *link)
|
||||
return false;
|
||||
}
|
||||
|
||||
bool set_cached_brightness_aux(struct dc_link *link)
|
||||
{
|
||||
if (link->backlight_settings.backlight_millinits)
|
||||
return edp_set_backlight_level_nits(link, true,
|
||||
link->backlight_settings.backlight_millinits, 0);
|
||||
else
|
||||
return set_default_brightness_aux(link);
|
||||
return false;
|
||||
}
|
||||
bool edp_is_ilr_optimization_enabled(struct dc_link *link)
|
||||
{
|
||||
if (link->dpcd_caps.edp_supported_link_rates_count == 0 || !link->panel_config.ilr.optimize_edp_link_rate)
|
||||
|
@ -30,7 +30,6 @@
|
||||
enum dp_panel_mode dp_get_panel_mode(struct dc_link *link);
|
||||
void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
|
||||
bool set_default_brightness_aux(struct dc_link *link);
|
||||
bool set_cached_brightness_aux(struct dc_link *link);
|
||||
void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd);
|
||||
int edp_get_backlight_level(const struct dc_link *link);
|
||||
bool edp_get_backlight_level_nits(struct dc_link *link,
|
||||
|
@ -1077,6 +1077,7 @@ enum dmub_status dmub_srv_wait_for_inbox0_ack(struct dmub_srv *dmub, uint32_t ti
|
||||
ack = dmub->hw_funcs.read_inbox0_ack_register(dmub);
|
||||
if (ack)
|
||||
return DMUB_STATUS_OK;
|
||||
udelay(1);
|
||||
}
|
||||
return DMUB_STATUS_TIMEOUT;
|
||||
}
|
||||
|
@ -6369,6 +6369,8 @@
|
||||
#define regTCP_INVALIDATE_BASE_IDX 1
|
||||
#define regTCP_STATUS 0x19a1
|
||||
#define regTCP_STATUS_BASE_IDX 1
|
||||
#define regTCP_CNTL 0x19a2
|
||||
#define regTCP_CNTL_BASE_IDX 1
|
||||
#define regTCP_CNTL2 0x19a3
|
||||
#define regTCP_CNTL2_BASE_IDX 1
|
||||
#define regTCP_DEBUG_INDEX 0x19a5
|
||||
|
@ -781,6 +781,8 @@
|
||||
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_CNTL2_BASE_IDX 5
|
||||
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1 0x420187
|
||||
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1_BASE_IDX 5
|
||||
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3 0x4201c6
|
||||
#define regBIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3_BASE_IDX 5
|
||||
|
||||
|
||||
// addressBlock: nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp
|
||||
|
@ -24646,6 +24646,35 @@
|
||||
//BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK 0x00000001L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK 0x00000008L
|
||||
//BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x8
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x9
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_10BIT_TAG_EN_OVERRIDE__SHIFT 0xb
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_10BIT_TAG_EN_OVERRIDE__SHIFT 0xd
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__MST_DROP_SYNC_FLOOD_EN__SHIFT 0xf
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_PAYLOAD_SIZE_MODE__SHIFT 0x10
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_PAYLOAD_SIZE__SHIFT 0x11
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x14
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_READ_REQUEST_SIZE__SHIFT 0x15
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_SAFE_MODE__SHIFT 0x18
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x19
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE__SHIFT 0x1b
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV__SHIFT 0x1c
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_EXTENDED_TAG_EN_OVERRIDE__SHIFT 0x1e
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_PAYLOAD_SIZE_MODE_MASK 0x00000100L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_PRIV_MAX_PAYLOAD_SIZE_MASK 0x00000600L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_10BIT_TAG_EN_OVERRIDE_MASK 0x00001800L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_10BIT_TAG_EN_OVERRIDE_MASK 0x00006000L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__MST_DROP_SYNC_FLOOD_EN_MASK 0x00008000L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_PAYLOAD_SIZE_MODE_MASK 0x00010000L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_PAYLOAD_SIZE_MASK 0x000E0000L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_REQUEST_SIZE_MODE_MASK 0x00100000L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_PRIV_MAX_READ_REQUEST_SIZE_MASK 0x00E00000L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_MAX_READ_SAFE_MODE_MASK 0x01000000L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_EXTENDED_TAG_EN_OVERRIDE_MASK 0x06000000L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_MODE_MASK 0x08000000L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_MAX_READ_REQUEST_SIZE_PRIV_MASK 0x30000000L
|
||||
#define BIF_BIF256_CI256_RC3X4_USB4_PCIE_MST_CTRL_3__CI_SWUS_EXTENDED_TAG_EN_OVERRIDE_MASK 0xC0000000L
|
||||
|
||||
// addressBlock: nbio_nbif0_bif_cfg_dev0_rc_bifcfgdecp
|
||||
//BIF_CFG_DEV0_RC0_VENDOR_ID
|
||||
|
@ -444,6 +444,7 @@ struct amd_pm_funcs {
|
||||
struct dpm_clocks *clock_table);
|
||||
int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t *size);
|
||||
void (*pm_compute_clocks)(void *handle);
|
||||
int (*notify_rlc_state)(void *handle, bool en);
|
||||
};
|
||||
|
||||
struct metrics_table_header {
|
||||
|
@ -181,6 +181,24 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en)
|
||||
{
|
||||
int ret = 0;
|
||||
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
|
||||
|
||||
if (pp_funcs && pp_funcs->notify_rlc_state) {
|
||||
mutex_lock(&adev->pm.mutex);
|
||||
|
||||
ret = pp_funcs->notify_rlc_state(
|
||||
adev->powerplay.pp_handle,
|
||||
en);
|
||||
|
||||
mutex_unlock(&adev->pm.mutex);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
|
||||
{
|
||||
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
|
||||
|
@ -415,6 +415,8 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
|
||||
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
|
||||
enum pp_mp1_state mp1_state);
|
||||
|
||||
int amdgpu_dpm_notify_rlc_state(struct amdgpu_device *adev, bool en);
|
||||
|
||||
int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev);
|
||||
|
||||
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
|
||||
|
@ -1710,6 +1710,16 @@ static int smu_disable_dpms(struct smu_context *smu)
|
||||
}
|
||||
}
|
||||
|
||||
/* Notify SMU RLC is going to be off, stop RLC and SMU interaction.
|
||||
* otherwise SMU will hang while interacting with RLC if RLC is halted
|
||||
* this is a WA for Vangogh asic which fix the SMU hang issue.
|
||||
*/
|
||||
ret = smu_notify_rlc_state(smu, false);
|
||||
if (ret) {
|
||||
dev_err(adev->dev, "Fail to notify rlc status!\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(9, 4, 2) &&
|
||||
!((adev->flags & AMD_IS_APU) && adev->gfx.imu.funcs) &&
|
||||
!amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs->stop)
|
||||
|
@ -1360,6 +1360,11 @@ struct pptable_funcs {
|
||||
* management.
|
||||
*/
|
||||
int (*dpm_set_umsch_mm_enable)(struct smu_context *smu, bool enable);
|
||||
|
||||
/**
|
||||
* @notify_rlc_state: Notify RLC power state to SMU.
|
||||
*/
|
||||
int (*notify_rlc_state)(struct smu_context *smu, bool en);
|
||||
};
|
||||
|
||||
typedef enum {
|
||||
|
@ -2193,8 +2193,7 @@ static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clock
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int vangogh_system_features_control(struct smu_context *smu, bool en)
|
||||
static int vangogh_notify_rlc_state(struct smu_context *smu, bool en)
|
||||
{
|
||||
struct amdgpu_device *adev = smu->adev;
|
||||
int ret = 0;
|
||||
@ -2523,7 +2522,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
|
||||
.print_clk_levels = vangogh_common_print_clk_levels,
|
||||
.set_default_dpm_table = vangogh_set_default_dpm_tables,
|
||||
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
|
||||
.system_features_control = vangogh_system_features_control,
|
||||
.notify_rlc_state = vangogh_notify_rlc_state,
|
||||
.feature_is_enabled = smu_cmn_feature_is_enabled,
|
||||
.set_power_profile_mode = vangogh_set_power_profile_mode,
|
||||
.get_power_profile_mode = vangogh_get_power_profile_mode,
|
||||
|
@ -257,8 +257,11 @@ static int aldebaran_tables_init(struct smu_context *smu)
|
||||
}
|
||||
|
||||
smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
|
||||
if (!smu_table->ecc_table)
|
||||
if (!smu_table->ecc_table) {
|
||||
kfree(smu_table->metrics_table);
|
||||
kfree(smu_table->gpu_metrics_table);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -97,6 +97,7 @@
|
||||
#define smu_get_default_config_table_settings(smu, config_table) smu_ppt_funcs(get_default_config_table_settings, -EOPNOTSUPP, smu, config_table)
|
||||
#define smu_set_config_table(smu, config_table) smu_ppt_funcs(set_config_table, -EOPNOTSUPP, smu, config_table)
|
||||
#define smu_init_pptable_microcode(smu) smu_ppt_funcs(init_pptable_microcode, 0, smu)
|
||||
#define smu_notify_rlc_state(smu, en) smu_ppt_funcs(notify_rlc_state, 0, smu, en)
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
@ -4,8 +4,6 @@
|
||||
* Copyright (C) 2017 Broadcom
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_bridge.h>
|
||||
#include <drm/drm_connector.h>
|
||||
@ -21,7 +19,6 @@ struct panel_bridge {
|
||||
struct drm_bridge bridge;
|
||||
struct drm_connector connector;
|
||||
struct drm_panel *panel;
|
||||
struct device_link *link;
|
||||
u32 connector_type;
|
||||
};
|
||||
|
||||
@ -63,24 +60,13 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
|
||||
{
|
||||
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
|
||||
struct drm_connector *connector = &panel_bridge->connector;
|
||||
struct drm_panel *panel = panel_bridge->panel;
|
||||
struct drm_device *drm_dev = bridge->dev;
|
||||
int ret;
|
||||
|
||||
panel_bridge->link = device_link_add(drm_dev->dev, panel->dev,
|
||||
DL_FLAG_STATELESS);
|
||||
if (!panel_bridge->link) {
|
||||
DRM_ERROR("Failed to add device link between %s and %s\n",
|
||||
dev_name(drm_dev->dev), dev_name(panel->dev));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
|
||||
return 0;
|
||||
|
||||
if (!bridge->encoder) {
|
||||
DRM_ERROR("Missing encoder\n");
|
||||
device_link_del(panel_bridge->link);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
@ -92,7 +78,6 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
|
||||
panel_bridge->connector_type);
|
||||
if (ret) {
|
||||
DRM_ERROR("Failed to initialize connector\n");
|
||||
device_link_del(panel_bridge->link);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -115,8 +100,6 @@ static void panel_bridge_detach(struct drm_bridge *bridge)
|
||||
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
|
||||
struct drm_connector *connector = &panel_bridge->connector;
|
||||
|
||||
device_link_del(panel_bridge->link);
|
||||
|
||||
/*
|
||||
* Cleanup the connector if we know it was initialized.
|
||||
*
|
||||
|
@ -1,4 +1,4 @@
|
||||
// SPDX-License-Identifier: GPL-2.0 OR MIT
|
||||
// SPDX-License-Identifier: GPL-2.0-only OR MIT
|
||||
/*
|
||||
* Copyright (c) 2022 Red Hat.
|
||||
*
|
||||
|
@ -278,7 +278,7 @@ void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
|
||||
}
|
||||
EXPORT_SYMBOL(drm_gem_dmabuf_release);
|
||||
|
||||
/*
|
||||
/**
|
||||
* drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
|
||||
* @dev: drm_device to import into
|
||||
* @file_priv: drm file-private structure
|
||||
@ -292,9 +292,9 @@ EXPORT_SYMBOL(drm_gem_dmabuf_release);
|
||||
*
|
||||
* Returns 0 on success or a negative error code on failure.
|
||||
*/
|
||||
static int drm_gem_prime_fd_to_handle(struct drm_device *dev,
|
||||
struct drm_file *file_priv, int prime_fd,
|
||||
uint32_t *handle)
|
||||
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
|
||||
struct drm_file *file_priv, int prime_fd,
|
||||
uint32_t *handle)
|
||||
{
|
||||
struct dma_buf *dma_buf;
|
||||
struct drm_gem_object *obj;
|
||||
@ -360,6 +360,7 @@ out_put:
|
||||
dma_buf_put(dma_buf);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
|
||||
|
||||
int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
@ -408,7 +409,7 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
|
||||
return dmabuf;
|
||||
}
|
||||
|
||||
/*
|
||||
/**
|
||||
* drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
|
||||
* @dev: dev to export the buffer from
|
||||
* @file_priv: drm file-private structure
|
||||
@ -421,10 +422,10 @@ static struct dma_buf *export_and_register_object(struct drm_device *dev,
|
||||
* The actual exporting from GEM object to a dma-buf is done through the
|
||||
* &drm_gem_object_funcs.export callback.
|
||||
*/
|
||||
static int drm_gem_prime_handle_to_fd(struct drm_device *dev,
|
||||
struct drm_file *file_priv, uint32_t handle,
|
||||
uint32_t flags,
|
||||
int *prime_fd)
|
||||
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
|
||||
struct drm_file *file_priv, uint32_t handle,
|
||||
uint32_t flags,
|
||||
int *prime_fd)
|
||||
{
|
||||
struct drm_gem_object *obj;
|
||||
int ret = 0;
|
||||
@ -506,6 +507,7 @@ out_unlock:
|
||||
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
|
||||
|
||||
int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv)
|
||||
@ -864,9 +866,9 @@ EXPORT_SYMBOL(drm_prime_get_contiguous_size);
|
||||
* @obj: GEM object to export
|
||||
* @flags: flags like DRM_CLOEXEC and DRM_RDWR
|
||||
*
|
||||
* This is the implementation of the &drm_gem_object_funcs.export functions
|
||||
* for GEM drivers using the PRIME helpers. It is used as the default for
|
||||
* drivers that do not set their own.
|
||||
* This is the implementation of the &drm_gem_object_funcs.export functions for GEM drivers
|
||||
* using the PRIME helpers. It is used as the default in
|
||||
* drm_gem_prime_handle_to_fd().
|
||||
*/
|
||||
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
|
||||
int flags)
|
||||
@ -962,9 +964,10 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev);
|
||||
* @dev: drm_device to import into
|
||||
* @dma_buf: dma-buf object to import
|
||||
*
|
||||
* This is the implementation of the gem_prime_import functions for GEM
|
||||
* drivers using the PRIME helpers. It is the default for drivers that do
|
||||
* not set their own &drm_driver.gem_prime_import.
|
||||
* This is the implementation of the gem_prime_import functions for GEM drivers
|
||||
* using the PRIME helpers. Drivers can use this as their
|
||||
* &drm_driver.gem_prime_import implementation. It is used as the default
|
||||
* implementation in drm_gem_prime_fd_to_handle().
|
||||
*
|
||||
* Drivers must arrange to call drm_prime_gem_destroy() from their
|
||||
* &drm_gem_object_funcs.free hook when using this function.
|
||||
|
@ -6853,10 +6853,11 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state)
|
||||
if (!intel_crtc_needs_modeset(new_crtc_state))
|
||||
continue;
|
||||
|
||||
intel_pre_plane_update(state, crtc);
|
||||
|
||||
if (!old_crtc_state->hw.active)
|
||||
continue;
|
||||
|
||||
intel_pre_plane_update(state, crtc);
|
||||
intel_crtc_disable_planes(state, crtc);
|
||||
}
|
||||
|
||||
|
@ -6037,8 +6037,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
|
||||
* (eg. Acer Chromebook C710), so we'll check it only if multiple
|
||||
* ports are attempting to use the same AUX CH, according to VBT.
|
||||
*/
|
||||
if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
|
||||
!intel_digital_port_connected(encoder)) {
|
||||
if (intel_bios_dp_has_shared_aux_ch(encoder->devdata)) {
|
||||
/*
|
||||
* If this fails, presume the DPCD answer came
|
||||
* from some other port using the same AUX CH.
|
||||
@ -6046,10 +6045,27 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
|
||||
* FIXME maybe cleaner to check this before the
|
||||
* DPCD read? Would need sort out the VDD handling...
|
||||
*/
|
||||
drm_info(&dev_priv->drm,
|
||||
"[ENCODER:%d:%s] HPD is down, disabling eDP\n",
|
||||
encoder->base.base.id, encoder->base.name);
|
||||
goto out_vdd_off;
|
||||
if (!intel_digital_port_connected(encoder)) {
|
||||
drm_info(&dev_priv->drm,
|
||||
"[ENCODER:%d:%s] HPD is down, disabling eDP\n",
|
||||
encoder->base.base.id, encoder->base.name);
|
||||
goto out_vdd_off;
|
||||
}
|
||||
|
||||
/*
|
||||
* Unfortunately even the HPD based detection fails on
|
||||
* eg. Asus B360M-A (CFL+CNP), so as a last resort fall
|
||||
* back to checking for a VGA branch device. Only do this
|
||||
* on known affected platforms to minimize false positives.
|
||||
*/
|
||||
if (DISPLAY_VER(dev_priv) == 9 && drm_dp_is_branch(intel_dp->dpcd) &&
|
||||
(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK) ==
|
||||
DP_DWN_STRM_PORT_TYPE_ANALOG) {
|
||||
drm_info(&dev_priv->drm,
|
||||
"[ENCODER:%d:%s] VGA converter detected, disabling eDP\n",
|
||||
encoder->base.base.id, encoder->base.name);
|
||||
goto out_vdd_off;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_lock(&dev_priv->drm.mode_config.mutex);
|
||||
|
@ -41,12 +41,15 @@ void intel_engine_add_user(struct intel_engine_cs *engine)
|
||||
llist_add(&engine->uabi_llist, &engine->i915->uabi_engines_llist);
|
||||
}
|
||||
|
||||
static const u8 uabi_classes[] = {
|
||||
#define I915_NO_UABI_CLASS ((u16)(-1))
|
||||
|
||||
static const u16 uabi_classes[] = {
|
||||
[RENDER_CLASS] = I915_ENGINE_CLASS_RENDER,
|
||||
[COPY_ENGINE_CLASS] = I915_ENGINE_CLASS_COPY,
|
||||
[VIDEO_DECODE_CLASS] = I915_ENGINE_CLASS_VIDEO,
|
||||
[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
|
||||
[COMPUTE_CLASS] = I915_ENGINE_CLASS_COMPUTE,
|
||||
[OTHER_CLASS] = I915_NO_UABI_CLASS, /* Not exposed to users, no uabi class. */
|
||||
};
|
||||
|
||||
static int engine_cmp(void *priv, const struct list_head *A,
|
||||
@ -200,6 +203,7 @@ static void engine_rename(struct intel_engine_cs *engine, const char *name, u16
|
||||
|
||||
void intel_engines_driver_register(struct drm_i915_private *i915)
|
||||
{
|
||||
u16 name_instance, other_instance = 0;
|
||||
struct legacy_ring ring = {};
|
||||
struct list_head *it, *next;
|
||||
struct rb_node **p, *prev;
|
||||
@ -216,27 +220,28 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
|
||||
if (intel_gt_has_unrecoverable_error(engine->gt))
|
||||
continue; /* ignore incomplete engines */
|
||||
|
||||
/*
|
||||
* We don't want to expose the GSC engine to the users, but we
|
||||
* still rename it so it is easier to identify in the debug logs
|
||||
*/
|
||||
if (engine->id == GSC0) {
|
||||
engine_rename(engine, "gsc", 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
GEM_BUG_ON(engine->class >= ARRAY_SIZE(uabi_classes));
|
||||
engine->uabi_class = uabi_classes[engine->class];
|
||||
if (engine->uabi_class == I915_NO_UABI_CLASS) {
|
||||
name_instance = other_instance++;
|
||||
} else {
|
||||
GEM_BUG_ON(engine->uabi_class >=
|
||||
ARRAY_SIZE(i915->engine_uabi_class_count));
|
||||
name_instance =
|
||||
i915->engine_uabi_class_count[engine->uabi_class]++;
|
||||
}
|
||||
engine->uabi_instance = name_instance;
|
||||
|
||||
GEM_BUG_ON(engine->uabi_class >=
|
||||
ARRAY_SIZE(i915->engine_uabi_class_count));
|
||||
engine->uabi_instance =
|
||||
i915->engine_uabi_class_count[engine->uabi_class]++;
|
||||
|
||||
/* Replace the internal name with the final user facing name */
|
||||
/*
|
||||
* Replace the internal name with the final user and log facing
|
||||
* name.
|
||||
*/
|
||||
engine_rename(engine,
|
||||
intel_engine_class_repr(engine->class),
|
||||
engine->uabi_instance);
|
||||
name_instance);
|
||||
|
||||
if (engine->uabi_class == I915_NO_UABI_CLASS)
|
||||
continue;
|
||||
|
||||
rb_link_node(&engine->uabi_node, prev, p);
|
||||
rb_insert_color(&engine->uabi_node, &i915->uabi_engines);
|
||||
|
@ -38,7 +38,7 @@ typedef struct PACKED_REGISTRY_TABLE
|
||||
{
|
||||
NvU32 size;
|
||||
NvU32 numEntries;
|
||||
PACKED_REGISTRY_ENTRY entries[0];
|
||||
PACKED_REGISTRY_ENTRY entries[] __counted_by(numEntries);
|
||||
} PACKED_REGISTRY_TABLE;
|
||||
|
||||
#endif
|
||||
|
@ -318,8 +318,9 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
|
||||
(!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
|
||||
continue;
|
||||
|
||||
if (pi < 0)
|
||||
pi = i;
|
||||
/* pick the last one as it will be smallest. */
|
||||
pi = i;
|
||||
|
||||
/* Stop once the buffer is larger than the current page size. */
|
||||
if (*size >= 1ULL << vmm->page[i].shift)
|
||||
break;
|
||||
|
@ -365,10 +365,8 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc)
|
||||
}
|
||||
|
||||
ret = r535_gsp_cmdq_push(gsp, rpc);
|
||||
if (ret) {
|
||||
mutex_unlock(&gsp->cmdq.mutex);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
if (wait) {
|
||||
msg = r535_gsp_msg_recv(gsp, fn, repc);
|
||||
@ -1048,7 +1046,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp)
|
||||
char *strings;
|
||||
int str_offset;
|
||||
int i;
|
||||
size_t rpc_size = sizeof(*rpc) + sizeof(rpc->entries[0]) * NV_GSP_REG_NUM_ENTRIES;
|
||||
size_t rpc_size = struct_size(rpc, entries, NV_GSP_REG_NUM_ENTRIES);
|
||||
|
||||
/* add strings + null terminator */
|
||||
for (i = 0; i < NV_GSP_REG_NUM_ENTRIES; i++)
|
||||
|
@ -1764,6 +1764,7 @@ static const struct panel_desc starry_qfh032011_53g_desc = {
|
||||
.mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
|
||||
MIPI_DSI_MODE_LPM,
|
||||
.init_cmds = starry_qfh032011_53g_init_cmd,
|
||||
.lp11_before_reset = true,
|
||||
};
|
||||
|
||||
static const struct drm_display_mode starry_himax83102_j02_default_mode = {
|
||||
|
@ -1254,9 +1254,9 @@ static int nt36523_probe(struct mipi_dsi_device *dsi)
|
||||
return dev_err_probe(dev, -EPROBE_DEFER, "cannot get secondary DSI host\n");
|
||||
|
||||
pinfo->dsi[1] = mipi_dsi_device_register_full(dsi1_host, info);
|
||||
if (!pinfo->dsi[1]) {
|
||||
if (IS_ERR(pinfo->dsi[1])) {
|
||||
dev_err(dev, "cannot get secondary DSI device\n");
|
||||
return -ENODEV;
|
||||
return PTR_ERR(pinfo->dsi[1]);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1522,6 +1522,15 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
|
||||
{
|
||||
struct qi_desc desc;
|
||||
|
||||
/*
|
||||
* VT-d spec, section 4.3:
|
||||
*
|
||||
* Software is recommended to not submit any Device-TLB invalidation
|
||||
* requests while address remapping hardware is disabled.
|
||||
*/
|
||||
if (!(iommu->gcmd & DMA_GCMD_TE))
|
||||
return;
|
||||
|
||||
if (mask) {
|
||||
addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
|
||||
desc.qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
|
||||
@ -1587,6 +1596,15 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
|
||||
unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
|
||||
struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0};
|
||||
|
||||
/*
|
||||
* VT-d spec, section 4.3:
|
||||
*
|
||||
* Software is recommended to not submit any Device-TLB invalidation
|
||||
* requests while address remapping hardware is disabled.
|
||||
*/
|
||||
if (!(iommu->gcmd & DMA_GCMD_TE))
|
||||
return;
|
||||
|
||||
desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
|
||||
QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
|
||||
QI_DEV_IOTLB_PFSID(pfsid);
|
||||
|
@ -299,7 +299,7 @@ static int iommu_skip_te_disable;
|
||||
#define IDENTMAP_AZALIA 4
|
||||
|
||||
const struct iommu_ops intel_iommu_ops;
|
||||
const struct iommu_dirty_ops intel_dirty_ops;
|
||||
static const struct iommu_dirty_ops intel_dirty_ops;
|
||||
|
||||
static bool translation_pre_enabled(struct intel_iommu *iommu)
|
||||
{
|
||||
@ -2207,6 +2207,8 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
|
||||
attr |= DMA_FL_PTE_DIRTY;
|
||||
}
|
||||
|
||||
domain->has_mappings = true;
|
||||
|
||||
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
|
||||
|
||||
while (nr_pages > 0) {
|
||||
@ -2490,7 +2492,8 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
|
||||
return ret;
|
||||
}
|
||||
|
||||
iommu_enable_pci_caps(info);
|
||||
if (sm_supported(info->iommu) || !domain_type_is_si(info->domain))
|
||||
iommu_enable_pci_caps(info);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -3925,8 +3928,8 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
|
||||
*/
|
||||
static void domain_context_clear(struct device_domain_info *info)
|
||||
{
|
||||
if (!info->iommu || !info->dev || !dev_is_pci(info->dev))
|
||||
return;
|
||||
if (!dev_is_pci(info->dev))
|
||||
domain_context_clear_one(info, info->bus, info->devfn);
|
||||
|
||||
pci_for_each_dma_alias(to_pci_dev(info->dev),
|
||||
&domain_context_clear_one_cb, info);
|
||||
@ -4360,7 +4363,8 @@ static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
|
||||
return true;
|
||||
|
||||
spin_lock_irqsave(&dmar_domain->lock, flags);
|
||||
if (!domain_support_force_snooping(dmar_domain)) {
|
||||
if (!domain_support_force_snooping(dmar_domain) ||
|
||||
(!dmar_domain->use_first_level && dmar_domain->has_mappings)) {
|
||||
spin_unlock_irqrestore(&dmar_domain->lock, flags);
|
||||
return false;
|
||||
}
|
||||
@ -4925,7 +4929,7 @@ static int intel_iommu_read_and_clear_dirty(struct iommu_domain *domain,
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct iommu_dirty_ops intel_dirty_ops = {
|
||||
static const struct iommu_dirty_ops intel_dirty_ops = {
|
||||
.set_dirty_tracking = intel_iommu_set_dirty_tracking,
|
||||
.read_and_clear_dirty = intel_iommu_read_and_clear_dirty,
|
||||
};
|
||||
@ -5073,7 +5077,7 @@ static void quirk_igfx_skip_te_disable(struct pci_dev *dev)
|
||||
ver = (dev->device >> 8) & 0xff;
|
||||
if (ver != 0x45 && ver != 0x46 && ver != 0x4c &&
|
||||
ver != 0x4e && ver != 0x8a && ver != 0x98 &&
|
||||
ver != 0x9a && ver != 0xa7)
|
||||
ver != 0x9a && ver != 0xa7 && ver != 0x7d)
|
||||
return;
|
||||
|
||||
if (risky_device(dev))
|
||||
|
@ -602,6 +602,9 @@ struct dmar_domain {
*/
u8 dirty_tracking:1; /* Dirty tracking is enabled */
u8 nested_parent:1; /* Has other domains nested on it */
u8 has_mappings:1; /* Has mappings configured through
* iommu_map() interface.
*/
spinlock_t lock; /* Protect device tracking lists */
struct list_head devices; /* all devices' list */
@ -216,6 +216,27 @@ static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
rcu_read_unlock();
}
static void intel_flush_svm_all(struct intel_svm *svm)
{
struct device_domain_info *info;
struct intel_svm_dev *sdev;
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list) {
info = dev_iommu_priv_get(sdev->dev);
qi_flush_piotlb(sdev->iommu, sdev->did, svm->pasid, 0, -1UL, 0);
if (info->ats_enabled) {
qi_flush_dev_iotlb_pasid(sdev->iommu, sdev->sid, info->pfsid,
svm->pasid, sdev->qdep,
0, 64 - VTD_PAGE_SHIFT);
quirk_extra_dev_tlb_flush(info, 0, 64 - VTD_PAGE_SHIFT,
svm->pasid, sdev->qdep);
}
}
rcu_read_unlock();
}
/* Pages have been freed at this point */
static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
struct mm_struct *mm,

@ -223,6 +244,11 @@ static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
{
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
if (start == 0 && end == -1UL) {
intel_flush_svm_all(svm);
return;
}
intel_flush_svm_range(svm, start,
(end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
}
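The hunk above special-cases a full address-space teardown (start == 0, end == -1UL) so it takes a single flush-all path instead of a ranged invalidation. The fragment below is only an illustrative sketch of that dispatch pattern; the svm_ctx struct and the range_flush()/flush_all() helpers are hypothetical stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

struct svm_ctx { int pasid; };

/* Hypothetical helpers standing in for the ranged and full flush paths. */
static void range_flush(struct svm_ctx *s, uint64_t start, uint64_t pages)
{
	printf("pasid %d: flush %llu pages from %#llx\n",
	       s->pasid, (unsigned long long)pages, (unsigned long long)start);
}

static void flush_all(struct svm_ctx *s)
{
	printf("pasid %d: flush entire address space\n", s->pasid);
}

/* Dispatch modeled on the hunk: a (0, ~0) range means "everything". */
static void invalidate(struct svm_ctx *s, uint64_t start, uint64_t end)
{
	if (start == 0 && end == UINT64_MAX) {
		flush_all(s);
		return;
	}
	range_flush(s, start, (end - start + 4096 - 1) / 4096);
}

int main(void)
{
	struct svm_ctx s = { .pasid = 7 };

	invalidate(&s, 0x1000, 0x3000);   /* ranged flush */
	invalidate(&s, 0, UINT64_MAX);    /* full teardown */
	return 0;
}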
@ -485,11 +485,12 @@ static void iommu_deinit_device(struct device *dev)
dev_iommu_free(dev);
}
DEFINE_MUTEX(iommu_probe_device_lock);
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
struct iommu_group *group;
static DEFINE_MUTEX(iommu_probe_device_lock);
struct group_device *gdev;
int ret;

@ -502,17 +503,15 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
* probably be able to use device_lock() here to minimise the scope,
* but for now enforcing a simple global ordering is fine.
*/
mutex_lock(&iommu_probe_device_lock);
lockdep_assert_held(&iommu_probe_device_lock);
/* Device is probed already if in a group */
if (dev->iommu_group) {
ret = 0;
goto out_unlock;
}
if (dev->iommu_group)
return 0;
ret = iommu_init_device(dev, ops);
if (ret)
goto out_unlock;
return ret;
group = dev->iommu_group;
gdev = iommu_group_alloc_device(group, dev);

@ -548,7 +547,6 @@ static int __iommu_probe_device(struct device *dev, struct list_head *group_list
list_add_tail(&group->entry, group_list);
}
mutex_unlock(&group->mutex);
mutex_unlock(&iommu_probe_device_lock);
if (dev_is_pci(dev))
iommu_dma_set_pci_32bit_workaround(dev);

@ -562,8 +560,6 @@ err_put_group:
iommu_deinit_device(dev);
mutex_unlock(&group->mutex);
iommu_group_put(group);
out_unlock:
mutex_unlock(&iommu_probe_device_lock);
return ret;
}

@ -573,7 +569,9 @@ int iommu_probe_device(struct device *dev)
const struct iommu_ops *ops;
int ret;
mutex_lock(&iommu_probe_device_lock);
ret = __iommu_probe_device(dev, NULL);
mutex_unlock(&iommu_probe_device_lock);
if (ret)
return ret;

@ -1788,7 +1786,7 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
*/
if (ops->default_domain) {
if (req_type)
return NULL;
return ERR_PTR(-EINVAL);
return ops->default_domain;
}

@ -1797,15 +1795,15 @@ iommu_group_alloc_default_domain(struct iommu_group *group, int req_type)
/* The driver gave no guidance on what type to use, try the default */
dom = __iommu_group_alloc_default_domain(group, iommu_def_domain_type);
if (dom)
if (!IS_ERR(dom))
return dom;
/* Otherwise IDENTITY and DMA_FQ defaults will try DMA */
if (iommu_def_domain_type == IOMMU_DOMAIN_DMA)
return NULL;
return ERR_PTR(-EINVAL);
dom = __iommu_group_alloc_default_domain(group, IOMMU_DOMAIN_DMA);
if (!dom)
return NULL;
if (IS_ERR(dom))
return dom;
pr_warn("Failed to allocate default IOMMU domain of type %u for group %s - Falling back to IOMMU_DOMAIN_DMA",
iommu_def_domain_type, group->name);

@ -1822,7 +1820,9 @@ static int probe_iommu_group(struct device *dev, void *data)
struct list_head *group_list = data;
int ret;
mutex_lock(&iommu_probe_device_lock);
ret = __iommu_probe_device(dev, group_list);
mutex_unlock(&iommu_probe_device_lock);
if (ret == -ENODEV)
ret = 0;

@ -2094,10 +2094,17 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
else if (ops->domain_alloc)
domain = ops->domain_alloc(alloc_type);
else
return NULL;
return ERR_PTR(-EOPNOTSUPP);
/*
* Many domain_alloc ops now return ERR_PTR, make things easier for the
* driver by accepting ERR_PTR from all domain_alloc ops instead of
* having two rules.
*/
if (IS_ERR(domain))
return domain;
if (!domain)
return NULL;
return ERR_PTR(-ENOMEM);
domain->type = type;
/*

@ -2110,9 +2117,14 @@ static struct iommu_domain *__iommu_domain_alloc(const struct iommu_ops *ops,
if (!domain->ops)
domain->ops = ops->default_domain_ops;
if (iommu_is_dma_domain(domain) && iommu_get_dma_cookie(domain)) {
iommu_domain_free(domain);
domain = NULL;
if (iommu_is_dma_domain(domain)) {
int rc;
rc = iommu_get_dma_cookie(domain);
if (rc) {
iommu_domain_free(domain);
return ERR_PTR(rc);
}
}
return domain;
}

@ -2129,10 +2141,15 @@ __iommu_group_domain_alloc(struct iommu_group *group, unsigned int type)
struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus)
{
struct iommu_domain *domain;
if (bus == NULL || bus->iommu_ops == NULL)
return NULL;
return __iommu_domain_alloc(bus->iommu_ops, NULL,
domain = __iommu_domain_alloc(bus->iommu_ops, NULL,
IOMMU_DOMAIN_UNMANAGED);
if (IS_ERR(domain))
return NULL;
return domain;
}
EXPORT_SYMBOL_GPL(iommu_domain_alloc);

@ -3041,8 +3058,8 @@ static int iommu_setup_default_domain(struct iommu_group *group,
return -EINVAL;
dom = iommu_group_alloc_default_domain(group, req_type);
if (!dom)
return -ENODEV;
if (IS_ERR(dom))
return PTR_ERR(dom);
if (group->default_domain == dom)
return 0;

@ -3243,21 +3260,23 @@ void iommu_device_unuse_default_domain(struct device *dev)
static int __iommu_group_alloc_blocking_domain(struct iommu_group *group)
{
struct iommu_domain *domain;
if (group->blocking_domain)
return 0;
group->blocking_domain =
__iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
if (!group->blocking_domain) {
domain = __iommu_group_domain_alloc(group, IOMMU_DOMAIN_BLOCKED);
if (IS_ERR(domain)) {
/*
* For drivers that do not yet understand IOMMU_DOMAIN_BLOCKED
* create an empty domain instead.
*/
group->blocking_domain = __iommu_group_domain_alloc(
group, IOMMU_DOMAIN_UNMANAGED);
if (!group->blocking_domain)
return -EINVAL;
domain = __iommu_group_domain_alloc(group,
IOMMU_DOMAIN_UNMANAGED);
if (IS_ERR(domain))
return PTR_ERR(domain);
}
group->blocking_domain = domain;
return 0;
}
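The iommu core hunks above move the allocation paths from returning NULL on failure to returning encoded error pointers, and the comment in __iommu_domain_alloc spells out why: callers should accept ERR_PTR from every domain_alloc op instead of juggling two failure conventions. Below is a rough, self-contained illustration of that idiom; the tiny ERR_PTR/IS_ERR/PTR_ERR helpers are simplified stand-ins for the kernel's include/linux/err.h, and alloc_domain() is a made-up example, not a kernel function.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel's error-pointer helpers. */
#define MAX_ERRNO	4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct domain { int type; };

/* A made-up allocator: failure is reported as ERR_PTR(-errno), never NULL. */
static struct domain *alloc_domain(int type)
{
	struct domain *d;

	if (type < 0)
		return ERR_PTR(-EINVAL);
	d = malloc(sizeof(*d));
	if (!d)
		return ERR_PTR(-ENOMEM);
	d->type = type;
	return d;
}

int main(void)
{
	struct domain *d = alloc_domain(-1);

	/* One rule for callers: check IS_ERR(), then decode with PTR_ERR(). */
	if (IS_ERR(d)) {
		printf("allocation failed: %ld\n", PTR_ERR(d));
		return 1;
	}
	free(d);
	return 0;
}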
@ -112,16 +112,20 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
const u32 *id)
{
const struct iommu_ops *ops = NULL;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct iommu_fwspec *fwspec;
int err = NO_IOMMU;
if (!master_np)
return NULL;
/* Serialise to make dev->iommu stable under our potential fwspec */
mutex_lock(&iommu_probe_device_lock);
fwspec = dev_iommu_fwspec_get(dev);
if (fwspec) {
if (fwspec->ops)
if (fwspec->ops) {
mutex_unlock(&iommu_probe_device_lock);
return fwspec->ops;
}
/* In the deferred case, start again from scratch */
iommu_fwspec_free(dev);
}

@ -155,6 +159,8 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
fwspec = dev_iommu_fwspec_get(dev);
ops = fwspec->ops;
}
mutex_unlock(&iommu_probe_device_lock);
/*
* If we have reason to believe the IOMMU driver missed the initial
* probe for dev, replay it to get things in order.

@ -191,7 +197,7 @@ iommu_resv_region_get_type(struct device *dev,
if (start == phys->start && end == phys->end)
return IOMMU_RESV_DIRECT;
dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", &phys,
dev_warn(dev, "treating non-direct mapping [%pr] -> [%pap-%pap] as reservation\n", phys,
&start, &end);
return IOMMU_RESV_RESERVED;
}
@ -75,19 +75,6 @@ static ssize_t max_brightness_show(struct device *dev,
}
static DEVICE_ATTR_RO(max_brightness);
static ssize_t color_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const char *color_text = "invalid";
struct led_classdev *led_cdev = dev_get_drvdata(dev);
if (led_cdev->color < LED_COLOR_ID_MAX)
color_text = led_colors[led_cdev->color];
return sysfs_emit(buf, "%s\n", color_text);
}
static DEVICE_ATTR_RO(color);
#ifdef CONFIG_LEDS_TRIGGERS
static BIN_ATTR(trigger, 0644, led_trigger_read, led_trigger_write, 0);
static struct bin_attribute *led_trigger_bin_attrs[] = {

@ -102,7 +89,6 @@ static const struct attribute_group led_trigger_group = {
static struct attribute *led_class_attrs[] = {
&dev_attr_brightness.attr,
&dev_attr_max_brightness.attr,
&dev_attr_color.attr,
NULL,
};
@ -293,16 +293,16 @@ static void btree_complete_write(struct btree *b, struct btree_write *w)
w->journal = NULL;
}
static void btree_node_write_unlock(struct closure *cl)
static CLOSURE_CALLBACK(btree_node_write_unlock)
{
struct btree *b = container_of(cl, struct btree, io);
closure_type(b, struct btree, io);
up(&b->io_mutex);
}
static void __btree_node_write_done(struct closure *cl)
static CLOSURE_CALLBACK(__btree_node_write_done)
{
struct btree *b = container_of(cl, struct btree, io);
closure_type(b, struct btree, io);
struct btree_write *w = btree_prev_write(b);
bch_bbio_free(b->bio, b->c);

@ -315,12 +315,12 @@ static void __btree_node_write_done(struct closure *cl)
closure_return_with_destructor(cl, btree_node_write_unlock);
}
static void btree_node_write_done(struct closure *cl)
static CLOSURE_CALLBACK(btree_node_write_done)
{
struct btree *b = container_of(cl, struct btree, io);
closure_type(b, struct btree, io);
bio_free_pages(b->bio);
__btree_node_write_done(cl);
__btree_node_write_done(&cl->work);
}
static void btree_node_write_endio(struct bio *bio)

@ -1522,7 +1522,7 @@ out_nocoalesce:
bch_keylist_free(&keylist);
for (i = 0; i < nodes; i++)
if (!IS_ERR(new_nodes[i])) {
if (!IS_ERR_OR_NULL(new_nodes[i])) {
btree_node_free(new_nodes[i]);
rw_unlock(true, new_nodes[i]);
}
@ -723,11 +723,11 @@ static void journal_write_endio(struct bio *bio)
closure_put(&w->c->journal.io);
}
static void journal_write(struct closure *cl);
static CLOSURE_CALLBACK(journal_write);
static void journal_write_done(struct closure *cl)
static CLOSURE_CALLBACK(journal_write_done)
{
struct journal *j = container_of(cl, struct journal, io);
closure_type(j, struct journal, io);
struct journal_write *w = (j->cur == j->w)
? &j->w[1]
: &j->w[0];

@ -736,19 +736,19 @@ static void journal_write_done(struct closure *cl)
continue_at_nobarrier(cl, journal_write, bch_journal_wq);
}
static void journal_write_unlock(struct closure *cl)
static CLOSURE_CALLBACK(journal_write_unlock)
__releases(&c->journal.lock)
{
struct cache_set *c = container_of(cl, struct cache_set, journal.io);
closure_type(c, struct cache_set, journal.io);
c->journal.io_in_flight = 0;
spin_unlock(&c->journal.lock);
}
static void journal_write_unlocked(struct closure *cl)
static CLOSURE_CALLBACK(journal_write_unlocked)
__releases(c->journal.lock)
{
struct cache_set *c = container_of(cl, struct cache_set, journal.io);
closure_type(c, struct cache_set, journal.io);
struct cache *ca = c->cache;
struct journal_write *w = c->journal.cur;
struct bkey *k = &c->journal.key;

@ -823,12 +823,12 @@ static void journal_write_unlocked(struct closure *cl)
continue_at(cl, journal_write_done, NULL);
}
static void journal_write(struct closure *cl)
static CLOSURE_CALLBACK(journal_write)
{
struct cache_set *c = container_of(cl, struct cache_set, journal.io);
closure_type(c, struct cache_set, journal.io);
spin_lock(&c->journal.lock);
journal_write_unlocked(cl);
journal_write_unlocked(&cl->work);
}
static void journal_try_write(struct cache_set *c)
@ -35,16 +35,16 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
/* Moving GC - IO loop */
static void moving_io_destructor(struct closure *cl)
static CLOSURE_CALLBACK(moving_io_destructor)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
closure_type(io, struct moving_io, cl);
kfree(io);
}
static void write_moving_finish(struct closure *cl)
static CLOSURE_CALLBACK(write_moving_finish)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
closure_type(io, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
bio_free_pages(bio);

@ -89,9 +89,9 @@ static void moving_init(struct moving_io *io)
bch_bio_map(bio, NULL);
}
static void write_moving(struct closure *cl)
static CLOSURE_CALLBACK(write_moving)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
closure_type(io, struct moving_io, cl);
struct data_insert_op *op = &io->op;
if (!op->status) {

@ -113,9 +113,9 @@ static void write_moving(struct closure *cl)
continue_at(cl, write_moving_finish, op->wq);
}
static void read_moving_submit(struct closure *cl)
static CLOSURE_CALLBACK(read_moving_submit)
{
struct moving_io *io = container_of(cl, struct moving_io, cl);
closure_type(io, struct moving_io, cl);
struct bio *bio = &io->bio.bio;
bch_submit_bbio(bio, io->op.c, &io->w->key, 0);
@ -25,7 +25,7 @@
struct kmem_cache *bch_search_cache;
static void bch_data_insert_start(struct closure *cl);
static CLOSURE_CALLBACK(bch_data_insert_start);
static unsigned int cache_mode(struct cached_dev *dc)
{

@ -55,9 +55,9 @@ static void bio_csum(struct bio *bio, struct bkey *k)
/* Insert data into cache */
static void bch_data_insert_keys(struct closure *cl)
static CLOSURE_CALLBACK(bch_data_insert_keys)
{
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
closure_type(op, struct data_insert_op, cl);
atomic_t *journal_ref = NULL;
struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
int ret;

@ -136,9 +136,9 @@ out:
continue_at(cl, bch_data_insert_keys, op->wq);
}
static void bch_data_insert_error(struct closure *cl)
static CLOSURE_CALLBACK(bch_data_insert_error)
{
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
closure_type(op, struct data_insert_op, cl);
/*
* Our data write just errored, which means we've got a bunch of keys to

@ -163,7 +163,7 @@ static void bch_data_insert_error(struct closure *cl)
op->insert_keys.top = dst;
bch_data_insert_keys(cl);
bch_data_insert_keys(&cl->work);
}
static void bch_data_insert_endio(struct bio *bio)

@ -184,9 +184,9 @@ static void bch_data_insert_endio(struct bio *bio)
bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}
static void bch_data_insert_start(struct closure *cl)
static CLOSURE_CALLBACK(bch_data_insert_start)
{
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
closure_type(op, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
if (op->bypass)

@ -305,16 +305,16 @@ err:
* If op->bypass is true, instead of inserting the data it invalidates the
* region of the cache represented by op->bio and op->inode.
*/
void bch_data_insert(struct closure *cl)
CLOSURE_CALLBACK(bch_data_insert)
{
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
closure_type(op, struct data_insert_op, cl);
trace_bcache_write(op->c, op->inode, op->bio,
op->writeback, op->bypass);
bch_keylist_init(&op->insert_keys);
bio_get(op->bio);
bch_data_insert_start(cl);
bch_data_insert_start(&cl->work);
}
/*

@ -575,9 +575,9 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
return n == bio ? MAP_DONE : MAP_CONTINUE;
}
static void cache_lookup(struct closure *cl)
static CLOSURE_CALLBACK(cache_lookup)
{
struct search *s = container_of(cl, struct search, iop.cl);
closure_type(s, struct search, iop.cl);
struct bio *bio = &s->bio.bio;
struct cached_dev *dc;
int ret;

@ -698,9 +698,9 @@ static void do_bio_hook(struct search *s,
bio_cnt_set(bio, 3);
}
static void search_free(struct closure *cl)
static CLOSURE_CALLBACK(search_free)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
atomic_dec(&s->iop.c->search_inflight);

@ -749,20 +749,20 @@ static inline struct search *search_alloc(struct bio *bio,
/* Cached devices */
static void cached_dev_bio_complete(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_bio_complete)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
cached_dev_put(dc);
search_free(cl);
search_free(&cl->work);
}
/* Process reads */
static void cached_dev_read_error_done(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_read_error_done)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
if (s->iop.replace_collision)
bch_mark_cache_miss_collision(s->iop.c, s->d);

@ -770,12 +770,12 @@ static void cached_dev_read_error_done(struct closure *cl)
if (s->iop.bio)
bio_free_pages(s->iop.bio);
cached_dev_bio_complete(cl);
cached_dev_bio_complete(&cl->work);
}
static void cached_dev_read_error(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_read_error)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct bio *bio = &s->bio.bio;
/*

@ -801,9 +801,9 @@ static void cached_dev_read_error(struct closure *cl)
continue_at(cl, cached_dev_read_error_done, NULL);
}
static void cached_dev_cache_miss_done(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_cache_miss_done)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct bcache_device *d = s->d;
if (s->iop.replace_collision)

@ -812,13 +812,13 @@ static void cached_dev_cache_miss_done(struct closure *cl)
if (s->iop.bio)
bio_free_pages(s->iop.bio);
cached_dev_bio_complete(cl);
cached_dev_bio_complete(&cl->work);
closure_put(&d->cl);
}
static void cached_dev_read_done(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_read_done)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
/*

@ -858,9 +858,9 @@ static void cached_dev_read_done(struct closure *cl)
continue_at(cl, cached_dev_cache_miss_done, NULL);
}
static void cached_dev_read_done_bh(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_read_done_bh)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
bch_mark_cache_accounting(s->iop.c, s->d,

@ -955,13 +955,13 @@ static void cached_dev_read(struct cached_dev *dc, struct search *s)
/* Process writes */
static void cached_dev_write_complete(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_write_complete)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
up_read_non_owner(&dc->writeback_lock);
cached_dev_bio_complete(cl);
cached_dev_bio_complete(&cl->work);
}
static void cached_dev_write(struct cached_dev *dc, struct search *s)

@ -1048,9 +1048,9 @@ insert_data:
continue_at(cl, cached_dev_write_complete, NULL);
}
static void cached_dev_nodata(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_nodata)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
struct bio *bio = &s->bio.bio;
if (s->iop.flush_journal)

@ -1265,9 +1265,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
return MAP_CONTINUE;
}
static void flash_dev_nodata(struct closure *cl)
static CLOSURE_CALLBACK(flash_dev_nodata)
{
struct search *s = container_of(cl, struct search, cl);
closure_type(s, struct search, cl);
if (s->iop.flush_journal)
bch_journal_meta(s->iop.c, cl);
@ -34,7 +34,7 @@ struct data_insert_op {
};
unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);
CLOSURE_CALLBACK(bch_data_insert);
void bch_cached_dev_request_init(struct cached_dev *dc);
void cached_dev_submit_bio(struct bio *bio);
@ -327,9 +327,9 @@ static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
submit_bio(bio);
}
static void bch_write_bdev_super_unlock(struct closure *cl)
static CLOSURE_CALLBACK(bch_write_bdev_super_unlock)
{
struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
closure_type(dc, struct cached_dev, sb_write);
up(&dc->sb_write_mutex);
}

@ -363,9 +363,9 @@ static void write_super_endio(struct bio *bio)
closure_put(&ca->set->sb_write);
}
static void bcache_write_super_unlock(struct closure *cl)
static CLOSURE_CALLBACK(bcache_write_super_unlock)
{
struct cache_set *c = container_of(cl, struct cache_set, sb_write);
closure_type(c, struct cache_set, sb_write);
up(&c->sb_write_mutex);
}

@ -407,9 +407,9 @@ static void uuid_endio(struct bio *bio)
closure_put(cl);
}
static void uuid_io_unlock(struct closure *cl)
static CLOSURE_CALLBACK(uuid_io_unlock)
{
struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
closure_type(c, struct cache_set, uuid_write);
up(&c->uuid_write_mutex);
}

@ -1344,9 +1344,9 @@ void bch_cached_dev_release(struct kobject *kobj)
module_put(THIS_MODULE);
}
static void cached_dev_free(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_free)
{
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
closure_type(dc, struct cached_dev, disk.cl);
if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
cancel_writeback_rate_update_dwork(dc);

@ -1378,9 +1378,9 @@ static void cached_dev_free(struct closure *cl)
kobject_put(&dc->disk.kobj);
}
static void cached_dev_flush(struct closure *cl)
static CLOSURE_CALLBACK(cached_dev_flush)
{
struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
closure_type(dc, struct cached_dev, disk.cl);
struct bcache_device *d = &dc->disk;
mutex_lock(&bch_register_lock);

@ -1499,9 +1499,9 @@ void bch_flash_dev_release(struct kobject *kobj)
kfree(d);
}
static void flash_dev_free(struct closure *cl)
static CLOSURE_CALLBACK(flash_dev_free)
{
struct bcache_device *d = container_of(cl, struct bcache_device, cl);
closure_type(d, struct bcache_device, cl);
mutex_lock(&bch_register_lock);
atomic_long_sub(bcache_dev_sectors_dirty(d),

@ -1512,9 +1512,9 @@ static void flash_dev_free(struct closure *cl)
kobject_put(&d->kobj);
}
static void flash_dev_flush(struct closure *cl)
static CLOSURE_CALLBACK(flash_dev_flush)
{
struct bcache_device *d = container_of(cl, struct bcache_device, cl);
closure_type(d, struct bcache_device, cl);
mutex_lock(&bch_register_lock);
bcache_device_unlink(d);

@ -1670,9 +1670,9 @@ void bch_cache_set_release(struct kobject *kobj)
module_put(THIS_MODULE);
}
static void cache_set_free(struct closure *cl)
static CLOSURE_CALLBACK(cache_set_free)
{
struct cache_set *c = container_of(cl, struct cache_set, cl);
closure_type(c, struct cache_set, cl);
struct cache *ca;
debugfs_remove(c->debug);

@ -1711,9 +1711,9 @@ static void cache_set_free(struct closure *cl)
kobject_put(&c->kobj);
}
static void cache_set_flush(struct closure *cl)
static CLOSURE_CALLBACK(cache_set_flush)
{
struct cache_set *c = container_of(cl, struct cache_set, caching);
closure_type(c, struct cache_set, caching);
struct cache *ca = c->cache;
struct btree *b;

@ -1808,9 +1808,9 @@ static void conditional_stop_bcache_device(struct cache_set *c,
}
}
static void __cache_set_unregister(struct closure *cl)
static CLOSURE_CALLBACK(__cache_set_unregister)
{
struct cache_set *c = container_of(cl, struct cache_set, caching);
closure_type(c, struct cache_set, caching);
struct cached_dev *dc;
struct bcache_device *d;
size_t i;
@ -341,16 +341,16 @@ static void dirty_init(struct keybuf_key *w)
bch_bio_map(bio, NULL);
}
static void dirty_io_destructor(struct closure *cl)
static CLOSURE_CALLBACK(dirty_io_destructor)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
closure_type(io, struct dirty_io, cl);
kfree(io);
}
static void write_dirty_finish(struct closure *cl)
static CLOSURE_CALLBACK(write_dirty_finish)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
closure_type(io, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;

@ -400,9 +400,9 @@ static void dirty_endio(struct bio *bio)
closure_put(&io->cl);
}
static void write_dirty(struct closure *cl)
static CLOSURE_CALLBACK(write_dirty)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
closure_type(io, struct dirty_io, cl);
struct keybuf_key *w = io->bio.bi_private;
struct cached_dev *dc = io->dc;

@ -462,9 +462,9 @@ static void read_dirty_endio(struct bio *bio)
dirty_endio(bio);
}
static void read_dirty_submit(struct closure *cl)
static CLOSURE_CALLBACK(read_dirty_submit)
{
struct dirty_io *io = container_of(cl, struct dirty_io, cl);
closure_type(io, struct dirty_io, cl);
closure_bio_submit(io->dc->disk.c, &io->bio, cl);
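The bcache hunks above all apply one mechanical conversion: closure callbacks stop taking a struct closure * directly, are declared through CLOSURE_CALLBACK(), recover their enclosing object with closure_type(), and continuations are now invoked with &cl->work. The fragment below is only a rough sketch of that shape, with simplified, locally defined macros and a toy moving_io-style struct; it is modeled on what the diff shows, not copied from the kernel's closure API.

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for the kernel types involved in the conversion. */
struct work_struct { int pending; };
struct closure { struct work_struct work; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified versions of the macros the conversion relies on (assumed shape). */
#define CLOSURE_CALLBACK(name)	void name(struct work_struct *ws)
#define closure_type(name, type, member)				\
	struct closure *cl = container_of(ws, struct closure, work);	\
	type *name = container_of(cl, type, member)

struct moving_io {
	struct closure cl;
	int sectors;
};

/* Old style would have been: static void write_moving(struct closure *cl). */
static CLOSURE_CALLBACK(write_moving)
{
	closure_type(io, struct moving_io, cl);

	printf("writing %d sectors (closure %p)\n", io->sectors, (void *)cl);
}

int main(void)
{
	struct moving_io io = { .sectors = 8 };

	/* Callers now hand over the embedded work item, i.e. &cl->work. */
	write_moving(&io.cl.work);
	return 0;
}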
Some files were not shown because too many files have changed in this diff.