KVM/riscv fixes for 6.6, take #1

- Fix KVM_GET_REG_LIST API for ISA_EXT registers
- Fix reading ISA_EXT register of a missing extension
- Fix ISA_EXT register handling in get-reg-list test
- Fix filtering of AIA registers in get-reg-list test
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEZdn75s5e6LHDQ+f/rUjsVaLHLAcFAmUMDssACgkQrUjsVaLH
 LAckSg/+IZ5DPvPs81rUpL3i1Z5SrK4jXWL2zyvMIksBEYmFD2NPNvinVZ4Sxv6u
 IzWNKJcAp4nA/+qPGPLXCURDe1W6PCDvO4SShjYm2UkPtNIfiskmFr3MunXZysgm
 I7USJgj9ev+46yfOnwlYrwpZ8sQk7Z6nLTI/6Osk4Q7Sm0Vjoobh6krub7LNjeKQ
 y6p+vxrXj+Owc5H9bgl0wAi6GOmOJKAM+cZU5DygQxjOgiUgNbOzsVgbLDTvExNq
 gISUU4PoAO7/U1NKEaaopbe7C0KNQHTnegedtXsDzg7WTBah77/MNBt4snbfiR27
 6rODklZlG/kAGIHdVtYC+zf8AfPqvGTIT8SLGmzQlyVlHujFBGn0L41NmMzW+EeA
 7UpfUk8vPiiGhefBE+Ml3yqiReogo+aRhL1mZoI39rPusd7DMnwx97KpBlAcYuTI
 PTgqycIMRmq2dSCHya+nrOVpwwV3Qx4G8Alpq1jOa7XDMeGMj4h521NQHjWckIK2
 IJ2a0RtzB10+Z91nLV+amdAno326AnxJC7dR26O6uqVSPJy/nHE2GAc49gFKeWq6
 QmzgzY1sU2Y02/TM8miyKSl3i+bpZSIPzXCKlOm1TowBKO+sfJzn/yMon9sVaVhb
 4Wjgg3vgE74y9FVsL4JXR/PfrZH5Aq77J1R+/pMtsNTtVYrt1Sk=
 =ytFs
 -----END PGP SIGNATURE-----

Merge tag 'kvm-riscv-fixes-6.6-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv fixes for 6.6, take #1

- Fix KVM_GET_REG_LIST API for ISA_EXT registers
- Fix reading ISA_EXT register of a missing extension
- Fix ISA_EXT register handling in get-reg-list test
- Fix filtering of AIA registers in get-reg-list test
Paolo Bonzini 2023-09-23 05:35:55 -04:00
commit 5804c19b80
350 changed files with 2326 additions and 1246 deletions


@ -37,7 +37,6 @@ For more information please refer to the documentation site or wiki
https://btrfs.readthedocs.io
https://btrfs.wiki.kernel.org
that maintains information about administration tasks, frequently asked
questions, use cases, mount options, comprehensible changelogs, features,


@ -251,6 +251,7 @@ an involved disclosed party. The current ambassadors list:
IBM Z Christian Borntraeger <borntraeger@de.ibm.com>
Intel Tony Luck <tony.luck@intel.com>
Qualcomm Trilok Soni <tsoni@codeaurora.org>
RISC-V Palmer Dabbelt <palmer@dabbelt.com>
Samsung Javier González <javier.gonz@samsung.com>
Microsoft James Morris <jamorris@linux.microsoft.com>


@ -1855,7 +1855,7 @@ F: Documentation/devicetree/bindings/phy/amlogic*
F: arch/arm/boot/dts/amlogic/
F: arch/arm/mach-meson/
F: arch/arm64/boot/dts/amlogic/
F: drivers/genpd/amlogic/
F: drivers/pmdomain/amlogic/
F: drivers/mmc/host/meson*
F: drivers/phy/amlogic/
F: drivers/pinctrl/meson/
@ -1918,7 +1918,7 @@ F: drivers/bluetooth/hci_bcm4377.c
F: drivers/clk/clk-apple-nco.c
F: drivers/cpufreq/apple-soc-cpufreq.c
F: drivers/dma/apple-admac.c
F: drivers/genpd/apple/
F: drivers/pmdomain/apple/
F: drivers/i2c/busses/i2c-pasemi-core.c
F: drivers/i2c/busses/i2c-pasemi-platform.c
F: drivers/iommu/apple-dart.c
@ -2435,7 +2435,7 @@ F: arch/arm/mach-ux500/
F: drivers/clk/clk-nomadik.c
F: drivers/clocksource/clksrc-dbx500-prcmu.c
F: drivers/dma/ste_dma40*
F: drivers/genpd/st/ste-ux500-pm-domain.c
F: drivers/pmdomain/st/ste-ux500-pm-domain.c
F: drivers/hwspinlock/u8500_hsem.c
F: drivers/i2c/busses/i2c-nomadik.c
F: drivers/iio/adc/ab8500-gpadc.c
@ -2598,7 +2598,7 @@ F: arch/arm/include/debug/renesas-scif.S
F: arch/arm/mach-shmobile/
F: arch/arm64/boot/dts/renesas/
F: arch/riscv/boot/dts/renesas/
F: drivers/genpd/renesas/
F: drivers/pmdomain/renesas/
F: drivers/soc/renesas/
F: include/linux/soc/renesas/
K: \brenesas,
@ -4026,7 +4026,7 @@ F: arch/mips/kernel/*bmips*
F: drivers/irqchip/irq-bcm63*
F: drivers/irqchip/irq-bcm7*
F: drivers/irqchip/irq-brcmstb*
F: drivers/genpd/bcm/bcm63xx-power.c
F: drivers/pmdomain/bcm/bcm63xx-power.c
F: include/linux/bcm963xx_nvram.h
F: include/linux/bcm963xx_tag.h
@ -4248,7 +4248,7 @@ R: Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
L: linux-pm@vger.kernel.org
S: Maintained
T: git https://github.com/broadcom/stblinux.git
F: drivers/genpd/bcm/bcm-pmb.c
F: drivers/pmdomain/bcm/bcm-pmb.c
F: include/dt-bindings/soc/bcm-pmb.h
BROADCOM SPECIFIC AMBA DRIVER (BCMA)
@ -4378,7 +4378,6 @@ M: David Sterba <dsterba@suse.com>
L: linux-btrfs@vger.kernel.org
S: Maintained
W: https://btrfs.readthedocs.io
W: https://btrfs.wiki.kernel.org/
Q: https://patchwork.kernel.org/project/linux-btrfs/list/
C: irc://irc.libera.chat/btrfs
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux.git
@ -8729,7 +8728,7 @@ M: Ulf Hansson <ulf.hansson@linaro.org>
L: linux-pm@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/linux-pm.git
F: drivers/genpd/
F: drivers/pmdomain/
GENERIC RESISTIVE TOUCHSCREEN ADC DRIVER
M: Eugen Hristev <eugen.hristev@microchip.com>
@ -17680,7 +17679,7 @@ L: linux-pm@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/power/avs/qcom,cpr.yaml
F: drivers/genpd/qcom/cpr.c
F: drivers/pmdomain/qcom/cpr.c
QUALCOMM CPUFREQ DRIVER MSM8996/APQ8096
M: Ilia Lin <ilia.lin@kernel.org>
@ -20514,7 +20513,7 @@ STARFIVE JH71XX PMU CONTROLLER DRIVER
M: Walker Chen <walker.chen@starfivetech.com>
S: Supported
F: Documentation/devicetree/bindings/power/starfive*
F: drivers/genpd/starfive/jh71xx-pmu.c
F: drivers/pmdomain/starfive/jh71xx-pmu.c
F: include/dt-bindings/power/starfive,jh7110-pmu.h
STARFIVE SOC DRIVERS
@ -21339,7 +21338,7 @@ F: drivers/irqchip/irq-ti-sci-inta.c
F: drivers/irqchip/irq-ti-sci-intr.c
F: drivers/reset/reset-ti-sci.c
F: drivers/soc/ti/ti_sci_inta_msi.c
F: drivers/genpd/ti/ti_sci_pm_domains.c
F: drivers/pmdomain/ti/ti_sci_pm_domains.c
F: include/dt-bindings/soc/ti,sci_pm_domain.h
F: include/linux/soc/ti/ti_sci_inta_msi.h
F: include/linux/soc/ti/ti_sci_protocol.h
@ -21581,7 +21580,7 @@ L: linux-kernel@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
F: drivers/genpd/ti/omap_prm.c
F: drivers/pmdomain/ti/omap_prm.c
F: drivers/soc/ti/*
TI LM49xxx FAMILY ASoC CODEC DRIVERS


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -37,6 +37,7 @@ extern int split_tlb;
extern int dcache_stride;
extern int icache_stride;
extern struct pdc_cache_info cache_info;
extern struct pdc_btlb_info btlb_info;
void parisc_setup_cache_timing(void);
#define pdtlb(sr, addr) asm volatile("pdtlb 0(%%sr%0,%1)" \


@ -1,8 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ASM_PARISC_MCKINLEY_H
#define ASM_PARISC_MCKINLEY_H
/* declared in arch/parisc/kernel/setup.c */
extern struct proc_dir_entry * proc_mckinley_root;
#endif /*ASM_PARISC_MCKINLEY_H*/


@ -44,10 +44,11 @@ int pdc_model_capabilities(unsigned long *capabilities);
int pdc_model_platform_info(char *orig_prod_num, char *current_prod_num, char *serial_no);
int pdc_cache_info(struct pdc_cache_info *cache);
int pdc_spaceid_bits(unsigned long *space_bits);
#ifndef CONFIG_PA20
int pdc_btlb_info(struct pdc_btlb_info *btlb);
int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
unsigned long entry_info, unsigned long slot);
int pdc_btlb_purge_all(void);
int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path);
#endif /* !CONFIG_PA20 */
int pdc_pim_toc11(struct pdc_toc_pim_11 *ret);
int pdc_pim_toc20(struct pdc_toc_pim_20 *ret);
int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa);


@ -310,6 +310,7 @@ extern void do_syscall_trace_exit(struct pt_regs *);
struct seq_file;
extern void early_trap_init(void);
extern void collect_boot_cpu_data(void);
extern void btlb_init_per_cpu(void);
extern int show_cpuinfo (struct seq_file *m, void *v);
/* driver code in driver/parisc */


@ -29,7 +29,7 @@
struct ioc {
void __iomem *ioc_hpa; /* I/O MMU base address */
char *res_map; /* resource map, bit == pdir entry */
u64 *pdir_base; /* physical base address */
__le64 *pdir_base; /* physical base address */
unsigned long ibase; /* pdir IOV Space base - shared w/lba_pci */
unsigned long imask; /* pdir IOV Space mask - shared w/lba_pci */
#ifdef ZX1_SUPPORT
@ -86,6 +86,9 @@ struct sba_device {
struct ioc ioc[MAX_IOC];
};
/* list of SBA's in system, see drivers/parisc/sba_iommu.c */
extern struct sba_device *sba_list;
#define ASTRO_RUNWAY_PORT 0x582
#define IKE_MERCED_PORT 0x803
#define REO_MERCED_PORT 0x804
@ -110,7 +113,7 @@ static inline int IS_PLUTO(struct parisc_device *d) {
#define SBA_PDIR_VALID_BIT 0x8000000000000000ULL
#define SBA_AGPGART_COOKIE 0x0000badbadc0ffeeULL
#define SBA_AGPGART_COOKIE (__force __le64) 0x0000badbadc0ffeeULL
#define SBA_FUNC_ID 0x0000 /* function id */
#define SBA_FCLASS 0x0008 /* function class, bist, header, rev... */


@ -2,6 +2,21 @@
#ifndef _ASMPARISC_SHMPARAM_H
#define _ASMPARISC_SHMPARAM_H
/*
* PA-RISC uses virtually indexed & physically tagged (VIPT) caches
* which has strict requirements when two pages to the same physical
* address are accessed through different mappings. Read the section
* "Address Aliasing" in the arch docs for more detail:
* PA-RISC 1.1 (page 3-6):
* https://parisc.wiki.kernel.org/images-parisc/6/68/Pa11_acd.pdf
* PA-RISC 2.0 (page F-5):
* https://parisc.wiki.kernel.org/images-parisc/7/73/Parisc2.0.pdf
*
* For Linux we allow kernel and userspace to map pages on page size
* granularity (SHMLBA) but have to ensure that, if two pages are
* mapped to the same physical address, the virtual and physical
* addresses modulo SHM_COLOUR are identical.
*/
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#define SHM_COLOUR 0x00400000 /* shared mappings colouring */
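The aliasing rule described above is easy to state in code. A minimal illustrative helper (not part of the patch; the function name is made up) that tests whether two virtual addresses of the same physical page may be mapped coherently:

/* Illustrative sketch: SHM_COLOUR is a power of two, so congruence
 * modulo SHM_COLOUR reduces to a mask comparison. */
static inline int shm_addrs_congruent(unsigned long a, unsigned long b)
{
	return ((a ^ b) & (SHM_COLOUR - 1)) == 0;
}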


@ -275,6 +275,8 @@ int main(void)
* and kernel data on physical huge pages */
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
#elif !defined(CONFIG_64BIT)
DEFINE(HUGEPAGE_SIZE, 4*1024*1024);
#else
DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
#endif


@ -58,7 +58,7 @@ int pa_serialize_tlb_flushes __ro_after_init;
struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
struct pdc_btlb_info btlb_info __ro_after_init;
#endif
DEFINE_STATIC_KEY_TRUE(parisc_has_cache);
@ -264,12 +264,6 @@ parisc_cache_init(void)
icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE
#ifndef CONFIG_PA20
if (pdc_btlb_info(&btlb_info) < 0) {
memset(&btlb_info, 0, sizeof btlb_info);
}
#endif
if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
PDC_MODEL_NVA_UNSUPPORTED) {
printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");


@ -925,9 +925,9 @@ static __init void qemu_header(void)
pr_info("#define PARISC_MODEL \"%s\"\n\n",
boot_cpu_data.pdc.sys_model_name);
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
pr_info("#define PARISC_PDC_MODEL 0x%lx, 0x%lx, 0x%lx, "
"0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx\n\n",
#define p ((unsigned long *)&boot_cpu_data.pdc.model)
p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]);
#undef p


@ -687,7 +687,6 @@ int pdc_spaceid_bits(unsigned long *space_bits)
return retval;
}
#ifndef CONFIG_PA20
/**
* pdc_btlb_info - Return block TLB information.
* @btlb: The return buffer.
@ -696,18 +695,51 @@ int pdc_spaceid_bits(unsigned long *space_bits)
*/
int pdc_btlb_info(struct pdc_btlb_info *btlb)
{
int retval;
int retval;
unsigned long flags;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
memcpy(btlb, pdc_result, sizeof(*btlb));
spin_unlock_irqrestore(&pdc_lock, flags);
if (IS_ENABLED(CONFIG_PA20))
return PDC_BAD_PROC;
if(retval < 0) {
btlb->max_size = 0;
}
return retval;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INFO, __pa(pdc_result), 0);
memcpy(btlb, pdc_result, sizeof(*btlb));
spin_unlock_irqrestore(&pdc_lock, flags);
if(retval < 0) {
btlb->max_size = 0;
}
return retval;
}
int pdc_btlb_insert(unsigned long long vpage, unsigned long physpage, unsigned long len,
unsigned long entry_info, unsigned long slot)
{
int retval;
unsigned long flags;
if (IS_ENABLED(CONFIG_PA20))
return PDC_BAD_PROC;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_INSERT, (unsigned long) (vpage >> 32),
(unsigned long) vpage, physpage, len, entry_info, slot);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
int pdc_btlb_purge_all(void)
{
int retval;
unsigned long flags;
if (IS_ENABLED(CONFIG_PA20))
return PDC_BAD_PROC;
spin_lock_irqsave(&pdc_lock, flags);
retval = mem_pdc_call(PDC_BLOCK_TLB, PDC_BTLB_PURGE_ALL);
spin_unlock_irqrestore(&pdc_lock, flags);
return retval;
}
/**
@ -728,6 +760,9 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address,
int retval;
unsigned long flags;
if (IS_ENABLED(CONFIG_PA20))
return PDC_BAD_PROC;
spin_lock_irqsave(&pdc_lock, flags);
memcpy(pdc_result2, mod_path, sizeof(*mod_path));
retval = mem_pdc_call(PDC_MEM_MAP, PDC_MEM_MAP_HPA, __pa(pdc_result),
@ -737,7 +772,6 @@ int pdc_mem_map_hpa(struct pdc_memory_map *address,
return retval;
}
#endif /* !CONFIG_PA20 */
/**
* pdc_lan_station_id - Get the LAN address.


@ -180,10 +180,10 @@ $pgt_fill_loop:
std %dp,0x18(%r10)
#endif
#ifdef CONFIG_64BIT
/* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
#ifdef CONFIG_64BIT
/* Get PDCE_PROC for monarch CPU. */
ldw MEM_PDC_LO(%r0),%r3
ldw MEM_PDC_HI(%r0),%r10
depd %r10, 31, 32, %r3 /* move to upper word */
@ -269,7 +269,17 @@ stext_pdc_ret:
tovirt_r1 %r6
mtctl %r6,%cr30 /* restore task thread info */
#endif
#ifndef CONFIG_64BIT
/* clear all BTLBs */
ldi PDC_BLOCK_TLB,%arg0
load32 PA(stext_pdc_btlb_ret), %rp
ldw MEM_PDC_LO(%r0),%r3
bv (%r3)
ldi PDC_BTLB_PURGE_ALL,%arg1
stext_pdc_btlb_ret:
#endif
/* PARANOID: clear user scratch/user space SR's */
mtsp %r0,%sr0
mtsp %r0,%sr1


@ -365,7 +365,7 @@ union irq_stack_union {
volatile unsigned int lock[1];
};
DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
static DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
.slock = { 1,1,1,1 },
};
#endif


@ -368,6 +368,8 @@ int init_per_cpu(int cpunum)
/* FUTURE: Enable Performance Monitor : ccr bit 0x20 */
init_percpu_prof(cpunum);
btlb_init_per_cpu();
return ret;
}


@ -154,6 +154,7 @@ SECTIONS
}
/* End of data section */
. = ALIGN(PAGE_SIZE);
_edata = .;
/* BSS */


@ -32,6 +32,7 @@
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>
#include <asm/asm-offsets.h>
extern int data_start;
extern void parisc_kernel_start(void); /* Kernel entry point in head.S */
@ -720,6 +721,77 @@ void __init paging_init(void)
parisc_bootmem_free();
}
static void alloc_btlb(unsigned long start, unsigned long end, int *slot,
unsigned long entry_info)
{
const int slot_max = btlb_info.fixed_range_info.num_comb;
int min_num_pages = btlb_info.min_size;
unsigned long size;
/* map at minimum 4 pages */
if (min_num_pages < 4)
min_num_pages = 4;
size = HUGEPAGE_SIZE;
while (start < end && *slot < slot_max && size >= PAGE_SIZE) {
/* starting address must have same alignment as size! */
/* if correctly aligned and fits in double size, increase */
if (((start & (2 * size - 1)) == 0) &&
(end - start) >= (2 * size)) {
size <<= 1;
continue;
}
/* if current size alignment is too big, try smaller size */
if ((start & (size - 1)) != 0) {
size >>= 1;
continue;
}
if ((end - start) >= size) {
if ((size >> PAGE_SHIFT) >= min_num_pages)
pdc_btlb_insert(start >> PAGE_SHIFT, __pa(start) >> PAGE_SHIFT,
size >> PAGE_SHIFT, entry_info, *slot);
(*slot)++;
start += size;
continue;
}
size /= 2;
continue;
}
}
void btlb_init_per_cpu(void)
{
unsigned long s, t, e;
int slot;
/* BTLBs are not available on 64-bit CPUs */
if (IS_ENABLED(CONFIG_PA20))
return;
else if (pdc_btlb_info(&btlb_info) < 0) {
memset(&btlb_info, 0, sizeof btlb_info);
}
/* insert BLTLBs for code and data segments */
s = (uintptr_t) dereference_function_descriptor(&_stext);
e = (uintptr_t) dereference_function_descriptor(&_etext);
t = (uintptr_t) dereference_function_descriptor(&_sdata);
BUG_ON(t != e);
/* code segments */
slot = 0;
alloc_btlb(s, e, &slot, 0x13800000);
/* sanity check */
t = (uintptr_t) dereference_function_descriptor(&_edata);
e = (uintptr_t) dereference_function_descriptor(&__bss_start);
BUG_ON(t != e);
/* data segments */
s = (uintptr_t) dereference_function_descriptor(&_sdata);
e = (uintptr_t) dereference_function_descriptor(&__bss_stop);
alloc_btlb(s, e, &slot, 0x11800000);
}
#ifdef CONFIG_PA20
/*

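To see what the alloc_btlb() loop above actually inserts, here is a standalone user-space sketch (illustrative only, not from the patch) of its block-size selection; the slot bookkeeping and the minimum-page check are omitted:

#include <stdio.h>

#define PAGE_SIZE     (4UL * 1024)
#define HUGEPAGE_SIZE (4UL * 1024 * 1024)	/* 32-bit parisc value */

/* Mirrors the size-selection loop in alloc_btlb(): grow the block while
 * the start is aligned to twice the size and the range still fits,
 * shrink it when the start is not aligned to the current size. */
static void show_btlb_blocks(unsigned long start, unsigned long end)
{
	unsigned long size = HUGEPAGE_SIZE;

	while (start < end && size >= PAGE_SIZE) {
		if (((start & (2 * size - 1)) == 0) &&
		    (end - start) >= (2 * size)) {
			size <<= 1;
			continue;
		}
		if ((start & (size - 1)) != 0) {
			size >>= 1;
			continue;
		}
		if ((end - start) >= size) {
			printf("insert: 0x%08lx, %lu pages\n",
			       start, size / PAGE_SIZE);
			start += size;
			continue;
		}
		size /= 2;
	}
}

int main(void)
{
	show_btlb_blocks(0x00400000, 0x00c00000);	/* 8 MB at a 4 MB boundary */
	return 0;
}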

@ -105,7 +105,7 @@ asm volatile(ALTERNATIVE( \
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
* 0000001 01001 rs1 000 00000 0001011
* dcache.cva rs1 (clean, virtual address)
* 0000001 00100 rs1 000 00000 0001011
* 0000001 00101 rs1 000 00000 0001011
*
* dcache.cipa rs1 (clean then invalidate, physical address)
* | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
@ -118,7 +118,7 @@ asm volatile(ALTERNATIVE( \
* 0000000 11001 00000 000 00000 0001011
*/
#define THEAD_inval_A0 ".long 0x0265000b"
#define THEAD_clean_A0 ".long 0x0245000b"
#define THEAD_clean_A0 ".long 0x0255000b"
#define THEAD_flush_A0 ".long 0x0275000b"
#define THEAD_SYNC_S ".long 0x0190000b"
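The corrected dcache.cva encoding can be cross-checked by assembling the fields from the comment above. An illustrative user-space encoder (not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Assemble a T-HEAD custom-0 cache instruction from its fields
 * (layout per the comment above: funct7 | rs2 | rs1 | funct3 | rd | opcode). */
static uint32_t thead_cache_insn(uint32_t funct7, uint32_t rs2, uint32_t rs1)
{
	const uint32_t funct3 = 0, rd = 0, opcode = 0x0b;	/* custom-0 */

	return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) |
	       (funct3 << 12) | (rd << 7) | opcode;
}

int main(void)
{
	/* dcache.cva a0: funct7 = 0000001, rs2 = 00101, rs1 = x10 (a0) */
	printf("THEAD_clean_A0 = 0x%08x\n", thead_cache_insn(0x01, 0x05, 10));
	return 0;	/* prints 0x0255000b */
}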


@ -98,7 +98,13 @@ static int elf_find_pbase(struct kimage *image, unsigned long kernel_len,
kbuf.image = image;
kbuf.buf_min = lowest_paddr;
kbuf.buf_max = ULONG_MAX;
kbuf.buf_align = PAGE_SIZE;
/*
* Current riscv boot protocol requires 2MB alignment for
* RV64 and 4MB alignment for RV32
*
*/
kbuf.buf_align = PMD_SIZE;
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
kbuf.memsz = ALIGN(kernel_len, PAGE_SIZE);
kbuf.top_down = false;


@ -460,8 +460,11 @@ static int riscv_vcpu_get_isa_ext_single(struct kvm_vcpu *vcpu,
reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
return -ENOENT;
*reg_val = 0;
host_isa_ext = kvm_isa_ext_arr[reg_num];
if (!__riscv_isa_extension_available(NULL, host_isa_ext))
return -ENOENT;
*reg_val = 0;
if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
*reg_val = 1; /* Mark the given extension as available */
@ -842,7 +845,7 @@ static int copy_isa_ext_reg_indices(const struct kvm_vcpu *vcpu,
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_ISA_EXT | i;
isa_ext = kvm_isa_ext_arr[i];
if (!__riscv_isa_extension_available(vcpu->arch.isa, isa_ext))
if (!__riscv_isa_extension_available(NULL, isa_ext))
continue;
if (uindices) {
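With the two hunks above, reading the ISA_EXT register of an extension the host lacks now fails with -ENOENT instead of silently returning 0. A hedged sketch of how user space might use this (the register-ID composition mirrors the kernel code above; the uapi constant names are assumed to come in via <linux/kvm.h> on riscv):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <errno.h>

/* Returns 1 if the extension is enabled for the vCPU, 0 if the host
 * lacks it (ENOENT after the fix), -1 on any other error. ext_id stands
 * for one of the KVM_RISCV_ISA_EXT_* identifiers. */
static int isa_ext_supported(int vcpu_fd, unsigned long ext_id)
{
	unsigned long val = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_RISCV | KVM_REG_SIZE_ULONG |
			KVM_REG_RISCV_ISA_EXT | ext_id,
		.addr = (unsigned long)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return errno == ENOENT ? 0 : -1;

	return val == 1;
}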


@ -1945,6 +1945,7 @@ config EFI
select UCS2_STRING
select EFI_RUNTIME_WRAPPERS
select ARCH_USE_MEMREMAP_PROT
select EFI_RUNTIME_MAP if KEXEC_CORE
help
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).
@ -2020,7 +2021,6 @@ config EFI_MAX_FAKE_MEM
config EFI_RUNTIME_MAP
bool "Export EFI runtime maps to sysfs" if EXPERT
depends on EFI
default KEXEC_CORE
help
Export EFI runtime memory regions to /sys/firmware/efi/runtime-map.
That memory map is required by the 2nd kernel to set up EFI virtual


@ -59,6 +59,14 @@ static void *alloc_pgt_page(void *context)
return NULL;
}
/* Consumed more tables than expected? */
if (pages->pgt_buf_offset == BOOT_PGT_SIZE_WARN) {
debug_putstr("pgt_buf running low in " __FILE__ "\n");
debug_putstr("Need to raise BOOT_PGT_SIZE?\n");
debug_putaddr(pages->pgt_buf_offset);
debug_putaddr(pages->pgt_buf_size);
}
entry = pages->pgt_buf + pages->pgt_buf_offset;
pages->pgt_buf_offset += PAGE_SIZE;


@ -40,23 +40,40 @@
#ifdef CONFIG_X86_64
# define BOOT_STACK_SIZE 0x4000
# define BOOT_INIT_PGT_SIZE (6*4096)
# ifdef CONFIG_RANDOMIZE_BASE
/*
* Assuming all cross the 512GB boundary:
* 1 page for level4
* (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel
* 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP).
* Total is 19 pages.
* Used by decompressor's startup_32() to allocate page tables for identity
* mapping of the 4G of RAM in 4-level paging mode:
* - 1 level4 table;
* - 1 level3 table;
* - 4 level2 table that maps everything with 2M pages;
*
* The additional level5 table needed for 5-level paging is allocated from
* trampoline_32bit memory.
*/
# ifdef CONFIG_X86_VERBOSE_BOOTUP
# define BOOT_PGT_SIZE (19*4096)
# else /* !CONFIG_X86_VERBOSE_BOOTUP */
# define BOOT_PGT_SIZE (17*4096)
# endif
# else /* !CONFIG_RANDOMIZE_BASE */
# define BOOT_PGT_SIZE BOOT_INIT_PGT_SIZE
# endif
# define BOOT_INIT_PGT_SIZE (6*4096)
/*
* Total number of page tables kernel_add_identity_map() can allocate,
* including page tables consumed by startup_32().
*
* Worst-case scenario:
* - 5-level paging needs 1 level5 table;
* - KASLR needs to map kernel, boot_params, cmdline and randomized kernel,
* assuming all of them cross 256T boundary:
* + 4*2 level4 table;
* + 4*2 level3 table;
* + 4*2 level2 table;
* - X86_VERBOSE_BOOTUP needs to map the first 2M (video RAM):
* + 1 level4 table;
* + 1 level3 table;
* + 1 level2 table;
* Total: 28 tables
*
* Add 4 spare table in case decompressor touches anything beyond what is
* accounted above. Warn if it happens.
*/
# define BOOT_PGT_SIZE_WARN (28*4096)
# define BOOT_PGT_SIZE (32*4096)
#else /* !CONFIG_X86_64 */
# define BOOT_STACK_SIZE 0x1000
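The 28-table worst case in the comment above adds up; an illustrative compile-time check (not part of the patch):

/* 1 level5 table, plus 4 KASLR-mapped objects that may each cross a
 * 256T boundary (2 tables at each of 3 levels), plus 3 tables for the
 * CONFIG_X86_VERBOSE_BOOTUP video RAM mapping. */
_Static_assert(1 + 4 * 2 * 3 + 3 == 28, "BOOT_PGT_SIZE_WARN accounting");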


@ -91,19 +91,6 @@ static inline void efi_fpu_end(void)
#ifdef CONFIG_X86_32
#define EFI_X86_KERNEL_ALLOC_LIMIT (SZ_512M - 1)
#define arch_efi_call_virt_setup() \
({ \
efi_fpu_begin(); \
firmware_restrict_branch_speculation_start(); \
})
#define arch_efi_call_virt_teardown() \
({ \
firmware_restrict_branch_speculation_end(); \
efi_fpu_end(); \
})
#else /* !CONFIG_X86_32 */
#define EFI_X86_KERNEL_ALLOC_LIMIT EFI_ALLOC_LIMIT
@ -116,14 +103,6 @@ extern bool efi_disable_ibt_for_runtime;
__efi_call(__VA_ARGS__); \
})
#define arch_efi_call_virt_setup() \
({ \
efi_sync_low_kernel_mappings(); \
efi_fpu_begin(); \
firmware_restrict_branch_speculation_start(); \
efi_enter_mm(); \
})
#undef arch_efi_call_virt
#define arch_efi_call_virt(p, f, args...) ({ \
u64 ret, ibt = ibt_save(efi_disable_ibt_for_runtime); \
@ -132,13 +111,6 @@ extern bool efi_disable_ibt_for_runtime;
ret; \
})
#define arch_efi_call_virt_teardown() \
({ \
efi_leave_mm(); \
firmware_restrict_branch_speculation_end(); \
efi_fpu_end(); \
})
#ifdef CONFIG_KASAN
/*
* CONFIG_KASAN may redefine memset to __memset. __memset function is present
@ -168,8 +140,8 @@ extern void efi_delete_dummy_variable(void);
extern void efi_crash_gracefully_on_page_fault(unsigned long phys_addr);
extern void efi_free_boot_services(void);
void efi_enter_mm(void);
void efi_leave_mm(void);
void arch_efi_call_virt_setup(void);
void arch_efi_call_virt_teardown(void);
/* kexec external ABI */
struct efi_setup_data {


@ -8,6 +8,14 @@
#undef notrace
#define notrace __attribute__((no_instrument_function))
#ifdef CONFIG_64BIT
/*
* The generic version tends to create spurious ENDBR instructions under
* certain conditions.
*/
#define _THIS_IP_ ({ unsigned long __here; asm ("lea 0(%%rip), %0" : "=r" (__here)); __here; })
#endif
#ifdef CONFIG_X86_32
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))
#endif /* CONFIG_X86_32 */


@ -1533,7 +1533,7 @@ static void __init build_socket_tables(void)
{
struct uv_gam_range_entry *gre = uv_gre_table;
int nums, numn, nump;
int cpu, i, lnid;
int i, lnid, apicid;
int minsock = _min_socket;
int maxsock = _max_socket;
int minpnode = _min_pnode;
@ -1584,15 +1584,14 @@ static void __init build_socket_tables(void)
/* Set socket -> node values: */
lnid = NUMA_NO_NODE;
for_each_possible_cpu(cpu) {
int nid = cpu_to_node(cpu);
int apicid, sockid;
for (apicid = 0; apicid < ARRAY_SIZE(__apicid_to_node); apicid++) {
int nid = __apicid_to_node[apicid];
int sockid;
if (lnid == nid)
if ((nid == NUMA_NO_NODE) || (lnid == nid))
continue;
lnid = nid;
apicid = per_cpu(x86_cpu_to_apicid, cpu);
sockid = apicid >> uv_cpuid.socketid_shift;
if (_socket_to_node[sockid - minsock] == SOCK_EMPTY)


@ -579,7 +579,6 @@ static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
}
#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_CLUSTER) || defined(CONFIG_SCHED_MC)
static inline int x86_sched_itmt_flags(void)
{
return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0;
@ -603,7 +602,14 @@ static int x86_cluster_flags(void)
return cpu_cluster_flags() | x86_sched_itmt_flags();
}
#endif
#endif
static int x86_die_flags(void)
{
if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
return x86_sched_itmt_flags();
return 0;
}
/*
* Set if a package/die has multiple NUMA nodes inside.
@ -640,7 +646,7 @@ static void __init build_sched_topology(void)
*/
if (!x86_has_numa_in_package) {
x86_topology[i++] = (struct sched_domain_topology_level){
cpu_cpu_mask, SD_INIT_NAME(DIE)
cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(DIE)
};
}


@ -56,7 +56,6 @@ SYM_FUNC_END(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
SYM_FUNC_START(__put_user_nocheck_1)
ENDBR
ASM_STAC
2: movb %al,(%_ASM_CX)
xor %ecx,%ecx
@ -76,7 +75,6 @@ SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
SYM_FUNC_START(__put_user_nocheck_2)
ENDBR
ASM_STAC
4: movw %ax,(%_ASM_CX)
xor %ecx,%ecx
@ -96,7 +94,6 @@ SYM_FUNC_END(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
SYM_FUNC_START(__put_user_nocheck_4)
ENDBR
ASM_STAC
6: movl %eax,(%_ASM_CX)
xor %ecx,%ecx
@ -119,7 +116,6 @@ SYM_FUNC_END(__put_user_8)
EXPORT_SYMBOL(__put_user_8)
SYM_FUNC_START(__put_user_nocheck_8)
ENDBR
ASM_STAC
9: mov %_ASM_AX,(%_ASM_CX)
#ifdef CONFIG_X86_32


@ -140,3 +140,15 @@ void __init efi_runtime_update_mappings(void)
}
}
}
void arch_efi_call_virt_setup(void)
{
efi_fpu_begin();
firmware_restrict_branch_speculation_start();
}
void arch_efi_call_virt_teardown(void)
{
firmware_restrict_branch_speculation_end();
efi_fpu_end();
}


@ -474,19 +474,34 @@ void __init efi_dump_pagetable(void)
* can not change under us.
* It should be ensured that there are no concurrent calls to this function.
*/
void efi_enter_mm(void)
static void efi_enter_mm(void)
{
efi_prev_mm = current->active_mm;
current->active_mm = &efi_mm;
switch_mm(efi_prev_mm, &efi_mm, NULL);
}
void efi_leave_mm(void)
static void efi_leave_mm(void)
{
current->active_mm = efi_prev_mm;
switch_mm(&efi_mm, efi_prev_mm, NULL);
}
void arch_efi_call_virt_setup(void)
{
efi_sync_low_kernel_mappings();
efi_fpu_begin();
firmware_restrict_branch_speculation_start();
efi_enter_mm();
}
void arch_efi_call_virt_teardown(void)
{
efi_leave_mm();
firmware_restrict_branch_speculation_end();
efi_fpu_end();
}
static DEFINE_SPINLOCK(efi_runtime_lock);
/*


@ -19,6 +19,10 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS -D__NO_FORTIFY
# optimization flags.
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))
# When LTO is enabled, llvm emits many text sections, which is not supported
# by kexec. Remove -flto=* flags.
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_LTO),$(KBUILD_CFLAGS))
# When linking purgatory.ro with -r unresolved symbols are not checked,
# also link a purgatory.chk binary without -r to check for unresolved symbols.
PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib


@ -4405,11 +4405,8 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
struct blk_mq_tags **new_tags;
int i;
if (set->nr_hw_queues >= new_nr_hw_queues) {
for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++)
__blk_mq_free_map_and_rqs(set, i);
if (set->nr_hw_queues >= new_nr_hw_queues)
goto done;
}
new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *),
GFP_KERNEL, set->numa_node);
@ -4719,7 +4716,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
{
struct request_queue *q;
LIST_HEAD(head);
int prev_nr_hw_queues;
int prev_nr_hw_queues = set->nr_hw_queues;
int i;
lockdep_assert_held(&set->tag_list_lock);
@ -4746,7 +4744,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
blk_mq_sysfs_unregister_hctxs(q);
}
prev_nr_hw_queues = set->nr_hw_queues;
if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0)
goto reregister;
@ -4781,6 +4778,10 @@ switch_back:
list_for_each_entry(q, &set->tag_list, tag_set_list)
blk_mq_unfreeze_queue(q);
/* Free the excess tags when nr_hw_queues shrink. */
for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
__blk_mq_free_map_and_rqs(set, i);
}
void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)


@ -46,7 +46,7 @@ obj-$(CONFIG_DMADEVICES) += dma/
# SOC specific infrastructure drivers.
obj-y += soc/
obj-$(CONFIG_PM_GENERIC_DOMAINS) += genpd/
obj-$(CONFIG_PM_GENERIC_DOMAINS) += pmdomain/
obj-y += virtio/
obj-$(CONFIG_VDPA) += vdpa/


@ -492,7 +492,7 @@ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
}
static int thermal_get_trend(struct thermal_zone_device *thermal,
struct thermal_trip *trip,
const struct thermal_trip *trip,
enum thermal_trend *trend)
{
struct acpi_thermal *tz = thermal_zone_device_priv(thermal);


@ -1883,6 +1883,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
dev_info(&pdev->dev, "SSS flag set, parallel bus scan disabled\n");
if (!(hpriv->cap & HOST_CAP_PART))
host->flags |= ATA_HOST_NO_PART;
if (!(hpriv->cap & HOST_CAP_SSC))
host->flags |= ATA_HOST_NO_SSC;
if (!(hpriv->cap2 & HOST_CAP2_SDS))
host->flags |= ATA_HOST_NO_DEVSLP;
if (pi.flags & ATA_FLAG_EM)
ahci_reset_em(host);


@ -1256,6 +1256,26 @@ static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
return sprintf(buf, "%d\n", emp->blink_policy);
}
static void ahci_port_clear_pending_irq(struct ata_port *ap)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
/* clear SError */
tmp = readl(port_mmio + PORT_SCR_ERR);
dev_dbg(ap->host->dev, "PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
dev_dbg(ap->host->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << ap->port_no, hpriv->mmio + HOST_IRQ_STAT);
}
static void ahci_port_init(struct device *dev, struct ata_port *ap,
int port_no, void __iomem *mmio,
void __iomem *port_mmio)
@ -1270,18 +1290,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
if (rc)
dev_warn(dev, "%s (%d)\n", emsg, rc);
/* clear SError */
tmp = readl(port_mmio + PORT_SCR_ERR);
dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << port_no, mmio + HOST_IRQ_STAT);
ahci_port_clear_pending_irq(ap);
/* mark esata ports */
tmp = readl(port_mmio + PORT_CMD);
@ -1603,6 +1612,8 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
tf.status = ATA_BUSY;
ata_tf_to_fis(&tf, 0, 0, d2h_fis);
ahci_port_clear_pending_irq(ap);
rc = sata_link_hardreset(link, timing, deadline, online,
ahci_check_ready);


@ -4783,11 +4783,8 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
* been aborted by the device due to a limit timeout using the policy
* 0xD. For these commands, invoke EH to get the command sense data.
*/
if (qc->result_tf.status & ATA_SENSE &&
((ata_is_ncq(qc->tf.protocol) &&
dev->flags & ATA_DFLAG_CDL_ENABLED) ||
(!ata_is_ncq(qc->tf.protocol) &&
ata_id_sense_reporting_enabled(dev->id)))) {
if (qc->flags & ATA_QCFLAG_HAS_CDL &&
qc->result_tf.status & ATA_SENSE) {
/*
* Tell SCSI EH to not overwrite scmd->result even if this
* command is finished with result SAM_STAT_GOOD.


@ -2796,23 +2796,13 @@ int ata_eh_reset(struct ata_link *link, int classify,
}
}
/*
* Some controllers can't be frozen very well and may set spurious
* error conditions during reset. Clear accumulated error
* information and re-thaw the port if frozen. As reset is the
* final recovery action and we cross check link onlineness against
* device classification later, no hotplug event is lost by this.
*/
/* clear cached SError */
spin_lock_irqsave(link->ap->lock, flags);
memset(&link->eh_info, 0, sizeof(link->eh_info));
link->eh_info.serror = 0;
if (slave)
memset(&slave->eh_info, 0, sizeof(link->eh_info));
ap->pflags &= ~ATA_PFLAG_EH_PENDING;
slave->eh_info.serror = 0;
spin_unlock_irqrestore(link->ap->lock, flags);
if (ata_port_is_frozen(ap))
ata_eh_thaw_port(ap);
/*
* Make sure onlineness and classification result correspond.
* Hotplug could have happened during reset and some


@ -396,10 +396,23 @@ int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
case ATA_LPM_MED_POWER_WITH_DIPM:
case ATA_LPM_MIN_POWER_WITH_PARTIAL:
case ATA_LPM_MIN_POWER:
if (ata_link_nr_enabled(link) > 0)
/* no restrictions on LPM transitions */
if (ata_link_nr_enabled(link) > 0) {
/* assume no restrictions on LPM transitions */
scontrol &= ~(0x7 << 8);
else {
/*
* If the controller does not support partial, slumber,
* or devsleep, then disallow these transitions.
*/
if (link->ap->host->flags & ATA_HOST_NO_PART)
scontrol |= (0x1 << 8);
if (link->ap->host->flags & ATA_HOST_NO_SSC)
scontrol |= (0x2 << 8);
if (link->ap->host->flags & ATA_HOST_NO_DEVSLP)
scontrol |= (0x4 << 8);
} else {
/* empty port, power off */
scontrol &= ~0xf;
scontrol |= (0x1 << 2);
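For reference, the values ORed in above land in SControl's IPM field (bits 11:8), while the empty-port branch programs the DET field (bits 3:0). Illustrative macro names, not from the patch:

/* SControl IPM restrictions, as set by the code above */
#define SCTL_IPM_NO_PARTIAL	(0x1 << 8)	/* Partial disallowed */
#define SCTL_IPM_NO_SLUMBER	(0x2 << 8)	/* Slumber disallowed */
#define SCTL_IPM_NO_DEVSLP	(0x4 << 8)	/* DevSleep disallowed */
#define SCTL_DET_OFFLINE	(0x1 << 2)	/* DET = 4: put the PHY offline */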


@ -37,7 +37,7 @@ static int comm_read_regr(struct pi_adapter *pi, int cont, int regr)
{
int l, h, r;
r = regr + cont_map[cont];
r = regr + cont_map[cont];
switch (pi->mode) {
case 0:
@ -90,7 +90,6 @@ static void comm_connect(struct pi_adapter *pi)
}
static void comm_disconnect(struct pi_adapter *pi)
{
w2(0); w2(0); w2(0); w2(4);
w0(pi->saved_r0);
@ -172,12 +171,12 @@ static void comm_write_block(struct pi_adapter *pi, char *buf, int count)
w4l(swab16(((u16 *)buf)[2 * k]) |
swab16(((u16 *)buf)[2 * k + 1]) << 16);
break;
}
}
}
static void comm_log_adapter(struct pi_adapter *pi)
{ char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
{
char *mode_string[5] = { "4-bit", "8-bit", "EPP-8", "EPP-16", "EPP-32" };
dev_info(&pi->dev,
"DataStor Commuter at 0x%x, mode %d (%s), delay %d\n",


@ -1255,8 +1255,8 @@ static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
for (b = 0; b < bytes; ) {
for (w = 0, o = 0; b < bytes && w < 4; w++) {
o += snprintf(linebuf + o, sizeof(linebuf) - o,
"%08x ", readl(start + b));
o += scnprintf(linebuf + o, sizeof(linebuf) - o,
"%08x ", readl(start + b));
b += sizeof(u32);
}
dev_dbg(dev, "%s: %p: %s\n",


@ -3537,6 +3537,8 @@ int device_add(struct device *dev)
/* subsystems can specify simple device enumeration */
else if (dev->bus && dev->bus->dev_name)
error = dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
else
error = -EINVAL;
if (error)
goto name_error;


@ -394,8 +394,6 @@ find_quicksilver(struct device *dev, void *data)
static int __init
parisc_agp_init(void)
{
extern struct sba_device *sba_list;
int err = -1;
struct parisc_device *sba = NULL, *lba = NULL;
struct lba_device *lbadev = NULL;


@ -33,7 +33,7 @@ const struct class tpm_class = {
.shutdown_pre = tpm_class_shutdown,
};
const struct class tpmrm_class = {
.name = "tmprm",
.name = "tpmrm",
};
dev_t tpm_devt;


@ -67,7 +67,6 @@ config COMEDI_TEST
config COMEDI_PARPORT
tristate "Parallel port support"
depends on HAS_IOPORT
help
Enable support for the standard parallel port.
A cheap and easy way to get a few more digital I/O lines. Steal
@ -80,7 +79,6 @@ config COMEDI_PARPORT
config COMEDI_SSV_DNP
tristate "SSV Embedded Systems DIL/Net-PC support"
depends on X86_32 || COMPILE_TEST
depends on HAS_IOPORT
help
Enable support for SSV Embedded Systems DIL/Net-PC
@ -91,7 +89,6 @@ endif # COMEDI_MISC_DRIVERS
menuconfig COMEDI_ISA_DRIVERS
bool "Comedi ISA and PC/104 drivers"
depends on ISA
help
Enable comedi ISA and PC/104 drivers to be built
@ -103,8 +100,7 @@ if COMEDI_ISA_DRIVERS
config COMEDI_PCL711
tristate "Advantech PCL-711/711b and ADlink ACL-8112 ISA card support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for Advantech PCL-711 and 711b, ADlink ACL-8112
@ -165,9 +161,8 @@ config COMEDI_PCL730
config COMEDI_PCL812
tristate "Advantech PCL-812/813 and ADlink ACL-8112/8113/8113/8216"
depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for Advantech PCL-812/PG, PCL-813/B, ADLink
ACL-8112DG/HG/PG, ACL-8113, ACL-8216, ICP DAS A-821PGH/PGL/PGL-NDA,
@ -178,9 +173,8 @@ config COMEDI_PCL812
config COMEDI_PCL816
tristate "Advantech PCL-814 and PCL-816 ISA card support"
depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for Advantech PCL-814 and PCL-816 ISA cards
@ -189,9 +183,8 @@ config COMEDI_PCL816
config COMEDI_PCL818
tristate "Advantech PCL-718 and PCL-818 ISA card support"
depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for Advantech PCL-818 ISA cards
PCL-818L, PCL-818H, PCL-818HD, PCL-818HG, PCL-818 and PCL-718
@ -210,7 +203,7 @@ config COMEDI_PCM3724
config COMEDI_AMPLC_DIO200_ISA
tristate "Amplicon PC212E/PC214E/PC215E/PC218E/PC272E"
depends on COMEDI_AMPLC_DIO200
select COMEDI_AMPLC_DIO200
help
Enable support for Amplicon PC212E, PC214E, PC215E, PC218E and
PC272E ISA DIO boards
@ -262,8 +255,7 @@ config COMEDI_DAC02
config COMEDI_DAS16M1
tristate "MeasurementComputing CIO-DAS16/M1DAS-16 ISA card support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
select COMEDI_8255
help
Enable support for Measurement Computing CIO-DAS16/M1 ISA cards.
@ -273,7 +265,7 @@ config COMEDI_DAS16M1
config COMEDI_DAS08_ISA
tristate "DAS-08 compatible ISA and PC/104 card support"
depends on COMEDI_DAS08
select COMEDI_DAS08
help
Enable support for Keithley Metrabyte/ComputerBoards DAS08
and compatible ISA and PC/104 cards:
@ -286,9 +278,8 @@ config COMEDI_DAS08_ISA
config COMEDI_DAS16
tristate "DAS-16 compatible ISA and PC/104 card support"
depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
depends on COMEDI_8254
select COMEDI_8254
select COMEDI_8255
help
Enable support for Keithley Metrabyte/ComputerBoards DAS16
@ -305,8 +296,7 @@ config COMEDI_DAS16
config COMEDI_DAS800
tristate "DAS800 and compatible ISA card support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for Keithley Metrabyte DAS800 and compatible ISA cards
Keithley Metrabyte DAS-800, DAS-801, DAS-802
@ -318,9 +308,8 @@ config COMEDI_DAS800
config COMEDI_DAS1800
tristate "DAS1800 and compatible ISA card support"
depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for DAS1800 and compatible ISA cards
Keithley Metrabyte DAS-1701ST, DAS-1701ST-DA, DAS-1701/AO,
@ -334,8 +323,7 @@ config COMEDI_DAS1800
config COMEDI_DAS6402
tristate "DAS6402 and compatible ISA card support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for DAS6402 and compatible ISA cards
Computerboards, Keithley Metrabyte DAS6402 and compatibles
@ -414,8 +402,7 @@ config COMEDI_FL512
config COMEDI_AIO_AIO12_8
tristate "I/O Products PC/104 AIO12-8 Analog I/O Board support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
select COMEDI_8255
help
Enable support for I/O Products PC/104 AIO12-8 Analog I/O Board
@ -469,9 +456,8 @@ config COMEDI_ADQ12B
config COMEDI_NI_AT_A2150
tristate "NI AT-A2150 ISA card support"
depends on HAS_IOPORT
select COMEDI_ISADMA if ISA_DMA_API
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for National Instruments AT-A2150 cards
@ -480,8 +466,7 @@ config COMEDI_NI_AT_A2150
config COMEDI_NI_AT_AO
tristate "NI AT-AO-6/10 EISA card support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for National Instruments AT-AO-6/10 cards
@ -512,7 +497,7 @@ config COMEDI_NI_ATMIO16D
config COMEDI_NI_LABPC_ISA
tristate "NI Lab-PC and compatibles ISA support"
depends on COMEDI_NI_LABPC
select COMEDI_NI_LABPC
help
Enable support for National Instruments Lab-PC and compatibles
Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
@ -576,7 +561,7 @@ endif # COMEDI_ISA_DRIVERS
menuconfig COMEDI_PCI_DRIVERS
tristate "Comedi PCI drivers"
depends on PCI && HAS_IOPORT
depends on PCI
help
Enable support for comedi PCI drivers.
@ -725,8 +710,7 @@ config COMEDI_ADL_PCI8164
config COMEDI_ADL_PCI9111
tristate "ADLink PCI-9111HR support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for ADlink PCI9111 cards
@ -736,7 +720,7 @@ config COMEDI_ADL_PCI9111
config COMEDI_ADL_PCI9118
tristate "ADLink PCI-9118DG, PCI-9118HG, PCI-9118HR support"
depends on HAS_DMA
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for ADlink PCI-9118DG, PCI-9118HG, PCI-9118HR cards
@ -745,8 +729,7 @@ config COMEDI_ADL_PCI9118
config COMEDI_ADV_PCI1710
tristate "Advantech PCI-171x and PCI-1731 support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for Advantech PCI-1710, PCI-1710HG, PCI-1711,
PCI-1713 and PCI-1731
@ -790,8 +773,7 @@ config COMEDI_ADV_PCI1760
config COMEDI_ADV_PCI_DIO
tristate "Advantech PCI DIO card support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
select COMEDI_8255
help
Enable support for Advantech PCI DIO cards
@ -804,7 +786,7 @@ config COMEDI_ADV_PCI_DIO
config COMEDI_AMPLC_DIO200_PCI
tristate "Amplicon PCI215/PCI272/PCIe215/PCIe236/PCIe296 DIO support"
depends on COMEDI_AMPLC_DIO200
select COMEDI_AMPLC_DIO200
help
Enable support for Amplicon PCI215, PCI272, PCIe215, PCIe236
and PCIe296 DIO boards.
@ -832,8 +814,7 @@ config COMEDI_AMPLC_PC263_PCI
config COMEDI_AMPLC_PCI224
tristate "Amplicon PCI224 and PCI234 support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for Amplicon PCI224 and PCI234 AO boards
@ -842,8 +823,7 @@ config COMEDI_AMPLC_PCI224
config COMEDI_AMPLC_PCI230
tristate "Amplicon PCI230 and PCI260 support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
select COMEDI_8255
help
Enable support for Amplicon PCI230 and PCI260 Multifunction I/O
@ -862,7 +842,7 @@ config COMEDI_CONTEC_PCI_DIO
config COMEDI_DAS08_PCI
tristate "DAS-08 PCI support"
depends on COMEDI_DAS08
select COMEDI_DAS08
help
Enable support for PCI DAS-08 cards.
@ -949,8 +929,7 @@ config COMEDI_CB_PCIDAS64
config COMEDI_CB_PCIDAS
tristate "MeasurementComputing PCI-DAS support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCI-DAS with
@ -974,8 +953,7 @@ config COMEDI_CB_PCIDDA
config COMEDI_CB_PCIMDAS
tristate "MeasurementComputing PCIM-DAS1602/16, PCIe-DAS1602/16 support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
select COMEDI_8255
help
Enable support for ComputerBoards/MeasurementComputing PCI Migration
@ -995,8 +973,7 @@ config COMEDI_CB_PCIMDDA
config COMEDI_ME4000
tristate "Meilhaus ME-4000 support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for Meilhaus PCI data acquisition cards
ME-4650, ME-4670i, ME-4680, ME-4680i and ME-4680is
@ -1054,7 +1031,7 @@ config COMEDI_NI_670X
config COMEDI_NI_LABPC_PCI
tristate "NI Lab-PC PCI-1200 support"
depends on COMEDI_NI_LABPC
select COMEDI_NI_LABPC
help
Enable support for National Instruments Lab-PC PCI-1200.
@ -1076,7 +1053,6 @@ config COMEDI_NI_PCIDIO
config COMEDI_NI_PCIMIO
tristate "NI PCI-MIO-E series and M series support"
depends on HAS_DMA
depends on HAS_IOPORT
select COMEDI_NI_TIOCMD
select COMEDI_8255
help
@ -1098,8 +1074,7 @@ config COMEDI_NI_PCIMIO
config COMEDI_RTD520
tristate "Real Time Devices PCI4520/DM7520 support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for Real Time Devices PCI4520/DM7520
@ -1139,8 +1114,7 @@ if COMEDI_PCMCIA_DRIVERS
config COMEDI_CB_DAS16_CS
tristate "CB DAS16 series PCMCIA support"
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
help
Enable support for the ComputerBoards/MeasurementComputing PCMCIA
cards DAS16/16, PCM-DAS16D/12 and PCM-DAS16s/16
@ -1150,7 +1124,7 @@ config COMEDI_CB_DAS16_CS
config COMEDI_DAS08_CS
tristate "CB DAS08 PCMCIA support"
depends on COMEDI_DAS08
select COMEDI_DAS08
help
Enable support for the ComputerBoards/MeasurementComputing DAS-08
PCMCIA card
@ -1160,7 +1134,6 @@ config COMEDI_DAS08_CS
config COMEDI_NI_DAQ_700_CS
tristate "NI DAQCard-700 PCMCIA support"
depends on HAS_IOPORT
help
Enable support for the National Instruments PCMCIA DAQCard-700 DIO
@ -1169,7 +1142,6 @@ config COMEDI_NI_DAQ_700_CS
config COMEDI_NI_DAQ_DIO24_CS
tristate "NI DAQ-Card DIO-24 PCMCIA support"
depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for the National Instruments PCMCIA DAQ-Card DIO-24
@ -1179,7 +1151,7 @@ config COMEDI_NI_DAQ_DIO24_CS
config COMEDI_NI_LABPC_CS
tristate "NI DAQCard-1200 PCMCIA support"
depends on COMEDI_NI_LABPC
select COMEDI_NI_LABPC
help
Enable support for the National Instruments PCMCIA DAQCard-1200
@ -1188,7 +1160,6 @@ config COMEDI_NI_LABPC_CS
config COMEDI_NI_MIO_CS
tristate "NI DAQCard E series PCMCIA support"
depends on HAS_IOPORT
select COMEDI_NI_TIO
select COMEDI_8255
help
@ -1201,7 +1172,6 @@ config COMEDI_NI_MIO_CS
config COMEDI_QUATECH_DAQP_CS
tristate "Quatech DAQP PCMCIA data capture card support"
depends on HAS_IOPORT
help
Enable support for the Quatech DAQP PCMCIA data capture cards
DAQP-208 and DAQP-308
@ -1278,14 +1248,12 @@ endif # COMEDI_USB_DRIVERS
config COMEDI_8254
tristate
depends on HAS_IOPORT
config COMEDI_8255
tristate
config COMEDI_8255_SA
tristate "Standalone 8255 support"
depends on HAS_IOPORT
select COMEDI_8255
help
Enable support for 8255 digital I/O as a standalone driver.
@ -1317,7 +1285,7 @@ config COMEDI_KCOMEDILIB
called kcomedilib.
config COMEDI_AMPLC_DIO200
depends on COMEDI_8254
select COMEDI_8254
tristate
config COMEDI_AMPLC_PC236
@ -1326,7 +1294,7 @@ config COMEDI_AMPLC_PC236
config COMEDI_DAS08
tristate
depends on COMEDI_8254
select COMEDI_8254
select COMEDI_8255
config COMEDI_ISADMA
@ -1334,8 +1302,7 @@ config COMEDI_ISADMA
config COMEDI_NI_LABPC
tristate
depends on HAS_IOPORT
depends on COMEDI_8254
select COMEDI_8254
select COMEDI_8255
config COMEDI_NI_LABPC_ISADMA


@ -1211,7 +1211,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* without actually having a link.
*/
create:
device = kzalloc(sizeof(*device), GFP_KERNEL);
device = kzalloc(sizeof(*device), GFP_ATOMIC);
if (device == NULL)
break;


@ -101,7 +101,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
struct fw_node *node;
node = kzalloc(struct_size(node, ports, port_count), GFP_KERNEL);
node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
if (node == NULL)
return NULL;


@ -62,7 +62,7 @@ efi_status_t allocate_unaccepted_bitmap(__u32 nr_desc,
bitmap_size = DIV_ROUND_UP(unaccepted_end - unaccepted_start,
EFI_UNACCEPTED_UNIT_SIZE * BITS_PER_BYTE);
status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY,
sizeof(*unaccepted_table) + bitmap_size,
(void **)&unaccepted_table);
if (status != EFI_SUCCESS) {


@ -1293,7 +1293,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
bool amdgpu_device_pcie_dynamic_switching_supported(void);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
bool amdgpu_device_aspm_support_quirk(void);


@ -478,7 +478,7 @@ void amdgpu_amdkfd_get_cu_info(struct amdgpu_device *adev, struct kfd_cu_info *c
cu_info->cu_active_number = acu_info.number;
cu_info->cu_ao_mask = acu_info.ao_cu_mask;
memcpy(&cu_info->cu_bitmap[0], &acu_info.bitmap[0],
sizeof(acu_info.bitmap));
sizeof(cu_info->cu_bitmap));
cu_info->num_shader_engines = adev->gfx.config.max_shader_engines;
cu_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se;
cu_info->num_cu_per_sh = adev->gfx.config.max_cu_per_sh;
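The copy above is now sized by the destination rather than the source. A minimal illustration (hypothetical struct names) of why that matters once the source array grows, as it does in the amdgpu_gfx.h hunk further below:

#include <string.h>

struct dst_info { unsigned int cu_bitmap[4][4]; };	/* stays 4x4 */
struct src_info { unsigned int bitmap[8][4][4]; };	/* grew by 8x */

static void copy_cu_bitmap(struct dst_info *d, const struct src_info *s)
{
	/* sizeof(s->bitmap) would copy eight times too much and overrun *d;
	 * sizing by the destination keeps the copy in bounds. */
	memcpy(d->cu_bitmap, s->bitmap, sizeof(d->cu_bitmap));
}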


@ -980,8 +980,7 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data,
uint32_t inst)
uint32_t *reg_data)
{
*reg_data = wait_times;


@ -55,5 +55,4 @@ void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data,
uint32_t inst);
uint32_t *reg_data);


@ -1103,8 +1103,7 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data,
uint32_t inst)
uint32_t *reg_data)
{
*reg_data = wait_times;
@ -1120,8 +1119,7 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
SCH_WAVE,
grace_period);
*reg_offset = SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
mmCP_IQ_WAIT_TIME2);
*reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2);
}
void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev,


@ -100,5 +100,4 @@ void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev,
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data,
uint32_t inst);
uint32_t *reg_data);


@ -1244,32 +1244,6 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
}
/*
* On APUs with >= 64GB white flickering has been observed w/ SG enabled.
* Disable S/G on such systems until we have a proper fix.
* https://gitlab.freedesktop.org/drm/amd/-/issues/2354
* https://gitlab.freedesktop.org/drm/amd/-/issues/2735
*/
bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
{
switch (amdgpu_sg_display) {
case -1:
break;
case 0:
return false;
case 1:
return true;
default:
return false;
}
if ((totalram_pages() << (PAGE_SHIFT - 10)) +
(adev->gmc.real_vram_size / 1024) >= 64000000) {
DRM_WARN("Disabling S/G due to >=64GB RAM\n");
return false;
}
return true;
}
/*
* Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
* speed switching. Until we have confirmation from Intel that a specific host


@ -43,6 +43,7 @@
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L
#define AMDGPU_MAX_GC_INSTANCES 8
#define KGD_MAX_QUEUES 128
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
@ -257,7 +258,7 @@ struct amdgpu_cu_info {
uint32_t number;
uint32_t ao_cu_mask;
uint32_t ao_cu_bitmap[4][4];
uint32_t bitmap[4][4];
uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
};
struct amdgpu_gfx_ras {


@ -839,7 +839,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
memcpy(&dev_info->cu_ao_bitmap[0], &adev->gfx.cu_info.ao_cu_bitmap[0],
sizeof(adev->gfx.cu_info.ao_cu_bitmap));
memcpy(&dev_info->cu_bitmap[0], &adev->gfx.cu_info.bitmap[0],
sizeof(adev->gfx.cu_info.bitmap));
sizeof(dev_info->cu_bitmap));
dev_info->vram_type = adev->gmc.vram_type;
dev_info->vram_bit_width = adev->gmc.vram_width;
dev_info->vce_harvest_config = adev->vce.harvest_config;
@ -940,12 +940,17 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
struct atom_context *atom_context;
atom_context = adev->mode_info.atom_context;
memcpy(vbios_info.name, atom_context->name, sizeof(atom_context->name));
memcpy(vbios_info.vbios_pn, atom_context->vbios_pn, sizeof(atom_context->vbios_pn));
vbios_info.version = atom_context->version;
memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
sizeof(atom_context->vbios_ver_str));
memcpy(vbios_info.date, atom_context->date, sizeof(atom_context->date));
if (atom_context) {
memcpy(vbios_info.name, atom_context->name,
sizeof(atom_context->name));
memcpy(vbios_info.vbios_pn, atom_context->vbios_pn,
sizeof(atom_context->vbios_pn));
vbios_info.version = atom_context->version;
memcpy(vbios_info.vbios_ver_str, atom_context->vbios_ver_str,
sizeof(atom_context->vbios_ver_str));
memcpy(vbios_info.date, atom_context->date,
sizeof(atom_context->date));
}
return copy_to_user(out, &vbios_info,
min((size_t)size, sizeof(vbios_info))) ? -EFAULT : 0;


@ -1052,7 +1052,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
info->ce_count = obj->err_data.ce_count;
if (err_data.ce_count) {
if (adev->smuio.funcs &&
if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
dev_info(adev->dev, "socket: %d, die: %d "
@ -1072,7 +1073,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
}
}
if (err_data.ue_count) {
if (adev->smuio.funcs &&
if (!adev->aid_mask &&
adev->smuio.funcs &&
adev->smuio.funcs->get_socket_id &&
adev->smuio.funcs->get_die_id) {
dev_info(adev->dev, "socket: %d, die: %d "


@ -81,7 +81,7 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
unsigned int size)
{
struct drm_suballoc *sa = drm_suballoc_new(&sa_manager->base, size,
GFP_KERNEL, true, 0);
GFP_KERNEL, false, 0);
if (IS_ERR(sa)) {
*sa_bo = NULL;


@ -9449,7 +9449,7 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
gfx_v10_0_set_user_wgp_inactive_bitmap_per_sh(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v10_0_get_cu_active_bitmap_per_sh(adev);
cu_info->bitmap[i][j] = bitmap;
cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {


@ -6368,7 +6368,7 @@ static int gfx_v11_0_get_cu_info(struct amdgpu_device *adev,
* SE6: {SH0,SH1} --> {bitmap[2][2], bitmap[2][3]}
* SE7: {SH0,SH1} --> {bitmap[3][2], bitmap[3][3]}
*/
cu_info->bitmap[i % 4][j + (i / 4) * 2] = bitmap;
cu_info->bitmap[0][i % 4][j + (i / 4) * 2] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask)


@ -3577,7 +3577,7 @@ static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
gfx_v6_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v6_0_get_cu_enabled(adev);
cu_info->bitmap[i][j] = bitmap;
cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {


@ -5119,7 +5119,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
gfx_v7_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {


@ -7121,7 +7121,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
gfx_v8_0_set_user_cu_inactive_bitmap(
adev, disable_masks[i * 2 + j]);
bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
cu_info->bitmap[i][j] = bitmap;
cu_info->bitmap[0][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (bitmap & mask) {


@ -1499,7 +1499,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
amdgpu_gfx_select_se_sh(adev, i, j, 0xffffffff, 0);
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (cu_info->bitmap[i][j] & mask) {
if (cu_info->bitmap[0][i][j] & mask) {
if (counter == pg_always_on_cu_num)
WREG32_SOC15(GC, 0, mmRLC_PG_ALWAYS_ON_CU_MASK, cu_bitmap);
if (counter < always_on_cu_num)
@ -7233,7 +7233,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev,
* SE6,SH0 --> bitmap[2][1]
* SE7,SH0 --> bitmap[3][1]
*/
cu_info->bitmap[i % 4][j + i / 4] = bitmap;
cu_info->bitmap[0][i % 4][j + i / 4] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {
if (bitmap & mask) {


@ -4259,7 +4259,7 @@ static void gfx_v9_4_3_set_gds_init(struct amdgpu_device *adev)
}
static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
u32 bitmap)
u32 bitmap, int xcc_id)
{
u32 data;
@ -4269,15 +4269,15 @@ static void gfx_v9_4_3_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
WREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG, data);
WREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG, data);
}
static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev, int xcc_id)
{
u32 data, mask;
data = RREG32_SOC15(GC, GET_INST(GC, 0), regCC_GC_SHADER_ARRAY_CONFIG);
data |= RREG32_SOC15(GC, GET_INST(GC, 0), regGC_USER_SHADER_ARRAY_CONFIG);
data = RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCC_GC_SHADER_ARRAY_CONFIG);
data |= RREG32_SOC15(GC, GET_INST(GC, xcc_id), regGC_USER_SHADER_ARRAY_CONFIG);
data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
@@ -4290,7 +4290,7 @@ static u32 gfx_v9_4_3_get_cu_active_bitmap(struct amdgpu_device *adev)
static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
struct amdgpu_cu_info *cu_info)
{
int i, j, k, counter, active_cu_number = 0;
int i, j, k, counter, xcc_id, active_cu_number = 0;
u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
unsigned disable_masks[4 * 4];
@@ -4309,46 +4309,38 @@ static int gfx_v9_4_3_get_cu_info(struct amdgpu_device *adev,
adev->gfx.config.max_sh_per_se);
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, 0);
gfx_v9_4_3_set_user_cu_inactive_bitmap(
adev, disable_masks[i * adev->gfx.config.max_sh_per_se + j]);
bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev);
for (xcc_id = 0; xcc_id < NUM_XCC(adev->gfx.xcc_mask); xcc_id++) {
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
mask = 1;
ao_bitmap = 0;
counter = 0;
gfx_v9_4_3_xcc_select_se_sh(adev, i, j, 0xffffffff, xcc_id);
gfx_v9_4_3_set_user_cu_inactive_bitmap(
adev,
disable_masks[i * adev->gfx.config.max_sh_per_se + j],
xcc_id);
bitmap = gfx_v9_4_3_get_cu_active_bitmap(adev, xcc_id);
/*
* The bitmap(and ao_cu_bitmap) in cu_info structure is
* 4x4 size array, and it's usually suitable for Vega
* ASICs which has 4*2 SE/SH layout.
* But for Arcturus, SE/SH layout is changed to 8*1.
* To mostly reduce the impact, we make it compatible
* with current bitmap array as below:
* SE4,SH0 --> bitmap[0][1]
* SE5,SH0 --> bitmap[1][1]
* SE6,SH0 --> bitmap[2][1]
* SE7,SH0 --> bitmap[3][1]
*/
cu_info->bitmap[i % 4][j + i / 4] = bitmap;
cu_info->bitmap[xcc_id][i][j] = bitmap;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {
if (counter < adev->gfx.config.max_cu_per_sh)
ao_bitmap |= mask;
counter++;
for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
if (bitmap & mask) {
if (counter < adev->gfx.config.max_cu_per_sh)
ao_bitmap |= mask;
counter++;
}
mask <<= 1;
}
mask <<= 1;
active_cu_number += counter;
if (i < 2 && j < 2)
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
}
active_cu_number += counter;
if (i < 2 && j < 2)
ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
cu_info->ao_cu_bitmap[i % 4][j + i / 4] = ao_bitmap;
}
gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
xcc_id);
}
gfx_v9_4_3_xcc_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff,
0);
mutex_unlock(&adev->grbm_idx_mutex);
cu_info->number = active_cu_number;

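The gfx_v9_4_3 rework above drops that folding and instead indexes the CU bitmap by XCC instance first. A rough standalone sketch of the new shape; MAX_XCC and query_cu_bitmap() are placeholder names, not real amdgpu API:

#include <stdio.h>

#define MAX_XCC 8 /* stand-in for AMDGPU_MAX_GC_INSTANCES */

static unsigned int bitmap[MAX_XCC][4][4];

/* Hypothetical stand-in for the per-XCC register reads done by
 * gfx_v9_4_3_get_cu_active_bitmap(); returns a fixed dummy value.
 */
static unsigned int query_cu_bitmap(unsigned int xcc, unsigned int se,
				    unsigned int sh)
{
	return 0xf;
}

static void fill_cu_bitmaps(unsigned int num_xcc, unsigned int num_se,
			    unsigned int num_sh)
{
	unsigned int xcc, se, sh;

	/* XCC is the outermost dimension; no SE/SH folding needed */
	for (xcc = 0; xcc < num_xcc; xcc++)
		for (se = 0; se < num_se; se++)
			for (sh = 0; sh < num_sh; sh++)
				bitmap[xcc][se][sh] = query_cu_bitmap(xcc, se, sh);
}

int main(void)
{
	fill_cu_bitmaps(2, 4, 1);
	printf("bitmap[1][3][0] = 0x%x\n", bitmap[1][3][0]);
	return 0;
}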

@@ -345,6 +345,9 @@ static void nbio_v4_3_init_registers(struct amdgpu_device *adev)
data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK;
WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data);
}
if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}
static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev)


@@ -766,7 +766,7 @@ static int soc21_common_hw_init(void *handle)
* for the purpose of expose those registers
* to process space
*/
if (adev->nbio.funcs->remap_hdp_registers)
if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
adev->nbio.funcs->enable_doorbell_aperture(adev, true);


@@ -2087,7 +2087,8 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image,
amdgpu_amdkfd_get_cu_info(kdev->adev, &cu_info);
cu->num_simd_per_cu = cu_info.simd_per_cu;
cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
cu->num_simd_cores = cu_info.simd_per_cu *
(cu_info.cu_active_number / kdev->kfd->num_nodes);
cu->max_waves_simd = cu_info.max_waves_per_simd;
cu->wave_front_size = cu_info.wave_front_size;


@@ -79,6 +79,10 @@ struct crat_header {
#define CRAT_SUBTYPE_IOLINK_AFFINITY 5
#define CRAT_SUBTYPE_MAX 6
/*
* Do not change the value of CRAT_SIBLINGMAP_SIZE from 32
* as it breaks the ABI.
*/
#define CRAT_SIBLINGMAP_SIZE 32
/*


@@ -1677,8 +1677,7 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm->dev->kfd2kgd->build_grace_period_packet_info(
dqm->dev->adev, dqm->wait_times,
grace_period, &reg_offset,
&dqm->wait_times,
ffs(dqm->dev->xcc_mask) - 1);
&dqm->wait_times);
}
dqm_unlock(dqm);


@@ -162,6 +162,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
return NULL;
*doorbell_off = amdgpu_doorbell_index_on_bar(kfd->adev, kfd->doorbells, inx);
inx *= 2;
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
@@ -176,6 +177,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
unsigned int inx;
inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
inx /= 2;
mutex_lock(&kfd->doorbell_mutex);
__clear_bit(inx, kfd->doorbell_bitmap);


@@ -97,18 +97,22 @@ void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
const uint32_t *cu_mask, uint32_t cu_mask_count,
uint32_t *se_mask)
uint32_t *se_mask, uint32_t inst)
{
struct kfd_cu_info cu_info;
uint32_t cu_per_sh[KFD_MAX_NUM_SE][KFD_MAX_NUM_SH_PER_SE] = {0};
bool wgp_mode_req = KFD_GC_VERSION(mm->dev) >= IP_VERSION(10, 0, 0);
uint32_t en_mask = wgp_mode_req ? 0x3 : 0x1;
int i, se, sh, cu, cu_bitmap_sh_mul, inc = wgp_mode_req ? 2 : 1;
int i, se, sh, cu, cu_bitmap_sh_mul, cu_inc = wgp_mode_req ? 2 : 1;
uint32_t cu_active_per_node;
int inc = cu_inc * NUM_XCC(mm->dev->xcc_mask);
int xcc_inst = inst + ffs(mm->dev->xcc_mask) - 1;
amdgpu_amdkfd_get_cu_info(mm->dev->adev, &cu_info);
if (cu_mask_count > cu_info.cu_active_number)
cu_mask_count = cu_info.cu_active_number;
cu_active_per_node = cu_info.cu_active_number / mm->dev->kfd->num_nodes;
if (cu_mask_count > cu_active_per_node)
cu_mask_count = cu_active_per_node;
/* Exceeding these bounds corrupts the stack and indicates a coding error.
* Returning with no CU's enabled will hang the queue, which should be
@@ -141,7 +145,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
for (se = 0; se < cu_info.num_shader_engines; se++)
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
cu_per_sh[se][sh] = hweight32(
cu_info.cu_bitmap[se % 4][sh + (se / 4) * cu_bitmap_sh_mul]);
cu_info.cu_bitmap[xcc_inst][se % 4][sh + (se / 4) *
cu_bitmap_sh_mul]);
/* Symmetrically map cu_mask to all SEs & SHs:
* se_mask programs up to 2 SH in the upper and lower 16 bits.
@@ -164,20 +169,33 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
* cu_mask[0] bit8 -> se_mask[0] bit1 (SE0,SH0,CU1)
* ...
*
* For GFX 9.4.3, the following code only looks at a
* subset of the cu_mask corresponding to the inst parameter.
* If we have n XCCs under one GPU node
* cu_mask[0] bit0 -> XCC0 se_mask[0] bit0 (XCC0,SE0,SH0,CU0)
* cu_mask[0] bit1 -> XCC1 se_mask[0] bit0 (XCC1,SE0,SH0,CU0)
* ..
* cu_mask[0] bitn -> XCCn se_mask[0] bit0 (XCCn,SE0,SH0,CU0)
* cu_mask[0] bit n+1 -> XCC0 se_mask[1] bit0 (XCC0,SE1,SH0,CU0)
*
* For example, if there are 6 XCCs under 1 KFD node, this code
* running for each inst, will look at the bits as:
* inst, inst + 6, inst + 12...
*
* First ensure all CUs are disabled, then enable user specified CUs.
*/
for (i = 0; i < cu_info.num_shader_engines; i++)
se_mask[i] = 0;
i = 0;
for (cu = 0; cu < 16; cu += inc) {
i = inst;
for (cu = 0; cu < 16; cu += cu_inc) {
for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++) {
for (se = 0; se < cu_info.num_shader_engines; se++) {
if (cu_per_sh[se][sh] > cu) {
if (cu_mask[i / 32] & (en_mask << (i % 32)))
se_mask[se] |= en_mask << (cu + sh * 16);
i += inc;
if (i == cu_mask_count)
if (i >= cu_mask_count)
return;
}
}

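The XCC striding described in the comment above is easy to see in isolation: with num_xcc instances, the pass for instance inst consumes cu_mask bits inst, inst + num_xcc, inst + 2*num_xcc, and so on. A minimal standalone sketch, assuming 6 XCCs and a 32-bit mask as in the comment's example:

#include <stdio.h>

int main(void)
{
	unsigned int num_xcc = 6, inst, bit;

	for (inst = 0; inst < num_xcc; inst++) {
		printf("inst %u reads cu_mask bits:", inst);
		for (bit = inst; bit < 32; bit += num_xcc)
			printf(" %u", bit);
		printf("\n");
	}
	return 0;
}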

@@ -138,7 +138,7 @@ void free_mqd_hiq_sdma(struct mqd_manager *mm, void *mqd,
void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
const uint32_t *cu_mask, uint32_t cu_mask_count,
uint32_t *se_mask);
uint32_t *se_mask, uint32_t inst);
int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
uint32_t pipe_id, uint32_t queue_id,


@@ -52,7 +52,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
return;
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];


@@ -52,7 +52,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
return;
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];


@@ -71,7 +71,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
}
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
@@ -321,6 +321,43 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
return 0;
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
struct v11_compute_mqd *m;
m = get_mqd(mqd);
memcpy(mqd_dst, m, sizeof(struct v11_compute_mqd));
}
static void restore_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *qp,
const void *mqd_src,
const void *ctl_stack_src, const u32 ctl_stack_size)
{
uint64_t addr;
struct v11_compute_mqd *m;
m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
addr = mqd_mem_obj->gpu_addr;
memcpy(m, mqd_src, sizeof(*m));
*mqd = m;
if (gart_addr)
*gart_addr = addr;
m->cp_hqd_pq_doorbell_control =
qp->doorbell_off <<
CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
m->cp_hqd_pq_doorbell_control);
qp->is_active = 0;
}
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -458,6 +495,8 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->mqd_size = sizeof(struct v11_compute_mqd);
mqd->get_wave_state = get_wave_state;
mqd->mqd_stride = kfd_mqd_stride;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
#if defined(CONFIG_DEBUG_FS)
mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
@@ -502,6 +541,8 @@ struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
mqd->update_mqd = update_mqd_sdma;
mqd->destroy_mqd = kfd_destroy_mqd_sdma;
mqd->is_occupied = kfd_is_occupied_sdma;
mqd->checkpoint_mqd = checkpoint_mqd;
mqd->restore_mqd = restore_mqd;
mqd->mqd_size = sizeof(struct v11_sdma_mqd);
mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)


@@ -60,7 +60,7 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct mqd_update_info *minfo)
struct mqd_update_info *minfo, uint32_t inst)
{
struct v9_mqd *m;
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
@@ -69,27 +69,36 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
return;
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, inst);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
m->compute_static_thread_mgmt_se4 = se_mask[4];
m->compute_static_thread_mgmt_se5 = se_mask[5];
m->compute_static_thread_mgmt_se6 = se_mask[6];
m->compute_static_thread_mgmt_se7 = se_mask[7];
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) {
m->compute_static_thread_mgmt_se4 = se_mask[4];
m->compute_static_thread_mgmt_se5 = se_mask[5];
m->compute_static_thread_mgmt_se6 = se_mask[6];
m->compute_static_thread_mgmt_se7 = se_mask[7];
pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
m->compute_static_thread_mgmt_se0,
m->compute_static_thread_mgmt_se1,
m->compute_static_thread_mgmt_se2,
m->compute_static_thread_mgmt_se3,
m->compute_static_thread_mgmt_se4,
m->compute_static_thread_mgmt_se5,
m->compute_static_thread_mgmt_se6,
m->compute_static_thread_mgmt_se7);
pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
m->compute_static_thread_mgmt_se0,
m->compute_static_thread_mgmt_se1,
m->compute_static_thread_mgmt_se2,
m->compute_static_thread_mgmt_se3,
m->compute_static_thread_mgmt_se4,
m->compute_static_thread_mgmt_se5,
m->compute_static_thread_mgmt_se6,
m->compute_static_thread_mgmt_se7);
} else {
pr_debug("inst: %u, update cu mask to %#x %#x %#x %#x\n",
inst, m->compute_static_thread_mgmt_se0,
m->compute_static_thread_mgmt_se1,
m->compute_static_thread_mgmt_se2,
m->compute_static_thread_mgmt_se3);
}
}
static void set_priority(struct v9_mqd *m, struct queue_properties *q)
@@ -290,7 +299,8 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, minfo);
if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3))
update_cu_mask(mm, mqd, minfo, 0);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
@@ -676,6 +686,8 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd + size * xcc);
update_mqd(mm, m, q, minfo);
update_cu_mask(mm, mqd, minfo, xcc);
if (q->format == KFD_QUEUE_FORMAT_AQL) {
switch (xcc) {
case 0:


@@ -55,7 +55,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
return;
mqd_symmetrically_map_cu_mask(mm,
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];


@@ -299,8 +299,7 @@ static int pm_set_grace_period_v9(struct packet_manager *pm,
pm->dqm->wait_times,
grace_period,
&reg_offset,
&reg_data,
0);
&reg_data);
if (grace_period == USE_DEFAULT_GRACE_PERIOD)
reg_data = pm->dqm->wait_times;


@@ -1466,8 +1466,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) ||
return KFD_GC_VERSION(dev) > IP_VERSION(9, 4, 2) ||
(KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && dev->sdma_fw_version >= 18) ||
KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0);
}


@@ -450,8 +450,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, offs, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, offs, "simd_count",
dev->gpu ? (dev->node_props.simd_count *
NUM_XCC(dev->gpu->xcc_mask)) : 0);
dev->gpu ? dev->node_props.simd_count : 0);
sysfs_show_32bit_prop(buffer, offs, "mem_banks_count",
dev->node_props.mem_banks_count);
sysfs_show_32bit_prop(buffer, offs, "caches_count",
@@ -1597,14 +1596,17 @@ static int fill_in_l1_pcache(struct kfd_cache_properties **props_ext,
static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
struct kfd_gpu_cache_info *pcache_info,
struct kfd_cu_info *cu_info,
int cache_type, unsigned int cu_processor_id)
int cache_type, unsigned int cu_processor_id,
struct kfd_node *knode)
{
unsigned int cu_sibling_map_mask;
int first_active_cu;
int i, j, k;
int i, j, k, xcc, start, end;
struct kfd_cache_properties *pcache = NULL;
cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
start = ffs(knode->xcc_mask) - 1;
end = start + NUM_XCC(knode->xcc_mask);
cu_sibling_map_mask = cu_info->cu_bitmap[start][0][0];
cu_sibling_map_mask &=
((1 << pcache_info[cache_type].num_cu_shared) - 1);
first_active_cu = ffs(cu_sibling_map_mask);
@@ -1639,16 +1641,18 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
cu_sibling_map_mask = cu_sibling_map_mask >> (first_active_cu - 1);
k = 0;
for (i = 0; i < cu_info->num_shader_engines; i++) {
for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
k += 4;
for (xcc = start; xcc < end; xcc++) {
for (i = 0; i < cu_info->num_shader_engines; i++) {
for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
pcache->sibling_map[k] = (uint8_t)(cu_sibling_map_mask & 0xFF);
pcache->sibling_map[k+1] = (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
pcache->sibling_map[k+2] = (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
pcache->sibling_map[k+3] = (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
k += 4;
cu_sibling_map_mask = cu_info->cu_bitmap[i % 4][j + i / 4];
cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
cu_sibling_map_mask = cu_info->cu_bitmap[xcc][i % 4][j + i / 4];
cu_sibling_map_mask &= ((1 << pcache_info[cache_type].num_cu_shared) - 1);
}
}
}
pcache->sibling_map_size = k;
@@ -1666,7 +1670,7 @@ static int fill_in_l2_l3_pcache(struct kfd_cache_properties **props_ext,
static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct kfd_node *kdev)
{
struct kfd_gpu_cache_info *pcache_info = NULL;
int i, j, k;
int i, j, k, xcc, start, end;
int ct = 0;
unsigned int cu_processor_id;
int ret;
@@ -1700,37 +1704,42 @@ static void kfd_fill_cache_non_crat_info(struct kfd_topology_device *dev, struct
* then it will consider only one CU from
* the shared unit
*/
start = ffs(kdev->xcc_mask) - 1;
end = start + NUM_XCC(kdev->xcc_mask);
for (ct = 0; ct < num_of_cache_types; ct++) {
cu_processor_id = gpu_processor_id;
if (pcache_info[ct].cache_level == 1) {
for (i = 0; i < pcu_info->num_shader_engines; i++) {
for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
for (xcc = start; xcc < end; xcc++) {
for (i = 0; i < pcu_info->num_shader_engines; i++) {
for (j = 0; j < pcu_info->num_shader_arrays_per_engine; j++) {
for (k = 0; k < pcu_info->num_cu_per_sh; k += pcache_info[ct].num_cu_shared) {
ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
pcu_info->cu_bitmap[i % 4][j + i / 4], ct,
ret = fill_in_l1_pcache(&props_ext, pcache_info, pcu_info,
pcu_info->cu_bitmap[xcc][i % 4][j + i / 4], ct,
cu_processor_id, k);
if (ret < 0)
break;
if (ret < 0)
break;
if (!ret) {
num_of_entries++;
list_add_tail(&props_ext->list, &dev->cache_props);
if (!ret) {
num_of_entries++;
list_add_tail(&props_ext->list, &dev->cache_props);
}
/* Move to next CU block */
num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
pcu_info->num_cu_per_sh) ?
pcache_info[ct].num_cu_shared :
(pcu_info->num_cu_per_sh - k);
cu_processor_id += num_cu_shared;
}
/* Move to next CU block */
num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
pcu_info->num_cu_per_sh) ?
pcache_info[ct].num_cu_shared :
(pcu_info->num_cu_per_sh - k);
cu_processor_id += num_cu_shared;
}
}
}
} else {
ret = fill_in_l2_l3_pcache(&props_ext, pcache_info,
pcu_info, ct, cu_processor_id);
pcu_info, ct, cu_processor_id, kdev);
if (ret < 0)
break;


@@ -89,7 +89,7 @@ struct kfd_mem_properties {
struct attribute attr;
};
#define CACHE_SIBLINGMAP_SIZE 64
#define CACHE_SIBLINGMAP_SIZE 128
struct kfd_cache_properties {
struct list_head list;


@@ -1274,11 +1274,15 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >>
AMDGPU_GPU_PAGE_SHIFT);
page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >>
AMDGPU_GPU_PAGE_SHIFT);
page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >>
AMDGPU_GPU_PAGE_SHIFT);
page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >>
AMDGPU_GPU_PAGE_SHIFT);
page_table_base.high_part = upper_32_bits(pt_base);
page_table_base.low_part = lower_32_bits(pt_base);
pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
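For illustration, the rewritten setup above derives the GPU page number once and splits it into 32-bit halves, instead of open-coding the >> 44 / >> 12 split (note it also stops masking the high part to 4 bits). A standalone sketch, assuming a GPU page shift of 12:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12 /* assumed value of AMDGPU_GPU_PAGE_SHIFT */

int main(void)
{
	uint64_t gart_start = 0x0000123456789000ULL; /* example address */
	uint64_t pfn = gart_start >> GPU_PAGE_SHIFT;
	uint32_t high = (uint32_t)(pfn >> 32);          /* upper_32_bits() */
	uint32_t low = (uint32_t)(pfn & 0xffffffffULL); /* lower_32_bits() */

	printf("pfn=0x%llx high=0x%x low=0x%x\n",
	       (unsigned long long)pfn, high, low);
	return 0;
}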
@@ -1640,8 +1644,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
}
break;
}
if (init_data.flags.gpu_vm_support)
init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
if (init_data.flags.gpu_vm_support &&
(amdgpu_sg_display == 0))
init_data.flags.gpu_vm_support = false;
if (init_data.flags.gpu_vm_support)
adev->mode_info.gpu_vm_support = true;
@@ -2335,14 +2340,62 @@ static int dm_late_init(void *handle)
return detect_mst_link_for_all_connectors(adev_to_drm(adev));
}
static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
{
int ret;
u8 guid[16];
u64 tmp64;
mutex_lock(&mgr->lock);
if (!mgr->mst_primary)
goto out_fail;
if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN |
DP_UP_REQ_EN |
DP_UPSTREAM_IS_SRC);
if (ret < 0) {
drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
goto out_fail;
}
/* Some hubs forget their guids after they resume */
ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
if (ret != 16) {
drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
if (memchr_inv(guid, 0, 16) == NULL) {
tmp64 = get_jiffies_64();
memcpy(&guid[0], &tmp64, sizeof(u64));
memcpy(&guid[8], &tmp64, sizeof(u64));
ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
if (ret != 16) {
drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
goto out_fail;
}
}
memcpy(mgr->mst_primary->guid, guid, 16);
out_fail:
mutex_unlock(&mgr->lock);
}
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
bool need_hotplug = false;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -2364,18 +2417,15 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
if (!dp_is_lttpr_present(aconnector->dc_link))
try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
ret = drm_dp_mst_topology_mgr_resume(mgr, true);
if (ret < 0) {
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
aconnector->dc_link);
need_hotplug = true;
}
/* TODO: move resume_mst_branch_status() into drm mst resume again
* once topology probing work is pulled out from mst resume into mst
* resume 2nd step. mst resume 2nd step should be called after old
* state getting restored (i.e. drm_atomic_helper_resume()).
*/
resume_mst_branch_status(mgr);
}
}
drm_connector_list_iter_end(&iter);
if (need_hotplug)
drm_kms_helper_hotplug_event(dev);
}
static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
@@ -2769,7 +2819,8 @@ static int dm_resume(void *handle)
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
struct dc_state *dc_state;
int i, r, j;
int i, r, j, ret;
bool need_hotplug = false;
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;
@@ -2867,7 +2918,7 @@ static int dm_resume(void *handle)
continue;
/*
* this is the case when traversing through already created
* this is the case when traversing through already created end sink
* MST connectors, should be skipped
*/
if (aconnector && aconnector->mst_root)
@@ -2927,6 +2978,27 @@ static int dm_resume(void *handle)
dm->cached_state = NULL;
/* Do mst topology probing after resuming cached state*/
drm_connector_list_iter_begin(ddev, &iter);
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_root)
continue;
ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
if (ret < 0) {
dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
aconnector->dc_link);
need_hotplug = true;
}
}
drm_connector_list_iter_end(&iter);
if (need_hotplug)
drm_kms_helper_hotplug_event(ddev);
amdgpu_dm_irq_resume_late(adev);
amdgpu_dm_smu_write_watermarks_table(adev);
@@ -8073,7 +8145,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
bundle->surface_updates[planes_count].plane_info =
&bundle->plane_infos[planes_count];
if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
fill_dc_dirty_rects(plane, old_plane_state,
new_plane_state, new_crtc_state,
&bundle->flip_addrs[planes_count],


@@ -620,7 +620,7 @@ struct amdgpu_hdmi_vsdb_info {
unsigned int max_refresh_rate_hz;
/**
* @replay mode: Replay supported
* @replay_mode: Replay supported
*/
bool replay_mode;
};


@@ -169,11 +169,23 @@ static void add_link_enc_assignment(
/* Return first available DIG link encoder. */
static enum engine_id find_first_avail_link_enc(
const struct dc_context *ctx,
const struct dc_state *state)
const struct dc_state *state,
enum engine_id eng_id_requested)
{
enum engine_id eng_id = ENGINE_ID_UNKNOWN;
int i;
if (eng_id_requested != ENGINE_ID_UNKNOWN) {
for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
if (eng_id == eng_id_requested)
return eng_id;
}
}
eng_id = ENGINE_ID_UNKNOWN;
for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
if (eng_id != ENGINE_ID_UNKNOWN)
@@ -287,7 +299,7 @@ void link_enc_cfg_link_encs_assign(
struct dc_stream_state *streams[],
uint8_t stream_count)
{
enum engine_id eng_id = ENGINE_ID_UNKNOWN;
enum engine_id eng_id = ENGINE_ID_UNKNOWN, eng_id_req = ENGINE_ID_UNKNOWN;
int i;
int j;
@@ -377,8 +389,14 @@ void link_enc_cfg_link_encs_assign(
* assigned to that endpoint.
*/
link_enc = get_link_enc_used_by_link(state, stream->link);
if (link_enc == NULL)
eng_id = find_first_avail_link_enc(stream->ctx, state);
if (link_enc == NULL) {
if (stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
stream->link->dpia_preferred_eng_id != ENGINE_ID_UNKNOWN)
eng_id_req = stream->link->dpia_preferred_eng_id;
eng_id = find_first_avail_link_enc(stream->ctx, state, eng_id_req);
}
else
eng_id = link_enc->preferred_engine;
@@ -402,7 +420,9 @@ void link_enc_cfg_link_encs_assign(
DC_LOG_DEBUG("%s: CUR %s(%d) - enc_id(%d)\n",
__func__,
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
assignment.ep_id.link_id.enum_id - 1,
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
assignment.ep_id.link_id.enum_id :
assignment.ep_id.link_id.enum_id - 1,
assignment.eng_id);
}
for (i = 0; i < MAX_PIPES; i++) {
@@ -413,7 +433,9 @@
DC_LOG_DEBUG("%s: NEW %s(%d) - enc_id(%d)\n",
__func__,
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ? "PHY" : "DPIA",
assignment.ep_id.link_id.enum_id - 1,
assignment.ep_id.ep_type == DISPLAY_ENDPOINT_PHY ?
assignment.ep_id.link_id.enum_id :
assignment.ep_id.link_id.enum_id - 1,
assignment.eng_id);
}
@@ -478,7 +500,6 @@ struct dc_link *link_enc_cfg_get_link_using_link_enc(
if (stream)
link = stream->link;
// dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
return link;
}


@@ -1496,6 +1496,7 @@ struct dc_link {
* object creation.
*/
enum engine_id eng_id;
enum engine_id dpia_preferred_eng_id;
bool test_pattern_enabled;
enum dp_test_pattern current_test_pattern;


@@ -964,7 +964,9 @@ void dce110_edp_backlight_control(
return;
}
if (link->panel_cntl) {
if (link->panel_cntl && !(link->dpcd_sink_ext_caps.bits.oled ||
link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
bool is_backlight_on = link->panel_cntl->funcs->is_panel_backlight_on(link->panel_cntl);
if ((enable && is_backlight_on) || (!enable && !is_backlight_on)) {


@@ -1032,6 +1032,28 @@ static const struct dce_i2c_mask i2c_masks = {
I2C_COMMON_MASK_SH_LIST_DCN30(_MASK)
};
/* ========================================================== */
/*
* DPIA index | Preferred Encoder | Host Router
* 0 | C | 0
* 1 | First Available | 0
* 2 | D | 1
* 3 | First Available | 1
*/
/* ========================================================== */
static const enum engine_id dpia_to_preferred_enc_id_table[] = {
ENGINE_ID_DIGC,
ENGINE_ID_DIGC,
ENGINE_ID_DIGD,
ENGINE_ID_DIGD
};
static enum engine_id dcn314_get_preferred_eng_id_dpia(unsigned int dpia_index)
{
return dpia_to_preferred_enc_id_table[dpia_index];
}
static struct dce_i2c_hw *dcn31_i2c_hw_create(
struct dc_context *ctx,
uint32_t inst)
@@ -1785,6 +1807,7 @@ static struct resource_funcs dcn314_res_pool_funcs = {
.update_bw_bounding_box = dcn314_update_bw_bounding_box,
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.get_panel_config_defaults = dcn314_get_panel_config_defaults,
.get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia,
};
static struct clock_source *dcn30_clock_source_create(


@@ -65,6 +65,7 @@ struct resource_context;
struct clk_bw_params;
struct resource_funcs {
enum engine_id (*get_preferred_eng_id_dpia)(unsigned int dpia_index);
void (*destroy)(struct resource_pool **pool);
void (*link_init)(struct dc_link *link);
struct panel_cntl*(*panel_cntl_create)(


@@ -791,6 +791,10 @@ static bool construct_dpia(struct dc_link *link,
/* Set dpia port index : 0 to number of dpia ports */
link->ddc_hw_inst = init_params->connector_index;
// Assign Dpia preferred eng_id
if (link->dc->res_pool->funcs->get_preferred_eng_id_dpia)
link->dpia_preferred_eng_id = link->dc->res_pool->funcs->get_preferred_eng_id_dpia(link->ddc_hw_inst);
/* TODO: Create link encoder */
link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;


@@ -31,12 +31,12 @@
#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/dma-fence.h>
#include "amdgpu_irq.h"
#include "amdgpu_gfx.h"
struct pci_dev;
struct amdgpu_device;
#define KGD_MAX_QUEUES 128
struct kfd_dev;
struct kgd_mem;
@@ -68,7 +68,7 @@ struct kfd_cu_info {
uint32_t wave_front_size;
uint32_t max_scratch_slots_per_cu;
uint32_t lds_size;
uint32_t cu_bitmap[4][4];
uint32_t cu_bitmap[AMDGPU_MAX_GC_INSTANCES][4][4];
};
/* For getting GPU local memory information from KGD */
@@ -326,8 +326,7 @@ struct kfd2kgd_calls {
uint32_t wait_times,
uint32_t grace_period,
uint32_t *reg_offset,
uint32_t *reg_data,
uint32_t inst);
uint32_t *reg_data);
void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid,
int *wave_cnt, int *max_waves_per_cu, uint32_t inst);
void (*program_trap_handler_settings)(struct amdgpu_device *adev,


@@ -336,7 +336,7 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
/* Store one-time values in driver PPTable */
if (!pptable->Init) {
while (retry--) {
while (--retry) {
ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
if (ret)
return ret;

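The one-character change above swaps a post-decrement for a pre-decrement in the loop condition, which changes both how many times the body runs and the counter's final value: 0 instead of -1 when the retries run out, which matters if the counter is tested afterwards. A standalone sketch of the difference:

#include <stdio.h>

int main(void)
{
	int a = 3, b = 3, runs_a = 0, runs_b = 0;

	while (a--)	/* body runs 3 times, a ends at -1 */
		runs_a++;
	while (--b)	/* body runs 2 times, b ends at 0 */
		runs_b++;
	printf("post-decrement: runs=%d final=%d\n", runs_a, a);
	printf("pre-decrement:  runs=%d final=%d\n", runs_b, b);
	return 0;
}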

@@ -2203,6 +2203,7 @@ static int drm_mode_create_colorspace_property(struct drm_connector *connector,
/**
* drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property
* @connector: connector to create the Colorspace property on.
* @supported_colorspaces: bitmap of supported color spaces
*
* Called by a driver the first time it's needed, must be attached to desired
* HDMI connectors.
@@ -2227,6 +2228,7 @@ EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property);
/**
* drm_mode_create_dp_colorspace_property - create dp colorspace property
* @connector: connector to create the Colorspace property on.
* @supported_colorspaces: bitmap of supported color spaces
*
* Called by a driver the first time it's needed, must be attached to desired
* DP connectors.


@@ -56,7 +56,7 @@ static void drm_exec_unlock_all(struct drm_exec *exec)
struct drm_gem_object *obj;
unsigned long index;
drm_exec_for_each_locked_object(exec, index, obj) {
drm_exec_for_each_locked_object_reverse(exec, index, obj) {
dma_resv_unlock(obj->resv);
drm_gem_object_put(obj);
}


@@ -3540,6 +3540,27 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata)
return map_aux_ch(devdata->i915, devdata->child.aux_channel);
}
bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata)
{
struct drm_i915_private *i915;
u8 aux_channel;
int count = 0;
if (!devdata || !devdata->child.aux_channel)
return false;
i915 = devdata->i915;
aux_channel = devdata->child.aux_channel;
list_for_each_entry(devdata, &i915->display.vbt.display_devices, node) {
if (intel_bios_encoder_supports_dp(devdata) &&
aux_channel == devdata->child.aux_channel)
count++;
}
return count > 1;
}
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata)
{
if (!devdata || devdata->i915->display.vbt.version < 196 || !devdata->child.iboost)


@@ -273,6 +273,7 @@ enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata);
int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata);
bool intel_bios_dp_has_shared_aux_ch(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_boost_level(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_ddc_pin(const struct intel_bios_encoder_data *devdata);
int intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata);


@@ -5512,8 +5512,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
/*
* VBT and straps are liars. Also check HPD as that seems
* to be the most reliable piece of information available.
*
* ... expect on devices that forgot to hook HPD up for eDP
* (eg. Acer Chromebook C710), so we'll check it only if multiple
* ports are attempting to use the same AUX CH, according to VBT.
*/
if (!intel_digital_port_connected(encoder)) {
if (intel_bios_dp_has_shared_aux_ch(encoder->devdata) &&
!intel_digital_port_connected(encoder)) {
/*
* If this fails, presume the DPCD answer came
* from some other port using the same AUX CH.

Some files were not shown because too many files have changed in this diff.