Linux 5.3-rc6
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAl1i2wkeHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGcDQIAJINYON5WdDSFDpp
htva213hSIxYLix8Dc4cTMk8qT/P2MAj9pPYERuLwIxWZlfbduW6Fxy8bJANZ7k3
4cJ/IbmA5M5ZIaOJTTL45w8H0CMR/4mdPl5rb5k/Wkh449Cj101gZLlh0FEtR5zG
uDJecKSuHjH1ikySk6+zmRG5X+lq6wNY8NkuBtfwAwLffFc0ljQHwPUMJ8ojgqt/
p3ChNgtb/I6U6ExITlyktKdP59bAoHAoBiKKFZWw5yJWgXE2q4Sv9nT4Btkr5KdJ
9mnWnSaSLwptNCOtU4tKLwFIZP2WoVXGPNxxq4XLoTEuieXCqmikhc9tSSTwk+Tp
CKHN6wU=
=JkJ4
-----END PGP SIGNATURE-----

Merge tag 'v5.3-rc6' into perf/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 8db5957bc7
@@ -9,7 +9,7 @@ Linux PCI Bus Subsystem
    :numbered:

    pci
-   picebus-howto
+   pciebus-howto
    pci-iov-howto
    msi-howto
    acpi-info
@@ -4090,6 +4090,13 @@
 			Run specified binary instead of /init from the ramdisk,
 			used for early userspace startup. See initrd.

+	rdrand=		[X86]
+			force - Override the decision by the kernel to hide the
+			advertisement of RDRAND support (this affects
+			certain AMD processors because of buggy BIOS
+			support, specifically around the suspend/resume
+			path).
+
 	rdt=		[HW,X86,RDT]
 			Turn on/off individual RDT features. List is:
 			cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
@@ -39,7 +39,6 @@ Table : Subdirectories in /proc/sys/net
 802       E802 protocol         ax25       AX25
 ethernet  Ethernet protocol     rose       X.25 PLP layer
 ipv4      IP version 4          x25        X.25 protocol
-ipx       IPX                   token-ring IBM token ring
 bridge    Bridging              decnet     DEC net
 ipv6      IP version 6          tipc       TIPC
 ========= =================== = ========== ==================
@@ -401,33 +400,7 @@ interface.
 (network) that the route leads to, the router (may be directly connected), the
 route flags, and the device the route is using.


-5. IPX
-------
-
-The IPX protocol has no tunable values in proc/sys/net.
-
-The IPX protocol does, however, provide proc/net/ipx. This lists each IPX
-socket giving the local and remote addresses in Novell format (that is
-network:node:port). In accordance with the strange Novell tradition,
-everything but the port is in hex. Not_Connected is displayed for sockets that
-are not tied to a specific remote address. The Tx and Rx queue sizes indicate
-the number of bytes pending for transmission and reception. The state
-indicates the state the socket is in and the uid is the owning uid of the
-socket.
-
-The /proc/net/ipx_interface file lists all IPX interfaces. For each interface
-it gives the network number, the node number, and indicates if the network is
-the primary network. It also indicates which device it is bound to (or
-Internal for internal networks) and the Frame Type if appropriate. Linux
-supports 802.3, 802.2, 802.2 SNAP and DIX (Blue Book) ethernet framing for
-IPX.
-
-The /proc/net/ipx_route table holds a list of IPX routes. For each route it
-gives the destination network, the router node (or Directly) and the network
-address of the router (or Connected) for internal networks.
-
-6. TIPC
+5. TIPC
 -------

 tipc_rmem
@@ -506,21 +506,3 @@ Drivers should ignore the changes to TLS the device feature flags.
 These flags will be acted upon accordingly by the core ``ktls`` code.
+TLS device feature flags only control adding of new TLS connection
+offloads, old connections will remain active after flags are cleared.
-
-Known bugs
-==========
-
-skb_orphan() leaks clear text
------------------------------
-
-Currently drivers depend on the :c:member:`sk` member of
-:c:type:`struct sk_buff <sk_buff>` to identify segments requiring
-encryption. Any operation which removes or does not preserve the socket
-association such as :c:func:`skb_orphan` or :c:func:`skb_clone`
-will cause the driver to miss the packets and lead to clear text leaks.
-
-Redirects leak clear text
--------------------------
-
-In the RX direction, if segment has already been decrypted by the device
-and it gets redirected or mirrored - clear text will be transmitted out.
@@ -204,8 +204,8 @@ Ethernet device, which instead of receiving packets from a physical
 media, receives them from user space program and instead of sending
 packets via physical media sends them to the user space program.

-Let's say that you configured IPX on the tap0, then whenever
-the kernel sends an IPX packet to tap0, it is passed to the application
+Let's say that you configured IPv6 on the tap0, then whenever
+the kernel sends an IPv6 packet to tap0, it is passed to the application
 (VTun for example). The application encrypts, compresses and sends it to
 the other side over TCP or UDP. The application on the other side decompresses
 and decrypts the data received and writes the packet to the TAP device,
MAINTAINERS
@@ -183,7 +183,7 @@ M: Realtek linux nic maintainers <nic_swsd@realtek.com>
 M:	Heiner Kallweit <hkallweit1@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	drivers/net/ethernet/realtek/r8169.c
+F:	drivers/net/ethernet/realtek/r8169*

 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
@@ -6065,7 +6065,7 @@ M: Florian Fainelli <f.fainelli@gmail.com>
 M:	Heiner Kallweit <hkallweit1@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
-F:	Documentation/ABI/testing/sysfs-bus-mdio
+F:	Documentation/ABI/testing/sysfs-class-net-phydev
 F:	Documentation/devicetree/bindings/net/ethernet-phy.yaml
 F:	Documentation/devicetree/bindings/net/mdio*
 F:	Documentation/networking/phy.rst
@@ -8832,14 +8832,6 @@ F: virt/kvm/*
 F:	tools/kvm/
 F:	tools/testing/selftests/kvm/

-KERNEL VIRTUAL MACHINE FOR AMD-V (KVM/amd)
-M:	Joerg Roedel <joro@8bytes.org>
-L:	kvm@vger.kernel.org
-W:	http://www.linux-kvm.org/
-S:	Maintained
-F:	arch/x86/include/asm/svm.h
-F:	arch/x86/kvm/svm.c
-
 KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
 M:	Marc Zyngier <maz@kernel.org>
 R:	James Morse <james.morse@arm.com>
@@ -8882,7 +8874,7 @@ M: Christian Borntraeger <borntraeger@de.ibm.com>
 M:	Janosch Frank <frankja@linux.ibm.com>
 R:	David Hildenbrand <david@redhat.com>
 R:	Cornelia Huck <cohuck@redhat.com>
 L:	linux-s390@vger.kernel.org
 L:	kvm@vger.kernel.org
 W:	http://www.ibm.com/developerworks/linux/linux390/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
 S:	Supported
@@ -8897,6 +8889,11 @@ F: tools/testing/selftests/kvm/*/s390x/
 KERNEL VIRTUAL MACHINE FOR X86 (KVM/x86)
 M:	Paolo Bonzini <pbonzini@redhat.com>
 M:	Radim Krčmář <rkrcmar@redhat.com>
+R:	Sean Christopherson <sean.j.christopherson@intel.com>
+R:	Vitaly Kuznetsov <vkuznets@redhat.com>
+R:	Wanpeng Li <wanpengli@tencent.com>
+R:	Jim Mattson <jmattson@google.com>
+R:	Joerg Roedel <joro@8bytes.org>
 L:	kvm@vger.kernel.org
 W:	http://www.linux-kvm.org
 T:	git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
@@ -8904,8 +8901,12 @@ S: Supported
 F:	arch/x86/kvm/
+F:	arch/x86/kvm/*/
 F:	arch/x86/include/uapi/asm/kvm*
 F:	arch/x86/include/uapi/asm/vmx.h
+F:	arch/x86/include/uapi/asm/svm.h
 F:	arch/x86/include/asm/kvm*
 F:	arch/x86/include/asm/pvclock-abi.h
+F:	arch/x86/include/asm/svm.h
+F:	arch/x86/include/asm/vmx.h
 F:	arch/x86/kernel/kvm.c
 F:	arch/x86/kernel/kvmclock.c
||||
@ -14882,9 +14883,9 @@ F: include/linux/arm_sdei.h
|
||||
F: include/uapi/linux/arm_sdei.h
|
||||
|
||||
SOFTWARE RAID (Multiple Disks) SUPPORT
|
||||
M: Shaohua Li <shli@kernel.org>
|
||||
M: Song Liu <song@kernel.org>
|
||||
L: linux-raid@vger.kernel.org
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shli/md.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/song/md.git
|
||||
S: Supported
|
||||
F: drivers/md/Makefile
|
||||
F: drivers/md/Kconfig
|
||||
|
Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Bobtail Squid

 # *DOCUMENTATION*
@@ -7,6 +7,8 @@ config ARM
	select ARCH_HAS_BINFMT_FLAT
	select ARCH_HAS_DEBUG_VIRTUAL if MMU
	select ARCH_HAS_DEVMEM_IS_ALLOWED
+	select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
+	select ARCH_HAS_DMA_MMAP_PGPROT if SWIOTLB
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_FORTIFY_SOURCE
	select ARCH_HAS_KEEPINITRD
@@ -18,6 +20,8 @@ config ARM
	select ARCH_HAS_SET_MEMORY
	select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL
	select ARCH_HAS_STRICT_MODULE_RWX if MMU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB
+	select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB
	select ARCH_HAS_TEARDOWN_DMA_OPS if MMU
	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
	select ARCH_HAVE_CUSTOM_GPIO_H
@@ -46,6 +46,7 @@ static int __init parse_tag_acorn(const struct tag *tag)
	switch (tag->u.acorn.vram_pages) {
	case 512:
		vram_size += PAGE_SIZE * 256;
+		/* Fall through - ??? */
	case 256:
		vram_size += PAGE_SIZE * 256;
	default:
@@ -664,10 +664,6 @@ config ARM_LPAE
		!CPU_32v4 && !CPU_32v3
	select PHYS_ADDR_T_64BIT
	select SWIOTLB
-	select ARCH_HAS_DMA_COHERENT_TO_PFN
-	select ARCH_HAS_DMA_MMAP_PGPROT
-	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-	select ARCH_HAS_SYNC_DMA_FOR_CPU
	help
	  Say Y if you have an ARMv7 processor supporting the LPAE page
	  table format and you would like to access memory beyond the
@@ -46,6 +46,7 @@ static inline uint64_t CVMX_SLI_PCIE_MSI_RCV_FUNC(void)
	case OCTEON_CN78XX & OCTEON_FAMILY_MASK:
		if (OCTEON_IS_MODEL(OCTEON_CN78XX_PASS1_X))
			return 0x0000000000003CB0ull;
+		/* Else, fall through */
	default:
		return 0x0000000000023CB0ull;
	}
@@ -2,6 +2,7 @@
 #ifndef _PARISC_PGTABLE_H
 #define _PARISC_PGTABLE_H

+#include <asm/page.h>
 #include <asm-generic/4level-fixup.h>

 #include <asm/fixmap.h>
@@ -98,8 +99,6 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)

 #endif /* !__ASSEMBLY__ */

-#include <asm/page.h>
-
 #define pte_ERROR(e) \
	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
@@ -34,10 +34,13 @@ static inline void time_travel_set_time(unsigned long long ns)
	time_travel_time = ns;
 }

-static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
-					 unsigned long long expiry)
+static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
 {
	time_travel_timer_mode = mode;
 }

+static inline void time_travel_set_timer_expiry(unsigned long long expiry)
+{
+	time_travel_timer_expiry = expiry;
+}
 #else
@@ -50,8 +53,11 @@ static inline void time_travel_set_time(unsigned long long ns)
 {
 }

-static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
-					 unsigned long long expiry)
+static inline void time_travel_set_timer_mode(enum time_travel_timer_mode mode)
 {
 }

+static inline void time_travel_set_timer_expiry(unsigned long long expiry)
+{
+}
@@ -213,7 +213,7 @@ static void time_travel_sleep(unsigned long long duration)
	if (time_travel_timer_mode != TT_TMR_DISABLED ||
	    time_travel_timer_expiry < next) {
		if (time_travel_timer_mode == TT_TMR_ONESHOT)
-			time_travel_set_timer(TT_TMR_DISABLED, 0);
+			time_travel_set_timer_mode(TT_TMR_DISABLED);
		/*
		 * time_travel_time will be adjusted in the timer
		 * IRQ handler so it works even when the signal
@@ -50,7 +50,7 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 static int itimer_shutdown(struct clock_event_device *evt)
 {
	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_DISABLED, 0);
+		time_travel_set_timer_mode(TT_TMR_DISABLED);

	if (time_travel_mode != TT_MODE_INFCPU)
		os_timer_disable();
@@ -62,9 +62,10 @@ static int itimer_set_periodic(struct clock_event_device *evt)
 {
	unsigned long long interval = NSEC_PER_SEC / HZ;

-	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_PERIODIC,
-				      time_travel_time + interval);
+	if (time_travel_mode != TT_MODE_OFF) {
+		time_travel_set_timer_mode(TT_TMR_PERIODIC);
+		time_travel_set_timer_expiry(time_travel_time + interval);
+	}

	if (time_travel_mode != TT_MODE_INFCPU)
		os_timer_set_interval(interval);
@@ -77,9 +78,10 @@ static int itimer_next_event(unsigned long delta,
 {
	delta += 1;

-	if (time_travel_mode != TT_MODE_OFF)
-		time_travel_set_timer(TT_TMR_ONESHOT,
-				      time_travel_time + delta);
+	if (time_travel_mode != TT_MODE_OFF) {
+		time_travel_set_timer_mode(TT_TMR_ONESHOT);
+		time_travel_set_timer_expiry(time_travel_time + delta);
+	}

	if (time_travel_mode != TT_MODE_INFCPU)
		return os_timer_one_shot(delta);
@@ -72,6 +72,8 @@ static unsigned long find_trampoline_placement(void)

	/* Find the first usable memory region under bios_start. */
	for (i = boot_params->e820_entries - 1; i >= 0; i--) {
+		unsigned long new;
+
		entry = &boot_params->e820_table[i];

		/* Skip all entries above bios_start. */
@@ -84,15 +86,20 @@ static unsigned long find_trampoline_placement(void)

		/* Adjust bios_start to the end of the entry if needed. */
		if (bios_start > entry->addr + entry->size)
-			bios_start = entry->addr + entry->size;
+			new = entry->addr + entry->size;

		/* Keep bios_start page-aligned. */
-		bios_start = round_down(bios_start, PAGE_SIZE);
+		new = round_down(new, PAGE_SIZE);

		/* Skip the entry if it's too small. */
-		if (bios_start - TRAMPOLINE_32BIT_SIZE < entry->addr)
+		if (new - TRAMPOLINE_32BIT_SIZE < entry->addr)
			continue;

+		/* Protect against underflow. */
+		if (new - TRAMPOLINE_32BIT_SIZE > bios_start)
+			break;
+
+		bios_start = new;
		break;
	}
@@ -1236,7 +1236,7 @@ void x86_pmu_enable_event(struct perf_event *event)
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
- * but only if it can be scehduled with existing events.
+ * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
@@ -59,7 +59,6 @@ static void sanitize_boot_params(struct boot_params *boot_params)
		BOOT_PARAM_PRESERVE(apm_bios_info),
		BOOT_PARAM_PRESERVE(tboot_addr),
		BOOT_PARAM_PRESERVE(ist_info),
-		BOOT_PARAM_PRESERVE(acpi_rsdp_addr),
		BOOT_PARAM_PRESERVE(hd0_info),
		BOOT_PARAM_PRESERVE(hd1_info),
		BOOT_PARAM_PRESERVE(sys_desc_table),
@@ -71,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
		BOOT_PARAM_PRESERVE(eddbuf_entries),
		BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
		BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+		BOOT_PARAM_PRESERVE(hdr),
		BOOT_PARAM_PRESERVE(e820_table),
		BOOT_PARAM_PRESERVE(eddbuf),
	};
@@ -11,6 +11,21 @@
 * While adding a new CPUID for a new microarchitecture, add a new
 * group to keep logically sorted out in chronological order. Within
 * that group keep the CPUID for the variants sorted by model number.
+ *
+ * The defined symbol names have the following form:
+ *	INTEL_FAM6{OPTFAMILY}_{MICROARCH}{OPTDIFF}
+ * where:
+ * OPTFAMILY	Describes the family of CPUs that this belongs to. Default
+ *		is assumed to be "_CORE" (and should be omitted). Other values
+ *		currently in use are _ATOM and _XEON_PHI
+ * MICROARCH	Is the code name for the micro-architecture for this core.
+ *		N.B. Not the platform name.
+ * OPTDIFF	If needed, a short string to differentiate by market segment.
+ *		Exact strings here will vary over time. _DESKTOP, _MOBILE, and
+ *		_X (short for Xeon server) should be used when they are
+ *		appropriate.
+ *
+ * The #define line may optionally include a comment including platform names.
 */

#define INTEL_FAM6_CORE_YONAH		0x0E
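As an informal reading aid for the scheme documented above (not part of the patch; the values are believed accurate for intel-family.h of this era):

	/* Default "_CORE" family omitted; _X marks the Xeon server variant */
	#define INTEL_FAM6_SKYLAKE_X		0x55
	/* OPTFAMILY spelled out for the Atom line */
	#define INTEL_FAM6_ATOM_GOLDMONT	0x5C
	/* OPTDIFF differentiating the desktop market segment */
	#define INTEL_FAM6_SKYLAKE_DESKTOP	0x5E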
@@ -381,6 +381,7 @@
 #define MSR_AMD64_PATCH_LEVEL		0x0000008b
 #define MSR_AMD64_TSC_RATIO		0xc0000104
 #define MSR_AMD64_NB_CFG		0xc001001f
+#define MSR_AMD64_CPUID_FN_1		0xc0011004
 #define MSR_AMD64_PATCH_LOADER		0xc0010020
 #define MSR_AMD64_OSVW_ID_LENGTH	0xc0010140
 #define MSR_AMD64_OSVW_STATUS		0xc0010141
@@ -192,7 +192,7 @@
	"       lfence;\n"				\
	"       jmp    902b;\n"				\
	"       .align 16\n"				\
-	"903:	addl   $4, %%esp;\n"			\
+	"903:	lea    4(%%esp), %%esp;\n"		\
	"       pushl  %[thunk_target];\n"		\
	"       ret;\n"					\
	"       .align 16\n"				\
@@ -722,7 +722,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
 static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;

 /*
- * Temporary interrupt handler.
+ * Temporary interrupt handler and polled calibration function.
  */
 static void __init lapic_cal_handler(struct clock_event_device *dev)
 {
@@ -851,7 +851,8 @@ bool __init apic_needs_pit(void)
 static int __init calibrate_APIC_clock(void)
 {
	struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
-	void (*real_handler)(struct clock_event_device *dev);
+	u64 tsc_perj = 0, tsc_start = 0;
+	unsigned long jif_start;
	unsigned long deltaj;
	long delta, deltatsc;
	int pm_referenced = 0;
@@ -878,29 +879,65 @@ static int __init calibrate_APIC_clock(void)
	apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
		    "calibrating APIC timer ...\n");

+	/*
+	 * There are platforms w/o global clockevent devices. Instead of
+	 * making the calibration conditional on that, use a polling based
+	 * approach everywhere.
+	 */
	local_irq_disable();

-	/* Replace the global interrupt handler */
-	real_handler = global_clock_event->event_handler;
-	global_clock_event->event_handler = lapic_cal_handler;
-
	/*
	 * Setup the APIC counter to maximum. There is no way the lapic
	 * can underflow in the 100ms detection time frame
	 */
	__setup_APIC_LVTT(0xffffffff, 0, 0);

-	/* Let the interrupts run */
+	/*
+	 * Methods to terminate the calibration loop:
+	 *  1) Global clockevent if available (jiffies)
+	 *  2) TSC if available and frequency is known
+	 */
+	jif_start = READ_ONCE(jiffies);
+
+	if (tsc_khz) {
+		tsc_start = rdtsc();
+		tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
+	}
+
+	/*
+	 * Enable interrupts so the tick can fire, if a global
+	 * clockevent device is available
+	 */
	local_irq_enable();

-	while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
-		cpu_relax();
+	while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
+		/* Wait for a tick to elapse */
+		while (1) {
+			if (tsc_khz) {
+				u64 tsc_now = rdtsc();
+
+				if ((tsc_now - tsc_start) >= tsc_perj) {
+					tsc_start += tsc_perj;
+					break;
+				}
+			} else {
+				unsigned long jif_now = READ_ONCE(jiffies);
+
+				if (time_after(jif_now, jif_start)) {
+					jif_start = jif_now;
+					break;
+				}
+			}
+			cpu_relax();
+		}
+
+		/* Invoke the calibration routine */
+		local_irq_disable();
+		lapic_cal_handler(NULL);
+		local_irq_enable();
+	}

	local_irq_disable();

-	/* Restore the real event handler */
-	global_clock_event->event_handler = real_handler;
-
	/* Build delta t1-t2 as apic timer counts down */
	delta = lapic_cal_t1 - lapic_cal_t2;
	apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
@@ -943,10 +980,11 @@ static int __init calibrate_APIC_clock(void)
	levt->features &= ~CLOCK_EVT_FEAT_DUMMY;

	/*
-	 * PM timer calibration failed or not turned on
-	 * so lets try APIC timer based calibration
+	 * PM timer calibration failed or not turned on so lets try APIC
+	 * timer based calibration, if a global clockevent device is
+	 * available.
	 */
-	if (!pm_referenced) {
+	if (!pm_referenced && global_clock_event) {
		apic_printk(APIC_VERBOSE, "... verify APIC timer\n");

		/*
@@ -804,6 +804,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
	msr_set_bit(MSR_AMD64_DE_CFG, 31);
 }

+static bool rdrand_force;
+
+static int __init rdrand_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!strcmp(str, "force"))
+		rdrand_force = true;
+	else
+		return -EINVAL;
+
+	return 0;
+}
+early_param("rdrand", rdrand_cmdline);
+
+static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
+{
+	/*
+	 * Saving of the MSR used to hide the RDRAND support during
+	 * suspend/resume is done by arch/x86/power/cpu.c, which is
+	 * dependent on CONFIG_PM_SLEEP.
+	 */
+	if (!IS_ENABLED(CONFIG_PM_SLEEP))
+		return;
+
+	/*
+	 * The nordrand option can clear X86_FEATURE_RDRAND, so check for
+	 * RDRAND support using the CPUID function directly.
+	 */
+	if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
+		return;
+
+	msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
+
+	/*
+	 * Verify that the CPUID change has occurred in case the kernel is
+	 * running virtualized and the hypervisor doesn't support the MSR.
+	 */
+	if (cpuid_ecx(1) & BIT(30)) {
+		pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
+		return;
+	}
+
+	clear_cpu_cap(c, X86_FEATURE_RDRAND);
+	pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
+}
+
+static void init_amd_jg(struct cpuinfo_x86 *c)
+{
+	/*
+	 * Some BIOS implementations do not restore proper RDRAND support
+	 * across suspend and resume. Check on whether to hide the RDRAND
+	 * instruction support via CPUID.
+	 */
+	clear_rdrand_cpuid_bit(c);
+}
+
 static void init_amd_bd(struct cpuinfo_x86 *c)
 {
	u64 value;
@@ -818,6 +876,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
			wrmsrl_safe(MSR_F15H_IC_CFG, value);
		}
	}
+
+	/*
+	 * Some BIOS implementations do not restore proper RDRAND support
+	 * across suspend and resume. Check on whether to hide the RDRAND
+	 * instruction support via CPUID.
+	 */
+	clear_rdrand_cpuid_bit(c);
 }

 static void init_amd_zn(struct cpuinfo_x86 *c)
@@ -860,6 +925,7 @@ static void init_amd(struct cpuinfo_x86 *c)
	case 0x10: init_amd_gh(c); break;
	case 0x12: init_amd_ln(c); break;
	case 0x15: init_amd_bd(c); break;
+	case 0x16: init_amd_jg(c); break;
	case 0x17: init_amd_zn(c); break;
	}
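A side note on the bit positions above: the code tests CPUID leaf 1 ECX bit 30 (the RDRAND feature flag) yet clears bit 62 of MSR_AMD64_CPUID_FN_1. That is consistent if the MSR mirrors the CPUID Fn0000_0001 output with ECX in its upper 32 bits, so ECX bit 30 lands at bit 32 + 30 = 62; this layout is an inference from the code shown here, not something the patch states. A trivial standalone sketch of the arithmetic:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ecx_bit = 30;           /* cpuid_ecx(1) & BIT(30) */
		unsigned int msr_bit = 32 + ecx_bit; /* msr_clear_bit(..., 62) */

		printf("RDRAND: CPUID.1:ECX[%u] -> MSR_AMD64_CPUID_FN_1 bit %u\n",
		       ecx_bit, msr_bit);
		return 0;
	}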
@@ -216,6 +216,9 @@ static void recalculate_apic_map(struct kvm *kvm)
		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
			new->phys_map[xapic_id] = apic;

+		if (!kvm_apic_sw_enabled(apic))
+			continue;
+
		ldr = kvm_lapic_get_reg(apic, APIC_LDR);

		if (apic_x2apic_mode(apic)) {
@@ -258,6 +261,8 @@ static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
			static_key_slow_dec_deferred(&apic_sw_disabled);
		else
			static_key_slow_inc(&apic_sw_disabled.key);
+
+		recalculate_apic_map(apic->vcpu->kvm);
	}
 }
@@ -5653,38 +5653,7 @@ static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
			struct kvm_memory_slot *slot,
			struct kvm_page_track_notifier_node *node)
 {
-	struct kvm_mmu_page *sp;
-	LIST_HEAD(invalid_list);
-	unsigned long i;
-	bool flush;
-	gfn_t gfn;
-
-	spin_lock(&kvm->mmu_lock);
-
-	if (list_empty(&kvm->arch.active_mmu_pages))
-		goto out_unlock;
-
-	flush = slot_handle_all_level(kvm, slot, kvm_zap_rmapp, false);
-
-	for (i = 0; i < slot->npages; i++) {
-		gfn = slot->base_gfn + i;
-
-		for_each_valid_sp(kvm, sp, gfn) {
-			if (sp->gfn != gfn)
-				continue;
-
-			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
-		}
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
-			kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-			flush = false;
-			cond_resched_lock(&kvm->mmu_lock);
-		}
-	}
-	kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
-
-out_unlock:
-	spin_unlock(&kvm->mmu_lock);
+	kvm_mmu_zap_all(kvm);
 }

 void kvm_mmu_init_vm(struct kvm *kvm)
@@ -1714,7 +1714,6 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
	if (!entry)
		return -EINVAL;

-	new_entry = READ_ONCE(*entry);
	new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
			       AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
			      AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
@@ -390,8 +390,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,

	emit_prologue(&prog, bpf_prog->aux->stack_depth,
		      bpf_prog_was_classic(bpf_prog));
+	addrs[0] = prog - temp;

-	for (i = 0; i < insn_cnt; i++, insn++) {
+	for (i = 1; i <= insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
@@ -1105,7 +1106,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
		extra_pass = true;
		goto skip_init_addrs;
	}
-	addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
+	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
	if (!addrs) {
		prog = orig_prog;
		goto out_addrs;
@@ -1115,7 +1116,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
	 * Before first pass, make a rough estimation of addrs[]
	 * each BPF instruction is translated to less than 64 bytes
	 */
-	for (proglen = 0, i = 0; i < prog->len; i++) {
+	for (proglen = 0, i = 0; i <= prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
@@ -1180,7 +1181,7 @@ out_image:

	if (!image || !prog->is_func || extra_pass) {
		if (image)
-			bpf_prog_fill_jited_linfo(prog, addrs);
+			bpf_prog_fill_jited_linfo(prog, addrs + 1);
 out_addrs:
		kfree(addrs);
		kfree(jit_data);
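The net effect of this change is a new convention for addrs[]: addrs[0] records the end of the prologue and addrs[i] the end offset of BPF instruction i, so instruction i occupies the byte range [addrs[i-1], addrs[i]) and the line-info code consumes addrs + 1. A minimal standalone sketch of that convention (hypothetical offsets, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		/* addrs[0] = prologue end; addrs[i] = end offset of insn i */
		int addrs[4] = { 16, 24, 32, 40 };
		int insn_cnt = 3;

		for (int i = 1; i <= insn_cnt; i++)
			printf("insn %d: bytes [%d, %d)\n", i, addrs[i - 1], addrs[i]);
		return 0;
	}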
@@ -12,6 +12,7 @@
 #include <linux/smp.h>
 #include <linux/perf_event.h>
 #include <linux/tboot.h>
+#include <linux/dmi.h>

 #include <asm/pgtable.h>
 #include <asm/proto.h>
@@ -23,7 +24,7 @@
 #include <asm/debugreg.h>
 #include <asm/cpu.h>
 #include <asm/mmu_context.h>
-#include <linux/dmi.h>
+#include <asm/cpu_device_id.h>

 #ifdef CONFIG_X86_32
 __visible unsigned long saved_context_ebx;
@@ -397,15 +398,14 @@ static int __init bsp_pm_check_init(void)

 core_initcall(bsp_pm_check_init);

-static int msr_init_context(const u32 *msr_id, const int total_num)
+static int msr_build_context(const u32 *msr_id, const int num)
 {
-	int i = 0;
	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
	struct saved_msr *msr_array;
+	int total_num;
+	int i, j;

-	if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
-		pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
-		return -EINVAL;
-	}
+	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
@@ -413,19 +413,30 @@ static int msr_build_context(const u32 *msr_id, const int num)
		return -ENOMEM;
	}

-	for (i = 0; i < total_num; i++) {
-		msr_array[i].info.msr_no	= msr_id[i];
+	if (saved_msrs->array) {
+		/*
+		 * Multiple callbacks can invoke this function, so copy any
+		 * MSR save requests from previous invocations.
+		 */
+		memcpy(msr_array, saved_msrs->array,
+		       sizeof(struct saved_msr) * saved_msrs->num);
+
+		kfree(saved_msrs->array);
+	}
+
+	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+		msr_array[i].info.msr_no	= msr_id[j];
		msr_array[i].valid		= false;
		msr_array[i].info.reg.q		= 0;
	}
-	saved_context.saved_msrs.num	= total_num;
-	saved_context.saved_msrs.array	= msr_array;
+	saved_msrs->num   = total_num;
+	saved_msrs->array = msr_array;

	return 0;
 }

 /*
- * The following section is a quirk framework for problematic BIOSen:
+ * The following sections are a quirk framework for problematic BIOSen:
  * Sometimes MSRs are modified by the BIOSen after suspended to
  * RAM, this might cause unexpected behavior after wakeup.
  * Thus we save/restore these specified MSRs across suspend/resume
@@ -440,7 +451,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
-	return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
 }

 static const struct dmi_system_id msr_save_dmi_table[] = {
@@ -455,9 +466,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
	{}
 };

+static int msr_save_cpuid_features(const struct x86_cpu_id *c)
+{
+	u32 cpuid_msr_id[] = {
+		MSR_AMD64_CPUID_FN_1,
+	};
+
+	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
+		c->family);
+
+	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
+}
+
+static const struct x86_cpu_id msr_save_cpu_table[] = {
+	{
+		.vendor = X86_VENDOR_AMD,
+		.family = 0x15,
+		.model = X86_MODEL_ANY,
+		.feature = X86_FEATURE_ANY,
+		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+	},
+	{
+		.vendor = X86_VENDOR_AMD,
+		.family = 0x16,
+		.model = X86_MODEL_ANY,
+		.feature = X86_FEATURE_ANY,
+		.driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+	},
+	{}
+};
+
+typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
+static int pm_cpu_check(const struct x86_cpu_id *c)
+{
+	const struct x86_cpu_id *m;
+	int ret = 0;
+
+	m = x86_match_cpu(msr_save_cpu_table);
+	if (m) {
+		pm_cpu_match_t fn;
+
+		fn = (pm_cpu_match_t)m->driver_data;
+		ret = fn(m);
+	}
+
+	return ret;
+}
+
 static int pm_check_save_msr(void)
 {
	dmi_check_system(msr_save_dmi_table);
+	pm_cpu_check(msr_save_cpu_table);

	return 0;
 }
@@ -74,7 +74,7 @@ struct ht16k33_priv {
	struct ht16k33_fbdev fbdev;
 };

-static struct fb_fix_screeninfo ht16k33_fb_fix = {
+static const struct fb_fix_screeninfo ht16k33_fb_fix = {
	.id		= DRIVER_NAME,
	.type		= FB_TYPE_PACKED_PIXELS,
	.visual		= FB_VISUAL_MONO10,
@@ -85,7 +85,7 @@ static const struct fb_fix_screeninfo ht16k33_fb_fix = {
	.accel		= FB_ACCEL_NONE,
 };

-static struct fb_var_screeninfo ht16k33_fb_var = {
+static const struct fb_var_screeninfo ht16k33_fb_var = {
	.xres = HT16K33_MATRIX_LED_MAX_ROWS,
	.yres = HT16K33_MATRIX_LED_MAX_COLS,
	.xres_virtual = HT16K33_MATRIX_LED_MAX_ROWS,
@@ -322,6 +322,8 @@ static int drbd_thread_setup(void *arg)
		 thi->name[0],
		 resource->name);

+	allow_kernel_signal(DRBD_SIGKILL);
+	allow_kernel_signal(SIGXCPU);
 restart:
	retval = thi->function(thi);
@@ -99,6 +99,27 @@ static int qca_send_reset(struct hci_dev *hdev)
	return 0;
 }

+int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+{
+	struct sk_buff *skb;
+	int err;
+
+	bt_dev_dbg(hdev, "QCA pre shutdown cmd");
+
+	skb = __hci_cmd_sync(hdev, QCA_PRE_SHUTDOWN_CMD, 0,
+			     NULL, HCI_INIT_TIMEOUT);
+	if (IS_ERR(skb)) {
+		err = PTR_ERR(skb);
+		bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err);
+		return err;
+	}
+
+	kfree_skb(skb);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);
+
 static void qca_tlv_check_data(struct rome_config *config,
				const struct firmware *fw)
 {
@@ -119,6 +140,7 @@ static void qca_tlv_check_data(struct rome_config *config,
	BT_DBG("Length\t\t : %d bytes", length);

	config->dnld_mode = ROME_SKIP_EVT_NONE;
+	config->dnld_type = ROME_SKIP_EVT_NONE;

	switch (config->type) {
	case TLV_TYPE_PATCH:
@@ -268,7 +290,7 @@ static int qca_inject_cmd_complete_event(struct hci_dev *hdev)

	evt = skb_put(skb, sizeof(*evt));
	evt->ncmd = 1;
-	evt->opcode = QCA_HCI_CC_OPCODE;
+	evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE);

	skb_put_u8(skb, QCA_HCI_CC_SUCCESS);

@@ -323,7 +345,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
	 */
	if (config->dnld_type == ROME_SKIP_EVT_VSE_CC ||
	    config->dnld_type == ROME_SKIP_EVT_VSE)
-		return qca_inject_cmd_complete_event(hdev);
+		ret = qca_inject_cmd_complete_event(hdev);

 out:
	release_firmware(fw);
@@ -388,6 +410,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
		return err;
	}

+	/* Give the controller some time to get ready to receive the NVM */
+	msleep(10);
+
	/* Download NVM configuration */
	config.type = TLV_TYPE_NVM;
	if (firmware_name)
@@ -13,6 +13,7 @@
 #define EDL_PATCH_TLV_REQ_CMD		(0x1E)
 #define EDL_NVM_ACCESS_SET_REQ_CMD	(0x01)
 #define MAX_SIZE_PER_TLV_SEGMENT	(243)
+#define QCA_PRE_SHUTDOWN_CMD		(0xFC08)

 #define EDL_CMD_REQ_RES_EVT		(0x00)
 #define EDL_PATCH_VER_RES_EVT		(0x19)
@@ -135,6 +136,7 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
		   const char *firmware_name);
 int qca_read_soc_version(struct hci_dev *hdev, u32 *soc_version);
 int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int qca_send_pre_shutdown_cmd(struct hci_dev *hdev);
 static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
 {
	return soc_type == QCA_WCN3990 || soc_type == QCA_WCN3998;
@@ -167,4 +169,9 @@ static inline bool qca_is_wcn399x(enum qca_btsoc_type soc_type)
 {
	return false;
 }
+
+static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
+{
+	return -EOPNOTSUPP;
+}
 #endif
@@ -2762,8 +2762,10 @@ static int btusb_mtk_setup_firmware(struct hci_dev *hdev, const char *fwname)
	fw_size = fw->size;

	/* The size of patch header is 30 bytes, should be skip */
-	if (fw_size < 30)
+	if (fw_size < 30) {
+		err = -EINVAL;
		goto err_release_fw;
+	}

	fw_size -= 30;
	fw_ptr += 30;
@@ -705,7 +705,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
	unsigned long flags;
	struct qca_data *qca = hu->priv;

-	BT_DBG("hu %p want to sleep", hu);
+	BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state);

	spin_lock_irqsave(&qca->hci_ibs_lock, flags);

@@ -720,7 +720,7 @@ static void device_want_to_sleep(struct hci_uart *hu)
		break;

	case HCI_IBS_RX_ASLEEP:
-		/* Fall through */
+		break;

	default:
		/* Any other state is illegal */
@@ -912,7 +912,7 @@ static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb)
		if (hdr->evt == HCI_EV_VENDOR)
			complete(&qca->drop_ev_comp);

-		kfree(skb);
+		kfree_skb(skb);

		return 0;
	}
@@ -1386,6 +1386,9 @@ static int qca_power_off(struct hci_dev *hdev)
 {
	struct hci_uart *hu = hci_get_drvdata(hdev);

+	/* Perform pre shutdown command */
+	qca_send_pre_shutdown_cmd(hdev);
+
	qca_power_shutdown(hu);
	return 0;
 }
@@ -324,6 +324,25 @@ static struct clk_core *clk_core_lookup(const char *name)
	return NULL;
 }

+#ifdef CONFIG_OF
+static int of_parse_clkspec(const struct device_node *np, int index,
+			    const char *name, struct of_phandle_args *out_args);
+static struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
+#else
+static inline int of_parse_clkspec(const struct device_node *np, int index,
+				   const char *name,
+				   struct of_phandle_args *out_args)
+{
+	return -ENOENT;
+}
+static inline struct clk_hw *
+of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
+{
+	return ERR_PTR(-ENOENT);
+}
+#endif
+
 /**
  * clk_core_get - Find the clk_core parent of a clk
  * @core: clk to find parent of
@@ -355,8 +374,9 @@ static struct clk_core *clk_core_lookup(const char *name)
  * };
  *
  * Returns: -ENOENT when the provider can't be found or the clk doesn't
- * exist in the provider. -EINVAL when the name can't be found. NULL when the
- * provider knows about the clk but it isn't provided on this system.
+ * exist in the provider or the name can't be found in the DT node or
+ * in a clkdev lookup. NULL when the provider knows about the clk but it
+ * isn't provided on this system.
  * A valid clk_core pointer when the clk can be found in the provider.
  */
 static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
@@ -367,17 +387,19 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
	struct device *dev = core->dev;
	const char *dev_id = dev ? dev_name(dev) : NULL;
	struct device_node *np = core->of_node;
+	struct of_phandle_args clkspec;

-	if (np && (name || index >= 0))
-		hw = of_clk_get_hw(np, index, name);
-
-	/*
-	 * If the DT search above couldn't find the provider or the provider
-	 * didn't know about this clk, fallback to looking up via clkdev based
-	 * clk_lookups
-	 */
-	if (PTR_ERR(hw) == -ENOENT && name)
+	if (np && (name || index >= 0) &&
+	    !of_parse_clkspec(np, index, name, &clkspec)) {
+		hw = of_clk_get_hw_from_clkspec(&clkspec);
+		of_node_put(clkspec.np);
+	} else if (name) {
+		/*
+		 * If the DT search above couldn't find the provider fallback to
+		 * looking up via clkdev based clk_lookups.
+		 */
		hw = clk_find_hw(dev_id, name);
+	}

	if (IS_ERR(hw))
		return ERR_CAST(hw);
@@ -401,7 +423,7 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
		parent = ERR_PTR(-EPROBE_DEFER);
	} else {
		parent = clk_core_get(core, index);
-		if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT)
+		if (IS_ERR(parent) && PTR_ERR(parent) == -ENOENT && entry->name)
			parent = clk_core_lookup(entry->name);
	}

@@ -1632,7 +1654,8 @@ static int clk_fetch_parent_index(struct clk_core *core,
			break;

		/* Fallback to comparing globally unique names */
-		if (!strcmp(parent->name, core->parents[i].name))
+		if (core->parents[i].name &&
+		    !strcmp(parent->name, core->parents[i].name))
			break;
	}
@@ -14,7 +14,7 @@
 #include "clk-exynos5-subcmu.h"

 static struct samsung_clk_provider *ctx;
-static const struct exynos5_subcmu_info *cmu;
+static const struct exynos5_subcmu_info **cmu;
 static int nr_cmus;

 static void exynos5_subcmu_clk_save(void __iomem *base,
@@ -56,17 +56,17 @@ static void exynos5_subcmu_defer_gate(struct samsung_clk_provider *ctx,
  * when OF-core populates all device-tree nodes.
  */
 void exynos5_subcmus_init(struct samsung_clk_provider *_ctx, int _nr_cmus,
-			  const struct exynos5_subcmu_info *_cmu)
+			  const struct exynos5_subcmu_info **_cmu)
 {
	ctx = _ctx;
	cmu = _cmu;
	nr_cmus = _nr_cmus;

	for (; _nr_cmus--; _cmu++) {
-		exynos5_subcmu_defer_gate(ctx, _cmu->gate_clks,
-					  _cmu->nr_gate_clks);
-		exynos5_subcmu_clk_save(ctx->reg_base, _cmu->suspend_regs,
-					_cmu->nr_suspend_regs);
+		exynos5_subcmu_defer_gate(ctx, (*_cmu)->gate_clks,
+					  (*_cmu)->nr_gate_clks);
+		exynos5_subcmu_clk_save(ctx->reg_base, (*_cmu)->suspend_regs,
+					(*_cmu)->nr_suspend_regs);
	}
 }
@@ -163,9 +163,9 @@ static int __init exynos5_clk_probe(struct platform_device *pdev)
		if (of_property_read_string(np, "label", &name) < 0)
			continue;
		for (i = 0; i < nr_cmus; i++)
-			if (strcmp(cmu[i].pd_name, name) == 0)
+			if (strcmp(cmu[i]->pd_name, name) == 0)
				exynos5_clk_register_subcmu(&pdev->dev,
-							    &cmu[i], np);
+							    cmu[i], np);
	}
	return 0;
 }
@@ -21,6 +21,6 @@ struct exynos5_subcmu_info {
 };

 void exynos5_subcmus_init(struct samsung_clk_provider *ctx, int nr_cmus,
-			  const struct exynos5_subcmu_info *cmu);
+			  const struct exynos5_subcmu_info **cmu);

 #endif
@@ -681,6 +681,10 @@ static const struct exynos5_subcmu_info exynos5250_disp_subcmu = {
	.pd_name	= "DISP1",
 };

+static const struct exynos5_subcmu_info *exynos5250_subcmus[] = {
+	&exynos5250_disp_subcmu,
+};
+
 static const struct samsung_pll_rate_table vpll_24mhz_tbl[] __initconst = {
	/* sorted in descending order */
	/* PLL_36XX_RATE(rate, m, p, s, k) */
@@ -843,7 +847,8 @@ static void __init exynos5250_clk_init(struct device_node *np)

	samsung_clk_sleep_init(reg_base, exynos5250_clk_regs,
			       ARRAY_SIZE(exynos5250_clk_regs));
-	exynos5_subcmus_init(ctx, 1, &exynos5250_disp_subcmu);
+	exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5250_subcmus),
+			     exynos5250_subcmus);

	samsung_clk_of_add_provider(np, ctx);
@@ -534,8 +534,6 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
		GATE_BUS_TOP, 24, 0, 0),
	GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
		GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
-	GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
-		SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
 };

 static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -577,8 +575,13 @@ static const struct samsung_div_clock exynos5420_div_clks[] __initconst = {

 static const struct samsung_gate_clock exynos5420_gate_clks[] __initconst = {
	GATE(CLK_SECKEY, "seckey", "aclk66_psgen", GATE_BUS_PERIS1, 1, 0, 0),
+	/* Maudio Block */
+	GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
+		SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+		GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+		GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
 };

 static const struct samsung_mux_clock exynos5x_mux_clks[] __initconst = {
@@ -890,9 +893,6 @@ static const struct samsung_div_clock exynos5x_div_clks[] __initconst = {
	/* GSCL Block */
	DIV(0, "dout_gscl_blk_333", "aclk333_432_gscl", DIV2_RATIO0, 6, 2),

-	/* MSCL Block */
-	DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
-
	/* PSGEN */
	DIV(0, "dout_gen_blk", "mout_user_aclk266", DIV2_RATIO0, 8, 1),
	DIV(0, "dout_jpg_blk", "aclk166", DIV2_RATIO0, 20, 1),
@@ -1017,12 +1017,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
	GATE(CLK_SCLK_DP1, "sclk_dp1", "dout_dp1",
		GATE_TOP_SCLK_DISP1, 20, CLK_SET_RATE_PARENT, 0),

-	/* Maudio Block */
-	GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
-		GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
-	GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
-		GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
-
	/* FSYS Block */
	GATE(CLK_TSI, "tsi", "aclk200_fsys", GATE_BUS_FSYS0, 0, 0, 0),
	GATE(CLK_PDMA0, "pdma0", "aclk200_fsys", GATE_BUS_FSYS0, 1, 0, 0),
@@ -1162,17 +1156,6 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
	GATE(CLK_FIMC_LITE3, "fimc_lite3", "aclk333_432_gscl",
		GATE_IP_GSCL1, 17, 0, 0),

-	/* MSCL Block */
-	GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
-	GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
-	GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
-	GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
-		GATE_IP_MSCL, 8, 0, 0),
-	GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
-		GATE_IP_MSCL, 9, 0, 0),
-	GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
-		GATE_IP_MSCL, 10, 0, 0),
-
	/* ISP */
	GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "dout_uart_isp",
		GATE_TOP_SCLK_ISP, 0, CLK_SET_RATE_PARENT, 0),
@@ -1281,32 +1264,103 @@ static struct exynos5_subcmu_reg_dump exynos5x_mfc_suspend_regs[] = {
	{ DIV4_RATIO, 0, 0x3 },			/* DIV dout_mfc_blk */
 };

-static const struct exynos5_subcmu_info exynos5x_subcmus[] = {
-	{
-		.div_clks	= exynos5x_disp_div_clks,
-		.nr_div_clks	= ARRAY_SIZE(exynos5x_disp_div_clks),
-		.gate_clks	= exynos5x_disp_gate_clks,
-		.nr_gate_clks	= ARRAY_SIZE(exynos5x_disp_gate_clks),
-		.suspend_regs	= exynos5x_disp_suspend_regs,
-		.nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
-		.pd_name	= "DISP",
-	}, {
-		.div_clks	= exynos5x_gsc_div_clks,
-		.nr_div_clks	= ARRAY_SIZE(exynos5x_gsc_div_clks),
-		.gate_clks	= exynos5x_gsc_gate_clks,
-		.nr_gate_clks	= ARRAY_SIZE(exynos5x_gsc_gate_clks),
-		.suspend_regs	= exynos5x_gsc_suspend_regs,
-		.nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
-		.pd_name	= "GSC",
-	}, {
-		.div_clks	= exynos5x_mfc_div_clks,
-		.nr_div_clks	= ARRAY_SIZE(exynos5x_mfc_div_clks),
-		.gate_clks	= exynos5x_mfc_gate_clks,
-		.nr_gate_clks	= ARRAY_SIZE(exynos5x_mfc_gate_clks),
-		.suspend_regs	= exynos5x_mfc_suspend_regs,
-		.nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
-		.pd_name	= "MFC",
-	},
-};
+static const struct samsung_gate_clock exynos5x_mscl_gate_clks[] __initconst = {
+	/* MSCL Block */
+	GATE(CLK_MSCL0, "mscl0", "aclk400_mscl", GATE_IP_MSCL, 0, 0, 0),
+	GATE(CLK_MSCL1, "mscl1", "aclk400_mscl", GATE_IP_MSCL, 1, 0, 0),
+	GATE(CLK_MSCL2, "mscl2", "aclk400_mscl", GATE_IP_MSCL, 2, 0, 0),
+	GATE(CLK_SMMU_MSCL0, "smmu_mscl0", "dout_mscl_blk",
+		GATE_IP_MSCL, 8, 0, 0),
+	GATE(CLK_SMMU_MSCL1, "smmu_mscl1", "dout_mscl_blk",
+		GATE_IP_MSCL, 9, 0, 0),
+	GATE(CLK_SMMU_MSCL2, "smmu_mscl2", "dout_mscl_blk",
+		GATE_IP_MSCL, 10, 0, 0),
+};
+
+static const struct samsung_div_clock exynos5x_mscl_div_clks[] __initconst = {
+	DIV(0, "dout_mscl_blk", "aclk400_mscl", DIV2_RATIO0, 28, 2),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5x_mscl_suspend_regs[] = {
+	{ GATE_IP_MSCL, 0xffffffff, 0xffffffff }, /* MSCL gates */
+	{ SRC_TOP3, 0, BIT(4) },		/* MUX mout_user_aclk400_mscl */
+	{ DIV2_RATIO0, 0, 0x30000000 },		/* DIV dout_mscl_blk */
+};
+
+static const struct samsung_gate_clock exynos5800_mau_gate_clks[] __initconst = {
+	GATE(CLK_MAU_EPLL, "mau_epll", "mout_user_mau_epll",
+		SRC_MASK_TOP7, 20, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_MAUDIO0, "sclk_maudio0", "dout_maudio0",
+		GATE_TOP_SCLK_MAU, 0, CLK_SET_RATE_PARENT, 0),
+	GATE(CLK_SCLK_MAUPCM0, "sclk_maupcm0", "dout_maupcm0",
+		GATE_TOP_SCLK_MAU, 1, CLK_SET_RATE_PARENT, 0),
+};
+
+static struct exynos5_subcmu_reg_dump exynos5800_mau_suspend_regs[] = {
+	{ SRC_TOP9, 0, BIT(8) },	/* MUX mout_user_mau_epll */
+};
+
+static const struct exynos5_subcmu_info exynos5x_disp_subcmu = {
+	.div_clks	= exynos5x_disp_div_clks,
+	.nr_div_clks	= ARRAY_SIZE(exynos5x_disp_div_clks),
+	.gate_clks	= exynos5x_disp_gate_clks,
+	.nr_gate_clks	= ARRAY_SIZE(exynos5x_disp_gate_clks),
+	.suspend_regs	= exynos5x_disp_suspend_regs,
+	.nr_suspend_regs = ARRAY_SIZE(exynos5x_disp_suspend_regs),
+	.pd_name	= "DISP",
+};
+
+static const struct exynos5_subcmu_info exynos5x_gsc_subcmu = {
+	.div_clks	= exynos5x_gsc_div_clks,
+	.nr_div_clks	= ARRAY_SIZE(exynos5x_gsc_div_clks),
+	.gate_clks	= exynos5x_gsc_gate_clks,
+	.nr_gate_clks	= ARRAY_SIZE(exynos5x_gsc_gate_clks),
+	.suspend_regs	= exynos5x_gsc_suspend_regs,
+	.nr_suspend_regs = ARRAY_SIZE(exynos5x_gsc_suspend_regs),
+	.pd_name	= "GSC",
+};
+
+static const struct exynos5_subcmu_info exynos5x_mfc_subcmu = {
+	.div_clks	= exynos5x_mfc_div_clks,
+	.nr_div_clks	= ARRAY_SIZE(exynos5x_mfc_div_clks),
+	.gate_clks	= exynos5x_mfc_gate_clks,
+	.nr_gate_clks	= ARRAY_SIZE(exynos5x_mfc_gate_clks),
+	.suspend_regs	= exynos5x_mfc_suspend_regs,
+	.nr_suspend_regs = ARRAY_SIZE(exynos5x_mfc_suspend_regs),
+	.pd_name	= "MFC",
+};
+
+static const struct exynos5_subcmu_info exynos5x_mscl_subcmu = {
+	.div_clks	= exynos5x_mscl_div_clks,
+	.nr_div_clks	= ARRAY_SIZE(exynos5x_mscl_div_clks),
+	.gate_clks	= exynos5x_mscl_gate_clks,
+	.nr_gate_clks	= ARRAY_SIZE(exynos5x_mscl_gate_clks),
+	.suspend_regs	= exynos5x_mscl_suspend_regs,
+	.nr_suspend_regs = ARRAY_SIZE(exynos5x_mscl_suspend_regs),
+	.pd_name	= "MSC",
+};
+
+static const struct exynos5_subcmu_info exynos5800_mau_subcmu = {
+	.gate_clks	= exynos5800_mau_gate_clks,
+	.nr_gate_clks	= ARRAY_SIZE(exynos5800_mau_gate_clks),
+	.suspend_regs	= exynos5800_mau_suspend_regs,
+	.nr_suspend_regs = ARRAY_SIZE(exynos5800_mau_suspend_regs),
+	.pd_name	= "MAU",
+};
+
+static const struct exynos5_subcmu_info *exynos5x_subcmus[] = {
+	&exynos5x_disp_subcmu,
+	&exynos5x_gsc_subcmu,
+	&exynos5x_mfc_subcmu,
+	&exynos5x_mscl_subcmu,
+};
+
+static const struct exynos5_subcmu_info *exynos5800_subcmus[] = {
+	&exynos5x_disp_subcmu,
+	&exynos5x_gsc_subcmu,
+	&exynos5x_mfc_subcmu,
+	&exynos5x_mscl_subcmu,
+	&exynos5800_mau_subcmu,
+};

 static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] __initconst = {
@@ -1539,11 +1593,17 @@ static void __init exynos5x_clk_init(struct device_node *np,
	samsung_clk_extended_sleep_init(reg_base,
		exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
		exynos5420_set_clksrc, ARRAY_SIZE(exynos5420_set_clksrc));
-	if (soc == EXYNOS5800)
+
+	if (soc == EXYNOS5800) {
		samsung_clk_sleep_init(reg_base, exynos5800_clk_regs,
				       ARRAY_SIZE(exynos5800_clk_regs));
-	exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
-			     exynos5x_subcmus);
+
+		exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5800_subcmus),
+				     exynos5800_subcmus);
+	} else {
+		exynos5_subcmus_init(ctx, ARRAY_SIZE(exynos5x_subcmus),
+				     exynos5x_subcmus);
+	}

	samsung_clk_of_add_provider(np, ctx);
 }
@@ -38,7 +38,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
	if (socfpgaclk->fixed_div) {
		div = socfpgaclk->fixed_div;
	} else {
-		if (!socfpgaclk->bypass_reg)
+		if (socfpgaclk->hw.reg)
			div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
	}
@@ -1163,6 +1163,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
	switch (chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
+		/* Fall through */
	case FSL_DMA_IP_83XX:
		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
@@ -363,7 +363,7 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
	/* Special handling for SPI GPIOs if used */
	if (IS_ERR(desc))
		desc = of_find_spi_gpio(dev, con_id, &of_flags);
-	if (IS_ERR(desc)) {
+	if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) {
		/* This quirk looks up flags and all */
		desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
		if (!IS_ERR(desc))
@@ -1091,9 +1091,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
		if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
			lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
		if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
			lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
			lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
					   GPIOLINE_FLAG_IS_OUT);
		if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
			lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
			lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
					   GPIOLINE_FLAG_IS_OUT);

		if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
			return -EFAULT;

@@ -1371,21 +1373,13 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
	if (status)
		goto err_remove_from_list;

	status = gpiochip_irqchip_init_valid_mask(chip);
	status = gpiochip_alloc_valid_mask(chip);
	if (status)
		goto err_remove_from_list;

	status = gpiochip_alloc_valid_mask(chip);
	if (status)
		goto err_remove_irqchip_mask;

	status = gpiochip_add_irqchip(chip, lock_key, request_key);
	if (status)
		goto err_free_gpiochip_mask;

	status = of_gpiochip_add(chip);
	if (status)
		goto err_remove_chip;
		goto err_free_gpiochip_mask;

	status = gpiochip_init_valid_mask(chip);
	if (status)

@@ -1411,6 +1405,14 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,

	machine_gpiochip_add(chip);

	status = gpiochip_irqchip_init_valid_mask(chip);
	if (status)
		goto err_remove_acpi_chip;

	status = gpiochip_add_irqchip(chip, lock_key, request_key);
	if (status)
		goto err_remove_irqchip_mask;

	/*
	 * By first adding the chardev, and then adding the device,
	 * we get a device node entry in sysfs under

@@ -1422,21 +1424,21 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
	if (gpiolib_initialized) {
		status = gpiochip_setup_dev(gdev);
		if (status)
			goto err_remove_acpi_chip;
			goto err_remove_irqchip;
	}
	return 0;

err_remove_irqchip:
	gpiochip_irqchip_remove(chip);
err_remove_irqchip_mask:
	gpiochip_irqchip_free_valid_mask(chip);
err_remove_acpi_chip:
	acpi_gpiochip_remove(chip);
err_remove_of_chip:
	gpiochip_free_hogs(chip);
	of_gpiochip_remove(chip);
err_remove_chip:
	gpiochip_irqchip_remove(chip);
err_free_gpiochip_mask:
	gpiochip_free_valid_mask(chip);
err_remove_irqchip_mask:
	gpiochip_irqchip_free_valid_mask(chip);
err_remove_from_list:
	spin_lock_irqsave(&gpio_lock, flags);
	list_del(&gdev->list);
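The gpiolib hunks reorder irqchip setup and, with it, the error-unwind ladder, whose labels must mirror the init order exactly so a failure at any step frees only what was already set up. A toy, self-contained version of that ladder discipline; resources a, b and c are fictitious.

#include <stdio.h>
#include <stdlib.h>

/* Each label undoes work in exact reverse order of acquisition. */
static int setup(void)
{
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto err;
	b = malloc(16);
	if (!b)
		goto err_free_a;
	c = malloc(16);
	if (!c)
		goto err_free_b;

	/* ... use a, b, c ... */
	free(c);
	free(b);
	free(a);
	return 0;

err_free_b:
	free(b);
err_free_a:
	free(a);
err:
	return -1;
}

int main(void)
{
	return setup();
}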
@@ -1143,6 +1143,9 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

@@ -1166,8 +1169,7 @@ static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,

static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
						      struct amdgpu_cs_chunk
						      *chunk)
						      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
	unsigned num_deps;

@@ -1177,6 +1179,9 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;
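Both amdgpu hunks guard against a second syncobj chunk re-allocating p->post_deps (leaking the first allocation) and rely on kmalloc_array() for an overflow-checked array allocation. A userspace stand-in for that overflow check; alloc_array is a hypothetical helper, not a kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <errno.h>

/* Refuse n * size when the multiplication would overflow, which is
 * the check kmalloc_array() performs before allocating.
 */
static void *alloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size) {
		errno = EOVERFLOW;
		return NULL;
	}
	return malloc(n * size);
}

int main(void)
{
	void *deps = alloc_array(4, 32);

	if (!deps)
		return 1;
	free(deps);
	return 0;
}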
@@ -604,6 +604,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
		    (adev->gfx.rlc_feature_version < 1) ||
		    !adev->gfx.rlc.is_rlc_v2_1)
			adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
					  AMD_PG_SUPPORT_CP |
					  AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	default:
		break;

@@ -552,7 +552,6 @@ static int nv_common_early_init(void *handle)
			AMD_CG_SUPPORT_BIF_LS;
		adev->pg_flags = AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
			AMD_PG_SUPPORT_MMHUB |
			AMD_PG_SUPPORT_ATHUB;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;

@@ -992,11 +992,6 @@ static int soc15_common_early_init(void *handle)

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}

		if (adev->pm.pp_feature & PP_GFXOFF_MASK)
			adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
					  AMD_PG_SUPPORT_CP |
					  AMD_PG_SUPPORT_RLC_SMU_HS;
		break;
	default:
		/* FIXME: not supported yet */
@@ -3131,13 +3131,25 @@ static enum dc_color_depth
convert_color_depth_from_display_info(const struct drm_connector *connector,
				      const struct drm_connector_state *state)
{
	uint32_t bpc = connector->display_info.bpc;
	uint8_t bpc = (uint8_t)connector->display_info.bpc;

	/* Assume 8 bpc by default if no bpc is specified. */
	bpc = bpc ? bpc : 8;

	if (!state)
		state = connector->state;

	if (state) {
		bpc = state->max_bpc;
		/*
		 * Cap display bpc based on the user requested value.
		 *
		 * The value for state->max_bpc may not correctly updated
		 * depending on when the connector gets added to the state
		 * or if this was called outside of atomic check, so it
		 * can't be used directly.
		 */
		bpc = min(bpc, state->max_requested_bpc);

		/* Round down to the nearest even number. */
		bpc = bpc - (bpc & 1);
	}
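The display-depth hunk clamps the sink's reported bpc to the user-requested maximum and rounds down to an even value. The same arithmetic in a runnable sketch; clamp_bpc is an invented name.

#include <stdio.h>
#include <stdint.h>

/* Cap bpc at the user's maximum, then round down to even, since odd
 * bit depths are not valid display configurations.
 */
static uint8_t clamp_bpc(uint8_t sink_bpc, uint8_t max_requested_bpc)
{
	uint8_t bpc = sink_bpc ? sink_bpc : 8;	/* assume 8 if unknown */

	if (bpc > max_requested_bpc)
		bpc = max_requested_bpc;
	return bpc - (bpc & 1);			/* round down to even */
}

int main(void)
{
	printf("%u\n", clamp_bpc(10, 8));	/* -> 8 */
	printf("%u\n", clamp_bpc(0, 16));	/* -> 8 */
	printf("%u\n", clamp_bpc(15, 16));	/* -> 14 */
	return 0;
}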
@@ -907,8 +907,6 @@ struct smu_funcs
	((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0)
#define smu_set_azalia_d3_pme(smu) \
	((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0)
#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \
	((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0)
#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \
	((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0)
#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \

@@ -326,7 +326,8 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

@@ -354,10 +355,11 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    powerplayinfo);

		ret = smu_get_atom_data_table(smu, index, (uint16_t *)&size, &frev, &crev,
		ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
					      (uint8_t **)&table);
		if (ret)
			return ret;
		size = atom_table_size;
	}

	if (!smu->smu_table.power_play_table)
@@ -8,6 +8,7 @@
#include <linux/iommu.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#ifdef CONFIG_DEBUG_FS

@@ -143,6 +144,12 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
		return mdev->irq;
	}

	/* Get the optional framebuffer memory resource */
	ret = of_reserved_mem_device_init(dev);
	if (ret && ret != -ENODEV)
		return ret;
	ret = 0;

	for_each_available_child_of_node(np, child) {
		if (of_node_cmp(child->name, "pipeline") == 0) {
			ret = komeda_parse_pipe_dt(mdev, child);

@@ -289,6 +296,8 @@ void komeda_dev_destroy(struct komeda_dev *mdev)

	mdev->n_pipelines = 0;

	of_reserved_mem_device_release(dev);

	if (funcs && funcs->cleanup)
		funcs->cleanup(mdev);

@@ -35,6 +35,25 @@ komeda_get_format_caps(struct komeda_format_caps_table *table,
	return NULL;
}

u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info, u64 modifier)
{
	u32 bpp;

	switch (info->format) {
	case DRM_FORMAT_YUV420_8BIT:
		bpp = 12;
		break;
	case DRM_FORMAT_YUV420_10BIT:
		bpp = 15;
		break;
	default:
		bpp = info->cpp[0] * 8;
		break;
	}

	return bpp;
}

/* Two assumptions
 * 1. RGB always has YTR
 * 2. Tiled RGB always has SC

@@ -97,6 +97,9 @@ const struct komeda_format_caps *
komeda_get_format_caps(struct komeda_format_caps_table *table,
		       u32 fourcc, u64 modifier);

u32 komeda_get_afbc_format_bpp(const struct drm_format_info *info,
			       u64 modifier);

u32 *komeda_get_layer_fourcc_list(struct komeda_format_caps_table *table,
				  u32 layer_type, u32 *n_fmts);

@@ -43,7 +43,7 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
	struct drm_framebuffer *fb = &kfb->base;
	const struct drm_format_info *info = fb->format;
	struct drm_gem_object *obj;
	u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks;
	u32 alignment_w = 0, alignment_h = 0, alignment_header, n_blocks, bpp;
	u64 min_size;

	obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);

@@ -88,8 +89,9 @@ komeda_fb_afbc_size_check(struct komeda_fb *kfb, struct drm_file *file,
	kfb->offset_payload = ALIGN(n_blocks * AFBC_HEADER_SIZE,
				    alignment_header);

	bpp = komeda_get_afbc_format_bpp(info, fb->modifier);
	kfb->afbc_size = kfb->offset_payload + n_blocks *
			 ALIGN(info->cpp[0] * AFBC_SUPERBLK_PIXELS,
			 ALIGN(bpp * AFBC_SUPERBLK_PIXELS / 8,
			       AFBC_SUPERBLK_ALIGNMENT);
	min_size = kfb->afbc_size + fb->offsets[0];
	if (min_size > obj->size) {
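komeda's size check now derives the AFBC superblock payload from bits-per-pixel rather than bytes-per-pixel, so 12 and 15 bpp YUV formats stop being sized as if they were byte-aligned. A rough userspace rendering of the math; the constants here are illustrative assumptions, not the driver's values.

#include <stdio.h>
#include <stdint.h>

#define AFBC_SUPERBLK_PIXELS	256U	/* assumed 16x16 superblock */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))

/* One superblock payload is bpp * pixels / 8 bytes, aligned to the
 * payload alignment; total payload is that times the block count.
 */
static uint64_t afbc_payload_size(uint32_t n_blocks, uint32_t bpp,
				  uint32_t alignment)
{
	return (uint64_t)n_blocks *
	       ALIGN_UP(bpp * AFBC_SUPERBLK_PIXELS / 8, alignment);
}

int main(void)
{
	/* YUV420 8-bit packs 12 bits per pixel on average */
	printf("%llu\n",
	       (unsigned long long)afbc_payload_size(64, 12, 128));
	return 0;
}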
@@ -15,6 +15,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_vblank.h>
#include <drm/drm_probe_helper.h>

#include "komeda_dev.h"
#include "komeda_framebuffer.h"

@@ -315,6 +316,8 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)

	drm->irq_enabled = true;

	drm_kms_helper_poll_init(drm);

	err = drm_dev_register(drm, 0);
	if (err)
		goto cleanup_mode_config;

@@ -322,6 +325,7 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
	return kms;

cleanup_mode_config:
	drm_kms_helper_poll_fini(drm);
	drm->irq_enabled = false;
	drm_mode_config_cleanup(drm);
	komeda_kms_cleanup_private_objs(kms);

@@ -338,6 +342,7 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
	drm->irq_enabled = false;
	mdev->funcs->disable_irq(mdev);
	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	component_unbind_all(mdev->dev, drm);
	komeda_kms_cleanup_private_objs(kms);
	drm_mode_config_cleanup(drm);
@@ -1465,8 +1465,8 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
	else if (intel_crtc_has_dp_encoder(pipe_config))
		dotclock = intel_dotclock_calculate(pipe_config->port_clock,
						    &pipe_config->dp_m_n);
	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
		dotclock = pipe_config->port_clock * 2 / 3;
	else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp > 24)
		dotclock = pipe_config->port_clock * 24 / pipe_config->pipe_bpp;
	else
		dotclock = pipe_config->port_clock;

@@ -829,7 +829,7 @@ struct intel_crtc_state {

	/*
	 * Frequence the dpll for the port should run at. Differs from the
	 * adjusted dotclock e.g. for DP or 12bpc hdmi mode. This is also
	 * adjusted dotclock e.g. for DP or 10/12bpc hdmi mode. This is also
	 * already multiplied by pixel_multiplier.
	 */
	int port_clock;
@@ -17,6 +17,7 @@
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>

#include "mtk_drm_crtc.h"
#include "mtk_drm_ddp.h"

@@ -213,6 +214,7 @@ static int mtk_drm_kms_init(struct drm_device *drm)
	struct mtk_drm_private *private = drm->dev_private;
	struct platform_device *pdev;
	struct device_node *np;
	struct device *dma_dev;
	int ret;

	if (!iommu_present(&platform_bus_type))

@@ -275,7 +277,29 @@ static int mtk_drm_kms_init(struct drm_device *drm)
		goto err_component_unbind;
	}

	private->dma_dev = &pdev->dev;
	dma_dev = &pdev->dev;
	private->dma_dev = dma_dev;

	/*
	 * Configure the DMA segment size to make sure we get contiguous IOVA
	 * when importing PRIME buffers.
	 */
	if (!dma_dev->dma_parms) {
		private->dma_parms_allocated = true;
		dma_dev->dma_parms =
			devm_kzalloc(drm->dev, sizeof(*dma_dev->dma_parms),
				     GFP_KERNEL);
	}
	if (!dma_dev->dma_parms) {
		ret = -ENOMEM;
		goto err_component_unbind;
	}

	ret = dma_set_max_seg_size(dma_dev, (unsigned int)DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dma_dev, "Failed to set DMA segment size\n");
		goto err_unset_dma_parms;
	}

	/*
	 * We don't use the drm_irq_install() helpers provided by the DRM

@@ -285,13 +309,16 @@ static int mtk_drm_kms_init(struct drm_device *drm)
	drm->irq_enabled = true;
	ret = drm_vblank_init(drm, MAX_CRTC);
	if (ret < 0)
		goto err_component_unbind;
		goto err_unset_dma_parms;

	drm_kms_helper_poll_init(drm);
	drm_mode_config_reset(drm);

	return 0;

err_unset_dma_parms:
	if (private->dma_parms_allocated)
		dma_dev->dma_parms = NULL;
err_component_unbind:
	component_unbind_all(drm->dev, drm);
err_config_cleanup:

@@ -302,9 +329,14 @@ err_config_cleanup:

static void mtk_drm_kms_deinit(struct drm_device *drm)
{
	struct mtk_drm_private *private = drm->dev_private;

	drm_kms_helper_poll_fini(drm);
	drm_atomic_helper_shutdown(drm);

	if (private->dma_parms_allocated)
		private->dma_dev->dma_parms = NULL;

	component_unbind_all(drm->dev, drm);
	drm_mode_config_cleanup(drm);
}

@@ -320,6 +352,18 @@ static const struct file_operations mtk_drm_fops = {
	.compat_ioctl = drm_compat_ioctl,
};

/*
 * We need to override this because the device used to import the memory is
 * not dev->dev, as drm_gem_prime_import() expects.
 */
struct drm_gem_object *mtk_drm_gem_prime_import(struct drm_device *dev,
						struct dma_buf *dma_buf)
{
	struct mtk_drm_private *private = dev->dev_private;

	return drm_gem_prime_import_dev(dev, dma_buf, private->dma_dev);
}

static struct drm_driver mtk_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
			   DRIVER_ATOMIC,

@@ -331,7 +375,7 @@ static struct drm_driver mtk_drm_driver = {
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = drm_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_import = mtk_drm_gem_prime_import,
	.gem_prime_get_sg_table = mtk_gem_prime_get_sg_table,
	.gem_prime_import_sg_table = mtk_gem_prime_import_sg_table,
	.gem_prime_mmap = mtk_drm_gem_mmap_buf,

@@ -524,12 +568,15 @@ static int mtk_drm_probe(struct platform_device *pdev)
		comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
		if (!comp) {
			ret = -ENOMEM;
			of_node_put(node);
			goto err_node;
		}

		ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
		if (ret)
		if (ret) {
			of_node_put(node);
			goto err_node;
		}

		private->ddp_comp[comp_id] = comp;
	}

@@ -51,6 +51,8 @@ struct mtk_drm_private {
	} commit;

	struct drm_atomic_state *suspend_state;

	bool dma_parms_allocated;
};

extern struct platform_driver mtk_ddp_driver;
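The mediatek hunks show the standard recipe for bounding DMA segment size: dma_set_max_seg_size() stores its limit in dev->dma_parms, so that structure must be allocated first. A hedged kernel-style sketch of the same pattern for a generic device (example_setup_dma is an invented name; the APIs used are real):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Ensure dma_parms exists, then allow segments up to 4 GiB so an
 * imported sg list can stay contiguous in IOVA space.
 */
static int example_setup_dma(struct device *dev)
{
	if (!dev->dma_parms) {
		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
					      GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	return dma_set_max_seg_size(dev, (unsigned int)DMA_BIT_MASK(32));
}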
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
		u8 *ptr = msg->buf;

		while (remaining) {
			u8 cnt = (remaining > 16) ? 16 : remaining;
			u8 cmd;
			u8 cnt, retries, cmd;

			if (msg->flags & I2C_M_RD)
				cmd = 1;

@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
			if (mcnt || remaining > 16)
				cmd |= 4; /* MOT */

			ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
			if (ret < 0) {
				nvkm_i2c_aux_release(aux);
				return ret;
			for (retries = 0, cnt = 0;
			     retries < 32 && !cnt;
			     retries++) {
				cnt = min_t(u8, remaining, 16);
				ret = aux->func->xfer(aux, true, cmd,
						      msg->addr, ptr, &cnt);
				if (ret < 0)
					goto out;
			}
			if (!cnt) {
				AUX_TRACE(aux, "no data after 32 retries");
				ret = -EIO;
				goto out;
			}

			ptr += cnt;

@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
		msg++;
	}

	ret = num;
out:
	nvkm_i2c_aux_release(aux);
	return num;
	return ret;
}

static u32
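The nouveau hunk converts a single AUX transfer into a bounded retry loop: a transfer may legitimately move zero bytes, but the loop gives up after 32 attempts instead of spinning forever. The generic shape of that pattern, runnable in userspace; xfer_once is a fake transfer invented for the demo.

#include <stdio.h>

static int xfer_once(unsigned char *done)
{
	static int attempts;

	*done = (++attempts >= 3) ? 16 : 0;	/* succeeds on try 3 */
	return 0;
}

int main(void)
{
	unsigned char cnt = 0;
	int retries, ret = 0;

	for (retries = 0; retries < 32 && !cnt; retries++) {
		ret = xfer_once(&cnt);
		if (ret < 0)
			return ret;
	}
	if (!cnt) {
		fprintf(stderr, "no data after 32 retries\n");
		return -1;
	}
	printf("got %u bytes after %d tries\n", cnt, retries);
	return 0;
}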
@@ -669,7 +669,7 @@ static int pdev_probe(struct platform_device *pdev)
	if (omapdss_is_initialized() == false)
		return -EPROBE_DEFER;

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Failed to set the DMA mask\n");
		return ret;

@@ -673,10 +673,8 @@ static int rcar_lvds_parse_dt_companion(struct rcar_lvds *lvds)

	/* Locate the companion LVDS encoder for dual-link operation, if any. */
	companion = of_parse_phandle(dev->of_node, "renesas,companion", 0);
	if (!companion) {
		dev_err(dev, "Companion LVDS encoder not found\n");
		return -ENXIO;
	}
	if (!companion)
		return 0;

	/*
	 * Sanity check: the companion encoder must have the same compatible

@@ -314,6 +314,7 @@ static void sun4i_tcon0_mode_set_dithering(struct sun4i_tcon *tcon,
		/* R and B components are only 5 bits deep */
		val |= SUN4I_TCON0_FRM_CTL_MODE_R;
		val |= SUN4I_TCON0_FRM_CTL_MODE_B;
		/* Fall through */
	case MEDIA_BUS_FMT_RGB666_1X18:
	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
		/* Fall through: enable dithering */

@@ -993,6 +993,7 @@ static ssize_t sun6i_dsi_transfer(struct mipi_dsi_host *host,
			ret = sun6i_dsi_dcs_read(dsi, msg);
			break;
		}
		/* Else, fall through */

	default:
		ret = -EINVAL;
@@ -1153,8 +1153,6 @@ static unsigned int cp2112_gpio_irq_startup(struct irq_data *d)

	INIT_DELAYED_WORK(&dev->gpio_poll_worker, cp2112_gpio_poll_callback);

	cp2112_gpio_direction_input(gc, d->hwirq);

	if (!dev->gpio_poll) {
		dev->gpio_poll = true;
		schedule_delayed_work(&dev->gpio_poll_worker, 0);

@@ -1204,6 +1202,12 @@ static int __maybe_unused cp2112_allocate_irq(struct cp2112_device *dev,
		return PTR_ERR(dev->desc[pin]);
	}

	ret = cp2112_gpio_direction_input(&dev->gc, pin);
	if (ret < 0) {
		dev_err(dev->gc.parent, "Failed to set GPIO to input dir\n");
		goto err_desc;
	}

	ret = gpiochip_lock_as_irq(&dev->gc, pin);
	if (ret) {
		dev_err(dev->gc.parent, "Failed to lock GPIO as interrupt\n");
@@ -3749,30 +3749,8 @@ static const struct hid_device_id hidpp_devices[] = {

	{ L27MHZ_DEVICE(HID_ANY_ID) },

	{ /* Logitech G203/Prodigy Gaming Mouse */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC084) },
	{ /* Logitech G302 Gaming Mouse */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07F) },
	{ /* Logitech G303 Gaming Mouse */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC080) },
	{ /* Logitech G400 Gaming Mouse */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07E) },
	{ /* Logitech G403 Wireless Gaming Mouse over USB */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC082) },
	{ /* Logitech G403 Gaming Mouse */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC083) },
	{ /* Logitech G403 Hero Gaming Mouse over USB */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08F) },
	{ /* Logitech G502 Proteus Core Gaming Mouse */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07D) },
	{ /* Logitech G502 Proteus Spectrum Gaming Mouse over USB */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC332) },
	{ /* Logitech G502 Hero Gaming Mouse over USB */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC08B) },
	{ /* Logitech G700 Gaming Mouse over USB */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC06B) },
	{ /* Logitech G700s Gaming Mouse over USB */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC07C) },
	{ /* Logitech G703 Gaming Mouse over USB */
	  HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC087) },
	{ /* Logitech G703 Hero Gaming Mouse over USB */
@@ -24,6 +24,7 @@
#define ICL_MOBILE_DEVICE_ID	0x34FC
#define SPT_H_DEVICE_ID		0xA135
#define CML_LP_DEVICE_ID	0x02FC
#define EHL_Ax_DEVICE_ID	0x4BB3

#define	REVISION_ID_CHT_A0	0x6
#define	REVISION_ID_CHT_Ax_SI	0x0

@@ -33,6 +33,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CML_LP_DEVICE_ID)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, EHL_Ax_DEVICE_ID)},
	{0, }
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
@@ -846,6 +846,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
		y >>= 1;
		distance >>= 1;
	}
	if (features->type == INTUOSHT2)
		distance = features->distance_max - distance;
	input_report_abs(input, ABS_X, x);
	input_report_abs(input, ABS_Y, y);
	input_report_abs(input, ABS_DISTANCE, distance);

@@ -1059,7 +1061,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
	input_report_key(input, BTN_BASE2, (data[11] & 0x02));

	if (data[12] & 0x80)
		input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
		input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
	else
		input_report_abs(input, ABS_WHEEL, 0);

@@ -1290,7 +1292,8 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
		}
		if (wacom->tool[0]) {
			input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
			if (wacom->features.type == INTUOSP2_BT) {
			if (wacom->features.type == INTUOSP2_BT ||
			    wacom->features.type == INTUOSP2S_BT) {
				input_report_abs(pen_input, ABS_DISTANCE,
						 range ? frame[13] : wacom->features.distance_max);
			} else {
@@ -26,7 +26,7 @@

static unsigned long virt_to_hvpfn(void *addr)
{
	unsigned long paddr;
	phys_addr_t paddr;

	if (is_vmalloc_addr(addr))
		paddr = page_to_phys(vmalloc_to_page(addr)) +

@@ -146,8 +146,6 @@ struct hv_context {
	 */
	u64 guestid;

	void *tsc_page;

	struct hv_per_cpu_context __percpu *cpu_context;

	/*
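The hyperv hunk widens paddr to phys_addr_t: on 32-bit kernels with PAE, a physical address can exceed unsigned long, so the shift must happen on the wide type. A hedged sketch of the corrected helper shape (example_virt_to_pfn is an invented name, not the hv helper itself; the calls used are standard kernel APIs):

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Resolve a kernel virtual address, linear-mapped or vmalloc'd, to a
 * page frame number, doing the arithmetic on phys_addr_t throughout.
 */
static unsigned long example_virt_to_pfn(void *addr)
{
	phys_addr_t paddr;

	if (is_vmalloc_addr(addr))
		paddr = page_to_phys(vmalloc_to_page(addr)) +
			offset_in_page(addr);
	else
		paddr = __pa(addr);

	return (unsigned long)(paddr >> PAGE_SHIFT);
}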
@@ -4724,10 +4724,14 @@ static int __init cma_init(void)
	if (ret)
		goto err;

	cma_configfs_init();
	ret = cma_configfs_init();
	if (ret)
		goto err_ib;

	return 0;

err_ib:
	ib_unregister_client(&cma_client);
err:
	unregister_netdevice_notifier(&cma_nb);
	ib_sa_unregister_client(&sa_client);

@@ -149,13 +149,11 @@ static bool auto_mode_match(struct ib_qp *qp, struct rdma_counter *counter,
	struct auto_mode_param *param = &counter->mode.param;
	bool match = true;

	if (rdma_is_kernel_res(&counter->res) != rdma_is_kernel_res(&qp->res))
	if (!rdma_is_visible_in_pid_ns(&qp->res))
		return false;

	/* Ensure that counter belong to right PID */
	if (!rdma_is_kernel_res(&counter->res) &&
	    !rdma_is_kernel_res(&qp->res) &&
	    (task_pid_vnr(counter->res.task) != current->pid))
	/* Ensure that counter belongs to the right PID */
	if (task_pid_nr(counter->res.task) != task_pid_nr(qp->res.task))
		return false;

	if (auto_mask & RDMA_COUNTER_MASK_QP_TYPE)

@@ -424,7 +422,7 @@ static struct ib_qp *rdma_counter_get_qp(struct ib_device *dev, u32 qp_num)
	return qp;

err:
	rdma_restrack_put(&qp->res);
	rdma_restrack_put(res);
	return NULL;
}
@@ -382,8 +382,7 @@ static int fill_res_info(struct sk_buff *msg, struct ib_device *device)
	for (i = 0; i < RDMA_RESTRACK_MAX; i++) {
		if (!names[i])
			continue;
		curr = rdma_restrack_count(device, i,
					   task_active_pid_ns(current));
		curr = rdma_restrack_count(device, i);
		ret = fill_res_info_entry(msg, names[i], curr);
		if (ret)
			goto err;

@@ -107,10 +107,8 @@ void rdma_restrack_clean(struct ib_device *dev)
 * rdma_restrack_count() - the current usage of specific object
 * @dev: IB device
 * @type: actual type of object to operate
 * @ns: PID namespace
 */
int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,
			struct pid_namespace *ns)
int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type)
{
	struct rdma_restrack_root *rt = &dev->res[type];
	struct rdma_restrack_entry *e;

@@ -119,10 +117,9 @@ int rdma_restrack_count(struct ib_device *dev, enum rdma_restrack_type type,

	xa_lock(&rt->xa);
	xas_for_each(&xas, e, U32_MAX) {
		if (ns == &init_pid_ns ||
		    (!rdma_is_kernel_res(e) &&
		     ns == task_active_pid_ns(e->task)))
			cnt++;
		if (!rdma_is_visible_in_pid_ns(e))
			continue;
		cnt++;
	}
	xa_unlock(&rt->xa);
	return cnt;

@@ -360,5 +357,7 @@ bool rdma_is_visible_in_pid_ns(struct rdma_restrack_entry *res)
	 */
	if (rdma_is_kernel_res(res))
		return task_active_pid_ns(current) == &init_pid_ns;
	return task_active_pid_ns(current) == task_active_pid_ns(res->task);

	/* PID 0 means that resource is not found in current namespace */
	return task_pid_vnr(res->task);
}
@@ -379,14 +379,9 @@ EXPORT_SYMBOL(ib_umem_release);

int ib_umem_page_count(struct ib_umem *umem)
{
	int i;
	int n;
	int i, n = 0;
	struct scatterlist *sg;

	if (umem->is_odp)
		return ib_umem_num_pages(umem);

	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, i)
		n += sg_dma_len(sg) >> PAGE_SHIFT;
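The umem hunk is a tidy-up of the counting idiom: walk the mapped scatterlist entries and total the DMA length in pages, with the declarations folded into one line. The idiom in isolation, as a hedged kernel-style sketch (example_sg_page_count is an invented name):

#include <linux/scatterlist.h>

/* Count how many pages the DMA-mapped portion of a scatterlist spans. */
static int example_sg_page_count(struct scatterlist *sgl, int nmap)
{
	int i, n = 0;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nmap, i)
		n += sg_dma_len(sg) >> PAGE_SHIFT;

	return n;
}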
@@ -136,6 +136,13 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EBUSY;
	}

	size = req->cmd_size;
	/* change the cmd_size to the number of 16byte cmdq unit.
	 * req->cmd_size is modified here
	 */
	bnxt_qplib_set_cmd_slots(req);

	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;

@@ -150,7 +157,6 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,

	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);

@@ -55,9 +55,7 @@
	do {								\
		memset(&(req), 0, sizeof((req)));			\
		(req).opcode = CMDQ_BASE_OPCODE_##CMD;			\
		(req).cmd_size = (sizeof((req)) +			\
				  BNXT_QPLIB_CMDQE_UNITS - 1) /		\
				 BNXT_QPLIB_CMDQE_UNITS;		\
		(req).cmd_size = sizeof((req));				\
		(req).flags = cpu_to_le16(cmd_flags);			\
	} while (0)

@@ -95,6 +93,13 @@ static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
					BNXT_QPLIB_CMDQE_UNITS);
}

/* Set the cmd_size to a factor of CMDQE unit */
static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
{
	req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
			BNXT_QPLIB_CMDQE_UNITS;
}

#define MAX_CMDQ_IDX(depth)	((depth) - 1)

static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
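bnxt_qplib_set_cmd_slots() is the classic round-up integer division, (x + unit - 1) / unit, converting a byte count into 16-byte command-queue slots. The arithmetic on its own, runnable in userspace (cmd_slots is an invented name):

#include <stdio.h>

#define CMDQE_UNITS 16U	/* one command-queue element is 16 bytes */

/* Round a command size in bytes up to whole 16-byte slots. */
static unsigned int cmd_slots(unsigned int cmd_bytes)
{
	return (cmd_bytes + CMDQE_UNITS - 1) / CMDQE_UNITS;
}

int main(void)
{
	printf("%u\n", cmd_slots(16));	/* 1 */
	printf("%u\n", cmd_slots(17));	/* 2 */
	printf("%u\n", cmd_slots(64));	/* 4 */
	return 0;
}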
@@ -141,12 +141,14 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
	if (!data)
		return -ENOMEM;
	copy = min(len, datalen - 1);
	if (copy_from_user(data, buf, copy))
		return -EFAULT;
	if (copy_from_user(data, buf, copy)) {
		ret = -EFAULT;
		goto free_data;
	}

	ret = debugfs_file_get(file->f_path.dentry);
	if (unlikely(ret))
		return ret;
		goto free_data;
	ptr = data;
	token = ptr;
	for (ptr = data; *ptr; ptr = end + 1, token = ptr) {

@@ -195,6 +197,7 @@ static ssize_t fault_opcodes_write(struct file *file, const char __user *buf,
	ret = len;

	debugfs_file_put(file->f_path.dentry);
free_data:
	kfree(data);
	return ret;
}

@@ -214,7 +217,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
		return -ENOMEM;
	ret = debugfs_file_get(file->f_path.dentry);
	if (unlikely(ret))
		return ret;
		goto free_data;
	bit = find_first_bit(fault->opcodes, bitsize);
	while (bit < bitsize) {
		zero = find_next_zero_bit(fault->opcodes, bitsize, bit);

@@ -232,6 +235,7 @@ static ssize_t fault_opcodes_read(struct file *file, char __user *buf,
	data[size - 1] = '\n';
	data[size] = '\0';
	ret = simple_read_from_buffer(buf, len, pos, data, size);
free_data:
	kfree(data);
	return ret;
}
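The hfi1 debugfs hunks fix a leak: once the buffer is allocated, every failure path must funnel through one label that frees it rather than returning directly. The shape of that fix in a standalone sketch; fake_write and its error codes are stand-ins for the real handler.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static long fake_write(const char *user_buf, size_t len)
{
	long ret;
	char *data = calloc(1, len + 1);

	if (!data)
		return -12;		/* -ENOMEM */
	if (!user_buf) {		/* stands in for copy_from_user() failing */
		ret = -14;		/* -EFAULT */
		goto free_data;
	}
	memcpy(data, user_buf, len);
	ret = (long)len;
free_data:
	free(data);			/* every exit path frees the buffer */
	return ret;
}

int main(void)
{
	printf("%ld\n", fake_write("opcode", 6));
	printf("%ld\n", fake_write(NULL, 6));
	return 0;
}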
@@ -2574,18 +2574,9 @@ void hfi1_kern_read_tid_flow_free(struct rvt_qp *qp)
	hfi1_kern_clear_hw_flow(priv->rcd, qp);
}

static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
			     struct hfi1_packet *packet, u8 rcv_type,
			     u8 opcode)
static bool tid_rdma_tid_err(struct hfi1_packet *packet, u8 rcv_type)
{
	struct rvt_qp *qp = packet->qp;
	struct hfi1_qp_priv *qpriv = qp->priv;
	u32 ipsn;
	struct ib_other_headers *ohdr = packet->ohdr;
	struct rvt_ack_entry *e;
	struct tid_rdma_request *req;
	struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
	u32 i;

	if (rcv_type >= RHF_RCV_TYPE_IB)
		goto done;

@@ -2602,41 +2593,9 @@ static bool tid_rdma_tid_err(struct hfi1_ctxtdata *rcd,
	if (rcv_type == RHF_RCV_TYPE_EAGER) {
		hfi1_restart_rc(qp, qp->s_last_psn + 1, 1);
		hfi1_schedule_send(qp);
		goto done_unlock;
	}

	/*
	 * For TID READ response, error out QP after freeing the tid
	 * resources.
	 */
	if (opcode == TID_OP(READ_RESP)) {
		ipsn = mask_psn(be32_to_cpu(ohdr->u.tid_rdma.r_rsp.verbs_psn));
		if (cmp_psn(ipsn, qp->s_last_psn) > 0 &&
		    cmp_psn(ipsn, qp->s_psn) < 0) {
			hfi1_kern_read_tid_flow_free(qp);
			spin_unlock(&qp->s_lock);
			rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
			goto done;
		}
		goto done_unlock;
	}

	/*
	 * Error out the qp for TID RDMA WRITE
	 */
	hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
	for (i = 0; i < rvt_max_atomic(rdi); i++) {
		e = &qp->s_ack_queue[i];
		if (e->opcode == TID_OP(WRITE_REQ)) {
			req = ack_to_tid_req(e);
			hfi1_kern_exp_rcv_clear_all(req);
		}
	}
	spin_unlock(&qp->s_lock);
	rvt_rc_error(qp, IB_WC_LOC_LEN_ERR);
	goto done;

done_unlock:
	/* Since no payload is delivered, just drop the packet */
	spin_unlock(&qp->s_lock);
done:
	return true;

@@ -2687,12 +2646,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
	u32 fpsn;

	lockdep_assert_held(&qp->r_lock);
	spin_lock(&qp->s_lock);
	/* If the psn is out of valid range, drop the packet */
	if (cmp_psn(ibpsn, qp->s_last_psn) < 0 ||
	    cmp_psn(ibpsn, qp->s_psn) > 0)
		return ret;
		goto s_unlock;

	spin_lock(&qp->s_lock);
	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued

@@ -2740,9 +2699,12 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,

			wqe = do_rc_completion(qp, wqe, ibp);
			if (qp->s_acked == qp->s_tail)
				break;
				goto s_unlock;
		}

		if (qp->s_acked == qp->s_tail)
			goto s_unlock;

		/* Handle the eflags for the request */
		if (wqe->wr.opcode != IB_WR_TID_RDMA_READ)
			goto s_unlock;

@@ -2922,7 +2884,7 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
			if (lnh == HFI1_LRH_GRH)
				goto r_unlock;

			if (tid_rdma_tid_err(rcd, packet, rcv_type, opcode))
			if (tid_rdma_tid_err(packet, rcv_type))
				goto r_unlock;
		}

@@ -2942,8 +2904,15 @@ bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
	 */
	spin_lock(&qp->s_lock);
	qpriv = qp->priv;
	if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
	    qpriv->r_tid_tail == qpriv->r_tid_head)
		goto unlock;
	e = &qp->s_ack_queue[qpriv->r_tid_tail];
	if (e->opcode != TID_OP(WRITE_REQ))
		goto unlock;
	req = ack_to_tid_req(e);
	if (req->comp_seg == req->cur_seg)
		goto unlock;
	flow = &req->flows[req->clear_tail];
	trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
	trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);

@@ -4509,7 +4478,7 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
	struct rvt_swqe *wqe;
	struct tid_rdma_request *req;
	struct tid_rdma_flow *flow;
	u32 aeth, psn, req_psn, ack_psn, resync_psn, ack_kpsn;
	u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
	unsigned long flags;
	u16 fidx;

@@ -4538,6 +4507,9 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
		ack_kpsn--;
	}

	if (unlikely(qp->s_acked == qp->s_tail))
		goto ack_op_err;

	wqe = rvt_get_swqe_ptr(qp, qp->s_acked);

	if (wqe->wr.opcode != IB_WR_TID_RDMA_WRITE)

@@ -4550,7 +4522,8 @@ void hfi1_rc_rcv_tid_rdma_ack(struct hfi1_packet *packet)
	trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);

	/* Drop stale ACK/NAK */
	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0)
	if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
	    cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
		goto ack_op_err;

	while (cmp_psn(ack_kpsn,

@@ -4712,7 +4685,12 @@ done:
		switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
			IB_AETH_CREDIT_MASK) {
		case 0: /* PSN sequence error */
			if (!req->flows)
				break;
			flow = &req->flows[req->acked_tail];
			flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
			if (cmp_psn(psn, flpsn) > 0)
				break;
			trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
							flow);
			req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
@@ -1677,8 +1677,6 @@ tx_err:
				    tx_buf_size, DMA_TO_DEVICE);
		kfree(tun_qp->tx_ring[i].buf.addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	i = MLX4_NUM_TUNNEL_BUFS;
err:
	while (i > 0) {

@@ -1687,6 +1685,8 @@ err:
				    rx_buf_size, DMA_FROM_DEVICE);
		kfree(tun_qp->ring[i].addr);
	}
	kfree(tun_qp->tx_ring);
	tun_qp->tx_ring = NULL;
	kfree(tun_qp->ring);
	tun_qp->ring = NULL;
	return -ENOMEM;
@@ -1023,7 +1023,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		if (MLX5_CAP_GEN(mdev, pg))
		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
			props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
		props->odp_caps = dev->odp_caps;
	}

@@ -6139,6 +6139,8 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
		dev->port[i].roce.last_port_state = IB_PORT_DOWN;
	}

	mlx5_ib_internal_fill_odp_caps(dev);

	err = mlx5_ib_init_multiport_master(dev);
	if (err)
		return err;

@@ -6563,8 +6565,6 @@ static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)

static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
{
	mlx5_ib_internal_fill_odp_caps(dev);

	return mlx5_ib_odp_init_one(dev);
}
@@ -57,9 +57,10 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
	int entry;

	if (umem->is_odp) {
		unsigned int page_shift = to_ib_umem_odp(umem)->page_shift;
		struct ib_umem_odp *odp = to_ib_umem_odp(umem);
		unsigned int page_shift = odp->page_shift;

		*ncont = ib_umem_page_count(umem);
		*ncont = ib_umem_odp_num_pages(odp);
		*count = *ncont << (page_shift - PAGE_SHIFT);
		*shift = page_shift;
		if (order)

@@ -1475,4 +1475,18 @@ int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			bool dyn_bfreg);

int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);

static inline bool mlx5_ib_can_use_umr(struct mlx5_ib_dev *dev,
				       bool do_modify_atomic)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	if (do_modify_atomic &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	return true;
}
#endif /* MLX5_IB_H */
@@ -1293,9 +1293,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
	if (err < 0)
		return ERR_PTR(err);

	use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
		  (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
		   !MLX5_CAP_GEN(dev->mdev, atomic));
	use_umr = mlx5_ib_can_use_umr(dev, true);

	if (order <= mr_cache_max_order(dev) && use_umr) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,

@@ -1448,7 +1446,8 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
		goto err;
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
	if (!mlx5_ib_can_use_umr(dev, true) ||
	    (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
@@ -301,7 +301,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)

	memset(caps, 0, sizeof(*caps));

	if (!MLX5_CAP_GEN(dev->mdev, pg))
	if (!MLX5_CAP_GEN(dev->mdev, pg) ||
	    !mlx5_ib_can_use_umr(dev, true))
		return;

	caps->general_caps = IB_ODP_SUPPORT;

@@ -355,7 +356,8 @@ void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)

	if (MLX5_CAP_GEN(dev->mdev, fixed_buffer_size) &&
	    MLX5_CAP_GEN(dev->mdev, null_mkey) &&
	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
	    MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled))
		caps->general_caps |= IB_ODP_SUPPORT_IMPLICIT;

	return;

@@ -1622,8 +1624,10 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
{
	int ret = 0;

	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
		ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);
	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
		return ret;

	ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_odp_ops);

	if (dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT) {
		ret = mlx5_cmd_null_mkey(dev->mdev, &dev->null_mkey);

@@ -1633,9 +1637,6 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)
		}
	}

	if (!MLX5_CAP_GEN(dev->mdev, pg))
		return ret;

	ret = mlx5_ib_create_pf_eq(dev, &dev->odp_pf_eq);

	return ret;

@@ -1643,7 +1644,7 @@ int mlx5_ib_odp_init_one(struct mlx5_ib_dev *dev)

void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *dev)
{
	if (!MLX5_CAP_GEN(dev->mdev, pg))
	if (!(dev->odp_caps.general_caps & IB_ODP_SUPPORT))
		return;

	mlx5_ib_destroy_pf_eq(dev, &dev->odp_pf_eq);
@@ -4162,7 +4162,7 @@ static u64 get_xlt_octo(u64 bytes)
	       MLX5_IB_UMR_OCTOWORD;
}

static __be64 frwr_mkey_mask(void)
static __be64 frwr_mkey_mask(bool atomic)
{
	u64 result;

@@ -4175,10 +4175,12 @@ static __be64 frwr_mkey_mask(void)
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_A		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	if (atomic)
		result |= MLX5_MKEY_MASK_A;

	return cpu_to_be64(result);
}

@@ -4204,7 +4206,7 @@ static __be64 sig_mkey_mask(void)
}

static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
			    struct mlx5_ib_mr *mr, u8 flags)
			    struct mlx5_ib_mr *mr, u8 flags, bool atomic)
{
	int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;

@@ -4212,7 +4214,7 @@ static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,

	umr->flags = flags;
	umr->xlt_octowords = cpu_to_be16(get_xlt_octo(size));
	umr->mkey_mask = frwr_mkey_mask();
	umr->mkey_mask = frwr_mkey_mask(atomic);
}

static void set_linv_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr)

@@ -4811,10 +4813,22 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
{
	struct mlx5_ib_mr *mr = to_mmr(wr->mr);
	struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
	struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
	int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
	bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
	bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
	u8 flags = 0;

	if (!mlx5_ib_can_use_umr(dev, atomic)) {
		mlx5_ib_warn(to_mdev(qp->ibqp.device),
			     "Fast update of %s for MR is disabled\n",
			     (MLX5_CAP_GEN(dev->mdev,
					   umr_modify_entity_size_disabled)) ?
				     "entity size" :
				     "atomic access");
		return -EINVAL;
	}

	if (unlikely(wr->wr.send_flags & IB_SEND_INLINE)) {
		mlx5_ib_warn(to_mdev(qp->ibqp.device),
			     "Invalid IB_SEND_INLINE send flag\n");

@@ -4826,7 +4840,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
	if (umr_inline)
		flags |= MLX5_UMR_INLINE;

	set_reg_umr_seg(*seg, mr, flags);
	set_reg_umr_seg(*seg, mr, flags, atomic);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	handle_post_send_edge(&qp->sq, seg, *size, cur_edge);
@@ -138,9 +138,9 @@ struct siw_umem {
};

struct siw_pble {
	u64 addr;		/* Address of assigned user buffer */
	u64 size;		/* Size of this entry */
	u64 pbl_off;		/* Total offset from start of PBL */
	dma_addr_t addr;	/* Address of assigned buffer */
	unsigned int size;	/* Size of this entry */
	unsigned long pbl_off;	/* Total offset from start of PBL */
};

struct siw_pbl {

@@ -734,7 +734,7 @@ static inline void siw_crc_skb(struct siw_rx_stream *srx, unsigned int len)
		  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)

#define siw_dbg_cep(cep, fmt, ...)					\
	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%p] %s: " fmt,		\
	ibdev_dbg(&cep->sdev->base_dev, "CEP[0x%pK] %s: " fmt,		\
		  cep, __func__, ##__VA_ARGS__)

void siw_cq_flush(struct siw_cq *cq);
@@ -355,8 +355,8 @@ static int siw_cm_upcall(struct siw_cep *cep, enum iw_cm_event_type reason,
		getname_local(cep->sock, &event.local_addr);
		getname_peer(cep->sock, &event.remote_addr);
	}
	siw_dbg_cep(cep, "[QP %u]: id 0x%p, reason=%d, status=%d\n",
		    cep->qp ? qp_id(cep->qp) : -1, id, reason, status);
	siw_dbg_cep(cep, "[QP %u]: reason=%d, status=%d\n",
		    cep->qp ? qp_id(cep->qp) : UINT_MAX, reason, status);

	return id->event_handler(id, &event);
}

@@ -947,8 +947,6 @@ static void siw_accept_newconn(struct siw_cep *cep)
		siw_cep_get(new_cep);
		new_s->sk->sk_user_data = new_cep;

		siw_dbg_cep(cep, "listen socket 0x%p, new 0x%p\n", s, new_s);

		if (siw_tcp_nagle == false) {
			int val = 1;

@@ -1011,7 +1009,8 @@ static void siw_cm_work_handler(struct work_struct *w)
	cep = work->cep;

	siw_dbg_cep(cep, "[QP %u]: work type: %d, state %d\n",
		    cep->qp ? qp_id(cep->qp) : -1, work->type, cep->state);
		    cep->qp ? qp_id(cep->qp) : UINT_MAX,
		    work->type, cep->state);

	siw_cep_set_inuse(cep);

@@ -1145,9 +1144,9 @@ static void siw_cm_work_handler(struct work_struct *w)
	}
	if (release_cep) {
		siw_dbg_cep(cep,
			    "release: timer=%s, QP[%u], id 0x%p\n",
			    "release: timer=%s, QP[%u]\n",
			    cep->mpa_timer ? "y" : "n",
			    cep->qp ? qp_id(cep->qp) : -1, cep->cm_id);
			    cep->qp ? qp_id(cep->qp) : UINT_MAX);

		siw_cancel_mpatimer(cep);

@@ -1211,8 +1210,8 @@ int siw_cm_queue_work(struct siw_cep *cep, enum siw_work_type type)
		else
			delay = MPAREP_TIMEOUT;
	}
	siw_dbg_cep(cep, "[QP %u]: work type: %d, work 0x%p, timeout %lu\n",
		    cep->qp ? qp_id(cep->qp) : -1, type, work, delay);
	siw_dbg_cep(cep, "[QP %u]: work type: %d, timeout %lu\n",
		    cep->qp ? qp_id(cep->qp) : -1, type, delay);

	queue_delayed_work(siw_cm_wq, &work->work, delay);

@@ -1376,16 +1375,16 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
	}
	if (v4)
		siw_dbg_qp(qp,
			   "id 0x%p, pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
			   id, pd_len,
			   "pd_len %d, laddr %pI4 %d, raddr %pI4 %d\n",
			   pd_len,
			   &((struct sockaddr_in *)(laddr))->sin_addr,
			   ntohs(((struct sockaddr_in *)(laddr))->sin_port),
			   &((struct sockaddr_in *)(raddr))->sin_addr,
			   ntohs(((struct sockaddr_in *)(raddr))->sin_port));
	else
		siw_dbg_qp(qp,
			   "id 0x%p, pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
			   id, pd_len,
			   "pd_len %d, laddr %pI6 %d, raddr %pI6 %d\n",
			   pd_len,
			   &((struct sockaddr_in6 *)(laddr))->sin6_addr,
			   ntohs(((struct sockaddr_in6 *)(laddr))->sin6_port),
			   &((struct sockaddr_in6 *)(raddr))->sin6_addr,
			   ntohs(((struct sockaddr_in6 *)(raddr))->sin6_port));

@@ -1508,14 +1507,13 @@ int siw_connect(struct iw_cm_id *id, struct iw_cm_conn_param *params)
	if (rv >= 0) {
		rv = siw_cm_queue_work(cep, SIW_CM_WORK_MPATIMEOUT);
		if (!rv) {
			siw_dbg_cep(cep, "id 0x%p, [QP %u]: exit\n", id,
				    qp_id(qp));
			siw_dbg_cep(cep, "[QP %u]: exit\n", qp_id(qp));
			siw_cep_set_free(cep);
			return 0;
		}
	}
error:
	siw_dbg_qp(qp, "failed: %d\n", rv);
	siw_dbg(id->device, "failed: %d\n", rv);

	if (cep) {
		siw_socket_disassoc(s);

@@ -1540,7 +1538,8 @@ error:
	} else if (s) {
		sock_release(s);
	}
	siw_qp_put(qp);
	if (qp)
		siw_qp_put(qp);

	return rv;
}

@@ -1580,7 +1579,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
	siw_cancel_mpatimer(cep);

	if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
		siw_dbg_cep(cep, "id 0x%p: out of state\n", id);
		siw_dbg_cep(cep, "out of state\n");

		siw_cep_set_free(cep);
		siw_cep_put(cep);

@@ -1601,7 +1600,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
		up_write(&qp->state_lock);
		goto error;
	}
	siw_dbg_cep(cep, "id 0x%p\n", id);
	siw_dbg_cep(cep, "[QP %d]\n", params->qpn);

	if (try_gso && cep->mpa.hdr.params.bits & MPA_RR_FLAG_GSO_EXP) {
		siw_dbg_cep(cep, "peer allows GSO on TX\n");

@@ -1611,8 +1610,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
	    params->ird > sdev->attrs.max_ird) {
		siw_dbg_cep(
			cep,
			"id 0x%p, [QP %u]: ord %d (max %d), ird %d (max %d)\n",
			id, qp_id(qp), params->ord, sdev->attrs.max_ord,
			"[QP %u]: ord %d (max %d), ird %d (max %d)\n",
			qp_id(qp), params->ord, sdev->attrs.max_ord,
			params->ird, sdev->attrs.max_ird);
		rv = -EINVAL;
		up_write(&qp->state_lock);

@@ -1624,8 +1623,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
	if (params->private_data_len > max_priv_data) {
		siw_dbg_cep(
			cep,
			"id 0x%p, [QP %u]: private data length: %d (max %d)\n",
			id, qp_id(qp), params->private_data_len, max_priv_data);
			"[QP %u]: private data length: %d (max %d)\n",
			qp_id(qp), params->private_data_len, max_priv_data);
		rv = -EINVAL;
		up_write(&qp->state_lock);
		goto error;

@@ -1679,7 +1678,7 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
	qp_attrs.flags = SIW_MPA_CRC;
	qp_attrs.state = SIW_QP_STATE_RTS;

	siw_dbg_cep(cep, "id 0x%p, [QP%u]: moving to rts\n", id, qp_id(qp));
	siw_dbg_cep(cep, "[QP%u]: moving to rts\n", qp_id(qp));

	/* Associate QP with CEP */
	siw_cep_get(cep);

@@ -1700,8 +1699,8 @@ int siw_accept(struct iw_cm_id *id, struct iw_cm_conn_param *params)
	if (rv)
		goto error;

	siw_dbg_cep(cep, "id 0x%p, [QP %u]: send mpa reply, %d byte pdata\n",
		    id, qp_id(qp), params->private_data_len);
	siw_dbg_cep(cep, "[QP %u]: send mpa reply, %d byte pdata\n",
		    qp_id(qp), params->private_data_len);

	rv = siw_send_mpareqrep(cep, params->private_data,
				params->private_data_len);

@@ -1759,14 +1758,14 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
	siw_cancel_mpatimer(cep);

	if (cep->state != SIW_EPSTATE_RECVD_MPAREQ) {
		siw_dbg_cep(cep, "id 0x%p: out of state\n", id);
		siw_dbg_cep(cep, "out of state\n");

		siw_cep_set_free(cep);
		siw_cep_put(cep); /* put last reference */

		return -ECONNRESET;
	}
	siw_dbg_cep(cep, "id 0x%p, cep->state %d, pd_len %d\n", id, cep->state,
	siw_dbg_cep(cep, "cep->state %d, pd_len %d\n", cep->state,
		    pd_len);

	if (__mpa_rr_revision(cep->mpa.hdr.params.bits) >= MPA_REVISION_1) {

@@ -1804,14 +1803,14 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
	rv = kernel_setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (char *)&s_val,
			       sizeof(s_val));
	if (rv) {
		siw_dbg(id->device, "id 0x%p: setsockopt error: %d\n", id, rv);
		siw_dbg(id->device, "setsockopt error: %d\n", rv);
		goto error;
	}
	rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
				    sizeof(struct sockaddr_in) :
				    sizeof(struct sockaddr_in6));
	if (rv) {
		siw_dbg(id->device, "id 0x%p: socket bind error: %d\n", id, rv);
		siw_dbg(id->device, "socket bind error: %d\n", rv);
		goto error;
	}
	cep = siw_cep_alloc(sdev);

@@ -1824,13 +1823,13 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
	rv = siw_cm_alloc_work(cep, backlog);
	if (rv) {
		siw_dbg(id->device,
			"id 0x%p: alloc_work error %d, backlog %d\n", id,
			"alloc_work error %d, backlog %d\n",
			rv, backlog);
		goto error;
	}
	rv = s->ops->listen(s, backlog);
	if (rv) {
		siw_dbg(id->device, "id 0x%p: listen error %d\n", id, rv);
		siw_dbg(id->device, "listen error %d\n", rv);
		goto error;
	}
	cep->cm_id = id;

@@ -1914,8 +1913,7 @@ static void siw_drop_listeners(struct iw_cm_id *id)

		list_del(p);

		siw_dbg_cep(cep, "id 0x%p: drop cep, state %d\n", id,
			    cep->state);
		siw_dbg_cep(cep, "drop cep, state %d\n", cep->state);

		siw_cep_set_inuse(cep);

@@ -1952,7 +1950,7 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
	struct net_device *dev = to_siw_dev(id->device)->netdev;
	int rv = 0, listeners = 0;

	siw_dbg(id->device, "id 0x%p: backlog %d\n", id, backlog);
	siw_dbg(id->device, "backlog %d\n", backlog);

	/*
	 * For each attached address of the interface, create a

@@ -1968,8 +1966,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
			s_raddr = (struct sockaddr_in *)&id->remote_addr;

		siw_dbg(id->device,
			"id 0x%p: laddr %pI4:%d, raddr %pI4:%d\n",
			id, &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
			"laddr %pI4:%d, raddr %pI4:%d\n",
			&s_laddr.sin_addr, ntohs(s_laddr.sin_port),
			&s_raddr->sin_addr, ntohs(s_raddr->sin_port));

		rtnl_lock();

@@ -1994,8 +1992,8 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
			  *s_raddr = &to_sockaddr_in6(id->remote_addr);

		siw_dbg(id->device,
			"id 0x%p: laddr %pI6:%d, raddr %pI6:%d\n",
			id, &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
			"laddr %pI6:%d, raddr %pI6:%d\n",
			&s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
			&s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));

		read_lock_bh(&in6_dev->lock);

@@ -2028,17 +2026,15 @@ int siw_create_listen(struct iw_cm_id *id, int backlog)
	else if (!rv)
		rv = -EINVAL;

	siw_dbg(id->device, "id 0x%p: %s\n", id, rv ? "FAIL" : "OK");
	siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");

	return rv;
}

int siw_destroy_listen(struct iw_cm_id *id)
{
	siw_dbg(id->device, "id 0x%p\n", id);

	if (!id->provider_data) {
		siw_dbg(id->device, "id 0x%p: no cep(s)\n", id);
		siw_dbg(id->device, "no cep(s)\n");
		return 0;
	}
	siw_drop_listeners(id);
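The siw hunks systematically drop raw kernel pointers from debug output or switch them to %pK: plain %p hashes pointer values, and %pK additionally honors kptr_restrict, so a debug macro that must print a pointer at all should use %pK. A hedged kernel-style sketch of the idea (example_log_id is an invented name; the specifier behavior described is standard printk behavior):

#include <linux/device.h>
#include <linux/kernel.h>

/* Print an opaque 64-bit handle as a restricted pointer; the cast
 * through uintptr_t avoids dumping the raw bits with an integer
 * format.
 */
static void example_log_id(struct device *dev, u64 id)
{
	dev_dbg(dev, "work id 0x%pK\n", (void *)(uintptr_t)id);
}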
@@ -71,9 +71,10 @@ int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
			wc->wc_flags = IB_WC_WITH_INVALIDATE;
		}
		wc->qp = cqe->base_qp;
		siw_dbg_cq(cq, "idx %u, type %d, flags %2x, id 0x%p\n",
		siw_dbg_cq(cq,
			   "idx %u, type %d, flags %2x, id 0x%pK\n",
			   cq->cq_get % cq->num_cqe, cqe->opcode,
			   cqe->flags, (void *)cqe->id);
			   cqe->flags, (void *)(uintptr_t)cqe->id);
	}
	WRITE_ONCE(cqe->flags, 0);
	cq->cq_get++;
@@ -197,12 +197,12 @@ int siw_check_mem(struct ib_pd *pd, struct siw_mem *mem, u64 addr,
	 */
	if (addr < mem->va || addr + len > mem->va + mem->len) {
		siw_dbg_pd(pd, "MEM interval len %d\n", len);
		siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] out of bounds\n",
			   (unsigned long long)addr,
			   (unsigned long long)(addr + len));
		siw_dbg_pd(pd, "[0x%016llx, 0x%016llx] STag=0x%08x\n",
			   (unsigned long long)mem->va,
			   (unsigned long long)(mem->va + mem->len),
		siw_dbg_pd(pd, "[0x%pK, 0x%pK] out of bounds\n",
			   (void *)(uintptr_t)addr,
			   (void *)(uintptr_t)(addr + len));
		siw_dbg_pd(pd, "[0x%pK, 0x%pK] STag=0x%08x\n",
			   (void *)(uintptr_t)mem->va,
			   (void *)(uintptr_t)(mem->va + mem->len),
			   mem->stag);

		return -E_BASE_BOUNDS;

@@ -330,7 +330,7 @@ out:
 * Optionally, provides remaining len within current element, and
 * current PBL index for later resume at same element.
 */
u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx)
{
	int i = idx ? *idx : 0;
@ -9,7 +9,7 @@
|
||||
struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable);
|
||||
void siw_umem_release(struct siw_umem *umem, bool dirty);
|
||||
struct siw_pbl *siw_pbl_alloc(u32 num_buf);
|
||||
u64 siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
|
||||
dma_addr_t siw_pbl_get_buffer(struct siw_pbl *pbl, u64 off, int *len, int *idx);
|
||||
struct siw_mem *siw_mem_id2obj(struct siw_device *sdev, int stag_index);
|
||||
int siw_mem_add(struct siw_device *sdev, struct siw_mem *m);
|
||||
int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
|
||||
|
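Returning dma_addr_t rather than u64 makes the prototype follow the platform's DMA addressing width instead of always claiming 64 bits. A userspace mock-up of the idea; the typedef switch mimics the kernel's CONFIG_ARCH_DMA_ADDR_T_64BIT selection but is illustrative here, not the kernel's actual header:

/* Hedged sketch: dma_addr_t tracks the bus width, u64 does not. */
#include <stdint.h>
#include <stdio.h>

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT	/* illustrative config switch */
typedef uint64_t dma_addr_t;
#else
typedef uint32_t dma_addr_t;
#endif

static dma_addr_t pbl_get_buffer(dma_addr_t base, uint64_t off)
{
	return base + (dma_addr_t)off;	/* width matches the bus, not u64 */
}

int main(void)
{
	printf("%zu bytes per dma_addr_t\n", sizeof(dma_addr_t));
	printf("0x%llx\n", (unsigned long long)pbl_get_buffer(0x80000000u, 0x40));
	return 0;
}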
@@ -949,7 +949,7 @@ skip_irq:
 			rv = -EINVAL;
 			goto out;
 		}
-		wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1];
+		wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
 		wqe->sqe.sge[0].lkey = 0;
 		wqe->sqe.num_sge = 1;
 	}

@@ -38,9 +38,10 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
 
 	p = siw_get_upage(umem, dest_addr);
 	if (unlikely(!p)) {
-		pr_warn("siw: %s: [QP %u]: bogus addr: %p, %p\n",
+		pr_warn("siw: %s: [QP %u]: bogus addr: %pK, %pK\n",
 			__func__, qp_id(rx_qp(srx)),
-			(void *)dest_addr, (void *)umem->fp_addr);
+			(void *)(uintptr_t)dest_addr,
+			(void *)(uintptr_t)umem->fp_addr);
 		/* siw internal error */
 		srx->skb_copied += copied;
 		srx->skb_new -= copied;

@@ -50,7 +51,7 @@ static int siw_rx_umem(struct siw_rx_stream *srx, struct siw_umem *umem,
 	pg_off = dest_addr & ~PAGE_MASK;
 	bytes = min(len, (int)PAGE_SIZE - pg_off);
 
-	siw_dbg_qp(rx_qp(srx), "page %p, bytes=%u\n", p, bytes);
+	siw_dbg_qp(rx_qp(srx), "page %pK, bytes=%u\n", p, bytes);
 
 	dest = kmap_atomic(p);
 	rv = skb_copy_bits(srx->skb, srx->skb_offset, dest + pg_off,
@@ -104,11 +105,11 @@ static int siw_rx_kva(struct siw_rx_stream *srx, void *kva, int len)
 {
 	int rv;
 
-	siw_dbg_qp(rx_qp(srx), "kva: 0x%p, len: %u\n", kva, len);
+	siw_dbg_qp(rx_qp(srx), "kva: 0x%pK, len: %u\n", kva, len);
 
 	rv = skb_copy_bits(srx->skb, srx->skb_offset, kva, len);
 	if (unlikely(rv)) {
-		pr_warn("siw: [QP %u]: %s, len %d, kva 0x%p, rv %d\n",
+		pr_warn("siw: [QP %u]: %s, len %d, kva 0x%pK, rv %d\n",
 			qp_id(rx_qp(srx)), __func__, len, kva, rv);
 
 		return rv;
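The %p to %pK conversions throughout this area are about not leaking raw kernel addresses into logs: %pK honors the kptr_restrict sysctl and masks the value for unprivileged readers. A userspace analogue of the gating idea; the masking policy below is illustrative, as the kernel's actual behavior also depends on pointer hashing and reader credentials:

/* Hedged sketch of the idea behind %pK: only reveal raw addresses
 * to sufficiently privileged readers. */
#include <stdio.h>

static int kptr_restrict = 1;	/* 0: show, 1: hide for unprivileged */

static void print_kptr(const void *p, int privileged)
{
	if (kptr_restrict && !privileged)
		printf("0x%016llx\n", 0ULL);	/* masked */
	else
		printf("%p\n", p);		/* raw address */
}

int main(void)
{
	int x;

	print_kptr(&x, 0);	/* masked for unprivileged reader */
	print_kptr(&x, 1);	/* raw for privileged reader */
	return 0;
}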
@@ -132,7 +133,7 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
 
 	while (len) {
 		int bytes;
-		u64 buf_addr =
+		dma_addr_t buf_addr =
 			siw_pbl_get_buffer(pbl, offset, &bytes, pbl_idx);
 		if (!buf_addr)
 			break;

@@ -485,8 +486,8 @@ int siw_proc_send(struct siw_qp *qp)
 		mem_p = *mem;
 		if (mem_p->mem_obj == NULL)
 			rv = siw_rx_kva(srx,
-				(void *)(sge->laddr + frx->sge_off),
-				sge_bytes);
+				(void *)(uintptr_t)(sge->laddr + frx->sge_off),
+				sge_bytes);
 		else if (!mem_p->is_pbl)
 			rv = siw_rx_umem(srx, mem_p->umem,
 					 sge->laddr + frx->sge_off, sge_bytes);

@@ -598,8 +599,8 @@ int siw_proc_write(struct siw_qp *qp)
 
 	if (mem->mem_obj == NULL)
 		rv = siw_rx_kva(srx,
-			(void *)(srx->ddp_to + srx->fpdu_part_rcvd),
-			bytes);
+			(void *)(uintptr_t)(srx->ddp_to + srx->fpdu_part_rcvd),
+			bytes);
 	else if (!mem->is_pbl)
 		rv = siw_rx_umem(srx, mem->umem,
 				 srx->ddp_to + srx->fpdu_part_rcvd, bytes);

@@ -841,8 +842,9 @@ int siw_proc_rresp(struct siw_qp *qp)
 	bytes = min(srx->fpdu_part_rem, srx->skb_new);
 
 	if (mem_p->mem_obj == NULL)
-		rv = siw_rx_kva(srx, (void *)(sge->laddr + wqe->processed),
-				bytes);
+		rv = siw_rx_kva(srx,
+				(void *)(uintptr_t)(sge->laddr + wqe->processed),
+				bytes);
 	else if (!mem_p->is_pbl)
 		rv = siw_rx_umem(srx, mem_p->umem, sge->laddr + wqe->processed,
 				 bytes);

@@ -26,7 +26,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
 {
 	struct siw_pbl *pbl = mem->pbl;
 	u64 offset = addr - mem->va;
-	u64 paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
+	dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);
 
 	if (paddr)
 		return virt_to_page(paddr);

@@ -37,7 +37,7 @@ static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
 /*
  * Copy short payload at provided destination payload address
  */
-static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
+static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
 {
 	struct siw_wqe *wqe = &c_tx->wqe_active;
 	struct siw_sge *sge = &wqe->sqe.sge[0];

@@ -50,16 +50,16 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
 		return 0;
 
 	if (tx_flags(wqe) & SIW_WQE_INLINE) {
-		memcpy((void *)paddr, &wqe->sqe.sge[1], bytes);
+		memcpy(paddr, &wqe->sqe.sge[1], bytes);
 	} else {
 		struct siw_mem *mem = wqe->mem[0];
 
 		if (!mem->mem_obj) {
 			/* Kernel client using kva */
-			memcpy((void *)paddr, (void *)sge->laddr, bytes);
+			memcpy(paddr,
+			       (const void *)(uintptr_t)sge->laddr, bytes);
 		} else if (c_tx->in_syscall) {
-			if (copy_from_user((void *)paddr,
-					   (const void __user *)sge->laddr,
+			if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
 					   bytes))
 				return -EFAULT;
 		} else {
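u64_to_user_ptr() replaces the open-coded (const void __user *) cast above; it centralizes the u64 to pointer conversion through uintptr_t in one helper. A userspace analogue (the kernel macro additionally applies the __user annotation, which has no userspace equivalent):

/* Hedged sketch of what u64_to_user_ptr() does: perform the u64 -> pointer
 * cast through uintptr_t in exactly one place. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define u64_to_ptr(x) ((void *)(uintptr_t)(x))	/* illustrative analogue */

int main(void)
{
	char src[] = "payload";
	char dst[8] = { 0 };
	uint64_t handle = (uint64_t)(uintptr_t)src;	/* as stored in an sqe */

	memcpy(dst, u64_to_ptr(handle), sizeof(src));	/* copy via the handle */
	puts(dst);
	return 0;
}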
@@ -79,12 +79,12 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
 			buffer = kmap_atomic(p);
 
 			if (likely(PAGE_SIZE - off >= bytes)) {
-				memcpy((void *)paddr, buffer + off, bytes);
+				memcpy(paddr, buffer + off, bytes);
 				kunmap_atomic(buffer);
 			} else {
 				unsigned long part = bytes - (PAGE_SIZE - off);
 
-				memcpy((void *)paddr, buffer + off, part);
+				memcpy(paddr, buffer + off, part);
 				kunmap_atomic(buffer);
 
 				if (!mem->is_pbl)

@@ -98,7 +98,7 @@ static int siw_try_1seg(struct siw_iwarp_tx *c_tx, u64 paddr)
 					return -EFAULT;
 
 				buffer = kmap_atomic(p);
-				memcpy((void *)(paddr + part), buffer,
+				memcpy(paddr + part, buffer,
 				       bytes - part);
 				kunmap_atomic(buffer);
 			}

@@ -166,7 +166,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
 		c_tx->ctrl_len = sizeof(struct iwarp_send);
 
 		crc = (char *)&c_tx->pkt.send_pkt.crc;
-		data = siw_try_1seg(c_tx, (u64)crc);
+		data = siw_try_1seg(c_tx, crc);
 		break;
 
 	case SIW_OP_SEND_REMOTE_INV:

@@ -189,7 +189,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
 		c_tx->ctrl_len = sizeof(struct iwarp_send_inv);
 
 		crc = (char *)&c_tx->pkt.send_pkt.crc;
-		data = siw_try_1seg(c_tx, (u64)crc);
+		data = siw_try_1seg(c_tx, crc);
 		break;
 
 	case SIW_OP_WRITE:

@@ -201,7 +201,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
 		c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);
 
 		crc = (char *)&c_tx->pkt.write_pkt.crc;
-		data = siw_try_1seg(c_tx, (u64)crc);
+		data = siw_try_1seg(c_tx, crc);
 		break;
 
 	case SIW_OP_READ_RESPONSE:

@@ -216,7 +216,7 @@ static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
 		c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);
 
 		crc = (char *)&c_tx->pkt.write_pkt.crc;
-		data = siw_try_1seg(c_tx, (u64)crc);
+		data = siw_try_1seg(c_tx, crc);
 		break;
 
 	default:
@@ -398,15 +398,13 @@ static int siw_0copy_tx(struct socket *s, struct page **page,
 
 #define MAX_TRAILER (MPA_CRC_SIZE + 4)
 
-static void siw_unmap_pages(struct page **pages, int hdr_len, int num_maps)
+static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
 {
-	if (hdr_len) {
-		++pages;
-		--num_maps;
-	}
-	while (num_maps-- > 0) {
-		kunmap(*pages);
-		pages++;
+	while (kmap_mask) {
+		if (kmap_mask & BIT(0))
+			kunmap(*pp);
+		pp++;
+		kmap_mask >>= 1;
 	}
 }
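The rewritten siw_unmap_pages() is the core of this part of the patch: instead of reconstructing which page_array slots were kmap()ed from hdr_len and a count, the caller records one bit per mapped slot and cleanup walks the mask. A standalone sketch of the same bookkeeping, with malloc()/free() standing in for kmap()/kunmap():

/* Hedged sketch of the kmap_mask idea: set a bit per array slot when a
 * resource is acquired, then walk the mask to release exactly those slots. */
#include <stdlib.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

static void unmap_slots(void **pp, unsigned long mask)
{
	while (mask) {			/* stop as soon as no bits remain */
		if (mask & BIT(0))
			free(*pp);	/* only slots that were "mapped" */
		pp++;
		mask >>= 1;
	}
}

int main(void)
{
	void *slots[4] = { NULL, malloc(16), NULL, malloc(16) };
	unsigned long mask = BIT(1) | BIT(3);	/* set at acquisition time */

	unmap_slots(slots, mask);
	puts("released exactly the mapped slots");
	return 0;
}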
@@ -437,6 +435,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 	unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
 		     sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
 		     pbl_idx = c_tx->pbl_idx;
+	unsigned long kmap_mask = 0L;
 
 	if (c_tx->state == SIW_SEND_HDR) {
 		if (c_tx->use_sendpage) {

@@ -463,8 +462,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 
 		if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
 			mem = wqe->mem[sge_idx];
-			if (!mem->mem_obj)
-				is_kva = 1;
+			is_kva = mem->mem_obj == NULL ? 1 : 0;
 		} else {
 			is_kva = 1;
 		}

@@ -473,7 +471,8 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 			 * tx from kernel virtual address: either inline data
 			 * or memory region with assigned kernel buffer
 			 */
-			iov[seg].iov_base = (void *)(sge->laddr + sge_off);
+			iov[seg].iov_base =
+				(void *)(uintptr_t)(sge->laddr + sge_off);
 			iov[seg].iov_len = sge_len;
 
 			if (do_crc)

@@ -500,12 +499,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				p = siw_get_upage(mem->umem,
 						  sge->laddr + sge_off);
 				if (unlikely(!p)) {
-					if (hdr_len)
-						seg--;
-					if (!c_tx->use_sendpage && seg) {
-						siw_unmap_pages(page_array,
-								hdr_len, seg);
-					}
+					siw_unmap_pages(page_array, kmap_mask);
 					wqe->processed -= c_tx->bytes_unsent;
 					rv = -EFAULT;
 					goto done_crc;

@@ -515,6 +509,10 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 				if (!c_tx->use_sendpage) {
 					iov[seg].iov_base = kmap(p) + fp_off;
 					iov[seg].iov_len = plen;
+
+					/* Remember for later kunmap() */
+					kmap_mask |= BIT(seg);
+
 					if (do_crc)
 						crypto_shash_update(
 							c_tx->mpa_crc_hd,
@@ -526,13 +524,13 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 							page_address(p) + fp_off,
 							plen);
 				} else {
-					u64 pa = ((sge->laddr + sge_off) & PAGE_MASK);
+					u64 va = sge->laddr + sge_off;
 
-					page_array[seg] = virt_to_page(pa);
+					page_array[seg] = virt_to_page(va & PAGE_MASK);
 					if (do_crc)
 						crypto_shash_update(
 							c_tx->mpa_crc_hd,
-							(void *)(sge->laddr + sge_off),
+							(void *)(uintptr_t)va,
 							plen);
 				}
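The va & PAGE_MASK rewrite above is plain page arithmetic: masking with PAGE_MASK yields the page-aligned base handed to virt_to_page(), while the low bits are the offset within the page. A standalone illustration, assuming 4 KiB pages:

/* Hedged sketch of the PAGE_MASK arithmetic used above. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t va = 0x12345678;

	printf("page base: 0x%llx\n", (unsigned long long)(va & PAGE_MASK));
	printf("in-page offset: %llu\n", (unsigned long long)(va & ~PAGE_MASK));
	return 0;
}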
@@ -543,10 +541,7 @@ static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
 
 			if (++seg > (int)MAX_ARRAY) {
 				siw_dbg_qp(tx_qp(c_tx), "to many fragments\n");
-				if (!is_kva && !c_tx->use_sendpage) {
-					siw_unmap_pages(page_array, hdr_len,
-							seg - 1);
-				}
+				siw_unmap_pages(page_array, kmap_mask);
 				wqe->processed -= c_tx->bytes_unsent;
 				rv = -EMSGSIZE;
 				goto done_crc;

@@ -597,8 +592,7 @@ sge_done:
 	} else {
 		rv = kernel_sendmsg(s, &msg, iov, seg + 1,
 				    hdr_len + data_len + trl_len);
-		if (!is_kva)
-			siw_unmap_pages(page_array, hdr_len, seg);
+		siw_unmap_pages(page_array, kmap_mask);
 	}
 	if (rv < (int)hdr_len) {
 		/* Not even complete hdr pushed or negative rv */

@@ -829,7 +823,8 @@ static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
 				rv = -EINVAL;
 				goto tx_error;
 			}
-			wqe->sqe.sge[0].laddr = (u64)&wqe->sqe.sge[1];
+			wqe->sqe.sge[0].laddr =
+				(u64)(uintptr_t)&wqe->sqe.sge[1];
 		}
 	}
 	wqe->wr_status = SIW_WR_INPROGRESS;
@@ -924,7 +919,7 @@ tx_error:
 
 static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
 {
-	struct ib_mr *base_mr = (struct ib_mr *)sqe->base_mr;
+	struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
 	struct siw_device *sdev = to_siw_dev(pd->device);
 	struct siw_mem *mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
 	int rv = 0;

@@ -954,8 +949,7 @@ static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
 	mem->stag = sqe->rkey;
 	mem->perms = sqe->access;
 
-	siw_dbg_mem(mem, "STag now valid, MR va: 0x%016llx -> 0x%016llx\n",
-		    mem->va, base_mr->iova);
+	siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
 	mem->va = base_mr->iova;
 	mem->stag_valid = 1;
 out:
@@ -424,8 +424,7 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
 		 */
 		qp->srq = to_siw_srq(attrs->srq);
 		qp->attrs.rq_size = 0;
-		siw_dbg(base_dev, "QP [%u]: [SRQ 0x%p] attached\n",
-			qp->qp_num, qp->srq);
+		siw_dbg(base_dev, "QP [%u]: SRQ attached\n", qp->qp_num);
 	} else if (num_rqe) {
 		if (qp->kernel_verbs)
 			qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));

@@ -610,7 +609,7 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
 					      base_ucontext);
 	struct siw_qp_attrs qp_attrs;
 
-	siw_dbg_qp(qp, "state %d, cep 0x%p\n", qp->attrs.state, qp->cep);
+	siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
 
 	/*
 	 * Mark QP as in process of destruction to prevent from

@@ -662,7 +661,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
 	void *kbuf = &sqe->sge[1];
 	int num_sge = core_wr->num_sge, bytes = 0;
 
-	sqe->sge[0].laddr = (u64)kbuf;
+	sqe->sge[0].laddr = (uintptr_t)kbuf;
 	sqe->sge[0].lkey = 0;
 
 	while (num_sge--) {
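(uintptr_t)kbuf here stores a kernel pointer into the SQE's 64-bit laddr field: for inline sends, sge[0].laddr is simply pointed at the scratch space beginning at &sge[1]. A userspace mock-up of that layout trick, with simplified types (not the driver's struct siw_sge):

/* Hedged sketch of the inline-data trick: sge[0] describes a payload that
 * lives in the SQE itself, right after sge[0]. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct sge { uint64_t laddr; uint32_t length; uint32_t lkey; };

struct sqe {
	struct sge sge[4];	/* sge[1..3] reused as inline payload space */
};

int main(void)
{
	struct sqe sqe;
	const char msg[] = "inline payload";

	/* point sge[0] at the scratch area and copy the payload there */
	sqe.sge[0].laddr = (uint64_t)(uintptr_t)&sqe.sge[1];
	sqe.sge[0].length = sizeof(msg);
	sqe.sge[0].lkey = 0;	/* no memory region backing */
	memcpy(&sqe.sge[1], msg, sizeof(msg));

	puts((const char *)(uintptr_t)sqe.sge[0].laddr);
	return 0;
}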
@@ -825,7 +824,7 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 			break;
 
 		case IB_WR_REG_MR:
-			sqe->base_mr = (uint64_t)reg_wr(wr)->mr;
+			sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
 			sqe->rkey = reg_wr(wr)->key;
 			sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
 			sqe->opcode = SIW_OP_REG_MR;

@@ -842,8 +841,9 @@ int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
 			rv = -EINVAL;
 			break;
 		}
-		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%p\n",
-			   sqe->opcode, sqe->flags, (void *)sqe->id);
+		siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
+			   sqe->opcode, sqe->flags,
+			   (void *)(uintptr_t)sqe->id);
 
 		if (unlikely(rv < 0))
 			break;
@@ -1205,8 +1205,8 @@ struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
 	unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
 	int rv;
 
-	siw_dbg_pd(pd, "start: 0x%016llx, va: 0x%016llx, len: %llu\n",
-		   (unsigned long long)start, (unsigned long long)rnic_va,
+	siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
+		   (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
 		   (unsigned long long)len);
 
 	if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {

@@ -1363,7 +1363,7 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
 	struct siw_mem *mem = mr->mem;
 	struct siw_pbl *pbl = mem->pbl;
 	struct siw_pble *pble;
-	u64 pbl_size;
+	unsigned long pbl_size;
 	int i, rv;
 
 	if (!pbl) {

@@ -1402,16 +1402,18 @@ int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
 			pbl_size += sg_dma_len(slp);
 		}
 		siw_dbg_mem(mem,
-			    "sge[%d], size %llu, addr 0x%016llx, total %llu\n",
-			    i, pble->size, pble->addr, pbl_size);
+			    "sge[%d], size %u, addr 0x%p, total %lu\n",
+			    i, pble->size, (void *)(uintptr_t)pble->addr,
+			    pbl_size);
 	}
 	rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
 	if (rv > 0) {
 		mem->len = base_mr->length;
 		mem->va = base_mr->iova;
 		siw_dbg_mem(mem,
-			    "%llu bytes, start 0x%016llx, %u SLE to %u entries\n",
-			    mem->len, mem->va, num_sle, pbl->num_buf);
+			    "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
+			    mem->len, (void *)(uintptr_t)mem->va, num_sle,
+			    pbl->num_buf);
 	}
 	return rv;
 }

@@ -1529,7 +1531,7 @@ int siw_create_srq(struct ib_srq *base_srq,
 	}
 	spin_lock_init(&srq->lock);
 
-	siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: success\n", srq);
+	siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
 
 	return 0;

@@ -1650,8 +1652,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
 
 	if (unlikely(!srq->kernel_verbs)) {
 		siw_dbg_pd(base_srq->pd,
-			   "[SRQ 0x%p]: no kernel post_recv for mapped srq\n",
-			   srq);
+			   "[SRQ]: no kernel post_recv for mapped srq\n");
 		rv = -EINVAL;
 		goto out;
 	}

@@ -1673,8 +1674,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
 		}
 		if (unlikely(wr->num_sge > srq->max_sge)) {
 			siw_dbg_pd(base_srq->pd,
-				   "[SRQ 0x%p]: too many sge's: %d\n", srq,
-				   wr->num_sge);
+				   "[SRQ]: too many sge's: %d\n", wr->num_sge);
 			rv = -EINVAL;
 			break;
 		}

@@ -1693,7 +1693,7 @@ int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
 	spin_unlock_irqrestore(&srq->lock, flags);
 out:
 	if (unlikely(rv < 0)) {
-		siw_dbg_pd(base_srq->pd, "[SRQ 0x%p]: error %d\n", srq, rv);
+		siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
 		*bad_wr = wr;
 	}
 	return rv;
@@ -237,40 +237,17 @@ static void hv_kbd_handle_received_packet(struct hv_device *hv_dev,
 
 static void hv_kbd_on_channel_callback(void *context)
 {
+	struct vmpacket_descriptor *desc;
 	struct hv_device *hv_dev = context;
-	void *buffer;
-	int bufferlen = 0x100; /* Start with sensible size */
 	u32 bytes_recvd;
 	u64 req_id;
-	int error;
 
-	buffer = kmalloc(bufferlen, GFP_ATOMIC);
-	if (!buffer)
-		return;
-
-	while (1) {
-		error = vmbus_recvpacket_raw(hv_dev->channel, buffer, bufferlen,
-					     &bytes_recvd, &req_id);
-		switch (error) {
-		case 0:
-			if (bytes_recvd == 0) {
-				kfree(buffer);
-				return;
-			}
-
-			hv_kbd_handle_received_packet(hv_dev, buffer,
-						      bytes_recvd, req_id);
-			break;
-
-		case -ENOBUFS:
-			kfree(buffer);
-			/* Handle large packet */
-			bufferlen = bytes_recvd;
-			buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
-			if (!buffer)
-				return;
-			break;
-		}
+	foreach_vmbus_pkt(desc, hv_dev->channel) {
+		bytes_recvd = desc->len8 * 8;
+		req_id = desc->trans_id;
+
+		hv_kbd_handle_received_packet(hv_dev, desc, bytes_recvd,
+					      req_id);
 	}
 }
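The keyboard callback drops the kmalloc bounce buffer and the -ENOBUFS retry loop in favor of foreach_vmbus_pkt(), which walks the descriptors directly in the ring buffer. A rough userspace analogue of walking length-prefixed packets in place; the descriptor layout is heavily simplified (VMBus encodes packet length in 8-byte units in len8):

/* Hedged sketch of in-place packet iteration over a ring of
 * length-prefixed descriptors. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pkt_desc { uint16_t len8; uint64_t trans_id; };	/* simplified */

static void handle_packet(const struct pkt_desc *d)
{
	printf("packet: %d bytes, id %llu\n",
	       d->len8 * 8, (unsigned long long)d->trans_id);
}

int main(void)
{
	unsigned char ring[64] = { 0 };
	struct pkt_desc a = { 2, 1 }, b = { 3, 2 };	/* lengths in 8-byte units */
	size_t used = (a.len8 + b.len8) * 8;

	memcpy(ring, &a, sizeof(a));
	memcpy(ring + a.len8 * 8, &b, sizeof(b));

	/* walk descriptors in place, advancing by each packet's total length */
	for (size_t off = 0; off + sizeof(struct pkt_desc) <= used; ) {
		struct pkt_desc d;

		memcpy(&d, ring + off, sizeof(d));	/* alignment-safe read */
		if (!d.len8)
			break;
		handle_packet(&d);
		off += (size_t)d.len8 * 8;
	}
	return 0;
}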
@@ -965,10 +965,13 @@ static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
 {
 	bool coherent = dev_is_dma_coherent(dev);
 	size_t alloc_size = PAGE_ALIGN(size);
+	int node = dev_to_node(dev);
 	struct page *page = NULL;
 	void *cpu_addr;
 
 	page = dma_alloc_contiguous(dev, alloc_size, gfp);
+	if (!page)
+		page = alloc_pages_node(node, gfp, get_order(alloc_size));
 	if (!page)
 		return NULL;
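The added lines give iommu_dma_alloc_pages() a fallback: try the contiguous (CMA-backed) allocator first, then the regular page allocator on the device's NUMA node. The generic try-then-fall-back shape, as a standalone sketch with stand-in allocators (names illustrative):

/* Hedged sketch of the fallback pattern added above. */
#include <stdlib.h>
#include <stdio.h>

static void *alloc_contiguous(size_t size)
{
	(void)size;
	return NULL;		/* pretend CMA has no room */
}

static void *alloc_pages_fallback(size_t size)
{
	return malloc(size);	/* stand-in for alloc_pages_node() */
}

int main(void)
{
	size_t size = 4096;
	void *page = alloc_contiguous(size);

	if (!page)
		page = alloc_pages_fallback(size);	/* second chance */
	if (!page)
		return 1;				/* both paths failed */

	puts("allocated");
	free(page);
	return 0;
}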
@@ -1599,7 +1599,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	unsigned long freed;
 
 	c = container_of(shrink, struct dm_bufio_client, shrinker);
-	if (!dm_bufio_trylock(c))
+	if (sc->gfp_mask & __GFP_FS)
+		dm_bufio_lock(c);
+	else if (!dm_bufio_trylock(c))
 		return SHRINK_STOP;
 
 	freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
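The dm-bufio change keys the locking strategy off the allocation context: when __GFP_FS is set the shrinker may block, so it can wait for the lock; otherwise it must stay opportunistic and bail out with SHRINK_STOP. A pthread analogue of that decision, under the assumed mapping of dm_bufio_lock()/dm_bufio_trylock() onto mutex lock/trylock:

/* Hedged sketch: block on the lock only when the caller's context says
 * blocking is safe, otherwise just try it and give up. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static bool scan_locked(bool may_block)
{
	if (may_block)
		pthread_mutex_lock(&lock);	/* like __GFP_FS set */
	else if (pthread_mutex_trylock(&lock))
		return false;			/* like SHRINK_STOP  */

	/* ... reclaim work would happen here ... */
	pthread_mutex_unlock(&lock);
	return true;
}

int main(void)
{
	printf("blocking scan: %d\n", scan_locked(true));
	printf("opportunistic scan: %d\n", scan_locked(false));
	return 0;
}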
Some files were not shown because too many files have changed in this diff.