
Merge back PCI power management material for v5.3.

Rafael J. Wysocki 2019-06-24 10:11:27 +02:00
commit 25bc694a8a
213 changed files with 2157 additions and 1204 deletions

View File

@ -56,6 +56,18 @@ model features for SVE is included in Appendix A.
is to connect to a target process first and then attempt a
ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov).
* Whenever SVE scalable register values (Zn, Pn, FFR) are exchanged in memory
between userspace and the kernel, the register value is encoded in memory in
an endianness-invariant layout, with bits [(8 * i + 7) : (8 * i)] encoded at
byte offset i from the start of the memory representation. This affects,
for example, the signal frame (struct sve_context) and the ptrace
interface (struct user_sve_header) and associated data.
Beware that on big-endian systems this results in a different byte order than
for the FPSIMD V-registers, which are stored as single host-endian 128-bit
values, with bits [(127 - 8 * i) : (120 - 8 * i)] of the register encoded at
byte offset i. (struct fpsimd_context, struct user_fpsimd_state).
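As a minimal sketch of the layout described above (plain C outside the kernel; the function name is illustrative, and a 64-bit value is used to keep it short even though real Z-registers are larger), placing bits [(8 * i + 7) : (8 * i)] at byte offset i is simply a little-endian byte store, identical on little- and big-endian hosts:

#include <stdint.h>
#include <stddef.h>

/* Sketch: encode an n-byte register value in the endianness-invariant
 * layout, with bits [(8 * i + 7) : (8 * i)] of val at byte offset i.
 * The resulting bytes are the same regardless of host endianness. */
static void encode_invariant(uint8_t *dst, uint64_t val, size_t nbytes)
{
	for (size_t i = 0; i < nbytes; i++)
		dst[i] = (uint8_t)(val >> (8 * i));
}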
2. Vector length terminology
-----------------------------
@ -124,6 +136,10 @@ the SVE instruction set architecture.
size and layout. Macros SVE_SIG_* are defined [1] to facilitate access to
the members.
* Each scalable register (Zn, Pn, FFR) is stored in an endianness-invariant
layout, with bits [(8 * i + 7) : (8 * i)] stored at byte offset i from the
start of the register's representation in memory.
* If the SVE context is too big to fit in sigcontext.__reserved[], then extra
space is allocated on the stack, an extra_context record is written in
__reserved[] referencing this space. sve_context is then written in the

View File

@ -13,11 +13,9 @@ you can do so by typing:
# mount none /sys -t sysfs
As of the Linux 2.6.10 kernel, it is now possible to change the
IO scheduler for a given block device on the fly (thus making it possible,
for instance, to set the CFQ scheduler for the system default, but
set a specific device to use the deadline or noop schedulers - which
can improve that device's throughput).
It is possible to change the IO scheduler for a given block device on
the fly to select one of the mq-deadline, none, bfq, or kyber schedulers -
which can improve that device's throughput.
To set a specific scheduler, simply do this:
@ -30,8 +28,8 @@ The list of defined schedulers can be found by simply doing
a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
will be displayed, with the currently selected scheduler in brackets:
# cat /sys/block/hda/queue/scheduler
noop deadline [cfq]
# echo deadline > /sys/block/hda/queue/scheduler
# cat /sys/block/hda/queue/scheduler
noop [deadline] cfq
# cat /sys/block/sda/queue/scheduler
[mq-deadline] kyber bfq none
# echo none >/sys/block/sda/queue/scheduler
# cat /sys/block/sda/queue/scheduler
[none] mq-deadline kyber bfq
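The same switch can also be done programmatically by writing the sysfs attribute; a minimal user-space sketch (the device name and the choice of "none" are assumptions for illustration):

#include <stdio.h>

int main(void)
{
	/* Equivalent of: echo none > /sys/block/sda/queue/scheduler */
	FILE *f = fopen("/sys/block/sda/queue/scheduler", "w");

	if (!f) {
		perror("scheduler");
		return 1;
	}
	fputs("none\n", f);
	return fclose(f) ? 1 : 0;
}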

View File

@ -8,61 +8,13 @@ both at leaf nodes as well as at intermediate nodes in a storage hierarchy.
Plan is to use the same cgroup based management interface for blkio controller
and based on user options switch IO policies in the background.
Currently two IO control policies are implemented. First one is proportional
weight time based division of disk policy. It is implemented in CFQ. Hence
this policy takes effect only on leaf nodes when CFQ is being used. The second
one is throttling policy which can be used to specify upper IO rate limits
on devices. This policy is implemented in generic block layer and can be
used on leaf nodes as well as higher level logical devices like device mapper.
One IO control policy is the throttling policy, which can be used to
specify upper IO rate limits on devices. This policy is implemented in
the generic block layer and can be used on leaf nodes as well as
higher-level logical devices like device mapper.
HOWTO
=====
Proportional Weight division of bandwidth
-----------------------------------------
You can do a very simple testing of running two dd threads in two different
cgroups. Here is what you can do.
- Enable Block IO controller
CONFIG_BLK_CGROUP=y
- Enable group scheduling in CFQ
CONFIG_CFQ_GROUP_IOSCHED=y
- Compile and boot into kernel and mount IO controller (blkio); see
cgroups.txt, Why are cgroups needed?.
mount -t tmpfs cgroup_root /sys/fs/cgroup
mkdir /sys/fs/cgroup/blkio
mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
- Create two cgroups
mkdir -p /sys/fs/cgroup/blkio/test1/ /sys/fs/cgroup/blkio/test2
- Set weights of group test1 and test2
echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight
- Create two same-size files (say 512MB each) on the same disk (file1, file2) and
launch two dd threads in different cgroups to read those files.
sync
echo 3 > /proc/sys/vm/drop_caches
dd if=/mnt/sdb/zerofile1 of=/dev/null &
echo $! > /sys/fs/cgroup/blkio/test1/tasks
cat /sys/fs/cgroup/blkio/test1/tasks
dd if=/mnt/sdb/zerofile2 of=/dev/null &
echo $! > /sys/fs/cgroup/blkio/test2/tasks
cat /sys/fs/cgroup/blkio/test2/tasks
- At a macro level, the first dd should finish first. To get more precise data, keep
looking (with the help of a script) at the blkio.disk_time and
blkio.disk_sectors files of both the test1 and test2 groups. This will tell how
much disk time (in milliseconds) each group got and how many sectors each
group dispatched to the disk. We provide fairness in terms of disk time, so
ideally blkio.disk_time of the cgroups should be in proportion to the weights.
Throttling/Upper Limit policy
-----------------------------
- Enable Block IO controller
@ -94,7 +46,7 @@ Throttling/Upper Limit policy
Hierarchical Cgroups
====================
Both CFQ and throttling implement hierarchy support; however,
Throttling implements hierarchy support; however,
throttling's hierarchy support is enabled iff "sane_behavior" is
enabled from cgroup side, which currently is a development option and
not publicly available.
@ -107,9 +59,8 @@ If somebody created a hierarchy like as follows.
|
test3
CFQ by default and throttling with "sane_behavior" will handle the
hierarchy correctly. For details on CFQ hierarchy support, refer to
Documentation/block/cfq-iosched.txt. For throttling, all limits apply
Throttling with "sane_behavior" will handle the
hierarchy correctly. For throttling, all limits apply
to the whole subtree while all statistics are local to the IOs
directly generated by tasks in that cgroup.
@ -130,10 +81,6 @@ CONFIG_DEBUG_BLK_CGROUP
- Debug help. Right now some additional stats file show up in cgroup
if this option is enabled.
CONFIG_CFQ_GROUP_IOSCHED
- Enables group scheduling in CFQ. Currently only 1 level of group
creation is allowed.
CONFIG_BLK_DEV_THROTTLING
- Enable block device throttling support in block layer.
@ -344,32 +291,3 @@ Common files among various policies
- blkio.reset_stats
- Writing an int to this file will result in resetting all the stats
for that cgroup.
CFQ sysfs tunable
=================
/sys/block/<disk>/queue/iosched/slice_idle
------------------------------------------
On faster hardware CFQ can be slow, especially with sequential workloads.
This happens because CFQ idles on a single queue, and a single queue might not
drive deep enough request queue depths to keep the storage busy. In such scenarios
one can try setting slice_idle=0, which switches CFQ to IOPS
(IO operations per second) mode on NCQ-supporting hardware.
That means CFQ will not idle between cfq queues of a cfq group and hence be
able to drive higher queue depths and achieve better throughput. That also
means that cfq provides fairness among groups in terms of IOPS and not in
terms of disk time.
/sys/block/<disk>/queue/iosched/group_idle
------------------------------------------
If one disables idling on individual cfq queues and cfq service trees by
setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
on the group in an attempt to provide fairness among groups.
By default group_idle is the same as slice_idle and does nothing if
slice_idle is enabled.
One can experience an overall throughput drop if multiple groups have been
created and the applications placed in them are not driving enough
IO to keep the disk busy. In that case set group_idle=0, and CFQ will not idle
on individual groups and throughput should improve.

View File

@ -32,14 +32,18 @@ Brief summary of control files
hugetlb.<hugepagesize>.usage_in_bytes # show current usage for "hugepagesize" hugetlb
hugetlb.<hugepagesize>.failcnt # show the number of allocation failure due to HugeTLB limit
For a system supporting two hugepage sizes (16M and 16G), the control
For a system supporting three hugepage sizes (64k, 32M and 1G), the control
files include:
hugetlb.16GB.limit_in_bytes
hugetlb.16GB.max_usage_in_bytes
hugetlb.16GB.usage_in_bytes
hugetlb.16GB.failcnt
hugetlb.16MB.limit_in_bytes
hugetlb.16MB.max_usage_in_bytes
hugetlb.16MB.usage_in_bytes
hugetlb.16MB.failcnt
hugetlb.1GB.limit_in_bytes
hugetlb.1GB.max_usage_in_bytes
hugetlb.1GB.usage_in_bytes
hugetlb.1GB.failcnt
hugetlb.64KB.limit_in_bytes
hugetlb.64KB.max_usage_in_bytes
hugetlb.64KB.usage_in_bytes
hugetlb.64KB.failcnt
hugetlb.32MB.limit_in_bytes
hugetlb.32MB.max_usage_in_bytes
hugetlb.32MB.usage_in_bytes
hugetlb.32MB.failcnt

View File

@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Golden Lions
# *DOCUMENTATION*

View File

@ -51,7 +51,7 @@ endif
KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
KBUILD_CFLAGS += -Wno-psabi
KBUILD_CFLAGS += $(call cc-disable-warning, psabi)
KBUILD_AFLAGS += $(lseinstr) $(brokengasinst)
KBUILD_CFLAGS += $(call cc-option,-mabi=lp64)

View File

@ -195,6 +195,9 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long asid = ASID(vma->vm_mm);
unsigned long addr;
start = round_down(start, stride);
end = round_up(end, stride);
if ((end - start) >= (MAX_TLBI_OPS * stride)) {
flush_tlb_mm(vma->vm_mm);
return;
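Read as a predicate, the heuristic above aligns the range to the stride and falls back to a full-mm flush once per-entry invalidation would be too expensive; a hedged sketch (the MAX_TLBI_OPS value here is an assumption, and stride is a power of two as required by the kernel's round_down()/round_up()):

#include <stdbool.h>

#define MAX_TLBI_OPS 512UL	/* assumption; arm64 derives this from PTRS_PER_PTE */

static bool should_flush_whole_mm(unsigned long start, unsigned long end,
				  unsigned long stride)
{
	start &= ~(stride - 1);				/* round_down(start, stride) */
	end = (end + stride - 1) & ~(stride - 1);	/* round_up(end, stride) */

	return (end - start) >= MAX_TLBI_OPS * stride;
}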

View File

@ -260,6 +260,13 @@ struct kvm_vcpu_events {
KVM_REG_SIZE_U256 | \
((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
/*
* Register values for KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() and
* KVM_REG_ARM64_SVE_FFR() are represented in memory in an endianness-
* invariant layout which differs from the layout used for the FPSIMD
* V-registers on big-endian systems: see sigcontext.h for more explanation.
*/
#define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN
#define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX

View File

@ -176,6 +176,10 @@ struct user_sve_header {
* FPCR uint32_t FPCR
*
* Additional data might be appended in the future.
*
* The Z-, P- and FFR registers are represented in memory in an endianness-
* invariant layout which differs from the layout used for the FPSIMD
* V-registers on big-endian systems: see sigcontext.h for more explanation.
*/
#define SVE_PT_SVE_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq)

View File

@ -77,6 +77,15 @@ struct fpsimd_context {
__uint128_t vregs[32];
};
/*
* Note: similarly to all other integer fields, each V-register is stored in an
* endianness-dependent format, with the byte at offset i from the start of the
* in-memory representation of the register value containing
*
* bits [(7 + 8 * i) : (8 * i)] of the register on little-endian hosts; or
* bits [(127 - 8 * i) : (120 - 8 * i)] on big-endian hosts.
*/
/* ESR_EL1 context */
#define ESR_MAGIC 0x45535201
@ -204,6 +213,11 @@ struct sve_context {
* FFR uint16_t[vq] first-fault status register
*
* Additional data might be appended in the future.
*
* Unlike vregs[] in fpsimd_context, each SVE scalable register (Z-, P- or FFR)
* is encoded in memory in an endianness-invariant format, with the byte at
* offset i from the start of the in-memory representation containing bits
* [(7 + 8 * i) : (8 * i)] of the register value.
*/
#define SVE_SIG_ZREG_SIZE(vq) __SVE_ZREG_SIZE(vq)

View File

@ -39,6 +39,7 @@
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/swab.h>
#include <asm/esr.h>
#include <asm/fpsimd.h>
@ -352,6 +353,23 @@ static int __init sve_sysctl_init(void) { return 0; }
#define ZREG(sve_state, vq, n) ((char *)(sve_state) + \
(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
#ifdef CONFIG_CPU_BIG_ENDIAN
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
u64 a = swab64(x);
u64 b = swab64(x >> 64);
return ((__uint128_t)a << 64) | b;
}
#else
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
return x;
}
#endif
#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
/*
* Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
* task->thread.sve_state.
@ -369,14 +387,16 @@ static void fpsimd_to_sve(struct task_struct *task)
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
__uint128_t *p;
if (!system_supports_sve())
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
for (i = 0; i < 32; ++i)
memcpy(ZREG(sst, vq, i), &fst->vregs[i],
sizeof(fst->vregs[i]));
for (i = 0; i < 32; ++i) {
p = (__uint128_t *)ZREG(sst, vq, i);
*p = arm64_cpu_to_le128(fst->vregs[i]);
}
}
/*
@ -395,14 +415,16 @@ static void sve_to_fpsimd(struct task_struct *task)
void const *sst = task->thread.sve_state;
struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
__uint128_t const *p;
if (!system_supports_sve())
return;
vq = sve_vq_from_vl(task->thread.sve_vl);
for (i = 0; i < 32; ++i)
memcpy(&fst->vregs[i], ZREG(sst, vq, i),
sizeof(fst->vregs[i]));
for (i = 0; i < 32; ++i) {
p = (__uint128_t const *)ZREG(sst, vq, i);
fst->vregs[i] = arm64_le128_to_cpu(*p);
}
}
#ifdef CONFIG_ARM64_SVE
@ -491,6 +513,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
void *sst = task->thread.sve_state;
struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
unsigned int i;
__uint128_t *p;
if (!test_tsk_thread_flag(task, TIF_SVE))
return;
@ -499,9 +522,10 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
for (i = 0; i < 32; ++i)
memcpy(ZREG(sst, vq, i), &fst->vregs[i],
sizeof(fst->vregs[i]));
for (i = 0; i < 32; ++i) {
p = (__uint128_t *)ZREG(sst, vq, i);
*p = arm64_cpu_to_le128(fst->vregs[i]);
}
}
int sve_set_vector_length(struct task_struct *task,

View File

@ -876,6 +876,23 @@ static inline int pmd_present(pmd_t pmd)
return false;
}
static inline int pmd_is_serializing(pmd_t pmd)
{
/*
* If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
* and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
*
* This condition may also occur when flushing a pmd while flushing
* it (see ptep_modify_prot_start), so callers must ensure this
* case is fine as well.
*/
if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
cpu_to_be64(_PAGE_INVALID))
return true;
return false;
}
static inline int pmd_bad(pmd_t pmd)
{
if (radix_enabled())
@ -1092,6 +1109,19 @@ static inline int pmd_protnone(pmd_t pmd)
#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
/*
* pmdp_invalidate sets this combination (which is not caught by
* !pte_present() check in pte_access_permitted), to prevent
* lock-free lookups, as part of the serialize_against_pte_lookup()
* synchronisation.
*
* This also catches the case where the PTE's hardware PRESENT bit is
* cleared while TLB is flushed, which is suboptimal but should not
* be frequent.
*/
if (pmd_is_serializing(pmd))
return false;
return pte_access_permitted(pmd_pte(pmd), write);
}

View File

@ -13,7 +13,11 @@ extern void btext_update_display(unsigned long phys, int width, int height,
int depth, int pitch);
extern void btext_setup_display(int width, int height, int depth, int pitch,
unsigned long address);
#ifdef CONFIG_PPC32
extern void btext_prepare_BAT(void);
#else
static inline void btext_prepare_BAT(void) { }
#endif
extern void btext_map(void);
extern void btext_unmap(void);

View File

@ -94,6 +94,9 @@ static inline bool kdump_in_progress(void)
return crashing_cpu >= 0;
}
void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
unsigned long start_address) __noreturn;
#ifdef CONFIG_KEXEC_FILE
extern const struct kexec_file_ops kexec_elf64_ops;

View File

@ -30,7 +30,6 @@ typedef void (*relocate_new_kernel_t)(
*/
void default_machine_kexec(struct kimage *image)
{
extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
unsigned long page_list;
unsigned long reboot_code_buffer, reboot_code_buffer_phys;
@ -58,6 +57,9 @@ void default_machine_kexec(struct kimage *image)
reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
printk(KERN_INFO "Bye!\n");
if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x))
relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
/* now call it */
rnk = (relocate_new_kernel_t) reboot_code_buffer;
(*rnk)(page_list, reboot_code_buffer_phys, image->start);

View File

@ -2336,6 +2336,7 @@ static void __init prom_check_displays(void)
prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
width, height, pitch, addr);
btext_setup_display(width, height, 8, pitch, addr);
btext_prepare_BAT();
}
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
}

View File

@ -24,7 +24,7 @@ fi
WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
_end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
__secondary_hold_acknowledge __secondary_hold_spinloop __start
logo_linux_clut224
logo_linux_clut224 btext_prepare_BAT
reloc_got2 kernstart_addr memstart_addr linux_banner _stext
__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."

View File

@ -112,6 +112,9 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
/*
* This ensures that generic code that relies on IRQ disabling
* to prevent a parallel THP split works as expected.
*
* Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
* a special case check in pmd_access_permitted.
*/
serialize_against_pte_lookup(vma->vm_mm);
return __pmd(old_pmd);

View File

@ -368,13 +368,25 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
pdshift = PMD_SHIFT;
pmdp = pmd_offset(&pud, ea);
pmd = READ_ONCE(*pmdp);
/*
* A hugepage collapse is captured by pmd_none, because
* it marks the pmd none and does a hpte invalidate.
* A hugepage collapse is captured by this condition, see
* pmdp_collapse_flush.
*/
if (pmd_none(pmd))
return NULL;
#ifdef CONFIG_PPC_BOOK3S_64
/*
* A hugepage split is captured by this condition, see
* pmdp_invalidate.
*
* Huge page modification can be caught here too.
*/
if (pmd_is_serializing(pmd))
return NULL;
#endif
if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
if (is_thp)
*is_thp = true;

View File

@ -536,7 +536,7 @@ static inline void __fpregs_load_activate(void)
struct fpu *fpu = &current->thread.fpu;
int cpu = smp_processor_id();
if (WARN_ON_ONCE(current->mm == NULL))
if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
return;
if (!fpregs_state_valid(fpu, cpu)) {
@ -567,11 +567,11 @@ static inline void __fpregs_load_activate(void)
* otherwise.
*
* The FPU context is only stored/restored for a user task and
* ->mm is used to distinguish between kernel and user threads.
* PF_KTHREAD is used to distinguish between kernel and user threads.
*/
static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
if (!copy_fpregs_to_fpstate(old_fpu))
old_fpu->last_cpu = -1;
else

View File

@ -52,6 +52,9 @@
#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66
#define INTEL_FAM6_ICELAKE_X 0x6A
#define INTEL_FAM6_ICELAKE_XEON_D 0x6C
#define INTEL_FAM6_ICELAKE_DESKTOP 0x7D
#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
/* "Small Core" Processors (Atom) */

View File

@ -872,7 +872,7 @@ int __init microcode_init(void)
goto out_ucode_group;
register_syscore_ops(&mc_syscore_ops);
cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
mc_cpu_online, mc_cpu_down_prep);
pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);

View File

@ -360,6 +360,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
struct list_head *head;
struct rdtgroup *entry;
if (!is_mbm_local_enabled())
return;
r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
closid = rgrp->closid;
rmid = rgrp->mon.rmid;

View File

@ -2534,7 +2534,12 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
if (closid_allocated(i) && i != closid) {
mode = rdtgroup_mode_by_closid(i);
if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
break;
/*
* ctrl values for locksetup aren't relevant
* until the schemata is written, and the mode
* becomes RDT_MODE_PSEUDO_LOCKED.
*/
continue;
/*
* If CDP is active include peer domain's
* usage to ensure there is no overlap

View File

@ -102,7 +102,7 @@ static void __kernel_fpu_begin(void)
kernel_fpu_disable();
if (current->mm) {
if (!(current->flags & PF_KTHREAD)) {
if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
set_thread_flag(TIF_NEED_FPU_LOAD);
/*

View File

@ -5,6 +5,7 @@
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/pagemap.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
@ -61,6 +62,11 @@ static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
struct user_i387_ia32_struct env;
struct _fpstate_32 __user *fp = buf;
fpregs_lock();
if (!test_thread_flag(TIF_NEED_FPU_LOAD))
copy_fxregs_to_kernel(&tsk->thread.fpu);
fpregs_unlock();
convert_from_fxsr(&env, tsk);
if (__copy_to_user(buf, &env, sizeof(env)) ||
@ -189,15 +195,7 @@ retry:
fpregs_unlock();
if (ret) {
int aligned_size;
int nr_pages;
aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages,
NULL, FOLL_WRITE);
if (ret == nr_pages)
if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
goto retry;
return -EFAULT;
}

View File

@ -758,7 +758,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
BREAK_INSTR_SIZE);
bpt->type = BP_POKE_BREAKPOINT;
return err;
return 0;
}
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)

View File

@ -199,7 +199,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
if (!pgtable_l5_enabled())
return (p4d_t *)pgd;
p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
p4d = pgd_val(*pgd) & PTE_PFN_MASK;
p4d += __START_KERNEL_map - phys_base;
return (p4d_t *)p4d + p4d_index(addr);
}

View File

@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region {
} kaslr_regions[] = {
{ &page_offset_base, 0 },
{ &vmalloc_base, 0 },
{ &vmemmap_base, 1 },
{ &vmemmap_base, 0 },
};
/* Get size in bytes used by the memory region */
@ -78,6 +78,7 @@ void __init kernel_randomize_memory(void)
unsigned long rand, memory_tb;
struct rnd_state rand_state;
unsigned long remain_entropy;
unsigned long vmemmap_size;
vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
vaddr = vaddr_start;
@ -109,6 +110,14 @@ void __init kernel_randomize_memory(void)
if (memory_tb < kaslr_regions[0].size_tb)
kaslr_regions[0].size_tb = memory_tb;
/*
* Calculate the vmemmap region size in TBs, aligned to a TB
* boundary.
*/
vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
sizeof(struct page);
kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
/* Calculate entropy available between regions */
remain_entropy = vaddr_end - vaddr_start;
for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
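As a worked example of the sizing above, under the assumptions of 4 KiB pages (PAGE_SHIFT = 12), TB_SHIFT = 40 and a 64-byte struct page: 64 TB of memory covers 2^34 pages, which needs 2^40 bytes (1 TB) of vmemmap, so the region rounds up to 1 TB:

#include <stdio.h>

int main(void)
{
	unsigned long long size_tb = 64;	/* kaslr_regions[0].size_tb */
	unsigned long long tb_shift = 40, page_shift = 12;
	unsigned long long sizeof_struct_page = 64;	/* assumption */

	/* pages covered, then bytes of struct page needed for them */
	unsigned long long vmemmap_size =
		(size_tb << (tb_shift - page_shift)) * sizeof_struct_page;
	/* DIV_ROUND_UP(vmemmap_size, 1ULL << TB_SHIFT) */
	unsigned long long size_in_tb =
		(vmemmap_size + (1ULL << tb_shift) - 1) >> tb_shift;

	printf("vmemmap region: %llu TB\n", size_in_tb);	/* prints 1 */
	return 0;
}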

View File

@ -73,6 +73,7 @@ config BLK_DEV_INTEGRITY
config BLK_DEV_ZONED
bool "Zoned block device support"
select MQ_IOSCHED_DEADLINE
---help---
Block layer zoned block device support. This option enables
support for ZAC/ZBC host-managed and host-aware zoned block devices.

View File

@ -1046,8 +1046,7 @@ struct blkcg_policy blkcg_policy_bfq = {
struct cftype bfq_blkcg_legacy_files[] = {
{
.name = "bfq.weight",
.link_name = "weight",
.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = bfq_io_show_weight,
.write_u64 = bfq_io_set_weight_legacy,
},
@ -1167,8 +1166,7 @@ struct cftype bfq_blkcg_legacy_files[] = {
struct cftype bfq_blkg_files[] = {
{
.name = "bfq.weight",
.link_name = "weight",
.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = bfq_io_show_weight,
.write = bfq_io_set_weight,
},

View File

@ -821,38 +821,28 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
{},
};
static bool debugfs_create_files(struct dentry *parent, void *data,
static void debugfs_create_files(struct dentry *parent, void *data,
const struct blk_mq_debugfs_attr *attr)
{
if (IS_ERR_OR_NULL(parent))
return false;
return;
d_inode(parent)->i_private = data;
for (; attr->name; attr++) {
if (!debugfs_create_file(attr->name, attr->mode, parent,
(void *)attr, &blk_mq_debugfs_fops))
return false;
}
return true;
for (; attr->name; attr++)
debugfs_create_file(attr->name, attr->mode, parent,
(void *)attr, &blk_mq_debugfs_fops);
}
int blk_mq_debugfs_register(struct request_queue *q)
void blk_mq_debugfs_register(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
if (!blk_debugfs_root)
return -ENOENT;
q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
blk_debugfs_root);
if (!q->debugfs_dir)
return -ENOMEM;
if (!debugfs_create_files(q->debugfs_dir, q,
blk_mq_debugfs_queue_attrs))
goto err;
debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
/*
* blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
@ -864,11 +854,10 @@ int blk_mq_debugfs_register(struct request_queue *q)
/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
queue_for_each_hw_ctx(q, hctx, i) {
if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
goto err;
if (q->elevator && !hctx->sched_debugfs_dir &&
blk_mq_debugfs_register_sched_hctx(q, hctx))
goto err;
if (!hctx->debugfs_dir)
blk_mq_debugfs_register_hctx(q, hctx);
if (q->elevator && !hctx->sched_debugfs_dir)
blk_mq_debugfs_register_sched_hctx(q, hctx);
}
if (q->rq_qos) {
@ -879,12 +868,6 @@ int blk_mq_debugfs_register(struct request_queue *q)
rqos = rqos->next;
}
}
return 0;
err:
blk_mq_debugfs_unregister(q);
return -ENOMEM;
}
void blk_mq_debugfs_unregister(struct request_queue *q)
@ -894,52 +877,32 @@ void blk_mq_debugfs_unregister(struct request_queue *q)
q->debugfs_dir = NULL;
}
static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
struct blk_mq_ctx *ctx)
{
struct dentry *ctx_dir;
char name[20];
snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
if (!ctx_dir)
return -ENOMEM;
if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
return -ENOMEM;
return 0;
debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}
int blk_mq_debugfs_register_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
void blk_mq_debugfs_register_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_ctx *ctx;
char name[20];
int i;
if (!q->debugfs_dir)
return -ENOENT;
snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
if (!hctx->debugfs_dir)
return -ENOMEM;
if (!debugfs_create_files(hctx->debugfs_dir, hctx,
blk_mq_debugfs_hctx_attrs))
goto err;
debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);
hctx_for_each_ctx(hctx, ctx, i) {
if (blk_mq_debugfs_register_ctx(hctx, ctx))
goto err;
}
return 0;
err:
blk_mq_debugfs_unregister_hctx(hctx);
return -ENOMEM;
hctx_for_each_ctx(hctx, ctx, i)
blk_mq_debugfs_register_ctx(hctx, ctx);
}
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@ -949,17 +912,13 @@ void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
hctx->debugfs_dir = NULL;
}
int blk_mq_debugfs_register_hctxs(struct request_queue *q)
void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
int i;
queue_for_each_hw_ctx(q, hctx, i) {
if (blk_mq_debugfs_register_hctx(q, hctx))
return -ENOMEM;
}
return 0;
queue_for_each_hw_ctx(q, hctx, i)
blk_mq_debugfs_register_hctx(q, hctx);
}
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
@ -971,29 +930,16 @@ void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
blk_mq_debugfs_unregister_hctx(hctx);
}
int blk_mq_debugfs_register_sched(struct request_queue *q)
void blk_mq_debugfs_register_sched(struct request_queue *q)
{
struct elevator_type *e = q->elevator->type;
if (!q->debugfs_dir)
return -ENOENT;
if (!e->queue_debugfs_attrs)
return 0;
return;
q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
if (!q->sched_debugfs_dir)
return -ENOMEM;
if (!debugfs_create_files(q->sched_debugfs_dir, q,
e->queue_debugfs_attrs))
goto err;
return 0;
err:
blk_mq_debugfs_unregister_sched(q);
return -ENOMEM;
debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}
void blk_mq_debugfs_unregister_sched(struct request_queue *q)
@ -1008,36 +954,22 @@ void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
rqos->debugfs_dir = NULL;
}
int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
struct request_queue *q = rqos->q;
const char *dir_name = rq_qos_id_to_name(rqos->id);
if (!q->debugfs_dir)
return -ENOENT;
if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
return 0;
return;
if (!q->rqos_debugfs_dir) {
if (!q->rqos_debugfs_dir)
q->rqos_debugfs_dir = debugfs_create_dir("rqos",
q->debugfs_dir);
if (!q->rqos_debugfs_dir)
return -ENOMEM;
}
rqos->debugfs_dir = debugfs_create_dir(dir_name,
rqos->q->rqos_debugfs_dir);
if (!rqos->debugfs_dir)
return -ENOMEM;
if (!debugfs_create_files(rqos->debugfs_dir, rqos,
rqos->ops->debugfs_attrs))
goto err;
return 0;
err:
blk_mq_debugfs_unregister_rqos(rqos);
return -ENOMEM;
debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
@ -1046,27 +978,18 @@ void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
q->rqos_debugfs_dir = NULL;
}
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
{
struct elevator_type *e = q->elevator->type;
if (!hctx->debugfs_dir)
return -ENOENT;
if (!e->hctx_debugfs_attrs)
return 0;
return;
hctx->sched_debugfs_dir = debugfs_create_dir("sched",
hctx->debugfs_dir);
if (!hctx->sched_debugfs_dir)
return -ENOMEM;
if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
e->hctx_debugfs_attrs))
return -ENOMEM;
return 0;
debugfs_create_files(hctx->sched_debugfs_dir, hctx,
e->hctx_debugfs_attrs);
}
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)

View File

@ -18,74 +18,68 @@ struct blk_mq_debugfs_attr {
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
int blk_mq_debugfs_register(struct request_queue *q);
void blk_mq_debugfs_register(struct request_queue *q);
void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_register_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
int blk_mq_debugfs_register_sched(struct request_queue *q);
void blk_mq_debugfs_register_sched(struct request_queue *q);
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx);
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
int blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
#else
static inline int blk_mq_debugfs_register(struct request_queue *q)
static inline void blk_mq_debugfs_register(struct request_queue *q)
{
return 0;
}
static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}
static inline int blk_mq_debugfs_register_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
{
return 0;
}
static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
}
static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
return 0;
}
static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
static inline int blk_mq_debugfs_register_sched(struct request_queue *q)
static inline void blk_mq_debugfs_register_sched(struct request_queue *q)
{
return 0;
}
static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
}
static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
static inline void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
struct blk_mq_hw_ctx *hctx)
{
return 0;
}
static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
}
static inline int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
static inline void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
return 0;
}
static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)

View File

@ -555,7 +555,6 @@ void blk_mq_sched_free_requests(struct request_queue *q)
int i;
lockdep_assert_held(&q->sysfs_lock);
WARN_ON(!q->elevator);
queue_for_each_hw_ctx(q, hctx, i) {
if (hctx->sched_tags)

View File

@ -4460,9 +4460,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* drives which fail FPDMA_AA activation (some may freeze afterwards);
the ST disks also have LPM issues */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA |
ATA_HORKAGE_NOLPM, },
{ "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA |
ATA_HORKAGE_NOLPM, },
{ "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132

View File

@ -755,10 +755,32 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
&devres));
}
EXPORT_SYMBOL_GPL(devm_remove_action);
/**
* devm_release_action() - release previously added custom action
* @dev: Device that owns the action
* @action: Function implementing the action
* @data: Pointer to data passed to @action implementation
*
* Releases and removes an instance of @action previously added by
* devm_add_action(). Both the action and data should match one of the
* existing entries.
*/
void devm_release_action(struct device *dev, void (*action)(void *), void *data)
{
struct action_devres devres = {
.data = data,
.action = action,
};
WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
&devres));
}
EXPORT_SYMBOL_GPL(devm_release_action);
/*
* Managed kmalloc/kfree
*/
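A hedged usage sketch of the new helper (the driver fragment, callback and data names below are hypothetical; devm_add_action() is the existing API it pairs with):

#include <linux/device.h>

/* Hypothetical cleanup callback tied to the device's lifetime. */
static void my_teardown(void *data)
{
	/* undo whatever the probe path set up for data */
}

static int my_probe_fragment(struct device *dev, void *res)
{
	int rc = devm_add_action(dev, my_teardown, res);

	if (rc)
		return rc;

	/* ... on an early-teardown path, run the action now and drop it
	 * from the devres list instead of waiting for device release. */
	devm_release_action(dev, my_teardown, res);
	return 0;
}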

View File

@ -74,10 +74,6 @@ int null_zone_report(struct gendisk *disk, sector_t sector,
struct nullb_device *dev = nullb->dev;
unsigned int zno, nrz = 0;
if (!dev->zoned)
/* Not a zoned null device */
return -EOPNOTSUPP;
zno = null_zone_no(dev, sector);
if (zno < dev->nr_zones) {
nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);

View File

@ -767,7 +767,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
set_capacity(gendisk, priv->size >> 9);
dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
dev_info(&dev->core, "%s: Using %llu MiB of GPU memory\n",
gendisk->disk_name, get_capacity(gendisk) >> 11);
device_add_disk(&dev->core, gendisk, NULL);

View File

@ -149,22 +149,22 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
return val;
}
static u64 arch_counter_get_cntpct_stable(void)
static notrace u64 arch_counter_get_cntpct_stable(void)
{
return __arch_counter_get_cntpct_stable();
}
static u64 arch_counter_get_cntpct(void)
static notrace u64 arch_counter_get_cntpct(void)
{
return __arch_counter_get_cntpct();
}
static u64 arch_counter_get_cntvct_stable(void)
static notrace u64 arch_counter_get_cntvct_stable(void)
{
return __arch_counter_get_cntvct_stable();
}
static u64 arch_counter_get_cntvct(void)
static notrace u64 arch_counter_get_cntvct(void)
{
return __arch_counter_get_cntvct();
}

View File

@ -896,7 +896,7 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
return ret;
}
const static struct omap_dm_timer_ops dmtimer_ops = {
static const struct omap_dm_timer_ops dmtimer_ops = {
.request_by_node = omap_dm_timer_request_by_node,
.request_specific = omap_dm_timer_request_specific,
.request = omap_dm_timer_request,

View File

@ -27,9 +27,8 @@ static void dev_dax_percpu_release(struct percpu_ref *ref)
complete(&dev_dax->cmp);
}
static void dev_dax_percpu_exit(void *data)
static void dev_dax_percpu_exit(struct percpu_ref *ref)
{
struct percpu_ref *ref = data;
struct dev_dax *dev_dax = ref_to_dev_dax(ref);
dev_dbg(&dev_dax->dev, "%s\n", __func__);
@ -466,18 +465,12 @@ int dev_dax_probe(struct device *dev)
if (rc)
return rc;
rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref);
if (rc)
return rc;
dev_dax->pgmap.ref = &dev_dax->ref;
dev_dax->pgmap.kill = dev_dax_percpu_kill;
dev_dax->pgmap.cleanup = dev_dax_percpu_exit;
addr = devm_memremap_pages(dev, &dev_dax->pgmap);
if (IS_ERR(addr)) {
devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref);
percpu_ref_exit(&dev_dax->ref);
if (IS_ERR(addr))
return PTR_ERR(addr);
}
inode = dax_inode(dax_dev);
cdev = inode->i_cdev;

View File

@ -305,7 +305,8 @@ static const struct regmap_config pca953x_i2c_regmap = {
.volatile_reg = pca953x_volatile_register,
.cache_type = REGCACHE_RBTREE,
.max_register = 0x7f,
/* REVISIT: should be 0x7f but some 24 bit chips use REG_ADDR_AI */
.max_register = 0xff,
};
static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off,

View File

@ -2492,7 +2492,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
int r = -EINVAL;
int r;
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
@ -2502,7 +2502,7 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
}
*smu_version = adev->pm.fw_version;
}
return r;
return 0;
}
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)

View File

@ -172,6 +172,8 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
{
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
if (block >= AMDGPU_RAS_BLOCK_COUNT)
return 0;
return ras && (ras->supported & (1 << block));
}

View File

@ -594,7 +594,7 @@ error:
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t rptr = amdgpu_ring_get_rptr(ring);
uint32_t rptr;
unsigned i;
int r;
@ -602,6 +602,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
if (r)
return r;
rptr = amdgpu_ring_get_rptr(ring);
amdgpu_ring_write(ring, VCN_ENC_CMD_END);
amdgpu_ring_commit(ring);

View File

@ -170,13 +170,16 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t rptr = amdgpu_ring_get_rptr(ring);
uint32_t rptr;
unsigned i;
int r;
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
rptr = amdgpu_ring_get_rptr(ring);
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);

View File

@ -175,7 +175,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
uint32_t rptr = amdgpu_ring_get_rptr(ring);
uint32_t rptr;
unsigned i;
int r;
@ -185,6 +185,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
rptr = amdgpu_ring_get_rptr(ring);
amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
amdgpu_ring_commit(ring);

View File

@ -1570,6 +1570,50 @@ static void connector_bad_edid(struct drm_connector *connector,
}
}
/* Get override or firmware EDID */
static struct edid *drm_get_override_edid(struct drm_connector *connector)
{
struct edid *override = NULL;
if (connector->override_edid)
override = drm_edid_duplicate(connector->edid_blob_ptr->data);
if (!override)
override = drm_load_edid_firmware(connector);
return IS_ERR(override) ? NULL : override;
}
/**
* drm_add_override_edid_modes - add modes from override/firmware EDID
* @connector: connector we're probing
*
* Add modes from the override/firmware EDID, if available. Only to be used from
* drm_helper_probe_single_connector_modes() as a fallback for when DDC probe
* failed during drm_get_edid() and caused the override/firmware EDID to be
* skipped.
*
* Return: The number of modes added or 0 if we couldn't find any.
*/
int drm_add_override_edid_modes(struct drm_connector *connector)
{
struct edid *override;
int num_modes = 0;
override = drm_get_override_edid(connector);
if (override) {
drm_connector_update_edid_property(connector, override);
num_modes = drm_add_edid_modes(connector, override);
kfree(override);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
connector->base.id, connector->name, num_modes);
}
return num_modes;
}
EXPORT_SYMBOL(drm_add_override_edid_modes);
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@ -1597,15 +1641,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
{
int i, j = 0, valid_extensions = 0;
u8 *edid, *new;
struct edid *override = NULL;
struct edid *override;
if (connector->override_edid)
override = drm_edid_duplicate(connector->edid_blob_ptr->data);
if (!override)
override = drm_load_edid_firmware(connector);
if (!IS_ERR_OR_NULL(override))
override = drm_get_override_edid(connector);
if (override)
return override;
if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)

View File

@ -255,7 +255,8 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
if (obj->import_attach)
shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
else
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (!shmem->vaddr) {
DRM_DEBUG_KMS("Failed to vmap pages\n");

View File

@ -42,6 +42,14 @@ static const struct drm_dmi_panel_orientation_data asus_t100ha = {
.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_micropc = {
.width = 720,
.height = 1280,
.bios_dates = (const char * const []){ "04/26/2019",
NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_pocket = {
.width = 1200,
.height = 1920,
@ -50,6 +58,14 @@ static const struct drm_dmi_panel_orientation_data gpd_pocket = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_pocket2 = {
.width = 1200,
.height = 1920,
.bios_dates = (const char * const []){ "06/28/2018", "08/28/2018",
"12/07/2018", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data gpd_win = {
.width = 720,
.height = 1280,
@ -99,6 +115,14 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
.driver_data = (void *)&asus_t100ha,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_micropc,
}, { /*
* GPD Pocket, note that the DMI data is less generic than
* it seems, devices with a board-vendor of "AMI Corporation"
@ -112,6 +136,14 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
},
.driver_data = (void *)&gpd_pocket,
}, { /* GPD Pocket 2 (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_pocket2,
}, { /* GPD Win (same note on DMI match as GPD Pocket) */
.matches = {
DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),

View File

@ -479,6 +479,13 @@ retry:
count = (*connector_funcs->get_modes)(connector);
/*
* Fallback for when DDC probe failed in drm_get_edid() and thus skipped
* override/firmware EDID.
*/
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_override_edid_modes(connector);
if (count == 0 && connector->status == connector_status_connected)
count = drm_add_modes_noedid(connector, 1024, 768);
count += drm_helper_probe_add_cmdline_mode(connector);

View File

@ -3005,6 +3005,7 @@ static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
{
return gen8_is_valid_mux_addr(dev_priv, addr) ||
addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) ||
(addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
}

View File

@ -1062,6 +1062,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define NOA_DATA _MMIO(0x986C)
#define NOA_WRITE _MMIO(0x9888)
#define GEN10_NOA_WRITE_HIGH _MMIO(0x9884)
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068

View File

@ -303,10 +303,17 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
u32 i;
u32 *dmc_payload;
size_t fsize;
if (!fw)
return NULL;
fsize = sizeof(struct intel_css_header) +
sizeof(struct intel_package_header) +
sizeof(struct intel_dmc_header);
if (fsize > fw->size)
goto error_truncated;
/* Extract CSS Header information*/
css_header = (struct intel_css_header *)fw->data;
if (sizeof(struct intel_css_header) !=
@ -366,6 +373,9 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
/* Convert dmc_offset into number of bytes. By default it is in dwords*/
dmc_offset *= 4;
readcount += dmc_offset;
fsize += dmc_offset;
if (fsize > fw->size)
goto error_truncated;
/* Extract dmc_header information. */
dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
@ -397,6 +407,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
nbytes = dmc_header->fw_size * 4;
fsize += nbytes;
if (fsize > fw->size)
goto error_truncated;
if (nbytes > csr->max_fw_size) {
DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
return NULL;
@ -410,6 +424,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
}
return memcpy(dmc_payload, &fw->data[readcount], nbytes);
error_truncated:
DRM_ERROR("Truncated DMC firmware, rejecting.\n");
return NULL;
}
static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)

View File

@ -2432,10 +2432,14 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
* main surface.
*/
static const struct drm_format_info ccs_formats[] = {
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
.cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
};
static const struct drm_format_info *
@ -11942,7 +11946,7 @@ encoder_retry:
return 0;
}
static bool intel_fuzzy_clock_check(int clock1, int clock2)
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
int diff;

View File

@ -1742,6 +1742,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
int lpt_get_iclkip(struct drm_i915_private *dev_priv);
bool intel_fuzzy_clock_check(int clock1, int clock2);
/* modesetting asserts */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,

View File

@ -853,6 +853,17 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
if (mipi_config->target_burst_mode_freq) {
u32 bitrate = intel_dsi_bitrate(intel_dsi);
/*
* Sometimes the VBT contains a slightly lower clock
* than the bitrate we have calculated; in this case
* just replace it with the calculated bitrate.
*/
if (mipi_config->target_burst_mode_freq < bitrate &&
intel_fuzzy_clock_check(
mipi_config->target_burst_mode_freq,
bitrate))
mipi_config->target_burst_mode_freq = bitrate;
if (mipi_config->target_burst_mode_freq < bitrate) {
DRM_ERROR("Burst mode freq is less than computed\n");
return false;

View File

@ -916,6 +916,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
}
static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
u8 audio_state)
{
return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT,
&audio_state, 1);
}
#if 0
static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
{
@ -1487,11 +1494,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
else
sdvox |= SDVO_PIPE_SEL(crtc->pipe);
if (crtc_state->has_audio) {
WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4);
sdvox |= SDVO_AUDIO_ENABLE;
}
if (INTEL_GEN(dev_priv) >= 4) {
/* done in crtc_mode_set as the dpll_md reg must be written early */
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
@ -1635,8 +1637,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
if (sdvox & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
if (sdvox & SDVO_AUDIO_ENABLE)
pipe_config->has_audio = true;
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
&val, 1)) {
u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT;
if ((val & mask) == mask)
pipe_config->has_audio = true;
}
if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
&val, 1)) {
@ -1647,6 +1654,32 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
}
static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
{
intel_sdvo_set_audio_state(intel_sdvo, 0);
}
static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
u8 *eld = connector->eld;
eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
intel_sdvo_set_audio_state(intel_sdvo, 0);
intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
SDVO_HBUF_TX_DISABLED,
eld, drm_eld_size(eld));
intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID |
SDVO_AUDIO_PRESENCE_DETECT);
}
static void intel_disable_sdvo(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *conn_state)
@ -1656,6 +1689,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
u32 temp;
if (old_crtc_state->has_audio)
intel_sdvo_disable_audio(intel_sdvo);
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo,
@ -1741,6 +1777,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
if (pipe_config->has_audio)
intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
}
static enum drm_mode_status
@ -2603,7 +2642,6 @@ static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
{
struct drm_encoder *encoder = &intel_sdvo->base.base;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct drm_connector *connector;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
struct intel_connector *intel_connector;
@ -2640,9 +2678,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
connector->connector_type = DRM_MODE_CONNECTOR_DVID;
/* gen3 doesn't do the hdmi bits in the SDVO register */
if (INTEL_GEN(dev_priv) >= 4 &&
intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo_connector->is_hdmi = true;
}

View File

@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg {
#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
#define SDVO_CMD_SET_AUDIO_STAT 0x91
#define SDVO_CMD_GET_AUDIO_STAT 0x92
#define SDVO_AUDIO_ELD_VALID (1 << 0)
#define SDVO_AUDIO_PRESENCE_DETECT (1 << 1)
#define SDVO_AUDIO_CP_READY (1 << 2)
#define SDVO_CMD_SET_HBUF_INDEX 0x93
#define SDVO_HBUF_INDEX_ELD 0
#define SDVO_HBUF_INDEX_AVI_IF 1

View File

@ -90,10 +90,6 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
mtk_disp_mutex_put(mtk_crtc->mutex);
@ -186,7 +182,7 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
ret = clk_enable(mtk_crtc->ddp_comp[i]->clk);
ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
if (ret) {
DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
goto err;
@ -196,7 +192,7 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
return 0;
err:
while (--i >= 0)
clk_disable(mtk_crtc->ddp_comp[i]->clk);
clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
return ret;
}
@ -206,7 +202,7 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
clk_disable(mtk_crtc->ddp_comp[i]->clk);
clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
}
static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
@ -577,15 +573,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
if (!comp) {
dev_err(dev, "Component %pOF not initialized\n", node);
ret = -ENODEV;
goto unprepare;
}
ret = clk_prepare(comp->clk);
if (ret) {
dev_err(dev,
"Failed to prepare clock for component %pOF: %d\n",
node, ret);
goto unprepare;
return ret;
}
mtk_crtc->ddp_comp[i] = comp;
@ -603,23 +591,17 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
BIT(pipe), type);
if (ret)
goto unprepare;
return ret;
}
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
NULL, pipe);
if (ret < 0)
goto unprepare;
return ret;
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
priv->num_pipes++;
return 0;
unprepare:
while (--i >= 0)
clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
return ret;
}


@ -303,6 +303,7 @@ err_config_cleanup:
static void mtk_drm_kms_deinit(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
@ -389,7 +390,9 @@ static void mtk_drm_unbind(struct device *dev)
struct mtk_drm_private *private = dev_get_drvdata(dev);
drm_dev_unregister(private->drm);
mtk_drm_kms_deinit(private->drm);
drm_dev_put(private->drm);
private->num_pipes = 0;
private->drm = NULL;
}
@ -560,13 +563,8 @@ err_node:
static int mtk_drm_remove(struct platform_device *pdev)
{
struct mtk_drm_private *private = platform_get_drvdata(pdev);
struct drm_device *drm = private->drm;
int i;
drm_dev_unregister(drm);
mtk_drm_kms_deinit(drm);
drm_dev_put(drm);
component_master_del(&pdev->dev, &mtk_drm_ops);
pm_runtime_disable(&pdev->dev);
of_node_put(private->mutex_node);


@ -136,7 +136,6 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
* VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
*/
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_pgoff = 0;
ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
@ -168,6 +167,12 @@ int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
obj = vma->vm_private_data;
/*
* Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
* whole buffer from the start.
*/
vma->vm_pgoff = 0;
return mtk_drm_gem_object_mmap(obj, vma);
}


@ -622,6 +622,15 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
if (--dsi->refcount != 0)
return;
/*
* mtk_dsi_stop() and mtk_dsi_start() are asymmetric, since
* mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
* which needs irq for vblank, and mtk_dsi_stop() will disable irq.
* mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
* after dsi is fully set.
*/
mtk_dsi_stop(dsi);
if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
if (dsi->panel) {
if (drm_panel_unprepare(dsi->panel)) {
@ -688,7 +697,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
}
}
mtk_dsi_stop(dsi);
mtk_dsi_poweroff(dsi);
dsi->enabled = false;
@ -836,6 +844,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
/* Skip connector cleanup if creation was delegated to the bridge */
if (dsi->conn.dev)
drm_connector_cleanup(&dsi->conn);
if (dsi->panel)
drm_panel_detach(dsi->panel);
}
static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)


@ -107,8 +107,6 @@ static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
priv->io_base + _REG(VPP_OUT_H_V_SIZE));
drm_crtc_vblank_on(crtc);
priv->viu.osd1_enabled = true;
}
static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
@ -137,8 +135,6 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
priv->io_base + _REG(VPP_MISC));
drm_crtc_vblank_on(crtc);
priv->viu.osd1_enabled = true;
}
static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
@ -256,6 +252,8 @@ static void meson_g12a_crtc_enable_osd1(struct meson_drm *priv)
writel_relaxed(priv->viu.osb_blend1_size,
priv->io_base +
_REG(VIU_OSD_BLEND_BLEND1_SIZE));
writel_bits_relaxed(3 << 8, 3 << 8,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
}
static void meson_crtc_enable_vd1(struct meson_drm *priv)


@ -305,6 +305,8 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
meson_plane->enabled = true;
}
priv->viu.osd1_enabled = true;
spin_unlock_irqrestore(&priv->drm->event_lock, flags);
}
@ -316,14 +318,14 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
/* Disable OSD1 */
if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
writel_bits_relaxed(BIT(0) | BIT(21), 0,
priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
writel_bits_relaxed(3 << 8, 0,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
else
writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
priv->io_base + _REG(VPP_MISC));
meson_plane->enabled = false;
priv->viu.osd1_enabled = false;
}
static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {


@ -503,8 +503,17 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
/* G12A HDMI PLL Needs specific parameters for 5.4GHz */
if (m >= 0xf7) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0xea68dc00);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x65771290);
if (frac < 0x10000) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4,
0x6a685c00);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5,
0x11551293);
} else {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4,
0xea68dc00);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5,
0x65771290);
}
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x55540000);
} else {


@ -405,8 +405,7 @@ void meson_viu_init(struct meson_drm *priv)
0 << 16 |
1,
priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
writel_relaxed(3 << 8 |
1 << 20,
writel_relaxed(1 << 20,
priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
writel_relaxed(1 << 20,
priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));


@ -10,6 +10,7 @@ config DRM_PANFROST
select IOMMU_IO_PGTABLE_LPAE
select DRM_GEM_SHMEM_HELPER
select PM_DEVFREQ
select DEVFREQ_GOV_SIMPLE_ONDEMAND
help
DRM driver for ARM Mali Midgard (T6xx, T7xx, T8xx) and
Bifrost (G3x, G5x, G7x) GPUs.


@ -140,7 +140,9 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
return 0;
ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
if (ret)
if (ret == -ENODEV) /* Optional, continue without devfreq */
return 0;
else if (ret)
return ret;
panfrost_devfreq_reset(pfdev);
@ -170,6 +172,9 @@ void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
int i;
if (!pfdev->devfreq.devfreq)
return;
panfrost_devfreq_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
pfdev->devfreq.slot[i].busy = false;
@ -179,6 +184,9 @@ void panfrost_devfreq_resume(struct panfrost_device *pfdev)
void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
{
if (!pfdev->devfreq.devfreq)
return;
devfreq_suspend_device(pfdev->devfreq.devfreq);
}
@ -188,6 +196,9 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
ktime_t now;
ktime_t last;
if (!pfdev->devfreq.devfreq)
return;
now = ktime_get();
last = pfdev->devfreq.slot[slot].time_last_update;


@ -35,8 +35,10 @@ static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
{
struct a4tech_sc *a4 = hid_get_drvdata(hdev);
if (usage->type == EV_REL && usage->code == REL_WHEEL)
if (usage->type == EV_REL && usage->code == REL_WHEEL_HI_RES) {
set_bit(REL_HWHEEL, *bit);
set_bit(REL_HWHEEL_HI_RES, *bit);
}
if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007)
return -1;
@ -57,7 +59,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
input = field->hidinput->input;
if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8) {
if (usage->type == EV_REL && usage->code == REL_WHEEL) {
if (usage->type == EV_REL && usage->code == REL_WHEEL_HI_RES) {
a4->delayed_value = value;
return 1;
}
@ -65,6 +67,8 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
if (usage->hid == 0x000100b8) {
input_event(input, EV_REL, value ? REL_HWHEEL :
REL_WHEEL, a4->delayed_value);
input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES :
REL_WHEEL_HI_RES, a4->delayed_value * 120);
return 1;
}
}
@ -74,8 +78,9 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
return 1;
}
if (usage->code == REL_WHEEL && a4->hw_wheel) {
if (usage->code == REL_WHEEL_HI_RES && a4->hw_wheel) {
input_event(input, usage->type, REL_HWHEEL, value);
input_event(input, usage->type, REL_HWHEEL_HI_RES, value * 120);
return 1;
}
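
As background (summary, not from the patch): the REL_*_HI_RES axes report wheel movement in units of 120 per detent, so a driver that only sees whole clicks scales by 120, exactly as the changes above do.

	/* Sketch: one legacy click plus its high-resolution equivalent. */
	input_event(input, EV_REL, REL_HWHEEL, clicks);
	input_event(input, EV_REL, REL_HWHEEL_HI_RES, clicks * 120);
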


@ -27,7 +27,6 @@
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/async.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
@ -1311,10 +1310,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
u32 hid_field_extract(const struct hid_device *hid, u8 *report,
unsigned offset, unsigned n)
{
if (n > 256) {
hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
if (n > 32) {
hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
n, current->comm);
n = 256;
n = 32;
}
return __extract(report, offset, n);
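
The lowered clamp matches the return type: the function hands back a u32, which can carry at most 32 bits. A sketch of an LSB-first extractor (hypothetical, simpler than the kernel's __extract()) shows where a larger n breaks down.

	/* Sketch: HID fields are little-endian bit strings; 1U << i is
	 * undefined behaviour once i reaches 32, hence the n <= 32 limit. */
	static u32 extract_bits(const u8 *report, unsigned int offset,
				unsigned int n)
	{
		u32 val = 0;
		unsigned int i;

		for (i = 0; i < n; i++)
			if (report[(offset + i) / 8] & (1 << ((offset + i) % 8)))
				val |= 1U << i;
		return val;
	}
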
@ -2362,15 +2361,6 @@ int hid_add_device(struct hid_device *hdev)
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
hdev->vendor, hdev->product, atomic_inc_return(&id));
/*
* Try loading the module for the device before the add, so that we do
* not first have hid-generic binding only to have it replaced
* immediately afterwards with a specialized driver.
*/
if (!current_is_async())
request_module("hid:b%04Xg%04Xv%08Xp%08X", hdev->bus,
hdev->group, hdev->vendor, hdev->product);
hid_debug_register(hdev, dev_name(&hdev->dev));
ret = device_add(&hdev->dev);
if (!ret)


@ -606,5 +606,7 @@ static void __exit mousevsc_exit(void)
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic HID Driver");
module_init(mousevsc_init);
module_exit(mousevsc_exit);


@ -1086,6 +1086,7 @@
#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5 0x81a7
#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855


@ -113,6 +113,7 @@ enum recvr_type {
recvr_type_dj,
recvr_type_hidpp,
recvr_type_gaming_hidpp,
recvr_type_mouse_only,
recvr_type_27mhz,
recvr_type_bluetooth,
};
@ -864,9 +865,12 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
schedule_work(&djrcv_dev->work);
}
static void logi_hidpp_dev_conn_notif_equad(struct hidpp_event *hidpp_report,
static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
struct hidpp_event *hidpp_report,
struct dj_workitem *workitem)
{
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
workitem->type = WORKITEM_TYPE_PAIRED;
workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
HIDPP_DEVICE_TYPE_MASK;
@ -880,6 +884,8 @@ static void logi_hidpp_dev_conn_notif_equad(struct hidpp_event *hidpp_report,
break;
case REPORT_TYPE_MOUSE:
workitem->reports_supported |= STD_MOUSE | HIDPP;
if (djrcv_dev->type == recvr_type_mouse_only)
workitem->reports_supported |= MULTIMEDIA;
break;
}
}
@ -923,7 +929,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
case 0x01:
device_type = "Bluetooth";
/* Bluetooth connect packet contents are the same as (e)QUAD */
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
if (!(hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
HIDPP_MANUFACTURER_MASK)) {
hid_info(hdev, "Non Logitech device connected on slot %d\n",
@ -937,18 +943,18 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
break;
case 0x03:
device_type = "QUAD or eQUAD";
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
break;
case 0x04:
device_type = "eQUAD step 4 DJ";
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
break;
case 0x05:
device_type = "DFU Lite";
break;
case 0x06:
device_type = "eQUAD step 4 Lite";
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
break;
case 0x07:
device_type = "eQUAD step 4 Gaming";
@ -958,11 +964,11 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
break;
case 0x0a:
device_type = "eQUAD nano Lite";
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
break;
case 0x0c:
device_type = "eQUAD Lightspeed";
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
workitem.reports_supported |= STD_KEYBOARD;
break;
}
@ -1313,7 +1319,8 @@ static int logi_dj_ll_parse(struct hid_device *hid)
if (djdev->reports_supported & STD_MOUSE) {
dbg_hid("%s: sending a mouse descriptor, reports_supported: %llx\n",
__func__, djdev->reports_supported);
if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp)
if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp ||
djdev->dj_receiver_dev->type == recvr_type_mouse_only)
rdcat(rdesc, &rsize, mse_high_res_descriptor,
sizeof(mse_high_res_descriptor));
else if (djdev->dj_receiver_dev->type == recvr_type_27mhz)
@ -1556,15 +1563,19 @@ static int logi_dj_raw_event(struct hid_device *hdev,
data[0] = data[1];
data[1] = 0;
}
/* The 27 MHz mouse-only receiver sends unnumbered mouse data */
/*
* Mouse-only receivers send unnumbered mouse data. The 27 MHz
* receiver uses 6 byte packets, the nano receiver 8 bytes.
*/
if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
size == 6) {
u8 mouse_report[7];
size <= 8) {
u8 mouse_report[9];
/* Prepend report id */
mouse_report[0] = REPORT_TYPE_MOUSE;
memcpy(mouse_report + 1, data, 6);
logi_dj_recv_forward_input_report(hdev, mouse_report, 7);
memcpy(mouse_report + 1, data, size);
logi_dj_recv_forward_input_report(hdev, mouse_report,
size + 1);
}
return false;
@ -1635,6 +1646,7 @@ static int logi_dj_probe(struct hid_device *hdev,
case recvr_type_dj: no_dj_interfaces = 3; break;
case recvr_type_hidpp: no_dj_interfaces = 2; break;
case recvr_type_gaming_hidpp: no_dj_interfaces = 3; break;
case recvr_type_mouse_only: no_dj_interfaces = 2; break;
case recvr_type_27mhz: no_dj_interfaces = 2; break;
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
}
@ -1808,10 +1820,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2),
.driver_data = recvr_type_dj},
{ /* Logitech Nano (non DJ) receiver */
{ /* Logitech Nano mouse only receiver */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER),
.driver_data = recvr_type_hidpp},
.driver_data = recvr_type_mouse_only},
{ /* Logitech Nano (non DJ) receiver */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
@ -1836,6 +1848,14 @@ static const struct hid_device_id logi_dj_receivers[] = {
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
0xc70a),
.driver_data = recvr_type_bluetooth},
{ /* Logitech MX5500 HID++ / bluetooth receiver keyboard intf. */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
0xc71b),
.driver_data = recvr_type_bluetooth},
{ /* Logitech MX5500 HID++ / bluetooth receiver mouse intf. */
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
0xc71c),
.driver_data = recvr_type_bluetooth},
{}
};


@ -3728,6 +3728,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
LDJ_DEVICE(0xb305),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */
LDJ_DEVICE(0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ LDJ_DEVICE(HID_ANY_ID) },
@ -3740,6 +3743,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* Keyboard MX3200 (Y-RAV80) */
L27MHZ_DEVICE(0x005c),
.driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL },
{ /* S510 Media Remote */
L27MHZ_DEVICE(0x00fe),
.driver_data = HIDPP_QUIRK_KBD_SCROLL_WHEEL },
{ L27MHZ_DEVICE(HID_ANY_ID) },
@ -3756,6 +3762,9 @@ static const struct hid_device_id hidpp_devices[] = {
{ /* MX5000 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{ /* MX5500 keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
{}
};


@ -637,6 +637,13 @@ static void mt_store_field(struct hid_device *hdev,
if (*target != DEFAULT_TRUE &&
*target != DEFAULT_FALSE &&
*target != DEFAULT_ZERO) {
if (usage->contactid == DEFAULT_ZERO ||
usage->x == DEFAULT_ZERO ||
usage->y == DEFAULT_ZERO) {
hid_dbg(hdev,
"ignoring duplicate usage on incomplete");
return;
}
usage = mt_allocate_usage(hdev, application);
if (!usage)
return;


@ -35,6 +35,7 @@
/* device flags */
#define RMI_DEVICE BIT(0)
#define RMI_DEVICE_HAS_PHYS_BUTTONS BIT(1)
#define RMI_DEVICE_OUTPUT_SET_REPORT BIT(2)
/*
* retrieve the ctrl registers
@ -163,9 +164,19 @@ static int rmi_set_mode(struct hid_device *hdev, u8 mode)
static int rmi_write_report(struct hid_device *hdev, u8 *report, int len)
{
struct rmi_data *data = hid_get_drvdata(hdev);
int ret;
ret = hid_hw_output_report(hdev, (void *)report, len);
if (data->device_flags & RMI_DEVICE_OUTPUT_SET_REPORT) {
/*
* Talk to device by using SET_REPORT requests instead.
*/
ret = hid_hw_raw_request(hdev, report[0], report,
len, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
} else {
ret = hid_hw_output_report(hdev, (void *)report, len);
}
if (ret < 0) {
dev_err(&hdev->dev, "failed to write hid report (%d)\n", ret);
return ret;
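
Both transports used above are regular HID core calls; a sketch with a hypothetical report buffer and flag:

	/* Sketch: send an output report either over the interrupt OUT path
	 * or, for devices that require it (the quirk above), as a
	 * control-endpoint SET_REPORT request. buf[0] is the report ID. */
	u8 buf[] = { 0x0f, 0x01 };	/* hypothetical report */

	if (use_set_report)		/* hypothetical flag */
		ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
					 HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
	else
		ret = hid_hw_output_report(hdev, buf, sizeof(buf));
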
@ -747,6 +758,8 @@ static const struct hid_device_id rmi_id[] = {
.driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5),
.driver_data = RMI_DEVICE_OUTPUT_SET_REPORT },
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
{ }
};


@ -354,6 +354,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
},
.driver_data = (void *)&sipodev_desc
},
{
.ident = "iBall Aer3",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "iBall"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Aer3"),
},
.driver_data = (void *)&sipodev_desc
},
{ } /* Terminate list */
};


@ -1232,13 +1232,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
/* Add back in missing bits of ID for non-USI pens */
wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
}
wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0]));
for (i = 0; i < pen_frames; i++) {
unsigned char *frame = &data[i*pen_frame_len + 1];
bool valid = frame[0] & 0x80;
bool prox = frame[0] & 0x40;
bool range = frame[0] & 0x20;
bool invert = frame[0] & 0x10;
if (!valid)
continue;
@ -1247,9 +1247,24 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
wacom->shared->stylus_in_proximity = false;
wacom_exit_report(wacom);
input_sync(pen_input);
wacom->tool[0] = 0;
wacom->id[0] = 0;
wacom->serial[0] = 0;
return;
}
if (range) {
if (!wacom->tool[0]) { /* first in range */
/* Going into range select tool */
if (invert)
wacom->tool[0] = BTN_TOOL_RUBBER;
else if (wacom->id[0])
wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]);
else
wacom->tool[0] = BTN_TOOL_PEN;
}
input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
@ -1271,24 +1286,27 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
get_unaligned_le16(&frame[11]));
}
}
input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
if (wacom->features.type == INTUOSP2_BT) {
input_report_abs(pen_input, ABS_DISTANCE,
range ? frame[13] : wacom->features.distance_max);
} else {
input_report_abs(pen_input, ABS_DISTANCE,
range ? frame[7] : wacom->features.distance_max);
if (wacom->tool[0]) {
input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
if (wacom->features.type == INTUOSP2_BT) {
input_report_abs(pen_input, ABS_DISTANCE,
range ? frame[13] : wacom->features.distance_max);
} else {
input_report_abs(pen_input, ABS_DISTANCE,
range ? frame[7] : wacom->features.distance_max);
}
input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09);
input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
input_report_key(pen_input, wacom->tool[0], prox);
input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
input_report_abs(pen_input, ABS_MISC,
wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
}
input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01);
input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
input_report_key(pen_input, wacom->tool[0], prox);
input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
input_report_abs(pen_input, ABS_MISC,
wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
wacom->shared->stylus_in_proximity = prox;
input_sync(pen_input);
@ -1349,11 +1367,17 @@ static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom)
if (wacom->num_contacts_left <= 0) {
wacom->num_contacts_left = 0;
wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
input_sync(touch_input);
}
}
input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
input_sync(touch_input);
if (wacom->num_contacts_left == 0) {
// Be careful that we don't accidentally call input_sync with
// only a partial set of fingers processed
input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
input_sync(touch_input);
}
}
static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
@ -1361,7 +1385,7 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
struct input_dev *pad_input = wacom->pad_input;
unsigned char *data = wacom->data;
int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
int buttons = data[282] | ((data[281] & 0x40) << 2);
int ring = data[285] & 0x7F;
bool ringstatus = data[285] & 0x80;
bool prox = buttons || ringstatus;
@ -3810,7 +3834,7 @@ static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
int mask, int group)
{
int button_per_group;
int group_button;
/*
* 21UX2 has LED group 1 to the left and LED group 0
@ -3820,9 +3844,12 @@ static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
if (wacom->wacom_wac.features.type == WACOM_21UX2)
group = 1 - group;
button_per_group = button_count/wacom->led.count;
group_button = group * (button_count/wacom->led.count);
return mask & (1 << (group * button_per_group));
if (wacom->wacom_wac.features.type == INTUOSP2_BT)
group_button = 8;
return mask & (1 << group_button);
}
static void wacom_update_led(struct wacom *wacom, int button_count, int mask,


@ -81,6 +81,7 @@ static struct i2c_algo_bit_data ioc_data = {
static struct i2c_adapter ioc_ops = {
.nr = 0,
.name = "ioc",
.algo_data = &ioc_data,
};


@ -21,7 +21,6 @@
#include <linux/platform_device.h>
#include <linux/i2c-algo-pca.h>
#include <linux/platform_data/i2c-pca-platform.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/io.h>
#include <linux/of.h>
@ -173,7 +172,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
i2c->adap.dev.parent = &pdev->dev;
i2c->adap.dev.of_node = np;
i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset-gpios", GPIOD_OUT_LOW);
i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(i2c->gpio))
return PTR_ERR(i2c->gpio);


@ -47,6 +47,15 @@
#include "arm-smmu-regs.h"
/*
* Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
* global register space are still, in fact, using a hypervisor to mediate it
* by trapping and emulating register accesses. Sadly, some deployed versions
* of said trapping code have bugs wherein they go horribly wrong for stores
* using r31 (i.e. XZR/WZR) as the source register.
*/
#define QCOM_DUMMY_VAL -1
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
@ -411,7 +420,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
{
unsigned int spin_cnt, delay;
writel_relaxed(0, sync);
writel_relaxed(QCOM_DUMMY_VAL, sync);
for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
@ -1751,8 +1760,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
}
/* Invalidate the TLB, just in case */
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);


@ -2504,6 +2504,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
}
}
spin_lock(&iommu->lock);
spin_lock_irqsave(&device_domain_lock, flags);
if (dev)
found = find_domain(dev);
@ -2519,17 +2520,16 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (found) {
spin_unlock_irqrestore(&device_domain_lock, flags);
spin_unlock(&iommu->lock);
free_devinfo_mem(info);
/* Caller must free the original domain */
return found;
}
spin_lock(&iommu->lock);
ret = domain_attach_iommu(domain, iommu);
spin_unlock(&iommu->lock);
if (ret) {
spin_unlock_irqrestore(&device_domain_lock, flags);
spin_unlock(&iommu->lock);
free_devinfo_mem(info);
return NULL;
}
@ -2539,6 +2539,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
if (dev)
dev->archdata.iommu = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
spin_unlock(&iommu->lock);
/* PASID table is mandatory for a PCI device in scalable mode. */
if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
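
The reshuffled locking above serves one rule: every path must nest the two locks in the same order. A generic sketch (lock names are placeholders):

	/* Sketch: consistent nesting avoids an ABBA deadlock between two
	 * CPUs taking the locks in opposite orders. */
	spin_lock(&outer_lock);			/* here: iommu->lock */
	spin_lock_irqsave(&inner_lock, flags);	/* here: device_domain_lock */
	/* ... critical section ... */
	spin_unlock_irqrestore(&inner_lock, flags);
	spin_unlock(&outer_lock);
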


@ -389,7 +389,7 @@ static inline void pasid_set_present(struct pasid_entry *pe)
*/
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
pasid_set_bits(&pe->val[1], 1 << 23, value);
pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}
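
The one-line fix makes sense given the helper's contract; a simplified sketch of a set-bits helper of this shape (hypothetical name, without the kernel's READ_ONCE/WRITE_ONCE):

	/* Sketch: replace the field under 'mask' with 'bits'. The caller
	 * must pass 'bits' already shifted into position - a bare 0/1
	 * against a bit-23 mask (the bug fixed above) never sets the bit. */
	static inline void set_bits_under_mask(u64 *ptr, u64 mask, u64 bits)
	{
		*ptr = (*ptr & ~mask) | (bits & mask);
	}
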
/*


@ -329,7 +329,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
type = "unmanaged\n";
break;
case IOMMU_DOMAIN_DMA:
type = "DMA";
type = "DMA\n";
break;
}
}


@ -887,12 +887,22 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
struct btree_iter iter;
struct bkey preceding_key_on_stack = ZERO_KEY;
struct bkey *preceding_key_p = &preceding_key_on_stack;
BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
m = bch_btree_iter_init(b, &iter, b->ops->is_extents
? PRECEDING_KEY(&START_KEY(k))
: PRECEDING_KEY(k));
/*
* If k has a preceding key, preceding_key_p will be set to the address
* of k's preceding key; otherwise preceding_key_p will be set
* to NULL inside preceding_key().
*/
if (b->ops->is_extents)
preceding_key(&START_KEY(k), &preceding_key_p);
else
preceding_key(k, &preceding_key_p);
m = bch_btree_iter_init(b, &iter, preceding_key_p);
if (b->ops->insert_fixup(b, k, &iter, replace_key))
return status;


@ -434,20 +434,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
return __bch_cut_back(where, k);
}
#define PRECEDING_KEY(_k) \
({ \
struct bkey *_ret = NULL; \
\
if (KEY_INODE(_k) || KEY_OFFSET(_k)) { \
_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0); \
\
if (!_ret->low) \
_ret->high--; \
_ret->low--; \
} \
\
_ret; \
})
/*
* Pointer '*preceding_key_p' points to a memory object used to store the
* preceding key of k. If the preceding key does not exist, '*preceding_key_p'
* is set to NULL. The caller of preceding_key() therefore needs to take care
* of the memory that '*preceding_key_p' points to before calling
* preceding_key(). Currently the only caller of preceding_key() is
* bch_btree_insert_key(), and it points to an on-stack variable, so the
* memory release is handled by the stack frame itself.
*/
static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
{
if (KEY_INODE(k) || KEY_OFFSET(k)) {
(**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
if (!(*preceding_key_p)->low)
(*preceding_key_p)->high--;
(*preceding_key_p)->low--;
} else {
(*preceding_key_p) = NULL;
}
}
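
A sketch of the arithmetic preceding_key() performs: the key's (high, low) pair is decremented as one 128-bit value, with the !low test acting as the borrow.

	/* Sketch: 128-bit decrement across two 64-bit words. */
	struct k128 { u64 high, low; };

	static void dec128(struct k128 *k)
	{
		if (!k->low)
			k->high--;	/* borrow from the high word */
		k->low--;		/* low == 0 wraps to ~0ULL */
	}
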
static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
{


@ -431,8 +431,13 @@ STORE(bch_cached_dev)
bch_writeback_queue(dc);
}
/*
* Only set BCACHE_DEV_WB_RUNNING when the cached device is attached to
* a cache set; otherwise it doesn't make sense.
*/
if (attr == &sysfs_writeback_percent)
if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
if ((dc->disk.c != NULL) &&
(!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);


@ -905,7 +905,7 @@ static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe,
"DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n",
fe->dvb->num, fe->id);
dprintk("frequency interval: tuner: %u...%u, frontend: %u...%u",
dev_dbg(fe->dvb->device, "frequency interval: tuner: %u...%u, frontend: %u...%u",
tuner_min, tuner_max, frontend_min, frontend_max);
/* If the standard is for satellite, convert frequencies to kHz */


@ -560,7 +560,7 @@ struct hfi_capability {
struct hfi_capabilities {
u32 num_capabilities;
struct hfi_capability *data;
struct hfi_capability data[];
};
#define HFI_DEBUG_MSG_LOW 0x01
@ -717,7 +717,7 @@ struct hfi_profile_level {
struct hfi_profile_level_supported {
u32 profile_count;
struct hfi_profile_level *profile_level;
struct hfi_profile_level profile_level[];
};
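
Turning the trailing pointers into C99 flexible array members changes the allocation pattern: header and entries come from a single allocation. A sketch (hypothetical count variable, assuming struct_size() from linux/overflow.h):

	/* Sketch: one allocation covers the header plus 'num' entries;
	 * struct_size() checks the size computation for overflow. */
	struct hfi_capabilities *caps;

	caps = kzalloc(struct_size(caps, data, num), GFP_KERNEL);
	if (!caps)
		return -ENOMEM;
	caps->num_capabilities = num;
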
struct hfi_quality_vs_speed {


@ -303,11 +303,19 @@ static const struct attribute_group *pmem_attribute_groups[] = {
NULL,
};
static void pmem_release_queue(void *q)
static void __pmem_release_queue(struct percpu_ref *ref)
{
struct request_queue *q;
q = container_of(ref, typeof(*q), q_usage_counter);
blk_cleanup_queue(q);
}
static void pmem_release_queue(void *ref)
{
__pmem_release_queue(ref);
}
static void pmem_freeze_queue(struct percpu_ref *ref)
{
struct request_queue *q;
@ -399,12 +407,10 @@ static int pmem_attach_disk(struct device *dev,
if (!q)
return -ENOMEM;
if (devm_add_action_or_reset(dev, pmem_release_queue, q))
return -ENOMEM;
pmem->pfn_flags = PFN_DEV;
pmem->pgmap.ref = &q->q_usage_counter;
pmem->pgmap.kill = pmem_freeze_queue;
pmem->pgmap.cleanup = __pmem_release_queue;
if (is_nd_pfn(dev)) {
if (setup_pagemap_fsdax(dev, &pmem->pgmap))
return -ENOMEM;
@ -425,6 +431,9 @@ static int pmem_attach_disk(struct device *dev,
pmem->pfn_flags |= PFN_MAP;
memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
} else {
if (devm_add_action_or_reset(dev, pmem_release_queue,
&q->q_usage_counter))
return -ENOMEM;
addr = devm_memremap(dev, pmem->phys_addr,
pmem->size, ARCH_MEMREMAP_PMEM);
memcpy(&bb_res, &nsio->res, sizeof(bb_res));
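
The queue teardown above rides on the devres action helper; a sketch of the general pattern (hypothetical names):

	/* Sketch: register a callback that runs automatically on driver
	 * detach; if registration fails, the callback runs immediately and
	 * an error is returned. */
	static void my_release(void *data)
	{
		/* undo whatever 'data' refers to */
	}

	if (devm_add_action_or_reset(dev, my_release, res))
		return -ENOMEM;
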


@ -20,12 +20,16 @@
#include <linux/seq_buf.h>
struct pci_p2pdma {
struct percpu_ref devmap_ref;
struct completion devmap_ref_done;
struct gen_pool *pool;
bool p2pmem_published;
};
struct p2pdma_pagemap {
struct dev_pagemap pgmap;
struct percpu_ref ref;
struct completion ref_done;
};
static ssize_t size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@ -74,41 +78,45 @@ static const struct attribute_group p2pmem_group = {
.name = "p2pmem",
};
static struct p2pdma_pagemap *to_p2p_pgmap(struct percpu_ref *ref)
{
return container_of(ref, struct p2pdma_pagemap, ref);
}
static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
{
struct pci_p2pdma *p2p =
container_of(ref, struct pci_p2pdma, devmap_ref);
struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
complete_all(&p2p->devmap_ref_done);
complete(&p2p_pgmap->ref_done);
}
static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
{
/*
* pci_p2pdma_add_resource() may be called multiple times
* by a driver and may register the percpu_kill devm action multiple
* times. We only want the first action to actually kill the
* percpu_ref.
*/
if (percpu_ref_is_dying(ref))
return;
percpu_ref_kill(ref);
}
static void pci_p2pdma_percpu_cleanup(struct percpu_ref *ref)
{
struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
wait_for_completion(&p2p_pgmap->ref_done);
percpu_ref_exit(&p2p_pgmap->ref);
}
static void pci_p2pdma_release(void *data)
{
struct pci_dev *pdev = data;
struct pci_p2pdma *p2pdma = pdev->p2pdma;
if (!pdev->p2pdma)
if (!p2pdma)
return;
wait_for_completion(&pdev->p2pdma->devmap_ref_done);
percpu_ref_exit(&pdev->p2pdma->devmap_ref);
gen_pool_destroy(pdev->p2pdma->pool);
sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
/* Flush and disable pci_alloc_p2p_mem() */
pdev->p2pdma = NULL;
synchronize_rcu();
gen_pool_destroy(p2pdma->pool);
sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
}
static int pci_p2pdma_setup(struct pci_dev *pdev)
@ -124,12 +132,6 @@ static int pci_p2pdma_setup(struct pci_dev *pdev)
if (!p2p->pool)
goto out;
init_completion(&p2p->devmap_ref_done);
error = percpu_ref_init(&p2p->devmap_ref,
pci_p2pdma_percpu_release, 0, GFP_KERNEL);
if (error)
goto out_pool_destroy;
error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
if (error)
goto out_pool_destroy;
@ -163,6 +165,7 @@ out:
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
u64 offset)
{
struct p2pdma_pagemap *p2p_pgmap;
struct dev_pagemap *pgmap;
void *addr;
int error;
@ -185,18 +188,27 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
return error;
}
pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
if (!pgmap)
p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
if (!p2p_pgmap)
return -ENOMEM;
init_completion(&p2p_pgmap->ref_done);
error = percpu_ref_init(&p2p_pgmap->ref,
pci_p2pdma_percpu_release, 0, GFP_KERNEL);
if (error)
goto pgmap_free;
pgmap = &p2p_pgmap->pgmap;
pgmap->res.start = pci_resource_start(pdev, bar) + offset;
pgmap->res.end = pgmap->res.start + size - 1;
pgmap->res.flags = pci_resource_flags(pdev, bar);
pgmap->ref = &pdev->p2pdma->devmap_ref;
pgmap->ref = &p2p_pgmap->ref;
pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
pci_resource_start(pdev, bar);
pgmap->kill = pci_p2pdma_percpu_kill;
pgmap->cleanup = pci_p2pdma_percpu_cleanup;
addr = devm_memremap_pages(&pdev->dev, pgmap);
if (IS_ERR(addr)) {
@ -204,19 +216,22 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
goto pgmap_free;
}
error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr,
error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
pci_bus_address(pdev, bar) + offset,
resource_size(&pgmap->res), dev_to_node(&pdev->dev));
resource_size(&pgmap->res), dev_to_node(&pdev->dev),
&p2p_pgmap->ref);
if (error)
goto pgmap_free;
goto pages_free;
pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
&pgmap->res);
return 0;
pages_free:
devm_memunmap_pages(&pdev->dev, pgmap);
pgmap_free:
devm_kfree(&pdev->dev, pgmap);
devm_kfree(&pdev->dev, p2p_pgmap);
return error;
}
EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
@ -585,19 +600,30 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
*/
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
void *ret;
void *ret = NULL;
struct percpu_ref *ref;
/*
* Pairs with synchronize_rcu() in pci_p2pdma_release() to
* ensure pdev->p2pdma is non-NULL for the duration of the
* read-lock.
*/
rcu_read_lock();
if (unlikely(!pdev->p2pdma))
return NULL;
goto out;
if (unlikely(!percpu_ref_tryget_live(&pdev->p2pdma->devmap_ref)))
return NULL;
ret = (void *)gen_pool_alloc(pdev->p2pdma->pool, size);
if (unlikely(!ret))
percpu_ref_put(&pdev->p2pdma->devmap_ref);
ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
(void **) &ref);
if (!ret)
goto out;
if (unlikely(!percpu_ref_tryget_live(ref))) {
gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
ret = NULL;
goto out;
}
out:
rcu_read_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
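
The allocation path above leans on a classic RCU publish/retract pattern; a compact sketch with hypothetical names:

	/* Sketch: the writer retracts the pointer, then waits until every
	 * rcu_read_lock() section that might still see it has finished. */
	static void teardown(struct dev_priv *d)
	{
		d->res = NULL;		/* retract; new readers see NULL */
		synchronize_rcu();	/* wait out existing readers */
		free_resource(d);	/* hypothetical final teardown */
	}

	static bool try_use(struct dev_priv *d)
	{
		bool ok = false;

		rcu_read_lock();
		if (d->res)
			ok = true;	/* d->res stays valid until unlock */
		rcu_read_unlock();
		return ok;
	}
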
@ -610,8 +636,11 @@ EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
*/
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
{
gen_pool_free(pdev->p2pdma->pool, (uintptr_t)addr, size);
percpu_ref_put(&pdev->p2pdma->devmap_ref);
struct percpu_ref *ref;
gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
(void **) &ref);
percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(pci_free_p2pmem);


@ -678,6 +678,7 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
static int pci_pm_prepare(struct device *dev)
{
struct device_driver *drv = dev->driver;
struct pci_dev *pci_dev = to_pci_dev(dev);
if (drv && drv->pm && drv->pm->prepare) {
int error = drv->pm->prepare(dev);
@ -687,7 +688,15 @@ static int pci_pm_prepare(struct device *dev)
if (!error && dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_PREPARE))
return 0;
}
return pci_dev_keep_suspended(to_pci_dev(dev));
if (pci_dev_need_resume(pci_dev))
return 0;
/*
* The PME setting needs to be adjusted here in case the direct-complete
* optimization is used with respect to this device.
*/
pci_dev_adjust_pme(pci_dev);
return 1;
}
static void pci_pm_complete(struct device *dev)
@ -757,9 +766,11 @@ static int pci_pm_suspend(struct device *dev)
* better to resume the device from runtime suspend here.
*/
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
!pci_dev_keep_suspended(pci_dev)) {
pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
pci_dev_adjust_pme(pci_dev);
}
if (pm->suspend) {
@ -1130,10 +1141,13 @@ static int pci_pm_poweroff(struct device *dev)
/* The reason to do that is the same as in pci_pm_suspend(). */
if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
!pci_dev_keep_suspended(pci_dev))
pci_dev_need_resume(pci_dev)) {
pm_runtime_resume(dev);
pci_dev->state_saved = false;
} else {
pci_dev_adjust_pme(pci_dev);
}
pci_dev->state_saved = false;
if (pm->poweroff) {
int error;


@ -1004,15 +1004,10 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
if (state == PCI_D0) {
pci_platform_power_transition(dev, PCI_D0);
/*
* Mandatory power management transition delays, see
* PCI Express Base Specification Revision 2.0 Section
* 6.6.1: Conventional Reset. Do not delay for
* devices powered on/off by corresponding bridge,
* because we have already delayed for the bridge.
* Mandatory power management transition delays are
* handled in the PCIe portdrv resume hooks.
*/
if (dev->runtime_d3cold) {
if (dev->d3cold_delay && !dev->imm_ready)
msleep(dev->d3cold_delay);
/*
* When powering on a bridge from D3cold, the
* whole hierarchy may be powered on into
@ -2065,6 +2060,13 @@ static void pci_pme_list_scan(struct work_struct *work)
*/
if (bridge && bridge->current_state != PCI_D0)
continue;
/*
* If the device is in D3cold it should not be
* polled either.
*/
if (pme_dev->dev->current_state == PCI_D3cold)
continue;
pci_pme_wakeup(pme_dev->dev, NULL);
} else {
list_del(&pme_dev->list);
@ -2459,45 +2461,56 @@ bool pci_dev_run_wake(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
/**
* pci_dev_keep_suspended - Check if the device can stay in the suspended state.
* pci_dev_need_resume - Check if it is necessary to resume the device.
* @pci_dev: Device to check.
*
* Return 'true' if the device is runtime-suspended, it doesn't have to be
* Return 'true' if the device is not runtime-suspended or it has to be
* reconfigured due to wakeup settings difference between system and runtime
* suspend and the current power state of it is suitable for the upcoming
* (system) transition.
*
* If the device is not configured for system wakeup, disable PME for it before
* returning 'true' to prevent it from waking up the system unnecessarily.
* suspend, or the current power state of it is not suitable for the upcoming
* (system-wide) transition.
*/
bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
bool pci_dev_need_resume(struct pci_dev *pci_dev)
{
struct device *dev = &pci_dev->dev;
bool wakeup = device_may_wakeup(dev);
pci_power_t target_state;
if (!pm_runtime_suspended(dev)
|| pci_target_state(pci_dev, wakeup) != pci_dev->current_state
|| platform_pci_need_resume(pci_dev))
return false;
if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
return true;
target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
/*
* At this point the device is good to go unless it's been configured
* to generate PME at the runtime suspend time, but it is not supposed
* to wake up the system. In that case, simply disable PME for it
* (it will have to be re-enabled on exit from system resume).
*
* If the device's power state is D3cold and the platform check above
* hasn't triggered, the device's configuration is suitable and we don't
* need to manipulate it at all.
* If the earlier platform check has not triggered, D3cold is just power
* removal on top of D3hot, so no need to resume the device in that
* case.
*/
return target_state != pci_dev->current_state &&
target_state != PCI_D3cold &&
pci_dev->current_state != PCI_D3hot;
}
/**
* pci_dev_adjust_pme - Adjust PME setting for a suspended device.
* @pci_dev: Device to check.
*
* If the device is suspended and it is not configured for system wakeup,
* disable PME for it to prevent it from waking up the system unnecessarily.
*
* Note that if the device's power state is D3cold and the platform check in
* pci_dev_need_resume() has not triggered, the device's configuration need not
* be changed.
*/
void pci_dev_adjust_pme(struct pci_dev *pci_dev)
{
struct device *dev = &pci_dev->dev;
spin_lock_irq(&dev->power.lock);
if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
!wakeup)
if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
pci_dev->current_state < PCI_D3cold)
__pci_pme_active(pci_dev, false);
spin_unlock_irq(&dev->power.lock);
return true;
}
/**
@ -4568,14 +4581,16 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
}
/**
* pcie_wait_for_link - Wait until link is active or inactive
* pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
* @delay: Delay to wait after link has become active (in ms)
*
* Use this to wait till link becomes active or inactive.
*/
bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
{
int timeout = 1000;
bool ret;
@ -4612,13 +4627,25 @@ bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
timeout -= 10;
}
if (active && ret)
msleep(100);
msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
active ? "set" : "cleared");
return ret == active;
}
/**
* pcie_wait_for_link - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
*
* Use this to wait till link becomes active or inactive.
*/
bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
{
return pcie_wait_for_link_delay(pdev, active, 100);
}
void pci_reset_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;


@ -82,7 +82,8 @@ int pci_finish_runtime_suspend(struct pci_dev *dev);
void pcie_clear_root_pme_status(struct pci_dev *dev);
int __pci_pme_wakeup(struct pci_dev *dev, void *ign);
void pci_pme_restore(struct pci_dev *dev);
bool pci_dev_keep_suspended(struct pci_dev *dev);
bool pci_dev_need_resume(struct pci_dev *dev);
void pci_dev_adjust_pme(struct pci_dev *dev);
void pci_dev_complete_resume(struct pci_dev *pci_dev);
void pci_config_pm_runtime_get(struct pci_dev *dev);
void pci_config_pm_runtime_put(struct pci_dev *dev);
@ -493,6 +494,7 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
u32 service);
bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay);
bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
#ifdef CONFIG_PCIEASPM
void pcie_aspm_init_link_state(struct pci_dev *pdev);


@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@ -378,6 +379,67 @@ static int pm_iter(struct device *dev, void *data)
return 0;
}
static int get_downstream_delay(struct pci_bus *bus)
{
struct pci_dev *pdev;
int min_delay = 100;
int max_delay = 0;
list_for_each_entry(pdev, &bus->devices, bus_list) {
if (!pdev->imm_ready)
min_delay = 0;
else if (pdev->d3cold_delay < min_delay)
min_delay = pdev->d3cold_delay;
if (pdev->d3cold_delay > max_delay)
max_delay = pdev->d3cold_delay;
}
return max(min_delay, max_delay);
}
/*
* wait_for_downstream_link - Wait for the downstream link to be established
* @pdev: PCIe port whose downstream link is waited for
*
* Handle delays according to PCIe 4.0 section 6.6.1 before configuration
* access to the downstream component is permitted.
*
* This blocks PCI core resume of the hierarchy below this port until the
* link is trained. Should be called before resuming port services to
* prevent pciehp from starting to tear down the hierarchy too soon.
*/
static void wait_for_downstream_link(struct pci_dev *pdev)
{
int delay;
if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
return;
if (pci_dev_is_disconnected(pdev))
return;
if (!pdev->subordinate || list_empty(&pdev->subordinate->devices) ||
!pdev->bridge_d3)
return;
delay = get_downstream_delay(pdev->subordinate);
if (!delay)
return;
dev_dbg(&pdev->dev, "waiting %d ms for downstream link\n", delay);
/*
* If the downstream port does not support speeds greater than 5 GT/s,
* we need to wait 100 ms. For higher speeds (gen3) we need to wait
* first for the data link layer to become active.
*/
if (pcie_get_speed_cap(pdev) <= PCIE_SPEED_5_0GT)
msleep(delay);
else
pcie_wait_for_link_delay(pdev, true, delay);
}
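
A worked example for get_downstream_delay() (illustrative numbers, not from the patch):

	/* One child without Immediate Readiness, default d3cold_delay = 100:
	 *   min_delay = 0, max_delay = 100 -> wait max(0, 100) = 100 ms.
	 * Every child with Immediate Readiness and d3cold_delay = 0:
	 *   min_delay = 0, max_delay = 0 -> no wait at all. */
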
/**
* pcie_port_device_suspend - suspend port services associated with a PCIe port
* @dev: PCI Express port to handle
@ -391,6 +453,8 @@ int pcie_port_device_suspend(struct device *dev)
int pcie_port_device_resume_noirq(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
wait_for_downstream_link(to_pci_dev(dev));
return device_for_each_child(dev, &off, pm_iter);
}
@ -421,6 +485,8 @@ int pcie_port_device_runtime_suspend(struct device *dev)
int pcie_port_device_runtime_resume(struct device *dev)
{
size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
wait_for_downstream_link(to_pci_dev(dev));
return device_for_each_child(dev, &off, pm_iter);
}
#endif /* PM */


@ -694,6 +694,7 @@ static int mlxreg_hotplug_remove(struct platform_device *pdev)
/* Clean interrupts setup. */
mlxreg_hotplug_unset_irq(priv);
devm_free_irq(&pdev->dev, priv->irq, priv);
return 0;
}


@ -65,10 +65,12 @@ static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
static struct quirk_entry quirk_asus_unknown = {
.wapf = 0,
.wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_asus_q500a = {
.i8042_filter = asus_q500a_i8042_filter,
.wmi_backlight_set_devstate = true,
};
/*
@ -79,26 +81,32 @@ static struct quirk_entry quirk_asus_q500a = {
static struct quirk_entry quirk_asus_x55u = {
.wapf = 4,
.wmi_backlight_power = true,
.wmi_backlight_set_devstate = true,
.no_display_toggle = true,
};
static struct quirk_entry quirk_asus_wapf4 = {
.wapf = 4,
.wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_asus_x200ca = {
.wapf = 2,
.wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_asus_ux303ub = {
.wmi_backlight_native = true,
.wmi_backlight_set_devstate = true,
};
static struct quirk_entry quirk_asus_x550lb = {
.wmi_backlight_set_devstate = true,
.xusb2pr = 0x01D9,
};
static struct quirk_entry quirk_asus_forceals = {
.wmi_backlight_set_devstate = true,
.wmi_force_als_set = true,
};


@ -2146,7 +2146,7 @@ static int asus_wmi_add(struct platform_device *pdev)
err = asus_wmi_backlight_init(asus);
if (err && err != -ENODEV)
goto fail_backlight;
} else
} else if (asus->driver->quirks->wmi_backlight_set_devstate)
err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
if (asus_wmi_has_fnlock_key(asus)) {


@ -31,6 +31,7 @@ struct quirk_entry {
bool store_backlight_power;
bool wmi_backlight_power;
bool wmi_backlight_native;
bool wmi_backlight_set_devstate;
bool wmi_force_als_set;
int wapf;
/*

Some files were not shown because too many files have changed in this diff Show More