Merge remote-tracking branch 'torvalds/master' into perf/core

To pick up fixes.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

commit 70f0ba9f24
.mailmap (5 lines changed)
@@ -9,9 +9,6 @@
 #
 # Please keep this list dictionary sorted.
 #
-# This comment is parsed by git-shortlog:
-# repo-abbrev: /pub/scm/linux/kernel/git/
-#
 Aaron Durbin <adurbin@google.com>
 Adam Oldham <oldhamca@gmail.com>
 Adam Radford <aradford@gmail.com>
@@ -55,6 +52,8 @@ Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@gmail.com>
+Björn Töpel <bjorn@kernel.org> <bjorn.topel@intel.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon.dev@gmail.com>
 Boris Brezillon <bbrezillon@kernel.org> <b.brezillon@overkiz.com>
 Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
@@ -5,8 +5,8 @@ Description:
 		Provide a place in sysfs for the device link objects in the
 		kernel at any given time. The name of a device link directory,
 		denoted as ... above, is of the form <supplier>--<consumer>
-		where <supplier> is the supplier device name and <consumer> is
-		the consumer device name.
+		where <supplier> is the supplier bus:device name and <consumer>
+		is the consumer bus:device name.
 
 What:		/sys/class/devlink/.../auto_remove_on
 Date:		May 2020

@@ -4,5 +4,6 @@ Contact: Saravana Kannan <saravanak@google.com>
 Description:
 		The /sys/devices/.../consumer:<consumer> are symlinks to device
 		links where this device is the supplier. <consumer> denotes the
-		name of the consumer in that device link. There can be zero or
-		more of these symlinks for a given device.
+		name of the consumer in that device link and is of the form
+		bus:device name. There can be zero or more of these symlinks
+		for a given device.

@@ -4,5 +4,6 @@ Contact: Saravana Kannan <saravanak@google.com>
 Description:
 		The /sys/devices/.../supplier:<supplier> are symlinks to device
 		links where this device is the consumer. <supplier> denotes the
-		name of the supplier in that device link. There can be zero or
-		more of these symlinks for a given device.
+		name of the supplier in that device link and is of the form
+		bus:device name. There can be zero or more of these symlinks
+		for a given device.
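For context, device links are created from driver code with device_link_add(); the sysfs directories the hunks above rename are the ones that appear per link. A minimal sketch (the function and both device pointers are hypothetical, only device_link_add() and DL_FLAG_STATELESS are real kernel API):

    #include <linux/device.h>

    /*
     * Hypothetical sketch: create a device link from a consumer driver.
     * After this change the directory under /sys/class/devlink/ is named
     * "<supplier-bus>:<supplier-dev>--<consumer-bus>:<consumer-dev>"
     * instead of using bare device names.
     */
    static int example_link_supplier(struct device *consumer,
                                     struct device *supplier)
    {
            struct device_link *link;

            link = device_link_add(consumer, supplier, DL_FLAG_STATELESS);
            if (!link)
                    return -EINVAL;

            return 0;
    }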
@@ -916,21 +916,25 @@ Date:		September 2014
 Contact:	Subhash Jadavani <subhashj@codeaurora.org>
 Description:	This entry could be used to set or show the UFS device
 		runtime power management level. The current driver
-		implementation supports 6 levels with next target states:
+		implementation supports 7 levels with next target states:
 
 		==  ====================================================
-		0   an UFS device will stay active, an UIC link will
+		0   UFS device will stay active, UIC link will
 		    stay active
-		1   an UFS device will stay active, an UIC link will
+		1   UFS device will stay active, UIC link will
 		    hibernate
-		2   an UFS device will moved to sleep, an UIC link will
+		2   UFS device will be moved to sleep, UIC link will
 		    stay active
-		3   an UFS device will moved to sleep, an UIC link will
+		3   UFS device will be moved to sleep, UIC link will
 		    hibernate
-		4   an UFS device will be powered off, an UIC link will
+		4   UFS device will be powered off, UIC link will
 		    hibernate
-		5   an UFS device will be powered off, an UIC link will
+		5   UFS device will be powered off, UIC link will
 		    be powered off
+		6   UFS device will be moved to deep sleep, UIC link
+		    will be powered off. Note, deep sleep might not be
+		    supported in which case this value will not be
+		    accepted
 		==  ====================================================
 
 What:		/sys/bus/platform/drivers/ufshcd/*/rpm_target_dev_state
@@ -954,21 +958,25 @@ Date:		September 2014
 Contact:	Subhash Jadavani <subhashj@codeaurora.org>
 Description:	This entry could be used to set or show the UFS device
 		system power management level. The current driver
-		implementation supports 6 levels with next target states:
+		implementation supports 7 levels with next target states:
 
 		==  ====================================================
-		0   an UFS device will stay active, an UIC link will
+		0   UFS device will stay active, UIC link will
 		    stay active
-		1   an UFS device will stay active, an UIC link will
+		1   UFS device will stay active, UIC link will
 		    hibernate
-		2   an UFS device will moved to sleep, an UIC link will
+		2   UFS device will be moved to sleep, UIC link will
 		    stay active
-		3   an UFS device will moved to sleep, an UIC link will
+		3   UFS device will be moved to sleep, UIC link will
 		    hibernate
-		4   an UFS device will be powered off, an UIC link will
+		4   UFS device will be powered off, UIC link will
 		    hibernate
-		5   an UFS device will be powered off, an UIC link will
+		5   UFS device will be powered off, UIC link will
 		    be powered off
+		6   UFS device will be moved to deep sleep, UIC link
+		    will be powered off. Note, deep sleep might not be
+		    supported in which case this value will not be
+		    accepted
 		==  ====================================================
 
 What:		/sys/bus/platform/drivers/ufshcd/*/spm_target_dev_state

@@ -177,14 +177,20 @@ bitmap_flush_interval:number
 	The bitmap flush interval in milliseconds. The metadata buffers
 	are synchronized when this interval expires.
 
+allow_discards
+	Allow block discard requests (a.k.a. TRIM) for the integrity device.
+	Discards are only allowed to devices using internal hash.
+
 fix_padding
 	Use a smaller padding of the tag area that is more
 	space-efficient. If this option is not present, large padding is
 	used - that is for compatibility with older kernels.
 
-allow_discards
-	Allow block discard requests (a.k.a. TRIM) for the integrity device.
-	Discards are only allowed to devices using internal hash.
+legacy_recalculate
+	Allow recalculating of volumes with HMAC keys. This is disabled by
+	default for security reasons - an attacker could modify the volume,
+	set recalc_sector to zero, and the kernel would not detect the
+	modification.
 
 The journal mode (D/J), buffer_sectors, journal_watermark, commit_time and
 allow_discards can be changed when reloading the target (load an inactive

@@ -160,29 +160,14 @@ intended for use in production as a security mitigation. Therefore it supports
 boot parameters that allow to disable KASAN competely or otherwise control
 particular KASAN features.
 
-The things that can be controlled are:
+- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
 
-1. Whether KASAN is enabled at all.
-2. Whether KASAN collects and saves alloc/free stacks.
-3. Whether KASAN panics on a detected bug or not.
+- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
+  traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
+  ``off``).
 
-The ``kasan.mode`` boot parameter allows to choose one of three main modes:
-
-- ``kasan.mode=off`` - KASAN is disabled, no tag checks are performed
-- ``kasan.mode=prod`` - only essential production features are enabled
-- ``kasan.mode=full`` - all KASAN features are enabled
-
-The chosen mode provides default control values for the features mentioned
-above. However it's also possible to override the default values by providing:
-
-- ``kasan.stacktrace=off`` or ``=on`` - enable alloc/free stack collection
-                                        (default: ``on`` for ``mode=full``,
-                                         otherwise ``off``)
-- ``kasan.fault=report`` or ``=panic`` - only print KASAN report or also panic
-                                        (default: ``report``)
-
-If ``kasan.mode`` parameter is not provided, it defaults to ``full`` when
-``CONFIG_DEBUG_KERNEL`` is enabled, and to ``prod`` otherwise.
+- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
+  report or also panic the kernel (default: ``report``).
 
 For developers
 ~~~~~~~~~~~~~~
@@ -522,6 +522,63 @@ There's more boilerplate involved, but it can:
   * E.g. if we wanted to also test ``sha256sum``, we could add a ``sha256``
     field and reuse ``cases``.
 
+* be converted to a "parameterized test", see below.
+
+Parameterized Testing
+~~~~~~~~~~~~~~~~~~~~~
+
+The table-driven testing pattern is common enough that KUnit has special
+support for it.
+
+Reusing the same ``cases`` array from above, we can write the test as a
+"parameterized test" with the following.
+
+.. code-block:: c
+
+	// This is copy-pasted from above.
+	struct sha1_test_case {
+		const char *str;
+		const char *sha1;
+	};
+	struct sha1_test_case cases[] = {
+		{
+			.str = "hello world",
+			.sha1 = "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed",
+		},
+		{
+			.str = "hello world!",
+			.sha1 = "430ce34d020724ed75a196dfc2ad67c77772d169",
+		},
+	};
+
+	// Need a helper function to generate a name for each test case.
+	static void case_to_desc(const struct sha1_test_case *t, char *desc)
+	{
+		strcpy(desc, t->str);
+	}
+	// Creates `sha1_gen_params()` to iterate over `cases`.
+	KUNIT_ARRAY_PARAM(sha1, cases, case_to_desc);
+
+	// Looks no different from a normal test.
+	static void sha1_test(struct kunit *test)
+	{
+		// This function can just contain the body of the for-loop.
+		// The former `cases[i]` is accessible under test->param_value.
+		char out[40];
+		struct sha1_test_case *test_param = (struct sha1_test_case *)(test->param_value);
+
+		sha1sum(test_param->str, out);
+		KUNIT_EXPECT_STREQ_MSG(test, (char *)out, test_param->sha1,
+				       "sha1sum(%s)", test_param->str);
+	}
+
+	// Instead of KUNIT_CASE, we use KUNIT_CASE_PARAM and pass in the
+	// function declared by KUNIT_ARRAY_PARAM.
+	static struct kunit_case sha1_test_cases[] = {
+		KUNIT_CASE_PARAM(sha1_test, sha1_gen_params),
+		{}
+	};
+
 .. _kunit-on-non-uml:
 
 KUnit on non-UML architectures
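The ``KUNIT_ARRAY_PARAM(sha1, cases, case_to_desc)`` line in the new doc text works by generating a parameter-generator function. A rough sketch of the shape of what gets generated — an illustration of the pattern, not the macro's exact definition:

    // Approximate expansion of KUNIT_ARRAY_PARAM(sha1, cases, case_to_desc):
    // a generator that walks `cases` and returns NULL when exhausted. KUnit
    // calls it once per parameterized run with the previous parameter value.
    static const void *sha1_gen_params(const void *prev, char *desc)
    {
            const struct sha1_test_case *next;

            next = prev ? (const struct sha1_test_case *)prev + 1 : cases;
            if (next - cases < ARRAY_SIZE(cases)) {
                    case_to_desc(next, desc);   /* name shown for this case */
                    return next;
            }
            return NULL;   /* no more parameters; the run ends */
    }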
@@ -16,8 +16,8 @@ description:
 properties:
   compatible:
     enum:
-      - bosch,bmc150
-      - bosch,bmi055
+      - bosch,bmc150_accel
+      - bosch,bmi055_accel
       - bosch,bma255
       - bosch,bma250e
      - bosch,bma222

@@ -534,3 +534,6 @@ offload. Hence, TLS TX device feature flag requires TX csum offload being set.
 Disabling the latter implies clearing the former. Disabling TX checksum offload
 should not affect old connections, and drivers should make sure checksum
 calculation does not break for them.
+Similarly, device-offloaded TLS decryption implies doing RXCSUM. If the user
+does not want to enable RX csum offload, TLS RX device feature is disabled
+as well.
@@ -360,10 +360,9 @@ since the last call to this ioctl.  Bit 0 is the first page in the
 memory slot.  Ensure the entire structure is cleared to avoid padding
 issues.
 
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of slot field specifies
+the address space for which you want to return the dirty bitmap. See
+KVM_SET_USER_MEMORY_REGION for details on the usage of slot field.
 
 The bits in the dirty bitmap are cleared before the ioctl returns, unless
 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled.  For more information,
@@ -1281,6 +1280,9 @@ field userspace_addr, which must point at user addressable memory for
 the entire memory slot size.  Any object may back this memory, including
 anonymous memory, ordinary files, and hugetlbfs.
 
+On architectures that support a form of address tagging, userspace_addr must
+be an untagged address.
+
 It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
 be identical.  This allows large pages in the guest to be backed by large
 pages in the host.
@@ -1333,7 +1335,7 @@ documentation when it pops into existence).
 
 :Capability: KVM_CAP_ENABLE_CAP_VM
 :Architectures: all
-:Type: vcpu ioctl
+:Type: vm ioctl
 :Parameters: struct kvm_enable_cap (in)
 :Returns: 0 on success; -1 on error
 
@@ -4432,7 +4434,7 @@ to I/O ports.
 :Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
 :Architectures: x86, arm, arm64, mips
 :Type: vm ioctl
-:Parameters: struct kvm_dirty_log (in)
+:Parameters: struct kvm_clear_dirty_log (in)
 :Returns: 0 on success, -1 on error
 
 ::
@@ -4459,10 +4461,9 @@ in KVM's dirty bitmap, and dirty tracking is re-enabled for that page
 (for example via write-protection, or by clearing the dirty bit in
 a page table entry).
 
-If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 specifies
-the address space for which you want to return the dirty bitmap.
-They must be less than the value that KVM_CHECK_EXTENSION returns for
-the KVM_CAP_MULTI_ADDRESS_SPACE capability.
+If KVM_CAP_MULTI_ADDRESS_SPACE is available, bits 16-31 of slot field specifies
+the address space for which you want to clear the dirty status. See
+KVM_SET_USER_MEMORY_REGION for details on the usage of slot field.
 
 This ioctl is mostly useful when KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
 is enabled; for more information, see the description of the capability.
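As a usage illustration of the slot-field encoding described above — a hypothetical userspace sketch, not taken from the commit (the vm_fd and bitmap arguments are assumed to be set up elsewhere):

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Fetch the dirty bitmap for memslot 3 in address space 1 (e.g. SMM on
     * x86): per the updated wording, bits 16-31 of the slot field select the
     * address space. */
    static int get_dirty_log(int vm_fd, void *bitmap)
    {
            struct kvm_dirty_log log;

            memset(&log, 0, sizeof(log));   /* clear to avoid padding issues, as the doc advises */
            log.slot = (1u << 16) | 3;      /* address space 1, slot 3 */
            log.dirty_bitmap = bitmap;
            return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
    }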
MAINTAINERS (24 lines changed)

@@ -3334,7 +3334,7 @@ F: arch/riscv/net/
 X:	arch/riscv/net/bpf_jit_comp64.c
 
 BPF JIT for RISC-V (64-bit)
-M:	Björn Töpel <bjorn.topel@gmail.com>
+M:	Björn Töpel <bjorn@kernel.org>
 L:	netdev@vger.kernel.org
 L:	bpf@vger.kernel.org
 S:	Maintained
@@ -3879,7 +3879,7 @@ F: Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
 F:	drivers/mtd/nand/raw/cadence-nand-controller.c
 
 CADENCE USB3 DRD IP DRIVER
-M:	Peter Chen <peter.chen@nxp.com>
+M:	Peter Chen <peter.chen@kernel.org>
 M:	Pawel Laszczak <pawell@cadence.com>
 R:	Roger Quadros <rogerq@kernel.org>
 R:	Aswath Govindraju <a-govindraju@ti.com>
@@ -4161,7 +4161,7 @@ S: Maintained
 F:	Documentation/translations/zh_CN/
 
 CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
-M:	Peter Chen <Peter.Chen@nxp.com>
+M:	Peter Chen <peter.chen@kernel.org>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -4311,7 +4311,9 @@ W: https://clangbuiltlinux.github.io/
 B:	https://github.com/ClangBuiltLinux/linux/issues
 C:	irc://chat.freenode.net/clangbuiltlinux
 F:	Documentation/kbuild/llvm.rst
 F:	include/linux/compiler-clang.h
+F:	scripts/clang-tools/
 F:	scripts/clang-version.sh
+F:	scripts/lld-version.sh
 K:	\b(?i:clang|llvm)\b
 
@@ -14504,10 +14506,18 @@ S: Supported
 F:	drivers/crypto/qat/
 
 QCOM AUDIO (ASoC) DRIVERS
-M:	Patrick Lai <plai@codeaurora.org>
 M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 M:	Banajit Goswami <bgoswami@codeaurora.org>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Supported
+F:	sound/soc/codecs/lpass-va-macro.c
+F:	sound/soc/codecs/lpass-wsa-macro.*
+F:	sound/soc/codecs/msm8916-wcd-analog.c
+F:	sound/soc/codecs/msm8916-wcd-digital.c
+F:	sound/soc/codecs/wcd9335.*
+F:	sound/soc/codecs/wcd934x.c
+F:	sound/soc/codecs/wcd-clsh-v2.*
+F:	sound/soc/codecs/wsa881x.c
 F:	sound/soc/qcom/
 
 QCOM IPA DRIVER
@@ -16959,7 +16969,7 @@ M: Olivier Moysan <olivier.moysan@st.com>
 M:	Arnaud Pouliquen <arnaud.pouliquen@st.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:	Maintained
-F:	Documentation/devicetree/bindings/sound/st,stm32-*.txt
+F:	Documentation/devicetree/bindings/iio/adc/st,stm32-*.yaml
 F:	sound/soc/stm/
 
 STM32 TIMER/LPTIMER DRIVERS
@@ -18412,7 +18422,7 @@ F: Documentation/usb/ohci.rst
 F:	drivers/usb/host/ohci*
 
 USB OTG FSM (Finite State Machine)
-M:	Peter Chen <Peter.Chen@nxp.com>
+M:	Peter Chen <peter.chen@kernel.org>
 L:	linux-usb@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
@@ -19416,7 +19426,7 @@ F: drivers/net/ethernet/*/*/*xdp*
 K:	(?:\b|_)xdp(?:\b|_)
 
 XDP SOCKETS (AF_XDP)
-M:	Björn Töpel <bjorn.topel@intel.com>
+M:	Björn Töpel <bjorn@kernel.org>
 M:	Magnus Karlsson <magnus.karlsson@intel.com>
 R:	Jonathan Lemon <jonathan.lemon@gmail.com>
 L:	netdev@vger.kernel.org

Makefile (2 lines changed)

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -352,8 +352,8 @@ kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
 	unsigned long addr = instruction_pointer(regs);
 	struct kprobe *cur = kprobe_running();
 
-	if (cur && (kcb->kprobe_status == KPROBE_HIT_SS)
-	    && ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
+	if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+	    ((unsigned long)&cur->ainsn.api.insn[1] == addr)) {
 		kprobes_restore_local_irqflag(kcb, regs);
 		post_kprobe_handler(cur, kcb, regs);

@@ -1396,8 +1396,9 @@ static void cpu_init_hyp_mode(void)
 	 * Calculate the raw per-cpu offset without a translation from the
 	 * kernel's mapping to the linear mapping, and store it in tpidr_el2
 	 * so that we can use adr_l to access per-cpu variables in EL2.
+	 * Also drop the KASAN tag which gets in the way...
 	 */
-	params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+	params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
 			    (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
 
 	params->mair_el2 = read_sysreg(mair_el1);

@@ -77,12 +77,6 @@ static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
 			 cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
 }
 
-static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
-{
-	psci_forward(host_ctxt);
-	hyp_panic(); /* unreachable */
-}
-
 static unsigned int find_cpu_id(u64 mpidr)
 {
 	unsigned int i;
@@ -251,10 +245,13 @@ static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_
 	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
 	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
 		return psci_forward(host_ctxt);
+	/*
+	 * SYSTEM_OFF/RESET should not return according to the spec.
+	 * Allow it so as to stay robust to broken firmware.
+	 */
 	case PSCI_0_2_FN_SYSTEM_OFF:
 	case PSCI_0_2_FN_SYSTEM_RESET:
-		psci_forward_noreturn(host_ctxt);
-		unreachable();
+		return psci_forward(host_ctxt);
 	case PSCI_0_2_FN64_CPU_SUSPEND:
 		return psci_cpu_suspend(func_id, host_ctxt);
 	case PSCI_0_2_FN64_CPU_ON:

@@ -788,7 +788,7 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 {
 	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
 	u64 val, mask = 0;
-	int base, i;
+	int base, i, nr_events;
 
 	if (!pmceid1) {
 		val = read_sysreg(pmceid0_el0);
@@ -801,13 +801,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
 	if (!bmap)
 		return val;
 
+	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;
+
 	for (i = 0; i < 32; i += 8) {
 		u64 byte;
 
 		byte = bitmap_get_value8(bmap, base + i);
 		mask |= byte << i;
-		byte = bitmap_get_value8(bmap, 0x4000 + base + i);
-		mask |= byte << (32 + i);
+		if (nr_events >= (0x4000 + base + 32)) {
+			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
+			mask |= byte << (32 + i);
+		}
 	}
 
 	return val & mask;
@@ -43,6 +43,10 @@
  * 64bit interface.
  */
 
+#define reg_to_encoding(x)						\
+	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
+		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
+
 static bool read_from_write_only(struct kvm_vcpu *vcpu,
 				 struct sys_reg_params *params,
 				 const struct sys_reg_desc *r)
@@ -273,8 +277,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu,
 			  const struct sys_reg_desc *r)
 {
 	u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
-	u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1,
-			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+	u32 sr = reg_to_encoding(r);
 
 	if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) {
 		kvm_inject_undefined(vcpu);
@@ -590,6 +593,15 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
 }
 
+static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
+				   const struct sys_reg_desc *r)
+{
+	if (kvm_vcpu_has_pmu(vcpu))
+		return 0;
+
+	return REG_HIDDEN;
+}
+
 static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
 	u64 pmcr, val;
@@ -613,9 +625,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
 {
 	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
-	bool enabled = kvm_vcpu_has_pmu(vcpu);
+	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);
 
-	enabled &= (reg & flags) || vcpu_mode_priv(vcpu);
 	if (!enabled)
 		kvm_inject_undefined(vcpu);
 
@@ -900,11 +911,6 @@ static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			     const struct sys_reg_desc *r)
 {
-	if (!kvm_vcpu_has_pmu(vcpu)) {
-		kvm_inject_undefined(vcpu);
-		return false;
-	}
-
 	if (p->is_write) {
 		if (!vcpu_mode_priv(vcpu)) {
 			kvm_inject_undefined(vcpu);
@@ -921,10 +927,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }
 
-#define reg_to_encoding(x)						\
-	sys_reg((u32)(x)->Op0, (u32)(x)->Op1,				\
-		(u32)(x)->CRn, (u32)(x)->CRm, (u32)(x)->Op2)
-
 /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
 	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
@@ -936,15 +938,18 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
 	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
 
+#define PMU_SYS_REG(r)							\
+	SYS_DESC(r), .reset = reset_unknown, .visibility = pmu_visibility
+
 /* Macro to expand the PMEVCNTRn_EL0 register */
 #define PMU_PMEVCNTR_EL0(n)						\
-	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
-	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+	{ PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),				\
+	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }
 
 /* Macro to expand the PMEVTYPERn_EL0 register */
 #define PMU_PMEVTYPER_EL0(n)						\
-	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
-	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+	{ PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),				\
+	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
 
 static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 			 const struct sys_reg_desc *r)
@@ -1020,8 +1025,7 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
 static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		       struct sys_reg_desc const *r, bool raz)
 {
-	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
-			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+	u32 id = reg_to_encoding(r);
 	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
 
 	if (id == SYS_ID_AA64PFR0_EL1) {
@@ -1062,8 +1066,7 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
 				  const struct sys_reg_desc *r)
 {
-	u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
-			 (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
+	u32 id = reg_to_encoding(r);
 
 	switch (id) {
 	case SYS_ID_AA64ZFR0_EL1:
@@ -1486,8 +1489,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
 
-	{ SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
-	{ SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
+	{ PMU_SYS_REG(SYS_PMINTENSET_EL1),
+	  .access = access_pminten, .reg = PMINTENSET_EL1 },
+	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
+	  .access = access_pminten, .reg = PMINTENSET_EL1 },
 
 	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
 	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
@@ -1526,23 +1531,36 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
 	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
 
-	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
-	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
-	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
-	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
-	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
-	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
-	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
-	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
-	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
+	{ PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
+	  .reset = reset_pmcr, .reg = PMCR_EL0 },
+	{ PMU_SYS_REG(SYS_PMCNTENSET_EL0),
+	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+	{ PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
+	  .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
+	{ PMU_SYS_REG(SYS_PMOVSCLR_EL0),
+	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
+	{ PMU_SYS_REG(SYS_PMSWINC_EL0),
+	  .access = access_pmswinc, .reg = PMSWINC_EL0 },
+	{ PMU_SYS_REG(SYS_PMSELR_EL0),
+	  .access = access_pmselr, .reg = PMSELR_EL0 },
+	{ PMU_SYS_REG(SYS_PMCEID0_EL0),
+	  .access = access_pmceid, .reset = NULL },
+	{ PMU_SYS_REG(SYS_PMCEID1_EL0),
+	  .access = access_pmceid, .reset = NULL },
+	{ PMU_SYS_REG(SYS_PMCCNTR_EL0),
+	  .access = access_pmu_evcntr, .reg = PMCCNTR_EL0 },
+	{ PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
+	  .access = access_pmu_evtyper, .reset = NULL },
+	{ PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
+	  .access = access_pmu_evcntr, .reset = NULL },
 	/*
 	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
 	 */
-	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
-	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
+	{ PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
+	  .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
+	{ PMU_SYS_REG(SYS_PMOVSSET_EL0),
+	  .access = access_pmovs, .reg = PMOVSSET_EL0 },
 
 	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
 	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
@@ -1694,7 +1712,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
 	 * in 32bit mode. Here we choose to reset it as zero for consistency.
 	 */
-	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+	{ PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
+	  .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
 
 	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
 	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },

@@ -709,10 +709,11 @@ static int do_tag_check_fault(unsigned long far, unsigned int esr,
 			      struct pt_regs *regs)
 {
 	/*
-	 * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN for tag
-	 * check faults. Mask them out now so that userspace doesn't see them.
+	 * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
+	 * for tag check faults. Set them to corresponding bits in the untagged
+	 * address.
 	 */
-	far &= (1UL << 60) - 1;
+	far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
 	do_bad_area(far, esr, regs);
 	return 0;
 }
@@ -51,6 +51,7 @@ extern void kmap_flush_tlb(unsigned long addr);
 
 #define flush_cache_kmaps()	BUG_ON(cpu_has_dc_aliases)
 
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	set_pte(ptep, ptev)
 #define arch_kmap_local_post_map(vaddr, pteval)	local_flush_tlb_one(vaddr)
 #define arch_kmap_local_post_unmap(vaddr)	local_flush_tlb_one(vaddr)

@@ -31,7 +31,7 @@
 void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
 #define iounmap iounmap
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
 
 #include <asm-generic/io.h>

@@ -77,7 +77,7 @@ void __iomem *__ref ioremap(phys_addr_t addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap);
 
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
 {
 	/* If the page is from the fixmap pool then we just clear out
 	 * the fixmap mapping.
@@ -202,9 +202,8 @@ config PREFETCH
 	depends on PA8X00 || PA7200
 
 config MLONGCALLS
-	bool "Enable the -mlong-calls compiler option for big kernels"
-	default y if !MODULES || UBSAN || FTRACE
-	default n
+	def_bool y if !MODULES || UBSAN || FTRACE
+	bool "Enable the -mlong-calls compiler option for big kernels" if MODULES && !UBSAN && !FTRACE
 	depends on PA8X00
 	help
 	  If you configure the kernel to include many drivers built-in instead

@@ -47,7 +47,4 @@ extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);
 extern int cpu_claim_irq(unsigned int irq, struct irq_chip *, void *);
 extern int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest);
 
-/* soft power switch support (power.c) */
-extern struct tasklet_struct power_tasklet;
-
 #endif /* _ASM_PARISC_IRQ_H */

@@ -997,10 +997,17 @@ intr_do_preempt:
 	bb,<,n	%r20, 31 - PSW_SM_I, intr_restore
 	nop
 
+	/* ssm PSW_SM_I done later in intr_restore */
+#ifdef CONFIG_MLONGCALLS
+	ldil	L%intr_restore, %r2
+	load32	preempt_schedule_irq, %r1
+	bv	%r0(%r1)
+	ldo	R%intr_restore(%r2), %r2
+#else
+	ldil	L%intr_restore, %r1
 	BL	preempt_schedule_irq, %r2
-	nop
-
-	b,n	intr_restore /* ssm PSW_SM_I done by intr_restore */
+	ldo	R%intr_restore(%r1), %r2
+#endif
 #endif /* CONFIG_PREEMPTION */
 
 /*
@@ -63,6 +63,12 @@
 	nop;								\
 	nop;
 
+#define SCV_ENTRY_FLUSH_SLOT						\
+	SCV_ENTRY_FLUSH_FIXUP_SECTION;					\
+	nop;								\
+	nop;								\
+	nop;
+
 /*
  * r10 must be free to use, r13 must be paca
  */
@@ -70,6 +76,13 @@
 	STF_ENTRY_BARRIER_SLOT;						\
 	ENTRY_FLUSH_SLOT
 
+/*
+ * r10, ctr must be free to use, r13 must be paca
+ */
+#define SCV_INTERRUPT_TO_KERNEL						\
+	STF_ENTRY_BARRIER_SLOT;						\
+	SCV_ENTRY_FLUSH_SLOT
+
 /*
  * Macros for annotating the expected destination of (h)rfid
  *

@@ -240,6 +240,14 @@ label##3:						\
 	FTR_ENTRY_OFFSET 957b-958b;			\
 	.popsection;
 
+#define SCV_ENTRY_FLUSH_FIXUP_SECTION			\
+957:							\
+	.pushsection __scv_entry_flush_fixup,"a";	\
+	.align 2;					\
+958:							\
+	FTR_ENTRY_OFFSET 957b-958b;			\
+	.popsection;
+
 #define RFI_FLUSH_FIXUP_SECTION				\
 951:							\
 	.pushsection __rfi_flush_fixup,"a";		\
@@ -273,10 +281,12 @@ label##3:						\
 
 extern long stf_barrier_fallback;
 extern long entry_flush_fallback;
+extern long scv_entry_flush_fallback;
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
 extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
 extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+extern long __start___scv_entry_flush_fixup, __stop___scv_entry_flush_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
 extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
 extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;

@@ -58,6 +58,8 @@ extern pte_t *pkmap_page_table;
 
 #define flush_cache_kmaps()	flush_cache_all()
 
+#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
+	__set_pte_at(mm, vaddr, ptep, ptev, 1)
 #define arch_kmap_local_post_map(vaddr, pteval)	\
 	local_flush_tlb_page(NULL, vaddr)
 #define arch_kmap_local_post_unmap(vaddr)	\

@@ -75,7 +75,7 @@ BEGIN_FTR_SECTION
 	bne	.Ltabort_syscall
 END_FTR_SECTION_IFSET(CPU_FTR_TM)
 #endif
-	INTERRUPT_TO_KERNEL
+	SCV_INTERRUPT_TO_KERNEL
 	mr	r10,r1
 	ld	r1,PACAKSAVE(r13)
 	std	r10,0(r1)

@@ -2993,6 +2993,25 @@ TRAMP_REAL_BEGIN(entry_flush_fallback)
 	ld	r11,PACA_EXRFI+EX_R11(r13)
 	blr
 
+/*
+ * The SCV entry flush happens with interrupts enabled, so it must disable
+ * to prevent EXRFI being clobbered by NMIs (e.g., soft_nmi_common). r10
+ * (containing LR) does not need to be preserved here because scv entry
+ * puts 0 in the pt_regs, CTR can be clobbered for the same reason.
+ */
+TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
+	li	r10,0
+	mtmsrd	r10,1
+	lbz	r10,PACAIRQHAPPENED(r13)
+	ori	r10,r10,PACA_IRQ_HARD_DIS
+	stb	r10,PACAIRQHAPPENED(r13)
+	std	r11,PACA_EXRFI+EX_R11(r13)
+	L1D_DISPLACEMENT_FLUSH
+	ld	r11,PACA_EXRFI+EX_R11(r13)
+	li	r10,MSR_RI
+	mtmsrd	r10,1
+	blr
+
 TRAMP_REAL_BEGIN(rfi_flush_fallback)
 	SET_SCRATCH0(r13);
 	GET_PACA(r13);

@@ -145,6 +145,13 @@ SECTIONS
 		__stop___entry_flush_fixup = .;
 	}
 
+	. = ALIGN(8);
+	__scv_entry_flush_fixup : AT(ADDR(__scv_entry_flush_fixup) - LOAD_OFFSET) {
+		__start___scv_entry_flush_fixup = .;
+		*(__scv_entry_flush_fixup)
+		__stop___scv_entry_flush_fixup = .;
+	}
+
 	. = ALIGN(8);
 	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
 		__start___stf_exit_barrier_fixup = .;

@@ -290,9 +290,6 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
 	long *start, *end;
 	int i;
 
-	start = PTRRELOC(&__start___entry_flush_fixup);
-	end = PTRRELOC(&__stop___entry_flush_fixup);
-
 	instrs[0] = 0x60000000; /* nop */
 	instrs[1] = 0x60000000; /* nop */
 	instrs[2] = 0x60000000; /* nop */
@@ -312,6 +309,8 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
 	if (types & L1D_FLUSH_MTTRIG)
 		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
 
+	start = PTRRELOC(&__start___entry_flush_fixup);
+	end = PTRRELOC(&__stop___entry_flush_fixup);
 	for (i = 0; start < end; start++, i++) {
 		dest = (void *)start + *start;
 
@@ -328,6 +327,25 @@ void do_entry_flush_fixups(enum l1d_flush_type types)
 		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
 	}
 
+	start = PTRRELOC(&__start___scv_entry_flush_fixup);
+	end = PTRRELOC(&__stop___scv_entry_flush_fixup);
+	for (; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+		patch_instruction((struct ppc_inst *)dest, ppc_inst(instrs[0]));
+
+		if (types == L1D_FLUSH_FALLBACK)
+			patch_branch((struct ppc_inst *)(dest + 1), (unsigned long)&scv_entry_flush_fallback,
+				     BRANCH_SET_LINK);
+		else
+			patch_instruction((struct ppc_inst *)(dest + 1), ppc_inst(instrs[1]));
+
+		patch_instruction((struct ppc_inst *)(dest + 2), ppc_inst(instrs[2]));
+	}
+
 	printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
 		(types == L1D_FLUSH_NONE) ? "no" :
 		(types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
@@ -29,7 +29,6 @@ config SUPERH
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ARCH_TRACEHOOK
-	select HAVE_COPY_THREAD_TLS
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DYNAMIC_FTRACE

@@ -11,7 +11,6 @@
 #include <linux/sched.h>
 #include <linux/time.h>
 #include <linux/bcd.h>
 #include <linux/rtc.h>
 #include <linux/spinlock.h>
 #include <linux/io.h>
-#include <linux/rtc.h>
@@ -27,13 +27,12 @@ CONFIG_NETFILTER=y
 CONFIG_ATALK=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_OFFBOARD=y
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_AEC62XX=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_ATP867X=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_SCSI_MULTI_LUN=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_MD=m

@@ -20,8 +20,6 @@ CONFIG_IP_PNP=y
 # CONFIG_IPV6 is not set
 # CONFIG_FW_LOADER is not set
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y
 CONFIG_SMC91X=y

@@ -44,16 +44,14 @@ CONFIG_NET_SCHED=y
 CONFIG_PARPORT=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
-CONFIG_BLK_DEV_GENERIC=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_CHR_DEV_SG=y
 CONFIG_SCSI_SPI_ATTRS=y
 CONFIG_SCSI_FC_ATTRS=y
+CONFIG_ATA=y
+CONFIG_ATA_GENERIC=y
+CONFIG_PATA_PLATFORM=y
 CONFIG_MD=y
 CONFIG_BLK_DEV_DM=y
 CONFIG_NETDEVICES=y

@@ -116,9 +116,6 @@ CONFIG_MTD_UBI_GLUEBI=m
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_CRYPTOLOOP=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=y
-CONFIG_BLK_DEV_PLATFORM=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_BLK_DEV_SR=y
 CONFIG_SCSI_MULTI_LUN=y

@@ -29,7 +29,6 @@ CONFIG_MTD_BLOCK=y
 CONFIG_MTD_CFI=y
 CONFIG_MTD_CFI_AMDSTD=y
 CONFIG_MTD_ROM=y
-CONFIG_IDE=y
 CONFIG_SCSI=y
 CONFIG_NETDEVICES=y
 CONFIG_NET_ETHERNET=y

@@ -39,9 +39,6 @@ CONFIG_IP_PNP_RARP=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_NBD=y
 CONFIG_BLK_DEV_RAM=y
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_IDETAPE=m
 CONFIG_SCSI=m
 CONFIG_BLK_DEV_SD=m
 CONFIG_BLK_DEV_SR=m
@@ -63,8 +63,7 @@ config PVR2_DMA
 
 config G2_DMA
 	tristate "G2 Bus DMA support"
-	depends on SH_DREAMCAST
-	select SH_DMA_API
+	depends on SH_DREAMCAST && SH_DMA_API
 	help
 	  This enables support for the DMA controller for the Dreamcast's
 	  G2 bus. Drivers that want this will generally enable this on

@@ -16,7 +16,6 @@
 #include <cpu/gpio.h>
 #endif
 
-#define ARCH_NR_GPIOS 512
 #include <asm-generic/gpio.h>
 
 #ifdef CONFIG_GPIOLIB

@@ -14,7 +14,6 @@
 #include <cpu/mmu_context.h>
 #include <asm/page.h>
 #include <asm/cache.h>
-#include <asm/thread_info.h>
 
 ! NOTE:
 ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address

@@ -105,7 +105,7 @@ config VSYSCALL
 	  (the default value) say Y.
 
 config NUMA
-	bool "Non Uniform Memory Access (NUMA) Support"
+	bool "Non-Uniform Memory Access (NUMA) Support"
 	depends on MMU && SYS_SUPPORTS_NUMA
 	select ARCH_WANT_NUMA_VARIABLE_LOCALITY
 	default n
@@ -26,7 +26,7 @@
 #include <asm/processor.h>
 #include <asm/mmu_context.h>
 
-static int asids_seq_show(struct seq_file *file, void *iter)
+static int asids_debugfs_show(struct seq_file *file, void *iter)
 {
 	struct task_struct *p;
 
@@ -48,18 +48,7 @@ static int asids_seq_show(struct seq_file *file, void *iter)
 	return 0;
 }
 
-static int asids_debugfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, asids_seq_show, inode->i_private);
-}
-
-static const struct file_operations asids_debugfs_fops = {
-	.owner		= THIS_MODULE,
-	.open		= asids_debugfs_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(asids_debugfs);
 
 static int __init asids_debugfs_init(void)
 {

@@ -22,7 +22,7 @@ enum cache_type {
 	CACHE_TYPE_UNIFIED,
 };
 
-static int cache_seq_show(struct seq_file *file, void *iter)
+static int cache_debugfs_show(struct seq_file *file, void *iter)
 {
 	unsigned int cache_type = (unsigned int)file->private;
 	struct cache_info *cache;
@@ -94,18 +94,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 	return 0;
 }
 
-static int cache_debugfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, cache_seq_show, inode->i_private);
-}
-
-static const struct file_operations cache_debugfs_fops = {
-	.owner		= THIS_MODULE,
-	.open		= cache_debugfs_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(cache_debugfs);
 
 static int __init cache_debugfs_init(void)
 {

@@ -812,7 +812,7 @@ bool __in_29bit_mode(void)
 	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
 }
 
-static int pmb_seq_show(struct seq_file *file, void *iter)
+static int pmb_debugfs_show(struct seq_file *file, void *iter)
 {
 	int i;
 
@@ -846,18 +846,7 @@ static int pmb_seq_show(struct seq_file *file, void *iter)
 	return 0;
 }
 
-static int pmb_debugfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, pmb_seq_show, NULL);
-}
-
-static const struct file_operations pmb_debugfs_fops = {
-	.owner		= THIS_MODULE,
-	.open		= pmb_debugfs_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(pmb_debugfs);
 
 static int __init pmb_debugfs_init(void)
 {
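The three conversions above all follow the same seq_file pattern: DEFINE_SHOW_ATTRIBUTE(name) from <linux/seq_file.h> generates the name##_open() wrapper and a complete name##_fops from a name##_show() callback, replacing the hand-rolled boilerplate deleted here. A minimal illustration with a hypothetical "foo" attribute:

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    /* A hypothetical show callback; the _show suffix is what the macro keys on. */
    static int foo_show(struct seq_file *m, void *unused)
    {
            seq_puts(m, "hello\n");
            return 0;
    }
    /* Generates foo_open() (via single_open) and a const struct
     * file_operations foo_fops wired to seq_read/seq_lseek/single_release. */
    DEFINE_SHOW_ATTRIBUTE(foo);

    static int __init foo_debugfs_init(void)
    {
            debugfs_create_file("foo", 0444, NULL, NULL, &foo_fops);
            return 0;
    }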
@@ -50,10 +50,11 @@ extern pte_t *pkmap_page_table;
 
 #define flush_cache_kmaps()	flush_cache_all()
 
-/* FIXME: Use __flush_tlb_one(vaddr) instead of flush_cache_all() -- Anton */
-#define arch_kmap_local_post_map(vaddr, pteval)	flush_cache_all()
-#define arch_kmap_local_post_unmap(vaddr)	flush_cache_all()
+/* FIXME: Use __flush_*_one(vaddr) instead of flush_*_all() -- Anton */
+#define arch_kmap_local_pre_map(vaddr, pteval)	flush_cache_all()
+#define arch_kmap_local_pre_unmap(vaddr)	flush_cache_all()
+#define arch_kmap_local_post_map(vaddr, pteval)	flush_tlb_all()
+#define arch_kmap_local_post_unmap(vaddr)	flush_tlb_all()
 
 #endif /* __KERNEL__ */

@@ -73,10 +73,8 @@ static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs,
 						  unsigned int nr)
 {
 	if (likely(nr < IA32_NR_syscalls)) {
-		instrumentation_begin();
 		nr = array_index_nospec(nr, IA32_NR_syscalls);
 		regs->ax = ia32_sys_call_table[nr](regs);
-		instrumentation_end();
 	}
 }
 
@@ -91,8 +89,11 @@ __visible noinstr void do_int80_syscall_32(struct pt_regs *regs)
 	 * or may not be necessary, but it matches the old asm behavior.
 	 */
 	nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+	instrumentation_begin();
 
 	do_syscall_32_irqs_on(regs, nr);
+
+	instrumentation_end();
 	syscall_exit_to_user_mode(regs);
 }
 
@@ -121,11 +122,12 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 		res = get_user(*(u32 *)&regs->bp,
 		       (u32 __user __force *)(unsigned long)(u32)regs->sp);
 	}
-	instrumentation_end();
 
 	if (res) {
 		/* User code screwed up. */
 		regs->ax = -EFAULT;
+
+		instrumentation_end();
 		syscall_exit_to_user_mode(regs);
 		return false;
 	}
@@ -135,6 +137,8 @@ static noinstr bool __do_fast_syscall_32(struct pt_regs *regs)
 
 	/* Now this is just like a normal syscall. */
 	do_syscall_32_irqs_on(regs, nr);
+
+	instrumentation_end();
 	syscall_exit_to_user_mode(regs);
 	return true;
 }
@@ -16,14 +16,25 @@
  * Use kernel_fpu_begin/end() if you intend to use FPU in kernel context. It
  * disables preemption so be careful if you intend to use it for long periods
  * of time.
- * If you intend to use the FPU in softirq you need to check first with
+ * If you intend to use the FPU in irq/softirq you need to check first with
  * irq_fpu_usable() if it is possible.
  */
-extern void kernel_fpu_begin(void);
+
+/* Kernel FPU states to initialize in kernel_fpu_begin_mask() */
+#define KFPU_387	_BITUL(0)	/* 387 state will be initialized */
+#define KFPU_MXCSR	_BITUL(1)	/* MXCSR will be initialized */
+
+extern void kernel_fpu_begin_mask(unsigned int kfpu_mask);
 extern void kernel_fpu_end(void);
 extern bool irq_fpu_usable(void);
 extern void fpregs_mark_activate(void);
 
+/* Code that is unaware of kernel_fpu_begin_mask() can use this */
+static inline void kernel_fpu_begin(void)
+{
+	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
+}
+
 /*
  * Use fpregs_lock() while editing CPU's FPU registers or fpu->state.
  * A context switch will (and softirq might) save CPU's FPU registers to
|
||||
|
||||
#define INTEL_FAM6_LAKEFIELD 0x8A
|
||||
#define INTEL_FAM6_ALDERLAKE 0x97
|
||||
#define INTEL_FAM6_ALDERLAKE_L 0x9A
|
||||
|
||||
/* "Small Core" Processors (Atom) */
|
||||
|
||||
|
@ -86,7 +86,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
|
||||
* think of extending them - you will be slapped with a stinking trout or a frozen
|
||||
* shark will reach you, wherever you are! You've been warned.
|
||||
*/
|
||||
static inline unsigned long long notrace __rdmsr(unsigned int msr)
|
||||
static __always_inline unsigned long long __rdmsr(unsigned int msr)
|
||||
{
|
||||
DECLARE_ARGS(val, low, high);
|
||||
|
||||
@ -98,7 +98,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
|
||||
return EAX_EDX_VAL(val, low, high);
|
||||
}
|
||||
|
||||
static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
|
||||
static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
|
||||
{
|
||||
asm volatile("1: wrmsr\n"
|
||||
"2:\n"
|
||||
|
@ -110,6 +110,8 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
|
||||
#define topology_die_id(cpu) (cpu_data(cpu).cpu_die_id)
|
||||
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
|
||||
|
||||
extern unsigned int __max_die_per_package;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
#define topology_die_cpumask(cpu) (per_cpu(cpu_die_map, cpu))
|
||||
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
|
||||
@ -118,8 +120,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
|
||||
extern unsigned int __max_logical_packages;
|
||||
#define topology_max_packages() (__max_logical_packages)
|
||||
|
||||
extern unsigned int __max_die_per_package;
|
||||
|
||||
static inline int topology_max_die_per_package(void)
|
||||
{
|
||||
return __max_die_per_package;
|
||||
|
@ -542,12 +542,12 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
|
||||
u32 ecx;
|
||||
|
||||
ecx = cpuid_ecx(0x8000001e);
|
||||
nodes_per_socket = ((ecx >> 8) & 7) + 1;
|
||||
__max_die_per_package = nodes_per_socket = ((ecx >> 8) & 7) + 1;
|
||||
} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
|
||||
u64 value;
|
||||
|
||||
rdmsrl(MSR_FAM10H_NODE_ID, value);
|
||||
nodes_per_socket = ((value >> 3) & 7) + 1;
|
||||
__max_die_per_package = nodes_per_socket = ((value >> 3) & 7) + 1;
|
||||
}
|
||||
|
||||
if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
|
||||
|
@ -1992,10 +1992,9 @@ static __always_inline void exc_machine_check_kernel(struct pt_regs *regs)
|
||||
* that out because it's an indirect call. Annotate it.
|
||||
*/
|
||||
instrumentation_begin();
|
||||
trace_hardirqs_off_finish();
|
||||
|
||||
machine_check_vector(regs);
|
||||
if (regs->flags & X86_EFLAGS_IF)
|
||||
trace_hardirqs_on_prepare();
|
||||
|
||||
instrumentation_end();
|
||||
irqentry_nmi_exit(regs, irq_state);
|
||||
}
|
||||
@ -2004,7 +2003,9 @@ static __always_inline void exc_machine_check_user(struct pt_regs *regs)
|
||||
{
|
||||
irqentry_enter_from_user_mode(regs);
|
||||
instrumentation_begin();
|
||||
|
||||
machine_check_vector(regs);
|
||||
|
||||
instrumentation_end();
|
||||
irqentry_exit_to_user_mode(regs);
|
||||
}
|
||||
|
@ -25,10 +25,10 @@
|
||||
#define BITS_SHIFT_NEXT_LEVEL(eax) ((eax) & 0x1f)
|
||||
#define LEVEL_MAX_SIBLINGS(ebx) ((ebx) & 0xffff)
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
unsigned int __max_die_per_package __read_mostly = 1;
|
||||
EXPORT_SYMBOL(__max_die_per_package);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/*
|
||||
* Check if given CPUID extended toplogy "leaf" is implemented
|
||||
*/
|
||||
|
@@ -121,7 +121,7 @@ int copy_fpregs_to_fpstate(struct fpu *fpu)
 }
 EXPORT_SYMBOL(copy_fpregs_to_fpstate);
 
-void kernel_fpu_begin(void)
+void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 {
 	preempt_disable();
 
@@ -141,13 +141,14 @@ void kernel_fpu_begin(void)
 	}
 	__cpu_invalidate_fpregs_state();
 
-	if (boot_cpu_has(X86_FEATURE_XMM))
+	/* Put sane initial values into the control registers. */
+	if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
 		ldmxcsr(MXCSR_DEFAULT);
 
-	if (boot_cpu_has(X86_FEATURE_FPU))
+	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
 		asm volatile ("fninit");
 }
-EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
 
 void kernel_fpu_end(void)
 {

@@ -660,17 +660,6 @@ static void __init trim_platform_memory_ranges(void)
 
 static void __init trim_bios_range(void)
 {
-	/*
-	 * A special case is the first 4Kb of memory;
-	 * This is a BIOS owned area, not kernel ram, but generally
-	 * not listed as such in the E820 table.
-	 *
-	 * This typically reserves additional memory (64KiB by default)
-	 * since some BIOSes are known to corrupt low memory. See the
-	 * Kconfig help text for X86_RESERVE_LOW.
-	 */
 	e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
 
 	/*
 	 * special case: Some BIOSes report the PC BIOS
 	 * area (640Kb -> 1Mb) as RAM even though it is not.
@@ -728,6 +717,15 @@ early_param("reservelow", parse_reservelow);
 
 static void __init trim_low_memory_range(void)
 {
+	/*
+	 * A special case is the first 4Kb of memory;
+	 * This is a BIOS owned area, not kernel ram, but generally
+	 * not listed as such in the E820 table.
+	 *
+	 * This typically reserves additional memory (64KiB by default)
+	 * since some BIOSes are known to corrupt low memory. See the
+	 * Kconfig help text for X86_RESERVE_LOW.
+	 */
 	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
 }

@@ -225,7 +225,7 @@ static inline u64 sev_es_rd_ghcb_msr(void)
 	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
 }
 
-static inline void sev_es_wr_ghcb_msr(u64 val)
+static __always_inline void sev_es_wr_ghcb_msr(u64 val)
 {
 	u32 low, high;
 
@@ -286,6 +286,12 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
 	u16 d2;
 	u8  d1;
 
+	/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+	if (!user_mode(ctxt->regs) && !access_ok(target, size)) {
+		memcpy(dst, buf, size);
+		return ES_OK;
+	}
+
 	switch (size) {
 	case 1:
 		memcpy(&d1, buf, 1);
@@ -335,6 +341,12 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
 	u16 d2;
 	u8  d1;
 
+	/* If instruction ran in kernel mode and the I/O buffer is in kernel space */
+	if (!user_mode(ctxt->regs) && !access_ok(s, size)) {
+		memcpy(buf, src, size);
+		return ES_OK;
+	}
+
 	switch (size) {
 	case 1:
 		if (get_user(d1, s))

@ -56,6 +56,7 @@
#include <linux/numa.h>
#include <linux/pgtable.h>
#include <linux/overflow.h>
#include <linux/syscore_ops.h>

#include <asm/acpi.h>
#include <asm/desc.h>

@ -2083,6 +2084,23 @@ static void init_counter_refs(void)
this_cpu_write(arch_prev_mperf, mperf);
}

#ifdef CONFIG_PM_SLEEP
static struct syscore_ops freq_invariance_syscore_ops = {
.resume = init_counter_refs,
};

static void register_freq_invariance_syscore_ops(void)
{
/* Bail out if registered already. */
if (freq_invariance_syscore_ops.node.prev)
return;

register_syscore_ops(&freq_invariance_syscore_ops);
}
#else
static inline void register_freq_invariance_syscore_ops(void) {}
#endif

static void init_freq_invariance(bool secondary, bool cppc_ready)
{
bool ret = false;

@ -2109,6 +2127,7 @@ static void init_freq_invariance(bool secondary, bool cppc_ready)
if (ret) {
init_counter_refs();
static_branch_enable(&arch_scale_freq_key);
register_freq_invariance_syscore_ops();
pr_info("Estimated ratio of average max frequency by base frequency (times 1024): %llu\n", arch_max_freq_ratio);
} else {
pr_debug("Couldn't determine max cpu frequency, necessary for scale-invariant accounting.\n")
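
APERF/MPERF stop counting across suspend, so the patch re-primes the per-CPU reference values from a syscore resume callback. For context, the general registration pattern looks like this; the example names are hypothetical:

#include <linux/syscore_ops.h>

static void example_resume(void)
{
	/* runs late in resume, on one CPU with interrupts disabled:
	 * re-read any hardware state that resets across suspend */
}

static struct syscore_ops example_syscore_ops = {
	.resume = example_resume,
};

static void example_register_once(void)
{
	/* node.prev is only non-NULL once the ops sit on the syscore
	 * list, which is what the "registered already" bail-out tests */
	if (example_syscore_ops.node.prev)
		return;
	register_syscore_ops(&example_syscore_ops);
}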

@ -9,6 +9,34 @@
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)

#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{ \
return vcpu->arch.regs[VCPU_REGS_##uname]; \
} \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \
unsigned long val) \
{ \
vcpu->arch.regs[VCPU_REGS_##uname] = val; \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8, R8)
BUILD_KVM_GPR_ACCESSORS(r9, R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{

@ -34,35 +62,6 @@ static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{ \
return vcpu->arch.regs[VCPU_REGS_##uname]; \
} \
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \
unsigned long val) \
{ \
vcpu->arch.regs[VCPU_REGS_##uname] = val; \
kvm_register_mark_dirty(vcpu, VCPU_REGS_##uname); \
}
BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rbx, RBX)
BUILD_KVM_GPR_ACCESSORS(rcx, RCX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
BUILD_KVM_GPR_ACCESSORS(rbp, RBP)
BUILD_KVM_GPR_ACCESSORS(rsi, RSI)
BUILD_KVM_GPR_ACCESSORS(rdi, RDI)
#ifdef CONFIG_X86_64
BUILD_KVM_GPR_ACCESSORS(r8, R8)
BUILD_KVM_GPR_ACCESSORS(r9, R9)
BUILD_KVM_GPR_ACCESSORS(r10, R10)
BUILD_KVM_GPR_ACCESSORS(r11, R11)
BUILD_KVM_GPR_ACCESSORS(r12, R12)
BUILD_KVM_GPR_ACCESSORS(r13, R13)
BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
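
The accessor macro moves above kvm_register_mark_dirty(), and the generated write helper no longer marks the register dirty. For reference, BUILD_KVM_GPR_ACCESSORS(rax, RAX) now expands to roughly:

static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs[VCPU_REGS_RAX];
}

static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
					  unsigned long val)
{
	vcpu->arch.regs[VCPU_REGS_RAX] = val;	/* no dirty tracking */
}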

@ -44,8 +44,15 @@
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

static inline u64 rsvd_bits(int s, int e)
static __always_inline u64 rsvd_bits(int s, int e)
{
BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

if (__builtin_constant_p(e))
BUILD_BUG_ON(e > 63);
else
e &= 63;

if (e < s)
return 0;
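
The hardened rsvd_bits() turns impossible constant ranges into build errors and clamps a variable upper bound to 63 so the shift further down cannot be undefined. The tail of the function is cut off in this hunk; assuming it builds the mask as ((2ULL << (e - s)) - 1) << s, a worked example:

/* Assumed mask construction (not shown in the hunk):
 *   rsvd_bits(s, e) = ((2ULL << (e - s)) - 1) << s
 *
 * Example: rsvd_bits(52, 63)
 *   (2ULL << 11) - 1 = 0xfff
 *   0xfff << 52      = 0xfff0000000000000   (bits 52..63 set)
 */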

@ -200,6 +200,9 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

if (WARN_ON(!is_guest_mode(vcpu)))
return true;

if (!nested_svm_vmrun_msrpm(svm)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror =

@ -1415,16 +1415,13 @@ static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
* to be returned:
* GPRs RAX, RBX, RCX, RDX
*
* Copy their values to the GHCB if they are dirty.
* Copy their values, even if they may not have been written during the
* VM-Exit. It's the guest's responsibility to not consume random data.
*/
if (kvm_register_is_dirty(vcpu, VCPU_REGS_RAX))
ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
if (kvm_register_is_dirty(vcpu, VCPU_REGS_RBX))
ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
if (kvm_register_is_dirty(vcpu, VCPU_REGS_RCX))
ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
if (kvm_register_is_dirty(vcpu, VCPU_REGS_RDX))
ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}

static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)

@ -3739,6 +3739,8 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

trace_kvm_entry(vcpu);

svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];

@ -3124,13 +3124,9 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
return 0;
}

static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_host_map *map;
struct page *page;
u64 hpa;

/*
* hv_evmcs may end up being not mapped after migration (when

@ -3153,6 +3149,17 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
}
}

return true;
}

static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_host_map *map;
struct page *page;
u64 hpa;

if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
/*
* Translate L1 physical address to host physical

@ -3221,6 +3228,18 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
else
exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);

return true;
}

static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
{
if (!nested_get_evmcs_page(vcpu))
return false;

if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
return false;

return true;
}

@ -6077,11 +6096,14 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
if (is_guest_mode(vcpu)) {
sync_vmcs02_to_vmcs12(vcpu, vmcs12);
sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
} else if (!vmx->nested.need_vmcs12_to_shadow_sync) {
if (vmx->nested.hv_evmcs)
copy_enlightened_to_vmcs12(vmx);
else if (enable_shadow_vmcs)
copy_shadow_to_vmcs12(vmx);
} else {
copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
if (!vmx->nested.need_vmcs12_to_shadow_sync) {
if (vmx->nested.hv_evmcs)
copy_enlightened_to_vmcs12(vmx);
else if (enable_shadow_vmcs)
copy_shadow_to_vmcs12(vmx);
}
}

BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);

@ -6602,7 +6624,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
.hv_timer_pending = nested_vmx_preemption_timer_pending,
.get_state = vmx_get_nested_state,
.set_state = vmx_set_nested_state,
.get_nested_state_pages = nested_get_vmcs12_pages,
.get_nested_state_pages = vmx_get_nested_state_pages,
.write_log_dirty = nested_vmx_write_pml_buffer,
.enable_evmcs = nested_enable_evmcs,
.get_evmcs_version = nested_get_evmcs_version,

@ -29,7 +29,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and intel_arch_events array */

@ -345,7 +345,9 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)

pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
x86_pmu.num_counters_gp);
eax.split.bit_width = min_t(int, eax.split.bit_width, x86_pmu.bit_width_gp);
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
eax.split.mask_length = min_t(int, eax.split.mask_length, x86_pmu.events_mask_len);
pmu->available_event_types = ~entry->ebx &
((1ull << eax.split.mask_length) - 1);

@ -355,6 +357,8 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_fixed_counters =
min_t(int, edx.split.num_counters_fixed,
x86_pmu.num_counters_fixed);
edx.split.bit_width_fixed = min_t(int,
edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << edx.split.bit_width_fixed) - 1;
}

@ -6653,6 +6653,8 @@ reenter_guest:
if (vmx->emulation_required)
return EXIT_FASTPATH_NONE;

trace_kvm_entry(vcpu);

if (vmx->ple_window_dirty) {
vmx->ple_window_dirty = false;
vmcs_write32(PLE_WINDOW, vmx->ple_window);

@ -105,6 +105,7 @@ static u64 __read_mostly cr4_reserved_bits = CR4_RESERVED_BITS;

static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
static void process_smi(struct kvm_vcpu *vcpu);
static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);

@ -4230,6 +4231,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
{
process_nmi(vcpu);

if (kvm_check_request(KVM_REQ_SMI, vcpu))
process_smi(vcpu);

/*
* In guest mode, payload delivery should be deferred,
* so that the L1 hypervisor can intercept #PF before

@ -8802,9 +8806,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)

if (kvm_request_pending(vcpu)) {
if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
;
else if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
r = 0;
goto out;
}

@ -8988,8 +8990,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops.request_immediate_exit(vcpu);
}

trace_kvm_entry(vcpu);

fpregs_assert_state_consistent();
if (test_thread_flag(TIF_NEED_FPU_LOAD))
switch_fpu_return();

@ -11556,6 +11556,7 @@ int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size,
}
EXPORT_SYMBOL_GPL(kvm_sev_es_string_io);

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_entry);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);

@ -26,6 +26,16 @@
#include <asm/fpu/api.h>
#include <asm/asm.h>

/*
* Use KFPU_387. MMX instructions are not affected by MXCSR,
* but both AMD and Intel documentation states that even integer MMX
* operations will result in #MF if an exception is pending in FCW.
*
* EMMS is not needed afterwards because, after calling kernel_fpu_end(),
* any subsequent user of the 387 stack will reinitialize it using
* KFPU_387.
*/

void *_mmx_memcpy(void *to, const void *from, size_t len)
{
void *p;

@ -37,7 +47,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
p = to;
i = len >> 6; /* len/64 */

kernel_fpu_begin();
kernel_fpu_begin_mask(KFPU_387);

__asm__ __volatile__ (
"1: prefetch (%0)\n" /* This set is 28 bytes */

@ -127,7 +137,7 @@ static void fast_clear_page(void *page)
{
int i;

kernel_fpu_begin();
kernel_fpu_begin_mask(KFPU_387);

__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :

@ -160,7 +170,7 @@ static void fast_copy_page(void *to, void *from)
{
int i;

kernel_fpu_begin();
kernel_fpu_begin_mask(KFPU_387);

/*
* maybe the prefetch stuff can go before the expensive fnsave...

@ -247,7 +257,7 @@ static void fast_clear_page(void *page)
{
int i;

kernel_fpu_begin();
kernel_fpu_begin_mask(KFPU_387);

__asm__ __volatile__ (
" pxor %%mm0, %%mm0\n" : :

@ -282,7 +292,7 @@ static void fast_copy_page(void *to, void *from)
{
int i;

kernel_fpu_begin();
kernel_fpu_begin_mask(KFPU_387);

__asm__ __volatile__ (
"1: prefetch (%0)\n"

@ -74,7 +74,9 @@ void __init xen_hvm_smp_init(void)
smp_ops.cpu_die = xen_hvm_cpu_die;

if (!xen_have_vector_callback) {
#ifdef CONFIG_PARAVIRT_SPINLOCKS
nopvspin = true;
#endif
return;
}

@ -356,7 +356,8 @@ int public_key_verify_signature(const struct public_key *pkey,
if (ret)
goto error_free_key;

if (strcmp(sig->pkey_algo, "sm2") == 0 && sig->data_size) {
if (sig->pkey_algo && strcmp(sig->pkey_algo, "sm2") == 0 &&
sig->data_size) {
ret = cert_sig_digest_update(sig, tfm);
if (ret)
goto error_free_key;

@ -586,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
if (!device)
return -EINVAL;

*device = NULL;

status = acpi_get_data_full(handle, acpi_scan_drop_device,
(void **)device, callback);
if (ACPI_FAILURE(status) || !*device) {

@ -208,6 +208,16 @@ int device_links_read_lock_held(void)
#endif
#endif /* !CONFIG_SRCU */

static bool device_is_ancestor(struct device *dev, struct device *target)
{
while (target->parent) {
target = target->parent;
if (dev == target)
return true;
}
return false;
}

/**
* device_is_dependent - Check if one device depends on another one
* @dev: Device to check dependencies for.

@ -221,7 +231,12 @@ int device_is_dependent(struct device *dev, void *target)
struct device_link *link;
int ret;

if (dev == target)
/*
* The "ancestors" check is needed to catch the case when the target
* device has not been completely initialized yet and it is still
* missing from the list of children of its parent device.
*/
if (dev == target || device_is_ancestor(dev, target))
return 1;

ret = device_for_each_child(dev, target, device_is_dependent);

@ -456,7 +471,9 @@ static int devlink_add_symlinks(struct device *dev,
struct device *con = link->consumer;
char *buf;

len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
strlen(dev_bus_name(con)) + strlen(dev_name(con)));
len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf)

@ -470,12 +487,12 @@ static int devlink_add_symlinks(struct device *dev,
if (ret)
goto err_con;

snprintf(buf, len, "consumer:%s", dev_name(con));
snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_con_dev;

snprintf(buf, len, "supplier:%s", dev_name(sup));
snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf);
if (ret)
goto err_sup_dev;

@ -483,7 +500,7 @@ static int devlink_add_symlinks(struct device *dev,
goto out;

err_sup_dev:
snprintf(buf, len, "consumer:%s", dev_name(con));
snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
err_con_dev:
sysfs_remove_link(&link->link_dev.kobj, "consumer");

@ -506,7 +523,9 @@ static void devlink_remove_symlinks(struct device *dev,
sysfs_remove_link(&link->link_dev.kobj, "consumer");
sysfs_remove_link(&link->link_dev.kobj, "supplier");

len = max(strlen(dev_name(sup)), strlen(dev_name(con)));
len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)),
strlen(dev_bus_name(con)) + strlen(dev_name(con)));
len += strlen(":");
len += strlen("supplier:") + 1;
buf = kzalloc(len, GFP_KERNEL);
if (!buf) {

@ -514,9 +533,9 @@ static void devlink_remove_symlinks(struct device *dev,
return;
}

snprintf(buf, len, "supplier:%s", dev_name(sup));
snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup));
sysfs_remove_link(&con->kobj, buf);
snprintf(buf, len, "consumer:%s", dev_name(con));
snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con));
sysfs_remove_link(&sup->kobj, buf);
kfree(buf);
}

@ -737,8 +756,9 @@ struct device_link *device_link_add(struct device *consumer,

link->link_dev.class = &devlink_class;
device_set_pm_not_required(&link->link_dev);
dev_set_name(&link->link_dev, "%s--%s",
dev_name(supplier), dev_name(consumer));
dev_set_name(&link->link_dev, "%s:%s--%s:%s",
dev_bus_name(supplier), dev_name(supplier),
dev_bus_name(consumer), dev_name(consumer));
if (device_register(&link->link_dev)) {
put_device(consumer);
put_device(supplier);

@ -1808,9 +1828,7 @@ const char *dev_driver_string(const struct device *dev)
* never change once they are set, so they don't need special care.
*/
drv = READ_ONCE(dev->driver);
return drv ? drv->name :
(dev->bus ? dev->bus->name :
(dev->class ? dev->class->name : ""));
return drv ? drv->name : dev_bus_name(dev);
}
EXPORT_SYMBOL(dev_driver_string);
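
All the core.c hunks implement one rename: device-link objects and their per-device symlinks switch from bare device names to bus-qualified <bus>:<device> names. Judging by the dev_driver_string() hunk, dev_bus_name() returns the bus name with a fallback to the class name or "", so the new format strings stay valid for bus-less devices too. A sketch of the names produced for a hypothetical supplier/consumer pair:

/* Hypothetical devices: supplier 0000:00:1f.3 on the "pci" bus,
 * consumer "snd-soc-dummy" on the "platform" bus. The new names are:
 *
 *   /sys/class/devlink/pci:0000:00:1f.3--platform:snd-soc-dummy
 *   supplier side:  consumer:platform:snd-soc-dummy
 *   consumer side:  supplier:pci:0000:00:1f.3
 */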

@ -370,13 +370,6 @@ static void driver_bound(struct device *dev)

device_pm_check_callbacks(dev);

/*
* Reorder successfully probed devices to the end of the device list.
* This ensures that suspend/resume order matches probe order, which
* is usually what drivers rely on.
*/
device_pm_move_to_tail(dev);

/*
* Make sure the device is no longer in one of the deferred lists and
* kick off retrying all pending devices

@ -619,6 +612,8 @@ dev_groups_failed:
else if (drv->remove)
drv->remove(dev);
probe_failed:
kfree(dev->dma_range_map);
dev->dma_range_map = NULL;
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);

@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev,
return -ERANGE;

nvec = platform_irq_count(dev);
if (nvec < 0)
return nvec;

if (nvec < minvec)
return -ENOSPC;

@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter,
return len;
}

static ssize_t ti_eqep_position_floor_read(struct counter_device *counter,
struct counter_count *count,
void *ext_priv, char *buf)
{
struct ti_eqep_cnt *priv = counter->priv;
u32 qposinit;

regmap_read(priv->regmap32, QPOSINIT, &qposinit);

return sprintf(buf, "%u\n", qposinit);
}

static ssize_t ti_eqep_position_floor_write(struct counter_device *counter,
struct counter_count *count,
void *ext_priv, const char *buf,
size_t len)
{
struct ti_eqep_cnt *priv = counter->priv;
int err;
u32 res;

err = kstrtouint(buf, 0, &res);
if (err < 0)
return err;

regmap_write(priv->regmap32, QPOSINIT, res);

return len;
}

static ssize_t ti_eqep_position_enable_read(struct counter_device *counter,
struct counter_count *count,
void *ext_priv, char *buf)

@ -301,11 +271,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = {
.read = ti_eqep_position_ceiling_read,
.write = ti_eqep_position_ceiling_write,
},
{
.name = "floor",
.read = ti_eqep_position_floor_read,
.write = ti_eqep_position_floor_write,
},
{
.name = "enable",
.read = ti_eqep_position_enable_read,

@ -300,11 +300,11 @@ struct mv_cesa_tdma_desc {
__le32 byte_cnt;
union {
__le32 src;
dma_addr_t src_dma;
u32 src_dma;
};
union {
__le32 dst;
dma_addr_t dst_dma;
u32 dst_dma;
};
__le32 next_dma;

@ -521,7 +521,8 @@ config GPIO_SAMA5D2_PIOBU

config GPIO_SIFIVE
bool "SiFive GPIO support"
depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY
depends on OF_GPIO
select IRQ_DOMAIN_HIERARCHY
select GPIO_GENERIC
select GPIOLIB_IRQCHIP
select REGMAP_MMIO

@ -597,6 +598,8 @@ config GPIO_TEGRA
default ARCH_TEGRA
depends on ARCH_TEGRA || COMPILE_TEST
depends on OF_GPIO
select GPIOLIB_IRQCHIP
select IRQ_DOMAIN_HIERARCHY
help
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.

@ -676,20 +676,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
else
state->duty_cycle = 1;

val = (unsigned long long) u; /* on duration */
regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u);
val = (unsigned long long) u * NSEC_PER_SEC;
val += (unsigned long long) u; /* period = on + off duration */
val *= NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
if (val < state->duty_cycle) {
if (val > UINT_MAX)
state->period = UINT_MAX;
else if (val)
state->period = val;
else
state->period = 1;
} else {
val -= state->duty_cycle;
if (val > UINT_MAX)
state->period = UINT_MAX;
else if (val)
state->period = val;
else
state->period = 1;
}

regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u);
if (u)
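
The mvebu fix sums the on and off counters first and converts to nanoseconds once, where the old code derived the period from the off counter alone. In units: period_ns = (on + off) * NSEC_PER_SEC / clk_rate. A worked example with assumed numbers:

/* Assumed values: on = 1000 counts, off = 3000 counts,
 * clk_rate = 25 MHz (40 ns per count).
 *
 *   period = (1000 + 3000) * NSEC_PER_SEC / 25000000 = 160000 ns
 *
 * The old computation scaled only the off counter (120000 ns here)
 * and compared it against a duty cycle already in nanoseconds, so the
 * on time was missing from the reported period.
 */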

@ -1979,6 +1979,21 @@ struct gpio_chardev_data {
#endif
};

static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip)
{
struct gpio_device *gdev = cdev->gdev;
struct gpiochip_info chipinfo;

memset(&chipinfo, 0, sizeof(chipinfo));

strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name));
strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label));
chipinfo.lines = gdev->ngpio;
if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
return -EFAULT;
return 0;
}

#ifdef CONFIG_GPIO_CDEV_V1
/*
* returns 0 if the versions match, else the previously selected ABI version

@ -1993,6 +2008,41 @@ static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata,

return abiv;
}

static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip,
bool watch)
{
struct gpio_desc *desc;
struct gpioline_info lineinfo;
struct gpio_v2_line_info lineinfo_v2;

if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;

/* this doubles as a range check on line_offset */
desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset);
if (IS_ERR(desc))
return PTR_ERR(desc);

if (watch) {
if (lineinfo_ensure_abi_version(cdev, 1))
return -EPERM;

if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
return -EBUSY;
}

gpio_desc_to_lineinfo(desc, &lineinfo_v2);
gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);

if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
if (watch)
clear_bit(lineinfo.line_offset, cdev->watched_lines);
return -EFAULT;
}

return 0;
}
#endif

static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,

@ -2030,6 +2080,22 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip,
return 0;
}

static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
{
__u32 offset;

if (copy_from_user(&offset, ip, sizeof(offset)))
return -EFAULT;

if (offset >= cdev->gdev->ngpio)
return -EINVAL;

if (!test_and_clear_bit(offset, cdev->watched_lines))
return -EBUSY;

return 0;
}

/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/

@ -2037,80 +2103,24 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
struct gpio_chip *gc = gdev->chip;
void __user *ip = (void __user *)arg;
__u32 offset;

/* We fail any subsequent ioctl():s when the chip is gone */
if (!gc)
if (!gdev->chip)
return -ENODEV;

/* Fill in the struct and pass to userspace */
if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
struct gpiochip_info chipinfo;

memset(&chipinfo, 0, sizeof(chipinfo));

strscpy(chipinfo.name, dev_name(&gdev->dev),
sizeof(chipinfo.name));
strscpy(chipinfo.label, gdev->label,
sizeof(chipinfo.label));
chipinfo.lines = gdev->ngpio;
if (copy_to_user(ip, &chipinfo, sizeof(chipinfo)))
return -EFAULT;
return 0;
return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
} else if (cmd == GPIO_GET_LINEINFO_IOCTL) {
struct gpio_desc *desc;
struct gpioline_info lineinfo;
struct gpio_v2_line_info lineinfo_v2;

if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;

/* this doubles as a range check on line_offset */
desc = gpiochip_get_desc(gc, lineinfo.line_offset);
if (IS_ERR(desc))
return PTR_ERR(desc);

gpio_desc_to_lineinfo(desc, &lineinfo_v2);
gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);

if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
return 0;
} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
return linehandle_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
return lineevent_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
struct gpio_desc *desc;
struct gpioline_info lineinfo;
struct gpio_v2_line_info lineinfo_v2;

if (copy_from_user(&lineinfo, ip, sizeof(lineinfo)))
return -EFAULT;

/* this doubles as a range check on line_offset */
desc = gpiochip_get_desc(gc, lineinfo.line_offset);
if (IS_ERR(desc))
return PTR_ERR(desc);

if (lineinfo_ensure_abi_version(cdev, 1))
return -EPERM;

if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines))
return -EBUSY;

gpio_desc_to_lineinfo(desc, &lineinfo_v2);
gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo);

if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) {
clear_bit(lineinfo.line_offset, cdev->watched_lines);
return -EFAULT;
}

return 0;
} else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
return lineinfo_get_v1(cdev, ip,
cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
#endif /* CONFIG_GPIO_CDEV_V1 */
} else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {

@ -2119,16 +2129,7 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
} else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
return linereq_create(gdev, ip);
} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
if (copy_from_user(&offset, ip, sizeof(offset)))
return -EFAULT;

if (offset >= cdev->gdev->ngpio)
return -EINVAL;

if (!test_and_clear_bit(offset, cdev->watched_lines))
return -EBUSY;

return 0;
return lineinfo_unwatch(cdev, ip);
}
return -EINVAL;
}
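
The ioctl refactor is behavior-preserving: GPIO_GET_CHIPINFO_IOCTL, the v1 lineinfo/watch ioctls, and GPIO_GET_LINEINFO_UNWATCH_IOCTL each move into a named helper, with the two v1 info paths merged behind a watch flag. The chardev ABI is unchanged, as a minimal userspace consumer shows (assuming a chip exists at /dev/gpiochip0):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/gpio.h>

int main(void)
{
	struct gpiochip_info info;
	int fd = open("/dev/gpiochip0", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, GPIO_GET_CHIPINFO_IOCTL, &info) < 0)
		return 1;
	printf("%s (%s): %u lines\n", info.name, info.label, info.lines);
	return 0;
}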

@ -1489,6 +1489,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc,
type = IRQ_TYPE_NONE;
}

if (gc->to_irq)
chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__);

gc->to_irq = gpiochip_to_irq;
gc->irq.default_type = type;
gc->irq.lock_key = lock_key;

@ -81,7 +81,6 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin");

#define AMDGPU_RESUME_MS 2000

@ -119,6 +119,8 @@
#define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX 1
#define mmSPI_CONFIG_CNTL_Vangogh 0x2440
#define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1
#define mmGCR_GENERAL_CNTL_Vangogh 0x1580
#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0

#define mmCP_HYP_PFP_UCODE_ADDR 0x5814
#define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1

@ -3244,7 +3246,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),

@ -491,12 +491,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
{
uint32_t def, data, def1, data1;

def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);

if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) {
data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;

data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |

@ -505,8 +504,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);

} else {
data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;

data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |

@ -516,7 +514,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev,
}

if (def != data)
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
if (def1 != data1)
WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1);
}

@ -525,17 +523,44 @@ static void
mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t def, data;
uint32_t def, data, def1, data1, def2, data2;

def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);

if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
else
data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
data1 &= !(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
data2 &= !(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
} else {
data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
}

if (def != data)
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
if (def1 != data1)
WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1);
if (def2 != data2)
WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2);
}

static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,

@ -554,26 +579,39 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev,

static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags)
{
int data, data1;
int data, data1, data2, data3;

if (amdgpu_sriov_vf(adev))
*flags = 0;

data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2);
data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);
data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL);
data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL);

/* AMD_CG_SUPPORT_MC_MGCG */
if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) &&
!(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK |
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)))
*flags |= AMD_CG_SUPPORT_MC_MGCG;
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))
&& !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) {
*flags |= AMD_CG_SUPPORT_MC_MGCG;
}

/* AMD_CG_SUPPORT_MC_LS */
if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK)
&& !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))
&& !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)))
*flags |= AMD_CG_SUPPORT_MC_LS;
}
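
The mmhub rework moves the MGCG/LS controls from MM_ATC_L2_MISC_CG to MM_ATC_L2_CGTT_CLK_CTRL plus the DAGB0 read/write clock controls, but keeps the driver's usual def/data read-modify-write idiom throughout: read once, mutate a copy, write back only on change. Distilled into a standalone sketch (the function name is hypothetical):

static void example_update_cg(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL);

	if (enable)
		data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;
	else
		data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK;

	if (def != data)	/* skip the MMIO write when nothing changed */
		WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data);
}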

@ -251,6 +251,7 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
bool update_uclk = false;
bool p_state_change_support;

if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present)
return;

@ -291,8 +292,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;

clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
p_state_change_support = new_clocks->p_state_change_support || (display_count == 0);
if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.p_state_change_support = p_state_change_support;

/* to disable P-State switching, set UCLK min = max */
if (!clk_mgr_base->clks.p_state_change_support)

@ -2399,6 +2399,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
initial_link_setting;
uint32_t link_bw;

if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
return false;

/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing

@ -3045,14 +3048,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
pipe_ctx->stream->link == link)
pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_disable_stream(pipe_ctx);
}

for (i = 0; i < MAX_PIPES; i++) {
pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
pipe_ctx->stream->link == link)
pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
core_link_enable_stream(link->dc->current_state, pipe_ctx);
}

@ -647,8 +647,13 @@ static void power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
hws->funcs.dpp_pg_control(hws, plane_id, true);
hws->funcs.hubp_pg_control(hws, plane_id, true);

if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, plane_id, true);

if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, plane_id, true);

REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(

@ -1082,8 +1087,13 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
hws->funcs.dpp_pg_control(hws, dpp->inst, false);
hws->funcs.hubp_pg_control(hws, hubp->inst, false);

if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, dpp->inst, false);

if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, hubp->inst, false);

dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);

@ -1062,8 +1062,13 @@ static void dcn20_power_on_plane(
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);
dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);

if (hws->funcs.dpp_pg_control)
hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true);

if (hws->funcs.hubp_pg_control)
hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true);

REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(

@ -2517,8 +2517,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
* if this primary pipe has a bottom pipe in prev. state
* and if the bottom pipe is still available (which it should be),
* pick that pipe as secondary
* Same logic applies for ODM pipes. Since mpo is not allowed with odm
* check in else case.
* Same logic applies for ODM pipes
*/
if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx;

@ -2526,7 +2525,9 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
secondary_pipe->pipe_idx = preferred_pipe_idx;
}
} else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
}
if (secondary_pipe == NULL &&
dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) {
preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx;
if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];

@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.num_banks = 8,
.num_chans = 4,
.vmm_page_size_bytes = 4096,
.dram_clock_change_latency_us = 23.84,
.dram_clock_change_latency_us = 11.72,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,

@ -1121,7 +1121,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu,
static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
{

return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GpuChangeState, state, NULL);
return 0;
}

static const struct pptable_funcs renoir_ppt_funcs = {

@ -3021,7 +3021,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set,

ret = handle_conflicting_encoders(state, true);
if (ret)
return ret;
goto fail;

ret = drm_atomic_commit(state);

@ -387,9 +387,16 @@ static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo,
if (gbo->vmap_use_count > 0)
goto out;

ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
if (ret)
return ret;
/*
* VRAM helpers unmap the BO only on demand. So the previous
* page mapping might still be around. Only vmap if the there's
* no mapping present.
*/
if (dma_buf_map_is_null(&gbo->map)) {
ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
if (ret)
return ret;
}

out:
++gbo->vmap_use_count;

@ -577,6 +584,7 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
return;

ttm_bo_vunmap(bo, &gbo->map);
dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */
}

static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
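
The kmap fix accounts for the lazy unmap in the move notifier: a buffer can be vmapped, unpinned, and pinned again with gbo->map still holding a valid mapping, so a second ttm_bo_vmap() must be skipped. The use count plus the cached map reduce to this pattern (a condensed sketch of the logic above):

	/* map on first use, and only if no cached mapping survives */
	if (gbo->vmap_use_count == 0 && dma_buf_map_is_null(&gbo->map)) {
		ret = ttm_bo_vmap(&gbo->bo, &gbo->map);
		if (ret)
			return ret;
	}
	++gbo->vmap_use_count;	/* every user takes a reference */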

@ -388,19 +388,18 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
return -ENOENT;

*fence = drm_syncobj_fence_get(syncobj);
drm_syncobj_put(syncobj);

if (*fence) {
ret = dma_fence_chain_find_seqno(fence, point);
if (!ret)
return 0;
goto out;
dma_fence_put(*fence);
} else {
ret = -EINVAL;
}

if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
return ret;
goto out;

memset(&wait, 0, sizeof(wait));
wait.task = current;

@ -432,6 +431,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
if (wait.node.next)
drm_syncobj_remove_wait(syncobj, &wait);

out:
drm_syncobj_put(syncobj);

return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);

@ -3725,7 +3725,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
true);
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);

@ -4014,7 +4014,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
intel_de_posting_read(dev_priv, intel_dp->output_reg);
}

void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 tmp;

@ -4033,8 +4034,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
enableddisabled(intel_dp->has_hdmi_sink));

tmp = intel_dp->dfp.ycbcr_444_to_420 ?
DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0;

if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)

@ -4088,7 +4089,7 @@ static void intel_enable_dp(struct intel_atomic_state *state,
}

intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp);
intel_dp_configure_protocol_converter(intel_dp, pipe_config);
intel_dp_start_link_train(intel_dp, pipe_config);
intel_dp_stop_link_train(intel_dp, pipe_config);

@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
int intel_dp_retrain_link(struct intel_encoder *encoder,
struct drm_modeset_acquire_ctx *ctx);
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp);
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable);

@ -2210,6 +2210,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
if (content_protection_type_changed) {
mutex_lock(&hdcp->mutex);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
drm_connector_get(&connector->base);
schedule_work(&hdcp->prop_work);
mutex_unlock(&hdcp->mutex);
}

@ -2221,6 +2222,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
desired_and_not_enabled =
hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED;
mutex_unlock(&hdcp->mutex);
/*
* If HDCP already ENABLED and CP property is DESIRED, schedule
* prop_work to update correct CP property to user space.
*/
if (!desired_and_not_enabled && !content_protection_type_changed) {
drm_connector_get(&connector->base);
schedule_work(&hdcp->prop_work);
}
}

if (desired_and_not_enabled || content_protection_type_changed)

@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b,
return true;
}

static inline bool __request_completed(const struct i915_request *rq)
{
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}

__maybe_unused static bool
check_signal_order(struct intel_context *ce, struct i915_request *rq)
{

@ -257,7 +252,7 @@ static void signal_irq_work(struct irq_work *work)
list_for_each_entry_rcu(rq, &ce->signals, signal_link) {
bool release;

if (!__request_completed(rq))
if (!__i915_request_is_complete(rq))
break;

if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL,

@ -379,7 +374,7 @@ static void insert_breadcrumb(struct i915_request *rq)
* straight onto a signaled list, and queue the irq worker for
* its signal completion.
*/
if (__request_completed(rq)) {
if (__i915_request_is_complete(rq)) {
if (__signal_request(rq) &&
llist_add(&rq->signal_node, &b->signaled_requests))
irq_work_queue(&b->irq_work);
Some files were not shown because too many files have changed in this diff.