mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-24 04:34:08 +08:00
Merge branches 'pm-cpufreq-fixes', 'pm-cpufreq-sched-fixes' and 'intel_pstate-fixes'
* pm-cpufreq-fixes: cpufreq: Restore policy min/max limits on CPU online * pm-cpufreq-sched-fixes: cpufreq: schedutil: Fix per-CPU structure initialization in sugov_start() * intel_pstate-fixes: cpufreq: intel_pstate: Fix policy data management in passive mode cpufreq: intel_pstate: One set of global limits in active mode
This commit is contained in:
commit
6488294e4a
@ -1142,16 +1142,17 @@ used by the kernel.
|
||||
|
||||
pids.max
|
||||
|
||||
A read-write single value file which exists on non-root cgroups. The
|
||||
default is "max".
|
||||
A read-write single value file which exists on non-root
|
||||
cgroups. The default is "max".
|
||||
|
||||
Hard limit of number of processes.
|
||||
Hard limit of number of processes.
|
||||
|
||||
pids.current
|
||||
|
||||
A read-only single value file which exists on all cgroups.
|
||||
A read-only single value file which exists on all cgroups.
|
||||
|
||||
The number of processes currently in the cgroup and its descendants.
|
||||
The number of processes currently in the cgroup and its
|
||||
descendants.
|
||||
|
||||
Organisational operations are not blocked by cgroup policies, so it is
|
||||
possible to have pids.current > pids.max. This can be done by either
|
||||
|
@ -71,6 +71,9 @@
|
||||
For Axon it can be absent, though my current driver
|
||||
doesn't handle phy-address yet so for now, keep
|
||||
0x00ffffff in it.
|
||||
- phy-handle : Used to describe configurations where a external PHY
|
||||
is used. Please refer to:
|
||||
Documentation/devicetree/bindings/net/ethernet.txt
|
||||
- rx-fifo-size-gige : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
|
||||
operations (if absent the value is the same as
|
||||
rx-fifo-size). For Axon, either absent or 2048.
|
||||
@ -81,8 +84,22 @@
|
||||
offload, phandle of the TAH device node.
|
||||
- tah-channel : 1 cell, optional. If appropriate, channel used on the
|
||||
TAH engine.
|
||||
- fixed-link : Fixed-link subnode describing a link to a non-MDIO
|
||||
managed entity. See
|
||||
Documentation/devicetree/bindings/net/fixed-link.txt
|
||||
for details.
|
||||
- mdio subnode : When the EMAC has a phy connected to its local
|
||||
mdio, which us supported by the kernel's network
|
||||
PHY library in drivers/net/phy, there must be device
|
||||
tree subnode with the following required properties:
|
||||
- #address-cells: Must be <1>.
|
||||
- #size-cells: Must be <0>.
|
||||
|
||||
Example:
|
||||
For PHY definitions: Please refer to
|
||||
Documentation/devicetree/bindings/net/phy.txt and
|
||||
Documentation/devicetree/bindings/net/ethernet.txt
|
||||
|
||||
Examples:
|
||||
|
||||
EMAC0: ethernet@40000800 {
|
||||
device_type = "network";
|
||||
@ -104,6 +121,48 @@
|
||||
zmii-channel = <0>;
|
||||
};
|
||||
|
||||
EMAC1: ethernet@ef600c00 {
|
||||
device_type = "network";
|
||||
compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
|
||||
interrupt-parent = <&EMAC1>;
|
||||
interrupts = <0 1>;
|
||||
#interrupt-cells = <1>;
|
||||
#address-cells = <0>;
|
||||
#size-cells = <0>;
|
||||
interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
|
||||
1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
|
||||
reg = <0xef600c00 0x000000c4>;
|
||||
local-mac-address = [000000000000]; /* Filled in by U-Boot */
|
||||
mal-device = <&MAL0>;
|
||||
mal-tx-channel = <0>;
|
||||
mal-rx-channel = <0>;
|
||||
cell-index = <0>;
|
||||
max-frame-size = <9000>;
|
||||
rx-fifo-size = <16384>;
|
||||
tx-fifo-size = <2048>;
|
||||
fifo-entry-size = <10>;
|
||||
phy-mode = "rgmii";
|
||||
phy-handle = <&phy0>;
|
||||
phy-map = <0x00000000>;
|
||||
rgmii-device = <&RGMII0>;
|
||||
rgmii-channel = <0>;
|
||||
tah-device = <&TAH0>;
|
||||
tah-channel = <0>;
|
||||
has-inverted-stacr-oc;
|
||||
has-new-stacr-staopc;
|
||||
|
||||
mdio {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
|
||||
phy0: ethernet-phy@0 {
|
||||
compatible = "ethernet-phy-ieee802.3-c22";
|
||||
reg = <0>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
ii) McMAL node
|
||||
|
||||
Required properties:
|
||||
@ -145,4 +204,3 @@
|
||||
- revision : as provided by the RGMII new version register if
|
||||
available.
|
||||
For Axon: 0x0000012a
|
||||
|
||||
|
@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN
|
||||
FALSE (router)
|
||||
|
||||
forwarding - BOOLEAN
|
||||
Enable IP forwarding on this interface.
|
||||
Enable IP forwarding on this interface. This controls whether packets
|
||||
received _on_ this interface can be forwarded.
|
||||
|
||||
mc_forwarding - BOOLEAN
|
||||
Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE
|
||||
|
2
Makefile
2
Makefile
@ -1,7 +1,7 @@
|
||||
VERSION = 4
|
||||
PATCHLEVEL = 11
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc2
|
||||
EXTRAVERSION = -rc3
|
||||
NAME = Fearless Coyote
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
@ -411,3 +411,4 @@
|
||||
394 common pkey_mprotect sys_pkey_mprotect
|
||||
395 common pkey_alloc sys_pkey_alloc
|
||||
396 common pkey_free sys_pkey_free
|
||||
397 common statx sys_statx
|
||||
|
@ -1073,6 +1073,10 @@ config SYSVIPC_COMPAT
|
||||
def_bool y
|
||||
depends on COMPAT && SYSVIPC
|
||||
|
||||
config KEYS_COMPAT
|
||||
def_bool y
|
||||
depends on COMPAT && KEYS
|
||||
|
||||
endmenu
|
||||
|
||||
menu "Power management options"
|
||||
|
@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
|
||||
static inline bool system_uses_ttbr0_pan(void)
|
||||
{
|
||||
return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
|
||||
!cpus_have_cap(ARM64_HAS_PAN);
|
||||
!cpus_have_const_cap(ARM64_HAS_PAN);
|
||||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
|
||||
}
|
||||
|
||||
/**
|
||||
* cpu_suspend() - function to enter a low-power idle state
|
||||
* arm_cpuidle_suspend() - function to enter a low-power idle state
|
||||
* @arg: argument to pass to CPU suspend operations
|
||||
*
|
||||
* Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
|
||||
|
@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
|
||||
unsigned long val, void *data)
|
||||
{
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
static void __kprobes kprobe_handler(struct pt_regs *regs)
|
||||
{
|
||||
struct kprobe *p, *cur_kprobe;
|
||||
|
@ -162,7 +162,7 @@ void __init kasan_init(void)
|
||||
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
|
||||
|
||||
vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
|
||||
pfn_to_nid(virt_to_pfn(_text)));
|
||||
pfn_to_nid(virt_to_pfn(lm_alias(_text))));
|
||||
|
||||
/*
|
||||
* vmemmap_populate() has populated the shadow region that covers the
|
||||
|
@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
|
||||
return val;
|
||||
}
|
||||
|
||||
#define xchg(ptr, with) \
|
||||
((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr))))
|
||||
#define xchg(ptr, with) \
|
||||
({ \
|
||||
(__typeof__(*(ptr))) __xchg((unsigned long)(with), \
|
||||
(ptr), \
|
||||
sizeof(*(ptr))); \
|
||||
})
|
||||
|
||||
#endif /* __ASM_OPENRISC_CMPXCHG_H */
|
||||
|
@ -211,7 +211,7 @@ do { \
|
||||
case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \
|
||||
case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \
|
||||
case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \
|
||||
case 8: __get_user_asm2(x, ptr, retval); \
|
||||
case 8: __get_user_asm2(x, ptr, retval); break; \
|
||||
default: (x) = __get_user_bad(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
@ -30,6 +30,7 @@
|
||||
#include <asm/hardirq.h>
|
||||
#include <asm/delay.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
|
||||
|
||||
@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3);
|
||||
DECLARE_EXPORT(__ashrdi3);
|
||||
DECLARE_EXPORT(__ashldi3);
|
||||
DECLARE_EXPORT(__lshrdi3);
|
||||
DECLARE_EXPORT(__ucmpdi2);
|
||||
|
||||
EXPORT_SYMBOL(empty_zero_page);
|
||||
EXPORT_SYMBOL(__copy_tofrom_user);
|
||||
EXPORT_SYMBOL(__clear_user);
|
||||
EXPORT_SYMBOL(memset);
|
||||
|
@ -90,6 +90,7 @@ void arch_cpu_idle(void)
|
||||
}
|
||||
|
||||
void (*pm_power_off) (void) = machine_power_off;
|
||||
EXPORT_SYMBOL(pm_power_off);
|
||||
|
||||
/*
|
||||
* When a process does an "exec", machine state like FPU and debug
|
||||
|
@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page)
|
||||
|
||||
#define flush_kernel_dcache_range(start,size) \
|
||||
flush_kernel_dcache_range_asm((start), (start)+(size));
|
||||
/* vmap range flushes and invalidates. Architecturally, we don't need
|
||||
* the invalidate, because the CPU should refuse to speculate once an
|
||||
* area has been flushed, so invalidate is left empty */
|
||||
static inline void flush_kernel_vmap_range(void *vaddr, int size)
|
||||
{
|
||||
unsigned long start = (unsigned long)vaddr;
|
||||
|
||||
flush_kernel_dcache_range_asm(start, start + size);
|
||||
}
|
||||
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
|
||||
{
|
||||
unsigned long start = (unsigned long)vaddr;
|
||||
void *cursor = vaddr;
|
||||
|
||||
for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
|
||||
struct page *page = vmalloc_to_page(cursor);
|
||||
|
||||
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
|
||||
flush_kernel_dcache_page(page);
|
||||
}
|
||||
flush_kernel_dcache_range_asm(start, start + size);
|
||||
}
|
||||
void flush_kernel_vmap_range(void *vaddr, int size);
|
||||
void invalidate_kernel_vmap_range(void *vaddr, int size);
|
||||
|
||||
#define flush_cache_vmap(start, end) flush_cache_all()
|
||||
#define flush_cache_vunmap(start, end) flush_cache_all()
|
||||
|
@ -32,7 +32,8 @@
|
||||
* that put_user is the same as __put_user, etc.
|
||||
*/
|
||||
|
||||
#define access_ok(type, uaddr, size) (1)
|
||||
#define access_ok(type, uaddr, size) \
|
||||
( (uaddr) == (uaddr) )
|
||||
|
||||
#define put_user __put_user
|
||||
#define get_user __get_user
|
||||
|
@ -362,8 +362,9 @@
|
||||
#define __NR_copy_file_range (__NR_Linux + 346)
|
||||
#define __NR_preadv2 (__NR_Linux + 347)
|
||||
#define __NR_pwritev2 (__NR_Linux + 348)
|
||||
#define __NR_statx (__NR_Linux + 349)
|
||||
|
||||
#define __NR_Linux_syscalls (__NR_pwritev2 + 1)
|
||||
#define __NR_Linux_syscalls (__NR_statx + 1)
|
||||
|
||||
|
||||
#define __IGNORE_select /* newselect */
|
||||
|
@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
|
||||
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
|
||||
}
|
||||
}
|
||||
|
||||
void flush_kernel_vmap_range(void *vaddr, int size)
|
||||
{
|
||||
unsigned long start = (unsigned long)vaddr;
|
||||
|
||||
if ((unsigned long)size > parisc_cache_flush_threshold)
|
||||
flush_data_cache();
|
||||
else
|
||||
flush_kernel_dcache_range_asm(start, start + size);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_kernel_vmap_range);
|
||||
|
||||
void invalidate_kernel_vmap_range(void *vaddr, int size)
|
||||
{
|
||||
unsigned long start = (unsigned long)vaddr;
|
||||
|
||||
if ((unsigned long)size > parisc_cache_flush_threshold)
|
||||
flush_data_cache();
|
||||
else
|
||||
flush_kernel_dcache_range_asm(start, start + size);
|
||||
}
|
||||
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
|
||||
|
@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
|
||||
*/
|
||||
*loc = fsel(val, addend);
|
||||
break;
|
||||
case R_PARISC_SECREL32:
|
||||
/* 32-bit section relative address. */
|
||||
*loc = fsel(val, addend);
|
||||
break;
|
||||
case R_PARISC_DPREL21L:
|
||||
/* left 21 bit of relative address */
|
||||
val = lrsel(val - dp, addend);
|
||||
@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
|
||||
*/
|
||||
*loc = fsel(val, addend);
|
||||
break;
|
||||
case R_PARISC_SECREL32:
|
||||
/* 32-bit section relative address. */
|
||||
*loc = fsel(val, addend);
|
||||
break;
|
||||
case R_PARISC_FPTR64:
|
||||
/* 64-bit function address */
|
||||
if(in_local(me, (void *)(val + addend))) {
|
||||
|
@ -39,7 +39,7 @@
|
||||
* the PDC INTRIGUE calls. This is done to eliminate bugs introduced
|
||||
* in various PDC revisions. The code is much more maintainable
|
||||
* and reliable this way vs having to debug on every version of PDC
|
||||
* on every box.
|
||||
* on every box.
|
||||
*/
|
||||
|
||||
#include <linux/capability.h>
|
||||
@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr);
|
||||
static int perf_release(struct inode *inode, struct file *file);
|
||||
static int perf_open(struct inode *inode, struct file *file);
|
||||
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
|
||||
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
|
||||
loff_t *ppos);
|
||||
static ssize_t perf_write(struct file *file, const char __user *buf,
|
||||
size_t count, loff_t *ppos);
|
||||
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
|
||||
static void perf_start_counters(void);
|
||||
static int perf_stop_counters(uint32_t *raddr);
|
||||
@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void);
|
||||
/*
|
||||
* configure:
|
||||
*
|
||||
* Configure the cpu with a given data image. First turn off the counters,
|
||||
* Configure the cpu with a given data image. First turn off the counters,
|
||||
* then download the image, then turn the counters back on.
|
||||
*/
|
||||
static int perf_config(uint32_t *image_ptr)
|
||||
@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr)
|
||||
error = perf_stop_counters(raddr);
|
||||
if (error != 0) {
|
||||
printk("perf_config: perf_stop_counters = %ld\n", error);
|
||||
return -EINVAL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
printk("Preparing to write image\n");
|
||||
@ -242,7 +242,7 @@ printk("Preparing to write image\n");
|
||||
error = perf_write_image((uint64_t *)image_ptr);
|
||||
if (error != 0) {
|
||||
printk("perf_config: DOWNLOAD = %ld\n", error);
|
||||
return -EINVAL;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
printk("Preparing to start counters\n");
|
||||
@ -254,7 +254,7 @@ printk("Preparing to start counters\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* Open the device and initialize all of its memory. The device is only
|
||||
* Open the device and initialize all of its memory. The device is only
|
||||
* opened once, but can be "queried" by multiple processes that know its
|
||||
* file descriptor.
|
||||
*/
|
||||
@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t
|
||||
* called on the processor that the download should happen
|
||||
* on.
|
||||
*/
|
||||
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
|
||||
loff_t *ppos)
|
||||
static ssize_t perf_write(struct file *file, const char __user *buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
size_t image_size;
|
||||
uint32_t image_type;
|
||||
uint32_t interface_type;
|
||||
uint32_t test;
|
||||
|
||||
if (perf_processor_interface == ONYX_INTF)
|
||||
if (perf_processor_interface == ONYX_INTF)
|
||||
image_size = PCXU_IMAGE_SIZE;
|
||||
else if (perf_processor_interface == CUDA_INTF)
|
||||
else if (perf_processor_interface == CUDA_INTF)
|
||||
image_size = PCXW_IMAGE_SIZE;
|
||||
else
|
||||
else
|
||||
return -EFAULT;
|
||||
|
||||
if (!capable(CAP_SYS_ADMIN))
|
||||
@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
|
||||
|
||||
/* First check the machine type is correct for
|
||||
the requested image */
|
||||
if (((perf_processor_interface == CUDA_INTF) &&
|
||||
(interface_type != CUDA_INTF)) ||
|
||||
((perf_processor_interface == ONYX_INTF) &&
|
||||
(interface_type != ONYX_INTF)))
|
||||
if (((perf_processor_interface == CUDA_INTF) &&
|
||||
(interface_type != CUDA_INTF)) ||
|
||||
((perf_processor_interface == ONYX_INTF) &&
|
||||
(interface_type != ONYX_INTF)))
|
||||
return -EINVAL;
|
||||
|
||||
/* Next check to make sure the requested image
|
||||
is valid */
|
||||
if (((interface_type == CUDA_INTF) &&
|
||||
if (((interface_type == CUDA_INTF) &&
|
||||
(test >= MAX_CUDA_IMAGES)) ||
|
||||
((interface_type == ONYX_INTF) &&
|
||||
(test >= MAX_ONYX_IMAGES)))
|
||||
((interface_type == ONYX_INTF) &&
|
||||
(test >= MAX_ONYX_IMAGES)))
|
||||
return -EINVAL;
|
||||
|
||||
/* Copy the image into the processor */
|
||||
if (interface_type == CUDA_INTF)
|
||||
if (interface_type == CUDA_INTF)
|
||||
return perf_config(cuda_images[test]);
|
||||
else
|
||||
return perf_config(onyx_images[test]);
|
||||
@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun
|
||||
static void perf_patch_images(void)
|
||||
{
|
||||
#if 0 /* FIXME!! */
|
||||
/*
|
||||
/*
|
||||
* NOTE: this routine is VERY specific to the current TLB image.
|
||||
* If the image is changed, this routine might also need to be changed.
|
||||
*/
|
||||
@ -367,9 +367,9 @@ static void perf_patch_images(void)
|
||||
extern void $i_dtlb_miss_2_0();
|
||||
extern void PA2_0_iva();
|
||||
|
||||
/*
|
||||
/*
|
||||
* We can only use the lower 32-bits, the upper 32-bits should be 0
|
||||
* anyway given this is in the kernel
|
||||
* anyway given this is in the kernel
|
||||
*/
|
||||
uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0);
|
||||
uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0);
|
||||
@ -377,21 +377,21 @@ static void perf_patch_images(void)
|
||||
|
||||
if (perf_processor_interface == ONYX_INTF) {
|
||||
/* clear last 2 bytes */
|
||||
onyx_images[TLBMISS][15] &= 0xffffff00;
|
||||
onyx_images[TLBMISS][15] &= 0xffffff00;
|
||||
/* set 2 bytes */
|
||||
onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
|
||||
onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00;
|
||||
onyx_images[TLBMISS][17] = itlb_addr;
|
||||
|
||||
/* clear last 2 bytes */
|
||||
onyx_images[TLBHANDMISS][15] &= 0xffffff00;
|
||||
onyx_images[TLBHANDMISS][15] &= 0xffffff00;
|
||||
/* set 2 bytes */
|
||||
onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24));
|
||||
onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00;
|
||||
onyx_images[TLBHANDMISS][17] = itlb_addr;
|
||||
|
||||
/* clear last 2 bytes */
|
||||
onyx_images[BIG_CPI][15] &= 0xffffff00;
|
||||
onyx_images[BIG_CPI][15] &= 0xffffff00;
|
||||
/* set 2 bytes */
|
||||
onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24));
|
||||
onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00;
|
||||
@ -404,24 +404,24 @@ static void perf_patch_images(void)
|
||||
|
||||
} else if (perf_processor_interface == CUDA_INTF) {
|
||||
/* Cuda interface */
|
||||
cuda_images[TLBMISS][16] =
|
||||
cuda_images[TLBMISS][16] =
|
||||
(cuda_images[TLBMISS][16]&0xffff0000) |
|
||||
((dtlb_addr >> 8)&0x0000ffff);
|
||||
cuda_images[TLBMISS][17] =
|
||||
cuda_images[TLBMISS][17] =
|
||||
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
|
||||
cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000;
|
||||
|
||||
cuda_images[TLBHANDMISS][16] =
|
||||
cuda_images[TLBHANDMISS][16] =
|
||||
(cuda_images[TLBHANDMISS][16]&0xffff0000) |
|
||||
((dtlb_addr >> 8)&0x0000ffff);
|
||||
cuda_images[TLBHANDMISS][17] =
|
||||
cuda_images[TLBHANDMISS][17] =
|
||||
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
|
||||
cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000;
|
||||
|
||||
cuda_images[BIG_CPI][16] =
|
||||
cuda_images[BIG_CPI][16] =
|
||||
(cuda_images[BIG_CPI][16]&0xffff0000) |
|
||||
((dtlb_addr >> 8)&0x0000ffff);
|
||||
cuda_images[BIG_CPI][17] =
|
||||
cuda_images[BIG_CPI][17] =
|
||||
((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff);
|
||||
cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000;
|
||||
} else {
|
||||
@ -433,7 +433,7 @@ static void perf_patch_images(void)
|
||||
|
||||
/*
|
||||
* ioctl routine
|
||||
* All routines effect the processor that they are executed on. Thus you
|
||||
* All routines effect the processor that they are executed on. Thus you
|
||||
* must be running on the processor that you wish to change.
|
||||
*/
|
||||
|
||||
@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||
}
|
||||
|
||||
/* copy out the Counters */
|
||||
if (copy_to_user((void __user *)arg, raddr,
|
||||
if (copy_to_user((void __user *)arg, raddr,
|
||||
sizeof (raddr)) != 0) {
|
||||
error = -EFAULT;
|
||||
break;
|
||||
@ -487,7 +487,7 @@ static const struct file_operations perf_fops = {
|
||||
.open = perf_open,
|
||||
.release = perf_release
|
||||
};
|
||||
|
||||
|
||||
static struct miscdevice perf_dev = {
|
||||
MISC_DYNAMIC_MINOR,
|
||||
PA_PERF_DEV,
|
||||
@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr)
|
||||
/* OR sticky2 (bit 1496) to counter2 bit 32 */
|
||||
tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000;
|
||||
raddr[2] = (uint32_t)tmp64;
|
||||
|
||||
|
||||
/* Counter3 is bits 1497 to 1528 */
|
||||
tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff;
|
||||
/* OR sticky3 (bit 1529) to counter3 bit 32 */
|
||||
@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr)
|
||||
userbuf[22] = 0;
|
||||
userbuf[23] = 0;
|
||||
|
||||
/*
|
||||
/*
|
||||
* Write back the zeroed bytes + the image given
|
||||
* the read was destructive.
|
||||
*/
|
||||
@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr)
|
||||
} else {
|
||||
|
||||
/*
|
||||
* Read RDR-15 which contains the counters and sticky bits
|
||||
* Read RDR-15 which contains the counters and sticky bits
|
||||
*/
|
||||
if (!perf_rdr_read_ubuf(15, userbuf)) {
|
||||
return -13;
|
||||
}
|
||||
|
||||
/*
|
||||
/*
|
||||
* Clear out the counters
|
||||
*/
|
||||
perf_rdr_clear(15);
|
||||
@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr)
|
||||
raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL);
|
||||
raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL);
|
||||
}
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer)
|
||||
i = tentry->num_words;
|
||||
while (i--) {
|
||||
buffer[i] = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check for bits an even number of 64 */
|
||||
if ((xbits = width & 0x03f) != 0) {
|
||||
@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr)
|
||||
}
|
||||
|
||||
runway = ioremap_nocache(cpu_device->hpa.start, 4096);
|
||||
if (!runway) {
|
||||
pr_err("perf_write_image: ioremap failed!\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Merge intrigue bits into Runway STATUS 0 */
|
||||
tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful;
|
||||
__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
|
||||
__raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul),
|
||||
runway + RUNWAY_STATUS);
|
||||
|
||||
|
||||
/* Write RUNWAY DEBUG registers */
|
||||
for (i = 0; i < 8; i++) {
|
||||
__raw_writeq(*memaddr++, runway + RUNWAY_DEBUG);
|
||||
}
|
||||
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -843,7 +847,7 @@ printk("perf_rdr_write\n");
|
||||
perf_rdr_shift_out_U(rdr_num, buffer[i]);
|
||||
} else {
|
||||
perf_rdr_shift_out_W(rdr_num, buffer[i]);
|
||||
}
|
||||
}
|
||||
}
|
||||
printk("perf_rdr_write done\n");
|
||||
}
|
||||
|
@ -142,6 +142,8 @@ void machine_power_off(void)
|
||||
|
||||
printk(KERN_EMERG "System shut down completed.\n"
|
||||
"Please power this system off now.");
|
||||
|
||||
for (;;);
|
||||
}
|
||||
|
||||
void (*pm_power_off)(void) = machine_power_off;
|
||||
|
@ -444,6 +444,7 @@
|
||||
ENTRY_SAME(copy_file_range)
|
||||
ENTRY_COMP(preadv2)
|
||||
ENTRY_COMP(pwritev2)
|
||||
ENTRY_SAME(statx)
|
||||
|
||||
|
||||
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
|
||||
|
@ -68,6 +68,7 @@ SECTIONS
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PPC64_BOOT_WRAPPER
|
||||
. = ALIGN(256);
|
||||
.got :
|
||||
{
|
||||
__toc_start = .;
|
||||
|
@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
|
||||
{
|
||||
u32 *key = crypto_tfm_ctx(tfm);
|
||||
|
||||
*key = 0;
|
||||
*key = ~0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -51,6 +51,10 @@
|
||||
#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
|
||||
#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
|
||||
|
||||
/* Put a PPC bit into a "normal" bit position */
|
||||
#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
|
||||
((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
|
||||
|
||||
#include <asm/barrier.h>
|
||||
|
||||
/* Macro for generating the ***_bits() functions */
|
||||
|
@ -66,6 +66,55 @@
|
||||
|
||||
#define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \
|
||||
P8_DSISR_MC_ERAT_MULTIHIT_SEC)
|
||||
|
||||
/*
|
||||
* Machine Check bits on power9
|
||||
*/
|
||||
#define P9_SRR1_MC_LOADSTORE(srr1) (((srr1) >> PPC_BITLSHIFT(42)) & 1)
|
||||
|
||||
#define P9_SRR1_MC_IFETCH(srr1) ( \
|
||||
PPC_BITEXTRACT(srr1, 45, 0) | \
|
||||
PPC_BITEXTRACT(srr1, 44, 1) | \
|
||||
PPC_BITEXTRACT(srr1, 43, 2) | \
|
||||
PPC_BITEXTRACT(srr1, 36, 3) )
|
||||
|
||||
/* 0 is reserved */
|
||||
#define P9_SRR1_MC_IFETCH_UE 1
|
||||
#define P9_SRR1_MC_IFETCH_SLB_PARITY 2
|
||||
#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT 3
|
||||
#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT 4
|
||||
#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT 5
|
||||
#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD 6
|
||||
/* 7 is reserved */
|
||||
#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT 8
|
||||
#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT 9
|
||||
/* 10 ? */
|
||||
#define P9_SRR1_MC_IFETCH_RA 11
|
||||
#define P9_SRR1_MC_IFETCH_RA_TABLEWALK 12
|
||||
#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE 13
|
||||
#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT 14
|
||||
#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN 15
|
||||
|
||||
/* DSISR bits for machine check (On Power9) */
|
||||
#define P9_DSISR_MC_UE (PPC_BIT(48))
|
||||
#define P9_DSISR_MC_UE_TABLEWALK (PPC_BIT(49))
|
||||
#define P9_DSISR_MC_LINK_LOAD_TIMEOUT (PPC_BIT(50))
|
||||
#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT (PPC_BIT(51))
|
||||
#define P9_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52))
|
||||
#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53))
|
||||
#define P9_DSISR_MC_USER_TLBIE (PPC_BIT(54))
|
||||
#define P9_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55))
|
||||
#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB (PPC_BIT(56))
|
||||
#define P9_DSISR_MC_RA_LOAD (PPC_BIT(57))
|
||||
#define P9_DSISR_MC_RA_TABLEWALK (PPC_BIT(58))
|
||||
#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN (PPC_BIT(59))
|
||||
#define P9_DSISR_MC_RA_FOREIGN (PPC_BIT(60))
|
||||
|
||||
/* SLB error bits */
|
||||
#define P9_DSISR_MC_SLB_ERRORS (P9_DSISR_MC_ERAT_MULTIHIT | \
|
||||
P9_DSISR_MC_SLB_PARITY_MFSLB | \
|
||||
P9_DSISR_MC_SLB_MULTIHIT_MFSLB)
|
||||
|
||||
enum MCE_Version {
|
||||
MCE_V1 = 1,
|
||||
};
|
||||
@ -93,6 +142,9 @@ enum MCE_ErrorType {
|
||||
MCE_ERROR_TYPE_SLB = 2,
|
||||
MCE_ERROR_TYPE_ERAT = 3,
|
||||
MCE_ERROR_TYPE_TLB = 4,
|
||||
MCE_ERROR_TYPE_USER = 5,
|
||||
MCE_ERROR_TYPE_RA = 6,
|
||||
MCE_ERROR_TYPE_LINK = 7,
|
||||
};
|
||||
|
||||
enum MCE_UeErrorType {
|
||||
@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
|
||||
MCE_TLB_ERROR_MULTIHIT = 2,
|
||||
};
|
||||
|
||||
enum MCE_UserErrorType {
|
||||
MCE_USER_ERROR_INDETERMINATE = 0,
|
||||
MCE_USER_ERROR_TLBIE = 1,
|
||||
};
|
||||
|
||||
enum MCE_RaErrorType {
|
||||
MCE_RA_ERROR_INDETERMINATE = 0,
|
||||
MCE_RA_ERROR_IFETCH = 1,
|
||||
MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
|
||||
MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
|
||||
MCE_RA_ERROR_LOAD = 4,
|
||||
MCE_RA_ERROR_STORE = 5,
|
||||
MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
|
||||
MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
|
||||
MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
|
||||
};
|
||||
|
||||
enum MCE_LinkErrorType {
|
||||
MCE_LINK_ERROR_INDETERMINATE = 0,
|
||||
MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
|
||||
MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
|
||||
MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
|
||||
MCE_LINK_ERROR_STORE_TIMEOUT = 4,
|
||||
MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
|
||||
};
|
||||
|
||||
struct machine_check_event {
|
||||
enum MCE_Version version:8; /* 0x00 */
|
||||
uint8_t in_use; /* 0x01 */
|
||||
@ -166,6 +244,30 @@ struct machine_check_event {
|
||||
uint64_t effective_address;
|
||||
uint8_t reserved_2[16];
|
||||
} tlb_error;
|
||||
|
||||
struct {
|
||||
enum MCE_UserErrorType user_error_type:8;
|
||||
uint8_t effective_address_provided;
|
||||
uint8_t reserved_1[6];
|
||||
uint64_t effective_address;
|
||||
uint8_t reserved_2[16];
|
||||
} user_error;
|
||||
|
||||
struct {
|
||||
enum MCE_RaErrorType ra_error_type:8;
|
||||
uint8_t effective_address_provided;
|
||||
uint8_t reserved_1[6];
|
||||
uint64_t effective_address;
|
||||
uint8_t reserved_2[16];
|
||||
} ra_error;
|
||||
|
||||
struct {
|
||||
enum MCE_LinkErrorType link_error_type:8;
|
||||
uint8_t effective_address_provided;
|
||||
uint8_t reserved_1[6];
|
||||
uint64_t effective_address;
|
||||
uint8_t reserved_2[16];
|
||||
} link_error;
|
||||
} u;
|
||||
};
|
||||
|
||||
@ -176,8 +278,12 @@ struct mce_error_info {
|
||||
enum MCE_SlbErrorType slb_error_type:8;
|
||||
enum MCE_EratErrorType erat_error_type:8;
|
||||
enum MCE_TlbErrorType tlb_error_type:8;
|
||||
enum MCE_UserErrorType user_error_type:8;
|
||||
enum MCE_RaErrorType ra_error_type:8;
|
||||
enum MCE_LinkErrorType link_error_type:8;
|
||||
} u;
|
||||
uint8_t reserved[2];
|
||||
enum MCE_Severity severity:8;
|
||||
enum MCE_Initiator initiator:8;
|
||||
};
|
||||
|
||||
#define MAX_MC_EVT 100
|
||||
|
@ -387,3 +387,4 @@ SYSCALL(copy_file_range)
|
||||
COMPAT_SYS_SPU(preadv2)
|
||||
COMPAT_SYS_SPU(pwritev2)
|
||||
SYSCALL(kexec_file_load)
|
||||
SYSCALL(statx)
|
||||
|
@ -12,7 +12,7 @@
|
||||
#include <uapi/asm/unistd.h>
|
||||
|
||||
|
||||
#define NR_syscalls 383
|
||||
#define NR_syscalls 384
|
||||
|
||||
#define __NR__exit __NR_exit
|
||||
|
||||
|
@ -393,5 +393,6 @@
|
||||
#define __NR_preadv2 380
|
||||
#define __NR_pwritev2 381
|
||||
#define __NR_kexec_file_load 382
|
||||
#define __NR_statx 383
|
||||
|
||||
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
|
||||
|
@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
|
||||
extern void __flush_tlb_power9(unsigned int action);
|
||||
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
|
||||
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
|
||||
extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
|
||||
#endif /* CONFIG_PPC64 */
|
||||
#if defined(CONFIG_E500)
|
||||
extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
|
||||
@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
|
||||
.cpu_setup = __setup_cpu_power9,
|
||||
.cpu_restore = __restore_cpu_power9,
|
||||
.flush_tlb = __flush_tlb_power9,
|
||||
.machine_check_early = __machine_check_early_realmode_p9,
|
||||
.platform = "power9",
|
||||
},
|
||||
{ /* Power9 */
|
||||
@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
|
||||
.cpu_setup = __setup_cpu_power9,
|
||||
.cpu_restore = __restore_cpu_power9,
|
||||
.flush_tlb = __flush_tlb_power9,
|
||||
.machine_check_early = __machine_check_early_realmode_p9,
|
||||
.platform = "power9",
|
||||
},
|
||||
{ /* Cell Broadband Engine */
|
||||
|
@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
|
||||
case MCE_ERROR_TYPE_TLB:
|
||||
mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_USER:
|
||||
mce->u.user_error.user_error_type = mce_err->u.user_error_type;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_RA:
|
||||
mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_LINK:
|
||||
mce->u.link_error.link_error_type = mce_err->u.link_error_type;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_UNKNOWN:
|
||||
default:
|
||||
break;
|
||||
@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
|
||||
mce->gpr3 = regs->gpr[3];
|
||||
mce->in_use = 1;
|
||||
|
||||
mce->initiator = MCE_INITIATOR_CPU;
|
||||
/* Mark it recovered if we have handled it and MSR(RI=1). */
|
||||
if (handled && (regs->msr & MSR_RI))
|
||||
mce->disposition = MCE_DISPOSITION_RECOVERED;
|
||||
else
|
||||
mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
|
||||
mce->severity = MCE_SEV_ERROR_SYNC;
|
||||
|
||||
mce->initiator = mce_err->initiator;
|
||||
mce->severity = mce_err->severity;
|
||||
|
||||
/*
|
||||
* Populate the mce error_type and type-specific error_type.
|
||||
@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
|
||||
} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
|
||||
mce->u.erat_error.effective_address_provided = true;
|
||||
mce->u.erat_error.effective_address = addr;
|
||||
} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
|
||||
mce->u.user_error.effective_address_provided = true;
|
||||
mce->u.user_error.effective_address = addr;
|
||||
} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
|
||||
mce->u.ra_error.effective_address_provided = true;
|
||||
mce->u.ra_error.effective_address = addr;
|
||||
} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
|
||||
mce->u.link_error.effective_address_provided = true;
|
||||
mce->u.link_error.effective_address = addr;
|
||||
} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
|
||||
mce->u.ue_error.effective_address_provided = true;
|
||||
mce->u.ue_error.effective_address = addr;
|
||||
@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
|
||||
"Parity",
|
||||
"Multihit",
|
||||
};
|
||||
static const char *mc_user_types[] = {
|
||||
"Indeterminate",
|
||||
"tlbie(l) invalid",
|
||||
};
|
||||
static const char *mc_ra_types[] = {
|
||||
"Indeterminate",
|
||||
"Instruction fetch (bad)",
|
||||
"Page table walk ifetch (bad)",
|
||||
"Page table walk ifetch (foreign)",
|
||||
"Load (bad)",
|
||||
"Store (bad)",
|
||||
"Page table walk Load/Store (bad)",
|
||||
"Page table walk Load/Store (foreign)",
|
||||
"Load/Store (foreign)",
|
||||
};
|
||||
static const char *mc_link_types[] = {
|
||||
"Indeterminate",
|
||||
"Instruction fetch (timeout)",
|
||||
"Page table walk ifetch (timeout)",
|
||||
"Load (timeout)",
|
||||
"Store (timeout)",
|
||||
"Page table walk Load/Store (timeout)",
|
||||
};
|
||||
|
||||
/* Print things out */
|
||||
if (evt->version != MCE_V1) {
|
||||
@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
|
||||
printk("%s Effective address: %016llx\n",
|
||||
level, evt->u.tlb_error.effective_address);
|
||||
break;
|
||||
case MCE_ERROR_TYPE_USER:
|
||||
subtype = evt->u.user_error.user_error_type <
|
||||
ARRAY_SIZE(mc_user_types) ?
|
||||
mc_user_types[evt->u.user_error.user_error_type]
|
||||
: "Unknown";
|
||||
printk("%s Error type: User [%s]\n", level, subtype);
|
||||
if (evt->u.user_error.effective_address_provided)
|
||||
printk("%s Effective address: %016llx\n",
|
||||
level, evt->u.user_error.effective_address);
|
||||
break;
|
||||
case MCE_ERROR_TYPE_RA:
|
||||
subtype = evt->u.ra_error.ra_error_type <
|
||||
ARRAY_SIZE(mc_ra_types) ?
|
||||
mc_ra_types[evt->u.ra_error.ra_error_type]
|
||||
: "Unknown";
|
||||
printk("%s Error type: Real address [%s]\n", level, subtype);
|
||||
if (evt->u.ra_error.effective_address_provided)
|
||||
printk("%s Effective address: %016llx\n",
|
||||
level, evt->u.ra_error.effective_address);
|
||||
break;
|
||||
case MCE_ERROR_TYPE_LINK:
|
||||
subtype = evt->u.link_error.link_error_type <
|
||||
ARRAY_SIZE(mc_link_types) ?
|
||||
mc_link_types[evt->u.link_error.link_error_type]
|
||||
: "Unknown";
|
||||
printk("%s Error type: Link [%s]\n", level, subtype);
|
||||
if (evt->u.link_error.effective_address_provided)
|
||||
printk("%s Effective address: %016llx\n",
|
||||
level, evt->u.link_error.effective_address);
|
||||
break;
|
||||
default:
|
||||
case MCE_ERROR_TYPE_UNKNOWN:
|
||||
printk("%s Error type: Unknown\n", level);
|
||||
@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
|
||||
if (evt->u.tlb_error.effective_address_provided)
|
||||
return evt->u.tlb_error.effective_address;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_USER:
|
||||
if (evt->u.user_error.effective_address_provided)
|
||||
return evt->u.user_error.effective_address;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_RA:
|
||||
if (evt->u.ra_error.effective_address_provided)
|
||||
return evt->u.ra_error.effective_address;
|
||||
break;
|
||||
case MCE_ERROR_TYPE_LINK:
|
||||
if (evt->u.link_error.effective_address_provided)
|
||||
return evt->u.link_error.effective_address;
|
||||
break;
|
||||
default:
|
||||
case MCE_ERROR_TYPE_UNKNOWN:
|
||||
break;
|
||||
|
@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
|
||||
}
|
||||
#endif
|
||||
|
||||
static void flush_erat(void)
|
||||
{
|
||||
asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
|
||||
}
|
||||
|
||||
#define MCE_FLUSH_SLB 1
|
||||
#define MCE_FLUSH_TLB 2
|
||||
#define MCE_FLUSH_ERAT 3
|
||||
|
||||
static int mce_flush(int what)
|
||||
{
|
||||
#ifdef CONFIG_PPC_STD_MMU_64
|
||||
if (what == MCE_FLUSH_SLB) {
|
||||
flush_and_reload_slb();
|
||||
return 1;
|
||||
}
|
||||
#endif
|
||||
if (what == MCE_FLUSH_ERAT) {
|
||||
flush_erat();
|
||||
return 1;
|
||||
}
|
||||
if (what == MCE_FLUSH_TLB) {
|
||||
if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
|
||||
cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
|
||||
{
|
||||
if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
|
||||
dsisr &= ~slb;
|
||||
if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
|
||||
dsisr &= ~erat;
|
||||
if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
|
||||
dsisr &= ~tlb;
|
||||
/* Any other errors we don't understand? */
|
||||
if (dsisr)
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
|
||||
{
|
||||
long handled = 1;
|
||||
@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
|
||||
long handled = 1;
|
||||
struct mce_error_info mce_error_info = { 0 };
|
||||
|
||||
mce_error_info.severity = MCE_SEV_ERROR_SYNC;
|
||||
mce_error_info.initiator = MCE_INITIATOR_CPU;
|
||||
|
||||
srr1 = regs->msr;
|
||||
nip = regs->nip;
|
||||
|
||||
@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
|
||||
long handled = 1;
|
||||
struct mce_error_info mce_error_info = { 0 };
|
||||
|
||||
mce_error_info.severity = MCE_SEV_ERROR_SYNC;
|
||||
mce_error_info.initiator = MCE_INITIATOR_CPU;
|
||||
|
||||
srr1 = regs->msr;
|
||||
nip = regs->nip;
|
||||
|
||||
@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
|
||||
save_mce_event(regs, handled, &mce_error_info, nip, addr);
|
||||
return handled;
|
||||
}
|
||||
|
||||
static int mce_handle_derror_p9(struct pt_regs *regs)
|
||||
{
|
||||
uint64_t dsisr = regs->dsisr;
|
||||
|
||||
return mce_handle_flush_derrors(dsisr,
|
||||
P9_DSISR_MC_SLB_PARITY_MFSLB |
|
||||
P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
|
||||
|
||||
P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
|
||||
|
||||
P9_DSISR_MC_ERAT_MULTIHIT);
|
||||
}
|
||||
|
||||
static int mce_handle_ierror_p9(struct pt_regs *regs)
|
||||
{
|
||||
uint64_t srr1 = regs->msr;
|
||||
|
||||
switch (P9_SRR1_MC_IFETCH(srr1)) {
|
||||
case P9_SRR1_MC_IFETCH_SLB_PARITY:
|
||||
case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
|
||||
return mce_flush(MCE_FLUSH_SLB);
|
||||
case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
|
||||
return mce_flush(MCE_FLUSH_TLB);
|
||||
case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
|
||||
return mce_flush(MCE_FLUSH_ERAT);
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static void mce_get_derror_p9(struct pt_regs *regs,
|
||||
struct mce_error_info *mce_err, uint64_t *addr)
|
||||
{
|
||||
uint64_t dsisr = regs->dsisr;
|
||||
|
||||
mce_err->severity = MCE_SEV_ERROR_SYNC;
|
||||
mce_err->initiator = MCE_INITIATOR_CPU;
|
||||
|
||||
if (dsisr & P9_DSISR_MC_USER_TLBIE)
|
||||
*addr = regs->nip;
|
||||
else
|
||||
*addr = regs->dar;
|
||||
|
||||
if (dsisr & P9_DSISR_MC_UE) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_UE;
|
||||
mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
|
||||
} else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_UE;
|
||||
mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
|
||||
} else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_LINK;
|
||||
mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
|
||||
} else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_LINK;
|
||||
mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
|
||||
} else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_ERAT;
|
||||
mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
|
||||
} else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_TLB;
|
||||
mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
|
||||
} else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_USER;
|
||||
mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
|
||||
} else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_SLB;
|
||||
mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
|
||||
} else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_SLB;
|
||||
mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
|
||||
} else if (dsisr & P9_DSISR_MC_RA_LOAD) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
|
||||
} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
|
||||
} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
|
||||
} else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
|
||||
}
|
||||
}
|
||||
|
||||
static void mce_get_ierror_p9(struct pt_regs *regs,
|
||||
struct mce_error_info *mce_err, uint64_t *addr)
|
||||
{
|
||||
uint64_t srr1 = regs->msr;
|
||||
|
||||
switch (P9_SRR1_MC_IFETCH(srr1)) {
|
||||
case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
|
||||
case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
|
||||
mce_err->severity = MCE_SEV_FATAL;
|
||||
break;
|
||||
default:
|
||||
mce_err->severity = MCE_SEV_ERROR_SYNC;
|
||||
break;
|
||||
}
|
||||
|
||||
mce_err->initiator = MCE_INITIATOR_CPU;
|
||||
|
||||
*addr = regs->nip;
|
||||
|
||||
switch (P9_SRR1_MC_IFETCH(srr1)) {
|
||||
case P9_SRR1_MC_IFETCH_UE:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_UE;
|
||||
mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_SLB_PARITY:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_SLB;
|
||||
mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_SLB;
|
||||
mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_ERAT;
|
||||
mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_TLB;
|
||||
mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_UE;
|
||||
mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_LINK;
|
||||
mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_LINK;
|
||||
mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_RA:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_LINK;
|
||||
mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
|
||||
break;
|
||||
case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
|
||||
mce_err->error_type = MCE_ERROR_TYPE_RA;
|
||||
mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
long __machine_check_early_realmode_p9(struct pt_regs *regs)
|
||||
{
|
||||
uint64_t nip, addr;
|
||||
long handled;
|
||||
struct mce_error_info mce_error_info = { 0 };
|
||||
|
||||
nip = regs->nip;
|
||||
|
||||
if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
|
||||
handled = mce_handle_derror_p9(regs);
|
||||
mce_get_derror_p9(regs, &mce_error_info, &addr);
|
||||
} else {
|
||||
handled = mce_handle_ierror_p9(regs);
|
||||
mce_get_ierror_p9(regs, &mce_error_info, &addr);
|
||||
}
|
||||
|
||||
/* Handle UE error. */
|
||||
if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
|
||||
handled = mce_handle_ue_error(regs);
|
||||
|
||||
save_mce_event(regs, handled, &mce_error_info, nip, addr);
|
||||
return handled;
|
||||
}
|
||||
|
@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
|
||||
sdsync = POWER7P_MMCRA_SDAR_VALID;
|
||||
else if (ppmu->flags & PPMU_ALT_SIPR)
|
||||
sdsync = POWER6_MMCRA_SDSYNC;
|
||||
else if (ppmu->flags & PPMU_NO_SIAR)
|
||||
sdsync = MMCRA_SAMPLE_ENABLE;
|
||||
else
|
||||
sdsync = MMCRA_SDSYNC;
|
||||
|
||||
|
@ -65,12 +65,41 @@ static bool is_event_valid(u64 event)
|
||||
return !(event & ~valid_mask);
|
||||
}
|
||||
|
||||
static u64 mmcra_sdar_mode(u64 event)
|
||||
static inline bool is_event_marked(u64 event)
|
||||
{
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
|
||||
return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
|
||||
if (event & EVENT_IS_MARKED)
|
||||
return true;
|
||||
|
||||
return MMCRA_SDAR_MODE_TLB;
|
||||
return false;
|
||||
}
|
||||
|
||||
static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
|
||||
{
|
||||
/*
|
||||
* MMCRA[SDAR_MODE] specifices how the SDAR should be updated in
|
||||
* continous sampling mode.
|
||||
*
|
||||
* Incase of Power8:
|
||||
* MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling
|
||||
* mode and will be un-changed when setting MMCRA[63] (Marked events).
|
||||
*
|
||||
* Incase of Power9:
|
||||
* Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
|
||||
* or if group already have any marked events.
|
||||
* Non-Marked events (for DD1):
|
||||
* MMCRA[SDAR_MODE] will be set to 0b01
|
||||
* For rest
|
||||
* MMCRA[SDAR_MODE] will be set from event code.
|
||||
*/
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
|
||||
if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
|
||||
*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
|
||||
else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
|
||||
*mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
|
||||
else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
|
||||
*mmcra |= MMCRA_SDAR_MODE_TLB;
|
||||
} else
|
||||
*mmcra |= MMCRA_SDAR_MODE_TLB;
|
||||
}
|
||||
|
||||
static u64 thresh_cmp_val(u64 value)
|
||||
@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
|
||||
value |= CNST_L1_QUAL_VAL(cache);
|
||||
}
|
||||
|
||||
if (event & EVENT_IS_MARKED) {
|
||||
if (is_event_marked(event)) {
|
||||
mask |= CNST_SAMPLE_MASK;
|
||||
value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
|
||||
}
|
||||
@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
|
||||
}
|
||||
|
||||
/* In continuous sampling mode, update SDAR on TLB miss */
|
||||
mmcra |= mmcra_sdar_mode(event[i]);
|
||||
mmcra_sdar_mode(event[i], &mmcra);
|
||||
|
||||
if (event[i] & EVENT_IS_L1) {
|
||||
cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
|
||||
@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
|
||||
mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
|
||||
}
|
||||
|
||||
if (event[i] & EVENT_IS_MARKED) {
|
||||
if (is_event_marked(event[i])) {
|
||||
mmcra |= MMCRA_SAMPLE_ENABLE;
|
||||
|
||||
val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
|
||||
|
@ -246,6 +246,7 @@
|
||||
#define MMCRA_THR_CMP_SHIFT 32
|
||||
#define MMCRA_SDAR_MODE_SHIFT 42
|
||||
#define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT)
|
||||
#define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
|
||||
#define MMCRA_IFM_SHIFT 30
|
||||
|
||||
/* MMCR1 Threshold Compare bit constant for power9 */
|
||||
|
@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs,
|
||||
struct machine_check_event *evt)
|
||||
{
|
||||
int recovered = 0;
|
||||
uint64_t ea = get_mce_fault_addr(evt);
|
||||
|
||||
if (!(regs->msr & MSR_RI)) {
|
||||
/* If MSR_RI isn't set, we cannot recover */
|
||||
@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs,
|
||||
} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
|
||||
/* Platform corrected itself */
|
||||
recovered = 1;
|
||||
} else if (ea && !is_kernel_addr(ea)) {
|
||||
} else if (evt->severity == MCE_SEV_FATAL) {
|
||||
/* Fatal machine check */
|
||||
pr_err("Machine check interrupt is fatal\n");
|
||||
recovered = 0;
|
||||
} else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
|
||||
(user_mode(regs) && !is_global_init(current))) {
|
||||
/*
|
||||
* Faulting address is not in kernel text. We should be fine.
|
||||
* We need to find which process uses this address.
|
||||
* For now, kill the task if we have received exception when
|
||||
* in userspace.
|
||||
*
|
||||
* TODO: Queue up this address for hwpoisioning later.
|
||||
*/
|
||||
if (user_mode(regs) && !is_global_init(current)) {
|
||||
_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
|
||||
recovered = 1;
|
||||
} else
|
||||
recovered = 0;
|
||||
} else if (user_mode(regs) && !is_global_init(current) &&
|
||||
evt->severity == MCE_SEV_ERROR_SYNC) {
|
||||
/*
|
||||
* If we have received a synchronous error when in userspace
|
||||
* kill the task.
|
||||
*/
|
||||
_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
|
||||
recovered = 1;
|
||||
}
|
||||
|
@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
|
||||
}
|
||||
|
||||
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
|
||||
struct pci_bus *bus)
|
||||
struct pci_bus *bus,
|
||||
bool add_to_group)
|
||||
{
|
||||
struct pci_dev *dev;
|
||||
|
||||
list_for_each_entry(dev, &bus->devices, bus_list) {
|
||||
set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
|
||||
set_dma_offset(&dev->dev, pe->tce_bypass_base);
|
||||
iommu_add_device(&dev->dev);
|
||||
if (add_to_group)
|
||||
iommu_add_device(&dev->dev);
|
||||
|
||||
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
|
||||
pnv_ioda_setup_bus_dma(pe, dev->subordinate);
|
||||
pnv_ioda_setup_bus_dma(pe, dev->subordinate,
|
||||
add_to_group);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2191,7 +2194,7 @@ found:
|
||||
set_iommu_table_base(&pe->pdev->dev, tbl);
|
||||
iommu_add_device(&pe->pdev->dev);
|
||||
} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus);
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
|
||||
|
||||
return;
|
||||
fail:
|
||||
@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
|
||||
|
||||
pnv_pci_ioda2_set_bypass(pe, false);
|
||||
pnv_pci_ioda2_unset_window(&pe->table_group, 0);
|
||||
if (pe->pbus)
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
|
||||
pnv_ioda2_table_free(tbl);
|
||||
}
|
||||
|
||||
@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
|
||||
table_group);
|
||||
|
||||
pnv_pci_ioda2_setup_default_config(pe);
|
||||
if (pe->pbus)
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
|
||||
}
|
||||
|
||||
static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
|
||||
@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
|
||||
level_shift = entries_shift + 3;
|
||||
level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
|
||||
|
||||
if ((level_shift - 3) * levels + page_shift >= 60)
|
||||
return -EINVAL;
|
||||
|
||||
/* Allocate TCE table */
|
||||
addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
|
||||
levels, tce_table_size, &offset, &total_allocated);
|
||||
@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
|
||||
if (pe->flags & PNV_IODA_PE_DEV)
|
||||
iommu_add_device(&pe->pdev->dev);
|
||||
else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus);
|
||||
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
|
@ -751,7 +751,9 @@ void __init hpte_init_pseries(void)
|
||||
mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
|
||||
mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
|
||||
mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
|
||||
mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
|
||||
|
||||
if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
|
||||
mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
|
||||
}
|
||||
|
||||
void radix_init_pseries(void)
|
||||
|
@ -2101,8 +2101,8 @@ static int x86_pmu_event_init(struct perf_event *event)
|
||||
|
||||
static void refresh_pce(void *ignored)
|
||||
{
|
||||
if (current->mm)
|
||||
load_mm_cr4(current->mm);
|
||||
if (current->active_mm)
|
||||
load_mm_cr4(current->active_mm);
|
||||
}
|
||||
|
||||
static void x86_pmu_event_mapped(struct perf_event *event)
|
||||
@ -2110,6 +2110,18 @@ static void x86_pmu_event_mapped(struct perf_event *event)
|
||||
if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
|
||||
return;
|
||||
|
||||
/*
|
||||
* This function relies on not being called concurrently in two
|
||||
* tasks in the same mm. Otherwise one task could observe
|
||||
* perf_rdpmc_allowed > 1 and return all the way back to
|
||||
* userspace with CR4.PCE clear while another task is still
|
||||
* doing on_each_cpu_mask() to propagate CR4.PCE.
|
||||
*
|
||||
* For now, this can't happen because all callers hold mmap_sem
|
||||
* for write. If this changes, we'll need a different solution.
|
||||
*/
|
||||
lockdep_assert_held_exclusive(¤t->mm->mmap_sem);
|
||||
|
||||
if (atomic_inc_return(¤t->mm->context.perf_rdpmc_allowed) == 1)
|
||||
on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
|
||||
}
|
||||
|
@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd)
|
||||
*(tmp + 1) = 0;
|
||||
}
|
||||
|
||||
#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \
|
||||
defined(CONFIG_PARAVIRT))
|
||||
static inline void native_pud_clear(pud_t *pudp)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void pud_clear(pud_t *pudp)
|
||||
{
|
||||
|
@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
|
||||
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
|
||||
#endif
|
||||
|
||||
#ifndef __PAGETABLE_PMD_FOLDED
|
||||
#ifndef __PAGETABLE_PUD_FOLDED
|
||||
#define pud_clear(pud) native_pud_clear(pud)
|
||||
#endif
|
||||
|
||||
|
@@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled)
return -EINVAL;
}

if (!enabled) {
++disabled_cpus;
return -EINVAL;
}

if (boot_cpu_physical_apicid != -1U)
ver = boot_cpu_apic_version;

cpu = __generic_processor_info(id, ver, enabled);
cpu = generic_processor_info(id, ver);
if (cpu >= 0)
early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid;

@@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void)
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>

int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
int nid;
@@ -2063,7 +2063,7 @@ static int allocate_logical_cpuid(int apicid)
return nr_logical_cpuids++;
}

int __generic_processor_info(int apicid, int version, bool enabled)
int generic_processor_info(int apicid, int version)
{
int cpu, max = nr_cpu_ids;
bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2121,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled)
if (num_processors >= nr_cpu_ids) {
int thiscpu = max + disabled_cpus;

if (enabled) {
pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
"reached. Processor %d/0x%x ignored.\n",
max, thiscpu, apicid);
}
pr_warning("APIC: NR_CPUS/possible_cpus limit of %i "
"reached. Processor %d/0x%x ignored.\n",
max, thiscpu, apicid);

disabled_cpus++;
return -EINVAL;
@@ -2177,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled)
apic->x86_32_early_logical_apicid(cpu);
#endif
set_cpu_possible(cpu, true);

if (enabled) {
num_processors++;
physid_set(apicid, phys_cpu_present_map);
set_cpu_present(cpu, true);
} else {
disabled_cpus++;
}
physid_set(apicid, phys_cpu_present_map);
set_cpu_present(cpu, true);
num_processors++;

return cpu;
}

int generic_processor_info(int apicid, int version)
{
return __generic_processor_info(apicid, version, true);
}

int hard_smp_processor_id(void)
{
return read_apic_id();
@@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn)
if (atomic_dec_and_test(&rdtgrp->waitcount) &&
(rdtgrp->flags & RDT_DELETED)) {
kernfs_unbreak_active_protection(kn);
kernfs_put(kn);
kernfs_put(rdtgrp->kn);
kfree(rdtgrp);
} else {
kernfs_unbreak_active_protection(kn);
@@ -4,6 +4,7 @@
* Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
*/

#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
@@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
spin_lock_irqsave(&desc->lock, flags);

/*
* most handlers of type NMI_UNKNOWN never return because
* they just assume the NMI is theirs. Just a sanity check
* to manage expectations
* Indicate if there are multiple registrations on the
* internal NMI handler call chains (SERR and IO_CHECK).
*/
WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head));
WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head));
WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head));
@@ -1333,6 +1333,8 @@ static int __init init_tsc_clocksource(void)
* the refined calibration and directly register it as a clocksource.
*/
if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) {
if (boot_cpu_has(X86_FEATURE_ART))
art_related_clocksource = &clocksource_tsc;
clocksource_register_khz(&clocksource_tsc, tsc_khz);
return 0;
}
@@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs)
return sizeof(*regs);
}

#ifdef CONFIG_X86_32
#define GCC_REALIGN_WORDS 3
#else
#define GCC_REALIGN_WORDS 1
#endif

static bool is_last_task_frame(struct unwind_state *state)
{
unsigned long bp = (unsigned long)state->bp;
unsigned long regs = (unsigned long)task_pt_regs(state->task);
unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2;
unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS;

/*
* We have to check for the last task frame at two different locations
* because gcc can occasionally decide to realign the stack pointer and
* change the offset of the stack frame by a word in the prologue of a
* function called by head/entry code.
* change the offset of the stack frame in the prologue of a function
* called by head/entry code. Examples:
*
* <start_secondary>:
* push %edi
* lea 0x8(%esp),%edi
* and $0xfffffff8,%esp
* pushl -0x4(%edi)
* push %ebp
* mov %esp,%ebp
*
* <x86_64_start_kernel>:
* lea 0x8(%rsp),%r10
* and $0xfffffffffffffff0,%rsp
* pushq -0x8(%r10)
* push %rbp
* mov %rsp,%rbp
*
* Note that after aligning the stack, it pushes a duplicate copy of
* the return address before pushing the frame pointer.
*/
return bp == regs - FRAME_HEADER_SIZE ||
bp == regs - FRAME_HEADER_SIZE - sizeof(long);
return (state->bp == last_bp ||
(state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1)));
}

/*
@@ -1,3 +1,4 @@
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>

@@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
* we might run off the end of the bounds table if we are on
* a 64-bit kernel and try to get 8 bytes.
*/
int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
long __user *bd_entry_ptr)
{
u32 bd_entry_32;
@@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o
obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
@ -0,0 +1,82 @@
|
||||
/*
|
||||
* Intel Merrifield power button support
|
||||
*
|
||||
* (C) Copyright 2017 Intel Corporation
|
||||
*
|
||||
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; version 2
|
||||
* of the License.
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/sfi.h>
|
||||
|
||||
#include <asm/intel-mid.h>
|
||||
#include <asm/intel_scu_ipc.h>
|
||||
|
||||
static struct resource mrfld_power_btn_resources[] = {
|
||||
{
|
||||
.flags = IORESOURCE_IRQ,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device mrfld_power_btn_dev = {
|
||||
.name = "msic_power_btn",
|
||||
.id = PLATFORM_DEVID_NONE,
|
||||
.num_resources = ARRAY_SIZE(mrfld_power_btn_resources),
|
||||
.resource = mrfld_power_btn_resources,
|
||||
};
|
||||
|
||||
static int mrfld_power_btn_scu_status_change(struct notifier_block *nb,
|
||||
unsigned long code, void *data)
|
||||
{
|
||||
if (code == SCU_DOWN) {
|
||||
platform_device_unregister(&mrfld_power_btn_dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return platform_device_register(&mrfld_power_btn_dev);
|
||||
}
|
||||
|
||||
static struct notifier_block mrfld_power_btn_scu_notifier = {
|
||||
.notifier_call = mrfld_power_btn_scu_status_change,
|
||||
};
|
||||
|
||||
static int __init register_mrfld_power_btn(void)
|
||||
{
|
||||
if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
|
||||
return -ENODEV;
|
||||
|
||||
/*
|
||||
* We need to be sure that the SCU IPC is ready before
|
||||
* PMIC power button device can be registered:
|
||||
*/
|
||||
intel_scu_notifier_add(&mrfld_power_btn_scu_notifier);
|
||||
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(register_mrfld_power_btn);
|
||||
|
||||
static void __init *mrfld_power_btn_platform_data(void *info)
|
||||
{
|
||||
struct resource *res = mrfld_power_btn_resources;
|
||||
struct sfi_device_table_entry *pentry = info;
|
||||
|
||||
res->start = res->end = pentry->irq;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static const struct devs_id mrfld_power_btn_dev_id __initconst = {
|
||||
.name = "bcove_power_btn",
|
||||
.type = SFI_DEV_TYPE_IPC,
|
||||
.delay = 1,
|
||||
.msic = 1,
|
||||
.get_platform_data = &mrfld_power_btn_platform_data,
|
||||
};
|
||||
|
||||
sfi_device(mrfld_power_btn_dev_id);
|
@ -19,7 +19,7 @@
|
||||
#include <asm/intel_scu_ipc.h>
|
||||
#include <asm/io_apic.h>
|
||||
|
||||
#define TANGIER_EXT_TIMER0_MSI 15
|
||||
#define TANGIER_EXT_TIMER0_MSI 12
|
||||
|
||||
static struct platform_device wdt_dev = {
|
||||
.name = "intel_mid_wdt",
|
||||
|
@ -17,16 +17,6 @@
|
||||
|
||||
#include "intel_mid_weak_decls.h"
|
||||
|
||||
static void penwell_arch_setup(void);
|
||||
/* penwell arch ops */
|
||||
static struct intel_mid_ops penwell_ops = {
|
||||
.arch_setup = penwell_arch_setup,
|
||||
};
|
||||
|
||||
static void mfld_power_off(void)
|
||||
{
|
||||
}
|
||||
|
||||
static unsigned long __init mfld_calibrate_tsc(void)
|
||||
{
|
||||
unsigned long fast_calibrate;
|
||||
@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void)
|
||||
static void __init penwell_arch_setup(void)
|
||||
{
|
||||
x86_platform.calibrate_tsc = mfld_calibrate_tsc;
|
||||
pm_power_off = mfld_power_off;
|
||||
}
|
||||
|
||||
static struct intel_mid_ops penwell_ops = {
|
||||
.arch_setup = penwell_arch_setup,
|
||||
};
|
||||
|
||||
void *get_penwell_ops(void)
|
||||
{
|
||||
return &penwell_ops;
|
||||
|
12
block/bio.c
@@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
bio_list_init(&punt);
bio_list_init(&nopunt);

while ((bio = bio_list_pop(current->bio_list)))
while ((bio = bio_list_pop(&current->bio_list[0])))
bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
current->bio_list[0] = nopunt;

*current->bio_list = nopunt;
bio_list_init(&nopunt);
while ((bio = bio_list_pop(&current->bio_list[1])))
bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
current->bio_list[1] = nopunt;

spin_lock(&bs->rescue_lock);
bio_list_merge(&bs->rescue_list, &punt);
@@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
* we retry with the original gfp_flags.
*/

if (current->bio_list && !bio_list_empty(current->bio_list))
if (current->bio_list &&
(!bio_list_empty(&current->bio_list[0]) ||
!bio_list_empty(&current->bio_list[1])))
gfp_mask &= ~__GFP_DIRECT_RECLAIM;

p = mempool_alloc(bs->bio_pool, gfp_mask);
@@ -1973,7 +1973,14 @@ end_io:
*/
blk_qc_t generic_make_request(struct bio *bio)
{
struct bio_list bio_list_on_stack;
/*
* bio_list_on_stack[0] contains bios submitted by the current
* make_request_fn.
* bio_list_on_stack[1] contains bios that were submitted before
* the current make_request_fn, but that haven't been processed
* yet.
*/
struct bio_list bio_list_on_stack[2];
blk_qc_t ret = BLK_QC_T_NONE;

if (!generic_make_request_checks(bio))
@@ -1990,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio)
* should be added at the tail
*/
if (current->bio_list) {
bio_list_add(current->bio_list, bio);
bio_list_add(&current->bio_list[0], bio);
goto out;
}

@@ -2009,18 +2016,17 @@ blk_qc_t generic_make_request(struct bio *bio)
* bio_list, and call into ->make_request() again.
*/
BUG_ON(bio->bi_next);
bio_list_init(&bio_list_on_stack);
current->bio_list = &bio_list_on_stack;
bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack;
do {
struct request_queue *q = bdev_get_queue(bio->bi_bdev);

if (likely(blk_queue_enter(q, false) == 0)) {
struct bio_list hold;
struct bio_list lower, same;

/* Create a fresh bio_list for all subordinate requests */
hold = bio_list_on_stack;
bio_list_init(&bio_list_on_stack);
bio_list_on_stack[1] = bio_list_on_stack[0];
bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);

blk_queue_exit(q);
@@ -2030,19 +2036,19 @@ blk_qc_t generic_make_request(struct bio *bio)
*/
bio_list_init(&lower);
bio_list_init(&same);
while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
if (q == bdev_get_queue(bio->bi_bdev))
bio_list_add(&same, bio);
else
bio_list_add(&lower, bio);
/* now assemble so we handle the lowest level first */
bio_list_merge(&bio_list_on_stack, &lower);
bio_list_merge(&bio_list_on_stack, &same);
bio_list_merge(&bio_list_on_stack, &hold);
bio_list_merge(&bio_list_on_stack[0], &lower);
bio_list_merge(&bio_list_on_stack[0], &same);
bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
} else {
bio_io_error(bio);
}
bio = bio_list_pop(current->bio_list);
bio = bio_list_pop(&bio_list_on_stack[0]);
} while (bio);
current->bio_list = NULL; /* deactivate */
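The generic_make_request() hunks above replace the single on-stack bio list with a two-element array so that stacked block devices are processed iteratively, lowest device first, instead of by recursion. Below is a rough, self-contained userspace model of that scheme; every name in it (struct bio, bio_list, make_request, submit) is a stand-in invented for illustration, not the kernel API.

/*
 * Illustrative sketch only -- not part of the commit above. Recursive
 * submissions are queued instead of recursed into, and bios for "lower"
 * devices are drained before anything that was already pending.
 */
#include <stdio.h>
#include <stdlib.h>

struct bio { int level; struct bio *next; };
struct bio_list { struct bio *head, *tail; };

static void bl_init(struct bio_list *bl) { bl->head = bl->tail = NULL; }

static void bl_add(struct bio_list *bl, struct bio *b)
{
	b->next = NULL;
	if (bl->tail) bl->tail->next = b; else bl->head = b;
	bl->tail = b;
}

static struct bio *bl_pop(struct bio_list *bl)
{
	struct bio *b = bl->head;

	if (b && !(bl->head = b->next))
		bl->tail = NULL;
	return b;
}

static void bl_merge(struct bio_list *dst, struct bio_list *src)
{
	struct bio *b;

	while ((b = bl_pop(src)))
		bl_add(dst, b);
}

static struct bio_list *cur_list;	/* models current->bio_list */

/* A fake make_request_fn: a bio at level < 2 spawns two children below it. */
static void make_request(struct bio *bio)
{
	printf("handling bio at level %d\n", bio->level);
	if (bio->level < 2) {
		for (int i = 0; i < 2; i++) {
			struct bio *c = calloc(1, sizeof(*c));

			c->level = bio->level + 1;
			bl_add(&cur_list[0], c);	/* "recursive" submit */
		}
	}
	free(bio);
}

static void submit(struct bio *bio)
{
	struct bio_list on_stack[2];

	if (cur_list) {			/* already iterating: just queue it */
		bl_add(&cur_list[0], bio);
		return;
	}
	bl_init(&on_stack[0]);
	bl_init(&on_stack[1]);
	cur_list = on_stack;
	do {
		struct bio_list lower, same;
		struct bio *b;
		int level = bio->level;

		/* park everything submitted before this call on list [1] */
		on_stack[1] = on_stack[0];
		bl_init(&on_stack[0]);
		make_request(bio);

		/* sort what the driver just queued: lower devices first */
		bl_init(&lower);
		bl_init(&same);
		while ((b = bl_pop(&on_stack[0])))
			bl_add(b->level > level ? &lower : &same, b);
		bl_merge(&on_stack[0], &lower);
		bl_merge(&on_stack[0], &same);
		bl_merge(&on_stack[0], &on_stack[1]);
	} while ((bio = bl_pop(&on_stack[0])));
	cur_list = NULL;
}

int main(void)
{
	struct bio *b = calloc(1, sizeof(*b));

	submit(b);
	return 0;
}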
@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
|
||||
for (i = 0; i < set->nr_hw_queues; i++) {
|
||||
struct blk_mq_tags *tags = set->tags[i];
|
||||
|
||||
if (!tags)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < tags->nr_tags; j++) {
|
||||
if (!tags->static_rqs[j])
|
||||
continue;
|
||||
|
@ -1434,7 +1434,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
|
||||
return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
|
||||
}
|
||||
|
||||
static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
|
||||
static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
|
||||
bool may_sleep)
|
||||
{
|
||||
struct request_queue *q = rq->q;
|
||||
struct blk_mq_queue_data bd = {
|
||||
@ -1475,7 +1476,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
|
||||
}
|
||||
|
||||
insert:
|
||||
blk_mq_sched_insert_request(rq, false, true, true, false);
|
||||
blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1569,11 +1570,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
|
||||
|
||||
if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
|
||||
rcu_read_lock();
|
||||
blk_mq_try_issue_directly(old_rq, &cookie);
|
||||
blk_mq_try_issue_directly(old_rq, &cookie, false);
|
||||
rcu_read_unlock();
|
||||
} else {
|
||||
srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
|
||||
blk_mq_try_issue_directly(old_rq, &cookie);
|
||||
blk_mq_try_issue_directly(old_rq, &cookie, true);
|
||||
srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
|
||||
}
|
||||
goto done;
|
||||
|
@ -266,7 +266,7 @@ unlock:
|
||||
return err;
|
||||
}
|
||||
|
||||
int af_alg_accept(struct sock *sk, struct socket *newsock)
|
||||
int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
|
||||
{
|
||||
struct alg_sock *ask = alg_sk(sk);
|
||||
const struct af_alg_type *type;
|
||||
@ -281,7 +281,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
|
||||
if (!type)
|
||||
goto unlock;
|
||||
|
||||
sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
|
||||
sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
|
||||
err = -ENOMEM;
|
||||
if (!sk2)
|
||||
goto unlock;
|
||||
@ -323,9 +323,10 @@ unlock:
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(af_alg_accept);
|
||||
|
||||
static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
|
||||
static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
bool kern)
|
||||
{
|
||||
return af_alg_accept(sock->sk, newsock);
|
||||
return af_alg_accept(sock->sk, newsock, kern);
|
||||
}
|
||||
|
||||
static const struct proto_ops alg_proto_ops = {
|
||||
|
@ -239,7 +239,8 @@ unlock:
|
||||
return err ?: len;
|
||||
}
|
||||
|
||||
static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
|
||||
static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
|
||||
bool kern)
|
||||
{
|
||||
struct sock *sk = sock->sk;
|
||||
struct alg_sock *ask = alg_sk(sk);
|
||||
@ -260,7 +261,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = af_alg_accept(ask->parent, newsock);
|
||||
err = af_alg_accept(ask->parent, newsock, kern);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -378,7 +379,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
|
||||
}
|
||||
|
||||
static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
|
||||
int flags)
|
||||
int flags, bool kern)
|
||||
{
|
||||
int err;
|
||||
|
||||
@ -386,7 +387,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
return hash_accept(sock, newsock, flags);
|
||||
return hash_accept(sock, newsock, flags, kern);
|
||||
}
|
||||
|
||||
static struct proto_ops algif_hash_ops_nokey = {
|
||||
|
@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
|
||||
|
||||
void __weak arch_unregister_cpu(int cpu) {}
|
||||
|
||||
int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
|
||||
{
|
||||
unsigned long long sta;
|
||||
@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
|
||||
pr->acpi_id = value;
|
||||
}
|
||||
|
||||
if (acpi_duplicate_processor_id(pr->acpi_id)) {
|
||||
dev_err(&device->dev,
|
||||
"Failed to get unique processor _UID (0x%x)\n",
|
||||
pr->acpi_id);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
|
||||
pr->acpi_id);
|
||||
if (invalid_phys_cpuid(pr->phys_id))
|
||||
@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = {
|
||||
static int nr_unique_ids __initdata;
|
||||
|
||||
/* The number of the duplicate processor IDs */
|
||||
static int nr_duplicate_ids __initdata;
|
||||
static int nr_duplicate_ids;
|
||||
|
||||
/* Used to store the unique processor IDs */
|
||||
static int unique_processor_ids[] __initdata = {
|
||||
@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = {
|
||||
};
|
||||
|
||||
/* Used to store the duplicate processor IDs */
|
||||
static int duplicate_processor_ids[] __initdata = {
|
||||
static int duplicate_processor_ids[] = {
|
||||
[0 ... NR_CPUS - 1] = -1,
|
||||
};
|
||||
|
||||
@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
|
||||
void **rv)
|
||||
{
|
||||
acpi_status status;
|
||||
acpi_object_type acpi_type;
|
||||
unsigned long long uid;
|
||||
union acpi_object object = { 0 };
|
||||
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
|
||||
|
||||
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
|
||||
status = acpi_get_type(handle, &acpi_type);
|
||||
if (ACPI_FAILURE(status))
|
||||
acpi_handle_info(handle, "Not get the processor object\n");
|
||||
else
|
||||
processor_validated_ids_update(object.processor.proc_id);
|
||||
return false;
|
||||
|
||||
switch (acpi_type) {
|
||||
case ACPI_TYPE_PROCESSOR:
|
||||
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
|
||||
if (ACPI_FAILURE(status))
|
||||
goto err;
|
||||
uid = object.processor.proc_id;
|
||||
break;
|
||||
|
||||
case ACPI_TYPE_DEVICE:
|
||||
status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
|
||||
if (ACPI_FAILURE(status))
|
||||
goto err;
|
||||
break;
|
||||
default:
|
||||
goto err;
|
||||
}
|
||||
|
||||
processor_validated_ids_update(uid);
|
||||
return true;
|
||||
|
||||
err:
|
||||
acpi_handle_info(handle, "Invalid processor object\n");
|
||||
return false;
|
||||
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
static void __init acpi_processor_check_duplicates(void)
|
||||
void __init acpi_processor_check_duplicates(void)
|
||||
{
|
||||
/* Search all processor nodes in ACPI namespace */
|
||||
/* check the correctness for all processors in ACPI namespace */
|
||||
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
|
||||
ACPI_UINT32_MAX,
|
||||
acpi_processor_ids_walk,
|
||||
NULL, NULL, NULL);
|
||||
acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
|
||||
NULL, NULL);
|
||||
}
|
||||
|
||||
bool __init acpi_processor_validate_proc_id(int proc_id)
|
||||
bool acpi_duplicate_processor_id(int proc_id)
|
||||
{
|
||||
int i;
|
||||
|
||||
|
@ -1249,7 +1249,6 @@ static int __init acpi_init(void)
|
||||
acpi_wakeup_device_init();
|
||||
acpi_debugger_init();
|
||||
acpi_setup_sb_notify_handler();
|
||||
acpi_set_processor_mapping();
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void)
|
||||
}
|
||||
|
||||
static int map_lapic_id(struct acpi_subtable_header *entry,
|
||||
u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled)
|
||||
u32 acpi_id, phys_cpuid_t *apic_id)
|
||||
{
|
||||
struct acpi_madt_local_apic *lapic =
|
||||
container_of(entry, struct acpi_madt_local_apic, header);
|
||||
|
||||
if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED))
|
||||
if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
|
||||
return -ENODEV;
|
||||
|
||||
if (lapic->processor_id != acpi_id)
|
||||
@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
|
||||
}
|
||||
|
||||
static int map_x2apic_id(struct acpi_subtable_header *entry,
|
||||
int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
|
||||
bool ignore_disabled)
|
||||
int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
|
||||
{
|
||||
struct acpi_madt_local_x2apic *apic =
|
||||
container_of(entry, struct acpi_madt_local_x2apic, header);
|
||||
|
||||
if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED))
|
||||
if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
|
||||
return -ENODEV;
|
||||
|
||||
if (device_declaration && (apic->uid == acpi_id)) {
|
||||
@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
|
||||
}
|
||||
|
||||
static int map_lsapic_id(struct acpi_subtable_header *entry,
|
||||
int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
|
||||
bool ignore_disabled)
|
||||
int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
|
||||
{
|
||||
struct acpi_madt_local_sapic *lsapic =
|
||||
container_of(entry, struct acpi_madt_local_sapic, header);
|
||||
|
||||
if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED))
|
||||
if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
|
||||
return -ENODEV;
|
||||
|
||||
if (device_declaration) {
|
||||
@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
|
||||
* Retrieve the ARM CPU physical identifier (MPIDR)
|
||||
*/
|
||||
static int map_gicc_mpidr(struct acpi_subtable_header *entry,
|
||||
int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr,
|
||||
bool ignore_disabled)
|
||||
int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
|
||||
{
|
||||
struct acpi_madt_generic_interrupt *gicc =
|
||||
container_of(entry, struct acpi_madt_generic_interrupt, header);
|
||||
|
||||
if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED))
|
||||
if (!(gicc->flags & ACPI_MADT_ENABLED))
|
||||
return -ENODEV;
|
||||
|
||||
/* device_declaration means Device object in DSDT, in the
|
||||
@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
|
||||
}
|
||||
|
||||
static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
|
||||
int type, u32 acpi_id, bool ignore_disabled)
|
||||
int type, u32 acpi_id)
|
||||
{
|
||||
unsigned long madt_end, entry;
|
||||
phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */
|
||||
@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
|
||||
struct acpi_subtable_header *header =
|
||||
(struct acpi_subtable_header *)entry;
|
||||
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
|
||||
if (!map_lapic_id(header, acpi_id, &phys_id,
|
||||
ignore_disabled))
|
||||
if (!map_lapic_id(header, acpi_id, &phys_id))
|
||||
break;
|
||||
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
|
||||
if (!map_x2apic_id(header, type, acpi_id, &phys_id,
|
||||
ignore_disabled))
|
||||
if (!map_x2apic_id(header, type, acpi_id, &phys_id))
|
||||
break;
|
||||
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
|
||||
if (!map_lsapic_id(header, type, acpi_id, &phys_id,
|
||||
ignore_disabled))
|
||||
if (!map_lsapic_id(header, type, acpi_id, &phys_id))
|
||||
break;
|
||||
} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
|
||||
if (!map_gicc_mpidr(header, type, acpi_id, &phys_id,
|
||||
ignore_disabled))
|
||||
if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
|
||||
break;
|
||||
}
|
||||
entry += header->length;
|
||||
@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
|
||||
if (!madt)
|
||||
return PHYS_CPUID_INVALID;
|
||||
|
||||
rv = map_madt_entry(madt, 1, acpi_id, true);
|
||||
rv = map_madt_entry(madt, 1, acpi_id);
|
||||
|
||||
acpi_put_table((struct acpi_table_header *)madt);
|
||||
|
||||
return rv;
|
||||
}
|
||||
|
||||
static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
|
||||
bool ignore_disabled)
|
||||
static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
|
||||
{
|
||||
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
|
||||
union acpi_object *obj;
|
||||
@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
|
||||
|
||||
header = (struct acpi_subtable_header *)obj->buffer.pointer;
|
||||
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
|
||||
map_lapic_id(header, acpi_id, &phys_id, ignore_disabled);
|
||||
map_lapic_id(header, acpi_id, &phys_id);
|
||||
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
|
||||
map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled);
|
||||
map_lsapic_id(header, type, acpi_id, &phys_id);
|
||||
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
|
||||
map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled);
|
||||
map_x2apic_id(header, type, acpi_id, &phys_id);
|
||||
else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
|
||||
map_gicc_mpidr(header, type, acpi_id, &phys_id,
|
||||
ignore_disabled);
|
||||
map_gicc_mpidr(header, type, acpi_id, &phys_id);
|
||||
|
||||
exit:
|
||||
kfree(buffer.pointer);
|
||||
return phys_id;
|
||||
}
|
||||
|
||||
static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type,
|
||||
u32 acpi_id, bool ignore_disabled)
|
||||
phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
|
||||
{
|
||||
phys_cpuid_t phys_id;
|
||||
|
||||
phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled);
|
||||
phys_id = map_mat_entry(handle, type, acpi_id);
|
||||
if (invalid_phys_cpuid(phys_id))
|
||||
phys_id = map_madt_entry(get_madt_table(), type, acpi_id,
|
||||
ignore_disabled);
|
||||
phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
|
||||
|
||||
return phys_id;
|
||||
}
|
||||
|
||||
phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
|
||||
{
|
||||
return __acpi_get_phys_id(handle, type, acpi_id, true);
|
||||
}
|
||||
|
||||
int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
|
||||
|
||||
#ifdef CONFIG_ACPI_HOTPLUG_CPU
|
||||
static bool __init
|
||||
map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
|
||||
{
|
||||
int type, id;
|
||||
u32 acpi_id;
|
||||
acpi_status status;
|
||||
acpi_object_type acpi_type;
|
||||
unsigned long long tmp;
|
||||
union acpi_object object = { 0 };
|
||||
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
|
||||
|
||||
status = acpi_get_type(handle, &acpi_type);
|
||||
if (ACPI_FAILURE(status))
|
||||
return false;
|
||||
|
||||
switch (acpi_type) {
|
||||
case ACPI_TYPE_PROCESSOR:
|
||||
status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
|
||||
if (ACPI_FAILURE(status))
|
||||
return false;
|
||||
acpi_id = object.processor.proc_id;
|
||||
|
||||
/* validate the acpi_id */
|
||||
if(acpi_processor_validate_proc_id(acpi_id))
|
||||
return false;
|
||||
break;
|
||||
case ACPI_TYPE_DEVICE:
|
||||
status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
|
||||
if (ACPI_FAILURE(status))
|
||||
return false;
|
||||
acpi_id = tmp;
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
|
||||
type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
|
||||
|
||||
*phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
|
||||
id = acpi_map_cpuid(*phys_id, acpi_id);
|
||||
|
||||
if (id < 0)
|
||||
return false;
|
||||
*cpuid = id;
|
||||
return true;
|
||||
}
|
||||
|
||||
static acpi_status __init
|
||||
set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
|
||||
void **rv)
|
||||
{
|
||||
phys_cpuid_t phys_id;
|
||||
int cpu_id;
|
||||
|
||||
if (!map_processor(handle, &phys_id, &cpu_id))
|
||||
return AE_ERROR;
|
||||
|
||||
acpi_map_cpu2node(handle, cpu_id, phys_id);
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
void __init acpi_set_processor_mapping(void)
|
||||
{
|
||||
/* Set persistent cpu <-> node mapping for all processors. */
|
||||
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
|
||||
ACPI_UINT32_MAX, set_processor_node_mapping,
|
||||
NULL, NULL, NULL);
|
||||
}
|
||||
#else
|
||||
void __init acpi_set_processor_mapping(void) {}
|
||||
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
|
||||
|
||||
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
|
||||
static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
|
||||
u64 *phys_addr, int *ioapic_id)
|
||||
|
@@ -177,7 +177,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
case AHCI_LS1043A:
if (!qpriv->ecc_addr)
return -EINVAL;
writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
@@ -194,7 +195,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
case AHCI_LS1046A:
if (!qpriv->ecc_addr)
return -EINVAL;
writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
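The two ahci_qoriq hunks above replace a plain register write with a read-modify-write, so setting ECC_DIS_ARMV8_CH2 no longer clobbers whatever else is in the ECC register. A minimal sketch of the difference, using a plain volatile pointer in place of readl()/writel() (illustrative only, not the driver code):

#include <stdint.h>

/* Old behaviour: every other bit in the register is overwritten. */
static inline void reg_overwrite(volatile uint32_t *reg, uint32_t bits)
{
	*reg = bits;
}

/* New behaviour: read-modify-write, the other bits are preserved. */
static inline void reg_set_bits(volatile uint32_t *reg, uint32_t bits)
{
	*reg |= bits;
}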
@@ -1482,7 +1482,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
break;

default:
WARN_ON_ONCE(1);
return AC_ERR_SYSTEM;
}
@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,
|
||||
|
||||
static void ata_tport_release(struct device *dev)
|
||||
{
|
||||
put_device(dev->parent);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
|
||||
device_initialize(dev);
|
||||
dev->type = &ata_port_type;
|
||||
|
||||
dev->parent = get_device(parent);
|
||||
dev->parent = parent;
|
||||
dev->release = ata_tport_release;
|
||||
dev_set_name(dev, "ata%d", ap->print_id);
|
||||
transport_setup_device(dev);
|
||||
@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,
|
||||
|
||||
static void ata_tlink_release(struct device *dev)
|
||||
{
|
||||
put_device(dev->parent);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
|
||||
int error;
|
||||
|
||||
device_initialize(dev);
|
||||
dev->parent = get_device(&ap->tdev);
|
||||
dev->parent = &ap->tdev;
|
||||
dev->release = ata_tlink_release;
|
||||
if (ata_is_host_link(link))
|
||||
dev_set_name(dev, "link%d", ap->print_id);
|
||||
@ -589,7 +587,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,
|
||||
|
||||
static void ata_tdev_release(struct device *dev)
|
||||
{
|
||||
put_device(dev->parent);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -662,7 +659,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
|
||||
int error;
|
||||
|
||||
device_initialize(dev);
|
||||
dev->parent = get_device(&link->tdev);
|
||||
dev->parent = &link->tdev;
|
||||
dev->release = ata_tdev_release;
|
||||
if (ata_is_host_link(link))
|
||||
dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
|
||||
|
@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void)
|
||||
return restart_syscall();
|
||||
}
|
||||
|
||||
void assert_held_device_hotplug(void)
|
||||
{
|
||||
lockdep_assert_held(&device_hotplug_lock);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BLOCK
|
||||
static inline int device_is_not_partition(struct device *dev)
|
||||
{
|
||||
|
@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
|
||||
irq, err);
|
||||
return err;
|
||||
}
|
||||
omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
|
||||
|
||||
priv->clk = of_clk_get(pdev->dev.of_node, 0);
|
||||
priv->clk = devm_clk_get(&pdev->dev, NULL);
|
||||
if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
|
||||
return -EPROBE_DEFER;
|
||||
if (!IS_ERR(priv->clk)) {
|
||||
@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
|
||||
dev_err(&pdev->dev, "unable to enable the clk, "
|
||||
"err = %d\n", err);
|
||||
}
|
||||
|
||||
/*
|
||||
* On OMAP4, enabling the shutdown_oflo interrupt is
|
||||
* done in the interrupt mask register. There is no
|
||||
* such register on EIP76, and it's enabled by the
|
||||
* same bit in the control register
|
||||
*/
|
||||
if (priv->pdata->regs[RNG_INTMASK_REG])
|
||||
omap_rng_write(priv, RNG_INTMASK_REG,
|
||||
RNG_SHUTDOWN_OFLO_MASK);
|
||||
else
|
||||
omap_rng_write(priv, RNG_CONTROL_REG,
|
||||
RNG_SHUTDOWN_OFLO_MASK);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -10,7 +10,6 @@
|
||||
#include <linux/io.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/atmel_tc.h>
|
||||
#include <linux/sched_clock.h>
|
||||
|
||||
|
||||
/*
|
||||
@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs)
|
||||
return (upper << 16) | lower;
|
||||
}
|
||||
|
||||
static u32 tc_get_cv32(void)
|
||||
{
|
||||
return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
|
||||
}
|
||||
|
||||
static u64 tc_get_cycles32(struct clocksource *cs)
|
||||
{
|
||||
return tc_get_cv32();
|
||||
return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
|
||||
}
|
||||
|
||||
static struct clocksource clksrc = {
|
||||
@ -75,11 +69,6 @@ static struct clocksource clksrc = {
|
||||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||
};
|
||||
|
||||
static u64 notrace tc_read_sched_clock(void)
|
||||
{
|
||||
return tc_get_cv32();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_GENERIC_CLOCKEVENTS
|
||||
|
||||
struct tc_clkevt_device {
|
||||
@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void)
|
||||
clksrc.read = tc_get_cycles32;
|
||||
/* setup ony channel 0 */
|
||||
tcb_setup_single_chan(tc, best_divisor_idx);
|
||||
|
||||
/* register sched_clock on chips with single 32 bit counter */
|
||||
sched_clock_register(tc_read_sched_clock, 32, divided_rate);
|
||||
} else {
|
||||
/* tclib will give us three clocks no matter what the
|
||||
* underlying platform supports.
|
||||
|
@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y)
|
||||
return div64_u64(x << EXT_FRAC_BITS, y);
|
||||
}
|
||||
|
||||
static inline int32_t percent_ext_fp(int percent)
|
||||
{
|
||||
return div_ext_fp(percent, 100);
|
||||
}
|
||||
|
||||
/**
|
||||
* struct sample - Store performance sample
|
||||
* @core_avg_perf: Ratio of APERF/MPERF which is the actual average
|
||||
@ -359,9 +364,7 @@ static bool driver_registered __read_mostly;
|
||||
static bool acpi_ppc;
|
||||
#endif
|
||||
|
||||
static struct perf_limits performance_limits;
|
||||
static struct perf_limits powersave_limits;
|
||||
static struct perf_limits *limits;
|
||||
static struct perf_limits global;
|
||||
|
||||
static void intel_pstate_init_limits(struct perf_limits *limits)
|
||||
{
|
||||
@ -372,14 +375,6 @@ static void intel_pstate_init_limits(struct perf_limits *limits)
|
||||
limits->max_sysfs_pct = 100;
|
||||
}
|
||||
|
||||
static void intel_pstate_set_performance_limits(struct perf_limits *limits)
|
||||
{
|
||||
intel_pstate_init_limits(limits);
|
||||
limits->min_perf_pct = 100;
|
||||
limits->min_perf = int_ext_tofp(1);
|
||||
limits->min_sysfs_pct = 100;
|
||||
}
|
||||
|
||||
static DEFINE_MUTEX(intel_pstate_driver_lock);
|
||||
static DEFINE_MUTEX(intel_pstate_limits_lock);
|
||||
|
||||
@ -502,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
|
||||
* correct max turbo frequency based on the turbo state.
|
||||
* Also need to convert to MHz as _PSS freq is in MHz.
|
||||
*/
|
||||
if (!limits->turbo_disabled)
|
||||
if (!global.turbo_disabled)
|
||||
cpu->acpi_perf_data.states[0].core_frequency =
|
||||
policy->cpuinfo.max_freq / 1000;
|
||||
cpu->valid_pss_table = true;
|
||||
@ -621,7 +616,7 @@ static inline void update_turbo_state(void)
|
||||
|
||||
cpu = all_cpu_data[0];
|
||||
rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
|
||||
limits->turbo_disabled =
|
||||
global.turbo_disabled =
|
||||
(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
|
||||
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
|
||||
}
|
||||
@ -845,12 +840,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
|
||||
|
||||
static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
|
||||
{
|
||||
int min, hw_min, max, hw_max, cpu, range, adj_range;
|
||||
struct perf_limits *perf_limits = limits;
|
||||
int min, hw_min, max, hw_max, cpu;
|
||||
struct perf_limits *perf_limits = &global;
|
||||
u64 value, cap;
|
||||
|
||||
for_each_cpu(cpu, policy->cpus) {
|
||||
int max_perf_pct, min_perf_pct;
|
||||
struct cpudata *cpu_data = all_cpu_data[cpu];
|
||||
s16 epp;
|
||||
|
||||
@ -859,24 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
|
||||
|
||||
rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
|
||||
hw_min = HWP_LOWEST_PERF(cap);
|
||||
if (limits->no_turbo)
|
||||
if (global.no_turbo)
|
||||
hw_max = HWP_GUARANTEED_PERF(cap);
|
||||
else
|
||||
hw_max = HWP_HIGHEST_PERF(cap);
|
||||
range = hw_max - hw_min;
|
||||
|
||||
max_perf_pct = perf_limits->max_perf_pct;
|
||||
min_perf_pct = perf_limits->min_perf_pct;
|
||||
max = fp_ext_toint(hw_max * perf_limits->max_perf);
|
||||
if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
|
||||
min = max;
|
||||
else
|
||||
min = fp_ext_toint(hw_max * perf_limits->min_perf);
|
||||
|
||||
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
|
||||
adj_range = min_perf_pct * range / 100;
|
||||
min = hw_min + adj_range;
|
||||
|
||||
value &= ~HWP_MIN_PERF(~0L);
|
||||
value |= HWP_MIN_PERF(min);
|
||||
|
||||
adj_range = max_perf_pct * range / 100;
|
||||
max = hw_min + adj_range;
|
||||
|
||||
value &= ~HWP_MAX_PERF(~0L);
|
||||
value |= HWP_MAX_PERF(max);
|
||||
|
||||
@ -969,26 +961,18 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
|
||||
}
|
||||
|
||||
static void intel_pstate_update_policies(void)
|
||||
__releases(&intel_pstate_limits_lock)
|
||||
__acquires(&intel_pstate_limits_lock)
|
||||
{
|
||||
struct perf_limits *saved_limits = limits;
|
||||
int cpu;
|
||||
|
||||
mutex_unlock(&intel_pstate_limits_lock);
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
cpufreq_update_policy(cpu);
|
||||
|
||||
mutex_lock(&intel_pstate_limits_lock);
|
||||
|
||||
limits = saved_limits;
|
||||
}
|
||||
|
||||
/************************** debugfs begin ************************/
|
||||
static int pid_param_set(void *data, u64 val)
|
||||
{
|
||||
*(u32 *)data = val;
|
||||
pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
|
||||
intel_pstate_reset_all_pid();
|
||||
return 0;
|
||||
}
|
||||
@ -1060,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void)
|
||||
static ssize_t show_##file_name \
|
||||
(struct kobject *kobj, struct attribute *attr, char *buf) \
|
||||
{ \
|
||||
return sprintf(buf, "%u\n", limits->object); \
|
||||
return sprintf(buf, "%u\n", global.object); \
|
||||
}
|
||||
|
||||
static ssize_t intel_pstate_show_status(char *buf);
|
||||
@ -1151,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj,
|
||||
}
|
||||
|
||||
update_turbo_state();
|
||||
if (limits->turbo_disabled)
|
||||
ret = sprintf(buf, "%u\n", limits->turbo_disabled);
|
||||
if (global.turbo_disabled)
|
||||
ret = sprintf(buf, "%u\n", global.turbo_disabled);
|
||||
else
|
||||
ret = sprintf(buf, "%u\n", limits->no_turbo);
|
||||
ret = sprintf(buf, "%u\n", global.no_turbo);
|
||||
|
||||
mutex_unlock(&intel_pstate_driver_lock);
|
||||
|
||||
@ -1181,19 +1165,19 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
|
||||
mutex_lock(&intel_pstate_limits_lock);
|
||||
|
||||
update_turbo_state();
|
||||
if (limits->turbo_disabled) {
|
||||
if (global.turbo_disabled) {
|
||||
pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
|
||||
mutex_unlock(&intel_pstate_limits_lock);
|
||||
mutex_unlock(&intel_pstate_driver_lock);
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
limits->no_turbo = clamp_t(int, input, 0, 1);
|
||||
|
||||
intel_pstate_update_policies();
|
||||
global.no_turbo = clamp_t(int, input, 0, 1);
|
||||
|
||||
mutex_unlock(&intel_pstate_limits_lock);
|
||||
|
||||
intel_pstate_update_policies();
|
||||
|
||||
mutex_unlock(&intel_pstate_driver_lock);
|
||||
|
||||
return count;
|
||||
@ -1218,19 +1202,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
|
||||
|
||||
mutex_lock(&intel_pstate_limits_lock);
|
||||
|
||||
limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
|
||||
limits->max_perf_pct = min(limits->max_policy_pct,
|
||||
limits->max_sysfs_pct);
|
||||
limits->max_perf_pct = max(limits->min_policy_pct,
|
||||
limits->max_perf_pct);
|
||||
limits->max_perf_pct = max(limits->min_perf_pct,
|
||||
limits->max_perf_pct);
|
||||
limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
|
||||
|
||||
intel_pstate_update_policies();
|
||||
global.max_sysfs_pct = clamp_t(int, input, 0 , 100);
|
||||
global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct);
|
||||
global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct);
|
||||
global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct);
|
||||
global.max_perf = percent_ext_fp(global.max_perf_pct);
|
||||
|
||||
mutex_unlock(&intel_pstate_limits_lock);
|
||||
|
||||
intel_pstate_update_policies();
|
||||
|
||||
mutex_unlock(&intel_pstate_driver_lock);
|
||||
|
||||
return count;
|
||||
@ -1255,19 +1236,16 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
|
||||
|
||||
mutex_lock(&intel_pstate_limits_lock);
|
||||
|
||||
limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
|
||||
limits->min_perf_pct = max(limits->min_policy_pct,
|
||||
limits->min_sysfs_pct);
|
||||
limits->min_perf_pct = min(limits->max_policy_pct,
|
||||
limits->min_perf_pct);
|
||||
limits->min_perf_pct = min(limits->max_perf_pct,
|
||||
limits->min_perf_pct);
|
||||
limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
|
||||
|
||||
intel_pstate_update_policies();
|
||||
global.min_sysfs_pct = clamp_t(int, input, 0 , 100);
|
||||
global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct);
|
||||
global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct);
|
||||
global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct);
|
||||
global.min_perf = percent_ext_fp(global.min_perf_pct);
|
||||
|
||||
mutex_unlock(&intel_pstate_limits_lock);
|
||||
|
||||
intel_pstate_update_policies();
|
||||
|
||||
mutex_unlock(&intel_pstate_driver_lock);
|
||||
|
||||
return count;
|
||||
@ -1387,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate)
|
||||
u32 vid;
|
||||
|
||||
val = (u64)pstate << 8;
|
||||
if (limits->no_turbo && !limits->turbo_disabled)
|
||||
if (global.no_turbo && !global.turbo_disabled)
|
||||
val |= (u64)1 << 32;
|
||||
|
||||
vid_fp = cpudata->vid.min + mul_fp(
|
||||
@ -1557,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
|
||||
u64 val;
|
||||
|
||||
val = (u64)pstate << 8;
|
||||
if (limits->no_turbo && !limits->turbo_disabled)
|
||||
if (global.no_turbo && !global.turbo_disabled)
|
||||
val |= (u64)1 << 32;
|
||||
|
||||
return val;
|
||||
@ -1683,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
|
||||
int max_perf = cpu->pstate.turbo_pstate;
|
||||
int max_perf_adj;
|
||||
int min_perf;
|
||||
struct perf_limits *perf_limits = limits;
|
||||
struct perf_limits *perf_limits = &global;
|
||||
|
||||
if (limits->no_turbo || limits->turbo_disabled)
|
||||
if (global.no_turbo || global.turbo_disabled)
|
||||
max_perf = cpu->pstate.max_pstate;
|
||||
|
||||
if (per_cpu_limits)
|
||||
@ -1820,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
|
||||
|
||||
sample->busy_scaled = busy_frac * 100;
|
||||
|
||||
target = limits->no_turbo || limits->turbo_disabled ?
|
||||
target = global.no_turbo || global.turbo_disabled ?
|
||||
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
|
||||
target += target >> 2;
|
||||
target = mul_fp(target, busy_frac);
|
||||
@ -2080,36 +2058,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
|
||||
static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
|
||||
struct perf_limits *limits)
|
||||
{
|
||||
int32_t max_policy_perf, min_policy_perf;
|
||||
|
||||
limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
|
||||
policy->cpuinfo.max_freq);
|
||||
limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
|
||||
max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
|
||||
max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
|
||||
if (policy->max == policy->min) {
|
||||
limits->min_policy_pct = limits->max_policy_pct;
|
||||
min_policy_perf = max_policy_perf;
|
||||
} else {
|
||||
limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
|
||||
policy->cpuinfo.max_freq);
|
||||
limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
|
||||
0, 100);
|
||||
min_policy_perf = div_ext_fp(policy->min,
|
||||
policy->cpuinfo.max_freq);
|
||||
min_policy_perf = clamp_t(int32_t, min_policy_perf,
|
||||
0, max_policy_perf);
|
||||
}
|
||||
|
||||
/* Normalize user input to [min_policy_pct, max_policy_pct] */
|
||||
limits->min_perf_pct = max(limits->min_policy_pct,
|
||||
limits->min_sysfs_pct);
|
||||
limits->min_perf_pct = min(limits->max_policy_pct,
|
||||
limits->min_perf_pct);
|
||||
limits->max_perf_pct = min(limits->max_policy_pct,
|
||||
limits->max_sysfs_pct);
|
||||
limits->max_perf_pct = max(limits->min_policy_pct,
|
||||
limits->max_perf_pct);
|
||||
/* Normalize user input to [min_perf, max_perf] */
|
||||
limits->min_perf = max(min_policy_perf,
|
||||
percent_ext_fp(limits->min_sysfs_pct));
|
||||
limits->min_perf = min(limits->min_perf, max_policy_perf);
|
||||
limits->max_perf = min(max_policy_perf,
|
||||
percent_ext_fp(limits->max_sysfs_pct));
|
||||
limits->max_perf = max(min_policy_perf, limits->max_perf);
|
||||
|
||||
/* Make sure min_perf_pct <= max_perf_pct */
|
||||
limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
|
||||
/* Make sure min_perf <= max_perf */
|
||||
limits->min_perf = min(limits->min_perf, limits->max_perf);
|
||||
|
||||
limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
|
||||
limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
|
||||
limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
|
||||
limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
|
||||
limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
|
||||
limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);
|
||||
|
||||
pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
|
||||
limits->max_perf_pct, limits->min_perf_pct);
|
||||
@ -2118,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
|
||||
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cpudata *cpu;
|
||||
struct perf_limits *perf_limits = NULL;
|
||||
struct perf_limits *perf_limits = &global;
|
||||
|
||||
if (!policy->cpuinfo.max_freq)
|
||||
return -ENODEV;
|
||||
@ -2141,21 +2117,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
|
||||
|
||||
mutex_lock(&intel_pstate_limits_lock);
|
||||
|
||||
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
|
||||
pr_debug("set performance\n");
|
||||
if (!perf_limits) {
|
||||
limits = &performance_limits;
|
||||
perf_limits = limits;
|
||||
}
|
||||
} else {
|
||||
pr_debug("set powersave\n");
|
||||
if (!perf_limits) {
|
||||
limits = &powersave_limits;
|
||||
perf_limits = limits;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
intel_pstate_update_perf_limits(policy, perf_limits);
|
||||
|
||||
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
|
||||
@ -2179,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
|
||||
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cpudata *cpu = all_cpu_data[policy->cpu];
|
||||
struct perf_limits *perf_limits;
|
||||
|
||||
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
|
||||
perf_limits = &performance_limits;
|
||||
else
|
||||
perf_limits = &powersave_limits;
|
||||
|
||||
update_turbo_state();
|
||||
policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
|
||||
perf_limits->no_turbo ?
|
||||
policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ?
|
||||
cpu->pstate.max_freq :
|
||||
cpu->pstate.turbo_freq;
|
||||
|
||||
@ -2203,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
|
||||
unsigned int max_freq, min_freq;
|
||||
|
||||
max_freq = policy->cpuinfo.max_freq *
|
||||
perf_limits->max_sysfs_pct / 100;
|
||||
global.max_sysfs_pct / 100;
|
||||
min_freq = policy->cpuinfo.max_freq *
|
||||
perf_limits->min_sysfs_pct / 100;
|
||||
global.min_sysfs_pct / 100;
|
||||
cpufreq_verify_within_limits(policy, min_freq, max_freq);
|
||||
}
|
||||
|
||||
@ -2257,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
|
||||
/* cpuinfo and default policy values */
|
||||
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
|
||||
update_turbo_state();
|
||||
policy->cpuinfo.max_freq = limits->turbo_disabled ?
|
||||
policy->cpuinfo.max_freq = global.turbo_disabled ?
|
||||
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
|
||||
policy->cpuinfo.max_freq *= cpu->pstate.scaling;
|
||||
|
||||
@ -2277,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
|
||||
return ret;
|
||||
|
||||
policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
|
||||
if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
|
||||
if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE))
|
||||
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
|
||||
else
|
||||
policy->policy = CPUFREQ_POLICY_POWERSAVE;
|
||||
@ -2303,7 +2257,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
|
||||
struct cpudata *cpu = all_cpu_data[policy->cpu];
|
||||
|
||||
update_turbo_state();
|
||||
policy->cpuinfo.max_freq = limits->turbo_disabled ?
|
||||
policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ?
|
||||
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
|
||||
|
||||
cpufreq_verify_within_cpu_limits(policy);
|
||||
@ -2311,26 +2265,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu,
|
||||
struct cpufreq_policy *policy,
|
||||
unsigned int target_freq)
|
||||
{
|
||||
unsigned int max_freq;
|
||||
|
||||
update_turbo_state();
|
||||
|
||||
max_freq = limits->no_turbo || limits->turbo_disabled ?
|
||||
cpu->pstate.max_freq : cpu->pstate.turbo_freq;
|
||||
policy->cpuinfo.max_freq = max_freq;
|
||||
if (policy->max > max_freq)
|
||||
policy->max = max_freq;
|
||||
|
||||
if (target_freq > max_freq)
|
||||
target_freq = max_freq;
|
||||
|
||||
return target_freq;
|
||||
}
|
||||
|
||||
static int intel_cpufreq_target(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation)
|
||||
@ -2339,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
|
||||
struct cpufreq_freqs freqs;
|
||||
int target_pstate;
|
||||
|
||||
update_turbo_state();
|
||||
|
||||
freqs.old = policy->cur;
|
||||
freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq);
|
||||
freqs.new = target_freq;
|
||||
|
||||
cpufreq_freq_transition_begin(policy, &freqs);
|
||||
switch (relation) {
|
||||
@ -2372,7 +2308,8 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
|
||||
struct cpudata *cpu = all_cpu_data[policy->cpu];
|
||||
int target_pstate;
|
||||
|
||||
target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
|
||||
update_turbo_state();
|
||||
|
||||
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
|
||||
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
|
||||
intel_pstate_update_pstate(cpu, target_pstate);
|
||||
@ -2427,13 +2364,7 @@ static int intel_pstate_register_driver(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
intel_pstate_init_limits(&powersave_limits);
|
||||
intel_pstate_set_performance_limits(&performance_limits);
|
||||
if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
|
||||
intel_pstate_driver == &intel_pstate)
|
||||
limits = &performance_limits;
|
||||
else
|
||||
limits = &powersave_limits;
|
||||
intel_pstate_init_limits(&global);
|
||||
|
||||
ret = cpufreq_register_driver(intel_pstate_driver);
|
||||
if (ret) {
|
||||
|
@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
    scatterwalk_done(&walk, out, 0);
}

static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
static void s5p_sg_done(struct s5p_aes_dev *dev)
{
    if (dev->sg_dst_cpy) {
        dev_dbg(dev->dev,
@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
    }
    s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
    s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
}

/* holding a lock outside */
/* Calls the completion. Cannot be called with dev->lock hold. */
static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
{
    dev->req->base.complete(&dev->req->base, err);
    dev->busy = false;
}
@ -368,51 +371,44 @@ exit:
}

/*
 * Returns true if new transmitting (output) data is ready and its
 * address+length have to be written to device (by calling
 * s5p_set_dma_outdata()). False otherwise.
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new transmitting (output) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_outdata()).
 */
static bool s5p_aes_tx(struct s5p_aes_dev *dev)
static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
    int err = 0;
    bool ret = false;
    int ret = 0;

    s5p_unset_outdata(dev);

    if (!sg_is_last(dev->sg_dst)) {
        err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
        if (err)
            s5p_aes_complete(dev, err);
        else
            ret = true;
    } else {
        s5p_aes_complete(dev, err);

        dev->busy = true;
        tasklet_schedule(&dev->tasklet);
        ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
        if (!ret)
            ret = 1;
    }

    return ret;
}

/*
 * Returns true if new receiving (input) data is ready and its
 * address+length have to be written to device (by calling
 * s5p_set_dma_indata()). False otherwise.
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new receiving (input) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_indata()).
 */
static bool s5p_aes_rx(struct s5p_aes_dev *dev)
static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
{
    int err;
    bool ret = false;
    int ret = 0;

    s5p_unset_indata(dev);

    if (!sg_is_last(dev->sg_src)) {
        err = s5p_set_indata(dev, sg_next(dev->sg_src));
        if (err)
            s5p_aes_complete(dev, err);
        else
            ret = true;
        ret = s5p_set_indata(dev, sg_next(dev->sg_src));
        if (!ret)
            ret = 1;
    }

    return ret;
@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
    struct platform_device *pdev = dev_id;
    struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
    bool set_dma_tx = false;
    bool set_dma_rx = false;
    int err_dma_tx = 0;
    int err_dma_rx = 0;
    bool tx_end = false;
    unsigned long flags;
    uint32_t status;
    int err;

    spin_lock_irqsave(&dev->lock, flags);

    /*
     * Handle rx or tx interrupt. If there is still data (scatterlist did not
     * reach end), then map next scatterlist entry.
     * In case of such mapping error, s5p_aes_complete() should be called.
     *
     * If there is no more data in tx scatter list, call s5p_aes_complete()
     * and schedule new tasklet.
     */
    status = SSS_READ(dev, FCINTSTAT);
    if (status & SSS_FCINTSTAT_BRDMAINT)
        set_dma_rx = s5p_aes_rx(dev);
    if (status & SSS_FCINTSTAT_BTDMAINT)
        set_dma_tx = s5p_aes_tx(dev);
        err_dma_rx = s5p_aes_rx(dev);

    if (status & SSS_FCINTSTAT_BTDMAINT) {
        if (sg_is_last(dev->sg_dst))
            tx_end = true;
        err_dma_tx = s5p_aes_tx(dev);
    }

    SSS_WRITE(dev, FCINTPEND, status);

    /*
     * Writing length of DMA block (either receiving or transmitting)
     * will start the operation immediately, so this should be done
     * at the end (even after clearing pending interrupts to not miss the
     * interrupt).
     */
    if (set_dma_tx)
        s5p_set_dma_outdata(dev, dev->sg_dst);
    if (set_dma_rx)
        s5p_set_dma_indata(dev, dev->sg_src);
    if (err_dma_rx < 0) {
        err = err_dma_rx;
        goto error;
    }
    if (err_dma_tx < 0) {
        err = err_dma_tx;
        goto error;
    }

    if (tx_end) {
        s5p_sg_done(dev);

        spin_unlock_irqrestore(&dev->lock, flags);

        s5p_aes_complete(dev, 0);
        dev->busy = true;
        tasklet_schedule(&dev->tasklet);
    } else {
        /*
         * Writing length of DMA block (either receiving or
         * transmitting) will start the operation immediately, so this
         * should be done at the end (even after clearing pending
         * interrupts to not miss the interrupt).
         */
        if (err_dma_tx == 1)
            s5p_set_dma_outdata(dev, dev->sg_dst);
        if (err_dma_rx == 1)
            s5p_set_dma_indata(dev, dev->sg_src);

        spin_unlock_irqrestore(&dev->lock, flags);
    }

    return IRQ_HANDLED;

error:
    s5p_sg_done(dev);
    spin_unlock_irqrestore(&dev->lock, flags);
    s5p_aes_complete(dev, err);

    return IRQ_HANDLED;
}
@ -597,8 +633,9 @@ outdata_error:
    s5p_unset_indata(dev);

indata_error:
    s5p_aes_complete(dev, err);
    s5p_sg_done(dev);
    spin_unlock_irqrestore(&dev->lock, flags);
    s5p_aes_complete(dev, err);
}

static void s5p_tasklet_cb(unsigned long data)
@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
        dev_warn(dev, "feed control interrupt is not available.\n");
        goto err_irq;
    }
    err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
                           IRQF_SHARED, pdev->name, pdev);
    err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
                                    s5p_aes_interrupt, IRQF_ONESHOT,
                                    pdev->name, pdev);
    if (err < 0) {
        dev_warn(dev, "feed control interrupt is not available.\n");
        goto err_irq;
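
A rough, standalone sketch of the new s5p_aes_rx()/s5p_aes_tx() calling convention used by the interrupt handler above: a negative value means mapping the next scatterlist entry failed, 1 means a new block is ready and its DMA address/length should be programmed, 0 means the scatterlist is exhausted. The helper below is a stand-in, not the driver's real code.

    /* Illustrative only: the <0 / 0 / 1 dispatch the IRQ handler relies on. */
    #include <stdio.h>

    static int fake_map_next(int remaining)
    {
        if (remaining < 0)
            return -12;   /* pretend a dma_map_sg() failure (-ENOMEM) */
        return remaining ? 1 : 0;
    }

    static void handle(int remaining)
    {
        int ret = fake_map_next(remaining);

        if (ret < 0)
            printf("error %d: finish the request with an error\n", ret);
        else if (ret == 1)
            printf("new block mapped: write address/length to start DMA\n");
        else
            printf("scatterlist exhausted: complete the request\n");
    }

    int main(void)
    {
        handle(2);    /* more data pending */
        handle(0);    /* last entry done */
        handle(-1);   /* simulated mapping failure */
        return 0;
    }
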
@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
    int rc = VM_FAULT_SIGBUS;
    phys_addr_t phys;
    pfn_t pfn;
    unsigned int fault_size = PAGE_SIZE;

    if (check_vma(dax_dev, vmf->vma, __func__))
        return VM_FAULT_SIGBUS;
@ -437,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;
    }

    if (fault_size != dax_region->align)
        return VM_FAULT_SIGBUS;

    phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
    if (phys == -1) {
        dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
        dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                vmf->pgoff);
        return VM_FAULT_SIGBUS;
    }
@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
    phys_addr_t phys;
    pgoff_t pgoff;
    pfn_t pfn;
    unsigned int fault_size = PMD_SIZE;

    if (check_vma(dax_dev, vmf->vma, __func__))
        return VM_FAULT_SIGBUS;
@ -480,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;
    }

    if (fault_size < dax_region->align)
        return VM_FAULT_SIGBUS;
    else if (fault_size > dax_region->align)
        return VM_FAULT_FALLBACK;

    /* if we are outside of the VMA */
    if (pmd_addr < vmf->vma->vm_start ||
        (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
        return VM_FAULT_SIGBUS;

    pgoff = linear_page_index(vmf->vma, pmd_addr);
    phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
    if (phys == -1) {
        dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
        dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                pgoff);
        return VM_FAULT_SIGBUS;
    }
@ -503,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
    phys_addr_t phys;
    pgoff_t pgoff;
    pfn_t pfn;
    unsigned int fault_size = PUD_SIZE;

    if (check_vma(dax_dev, vmf->vma, __func__))
        return VM_FAULT_SIGBUS;
@ -519,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
        return VM_FAULT_SIGBUS;
    }

    if (fault_size < dax_region->align)
        return VM_FAULT_SIGBUS;
    else if (fault_size > dax_region->align)
        return VM_FAULT_FALLBACK;

    /* if we are outside of the VMA */
    if (pud_addr < vmf->vma->vm_start ||
        (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
        return VM_FAULT_SIGBUS;

    pgoff = linear_page_index(vmf->vma, pud_addr);
    phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
    if (phys == -1) {
        dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
        dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
                pgoff);
        return VM_FAULT_SIGBUS;
    }
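
A compact sketch of the fault-size checks added above: the PTE path requires the fault size to match the dax region alignment exactly, while the PMD/PUD paths reject smaller faults and fall back for larger ones. The constants and return names below only mimic VM_FAULT_SIGBUS/VM_FAULT_FALLBACK for illustration.

    #include <stdio.h>

    enum fault_result { OK, SIGBUS, FALLBACK };

    static enum fault_result check_fault_size(unsigned long fault_size,
                                              unsigned long align, int exact)
    {
        if (exact)                       /* PTE path */
            return fault_size == align ? OK : SIGBUS;
        if (fault_size < align)          /* PMD/PUD path */
            return SIGBUS;
        if (fault_size > align)
            return FALLBACK;
        return OK;
    }

    int main(void)
    {
        static const char *name[] = { "OK", "SIGBUS", "FALLBACK" };
        const unsigned long PTE = 4096UL, PMD = 2UL << 20, PUD = 1UL << 30;
        const unsigned long align = 2UL << 20;   /* assume a 2 MiB region */

        printf("pte fault:  %s\n", name[check_fault_size(PTE, align, 1)]);
        printf("pmd fault:  %s\n", name[check_fault_size(PMD, align, 0)]);
        printf("pud fault:  %s\n", name[check_fault_size(PUD, align, 0)]);
        return 0;
    }
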
@ -3,6 +3,4 @@
# of AMDSOC/AMDGPU drm driver.
# It provides the HW control for ACP related functionalities.

subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include

AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o

@ -240,6 +240,8 @@ free_partial_kdata:
    for (; i >= 0; i--)
        drm_free_large(p->chunks[i].kdata);
    kfree(p->chunks);
    p->chunks = NULL;
    p->nchunks = 0;
put_ctx:
    amdgpu_ctx_put(p->ctx);
free_chunk:
@ -2590,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
        use_bank = 0;
    }

    *pos &= 0x3FFFF;
    *pos &= (1UL << 22) - 1;

    if (use_bank) {
        if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@ -2666,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
        use_bank = 0;
    }

    *pos &= 0x3FFFF;
    *pos &= (1UL << 22) - 1;

    if (use_bank) {
        if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
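
The mask change above widens the debugfs offset mask from 18 bits (0x3FFFF) to 22 bits ((1UL << 22) - 1), so register offsets beyond 256 KiB are no longer truncated. A tiny illustration with a made-up offset:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pos = 0x48200;   /* hypothetical register offset */

        printf("old mask: %#lx\n", pos & 0x3FFFF);           /* 0x8200, truncated  */
        printf("new mask: %#lx\n", pos & ((1UL << 22) - 1));  /* 0x48200, preserved */
        return 0;
    }
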
@ -3464,6 +3464,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
            (adev->pdev->device == 0x6667)) {
            max_sclk = 75000;
        }
    } else if (adev->asic_type == CHIP_OLAND) {
        if ((adev->pdev->device == 0x6604) &&
            (adev->pdev->subsystem_vendor == 0x1028) &&
            (adev->pdev->subsystem_device == 0x066F)) {
            max_sclk = 75000;
        }
    }

    if (rps->vce_active) {

@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle)
        /* rev0 hardware requires workarounds to support PG */
        adev->pg_flags = 0;
        if (adev->rev_id != 0x00) {
            adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
            adev->pg_flags |=
                AMD_PG_SUPPORT_GFX_SMG |
                AMD_PG_SUPPORT_GFX_PIPELINE |
                AMD_PG_SUPPORT_CP |

@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
    if (bgate) {
        cgs_set_powergating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_VCE,
                                  AMD_PG_STATE_UNGATE);
                                  AMD_PG_STATE_GATE);
        cgs_set_clockgating_state(hwmgr->device,
                                  AMD_IP_BLOCK_TYPE_VCE,
                                  AMD_CG_STATE_GATE);
@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)

    clk_prepare_enable(hwdev->pxlclk);

    /* mclk needs to be set to the same or higher rate than pxlclk */
    clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
    /* We rely on firmware to set mclk to a sensible level. */
    clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);

    hwdev->modeset(hwdev, &vm);

@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = {
    { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
    { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
    { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
    { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 },
    { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
};

#define MALIDP_DE_DEFAULT_PREFETCH_START 5

@ -37,6 +37,8 @@
#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
#define MALIDP_LAYER_COMP_SIZE 0x010
#define MALIDP_LAYER_OFFSET 0x014
#define MALIDP550_LS_ENABLE 0x01c
#define MALIDP550_LS_R1_IN_SIZE 0x020

/*
 * This 4-entry look-up-table is used to determine the full 8-bit alpha value
@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
            LAYER_V_VAL(plane->state->crtc_y),
            mp->layer->base + MALIDP_LAYER_OFFSET);

    if (mp->layer->id == DE_SMART)
        malidp_hw_write(mp->hwdev,
                LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
                mp->layer->base + MALIDP550_LS_R1_IN_SIZE);

    /* first clear the rotation bits */
    val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
    val &= ~LAYER_ROT_MASK;
@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm)
        plane->hwdev = malidp->dev;
        plane->layer = &map->layers[i];

        /* Skip the features which the SMART layer doesn't have */
        if (id == DE_SMART)
        if (id == DE_SMART) {
            /*
             * Enable the first rectangle in the SMART layer to be
             * able to use it as a drm plane.
             */
            malidp_hw_write(malidp->dev, 1,
                    plane->layer->base + MALIDP550_LS_ENABLE);
            /* Skip the features which the SMART layer doesn't have. */
            continue;
        }

        drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
        malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,

@ -84,6 +84,7 @@
/* Stride register offsets relative to Lx_BASE */
#define MALIDP_DE_LG_STRIDE 0x18
#define MALIDP_DE_LV_STRIDE0 0x18
#define MALIDP550_DE_LS_R1_STRIDE 0x28

/* macros to set values into registers */
#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
@ -293,6 +293,7 @@ enum plane_id {
|
||||
PLANE_PRIMARY,
|
||||
PLANE_SPRITE0,
|
||||
PLANE_SPRITE1,
|
||||
PLANE_SPRITE2,
|
||||
PLANE_CURSOR,
|
||||
I915_MAX_PLANES,
|
||||
};
|
||||
|
@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
|
||||
|
||||
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
|
||||
|
||||
ret = -ENODEV;
|
||||
if (obj->ops->pwrite)
|
||||
ret = obj->ops->pwrite(obj, args);
|
||||
if (ret != -ENODEV)
|
||||
goto err;
|
||||
|
||||
ret = i915_gem_object_wait(obj,
|
||||
I915_WAIT_INTERRUPTIBLE |
|
||||
I915_WAIT_ALL,
|
||||
@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
|
||||
*/
|
||||
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
|
||||
obj->mm.madv = __I915_MADV_PURGED;
|
||||
obj->mm.pages = ERR_PTR(-EFAULT);
|
||||
}
|
||||
|
||||
/* Try to discard unwanted pages */
|
||||
@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
|
||||
|
||||
__i915_gem_object_reset_page_iter(obj);
|
||||
|
||||
obj->ops->put_pages(obj, pages);
|
||||
if (!IS_ERR(pages))
|
||||
obj->ops->put_pages(obj, pages);
|
||||
|
||||
unlock:
|
||||
mutex_unlock(&obj->mm.lock);
|
||||
}
|
||||
@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (unlikely(!obj->mm.pages)) {
|
||||
if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
|
||||
err = ____i915_gem_object_get_pages(obj);
|
||||
if (err)
|
||||
goto unlock;
|
||||
@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
|
||||
|
||||
pinned = true;
|
||||
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
|
||||
if (unlikely(!obj->mm.pages)) {
|
||||
if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
|
||||
ret = ____i915_gem_object_get_pages(obj);
|
||||
if (ret)
|
||||
goto err_unlock;
|
||||
@ -2563,6 +2572,75 @@ err_unlock:
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
static int
|
||||
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
|
||||
const struct drm_i915_gem_pwrite *arg)
|
||||
{
|
||||
struct address_space *mapping = obj->base.filp->f_mapping;
|
||||
char __user *user_data = u64_to_user_ptr(arg->data_ptr);
|
||||
u64 remain, offset;
|
||||
unsigned int pg;
|
||||
|
||||
/* Before we instantiate/pin the backing store for our use, we
|
||||
* can prepopulate the shmemfs filp efficiently using a write into
|
||||
* the pagecache. We avoid the penalty of instantiating all the
|
||||
* pages, important if the user is just writing to a few and never
|
||||
* uses the object on the GPU, and using a direct write into shmemfs
|
||||
* allows it to avoid the cost of retrieving a page (either swapin
|
||||
* or clearing-before-use) before it is overwritten.
|
||||
*/
|
||||
if (READ_ONCE(obj->mm.pages))
|
||||
return -ENODEV;
|
||||
|
||||
/* Before the pages are instantiated the object is treated as being
|
||||
* in the CPU domain. The pages will be clflushed as required before
|
||||
* use, and we can freely write into the pages directly. If userspace
|
||||
* races pwrite with any other operation; corruption will ensue -
|
||||
* that is userspace's prerogative!
|
||||
*/
|
||||
|
||||
remain = arg->size;
|
||||
offset = arg->offset;
|
||||
pg = offset_in_page(offset);
|
||||
|
||||
do {
|
||||
unsigned int len, unwritten;
|
||||
struct page *page;
|
||||
void *data, *vaddr;
|
||||
int err;
|
||||
|
||||
len = PAGE_SIZE - pg;
|
||||
if (len > remain)
|
||||
len = remain;
|
||||
|
||||
err = pagecache_write_begin(obj->base.filp, mapping,
|
||||
offset, len, 0,
|
||||
&page, &data);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
vaddr = kmap(page);
|
||||
unwritten = copy_from_user(vaddr + pg, user_data, len);
|
||||
kunmap(page);
|
||||
|
||||
err = pagecache_write_end(obj->base.filp, mapping,
|
||||
offset, len, len - unwritten,
|
||||
page, data);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (unwritten)
|
||||
return -EFAULT;
|
||||
|
||||
remain -= len;
|
||||
user_data += len;
|
||||
offset += len;
|
||||
pg = 0;
|
||||
} while (remain);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool ban_context(const struct i915_gem_context *ctx)
|
||||
{
|
||||
return (i915_gem_context_is_bannable(ctx) &&
|
||||
@ -3029,6 +3107,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
|
||||
args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
|
||||
if (args->timeout_ns < 0)
|
||||
args->timeout_ns = 0;
|
||||
|
||||
/*
|
||||
* Apparently ktime isn't accurate enough and occasionally has a
|
||||
* bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
|
||||
* things up to make the test happy. We allow up to 1 jiffy.
|
||||
*
|
||||
* This is a regression from the timespec->ktime conversion.
|
||||
*/
|
||||
if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
|
||||
args->timeout_ns = 0;
|
||||
}
|
||||
|
||||
i915_gem_object_put(obj);
|
||||
@ -3974,8 +4062,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
|
||||
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
|
||||
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
|
||||
I915_GEM_OBJECT_IS_SHRINKABLE,
|
||||
|
||||
.get_pages = i915_gem_object_get_pages_gtt,
|
||||
.put_pages = i915_gem_object_put_pages_gtt,
|
||||
|
||||
.pwrite = i915_gem_object_pwrite_gtt,
|
||||
};
|
||||
|
||||
struct drm_i915_gem_object *
|
||||
|
@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
|
||||
* those as well to make room for our guard pages.
|
||||
*/
|
||||
if (check_color) {
|
||||
if (vma->node.start + vma->node.size == node->start) {
|
||||
if (vma->node.color == node->color)
|
||||
if (node->start + node->size == target->start) {
|
||||
if (node->color == target->color)
|
||||
continue;
|
||||
}
|
||||
if (vma->node.start == node->start + node->size) {
|
||||
if (vma->node.color == node->color)
|
||||
if (node->start == target->start + target->size) {
|
||||
if (node->color == target->color)
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops {
|
||||
struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
|
||||
void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
|
||||
|
||||
int (*pwrite)(struct drm_i915_gem_object *,
|
||||
const struct drm_i915_gem_pwrite *);
|
||||
|
||||
int (*dmabuf_export)(struct drm_i915_gem_object *);
|
||||
void (*release)(struct drm_i915_gem_object *);
|
||||
};
|
||||
|
@ -512,10 +512,36 @@ err_unpin:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void
|
||||
i915_vma_remove(struct i915_vma *vma)
|
||||
{
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
|
||||
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
|
||||
GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
|
||||
|
||||
drm_mm_remove_node(&vma->node);
|
||||
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
|
||||
|
||||
/* Since the unbound list is global, only move to that list if
|
||||
* no more VMAs exist.
|
||||
*/
|
||||
if (--obj->bind_count == 0)
|
||||
list_move_tail(&obj->global_link,
|
||||
&to_i915(obj->base.dev)->mm.unbound_list);
|
||||
|
||||
/* And finally now the object is completely decoupled from this vma,
|
||||
* we can drop its hold on the backing storage and allow it to be
|
||||
* reaped by the shrinker.
|
||||
*/
|
||||
i915_gem_object_unpin_pages(obj);
|
||||
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
|
||||
}
|
||||
|
||||
int __i915_vma_do_pin(struct i915_vma *vma,
|
||||
u64 size, u64 alignment, u64 flags)
|
||||
{
|
||||
unsigned int bound = vma->flags;
|
||||
const unsigned int bound = vma->flags;
|
||||
int ret;
|
||||
|
||||
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
|
||||
@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
|
||||
|
||||
if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
|
||||
ret = -EBUSY;
|
||||
goto err;
|
||||
goto err_unpin;
|
||||
}
|
||||
|
||||
if ((bound & I915_VMA_BIND_MASK) == 0) {
|
||||
ret = i915_vma_insert(vma, size, alignment, flags);
|
||||
if (ret)
|
||||
goto err;
|
||||
goto err_unpin;
|
||||
}
|
||||
|
||||
ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
|
||||
if (ret)
|
||||
goto err;
|
||||
goto err_remove;
|
||||
|
||||
if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
|
||||
__i915_vma_set_map_and_fenceable(vma);
|
||||
@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
|
||||
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
|
||||
return 0;
|
||||
|
||||
err:
|
||||
err_remove:
|
||||
if ((bound & I915_VMA_BIND_MASK) == 0) {
|
||||
GEM_BUG_ON(vma->pages);
|
||||
i915_vma_remove(vma);
|
||||
}
|
||||
err_unpin:
|
||||
__i915_vma_unpin(vma);
|
||||
return ret;
|
||||
}
|
||||
@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma)
|
||||
}
|
||||
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
|
||||
|
||||
drm_mm_remove_node(&vma->node);
|
||||
list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
|
||||
|
||||
if (vma->pages != obj->mm.pages) {
|
||||
GEM_BUG_ON(!vma->pages);
|
||||
sg_free_table(vma->pages);
|
||||
@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma)
|
||||
}
|
||||
vma->pages = NULL;
|
||||
|
||||
/* Since the unbound list is global, only move to that list if
|
||||
* no more VMAs exist. */
|
||||
if (--obj->bind_count == 0)
|
||||
list_move_tail(&obj->global_link,
|
||||
&to_i915(obj->base.dev)->mm.unbound_list);
|
||||
|
||||
/* And finally now the object is completely decoupled from this vma,
|
||||
* we can drop its hold on the backing storage and allow it to be
|
||||
* reaped by the shrinker.
|
||||
*/
|
||||
i915_gem_object_unpin_pages(obj);
|
||||
GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
|
||||
i915_vma_remove(vma);
|
||||
|
||||
destroy:
|
||||
if (unlikely(i915_vma_is_closed(vma)))
|
||||
|
@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
|
||||
/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
|
||||
crtc->base.mode = crtc->base.state->mode;
|
||||
|
||||
DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
|
||||
old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
|
||||
pipe_config->pipe_src_w, pipe_config->pipe_src_h);
|
||||
|
||||
/*
|
||||
* Update pipe size and adjust fitter if needed: the reason for this is
|
||||
* that in compute_mode_changes we check the native mode (not the pfit
|
||||
@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
|
||||
struct intel_crtc_scaler_state *scaler_state =
|
||||
&crtc->config->scaler_state;
|
||||
|
||||
DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
|
||||
|
||||
if (crtc->config->pch_pfit.enabled) {
|
||||
int id;
|
||||
|
||||
if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
|
||||
DRM_ERROR("Requesting pfit without getting a scaler first\n");
|
||||
if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
|
||||
return;
|
||||
}
|
||||
|
||||
id = scaler_state->scaler_id;
|
||||
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
|
||||
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
|
||||
I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
|
||||
I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
|
||||
|
||||
DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
|
||||
}
|
||||
}
|
||||
|
||||
@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
|
||||
} while (progress);
|
||||
}
|
||||
|
||||
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_atomic_state *state, *next;
|
||||
struct llist_node *freed;
|
||||
|
||||
freed = llist_del_all(&dev_priv->atomic_helper.free_list);
|
||||
llist_for_each_entry_safe(state, next, freed, freed)
|
||||
drm_atomic_state_put(&state->base);
|
||||
}
|
||||
|
||||
static void intel_atomic_helper_free_state_worker(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, typeof(*dev_priv), atomic_helper.free_work);
|
||||
|
||||
intel_atomic_helper_free_state(dev_priv);
|
||||
}
|
||||
|
||||
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
{
|
||||
struct drm_device *dev = state->dev;
|
||||
@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
* can happen also when the device is completely off.
|
||||
*/
|
||||
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
|
||||
|
||||
intel_atomic_helper_free_state(dev_priv);
|
||||
}
|
||||
|
||||
static void intel_atomic_commit_work(struct work_struct *work)
|
||||
@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
|
||||
to_intel_atomic_state(old_crtc_state->state);
|
||||
bool modeset = needs_modeset(crtc->state);
|
||||
|
||||
if (!modeset &&
|
||||
(intel_cstate->base.color_mgmt_changed ||
|
||||
intel_cstate->update_pipe)) {
|
||||
intel_color_set_csc(crtc->state);
|
||||
intel_color_load_luts(crtc->state);
|
||||
}
|
||||
|
||||
/* Perform vblank evasion around commit operation */
|
||||
intel_pipe_update_start(intel_crtc);
|
||||
|
||||
if (modeset)
|
||||
goto out;
|
||||
|
||||
if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
|
||||
intel_color_set_csc(crtc->state);
|
||||
intel_color_load_luts(crtc->state);
|
||||
}
|
||||
|
||||
if (intel_cstate->update_pipe)
|
||||
intel_update_pipe_config(intel_crtc, old_intel_cstate);
|
||||
else if (INTEL_GEN(dev_priv) >= 9)
|
||||
@ -16599,18 +16611,6 @@ fail:
|
||||
drm_modeset_acquire_fini(&ctx);
|
||||
}
|
||||
|
||||
static void intel_atomic_helper_free_state(struct work_struct *work)
|
||||
{
|
||||
struct drm_i915_private *dev_priv =
|
||||
container_of(work, typeof(*dev_priv), atomic_helper.free_work);
|
||||
struct intel_atomic_state *state, *next;
|
||||
struct llist_node *freed;
|
||||
|
||||
freed = llist_del_all(&dev_priv->atomic_helper.free_list);
|
||||
llist_for_each_entry_safe(state, next, freed, freed)
|
||||
drm_atomic_state_put(&state->base);
|
||||
}
|
||||
|
||||
int intel_modeset_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(dev);
|
||||
@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev)
|
||||
dev->mode_config.funcs = &intel_mode_funcs;
|
||||
|
||||
INIT_WORK(&dev_priv->atomic_helper.free_work,
|
||||
intel_atomic_helper_free_state);
|
||||
intel_atomic_helper_free_state_worker);
|
||||
|
||||
intel_init_quirks(dev);
|
||||
|
||||
|
@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
|
||||
bool *enabled, int width, int height)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
|
||||
unsigned long conn_configured, mask;
|
||||
unsigned long conn_configured, conn_seq, mask;
|
||||
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
|
||||
int i, j;
|
||||
bool *save_enabled;
|
||||
bool fallback = true;
|
||||
int num_connectors_enabled = 0;
|
||||
int num_connectors_detected = 0;
|
||||
int pass = 0;
|
||||
|
||||
save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
|
||||
if (!save_enabled)
|
||||
@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
|
||||
mask = BIT(count) - 1;
|
||||
conn_configured = 0;
|
||||
retry:
|
||||
conn_seq = conn_configured;
|
||||
for (i = 0; i < count; i++) {
|
||||
struct drm_fb_helper_connector *fb_conn;
|
||||
struct drm_connector *connector;
|
||||
@ -387,7 +387,7 @@ retry:
|
||||
if (conn_configured & BIT(i))
|
||||
continue;
|
||||
|
||||
if (pass == 0 && !connector->has_tile)
|
||||
if (conn_seq == 0 && !connector->has_tile)
|
||||
continue;
|
||||
|
||||
if (connector->status == connector_status_connected)
|
||||
@ -498,10 +498,8 @@ retry:
|
||||
conn_configured |= BIT(i);
|
||||
}
|
||||
|
||||
if ((conn_configured & mask) != mask) {
|
||||
pass++;
|
||||
if ((conn_configured & mask) != mask && conn_configured != conn_seq)
|
||||
goto retry;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the BIOS didn't enable everything it could, fall back to have the
|
||||
|
@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
|
||||
break;
|
||||
}
|
||||
|
||||
/* When byt can survive without system hang with dynamic
|
||||
* sw freq adjustments, this restriction can be lifted.
|
||||
*/
|
||||
if (IS_VALLEYVIEW(dev_priv))
|
||||
goto skip_hw_write;
|
||||
|
||||
I915_WRITE(GEN6_RP_UP_EI,
|
||||
GT_INTERVAL_FROM_US(dev_priv, ei_up));
|
||||
I915_WRITE(GEN6_RP_UP_THRESHOLD,
|
||||
@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
|
||||
GEN6_RP_UP_BUSY_AVG |
|
||||
GEN6_RP_DOWN_IDLE_AVG);
|
||||
|
||||
skip_hw_write:
|
||||
dev_priv->rps.power = new_power;
|
||||
dev_priv->rps.up_threshold = threshold_up;
|
||||
dev_priv->rps.down_threshold = threshold_down;
|
||||
@ -7916,10 +7923,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
|
||||
* @timeout_base_ms: timeout for polling with preemption enabled
|
||||
*
|
||||
* Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
|
||||
* reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
|
||||
* reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
|
||||
* The request is acknowledged once the PCODE reply dword equals @reply after
|
||||
* applying @reply_mask. Polling is first attempted with preemption enabled
|
||||
* for @timeout_base_ms and if this times out for another 10 ms with
|
||||
* for @timeout_base_ms and if this times out for another 50 ms with
|
||||
* preemption disabled.
|
||||
*
|
||||
* Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
|
||||
@ -7955,14 +7962,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
|
||||
* worst case) _and_ PCODE was busy for some reason even after a
|
||||
* (queued) request and @timeout_base_ms delay. As a workaround retry
|
||||
* the poll with preemption disabled to maximize the number of
|
||||
* requests. Increase the timeout from @timeout_base_ms to 10ms to
|
||||
* requests. Increase the timeout from @timeout_base_ms to 50ms to
|
||||
* account for interrupts that could reduce the number of these
|
||||
* requests.
|
||||
* requests, and for any quirks of the PCODE firmware that delays
|
||||
* the request completion.
|
||||
*/
|
||||
DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
|
||||
WARN_ON_ONCE(timeout_base_ms > 3);
|
||||
preempt_disable();
|
||||
ret = wait_for_atomic(COND, 10);
|
||||
ret = wait_for_atomic(COND, 50);
|
||||
preempt_enable();
|
||||
|
||||
out:
|
||||
|
@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane,
|
||||
int scaler_id = plane_state->scaler_id;
|
||||
const struct intel_scaler *scaler;
|
||||
|
||||
DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
|
||||
plane_id, PS_PLANE_SEL(plane_id));
|
||||
|
||||
scaler = &crtc_state->scaler_state.scalers[scaler_id];
|
||||
|
||||
I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
|
||||
|
@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
|
||||
|
||||
for_each_fw_domain_masked(d, fw_domains, dev_priv)
|
||||
fw_domain_wait_ack(d);
|
||||
|
||||
dev_priv->uncore.fw_domains_active |= fw_domains;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
|
||||
fw_domain_put(d);
|
||||
fw_domain_posting_read(d);
|
||||
}
|
||||
|
||||
dev_priv->uncore.fw_domains_active &= ~fw_domains;
|
||||
}
|
||||
|
||||
static void
|
||||
@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
|
||||
if (WARN_ON(domain->wake_count == 0))
|
||||
domain->wake_count++;
|
||||
|
||||
if (--domain->wake_count == 0) {
|
||||
if (--domain->wake_count == 0)
|
||||
dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
|
||||
dev_priv->uncore.fw_domains_active &= ~domain->mask;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
|
||||
|
||||
@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
|
||||
fw_domains &= ~domain->mask;
|
||||
}
|
||||
|
||||
if (fw_domains) {
|
||||
if (fw_domains)
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
|
||||
dev_priv->uncore.fw_domains_active |= fw_domains;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
|
||||
fw_domain_arm_timer(domain);
|
||||
|
||||
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
|
||||
dev_priv->uncore.fw_domains_active |= fw_domains;
|
||||
}
|
||||
|
||||
static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
|
||||
|
@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
    struct drm_gem_object *obj = buffer->priv;
    int ret = 0;

    if (WARN_ON(!obj->filp))
        return -EINVAL;

    ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
    if (ret < 0)
        return ret;

@ -2984,6 +2984,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
            (rdev->pdev->device == 0x6667)) {
            max_sclk = 75000;
        }
    } else if (rdev->family == CHIP_OLAND) {
        if ((rdev->pdev->device == 0x6604) &&
            (rdev->pdev->subsystem_vendor == 0x1028) &&
            (rdev->pdev->subsystem_device == 0x066F)) {
            max_sclk = 75000;
        }
    }

    if (rps->vce_active) {
@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
    unsigned long flags;

    WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
    mutex_lock(&tilcdc_crtc->enable_lock);
@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
    tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
                      LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
                      LCDC_PALETTE_LOAD_MODE_MASK);

    /* There is no real chance for a race here as the time stamp
     * is taken before the raster DMA is started. The spin-lock is
     * taken to have a memory barrier after taking the time-stamp
     * and to avoid a context switch between taking the stamp and
     * enabling the raster.
     */
    spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
    tilcdc_crtc->last_vblank = ktime_get();
    tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
    spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);

    drm_crtc_vblank_on(crtc);

@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
    }

    drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
    tilcdc_crtc->last_vblank = 0;

    tilcdc_crtc->enabled = false;
    mutex_unlock(&tilcdc_crtc->enable_lock);
@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
{
    struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
    struct drm_device *dev = crtc->dev;
    unsigned long flags;

    WARN_ON(!drm_modeset_is_locked(&crtc->mutex));

@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
    drm_framebuffer_reference(fb);

    crtc->primary->fb = fb;
    tilcdc_crtc->event = event;

    spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
    mutex_lock(&tilcdc_crtc->enable_lock);

    if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
    if (tilcdc_crtc->enabled) {
        unsigned long flags;
        ktime_t next_vblank;
        s64 tdiff;

        next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
                                   1000000 / crtc->hwmode.vrefresh);
        spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);

        next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
                                   1000000 / crtc->hwmode.vrefresh);
        tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

        if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
            tilcdc_crtc->next_fb = fb;
        else
            set_scanout(crtc, fb);

        spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
    }

    if (tilcdc_crtc->next_fb != fb)
        set_scanout(crtc, fb);

    tilcdc_crtc->event = event;

    spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
    mutex_unlock(&tilcdc_crtc->enable_lock);

    return 0;
}
@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev)

fail:
    tilcdc_crtc_destroy(crtc);
    return -ENOMEM;
    return ret;
}
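
A simplified sketch of the page-flip timing decision in tilcdc_crtc_update_fb() above: if the next vblank is closer than a safety threshold, the flip is deferred to the vblank handler via next_fb rather than programming the scanout registers directly. The threshold and refresh values below are made up for illustration.

    #include <stdio.h>

    #define SAFETY_THRESHOLD_US 1000   /* assumed safety margin */

    static const char *decide(long us_since_last_vblank, int vrefresh)
    {
        long period_us = 1000000L / vrefresh;
        long tdiff = period_us - us_since_last_vblank;   /* time left until next vblank */

        return tdiff < SAFETY_THRESHOLD_US ? "queue as next_fb" : "set_scanout now";
    }

    int main(void)
    {
        printf("16.0 ms after vblank @60Hz: %s\n", decide(16000, 60));
        printf(" 2.0 ms after vblank @60Hz: %s\n", decide(2000, 60));
        return 0;
    }
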
@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
        return -ENODEV;
    }

    if (hostif->desc.bNumEndpoints < 1)
        return -ENODEV;

    dev_info(&udev->dev,
             "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
             __func__, le16_to_cpu(udev->descriptor.idVendor),

@ -392,6 +392,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
     * To get all the fields, copy all archdata
     */
    dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
    dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
#endif /* CONFIG_PCI */

#ifdef DEBUG
@ -989,26 +989,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
    struct dm_offload *o = container_of(cb, struct dm_offload, cb);
    struct bio_list list;
    struct bio *bio;
    int i;

    INIT_LIST_HEAD(&o->cb.list);

    if (unlikely(!current->bio_list))
        return;

    list = *current->bio_list;
    bio_list_init(current->bio_list);
    for (i = 0; i < 2; i++) {
        list = current->bio_list[i];
        bio_list_init(&current->bio_list[i]);

        while ((bio = bio_list_pop(&list))) {
            struct bio_set *bs = bio->bi_pool;
            if (unlikely(!bs) || bs == fs_bio_set) {
                bio_list_add(current->bio_list, bio);
                continue;
        while ((bio = bio_list_pop(&list))) {
            struct bio_set *bs = bio->bi_pool;
            if (unlikely(!bs) || bs == fs_bio_set) {
                bio_list_add(&current->bio_list[i], bio);
                continue;
            }

            spin_lock(&bs->rescue_lock);
            bio_list_add(&bs->rescue_list, bio);
            queue_work(bs->rescue_workqueue, &bs->rescue_work);
            spin_unlock(&bs->rescue_lock);
        }

        spin_lock(&bs->rescue_lock);
        bio_list_add(&bs->rescue_list, bio);
        queue_work(bs->rescue_workqueue, &bs->rescue_work);
        spin_unlock(&bs->rescue_lock);
    }
}

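
A small standalone sketch of the structural change above: current->bio_list is now treated as an array of two bio lists, so the plug callback drains both. The list type here is a plain singly linked list standing in for struct bio_list.

    #include <stdio.h>
    #include <stddef.h>

    struct node { int id; struct node *next; };

    static struct node *pop(struct node **head)
    {
        struct node *n = *head;
        if (n)
            *head = n->next;
        return n;
    }

    int main(void)
    {
        struct node a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
        struct node *lists[2] = { &a, &c };   /* two queues, as in bio_list[2] */
        int i;

        a.next = &b;
        for (i = 0; i < 2; i++) {             /* drain both lists, as in the new loop */
            struct node *n;
            while ((n = pop(&lists[i])))
                printf("list %d: handling bio %d\n", i, n->id);
        }
        return 0;
    }
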
@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
        bm_lockres->flags |= DLM_LKF_NOQUEUE;
        ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
        if (ret == -EAGAIN) {
            memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
            s = read_resync_info(mddev, bm_lockres);
            if (s) {
                pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
@ -974,6 +973,7 @@ static int leave(struct mddev *mddev)
    lockres_free(cinfo->bitmap_lockres);
    unlock_all_bitmaps(mddev);
    dlm_release_lockspace(cinfo->lockspace, 2);
    kfree(cinfo);
    return 0;
}

@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
}
EXPORT_SYMBOL(md_flush_request);

void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
    struct mddev *mddev = cb->data;
    md_wakeup_thread(mddev->thread);
    kfree(cb);
}
EXPORT_SYMBOL(md_unplug);

static inline struct mddev *mddev_get(struct mddev *mddev)
{
    atomic_inc(&mddev->active);
@ -1887,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
    }
    sb = page_address(rdev->sb_page);
    sb->data_size = cpu_to_le64(num_sectors);
    sb->super_offset = rdev->sb_start;
    sb->super_offset = cpu_to_le64(rdev->sb_start);
    sb->sb_csum = calc_sb_1_csum(sb);
    do {
        md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@ -2295,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
    /* Check if any mddev parameters have changed */
    if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
        (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
        (mddev->layout != le64_to_cpu(sb->layout)) ||
        (mddev->layout != le32_to_cpu(sb->layout)) ||
        (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
        (mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
        return true;
@ -6458,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
    mddev->layout = info->layout;
    mddev->chunk_sectors = info->chunk_size >> 9;

    mddev->max_disks = MD_SB_DISKS;

    if (mddev->persistent) {
        mddev->flags = 0;
        mddev->sb_flags = 0;
        mddev->max_disks = MD_SB_DISKS;
        mddev->flags = 0;
        mddev->sb_flags = 0;
    }
    set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);

@ -6533,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
        return -ENOSPC;
    }
    rv = mddev->pers->resize(mddev, num_sectors);
    if (!rv)
        revalidate_disk(mddev->gendisk);
    if (!rv) {
        if (mddev->queue) {
            set_capacity(mddev->gendisk, mddev->array_sectors);
            revalidate_disk(mddev->gendisk);
        }
    }
    return rv;
}

Some files were not shown because too many files have changed in this diff.