mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-04 01:24:12 +08:00
ASoC: Last minute fixes for v4.4
A few final fixes for v4.4, the main one being the two patches to the new Sky Lake drivers which fix a previous incorrect fix that went in during an earlier -rc. -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQEcBAABAgAGBQJWjUsZAAoJECTWi3JdVIfQ5eYH/3od9mB5GiX8hVwcbdeozdoa jov83C8roMBB5/ebRhIHXf1VI64axp2/Zv2hPjlHdoEjcVPjmdFRn0mno7w9NZqC 271VdCpjXyB/U9PrFi0GK0ByeO+Ru33bqfzL25HpFgD0TQDYFB8N/533Qp4bZV24 D/a/D4e3tUUhtKwIKDf1KfVp2hOKBEiD0Tyai2YIXBCszC8xltCowTE2yZ38aYA0 f6Q+xPkCkgvCw7cE+n+PSQy7EoVH62Wol3ysrxk6anlGoSIH8ut3ZfMlncfgUCFm izJuiWKogm0SXHJh78MmgBFY0Xg4Fot3mJN6OaVzo8/TrYD4ERVhG/IBXrS/K30= =SaxY -----END PGP SIGNATURE----- Merge tag 'asoc-fix-v4.4-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus ASoC: Last minute fixes for v4.4 A few final fixes for v4.4, the main one being the two patches to the new Sky Lake drivers which fix a previous incorrect fix that went in during an earlier -rc.
This commit is contained in:
commit
3f37b26f8d
@ -40,18 +40,18 @@ Optional properties:
|
||||
|
||||
Slave Properties:
|
||||
Required properties:
|
||||
- phy_id : Specifies slave phy id
|
||||
- phy-mode : See ethernet.txt file in the same directory
|
||||
|
||||
Optional properties:
|
||||
- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
|
||||
- mac-address : See ethernet.txt file in the same directory
|
||||
- phy_id : Specifies slave phy id
|
||||
- phy-handle : See ethernet.txt file in the same directory
|
||||
|
||||
Slave sub-nodes:
|
||||
- fixed-link : See fixed-link.txt file in the same directory
|
||||
Either the properties phy_id and phy-mode,
|
||||
or the sub-node fixed-link can be specified
|
||||
Either the property phy_id, or the sub-node
|
||||
fixed-link can be specified
|
||||
|
||||
Note: "ti,hwmods" field is used to fetch the base address and irq
|
||||
resources from TI, omap hwmod data base during device registration.
|
||||
|
2
Makefile
2
Makefile
@ -1,7 +1,7 @@
|
||||
VERSION = 4
|
||||
PATCHLEVEL = 4
|
||||
SUBLEVEL = 0
|
||||
EXTRAVERSION = -rc6
|
||||
EXTRAVERSION = -rc8
|
||||
NAME = Blurry Fish Butt
|
||||
|
||||
# *DOCUMENTATION*
|
||||
|
@ -81,7 +81,7 @@ endif
|
||||
LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
|
||||
|
||||
# Modules with short calls might break for calls into builtin-kernel
|
||||
KBUILD_CFLAGS_MODULE += -mlong-calls
|
||||
KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
|
||||
|
||||
# Finally dump eveything into kernel build system
|
||||
KBUILD_CFLAGS += $(cflags-y)
|
||||
|
@ -62,9 +62,7 @@ extern int ioc_exists;
|
||||
#define ARC_REG_IC_IVIC 0x10
|
||||
#define ARC_REG_IC_CTRL 0x11
|
||||
#define ARC_REG_IC_IVIL 0x19
|
||||
#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4)
|
||||
#define ARC_REG_IC_PTAG 0x1E
|
||||
#endif
|
||||
#define ARC_REG_IC_PTAG_HI 0x1F
|
||||
|
||||
/* Bit val in IC_CTRL */
|
||||
|
@ -293,13 +293,13 @@ static void init_unwind_hdr(struct unwind_table *table,
|
||||
const u32 *cie = cie_for_fde(fde, table);
|
||||
signed ptrType;
|
||||
|
||||
if (cie == ¬_fde) /* only process FDE here */
|
||||
if (cie == ¬_fde)
|
||||
continue;
|
||||
if (cie == NULL || cie == &bad_cie)
|
||||
continue; /* say FDE->CIE.version != 1 */
|
||||
goto ret_err;
|
||||
ptrType = fde_pointer_type(cie);
|
||||
if (ptrType < 0)
|
||||
continue;
|
||||
goto ret_err;
|
||||
|
||||
ptr = (const u8 *)(fde + 2);
|
||||
if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
|
||||
@ -315,14 +315,14 @@ static void init_unwind_hdr(struct unwind_table *table,
|
||||
}
|
||||
|
||||
if (tableSize || !n)
|
||||
return;
|
||||
goto ret_err;
|
||||
|
||||
hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
|
||||
+ 2 * n * sizeof(unsigned long);
|
||||
|
||||
header = alloc(hdrSize);
|
||||
if (!header)
|
||||
return;
|
||||
goto ret_err;
|
||||
|
||||
header->version = 1;
|
||||
header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
|
||||
@ -343,10 +343,6 @@ static void init_unwind_hdr(struct unwind_table *table,
|
||||
|
||||
if (fde[1] == 0xffffffff)
|
||||
continue; /* this is a CIE */
|
||||
|
||||
if (*(u8 *)(cie + 2) != 1)
|
||||
continue; /* FDE->CIE.version not supported */
|
||||
|
||||
ptr = (const u8 *)(fde + 2);
|
||||
header->table[n].start = read_pointer(&ptr,
|
||||
(const u8 *)(fde + 1) +
|
||||
@ -365,6 +361,10 @@ static void init_unwind_hdr(struct unwind_table *table,
|
||||
table->hdrsz = hdrSize;
|
||||
smp_wmb();
|
||||
table->header = (const void *)header;
|
||||
return;
|
||||
|
||||
ret_err:
|
||||
panic("Attention !!! Dwarf FDE parsing errors\n");;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MODULES
|
||||
@ -523,8 +523,7 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
|
||||
|
||||
if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
|
||||
|| (*cie & (sizeof(*cie) - 1))
|
||||
|| (cie[1] != 0xffffffff)
|
||||
|| ( *(u8 *)(cie + 2) != 1)) /* version 1 supported */
|
||||
|| (cie[1] != 0xffffffff))
|
||||
return NULL; /* this is not a (valid) CIE */
|
||||
return cie;
|
||||
}
|
||||
@ -605,9 +604,6 @@ static signed fde_pointer_type(const u32 *cie)
|
||||
const u8 *ptr = (const u8 *)(cie + 2);
|
||||
unsigned version = *ptr;
|
||||
|
||||
if (version != 1)
|
||||
return -1; /* unsupported */
|
||||
|
||||
if (*++ptr) {
|
||||
const char *aug;
|
||||
const u8 *end = (const u8 *)(cie + 1) + *cie;
|
||||
@ -1019,9 +1015,7 @@ int arc_unwind(struct unwind_frame_info *frame)
|
||||
ptr = (const u8 *)(cie + 2);
|
||||
end = (const u8 *)(cie + 1) + *cie;
|
||||
frame->call_frame = 1;
|
||||
if ((state.version = *ptr) != 1)
|
||||
cie = NULL; /* unsupported version */
|
||||
else if (*++ptr) {
|
||||
if (*++ptr) {
|
||||
/* check if augmentation size is first (thus present) */
|
||||
if (*ptr == 'z') {
|
||||
while (++ptr < end && *ptr) {
|
||||
|
@ -111,7 +111,7 @@ void __kunmap_atomic(void *kv)
|
||||
}
|
||||
EXPORT_SYMBOL(__kunmap_atomic);
|
||||
|
||||
noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
|
||||
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
|
||||
{
|
||||
pgd_t *pgd_k;
|
||||
pud_t *pud_k;
|
||||
@ -127,7 +127,7 @@ noinline pte_t *alloc_kmap_pgtable(unsigned long kvaddr)
|
||||
return pte_k;
|
||||
}
|
||||
|
||||
void kmap_init(void)
|
||||
void __init kmap_init(void)
|
||||
{
|
||||
/* Due to recursive include hell, we can't do this in processor.h */
|
||||
BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
|
||||
|
@ -154,7 +154,7 @@
|
||||
&fec {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_enet>;
|
||||
phy-mode = "rgmii";
|
||||
phy-mode = "rgmii-id";
|
||||
phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_HIGH>;
|
||||
status = "okay";
|
||||
};
|
||||
|
@ -94,7 +94,7 @@
|
||||
&fec {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_enet>;
|
||||
phy-mode = "rgmii";
|
||||
phy-mode = "rgmii-id";
|
||||
phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
|
||||
status = "okay";
|
||||
};
|
||||
|
@ -154,7 +154,7 @@
|
||||
&fec {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_enet>;
|
||||
phy-mode = "rgmii";
|
||||
phy-mode = "rgmii-id";
|
||||
phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
|
||||
status = "okay";
|
||||
};
|
||||
|
@ -155,7 +155,7 @@
|
||||
&fec {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_enet>;
|
||||
phy-mode = "rgmii";
|
||||
phy-mode = "rgmii-id";
|
||||
phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
|
||||
status = "okay";
|
||||
};
|
||||
|
@ -145,7 +145,7 @@
|
||||
&fec {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_enet>;
|
||||
phy-mode = "rgmii";
|
||||
phy-mode = "rgmii-id";
|
||||
phy-reset-gpios = <&gpio1 30 GPIO_ACTIVE_LOW>;
|
||||
status = "okay";
|
||||
};
|
||||
|
@ -113,14 +113,14 @@
|
||||
&clks {
|
||||
assigned-clocks = <&clks IMX6QDL_PLL4_BYPASS_SRC>,
|
||||
<&clks IMX6QDL_PLL4_BYPASS>,
|
||||
<&clks IMX6QDL_CLK_PLL4_POST_DIV>,
|
||||
<&clks IMX6QDL_CLK_LDB_DI0_SEL>,
|
||||
<&clks IMX6QDL_CLK_LDB_DI1_SEL>;
|
||||
<&clks IMX6QDL_CLK_LDB_DI1_SEL>,
|
||||
<&clks IMX6QDL_CLK_PLL4_POST_DIV>;
|
||||
assigned-clock-parents = <&clks IMX6QDL_CLK_LVDS2_IN>,
|
||||
<&clks IMX6QDL_PLL4_BYPASS_SRC>,
|
||||
<&clks IMX6QDL_CLK_PLL3_USB_OTG>,
|
||||
<&clks IMX6QDL_CLK_PLL3_USB_OTG>;
|
||||
assigned-clock-rates = <0>, <0>, <24576000>;
|
||||
assigned-clock-rates = <0>, <0>, <0>, <0>, <24576000>;
|
||||
};
|
||||
|
||||
&ecspi1 {
|
||||
|
@ -189,3 +189,7 @@
|
||||
};
|
||||
};
|
||||
|
||||
&uart3 {
|
||||
interrupts-extended = <&wakeupgen GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH
|
||||
&omap4_pmx_core OMAP4_UART3_RX>;
|
||||
};
|
||||
|
@ -83,6 +83,7 @@
|
||||
reg = <0x5d>;
|
||||
interrupt-parent = <&pio>;
|
||||
interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>; /* PA3 */
|
||||
touchscreen-swapped-x-y;
|
||||
};
|
||||
};
|
||||
|
||||
|
@ -399,7 +399,7 @@
|
||||
|
||||
/* CPU DFLL clock */
|
||||
clock@0,70110000 {
|
||||
status = "okay";
|
||||
status = "disabled";
|
||||
vdd-cpu-supply = <&vdd_cpu>;
|
||||
nvidia,i2c-fs-rate = <400000>;
|
||||
};
|
||||
|
@ -193,15 +193,44 @@ struct oabi_flock64 {
|
||||
pid_t l_pid;
|
||||
} __attribute__ ((packed,aligned(4)));
|
||||
|
||||
static long do_locks(unsigned int fd, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
struct flock64 kernel;
|
||||
struct oabi_flock64 user;
|
||||
mm_segment_t fs;
|
||||
long ret;
|
||||
|
||||
if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
|
||||
sizeof(user)))
|
||||
return -EFAULT;
|
||||
kernel.l_type = user.l_type;
|
||||
kernel.l_whence = user.l_whence;
|
||||
kernel.l_start = user.l_start;
|
||||
kernel.l_len = user.l_len;
|
||||
kernel.l_pid = user.l_pid;
|
||||
|
||||
fs = get_fs();
|
||||
set_fs(KERNEL_DS);
|
||||
ret = sys_fcntl64(fd, cmd, (unsigned long)&kernel);
|
||||
set_fs(fs);
|
||||
|
||||
if (!ret && (cmd == F_GETLK64 || cmd == F_OFD_GETLK)) {
|
||||
user.l_type = kernel.l_type;
|
||||
user.l_whence = kernel.l_whence;
|
||||
user.l_start = kernel.l_start;
|
||||
user.l_len = kernel.l_len;
|
||||
user.l_pid = kernel.l_pid;
|
||||
if (copy_to_user((struct oabi_flock64 __user *)arg,
|
||||
&user, sizeof(user)))
|
||||
ret = -EFAULT;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
|
||||
unsigned long arg)
|
||||
{
|
||||
struct oabi_flock64 user;
|
||||
struct flock64 kernel;
|
||||
mm_segment_t fs = USER_DS; /* initialized to kill a warning */
|
||||
unsigned long local_arg = arg;
|
||||
int ret;
|
||||
|
||||
switch (cmd) {
|
||||
case F_OFD_GETLK:
|
||||
case F_OFD_SETLK:
|
||||
@ -209,39 +238,11 @@ asmlinkage long sys_oabi_fcntl64(unsigned int fd, unsigned int cmd,
|
||||
case F_GETLK64:
|
||||
case F_SETLK64:
|
||||
case F_SETLKW64:
|
||||
if (copy_from_user(&user, (struct oabi_flock64 __user *)arg,
|
||||
sizeof(user)))
|
||||
return -EFAULT;
|
||||
kernel.l_type = user.l_type;
|
||||
kernel.l_whence = user.l_whence;
|
||||
kernel.l_start = user.l_start;
|
||||
kernel.l_len = user.l_len;
|
||||
kernel.l_pid = user.l_pid;
|
||||
local_arg = (unsigned long)&kernel;
|
||||
fs = get_fs();
|
||||
set_fs(KERNEL_DS);
|
||||
return do_locks(fd, cmd, arg);
|
||||
|
||||
default:
|
||||
return sys_fcntl64(fd, cmd, arg);
|
||||
}
|
||||
|
||||
ret = sys_fcntl64(fd, cmd, local_arg);
|
||||
|
||||
switch (cmd) {
|
||||
case F_GETLK64:
|
||||
if (!ret) {
|
||||
user.l_type = kernel.l_type;
|
||||
user.l_whence = kernel.l_whence;
|
||||
user.l_start = kernel.l_start;
|
||||
user.l_len = kernel.l_len;
|
||||
user.l_pid = kernel.l_pid;
|
||||
if (copy_to_user((struct oabi_flock64 __user *)arg,
|
||||
&user, sizeof(user)))
|
||||
ret = -EFAULT;
|
||||
}
|
||||
case F_SETLK64:
|
||||
case F_SETLKW64:
|
||||
set_fs(fs);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct oabi_epoll_event {
|
||||
|
@ -65,6 +65,8 @@ config SOC_AM43XX
|
||||
select MACH_OMAP_GENERIC
|
||||
select MIGHT_HAVE_CACHE_L2X0
|
||||
select HAVE_ARM_SCU
|
||||
select GENERIC_CLOCKEVENTS_BROADCAST
|
||||
select HAVE_ARM_TWD
|
||||
|
||||
config SOC_DRA7XX
|
||||
bool "TI DRA7XX"
|
||||
|
@ -320,6 +320,12 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
|
||||
return r;
|
||||
}
|
||||
|
||||
#if !defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
|
||||
void tick_broadcast(const struct cpumask *mask)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
static void __init omap2_gp_clockevent_init(int gptimer_id,
|
||||
const char *fck_source,
|
||||
const char *property)
|
||||
|
@ -3,6 +3,7 @@ generic-y += clkdev.h
|
||||
generic-y += cputime.h
|
||||
generic-y += exec.h
|
||||
generic-y += irq_work.h
|
||||
generic-y += kvm_para.h
|
||||
generic-y += mcs_spinlock.h
|
||||
generic-y += mm-arch-hooks.h
|
||||
generic-y += module.h
|
||||
|
@ -168,13 +168,21 @@ static inline void _writel(unsigned long l, unsigned long addr)
|
||||
#define writew_relaxed writew
|
||||
#define writel_relaxed writel
|
||||
|
||||
#define ioread8 read
|
||||
#define ioread8 readb
|
||||
#define ioread16 readw
|
||||
#define ioread32 readl
|
||||
#define iowrite8 writeb
|
||||
#define iowrite16 writew
|
||||
#define iowrite32 writel
|
||||
|
||||
#define ioread8_rep(p, dst, count) insb((unsigned long)(p), (dst), (count))
|
||||
#define ioread16_rep(p, dst, count) insw((unsigned long)(p), (dst), (count))
|
||||
#define ioread32_rep(p, dst, count) insl((unsigned long)(p), (dst), (count))
|
||||
|
||||
#define iowrite8_rep(p, src, count) outsb((unsigned long)(p), (src), (count))
|
||||
#define iowrite16_rep(p, src, count) outsw((unsigned long)(p), (src), (count))
|
||||
#define iowrite32_rep(p, src, count) outsl((unsigned long)(p), (src), (count))
|
||||
|
||||
#define ioread16be(addr) be16_to_cpu(readw(addr))
|
||||
#define ioread32be(addr) be32_to_cpu(readl(addr))
|
||||
#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
|
||||
|
@ -599,7 +599,7 @@ extern void __put_user_unknown(void);
|
||||
* On error, the variable @x is set to zero.
|
||||
*/
|
||||
#define __get_user_unaligned(x,ptr) \
|
||||
__get_user__unalignednocheck((x),(ptr),sizeof(*(ptr)))
|
||||
__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
|
||||
|
||||
/*
|
||||
* Yuck. We need two variants, one for 64bit operation and one
|
||||
@ -620,8 +620,8 @@ extern void __get_user_unaligned_unknown(void);
|
||||
do { \
|
||||
switch (size) { \
|
||||
case 1: __get_data_asm(val, "lb", ptr); break; \
|
||||
case 2: __get_user_unaligned_asm(val, "ulh", ptr); break; \
|
||||
case 4: __get_user_unaligned_asm(val, "ulw", ptr); break; \
|
||||
case 2: __get_data_unaligned_asm(val, "ulh", ptr); break; \
|
||||
case 4: __get_data_unaligned_asm(val, "ulw", ptr); break; \
|
||||
case 8: __GET_USER_UNALIGNED_DW(val, ptr); break; \
|
||||
default: __get_user_unaligned_unknown(); break; \
|
||||
} \
|
||||
@ -1122,9 +1122,15 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
|
||||
__cu_to = (to); \
|
||||
__cu_from = (from); \
|
||||
__cu_len = (n); \
|
||||
might_fault(); \
|
||||
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
|
||||
__cu_len); \
|
||||
if (eva_kernel_access()) { \
|
||||
__cu_len = __invoke_copy_from_kernel(__cu_to, \
|
||||
__cu_from, \
|
||||
__cu_len); \
|
||||
} else { \
|
||||
might_fault(); \
|
||||
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
|
||||
__cu_len); \
|
||||
} \
|
||||
__cu_len; \
|
||||
})
|
||||
|
||||
@ -1229,16 +1235,28 @@ __clear_user(void __user *addr, __kernel_size_t size)
|
||||
{
|
||||
__kernel_size_t res;
|
||||
|
||||
might_fault();
|
||||
__asm__ __volatile__(
|
||||
"move\t$4, %1\n\t"
|
||||
"move\t$5, $0\n\t"
|
||||
"move\t$6, %2\n\t"
|
||||
__MODULE_JAL(__bzero)
|
||||
"move\t%0, $6"
|
||||
: "=r" (res)
|
||||
: "r" (addr), "r" (size)
|
||||
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
|
||||
if (eva_kernel_access()) {
|
||||
__asm__ __volatile__(
|
||||
"move\t$4, %1\n\t"
|
||||
"move\t$5, $0\n\t"
|
||||
"move\t$6, %2\n\t"
|
||||
__MODULE_JAL(__bzero_kernel)
|
||||
"move\t%0, $6"
|
||||
: "=r" (res)
|
||||
: "r" (addr), "r" (size)
|
||||
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
|
||||
} else {
|
||||
might_fault();
|
||||
__asm__ __volatile__(
|
||||
"move\t$4, %1\n\t"
|
||||
"move\t$5, $0\n\t"
|
||||
"move\t$6, %2\n\t"
|
||||
__MODULE_JAL(__bzero)
|
||||
"move\t%0, $6"
|
||||
: "=r" (res)
|
||||
: "r" (addr), "r" (size)
|
||||
: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
@ -1384,7 +1402,7 @@ static inline long strlen_user(const char __user *s)
|
||||
might_fault();
|
||||
__asm__ __volatile__(
|
||||
"move\t$4, %1\n\t"
|
||||
__MODULE_JAL(__strlen_kernel_asm)
|
||||
__MODULE_JAL(__strlen_user_asm)
|
||||
"move\t%0, $2"
|
||||
: "=r" (res)
|
||||
: "r" (s)
|
||||
|
@ -257,7 +257,6 @@ LEAF(mips_cps_core_init)
|
||||
has_mt t0, 3f
|
||||
|
||||
.set push
|
||||
.set mips64r2
|
||||
.set mt
|
||||
|
||||
/* Only allow 1 TC per VPE to execute... */
|
||||
@ -376,7 +375,6 @@ LEAF(mips_cps_boot_vpes)
|
||||
nop
|
||||
|
||||
.set push
|
||||
.set mips64r2
|
||||
.set mt
|
||||
|
||||
1: /* Enter VPE configuration state */
|
||||
|
@ -17,6 +17,7 @@
|
||||
#include <asm/fpu.h>
|
||||
#include <asm/msa.h>
|
||||
|
||||
extern void *__bzero_kernel(void *__s, size_t __count);
|
||||
extern void *__bzero(void *__s, size_t __count);
|
||||
extern long __strncpy_from_kernel_nocheck_asm(char *__to,
|
||||
const char *__from, long __len);
|
||||
@ -64,6 +65,7 @@ EXPORT_SYMBOL(__copy_from_user_eva);
|
||||
EXPORT_SYMBOL(__copy_in_user_eva);
|
||||
EXPORT_SYMBOL(__copy_to_user_eva);
|
||||
EXPORT_SYMBOL(__copy_user_inatomic_eva);
|
||||
EXPORT_SYMBOL(__bzero_kernel);
|
||||
#endif
|
||||
EXPORT_SYMBOL(__bzero);
|
||||
EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
|
||||
|
@ -283,6 +283,8 @@ LEAF(memset)
|
||||
1:
|
||||
#ifndef CONFIG_EVA
|
||||
FEXPORT(__bzero)
|
||||
#else
|
||||
FEXPORT(__bzero_kernel)
|
||||
#endif
|
||||
__BUILD_BZERO LEGACY_MODE
|
||||
|
||||
|
@ -221,7 +221,6 @@ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
|
||||
static int rt288x_pci_probe(struct platform_device *pdev)
|
||||
{
|
||||
void __iomem *io_map_base;
|
||||
int i;
|
||||
|
||||
rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE);
|
||||
|
||||
|
@ -39,7 +39,6 @@ extern void msp_serial_setup(void);
|
||||
void msp7120_reset(void)
|
||||
{
|
||||
void *start, *end, *iptr;
|
||||
register int i;
|
||||
|
||||
/* Diasble all interrupts */
|
||||
local_irq_disable();
|
||||
|
@ -26,7 +26,7 @@ static inline void kb_wait(void)
|
||||
/* XXX This ends up at the ARC firmware prompt ... */
|
||||
void sni_machine_restart(char *command)
|
||||
{
|
||||
int i, j;
|
||||
int i;
|
||||
|
||||
/* This does a normal via the keyboard controller like a PC.
|
||||
We can do that easier ... */
|
||||
|
@ -26,8 +26,8 @@ aflags-vdso := $(ccflags-vdso) \
|
||||
# the comments on that file.
|
||||
#
|
||||
ifndef CONFIG_CPU_MIPSR6
|
||||
ifeq ($(call ld-ifversion, -gt, 22400000, y),)
|
||||
$(warning MIPS VDSO requires binutils > 2.24)
|
||||
ifeq ($(call ld-ifversion, -lt, 22500000, y),y)
|
||||
$(warning MIPS VDSO requires binutils >= 2.25)
|
||||
obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y))
|
||||
ccflags-vdso += -DDISABLE_MIPS_VDSO
|
||||
endif
|
||||
|
@ -435,6 +435,55 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs, int in_syscall)
|
||||
regs->gr[28]);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check how the syscall number gets loaded into %r20 within
|
||||
* the delay branch in userspace and adjust as needed.
|
||||
*/
|
||||
|
||||
static void check_syscallno_in_delay_branch(struct pt_regs *regs)
|
||||
{
|
||||
u32 opcode, source_reg;
|
||||
u32 __user *uaddr;
|
||||
int err;
|
||||
|
||||
/* Usually we don't have to restore %r20 (the system call number)
|
||||
* because it gets loaded in the delay slot of the branch external
|
||||
* instruction via the ldi instruction.
|
||||
* In some cases a register-to-register copy instruction might have
|
||||
* been used instead, in which case we need to copy the syscall
|
||||
* number into the source register before returning to userspace.
|
||||
*/
|
||||
|
||||
/* A syscall is just a branch, so all we have to do is fiddle the
|
||||
* return pointer so that the ble instruction gets executed again.
|
||||
*/
|
||||
regs->gr[31] -= 8; /* delayed branching */
|
||||
|
||||
/* Get assembler opcode of code in delay branch */
|
||||
uaddr = (unsigned int *) ((regs->gr[31] & ~3) + 4);
|
||||
err = get_user(opcode, uaddr);
|
||||
if (err)
|
||||
return;
|
||||
|
||||
/* Check if delay branch uses "ldi int,%r20" */
|
||||
if ((opcode & 0xffff0000) == 0x34140000)
|
||||
return; /* everything ok, just return */
|
||||
|
||||
/* Check if delay branch uses "nop" */
|
||||
if (opcode == INSN_NOP)
|
||||
return;
|
||||
|
||||
/* Check if delay branch uses "copy %rX,%r20" */
|
||||
if ((opcode & 0xffe0ffff) == 0x08000254) {
|
||||
source_reg = (opcode >> 16) & 31;
|
||||
regs->gr[source_reg] = regs->gr[20];
|
||||
return;
|
||||
}
|
||||
|
||||
pr_warn("syscall restart: %s (pid %d): unexpected opcode 0x%08x\n",
|
||||
current->comm, task_pid_nr(current), opcode);
|
||||
}
|
||||
|
||||
static inline void
|
||||
syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
|
||||
{
|
||||
@ -457,10 +506,7 @@ syscall_restart(struct pt_regs *regs, struct k_sigaction *ka)
|
||||
}
|
||||
/* fallthrough */
|
||||
case -ERESTARTNOINTR:
|
||||
/* A syscall is just a branch, so all
|
||||
* we have to do is fiddle the return pointer.
|
||||
*/
|
||||
regs->gr[31] -= 8; /* delayed branching */
|
||||
check_syscallno_in_delay_branch(regs);
|
||||
break;
|
||||
}
|
||||
}
|
||||
@ -510,15 +556,9 @@ insert_restart_trampoline(struct pt_regs *regs)
|
||||
}
|
||||
case -ERESTARTNOHAND:
|
||||
case -ERESTARTSYS:
|
||||
case -ERESTARTNOINTR: {
|
||||
/* Hooray for delayed branching. We don't
|
||||
* have to restore %r20 (the system call
|
||||
* number) because it gets loaded in the delay
|
||||
* slot of the branch external instruction.
|
||||
*/
|
||||
regs->gr[31] -= 8;
|
||||
case -ERESTARTNOINTR:
|
||||
check_syscallno_in_delay_branch(regs);
|
||||
return;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
|
||||
|
||||
static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
|
||||
{
|
||||
/*
|
||||
* Check for illegal transactional state bit combination
|
||||
* and if we find it, force the TS field to a safe state.
|
||||
*/
|
||||
if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
|
||||
msr &= ~MSR_TS_MASK;
|
||||
vcpu->arch.shregs.msr = msr;
|
||||
kvmppc_end_cede(vcpu);
|
||||
}
|
||||
|
@ -1920,16 +1920,23 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
|
||||
}
|
||||
if (separator)
|
||||
ptr += sprintf(ptr, "%c", separator);
|
||||
/*
|
||||
* Use four '%' characters below because of the
|
||||
* following two conversions:
|
||||
*
|
||||
* 1) sprintf: %%%%r -> %%r
|
||||
* 2) printk : %%r -> %r
|
||||
*/
|
||||
if (operand->flags & OPERAND_GPR)
|
||||
ptr += sprintf(ptr, "%%r%i", value);
|
||||
ptr += sprintf(ptr, "%%%%r%i", value);
|
||||
else if (operand->flags & OPERAND_FPR)
|
||||
ptr += sprintf(ptr, "%%f%i", value);
|
||||
ptr += sprintf(ptr, "%%%%f%i", value);
|
||||
else if (operand->flags & OPERAND_AR)
|
||||
ptr += sprintf(ptr, "%%a%i", value);
|
||||
ptr += sprintf(ptr, "%%%%a%i", value);
|
||||
else if (operand->flags & OPERAND_CR)
|
||||
ptr += sprintf(ptr, "%%c%i", value);
|
||||
ptr += sprintf(ptr, "%%%%c%i", value);
|
||||
else if (operand->flags & OPERAND_VR)
|
||||
ptr += sprintf(ptr, "%%v%i", value);
|
||||
ptr += sprintf(ptr, "%%%%v%i", value);
|
||||
else if (operand->flags & OPERAND_PCREL)
|
||||
ptr += sprintf(ptr, "%lx", (signed int) value
|
||||
+ addr);
|
||||
|
@ -95,6 +95,7 @@
|
||||
* really available. So we simply advertise only "crypto" support.
|
||||
*/
|
||||
#define HWCAP_SPARC_CRYPTO 0x04000000 /* CRYPTO insns available */
|
||||
#define HWCAP_SPARC_ADI 0x08000000 /* ADI available */
|
||||
|
||||
#define CORE_DUMP_USE_REGSET
|
||||
|
||||
|
@ -417,8 +417,13 @@
|
||||
#define __NR_bpf 349
|
||||
#define __NR_execveat 350
|
||||
#define __NR_membarrier 351
|
||||
#define __NR_userfaultfd 352
|
||||
#define __NR_bind 353
|
||||
#define __NR_listen 354
|
||||
#define __NR_setsockopt 355
|
||||
#define __NR_mlock2 356
|
||||
|
||||
#define NR_syscalls 352
|
||||
#define NR_syscalls 357
|
||||
|
||||
/* Bitmask values returned from kern_features system call. */
|
||||
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
|
||||
|
@ -946,6 +946,12 @@ ENTRY(__retl_one)
|
||||
mov 1, %o0
|
||||
ENDPROC(__retl_one)
|
||||
|
||||
ENTRY(__retl_one_fp)
|
||||
VISExitHalf
|
||||
retl
|
||||
mov 1, %o0
|
||||
ENDPROC(__retl_one_fp)
|
||||
|
||||
ENTRY(__ret_one_asi)
|
||||
wr %g0, ASI_AIUS, %asi
|
||||
ret
|
||||
@ -958,6 +964,13 @@ ENTRY(__retl_one_asi)
|
||||
mov 1, %o0
|
||||
ENDPROC(__retl_one_asi)
|
||||
|
||||
ENTRY(__retl_one_asi_fp)
|
||||
wr %g0, ASI_AIUS, %asi
|
||||
VISExitHalf
|
||||
retl
|
||||
mov 1, %o0
|
||||
ENDPROC(__retl_one_asi_fp)
|
||||
|
||||
ENTRY(__retl_o1)
|
||||
retl
|
||||
mov %o1, %o0
|
||||
|
@ -1828,11 +1828,18 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
|
||||
void
|
||||
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
|
||||
{
|
||||
u64 saved_fault_address = current_thread_info()->fault_address;
|
||||
u8 saved_fault_code = get_thread_fault_code();
|
||||
mm_segment_t old_fs;
|
||||
|
||||
perf_callchain_store(entry, regs->tpc);
|
||||
|
||||
if (!current->mm)
|
||||
return;
|
||||
|
||||
old_fs = get_fs();
|
||||
set_fs(USER_DS);
|
||||
|
||||
flushw_user();
|
||||
|
||||
pagefault_disable();
|
||||
@ -1843,4 +1850,8 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
|
||||
perf_callchain_user_64(entry, regs);
|
||||
|
||||
pagefault_enable();
|
||||
|
||||
set_fs(old_fs);
|
||||
set_thread_fault_code(saved_fault_code);
|
||||
current_thread_info()->fault_address = saved_fault_address;
|
||||
}
|
||||
|
@ -73,7 +73,13 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
|
||||
andn %l1, %l4, %l1
|
||||
srl %l4, 20, %l4
|
||||
ba,pt %xcc, rtrap_no_irq_enable
|
||||
wrpr %l4, %pil
|
||||
nop
|
||||
/* Do not actually set the %pil here. We will do that
|
||||
* below after we clear PSTATE_IE in the %pstate register.
|
||||
* If we re-enable interrupts here, we can recurse down
|
||||
* the hardirq stack potentially endlessly, causing a
|
||||
* stack overflow.
|
||||
*/
|
||||
|
||||
.align 64
|
||||
.globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
|
||||
|
@ -380,7 +380,8 @@ static const char *hwcaps[] = {
|
||||
*/
|
||||
"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
|
||||
"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
|
||||
"ima", "cspare", "pause", "cbcond",
|
||||
"ima", "cspare", "pause", "cbcond", NULL /*reserved for crypto */,
|
||||
"adp",
|
||||
};
|
||||
|
||||
static const char *crypto_hwcaps[] = {
|
||||
@ -396,7 +397,7 @@ void cpucap_info(struct seq_file *m)
|
||||
seq_puts(m, "cpucaps\t\t: ");
|
||||
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
|
||||
unsigned long bit = 1UL << i;
|
||||
if (caps & bit) {
|
||||
if (hwcaps[i] && (caps & bit)) {
|
||||
seq_printf(m, "%s%s",
|
||||
printed ? "," : "", hwcaps[i]);
|
||||
printed++;
|
||||
@ -450,7 +451,7 @@ static void __init report_hwcaps(unsigned long caps)
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
|
||||
unsigned long bit = 1UL << i;
|
||||
if (caps & bit)
|
||||
if (hwcaps[i] && (caps & bit))
|
||||
report_one_hwcap(&printed, hwcaps[i]);
|
||||
}
|
||||
if (caps & HWCAP_SPARC_CRYPTO)
|
||||
@ -485,7 +486,7 @@ static unsigned long __init mdesc_cpu_hwcap_list(void)
|
||||
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
|
||||
unsigned long bit = 1UL << i;
|
||||
|
||||
if (!strcmp(prop, hwcaps[i])) {
|
||||
if (hwcaps[i] && !strcmp(prop, hwcaps[i])) {
|
||||
caps |= bit;
|
||||
break;
|
||||
}
|
||||
|
@ -35,18 +35,18 @@ sys_call_table:
|
||||
/*80*/ .long sys_setgroups16, sys_getpgrp, sys_setgroups, sys_setitimer, sys_ftruncate64
|
||||
/*85*/ .long sys_swapon, sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
|
||||
/*90*/ .long sys_dup2, sys_setfsuid, sys_fcntl, sys_select, sys_setfsgid
|
||||
/*95*/ .long sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
|
||||
/*95*/ .long sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
|
||||
/*100*/ .long sys_getpriority, sys_rt_sigreturn, sys_rt_sigaction, sys_rt_sigprocmask, sys_rt_sigpending
|
||||
/*105*/ .long sys_rt_sigtimedwait, sys_rt_sigqueueinfo, sys_rt_sigsuspend, sys_setresuid, sys_getresuid
|
||||
/*110*/ .long sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
|
||||
/*115*/ .long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_nis_syscall, sys_getcwd
|
||||
/*110*/ .long sys_setresgid, sys_getresgid, sys_setregid, sys_recvmsg, sys_sendmsg
|
||||
/*115*/ .long sys_getgroups, sys_gettimeofday, sys_getrusage, sys_getsockopt, sys_getcwd
|
||||
/*120*/ .long sys_readv, sys_writev, sys_settimeofday, sys_fchown16, sys_fchmod
|
||||
/*125*/ .long sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
|
||||
/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_nis_syscall, sys_nis_syscall
|
||||
/*135*/ .long sys_nis_syscall, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
|
||||
/*140*/ .long sys_sendfile64, sys_nis_syscall, sys_futex, sys_gettid, sys_getrlimit
|
||||
/*125*/ .long sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, sys_truncate
|
||||
/*130*/ .long sys_ftruncate, sys_flock, sys_lstat64, sys_sendto, sys_shutdown
|
||||
/*135*/ .long sys_socketpair, sys_mkdir, sys_rmdir, sys_utimes, sys_stat64
|
||||
/*140*/ .long sys_sendfile64, sys_getpeername, sys_futex, sys_gettid, sys_getrlimit
|
||||
/*145*/ .long sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
|
||||
/*150*/ .long sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
|
||||
/*150*/ .long sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
|
||||
/*155*/ .long sys_fcntl64, sys_inotify_rm_watch, sys_statfs, sys_fstatfs, sys_oldumount
|
||||
/*160*/ .long sys_sched_setaffinity, sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
|
||||
/*165*/ .long sys_quotactl, sys_set_tid_address, sys_mount, sys_ustat, sys_setxattr
|
||||
@ -87,4 +87,5 @@ sys_call_table:
|
||||
/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
|
||||
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
|
||||
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
|
||||
/*350*/ .long sys_execveat, sys_membarrier
|
||||
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
|
||||
/*355*/ .long sys_setsockopt, sys_mlock2
|
||||
|
@ -37,15 +37,15 @@ sys_call_table32:
|
||||
/*80*/ .word sys_setgroups16, sys_getpgrp, sys_setgroups, compat_sys_setitimer, sys32_ftruncate64
|
||||
.word sys_swapon, compat_sys_getitimer, sys_setuid, sys_sethostname, sys_setgid
|
||||
/*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid
|
||||
.word sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall
|
||||
.word sys_fsync, sys_setpriority, sys_socket, sys_connect, sys_accept
|
||||
/*100*/ .word sys_getpriority, sys32_rt_sigreturn, compat_sys_rt_sigaction, compat_sys_rt_sigprocmask, compat_sys_rt_sigpending
|
||||
.word compat_sys_rt_sigtimedwait, compat_sys_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid
|
||||
/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall
|
||||
.word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, sys_nis_syscall, sys_getcwd
|
||||
/*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, compat_sys_recvmsg, compat_sys_sendmsg
|
||||
.word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, compat_sys_getsockopt, sys_getcwd
|
||||
/*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod
|
||||
.word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
|
||||
/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall
|
||||
.word sys_nis_syscall, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
|
||||
.word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
|
||||
/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown
|
||||
.word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
|
||||
/*140*/ .word sys_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit
|
||||
.word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
|
||||
/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
|
||||
@ -88,7 +88,8 @@ sys_call_table32:
|
||||
.word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
|
||||
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
|
||||
.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
|
||||
/*350*/ .word sys32_execveat, sys_membarrier
|
||||
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
|
||||
.word compat_sys_setsockopt, sys_mlock2
|
||||
|
||||
#endif /* CONFIG_COMPAT */
|
||||
|
||||
@ -168,4 +169,5 @@ sys_call_table:
|
||||
.word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
|
||||
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
|
||||
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
|
||||
/*350*/ .word sys64_execveat, sys_membarrier
|
||||
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
|
||||
.word sys_setsockopt, sys_mlock2
|
||||
|
@ -11,6 +11,14 @@
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define EX_LD_FP(x) \
|
||||
98: x; \
|
||||
.section __ex_table,"a";\
|
||||
.align 4; \
|
||||
.word 98b, __retl_one_asi_fp;\
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#ifndef ASI_AIUS
|
||||
#define ASI_AIUS 0x11
|
||||
#endif
|
||||
|
@ -11,6 +11,14 @@
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define EX_ST_FP(x) \
|
||||
98: x; \
|
||||
.section __ex_table,"a";\
|
||||
.align 4; \
|
||||
.word 98b, __retl_one_asi_fp;\
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#ifndef ASI_AIUS
|
||||
#define ASI_AIUS 0x11
|
||||
#endif
|
||||
|
@ -34,10 +34,16 @@
|
||||
#ifndef EX_LD
|
||||
#define EX_LD(x) x
|
||||
#endif
|
||||
#ifndef EX_LD_FP
|
||||
#define EX_LD_FP(x) x
|
||||
#endif
|
||||
|
||||
#ifndef EX_ST
|
||||
#define EX_ST(x) x
|
||||
#endif
|
||||
#ifndef EX_ST_FP
|
||||
#define EX_ST_FP(x) x
|
||||
#endif
|
||||
|
||||
#ifndef EX_RETVAL
|
||||
#define EX_RETVAL(x) x
|
||||
@ -134,40 +140,40 @@
|
||||
fsrc2 %x6, %f12; \
|
||||
fsrc2 %x7, %f14;
|
||||
#define FREG_LOAD_1(base, x0) \
|
||||
EX_LD(LOAD(ldd, base + 0x00, %x0))
|
||||
EX_LD_FP(LOAD(ldd, base + 0x00, %x0))
|
||||
#define FREG_LOAD_2(base, x0, x1) \
|
||||
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD(LOAD(ldd, base + 0x08, %x1));
|
||||
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x08, %x1));
|
||||
#define FREG_LOAD_3(base, x0, x1, x2) \
|
||||
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD(LOAD(ldd, base + 0x08, %x1)); \
|
||||
EX_LD(LOAD(ldd, base + 0x10, %x2));
|
||||
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x10, %x2));
|
||||
#define FREG_LOAD_4(base, x0, x1, x2, x3) \
|
||||
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD(LOAD(ldd, base + 0x08, %x1)); \
|
||||
EX_LD(LOAD(ldd, base + 0x10, %x2)); \
|
||||
EX_LD(LOAD(ldd, base + 0x18, %x3));
|
||||
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x18, %x3));
|
||||
#define FREG_LOAD_5(base, x0, x1, x2, x3, x4) \
|
||||
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD(LOAD(ldd, base + 0x08, %x1)); \
|
||||
EX_LD(LOAD(ldd, base + 0x10, %x2)); \
|
||||
EX_LD(LOAD(ldd, base + 0x18, %x3)); \
|
||||
EX_LD(LOAD(ldd, base + 0x20, %x4));
|
||||
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x20, %x4));
|
||||
#define FREG_LOAD_6(base, x0, x1, x2, x3, x4, x5) \
|
||||
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD(LOAD(ldd, base + 0x08, %x1)); \
|
||||
EX_LD(LOAD(ldd, base + 0x10, %x2)); \
|
||||
EX_LD(LOAD(ldd, base + 0x18, %x3)); \
|
||||
EX_LD(LOAD(ldd, base + 0x20, %x4)); \
|
||||
EX_LD(LOAD(ldd, base + 0x28, %x5));
|
||||
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x28, %x5));
|
||||
#define FREG_LOAD_7(base, x0, x1, x2, x3, x4, x5, x6) \
|
||||
EX_LD(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD(LOAD(ldd, base + 0x08, %x1)); \
|
||||
EX_LD(LOAD(ldd, base + 0x10, %x2)); \
|
||||
EX_LD(LOAD(ldd, base + 0x18, %x3)); \
|
||||
EX_LD(LOAD(ldd, base + 0x20, %x4)); \
|
||||
EX_LD(LOAD(ldd, base + 0x28, %x5)); \
|
||||
EX_LD(LOAD(ldd, base + 0x30, %x6));
|
||||
EX_LD_FP(LOAD(ldd, base + 0x00, %x0)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x08, %x1)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x10, %x2)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x18, %x3)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x20, %x4)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x28, %x5)); \
|
||||
EX_LD_FP(LOAD(ldd, base + 0x30, %x6));
|
||||
|
||||
.register %g2,#scratch
|
||||
.register %g3,#scratch
|
||||
@ -275,11 +281,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
nop
|
||||
/* fall through for 0 < low bits < 8 */
|
||||
110: sub %o4, 64, %g2
|
||||
EX_LD(LOAD_BLK(%g2, %f0))
|
||||
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD(LOAD_BLK(%o4, %f16))
|
||||
EX_LD_FP(LOAD_BLK(%g2, %f0))
|
||||
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD_FP(LOAD_BLK(%o4, %f16))
|
||||
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f14, f16)
|
||||
EX_ST(STORE_BLK(%f0, %o4 + %g3))
|
||||
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
|
||||
FREG_MOVE_8(f16, f18, f20, f22, f24, f26, f28, f30)
|
||||
subcc %g1, 64, %g1
|
||||
add %o4, 64, %o4
|
||||
@ -290,10 +296,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
|
||||
120: sub %o4, 56, %g2
|
||||
FREG_LOAD_7(%g2, f0, f2, f4, f6, f8, f10, f12)
|
||||
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD(LOAD_BLK(%o4, %f16))
|
||||
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD_FP(LOAD_BLK(%o4, %f16))
|
||||
FREG_FROB(f0, f2, f4, f6, f8, f10, f12, f16, f18)
|
||||
EX_ST(STORE_BLK(%f0, %o4 + %g3))
|
||||
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
|
||||
FREG_MOVE_7(f18, f20, f22, f24, f26, f28, f30)
|
||||
subcc %g1, 64, %g1
|
||||
add %o4, 64, %o4
|
||||
@ -304,10 +310,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
|
||||
130: sub %o4, 48, %g2
|
||||
FREG_LOAD_6(%g2, f0, f2, f4, f6, f8, f10)
|
||||
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD(LOAD_BLK(%o4, %f16))
|
||||
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD_FP(LOAD_BLK(%o4, %f16))
|
||||
FREG_FROB(f0, f2, f4, f6, f8, f10, f16, f18, f20)
|
||||
EX_ST(STORE_BLK(%f0, %o4 + %g3))
|
||||
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
|
||||
FREG_MOVE_6(f20, f22, f24, f26, f28, f30)
|
||||
subcc %g1, 64, %g1
|
||||
add %o4, 64, %o4
|
||||
@ -318,10 +324,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
|
||||
140: sub %o4, 40, %g2
|
||||
FREG_LOAD_5(%g2, f0, f2, f4, f6, f8)
|
||||
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD(LOAD_BLK(%o4, %f16))
|
||||
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD_FP(LOAD_BLK(%o4, %f16))
|
||||
FREG_FROB(f0, f2, f4, f6, f8, f16, f18, f20, f22)
|
||||
EX_ST(STORE_BLK(%f0, %o4 + %g3))
|
||||
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
|
||||
FREG_MOVE_5(f22, f24, f26, f28, f30)
|
||||
subcc %g1, 64, %g1
|
||||
add %o4, 64, %o4
|
||||
@ -332,10 +338,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
|
||||
150: sub %o4, 32, %g2
|
||||
FREG_LOAD_4(%g2, f0, f2, f4, f6)
|
||||
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD(LOAD_BLK(%o4, %f16))
|
||||
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD_FP(LOAD_BLK(%o4, %f16))
|
||||
FREG_FROB(f0, f2, f4, f6, f16, f18, f20, f22, f24)
|
||||
EX_ST(STORE_BLK(%f0, %o4 + %g3))
|
||||
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
|
||||
FREG_MOVE_4(f24, f26, f28, f30)
|
||||
subcc %g1, 64, %g1
|
||||
add %o4, 64, %o4
|
||||
@ -346,10 +352,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
|
||||
160: sub %o4, 24, %g2
|
||||
FREG_LOAD_3(%g2, f0, f2, f4)
|
||||
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD(LOAD_BLK(%o4, %f16))
|
||||
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD_FP(LOAD_BLK(%o4, %f16))
|
||||
FREG_FROB(f0, f2, f4, f16, f18, f20, f22, f24, f26)
|
||||
EX_ST(STORE_BLK(%f0, %o4 + %g3))
|
||||
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
|
||||
FREG_MOVE_3(f26, f28, f30)
|
||||
subcc %g1, 64, %g1
|
||||
add %o4, 64, %o4
|
||||
@ -360,10 +366,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
|
||||
170: sub %o4, 16, %g2
|
||||
FREG_LOAD_2(%g2, f0, f2)
|
||||
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD(LOAD_BLK(%o4, %f16))
|
||||
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD_FP(LOAD_BLK(%o4, %f16))
|
||||
FREG_FROB(f0, f2, f16, f18, f20, f22, f24, f26, f28)
|
||||
EX_ST(STORE_BLK(%f0, %o4 + %g3))
|
||||
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
|
||||
FREG_MOVE_2(f28, f30)
|
||||
subcc %g1, 64, %g1
|
||||
add %o4, 64, %o4
|
||||
@ -374,10 +380,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
|
||||
180: sub %o4, 8, %g2
|
||||
FREG_LOAD_1(%g2, f0)
|
||||
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD(LOAD_BLK(%o4, %f16))
|
||||
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
|
||||
EX_LD_FP(LOAD_BLK(%o4, %f16))
|
||||
FREG_FROB(f0, f16, f18, f20, f22, f24, f26, f28, f30)
|
||||
EX_ST(STORE_BLK(%f0, %o4 + %g3))
|
||||
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
|
||||
FREG_MOVE_1(f30)
|
||||
subcc %g1, 64, %g1
|
||||
add %o4, 64, %o4
|
||||
@ -387,10 +393,10 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
nop
|
||||
|
||||
190:
|
||||
1: EX_ST(STORE_INIT(%g0, %o4 + %g3))
|
||||
1: EX_ST_FP(STORE_INIT(%g0, %o4 + %g3))
|
||||
subcc %g1, 64, %g1
|
||||
EX_LD(LOAD_BLK(%o4, %f0))
|
||||
EX_ST(STORE_BLK(%f0, %o4 + %g3))
|
||||
EX_LD_FP(LOAD_BLK(%o4, %f0))
|
||||
EX_ST_FP(STORE_BLK(%f0, %o4 + %g3))
|
||||
add %o4, 64, %o4
|
||||
bne,pt %xcc, 1b
|
||||
LOAD(prefetch, %o4 + 64, #one_read)
|
||||
|
@ -11,6 +11,14 @@
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define EX_LD_FP(x) \
|
||||
98: x; \
|
||||
.section __ex_table,"a";\
|
||||
.align 4; \
|
||||
.word 98b, __retl_one_asi_fp;\
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#ifndef ASI_AIUS
|
||||
#define ASI_AIUS 0x11
|
||||
#endif
|
||||
|
@ -11,6 +11,14 @@
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define EX_ST_FP(x) \
|
||||
98: x; \
|
||||
.section __ex_table,"a";\
|
||||
.align 4; \
|
||||
.word 98b, __retl_one_asi_fp;\
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#ifndef ASI_AIUS
|
||||
#define ASI_AIUS 0x11
|
||||
#endif
|
||||
|
@ -48,10 +48,16 @@
|
||||
#ifndef EX_LD
|
||||
#define EX_LD(x) x
|
||||
#endif
|
||||
#ifndef EX_LD_FP
|
||||
#define EX_LD_FP(x) x
|
||||
#endif
|
||||
|
||||
#ifndef EX_ST
|
||||
#define EX_ST(x) x
|
||||
#endif
|
||||
#ifndef EX_ST_FP
|
||||
#define EX_ST_FP(x) x
|
||||
#endif
|
||||
|
||||
#ifndef EX_RETVAL
|
||||
#define EX_RETVAL(x) x
|
||||
@ -210,17 +216,17 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
sub %o2, %o4, %o2
|
||||
alignaddr %o1, %g0, %g1
|
||||
add %o1, %o4, %o1
|
||||
EX_LD(LOAD(ldd, %g1 + 0x00, %f0))
|
||||
1: EX_LD(LOAD(ldd, %g1 + 0x08, %f2))
|
||||
EX_LD_FP(LOAD(ldd, %g1 + 0x00, %f0))
|
||||
1: EX_LD_FP(LOAD(ldd, %g1 + 0x08, %f2))
|
||||
subcc %o4, 0x40, %o4
|
||||
EX_LD(LOAD(ldd, %g1 + 0x10, %f4))
|
||||
EX_LD(LOAD(ldd, %g1 + 0x18, %f6))
|
||||
EX_LD(LOAD(ldd, %g1 + 0x20, %f8))
|
||||
EX_LD(LOAD(ldd, %g1 + 0x28, %f10))
|
||||
EX_LD(LOAD(ldd, %g1 + 0x30, %f12))
|
||||
EX_LD(LOAD(ldd, %g1 + 0x38, %f14))
|
||||
EX_LD_FP(LOAD(ldd, %g1 + 0x10, %f4))
|
||||
EX_LD_FP(LOAD(ldd, %g1 + 0x18, %f6))
|
||||
EX_LD_FP(LOAD(ldd, %g1 + 0x20, %f8))
|
||||
EX_LD_FP(LOAD(ldd, %g1 + 0x28, %f10))
|
||||
EX_LD_FP(LOAD(ldd, %g1 + 0x30, %f12))
|
||||
EX_LD_FP(LOAD(ldd, %g1 + 0x38, %f14))
|
||||
faligndata %f0, %f2, %f16
|
||||
EX_LD(LOAD(ldd, %g1 + 0x40, %f0))
|
||||
EX_LD_FP(LOAD(ldd, %g1 + 0x40, %f0))
|
||||
faligndata %f2, %f4, %f18
|
||||
add %g1, 0x40, %g1
|
||||
faligndata %f4, %f6, %f20
|
||||
@ -229,14 +235,14 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
faligndata %f10, %f12, %f26
|
||||
faligndata %f12, %f14, %f28
|
||||
faligndata %f14, %f0, %f30
|
||||
EX_ST(STORE(std, %f16, %o0 + 0x00))
|
||||
EX_ST(STORE(std, %f18, %o0 + 0x08))
|
||||
EX_ST(STORE(std, %f20, %o0 + 0x10))
|
||||
EX_ST(STORE(std, %f22, %o0 + 0x18))
|
||||
EX_ST(STORE(std, %f24, %o0 + 0x20))
|
||||
EX_ST(STORE(std, %f26, %o0 + 0x28))
|
||||
EX_ST(STORE(std, %f28, %o0 + 0x30))
|
||||
EX_ST(STORE(std, %f30, %o0 + 0x38))
|
||||
EX_ST_FP(STORE(std, %f16, %o0 + 0x00))
|
||||
EX_ST_FP(STORE(std, %f18, %o0 + 0x08))
|
||||
EX_ST_FP(STORE(std, %f20, %o0 + 0x10))
|
||||
EX_ST_FP(STORE(std, %f22, %o0 + 0x18))
|
||||
EX_ST_FP(STORE(std, %f24, %o0 + 0x20))
|
||||
EX_ST_FP(STORE(std, %f26, %o0 + 0x28))
|
||||
EX_ST_FP(STORE(std, %f28, %o0 + 0x30))
|
||||
EX_ST_FP(STORE(std, %f30, %o0 + 0x38))
|
||||
add %o0, 0x40, %o0
|
||||
bne,pt %icc, 1b
|
||||
LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
|
||||
|
@ -11,6 +11,14 @@
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define EX_LD_FP(x) \
|
||||
98: x; \
|
||||
.section __ex_table,"a";\
|
||||
.align 4; \
|
||||
.word 98b, __retl_one_fp;\
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define FUNC_NAME ___copy_from_user
|
||||
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
|
||||
#define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest
|
||||
|
@ -11,6 +11,14 @@
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define EX_ST_FP(x) \
|
||||
98: x; \
|
||||
.section __ex_table,"a";\
|
||||
.align 4; \
|
||||
.word 98b, __retl_one_fp;\
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define FUNC_NAME ___copy_to_user
|
||||
#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
|
||||
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS
|
||||
|
@ -25,10 +25,16 @@
|
||||
#ifndef EX_LD
|
||||
#define EX_LD(x) x
|
||||
#endif
|
||||
#ifndef EX_LD_FP
|
||||
#define EX_LD_FP(x) x
|
||||
#endif
|
||||
|
||||
#ifndef EX_ST
|
||||
#define EX_ST(x) x
|
||||
#endif
|
||||
#ifndef EX_ST_FP
|
||||
#define EX_ST_FP(x) x
|
||||
#endif
|
||||
|
||||
#ifndef EX_RETVAL
|
||||
#define EX_RETVAL(x) x
|
||||
@ -73,8 +79,8 @@
|
||||
faligndata %f8, %f9, %f62;
|
||||
|
||||
#define MAIN_LOOP_CHUNK(src, dest, fdest, fsrc, len, jmptgt) \
|
||||
EX_LD(LOAD_BLK(%src, %fdest)); \
|
||||
EX_ST(STORE_BLK(%fsrc, %dest)); \
|
||||
EX_LD_FP(LOAD_BLK(%src, %fdest)); \
|
||||
EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
|
||||
add %src, 0x40, %src; \
|
||||
subcc %len, 0x40, %len; \
|
||||
be,pn %xcc, jmptgt; \
|
||||
@ -89,12 +95,12 @@
|
||||
|
||||
#define DO_SYNC membar #Sync;
|
||||
#define STORE_SYNC(dest, fsrc) \
|
||||
EX_ST(STORE_BLK(%fsrc, %dest)); \
|
||||
EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
|
||||
add %dest, 0x40, %dest; \
|
||||
DO_SYNC
|
||||
|
||||
#define STORE_JUMP(dest, fsrc, target) \
|
||||
EX_ST(STORE_BLK(%fsrc, %dest)); \
|
||||
EX_ST_FP(STORE_BLK(%fsrc, %dest)); \
|
||||
add %dest, 0x40, %dest; \
|
||||
ba,pt %xcc, target; \
|
||||
nop;
|
||||
@ -103,7 +109,7 @@
|
||||
subcc %left, 8, %left;\
|
||||
bl,pn %xcc, 95f; \
|
||||
faligndata %f0, %f1, %f48; \
|
||||
EX_ST(STORE(std, %f48, %dest)); \
|
||||
EX_ST_FP(STORE(std, %f48, %dest)); \
|
||||
add %dest, 8, %dest;
|
||||
|
||||
#define UNEVEN_VISCHUNK_LAST(dest, f0, f1, left) \
|
||||
@ -160,8 +166,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
and %g2, 0x38, %g2
|
||||
|
||||
1: subcc %g1, 0x1, %g1
|
||||
EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
|
||||
EX_ST(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
|
||||
EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
|
||||
EX_ST_FP(STORE(stb, %o3, %o1 + %GLOBAL_SPARE))
|
||||
bgu,pt %XCC, 1b
|
||||
add %o1, 0x1, %o1
|
||||
|
||||
@ -172,20 +178,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
be,pt %icc, 3f
|
||||
alignaddr %o1, %g0, %o1
|
||||
|
||||
EX_LD(LOAD(ldd, %o1, %f4))
|
||||
1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
|
||||
EX_LD_FP(LOAD(ldd, %o1, %f4))
|
||||
1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
|
||||
add %o1, 0x8, %o1
|
||||
subcc %g2, 0x8, %g2
|
||||
faligndata %f4, %f6, %f0
|
||||
EX_ST(STORE(std, %f0, %o0))
|
||||
EX_ST_FP(STORE(std, %f0, %o0))
|
||||
be,pn %icc, 3f
|
||||
add %o0, 0x8, %o0
|
||||
|
||||
EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
|
||||
EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
|
||||
add %o1, 0x8, %o1
|
||||
subcc %g2, 0x8, %g2
|
||||
faligndata %f6, %f4, %f0
|
||||
EX_ST(STORE(std, %f0, %o0))
|
||||
EX_ST_FP(STORE(std, %f0, %o0))
|
||||
bne,pt %icc, 1b
|
||||
add %o0, 0x8, %o0
|
||||
|
||||
@ -208,13 +214,13 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
add %g1, %GLOBAL_SPARE, %g1
|
||||
subcc %o2, %g3, %o2
|
||||
|
||||
EX_LD(LOAD_BLK(%o1, %f0))
|
||||
EX_LD_FP(LOAD_BLK(%o1, %f0))
|
||||
add %o1, 0x40, %o1
|
||||
add %g1, %g3, %g1
|
||||
EX_LD(LOAD_BLK(%o1, %f16))
|
||||
EX_LD_FP(LOAD_BLK(%o1, %f16))
|
||||
add %o1, 0x40, %o1
|
||||
sub %GLOBAL_SPARE, 0x80, %GLOBAL_SPARE
|
||||
EX_LD(LOAD_BLK(%o1, %f32))
|
||||
EX_LD_FP(LOAD_BLK(%o1, %f32))
|
||||
add %o1, 0x40, %o1
|
||||
|
||||
/* There are 8 instances of the unrolled loop,
|
||||
@ -426,28 +432,28 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
62: FINISH_VISCHUNK(o0, f44, f46, g3)
|
||||
63: UNEVEN_VISCHUNK_LAST(o0, f46, f0, g3)
|
||||
|
||||
93: EX_LD(LOAD(ldd, %o1, %f2))
|
||||
93: EX_LD_FP(LOAD(ldd, %o1, %f2))
|
||||
add %o1, 8, %o1
|
||||
subcc %g3, 8, %g3
|
||||
faligndata %f0, %f2, %f8
|
||||
EX_ST(STORE(std, %f8, %o0))
|
||||
EX_ST_FP(STORE(std, %f8, %o0))
|
||||
bl,pn %xcc, 95f
|
||||
add %o0, 8, %o0
|
||||
EX_LD(LOAD(ldd, %o1, %f0))
|
||||
EX_LD_FP(LOAD(ldd, %o1, %f0))
|
||||
add %o1, 8, %o1
|
||||
subcc %g3, 8, %g3
|
||||
faligndata %f2, %f0, %f8
|
||||
EX_ST(STORE(std, %f8, %o0))
|
||||
EX_ST_FP(STORE(std, %f8, %o0))
|
||||
bge,pt %xcc, 93b
|
||||
add %o0, 8, %o0
|
||||
|
||||
95: brz,pt %o2, 2f
|
||||
mov %g1, %o1
|
||||
|
||||
1: EX_LD(LOAD(ldub, %o1, %o3))
|
||||
1: EX_LD_FP(LOAD(ldub, %o1, %o3))
|
||||
add %o1, 1, %o1
|
||||
subcc %o2, 1, %o2
|
||||
EX_ST(STORE(stb, %o3, %o0))
|
||||
EX_ST_FP(STORE(stb, %o3, %o0))
|
||||
bne,pt %xcc, 1b
|
||||
add %o0, 1, %o0
|
||||
|
||||
|
@ -11,6 +11,14 @@
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define EX_LD_FP(x) \
|
||||
98: x; \
|
||||
.section __ex_table,"a";\
|
||||
.align 4; \
|
||||
.word 98b, __retl_one_fp;\
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define FUNC_NAME U3copy_from_user
|
||||
#define LOAD(type,addr,dest) type##a [addr] %asi, dest
|
||||
#define EX_RETVAL(x) 0
|
||||
|
@ -11,6 +11,14 @@
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define EX_ST_FP(x) \
|
||||
98: x; \
|
||||
.section __ex_table,"a";\
|
||||
.align 4; \
|
||||
.word 98b, __retl_one_fp;\
|
||||
.text; \
|
||||
.align 4;
|
||||
|
||||
#define FUNC_NAME U3copy_to_user
|
||||
#define STORE(type,src,addr) type##a src, [addr] ASI_AIUS
|
||||
#define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS
|
||||
|
@ -24,10 +24,16 @@
|
||||
#ifndef EX_LD
|
||||
#define EX_LD(x) x
|
||||
#endif
|
||||
#ifndef EX_LD_FP
|
||||
#define EX_LD_FP(x) x
|
||||
#endif
|
||||
|
||||
#ifndef EX_ST
|
||||
#define EX_ST(x) x
|
||||
#endif
|
||||
#ifndef EX_ST_FP
|
||||
#define EX_ST_FP(x) x
|
||||
#endif
|
||||
|
||||
#ifndef EX_RETVAL
|
||||
#define EX_RETVAL(x) x
|
||||
@ -120,8 +126,8 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
and %g2, 0x38, %g2
|
||||
|
||||
1: subcc %g1, 0x1, %g1
|
||||
EX_LD(LOAD(ldub, %o1 + 0x00, %o3))
|
||||
EX_ST(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
|
||||
EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
|
||||
EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
|
||||
bgu,pt %XCC, 1b
|
||||
add %o1, 0x1, %o1
|
||||
|
||||
@ -132,20 +138,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
|
||||
be,pt %icc, 3f
|
||||
alignaddr %o1, %g0, %o1
|
||||
|
||||
EX_LD(LOAD(ldd, %o1, %f4))
|
||||
1: EX_LD(LOAD(ldd, %o1 + 0x8, %f6))
|
||||
EX_LD_FP(LOAD(ldd, %o1, %f4))
1: EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f4, %f6, %f0
EX_ST(STORE(std, %f0, %o0))
EX_ST_FP(STORE(std, %f0, %o0))
be,pn %icc, 3f
add %o0, 0x8, %o0

EX_LD(LOAD(ldd, %o1 + 0x8, %f4))
EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f6, %f4, %f2
EX_ST(STORE(std, %f2, %o0))
EX_ST_FP(STORE(std, %f2, %o0))
bne,pt %icc, 1b
add %o0, 0x8, %o0

@ -155,25 +161,25 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
LOAD(prefetch, %o1 + 0x080, #one_read)
LOAD(prefetch, %o1 + 0x0c0, #one_read)
LOAD(prefetch, %o1 + 0x100, #one_read)
EX_LD(LOAD(ldd, %o1 + 0x000, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0))
LOAD(prefetch, %o1 + 0x140, #one_read)
EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
LOAD(prefetch, %o1 + 0x180, #one_read)
EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f0, %f2, %f16
EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
faligndata %f2, %f4, %f18
EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
faligndata %f4, %f6, %f20
EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
faligndata %f6, %f8, %f22

EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
faligndata %f8, %f10, %f24
EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
faligndata %f10, %f12, %f26
EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))

subcc GLOBAL_SPARE, 0x80, GLOBAL_SPARE
add %o1, 0x40, %o1
@ -184,26 +190,26 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */

.align 64
1:
EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
faligndata %f12, %f14, %f28
EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
faligndata %f14, %f0, %f30
EX_ST(STORE_BLK(%f16, %o0))
EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
EX_ST_FP(STORE_BLK(%f16, %o0))
EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
faligndata %f0, %f2, %f16
add %o0, 0x40, %o0

EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
faligndata %f2, %f4, %f18
EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
faligndata %f4, %f6, %f20
EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
subcc %o3, 0x01, %o3
faligndata %f6, %f8, %f22
EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))

faligndata %f8, %f10, %f24
EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
LOAD(prefetch, %o1 + 0x1c0, #one_read)
faligndata %f10, %f12, %f26
bg,pt %XCC, 1b
@ -211,29 +217,29 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */

/* Finally we copy the last full 64-byte block. */
2:
EX_LD(LOAD(ldd, %o1 + 0x008, %f2))
EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
faligndata %f12, %f14, %f28
EX_LD(LOAD(ldd, %o1 + 0x010, %f4))
EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
faligndata %f14, %f0, %f30
EX_ST(STORE_BLK(%f16, %o0))
EX_LD(LOAD(ldd, %o1 + 0x018, %f6))
EX_ST_FP(STORE_BLK(%f16, %o0))
EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
faligndata %f0, %f2, %f16
EX_LD(LOAD(ldd, %o1 + 0x020, %f8))
EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
faligndata %f2, %f4, %f18
EX_LD(LOAD(ldd, %o1 + 0x028, %f10))
EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
faligndata %f4, %f6, %f20
EX_LD(LOAD(ldd, %o1 + 0x030, %f12))
EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
faligndata %f6, %f8, %f22
EX_LD(LOAD(ldd, %o1 + 0x038, %f14))
EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
faligndata %f8, %f10, %f24
cmp %g1, 0
be,pt %XCC, 1f
add %o0, 0x40, %o0
EX_LD(LOAD(ldd, %o1 + 0x040, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
1: faligndata %f10, %f12, %f26
faligndata %f12, %f14, %f28
faligndata %f14, %f0, %f30
EX_ST(STORE_BLK(%f16, %o0))
EX_ST_FP(STORE_BLK(%f16, %o0))
add %o0, 0x40, %o0
add %o1, 0x40, %o1
membar #Sync
@ -253,20 +259,20 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */

sub %o2, %g2, %o2
be,a,pt %XCC, 1f
EX_LD(LOAD(ldd, %o1 + 0x00, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0))

1: EX_LD(LOAD(ldd, %o1 + 0x08, %f2))
1: EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f0, %f2, %f8
EX_ST(STORE(std, %f8, %o0))
EX_ST_FP(STORE(std, %f8, %o0))
be,pn %XCC, 2f
add %o0, 0x8, %o0
EX_LD(LOAD(ldd, %o1 + 0x08, %f0))
EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0))
add %o1, 0x8, %o1
subcc %g2, 0x8, %g2
faligndata %f2, %f0, %f8
EX_ST(STORE(std, %f8, %o0))
EX_ST_FP(STORE(std, %f8, %o0))
bne,pn %XCC, 1b
add %o0, 0x8, %o0

@ -38,6 +38,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;

best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->edx & bit(X86_FEATURE_MTRR));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;

@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}

static u8 mtrr_disabled_type(void)
static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
/*
* Intel SDM 11.11.2.2: all MTRRs are disabled when
* IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
* memory type is applied to all of physical memory.
*
* However, virtual machines can be run with CPUID such that
* there are no MTRRs. In that case, the firmware will never
* enable MTRRs and it is obviously undesirable to run the
* guest entirely with UC memory and we use WB.
*/
return MTRR_TYPE_UNCACHABLE;
if (guest_cpuid_has_mtrr(vcpu))
return MTRR_TYPE_UNCACHABLE;
else
return MTRR_TYPE_WRBACK;
}

/*
@ -267,7 +275,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr)

for (seg = 0; seg < seg_num; seg++) {
mtrr_seg = &fixed_seg_table[seg];
if (mtrr_seg->start >= addr && addr < mtrr_seg->end)
if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
return seg;
}

@ -300,7 +308,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
*start = range->base & PAGE_MASK;

mask = range->mask & PAGE_MASK;
mask |= ~0ULL << boot_cpu_data.x86_phys_bits;

/* This cannot overflow because writing to the reserved bits of
* variable MTRRs causes a #GP.
@ -356,10 +363,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (var_mtrr_range_is_valid(cur))
list_del(&mtrr_state->var_ranges[index].node);

/* Extend the mask with all 1 bits to the left, since those
* bits must implicitly be 0. The bits are then cleared
* when reading them.
*/
if (!is_mtrr_mask)
cur->base = data;
else
cur->mask = data;
cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));

/* add it to the list if it's enabled. */
if (var_mtrr_range_is_valid(cur)) {
@ -426,6 +437,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
else
*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;

*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
}

return 0;
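
Worked through with concrete numbers (the width here is hypothetical, not taken from the patch): if cpuid_maxphyaddr(vcpu) were 36, the write side above stores

	cur->mask = data | (-1LL << 36);	/* force bits 63:36 to 1 */

and the read side strips the same bits again with

	*pdata &= (1ULL << 36) - 1;	/* keep only bits 35:0 */

so the guest reads back exactly the mask bits it could legally have written, while the stored value keeps the implicit high bits set for range matching.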
@ -670,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
}

if (iter.mtrr_disabled)
return mtrr_disabled_type();
return mtrr_disabled_type(vcpu);

/* not contained in any MTRRs. */
if (type == -1)

@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
struct kvm_run *kvm_run = vcpu->run;
u32 exit_code = svm->vmcb->control.exit_code;

trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);

if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
vcpu->arch.cr0 = svm->vmcb->save.cr0;
if (npt_enabled)
@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);

if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_handle_nmi(&svm->vcpu);

@ -2803,7 +2803,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
msr_info->data = vcpu->arch.ia32_xss;
break;
case MSR_TSC_AUX:
if (!guest_cpuid_has_rdtscp(vcpu))
if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
return 1;
/* Otherwise falls through */
default:
@ -2909,7 +2909,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
break;
case MSR_TSC_AUX:
if (!guest_cpuid_has_rdtscp(vcpu))
if (!guest_cpuid_has_rdtscp(vcpu) && !msr_info->host_initiated)
return 1;
/* Check reserved bit, higher 32 bits should be zero */
if ((data >> 32) != 0)
@ -8042,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
u32 exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;

trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);

/*
* Flush logged GPAs PML buffer, this will make dirty_bitmap more
* updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@ -8668,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->loaded_vmcs->launched = 1;

vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);

/*
* the KVM_REQ_EVENT optimization bit is only on for one entry, and if

@ -3572,9 +3572,11 @@ static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)

static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
int i;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
for (i = 0; i < 3; i++)
kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return 0;
}
@ -3593,6 +3595,7 @@ static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
int start = 0;
int i;
u32 prev_legacy, cur_legacy;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
@ -3602,7 +3605,8 @@ static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
sizeof(kvm->arch.vpit->pit_state.channels));
kvm->arch.vpit->pit_state.flags = ps->flags;
kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
for (i = 0; i < 3; i++)
kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count, start);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return 0;
}
@ -6515,6 +6519,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);

trace_kvm_entry(vcpu->vcpu_id);
wait_lapic_expire(vcpu);
__kvm_guest_enter();

if (unlikely(vcpu->arch.switch_db_regs)) {
@ -6527,8 +6533,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
}

trace_kvm_entry(vcpu->vcpu_id);
wait_lapic_expire(vcpu);
kvm_x86_ops->run(vcpu);

/*

@ -470,7 +470,7 @@ long sys_sigreturn(void)
struct sigcontext __user *sc = &frame->sc;
int sig_size = (_NSIG_WORDS - 1) * sizeof(unsigned long);

if (copy_from_user(&set.sig[0], (void *)sc->oldmask, sizeof(set.sig[0])) ||
if (copy_from_user(&set.sig[0], &sc->oldmask, sizeof(set.sig[0])) ||
copy_from_user(&set.sig[1], frame->extramask, sig_size))
goto segfault;

@ -1,6 +1,7 @@
#include <linux/types.h>
#include <linux/tick.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/grant_table.h>
#include <xen/events.h>

@ -206,6 +206,22 @@ void blk_delay_queue(struct request_queue *q, unsigned long msecs)
}
EXPORT_SYMBOL(blk_delay_queue);

/**
* blk_start_queue_async - asynchronously restart a previously stopped queue
* @q: The &struct request_queue in question
*
* Description:
* blk_start_queue_async() will clear the stop flag on the queue, and
* ensure that the request_fn for the queue is run from an async
* context.
**/
void blk_start_queue_async(struct request_queue *q)
{
queue_flag_clear(QUEUE_FLAG_STOPPED, q);
blk_run_queue_async(q);
}
EXPORT_SYMBOL(blk_start_queue_async);

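A minimal sketch of the intended call site for the new export, modeled on the null_blk change later in this commit (the surrounding driver context is assumed):

	/* called from completion/IRQ context, where running request_fn
	 * synchronously via blk_start_queue() is not safe */
	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue_async(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

The async variant only clears QUEUE_FLAG_STOPPED and schedules the queue run, instead of recursing into request_fn on the caller's stack.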
/**
* blk_start_queue - restart a previously stopped queue
* @q: The &struct request_queue in question
@ -1689,8 +1705,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
struct request *req;
unsigned int request_count = 0;

blk_queue_split(q, &bio, q->bio_split);

/*
* low level driver can indicate that it wants pages above a
* certain limit bounced to low memory (ie for highmem, or even
@ -1698,6 +1712,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
*/
blk_queue_bounce(q, &bio);

blk_queue_split(q, &bio, q->bio_split);

if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
bio->bi_error = -EIO;
bio_endio(bio);

@ -81,7 +81,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
struct bio *new = NULL;

bio_for_each_segment(bv, bio, iter) {
if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
if (sectors + (bv.bv_len >> 9) > blk_max_size_offset(q, bio->bi_iter.bi_sector))
goto split;

/*

@ -47,7 +47,7 @@ struct skcipher_ctx {
bool merge;
bool enc;

struct ablkcipher_request req;
struct skcipher_request req;
};

struct skcipher_async_rsgl {
@ -64,13 +64,13 @@ struct skcipher_async_req {
};

#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))

#define GET_REQ_SIZE(ctx) \
crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))

#define GET_IV_SIZE(ctx) \
crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))

#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
sizeof(struct scatterlist) - 1)
@ -302,8 +302,8 @@ static int skcipher_sendmsg(struct socket *sock, struct msghdr *msg,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
unsigned ivsize = crypto_skcipher_ivsize(tfm);
struct skcipher_sg_list *sgl;
struct af_alg_control con = {};
long copied = 0;
@ -507,7 +507,7 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
struct skcipher_async_req *sreq;
struct ablkcipher_request *req;
struct skcipher_request *req;
struct skcipher_async_rsgl *last_rsgl = NULL;
unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
unsigned int reqlen = sizeof(struct skcipher_async_req) +
@ -531,9 +531,9 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
}
sg_init_table(sreq->tsg, tx_nents);
memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req));
ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
skcipher_async_cb, sk);
skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
skcipher_async_cb, sk);

while (iov_iter_count(&msg->msg_iter)) {
struct skcipher_async_rsgl *rsgl;
@ -608,10 +608,10 @@ static int skcipher_recvmsg_async(struct socket *sock, struct msghdr *msg,
if (mark)
sg_mark_end(sreq->tsg + txbufs - 1);

ablkcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
len, sreq->iv);
err = ctx->enc ? crypto_ablkcipher_encrypt(req) :
crypto_ablkcipher_decrypt(req);
skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
len, sreq->iv);
err = ctx->enc ? crypto_skcipher_encrypt(req) :
crypto_skcipher_decrypt(req);
if (err == -EINPROGRESS) {
atomic_inc(&ctx->inflight);
err = -EIOCBQUEUED;
@ -632,7 +632,7 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
unsigned bs = crypto_ablkcipher_blocksize(crypto_ablkcipher_reqtfm(
unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
&ctx->req));
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
@ -669,14 +669,13 @@ static int skcipher_recvmsg_sync(struct socket *sock, struct msghdr *msg,
if (!used)
goto free;

ablkcipher_request_set_crypt(&ctx->req, sg,
ctx->rsgl.sg, used,
ctx->iv);
skcipher_request_set_crypt(&ctx->req, sg, ctx->rsgl.sg, used,
ctx->iv);

err = af_alg_wait_for_completion(
ctx->enc ?
crypto_ablkcipher_encrypt(&ctx->req) :
crypto_ablkcipher_decrypt(&ctx->req),
crypto_skcipher_encrypt(&ctx->req) :
crypto_skcipher_decrypt(&ctx->req),
&ctx->completion);

free:
@ -751,17 +750,17 @@ static struct proto_ops algif_skcipher_ops = {

static void *skcipher_bind(const char *name, u32 type, u32 mask)
{
return crypto_alloc_ablkcipher(name, type, mask);
return crypto_alloc_skcipher(name, type, mask);
}

static void skcipher_release(void *private)
{
crypto_free_ablkcipher(private);
crypto_free_skcipher(private);
}

static int skcipher_setkey(void *private, const u8 *key, unsigned int keylen)
{
return crypto_ablkcipher_setkey(private, key, keylen);
return crypto_skcipher_setkey(private, key, keylen);
}

static void skcipher_wait(struct sock *sk)
@ -778,13 +777,13 @@ static void skcipher_sock_destruct(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct skcipher_ctx *ctx = ask->private;
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req);
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);

if (atomic_read(&ctx->inflight))
skcipher_wait(sk);

skcipher_free_sgl(sk);
sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm));
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
}
@ -793,20 +792,20 @@ static int skcipher_accept_parent(void *private, struct sock *sk)
{
struct skcipher_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(private);
unsigned int len = sizeof(*ctx) + crypto_skcipher_reqsize(private);

ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
return -ENOMEM;

ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(private),
ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(private),
GFP_KERNEL);
if (!ctx->iv) {
sock_kfree_s(sk, ctx, len);
return -ENOMEM;
}

memset(ctx->iv, 0, crypto_ablkcipher_ivsize(private));
memset(ctx->iv, 0, crypto_skcipher_ivsize(private));

INIT_LIST_HEAD(&ctx->tsgl);
ctx->len = len;
@ -819,9 +818,9 @@ static int skcipher_accept_parent(void *private, struct sock *sk)

ask->private = ctx;

ablkcipher_request_set_tfm(&ctx->req, private);
ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
skcipher_request_set_tfm(&ctx->req, private);
skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);

sk->sk_destruct = skcipher_sock_destruct;

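The conversion in this file is mechanical; as a rough map of the new-style calls (a sketch with placeholder names, not code from the patch):

	tfm = crypto_alloc_skcipher(name, type, mask);	/* was crypto_alloc_ablkcipher() */
	crypto_skcipher_setkey(tfm, key, keylen);
	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, cb, data);
	skcipher_request_set_crypt(req, src_sgl, dst_sgl, len, iv);
	err = crypto_skcipher_encrypt(req);	/* or crypto_skcipher_decrypt(req) */

Each crypto_ablkcipher_*/ablkcipher_request_* helper is replaced by the crypto_skcipher_*/skcipher_request_* counterpart with the same argument order.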
@ -200,7 +200,8 @@ static int acpi_pss_perf_init(struct acpi_processor *pr,
goto err_remove_sysfs_thermal;
}

sysfs_remove_link(&pr->cdev->device.kobj, "device");
return 0;

err_remove_sysfs_thermal:
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
err_thermal_unregister:

@ -219,6 +219,9 @@ static void end_cmd(struct nullb_cmd *cmd)
{
struct request_queue *q = NULL;

if (cmd->rq)
q = cmd->rq->q;

switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, 0);
@ -229,23 +232,19 @@ static void end_cmd(struct nullb_cmd *cmd)
break;
case NULL_Q_BIO:
bio_endio(cmd->bio);
goto free_cmd;
break;
}

if (cmd->rq)
q = cmd->rq->q;
free_cmd(cmd);

/* Restart queue if needed, as we are freeing a tag */
if (q && !q->mq_ops && blk_queue_stopped(q)) {
if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
unsigned long flags;

spin_lock_irqsave(q->queue_lock, flags);
if (blk_queue_stopped(q))
blk_start_queue(q);
blk_start_queue_async(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
free_cmd:
free_cmd(cmd);
}

static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)

@ -342,13 +342,13 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,

ret = _sunxi_rsb_run_xfer(rsb);
if (ret)
goto out;
goto unlock;

*buf = readl(rsb->regs + RSB_DATA);

unlock:
mutex_unlock(&rsb->lock);

out:
return ret;
}

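The relabelled goto above is a lock-balance fix; the corrected control flow of sunxi_rsb_read(), with the unrelated body elided:

	mutex_lock(&rsb->lock);
	...
	ret = _sunxi_rsb_run_xfer(rsb);
	if (ret)
		goto unlock;	/* previously "goto out", skipping the unlock */

	*buf = readl(rsb->regs + RSB_DATA);
unlock:
	mutex_unlock(&rsb->lock);
out:
	return ret;

Jumping straight to out on a failed transfer returned with rsb->lock still held, so the next access to the bus would block forever.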
@ -527,9 +527,9 @@ static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb)
*/

static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = {
{ 0x3e3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
{ 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
{ 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */
{ 0xe89, 0x45 }, /* Peripheral IC: AC100, ... */
{ 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */
};

static u8 sunxi_rsb_get_rtaddr(u16 hwaddr)

@ -31,7 +31,7 @@ static struct scpi_ops *scpi_ops;

static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev)
{
u8 domain = topology_physical_package_id(cpu_dev->id);
int domain = topology_physical_package_id(cpu_dev->id);

if (domain < 0)
return ERR_PTR(-EINVAL);

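The u8-to-int change above matters because topology_physical_package_id() can report an unknown package as a negative value; stored in a u8 that value wraps (for example -1 becomes 255), so the domain < 0 check that follows could never fire and the error was silently passed on as a bogus domain id instead of returning ERR_PTR(-EINVAL).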
@ -1264,7 +1264,8 @@ struct amdgpu_cs_parser {
struct ww_acquire_ctx ticket;

/* user fence */
struct amdgpu_user_fence uf;
struct amdgpu_user_fence uf;
struct amdgpu_bo_list_entry uf_entry;
};

struct amdgpu_job {

@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
return 0;
}

static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *fence_data)
{
struct drm_gem_object *gobj;
uint32_t handle;

handle = fence_data->handle;
gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
fence_data->handle);
if (gobj == NULL)
return -EINVAL;

p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
p->uf.offset = fence_data->offset;

if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
return -EINVAL;
}

p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
p->uf_entry.priority = 0;
p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
p->uf_entry.tv.shared = true;

drm_gem_object_unreference_unlocked(gobj);
return 0;
}

int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
union drm_amdgpu_cs *cs = data;
@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)

case AMDGPU_CHUNK_ID_FENCE:
size = sizeof(struct drm_amdgpu_cs_chunk_fence);
if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
uint32_t handle;
struct drm_gem_object *gobj;
struct drm_amdgpu_cs_chunk_fence *fence_data;

fence_data = (void *)p->chunks[i].kdata;
handle = fence_data->handle;
gobj = drm_gem_object_lookup(p->adev->ddev,
p->filp, handle);
if (gobj == NULL) {
ret = -EINVAL;
goto free_partial_kdata;
}

p->uf.bo = gem_to_amdgpu_bo(gobj);
amdgpu_bo_ref(p->uf.bo);
drm_gem_object_unreference_unlocked(gobj);
p->uf.offset = fence_data->offset;
} else {
if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
ret = -EINVAL;
goto free_partial_kdata;
}

ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
if (ret)
goto free_partial_kdata;

break;

case AMDGPU_CHUNK_ID_DEPENDENCIES:
@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
&p->validated);

if (p->uf.bo)
list_add(&p->uf_entry.tv.head, &p->validated);

if (need_mmap_lock)
|

@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
for (i = 0; i < parser->num_ibs; i++)
amdgpu_ib_free(parser->adev, &parser->ibs[i]);
kfree(parser->ibs);
if (parser->uf.bo)
amdgpu_bo_unref(&parser->uf.bo);
amdgpu_bo_unref(&parser->uf.bo);
amdgpu_bo_unref(&parser->uf_entry.robj);
}

static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,

@ -55,6 +55,9 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);

if (!state->enable)
return 0;

if (exynos_crtc->ops->atomic_check)
return exynos_crtc->ops->atomic_check(exynos_crtc, state);

@ -2193,8 +2193,17 @@ struct drm_i915_gem_request {
struct drm_i915_private *i915;
struct intel_engine_cs *ring;

/** GEM sequence number associated with this request. */
uint32_t seqno;
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
* this request.
*/
u32 previous_seqno;

/** GEM sequence number associated with this request,
* when the HWS breadcrumb is equal or greater than this the GPU
* has finished processing this request.
*/
u32 seqno;

/** Position in the ringbuffer of the start of the request */
u32 head;
@ -2839,6 +2848,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,

int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
/*
* BEWARE: Do not use the function below unless you can _absolutely_
@ -2910,15 +2920,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}

static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
return i915_seqno_passed(seqno, req->previous_seqno);
}

static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
u32 seqno;

BUG_ON(req == NULL);

seqno = req->ring->get_seqno(req->ring, lazy_coherency);

u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
return i915_seqno_passed(seqno, req->seqno);
}

@ -1146,23 +1146,74 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

static int __i915_spin_request(struct drm_i915_gem_request *req)
static unsigned long local_clock_us(unsigned *cpu)
{
unsigned long t;

/* Cheaply and approximately convert from nanoseconds to microseconds.
* The result and subsequent calculations are also defined in the same
* approximate microseconds units. The principal source of timing
* error here is from the simple truncation.
*
* Note that local_clock() is only defined wrt to the current CPU;
* the comparisons are no longer valid if we switch CPUs. Instead of
* blocking preemption for the entire busywait, we can detect the CPU
* switch and use that as indicator of system load and a reason to
* stop busywaiting, see busywait_stop().
*/
*cpu = get_cpu();
t = local_clock() >> 10;
put_cpu();

return t;
}

static bool busywait_stop(unsigned long timeout, unsigned cpu)
{
unsigned this_cpu;

if (time_after(local_clock_us(&this_cpu), timeout))
return true;

return this_cpu != cpu;
}

static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
{
unsigned long timeout;
unsigned cpu;

if (i915_gem_request_get_ring(req)->irq_refcount)
/* When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
* required to set up the irq and wait upon it limits the response
* rate. By busywaiting on the request completion for a short while we
* can service the high frequency waits as quick as possible. However,
* if it is a slow request, we want to sleep as quickly as possible.
* The tradeoff between waiting and sleeping is roughly the time it
* takes to sleep on a request, on the order of a microsecond.
*/

if (req->ring->irq_refcount)
return -EBUSY;

timeout = jiffies + 1;
/* Only spin if we know the GPU is processing this request */
if (!i915_gem_request_started(req, true))
return -EAGAIN;

timeout = local_clock_us(&cpu) + 5;
while (!need_resched()) {
if (i915_gem_request_completed(req, true))
return 0;

if (time_after_eq(jiffies, timeout))
if (signal_pending_state(state, current))
break;

if (busywait_stop(timeout, cpu))
break;

cpu_relax_lowlatency();
}

if (i915_gem_request_completed(req, false))
return 0;

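A small arithmetic note on the local_clock_us() comment above: x >> 10 divides nanoseconds by 1024 rather than 1000, so each "microsecond" unit is really 1.024 us and the 5-unit spin budget works out to roughly 5.12 us of real time; truncation can only under-report elapsed time by less than one unit. Both effects slightly lengthen the busywait, which is harmless for a heuristic cutoff.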
@ -1197,6 +1248,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
const bool irq_test_in_progress =
|
||||
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
|
||||
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
|
||||
DEFINE_WAIT(wait);
|
||||
unsigned long timeout_expire;
|
||||
s64 before, now;
|
||||
@ -1229,7 +1281,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
|
||||
before = ktime_get_raw_ns();
|
||||
|
||||
/* Optimistic spin for the next jiffie before touching IRQs */
|
||||
ret = __i915_spin_request(req);
|
||||
ret = __i915_spin_request(req, state);
|
||||
if (ret == 0)
|
||||
goto out;
|
||||
|
||||
@ -1241,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
|
||||
for (;;) {
|
||||
struct timer_list timer;
|
||||
|
||||
prepare_to_wait(&ring->irq_queue, &wait,
|
||||
interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
|
||||
prepare_to_wait(&ring->irq_queue, &wait, state);
|
||||
|
||||
/* We need to check whether any gpu reset happened in between
|
||||
* the caller grabbing the seqno and now ... */
|
||||
@ -1260,7 +1311,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
|
||||
break;
|
||||
}
|
||||
|
||||
if (interruptible && signal_pending(current)) {
|
||||
if (signal_pending_state(state, current)) {
|
||||
ret = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
@ -2554,6 +2605,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
|
||||
request->batch_obj = obj;
|
||||
|
||||
request->emitted_jiffies = jiffies;
|
||||
request->previous_seqno = ring->last_submitted_seqno;
|
||||
ring->last_submitted_seqno = request->seqno;
|
||||
list_add_tail(&request->list, &ring->request_list);
|
||||
|
||||
@ -4080,6 +4132,29 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
|
||||
return false;
|
||||
}
|
||||
|
||||
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
|
||||
{
|
||||
struct drm_i915_gem_object *obj = vma->obj;
|
||||
bool mappable, fenceable;
|
||||
u32 fence_size, fence_alignment;
|
||||
|
||||
fence_size = i915_gem_get_gtt_size(obj->base.dev,
|
||||
obj->base.size,
|
||||
obj->tiling_mode);
|
||||
fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
|
||||
obj->base.size,
|
||||
obj->tiling_mode,
|
||||
true);
|
||||
|
||||
fenceable = (vma->node.size == fence_size &&
|
||||
(vma->node.start & (fence_alignment - 1)) == 0);
|
||||
|
||||
mappable = (vma->node.start + fence_size <=
|
||||
to_i915(obj->base.dev)->gtt.mappable_end);
|
||||
|
||||
obj->map_and_fenceable = mappable && fenceable;
|
||||
}
|
||||
|
||||
static int
|
||||
i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
|
||||
struct i915_address_space *vm,
|
||||
@ -4147,25 +4222,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
|
||||
|
||||
if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
|
||||
(bound ^ vma->bound) & GLOBAL_BIND) {
|
||||
bool mappable, fenceable;
|
||||
u32 fence_size, fence_alignment;
|
||||
|
||||
fence_size = i915_gem_get_gtt_size(obj->base.dev,
|
||||
obj->base.size,
|
||||
obj->tiling_mode);
|
||||
fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
|
||||
obj->base.size,
|
||||
obj->tiling_mode,
|
||||
true);
|
||||
|
||||
fenceable = (vma->node.size == fence_size &&
|
||||
(vma->node.start & (fence_alignment - 1)) == 0);
|
||||
|
||||
mappable = (vma->node.start + fence_size <=
|
||||
dev_priv->gtt.mappable_end);
|
||||
|
||||
obj->map_and_fenceable = mappable && fenceable;
|
||||
|
||||
__i915_vma_set_map_and_fenceable(vma);
|
||||
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
|
||||
}
|
||||
|
||||
|
@ -2676,6 +2676,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
|
||||
return ret;
|
||||
}
|
||||
vma->bound |= GLOBAL_BIND;
|
||||
__i915_vma_set_map_and_fenceable(vma);
|
||||
list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
|
||||
}
|
||||
|
||||
|
@ -687,6 +687,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
|
||||
}
|
||||
|
||||
vma->bound |= GLOBAL_BIND;
|
||||
__i915_vma_set_map_and_fenceable(vma);
|
||||
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
|
||||
}
|
||||
|
||||
|
@ -116,6 +116,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc);
|
||||
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
|
||||
static void ironlake_pfit_enable(struct intel_crtc *crtc);
|
||||
static void intel_modeset_setup_hw_state(struct drm_device *dev);
|
||||
static void intel_pre_disable_primary(struct drm_crtc *crtc);
|
||||
|
||||
typedef struct {
|
||||
int min, max;
|
||||
@ -2607,6 +2608,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
|
||||
struct drm_i915_gem_object *obj;
|
||||
struct drm_plane *primary = intel_crtc->base.primary;
|
||||
struct drm_plane_state *plane_state = primary->state;
|
||||
struct drm_crtc_state *crtc_state = intel_crtc->base.state;
|
||||
struct intel_plane *intel_plane = to_intel_plane(primary);
|
||||
struct drm_framebuffer *fb;
|
||||
|
||||
if (!plane_config->fb)
|
||||
@ -2643,6 +2646,18 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* We've failed to reconstruct the BIOS FB. Current display state
|
||||
* indicates that the primary plane is visible, but has a NULL FB,
|
||||
* which will lead to problems later if we don't fix it up. The
|
||||
* simplest solution is to just disable the primary plane now and
|
||||
* pretend the BIOS never had it enabled.
|
||||
*/
|
||||
to_intel_plane_state(plane_state)->visible = false;
|
||||
crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
|
||||
intel_pre_disable_primary(&intel_crtc->base);
|
||||
intel_plane->disable_plane(primary, &intel_crtc->base);
|
||||
|
||||
return;
|
||||
|
||||
valid_fb:
|
||||
@ -9910,14 +9925,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
|
||||
return true;
|
||||
}
|
||||
|
||||
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
|
||||
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
uint32_t cntl = 0, size = 0;
|
||||
|
||||
if (base) {
|
||||
if (on) {
|
||||
unsigned int width = intel_crtc->base.cursor->state->crtc_w;
|
||||
unsigned int height = intel_crtc->base.cursor->state->crtc_h;
|
||||
unsigned int stride = roundup_pow_of_two(width) * 4;
|
||||
@ -9972,16 +9987,15 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
|
||||
}
|
||||
}
|
||||
|
||||
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
|
||||
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_i915_private *dev_priv = dev->dev_private;
|
||||
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
|
||||
int pipe = intel_crtc->pipe;
|
||||
uint32_t cntl;
|
||||
uint32_t cntl = 0;
|
||||
|
||||
cntl = 0;
|
||||
if (base) {
|
||||
if (on) {
|
||||
cntl = MCURSOR_GAMMA_ENABLE;
|
||||
switch (intel_crtc->base.cursor->state->crtc_w) {
|
||||
case 64:
|
||||
@ -10032,18 +10046,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
|
||||
int y = cursor_state->crtc_y;
|
||||
u32 base = 0, pos = 0;
|
||||
|
||||
if (on)
|
||||
base = intel_crtc->cursor_addr;
|
||||
base = intel_crtc->cursor_addr;
|
||||
|
||||
if (x >= intel_crtc->config->pipe_src_w)
|
||||
base = 0;
|
||||
on = false;
|
||||
|
||||
if (y >= intel_crtc->config->pipe_src_h)
|
||||
base = 0;
|
||||
on = false;
|
||||
|
||||
if (x < 0) {
|
||||
if (x + cursor_state->crtc_w <= 0)
|
||||
base = 0;
|
||||
on = false;
|
||||
|
||||
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
|
||||
x = -x;
|
||||
@ -10052,16 +10065,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
|
||||
|
||||
if (y < 0) {
|
||||
if (y + cursor_state->crtc_h <= 0)
|
||||
base = 0;
|
||||
on = false;
|
||||
|
||||
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
|
||||
y = -y;
|
||||
}
|
||||
pos |= y << CURSOR_Y_SHIFT;
|
||||
|
||||
if (base == 0 && intel_crtc->cursor_base == 0)
|
||||
return;
|
||||
|
||||
I915_WRITE(CURPOS(pipe), pos);
|
||||
|
||||
/* ILK+ do this automagically */
|
||||
@ -10072,9 +10082,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
|
||||
}
|
||||
|
||||
if (IS_845G(dev) || IS_I865G(dev))
|
||||
i845_update_cursor(crtc, base);
|
||||
i845_update_cursor(crtc, base, on);
|
||||
else
|
||||
i9xx_update_cursor(crtc, base);
|
||||
i9xx_update_cursor(crtc, base, on);
|
||||
}
|
||||
|
||||
static bool cursor_size_ok(struct drm_device *dev,
|
||||
@ -12113,18 +12123,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
|
||||
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
|
||||
{
|
||||
struct drm_device *dev = state->dev;
|
||||
struct intel_encoder *encoder;
|
||||
struct drm_connector *connector;
|
||||
struct drm_connector_state *connector_state;
|
||||
unsigned int used_ports = 0;
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Walk the connector list instead of the encoder
|
||||
* list to detect the problem on ddi platforms
|
||||
* where there's just one encoder per digital port.
|
||||
*/
|
||||
for_each_connector_in_state(state, connector, connector_state, i) {
|
||||
drm_for_each_connector(connector, dev) {
|
||||
struct drm_connector_state *connector_state;
|
||||
struct intel_encoder *encoder;
|
||||
|
||||
connector_state = drm_atomic_get_existing_connector_state(state, connector);
|
||||
if (!connector_state)
|
||||
connector_state = connector->state;
|
||||
|
||||
if (!connector_state->best_encoder)
|
||||
continue;
|
||||
|
||||
@ -13718,6 +13732,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
|
||||
struct drm_crtc *crtc = crtc_state->base.crtc;
|
||||
struct drm_framebuffer *fb = state->base.fb;
|
||||
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
|
||||
enum pipe pipe = to_intel_plane(plane)->pipe;
|
||||
unsigned stride;
|
||||
int ret;
|
||||
|
||||
@ -13751,6 +13766,22 @@ intel_check_cursor_plane(struct drm_plane *plane,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* There's something wrong with the cursor on CHV pipe C.
|
||||
* If it straddles the left edge of the screen then
|
||||
* moving it away from the edge or disabling it often
|
||||
* results in a pipe underrun, and often that can lead to
|
||||
* dead pipe (constant underrun reported, and it scans
|
||||
* out just a solid color). To recover from that, the
|
||||
* display power well must be turned off and on again.
|
||||
* Refuse the put the cursor into that compromised position.
|
||||
*/
|
||||
if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
|
||||
state->visible && state->base.crtc_x < 0) {
|
||||
DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -13774,9 +13805,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
|
||||
crtc = crtc ? crtc : plane->crtc;
|
||||
intel_crtc = to_intel_crtc(crtc);
|
||||
|
||||
if (intel_crtc->cursor_bo == obj)
|
||||
goto update;
|
||||
|
||||
if (!obj)
|
||||
addr = 0;
|
||||
else if (!INTEL_INFO(dev)->cursor_needs_physical)
|
||||
@ -13785,9 +13813,7 @@ intel_commit_cursor_plane(struct drm_plane *plane,
|
||||
addr = obj->phys_handle->busaddr;
|
||||
|
||||
intel_crtc->cursor_addr = addr;
|
||||
intel_crtc->cursor_bo = obj;
|
||||
|
||||
update:
|
||||
if (crtc->state->active)
|
||||
intel_crtc_update_cursor(crtc, state->visible);
|
||||
}
|
||||
|
@ -550,7 +550,6 @@ struct intel_crtc {
|
||||
int adjusted_x;
|
||||
int adjusted_y;
|
||||
|
||||
struct drm_i915_gem_object *cursor_bo;
|
||||
uint32_t cursor_addr;
|
||||
uint32_t cursor_cntl;
|
||||
uint32_t cursor_size;
|
||||
|
@ -1374,17 +1374,18 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
|
||||
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
|
||||
struct drm_i915_private *dev_priv = to_i915(connector->dev);
|
||||
bool live_status = false;
|
||||
unsigned int retry = 3;
|
||||
unsigned int try;
|
||||
|
||||
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
|
||||
connector->base.id, connector->name);
|
||||
|
||||
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
|
||||
|
||||
while (!live_status && --retry) {
|
||||
for (try = 0; !live_status && try < 9; try++) {
|
||||
if (try)
|
||||
msleep(10);
|
||||
live_status = intel_digital_port_connected(dev_priv,
|
||||
hdmi_to_dig_port(intel_hdmi));
|
||||
mdelay(10);
|
||||
}
|
||||
|
||||
if (!live_status)
|
||||
|
@ -83,6 +83,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
|
||||
fan->type = NVBIOS_THERM_FAN_UNK;
|
||||
}
|
||||
|
||||
fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
|
||||
fan->min_duty = nvbios_rd08(bios, data + 0x02);
|
||||
fan->max_duty = nvbios_rd08(bios, data + 0x03);
|
||||
|
||||
|
@ -1265,15 +1265,17 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id)
|
||||
return cma_protocol_roce_dev_port(device, port_num);
|
||||
}
|
||||
|
||||
static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
|
||||
const struct net_device *net_dev)
|
||||
static bool cma_match_net_dev(const struct rdma_cm_id *id,
|
||||
const struct net_device *net_dev,
|
||||
u8 port_num)
|
||||
{
|
||||
const struct rdma_addr *addr = &id_priv->id.route.addr;
|
||||
const struct rdma_addr *addr = &id->route.addr;
|
||||
|
||||
if (!net_dev)
|
||||
/* This request is an AF_IB request or a RoCE request */
|
||||
return addr->src_addr.ss_family == AF_IB ||
|
||||
cma_protocol_roce(&id_priv->id);
|
||||
return (!id->port_num || id->port_num == port_num) &&
|
||||
(addr->src_addr.ss_family == AF_IB ||
|
||||
cma_protocol_roce_dev_port(id->device, port_num));
|
||||
|
||||
return !addr->dev_addr.bound_dev_if ||
|
||||
(net_eq(dev_net(net_dev), addr->dev_addr.net) &&
|
||||
@ -1295,13 +1297,13 @@ static struct rdma_id_private *cma_find_listener(
|
||||
hlist_for_each_entry(id_priv, &bind_list->owners, node) {
|
||||
if (cma_match_private_data(id_priv, ib_event->private_data)) {
|
||||
if (id_priv->id.device == cm_id->device &&
|
||||
cma_match_net_dev(id_priv, net_dev))
|
||||
cma_match_net_dev(&id_priv->id, net_dev, req->port))
|
||||
return id_priv;
|
||||
list_for_each_entry(id_priv_dev,
|
||||
&id_priv->listen_list,
|
||||
listen_list) {
|
||||
if (id_priv_dev->id.device == cm_id->device &&
|
||||
cma_match_net_dev(id_priv_dev, net_dev))
|
||||
cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
|
||||
return id_priv_dev;
|
||||
}
|
||||
}
|
||||
|
@ -286,7 +286,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
|
||||
mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
|
||||
ib_umem_release(msrq->umem);
|
||||
} else {
|
||||
kfree(msrq->wrid);
|
||||
kvfree(msrq->wrid);
|
||||
mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
|
||||
&msrq->buf);
|
||||
mlx4_db_free(dev->dev, &msrq->db);
|
||||
|
@ -232,6 +232,10 @@ struct phy_info {
|
||||
u16 interface_type;
|
||||
};
|
||||
|
||||
enum ocrdma_flags {
|
||||
OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01
|
||||
};
|
||||
|
||||
struct ocrdma_dev {
|
||||
struct ib_device ibdev;
|
||||
struct ocrdma_dev_attr attr;
|
||||
@ -287,6 +291,7 @@ struct ocrdma_dev {
|
||||
atomic_t update_sl;
|
||||
u16 pvid;
|
||||
u32 asic_id;
|
||||
u32 flags;
|
||||
|
||||
ulong last_stats_time;
|
||||
struct mutex stats_lock; /* provide synch for debugfs operations */
|
||||
@ -591,4 +596,9 @@ static inline u8 ocrdma_is_enabled_and_synced(u32 state)
|
||||
(state & OCRDMA_STATE_FLAG_SYNC);
|
||||
}
|
||||
|
||||
static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
|
||||
{
|
||||
return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -579,6 +579,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
|
||||
|
||||
cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
|
||||
cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
|
||||
/* Request link events on this MQ. */
|
||||
cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);
|
||||
|
||||
cmd->async_cqid_ringsize = cq->id;
|
||||
cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
|
||||
@ -819,20 +821,42 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
|
||||
}
|
||||
}
|
||||
|
||||
static void ocrdma_process_link_state(struct ocrdma_dev *dev,
|
||||
struct ocrdma_ae_mcqe *cqe)
|
||||
{
|
||||
struct ocrdma_ae_lnkst_mcqe *evt;
|
||||
u8 lstate;
|
||||
|
||||
evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
|
||||
lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);
|
||||
|
||||
if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
|
||||
return;
|
||||
|
||||
if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
|
||||
ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
|
||||
}
|
||||
|
||||
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
|
||||
{
|
||||
/* async CQE processing */
|
||||
struct ocrdma_ae_mcqe *cqe = ae_cqe;
|
||||
u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
|
||||
OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
|
||||
|
||||
if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
|
||||
switch (evt_code) {
|
||||
case OCRDMA_ASYNC_LINK_EVE_CODE:
|
||||
ocrdma_process_link_state(dev, cqe);
|
||||
break;
|
||||
case OCRDMA_ASYNC_RDMA_EVE_CODE:
|
||||
ocrdma_dispatch_ibevent(dev, cqe);
|
||||
else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
|
||||
break;
|
||||
case OCRDMA_ASYNC_GRP5_EVE_CODE:
|
||||
ocrdma_process_grp5_aync(dev, cqe);
|
||||
else
|
||||
break;
|
||||
default:
|
||||
pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
|
||||
dev->id, evt_code);
|
||||
}
|
||||
}
|
||||
|
||||
static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
|
||||
@ -1363,7 +1387,8 @@ mbx_err:
|
||||
return status;
|
||||
}
|
||||
|
||||
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
|
||||
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
|
||||
u8 *lnk_state)
|
||||
{
|
||||
int status = -ENOMEM;
|
||||
struct ocrdma_get_link_speed_rsp *rsp;
|
||||
@ -1384,8 +1409,11 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
|
||||
goto mbx_err;
|
||||
|
||||
rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
|
||||
*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
|
||||
>> OCRDMA_PHY_PS_SHIFT;
|
||||
if (lnk_speed)
|
||||
*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
|
||||
>> OCRDMA_PHY_PS_SHIFT;
|
||||
if (lnk_state)
|
||||
*lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);
|
||||
|
||||
mbx_err:
|
||||
kfree(cmd);
|
||||
@ -2515,9 +2543,10 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
|
||||
ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
|
||||
cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
|
||||
|
||||
if (vlan_id < 0x1000) {
|
||||
if (dev->pfc_state) {
|
||||
vlan_id = 0;
|
||||
if (vlan_id == 0xFFFF)
|
||||
vlan_id = 0;
|
||||
if (vlan_id || dev->pfc_state) {
|
||||
if (!vlan_id) {
|
||||
pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
|
||||
dev->id);
|
||||
pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
|
||||
|
@ -106,7 +106,8 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
|
||||
bool solicited, u16 cqe_popped);
|
||||
|
||||
/* verbs specific mailbox commands */
|
||||
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
|
||||
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
|
||||
u8 *lnk_st);
|
||||
int ocrdma_query_config(struct ocrdma_dev *,
|
||||
struct ocrdma_mbx_query_config *config);
|
||||
|
||||
@ -153,5 +154,6 @@ char *port_speed_string(struct ocrdma_dev *dev);
|
||||
void ocrdma_init_service_level(struct ocrdma_dev *);
|
||||
void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
|
||||
void ocrdma_free_pd_range(struct ocrdma_dev *dev);
|
||||
void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate);
|
||||
|
||||
#endif /* __OCRDMA_HW_H__ */
|
||||
|
@ -290,6 +290,7 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
|
||||
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
|
||||
{
|
||||
int status = 0, i;
|
||||
u8 lstate = 0;
|
||||
struct ocrdma_dev *dev;
|
||||
|
||||
dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
|
||||
@ -319,6 +320,11 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
|
||||
if (status)
|
||||
goto alloc_err;
|
||||
|
||||
/* Query Link state and update */
|
||||
status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate);
|
||||
if (!status)
|
||||
ocrdma_update_link_state(dev, lstate);
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
|
||||
if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
|
||||
goto sysfs_err;
|
||||
@ -373,7 +379,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
|
||||
ocrdma_remove_free(dev);
|
||||
}
|
||||
|
||||
static int ocrdma_open(struct ocrdma_dev *dev)
|
||||
static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev)
|
||||
{
|
||||
struct ib_event port_event;
|
||||
|
||||
@ -384,32 +390,9 @@ static int ocrdma_open(struct ocrdma_dev *dev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ocrdma_close(struct ocrdma_dev *dev)
|
||||
static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev)
|
||||
{
|
||||
int i;
|
||||
struct ocrdma_qp *qp, **cur_qp;
|
||||
struct ib_event err_event;
|
||||
struct ib_qp_attr attrs;
|
||||
int attr_mask = IB_QP_STATE;
|
||||
|
||||
attrs.qp_state = IB_QPS_ERR;
|
||||
mutex_lock(&dev->dev_lock);
|
||||
if (dev->qp_tbl) {
|
||||
cur_qp = dev->qp_tbl;
|
||||
for (i = 0; i < OCRDMA_MAX_QP; i++) {
|
||||
qp = cur_qp[i];
|
||||
if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
|
||||
/* change the QP state to ERROR */
|
||||
_ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
|
||||
|
||||
err_event.event = IB_EVENT_QP_FATAL;
|
||||
err_event.element.qp = &qp->ibqp;
|
||||
err_event.device = &dev->ibdev;
|
||||
ib_dispatch_event(&err_event);
|
||||
}
|
||||
}
|
||||
}
|
||||
mutex_unlock(&dev->dev_lock);
|
||||
|
||||
err_event.event = IB_EVENT_PORT_ERR;
|
||||
err_event.element.port_num = 1;
|
||||
@ -420,7 +403,7 @@ static int ocrdma_close(struct ocrdma_dev *dev)
|
||||
|
||||
static void ocrdma_shutdown(struct ocrdma_dev *dev)
|
||||
{
|
||||
ocrdma_close(dev);
|
||||
ocrdma_dispatch_port_error(dev);
|
||||
ocrdma_remove(dev);
|
||||
}
|
||||
|
||||
@ -431,18 +414,28 @@ static void ocrdma_shutdown(struct ocrdma_dev *dev)
|
||||
static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
|
||||
{
|
||||
switch (event) {
|
||||
case BE_DEV_UP:
|
||||
ocrdma_open(dev);
|
||||
break;
|
||||
case BE_DEV_DOWN:
|
||||
ocrdma_close(dev);
|
||||
break;
|
||||
case BE_DEV_SHUTDOWN:
|
||||
ocrdma_shutdown(dev);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate)
|
||||
{
|
||||
if (!(dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)) {
|
||||
dev->flags |= OCRDMA_FLAGS_LINK_STATUS_INIT;
|
||||
if (!lstate)
|
||||
return;
|
||||
}
|
||||
|
||||
if (!lstate)
|
||||
ocrdma_dispatch_port_error(dev);
|
||||
else
|
||||
ocrdma_dispatch_port_active(dev);
|
||||
}
|
||||
|
||||
static struct ocrdma_driver ocrdma_drv = {
|
||||
.name = "ocrdma_driver",
|
||||
.add = ocrdma_add,
|
||||
|
@ -465,8 +465,11 @@ struct ocrdma_ae_qp_mcqe {
	u32 valid_ae_event;
};

#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
enum ocrdma_async_event_code {
	OCRDMA_ASYNC_LINK_EVE_CODE = 0x01,
	OCRDMA_ASYNC_GRP5_EVE_CODE = 0x05,
	OCRDMA_ASYNC_RDMA_EVE_CODE = 0x14
};

enum ocrdma_async_grp5_events {
	OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,
@ -489,6 +492,44 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
	OCRDMA_MAX_ASYNC_ERRORS
};

struct ocrdma_ae_lnkst_mcqe {
	u32 speed_state_ptn;
	u32 qos_reason_falut;
	u32 evt_tag;
	u32 valid_ae_event;
};

enum {
	OCRDMA_AE_LSC_PORT_NUM_MASK	= 0x3F,
	OCRDMA_AE_LSC_PT_SHIFT		= 0x06,
	OCRDMA_AE_LSC_PT_MASK		= (0x03 << OCRDMA_AE_LSC_PT_SHIFT),
	OCRDMA_AE_LSC_LS_SHIFT		= 0x08,
	OCRDMA_AE_LSC_LS_MASK		= (0xFF << OCRDMA_AE_LSC_LS_SHIFT),
	OCRDMA_AE_LSC_LD_SHIFT		= 0x10,
	OCRDMA_AE_LSC_LD_MASK		= (0xFF << OCRDMA_AE_LSC_LD_SHIFT),
	OCRDMA_AE_LSC_PPS_SHIFT		= 0x18,
	OCRDMA_AE_LSC_PPS_MASK		= (0xFF << OCRDMA_AE_LSC_PPS_SHIFT),
	OCRDMA_AE_LSC_PPF_MASK		= 0xFF,
	OCRDMA_AE_LSC_ER_SHIFT		= 0x08,
	OCRDMA_AE_LSC_ER_MASK		= (0xFF << OCRDMA_AE_LSC_ER_SHIFT),
	OCRDMA_AE_LSC_QOS_SHIFT		= 0x10,
	OCRDMA_AE_LSC_QOS_MASK		= (0xFFFF << OCRDMA_AE_LSC_QOS_SHIFT)
};

enum {
	OCRDMA_AE_LSC_PLINK_DOWN	= 0x00,
	OCRDMA_AE_LSC_PLINK_UP		= 0x01,
	OCRDMA_AE_LSC_LLINK_DOWN	= 0x02,
	OCRDMA_AE_LSC_LLINK_MASK	= 0x02,
	OCRDMA_AE_LSC_LLINK_UP		= 0x03
};

/* mailbox command request and responses */
enum {
	OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2,
@ -676,7 +717,7 @@ enum {
	OCRDMA_PHY_PFLT_SHIFT	= 0x18,
	OCRDMA_QOS_LNKSP_MASK	= 0xFFFF0000,
	OCRDMA_QOS_LNKSP_SHIFT	= 0x10,
	OCRDMA_LLST_MASK	= 0xFF,
	OCRDMA_LINK_ST_MASK	= 0x01,
	OCRDMA_PLFC_MASK	= 0x00000400,
	OCRDMA_PLFC_SHIFT	= 0x8,
	OCRDMA_PLRFC_MASK	= 0x00000200,
@ -691,7 +732,7 @@ struct ocrdma_get_link_speed_rsp {

	u32 pflt_pps_ld_pnum;
	u32 qos_lsp;
	u32 res_lls;
	u32 res_lnk_st;
};

enum {
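
The mask/shift pairs above decode fields packed into the 32-bit words of the new link-state CQE. A self-contained sketch of the decode, reusing the values from the enum (the sample word is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Values copied from the enum above; the names are the driver's own. */
#define OCRDMA_AE_LSC_PORT_NUM_MASK 0x3F
#define OCRDMA_AE_LSC_LS_SHIFT      0x08
#define OCRDMA_AE_LSC_LS_MASK       (0xFF << OCRDMA_AE_LSC_LS_SHIFT)
#define OCRDMA_AE_LSC_PLINK_UP      0x01

int main(void)
{
    /* hypothetical speed_state_ptn word: port 2, physical link up */
    uint32_t speed_state_ptn =
        (OCRDMA_AE_LSC_PLINK_UP << OCRDMA_AE_LSC_LS_SHIFT) | 0x02;

    uint32_t port  = speed_state_ptn & OCRDMA_AE_LSC_PORT_NUM_MASK;
    uint32_t state = (speed_state_ptn & OCRDMA_AE_LSC_LS_MASK)
                     >> OCRDMA_AE_LSC_LS_SHIFT;

    /* prints: port=2 link_state=0x01 */
    printf("port=%u link_state=0x%02x\n", (unsigned)port, (unsigned)state);
    return 0;
}
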
@ -171,7 +171,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
	int status;
	u8 speed;

	status = ocrdma_mbx_get_link_speed(dev, &speed);
	status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
	if (status)
		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

@ -75,7 +75,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
	struct nvm_block *blk;
	int i;

	lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun];
	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];

	for (i = 0; i < nr_blocks; i++) {
		if (blks[i] == 0)
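
The one-line fix changes the row stride of the flattened channel-by-LUN array from the device-wide LUN count to the per-channel count. A standalone sketch of why the stride matters (the geometry values are made up):

#include <stdio.h>

/* hypothetical geometry: 4 channels, 8 LUNs per channel */
#define NR_CHANNELS   4
#define LUNS_PER_CHNL 8

/* The flat array holds channel 0's LUNs first, then channel 1's, and
 * so on, so the row stride is LUNS_PER_CHNL. Using the total LUN count
 * (NR_CHANNELS * LUNS_PER_CHNL) as the stride, as the old code did,
 * lands past the intended row for every channel > 0. */
static int lun_index(int ch, int lun)
{
    return (LUNS_PER_CHNL * ch) + lun;
}

int main(void)
{
    printf("ch=2 lun=3 -> flat index %d\n", lun_index(2, 3)); /* 19 */
    return 0;
}
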
@ -4326,8 +4326,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
			}
			mddev_unlock(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;
	else if (cmd_match(page, "resync"))
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@ -4340,8 +4339,12 @@ action_store(struct mddev *mddev, const char *page, size_t len)
			return -EINVAL;
		err = mddev_lock(mddev);
		if (!err) {
			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
			err = mddev->pers->start_reshape(mddev);
			if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
				err = -EBUSY;
			else {
				clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
				err = mddev->pers->start_reshape(mddev);
			}
			mddev_unlock(mddev);
		}
		if (err)
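
The reshape path now re-tests MD_RECOVERY_RUNNING after taking the mddev lock, because the earlier unlocked check can be invalidated before the lock is acquired. A toy pthread model of that check-under-lock pattern (the names and state are illustrative, not md's):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool recovery_running; /* toy stand-in for MD_RECOVERY_RUNNING */

/* An unlocked test may pass and then be invalidated before the lock is
 * taken, so the decisive test happens after pthread_mutex_lock(), like
 * the MD_RECOVERY_RUNNING re-check in the hunk above. */
static int start_reshape(void)
{
    int err = 0;

    pthread_mutex_lock(&lock);
    if (recovery_running)
        err = -EBUSY;          /* raced with a recovery thread */
    else
        printf("reshape started\n");
    pthread_mutex_unlock(&lock);
    return err;
}

int main(void)
{
    recovery_running = true;
    printf("start_reshape() = %d\n", start_reshape()); /* -EBUSY */
    return 0;
}
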
@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/slab.h>
@ -236,7 +236,7 @@ int ubi_debugfs_init(void)

	dfs_rootdir = debugfs_create_dir("ubi", NULL);
	if (IS_ERR_OR_NULL(dfs_rootdir)) {
		int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
		int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV;

		pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
		       err);
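
With IS_ERR_OR_NULL(), a non-NULL dfs_rootdir here is necessarily an ERR_PTR, so the error code must come from PTR_ERR() in the non-NULL arm; the old ternary had the arms swapped. A userspace model of the corrected decode (the macros below imitate the kernel's ERR_PTR encoding):

#include <stdio.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR_OR_NULL(ptr) \
    ((ptr) == NULL || (unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

#define ENODEV 19

static long decode(void *dfs_rootdir)
{
    if (!IS_ERR_OR_NULL(dfs_rootdir))
        return 0; /* success path */
    /* corrected ternary: an ERR_PTR carries the real errno,
     * NULL means debugfs is unavailable -> -ENODEV */
    return dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV;
}

int main(void)
{
    printf("%ld\n", decode(ERR_PTR(-12))); /* -12 (the real error) */
    printf("%ld\n", decode(NULL));         /* -19 (-ENODEV) */
    return 0;
}
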
@ -1299,7 +1299,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto exit;

	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
	crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
	hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
	if (hdr_crc != crc) {
		ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
@ -603,6 +603,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
	return 0;
}

static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
@ -615,20 +616,16 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work *wl_wrk;
	struct ubi_work wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;
	wl_wrk.e = e;
	wl_wrk.vol_id = vol_id;
	wl_wrk.lnum = lnum;
	wl_wrk.torture = torture;

	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	return erase_worker(ubi, wl_wrk, 0);
	return __erase_worker(ubi, &wl_wrk);
}

/**
@ -1014,7 +1011,7 @@ out_unlock:
}

/**
 * erase_worker - physical eraseblock erase worker function.
 * __erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
@ -1025,8 +1022,7 @@ out_unlock:
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown)
static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum;
@ -1034,21 +1030,11 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
	int lnum = wl_wrk->lnum;
	int err, available_consumed = 0;

	if (shutdown) {
		dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
		kfree(wl_wrk);
		wl_entry_destroy(ubi, e);
		return 0;
	}

	dbg_wl("erase PEB %d EC %d LEB %d:%d",
	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		/* Fine, we've erased it successfully */
		kfree(wl_wrk);

		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->free);
		ubi->free_count++;
@ -1066,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
	}

	ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
	kfree(wl_wrk);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
@ -1075,6 +1060,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
		/* Re-schedule the LEB for erasure */
		err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
		if (err1) {
			wl_entry_destroy(ubi, e);
			err = err1;
			goto out_ro;
		}
@ -1150,6 +1136,25 @@ out_ro:
	return err;
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown)
{
	int ret;

	if (shutdown) {
		struct ubi_wl_entry *e = wl_wrk->e;

		dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
		kfree(wl_wrk);
		wl_entry_destroy(ubi, e);
		return 0;
	}

	ret = __erase_worker(ubi, wl_wrk);
	kfree(wl_wrk);
	return ret;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
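
The refactor splits the old erase_worker() in two: __erase_worker() holds the erase logic and never frees its work item, while the erase_worker() wrapper keeps ownership of the kmalloc'ed item handed in by the async work queue. That lets do_sync_erase() pass a stack-allocated work item and drop a GFP_NOFS allocation that could fail. A toy model of the ownership split (names are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct work { int arg; }; /* toy stand-in for struct ubi_work */

/* core logic: borrows the work item, never frees it */
static int do_work(struct work *w)
{
    printf("processing arg=%d\n", w->arg);
    return 0;
}

/* async path: the wrapper owns the heap allocation and frees it */
static int worker(struct work *w)
{
    int ret = do_work(w);
    free(w);
    return ret;
}

/* sync path: the work item lives on the stack, so there is no
 * allocation that can fail and nothing to free */
static int do_sync(int arg)
{
    struct work w = { .arg = arg };
    return do_work(&w);
}

int main(void)
{
    struct work *w = malloc(sizeof(*w));
    if (!w)
        return 1;
    w->arg = 1;
    worker(w);         /* async-style: heap item, wrapper frees */
    return do_sync(2); /* sync-style: stack item */
}
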
@ -3430,25 +3430,29 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4

/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
#define BNX2X_NUM_TSO_WIN_SUB_BDS 3

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;
	int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
	int to_copy = 0, hlen = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
	if (xmit_type & XMIT_GSO_ENC)
		num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;

	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
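
An encapsulated (VXLAN) TSO packet consumes one extra buffer descriptor per send, so the sliding window used to test whether the frag list fits shrinks by one. Worked arithmetic under an assumed MAX_FETCH_BD of 13 (the real value lives in the bnx2x headers):

#include <stdio.h>

/* hypothetical firmware limit; bnx2x defines the real MAX_FETCH_BD */
#define MAX_FETCH_BD 13

#define NUM_TSO_WIN_SUB_BDS       3 /* linear BD + PBD + last BD */
#define NUM_VXLAN_TSO_WIN_SUB_BDS 4 /* one extra BD for tunnel parsing data */

int main(void)
{
    int regular = MAX_FETCH_BD - NUM_TSO_WIN_SUB_BDS;
    int vxlan   = MAX_FETCH_BD - NUM_VXLAN_TSO_WIN_SUB_BDS;

    /* an encapsulated TSO packet tolerates one frag fewer per window
     * before it must be linearized */
    printf("window size: regular=%d vxlan=%d\n", regular, vxlan); /* 10, 9 */
    return 0;
}
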
@ -848,8 +848,6 @@ void be_roce_dev_remove(struct be_adapter *);
/*
 * internal function to open-close roce device during ifup-ifdown.
 */
void be_roce_dev_open(struct be_adapter *);
void be_roce_dev_close(struct be_adapter *);
void be_roce_dev_shutdown(struct be_adapter *);

#endif /* BE_H */
@ -3299,8 +3299,10 @@ static int be_msix_register(struct be_adapter *adapter)

	return 0;
err_msix:
	for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
	for (i--; i >= 0; i--) {
		eqo = &adapter->eq_obj[i];
		free_irq(be_msix_vec_get(adapter, eqo), eqo);
	}
	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
		 status);
	be_msix_disable(adapter);
@ -3432,8 +3434,6 @@ static int be_close(struct net_device *netdev)

	be_disable_if_filters(adapter);

	be_roce_dev_close(adapter);

	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
		for_all_evt_queues(adapter, eqo, i) {
			napi_disable(&eqo->napi);
@ -3601,8 +3601,6 @@ static int be_open(struct net_device *netdev)
	be_link_status_update(adapter, link_status);

	netif_tx_start_all_queues(netdev);
	be_roce_dev_open(adapter);

#ifdef CONFIG_BE2NET_VXLAN
	if (skyhawk_chip(adapter))
		vxlan_get_rx_port(netdev);
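
The rewritten unwind loop in be_msix_register() only forms &adapter->eq_obj[i] after the i >= 0 test has passed; the old form computed the element address in the for-initializer, before any bound check. The same pattern in a standalone toy (array and indices are made up):

#include <stdio.h>

#define NQ 4

/* Unwind pattern from the hunk above: index the array only after the
 * bound check has passed, so i == -1 never produces &obj[-1]. */
int main(void)
{
    int registered[NQ] = { 1, 1, 0, 0 }; /* setup failed at i == 2 */
    int i = 2;                           /* index that failed */

    for (i--; i >= 0; i--) {
        registered[i] = 0;               /* release in reverse order */
        printf("released %d\n", i);
    }
    return 0;
}
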
@ -116,40 +116,6 @@ void be_roce_dev_remove(struct be_adapter *adapter)
	}
}

static void _be_roce_dev_open(struct be_adapter *adapter)
{
	if (ocrdma_drv && adapter->ocrdma_dev &&
	    ocrdma_drv->state_change_handler)
		ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
						 BE_DEV_UP);
}

void be_roce_dev_open(struct be_adapter *adapter)
{
	if (be_roce_supported(adapter)) {
		mutex_lock(&be_adapter_list_lock);
		_be_roce_dev_open(adapter);
		mutex_unlock(&be_adapter_list_lock);
	}
}

static void _be_roce_dev_close(struct be_adapter *adapter)
{
	if (ocrdma_drv && adapter->ocrdma_dev &&
	    ocrdma_drv->state_change_handler)
		ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
						 BE_DEV_DOWN);
}

void be_roce_dev_close(struct be_adapter *adapter)
{
	if (be_roce_supported(adapter)) {
		mutex_lock(&be_adapter_list_lock);
		_be_roce_dev_close(adapter);
		mutex_unlock(&be_adapter_list_lock);
	}
}

void be_roce_dev_shutdown(struct be_adapter *adapter)
{
	if (be_roce_supported(adapter)) {
@ -177,8 +143,6 @@ int be_roce_register_driver(struct ocrdma_driver *drv)

		_be_roce_dev_add(dev);
		netdev = dev->netdev;
		if (netif_running(netdev) && netif_oper_up(netdev))
			_be_roce_dev_open(dev);
	}
	mutex_unlock(&be_adapter_list_lock);
	return 0;
@ -60,9 +60,7 @@ struct ocrdma_driver {
	void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
};

enum {
	BE_DEV_UP = 0,
	BE_DEV_DOWN = 1,
enum be_roce_event {
	BE_DEV_SHUTDOWN = 2
};

@ -242,6 +242,13 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
	unsigned long flags;
	u64 ns, zero = 0;

	/* mlx4_en_init_timestamp is called for each netdev.
	 * mdev->ptp_clock is common for all ports, skip initialization if
	 * was done for other port.
	 */
	if (mdev->ptp_clock)
		return;

	rwlock_init(&mdev->clock_lock);

	memset(&mdev->cycles, 0, sizeof(mdev->cycles));
@ -232,9 +232,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
		if (mdev->pndev[i])
			mlx4_en_destroy_netdev(mdev->pndev[i]);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_remove_timestamp(mdev);

	flush_workqueue(mdev->workqueue);
	destroy_workqueue(mdev->workqueue);
	(void) mlx4_mr_free(dev, &mdev->mr);
@ -320,10 +317,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
		mdev->port_cnt++;

	/* Initialize time stamp mechanism */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_init_timestamp(mdev);

	/* Set default number of RX rings*/
	mlx4_en_set_num_rx_rings(mdev);

@ -2072,6 +2072,9 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		mlx4_en_remove_timestamp(mdev);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
@ -3058,9 +3061,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
	}
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	/* Initialize time stamp mechanism */
	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task,
				   SERVICE_TASK_DELAY);
		mlx4_en_init_timestamp(mdev);

	queue_delayed_work(mdev->workqueue, &priv->service_task,
			   SERVICE_TASK_DELAY);

	mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
				 mdev->profile.prof[priv->port].rx_ppp,
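
Timestamp setup moves from device add/remove to the per-netdev init/destroy paths; the mdev->ptp_clock guard added in en_clock.c above keeps the shared clock from being initialized once per port. A toy model of that idempotent-init guard (names are illustrative stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* toy model of the shared-resource guard: the first port to initialize
 * sets up the shared PTP clock, later ports return early */
struct mdev_model { bool ptp_clock; };

static void init_timestamp(struct mdev_model *mdev, int port)
{
    if (mdev->ptp_clock)
        return; /* already initialized via another port */
    mdev->ptp_clock = true;
    printf("port %d initialized the shared clock\n", port);
}

int main(void)
{
    struct mdev_model mdev = { false };
    init_timestamp(&mdev, 1); /* does the work */
    init_timestamp(&mdev, 2); /* no-op */
    return 0;
}
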
@ -1937,6 +1937,12 @@ static void refill_rx(struct net_device *dev)
				break; /* Better luck next round. */
			np->rx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data, buflen, PCI_DMA_FROMDEVICE);
			if (pci_dma_mapping_error(np->pci_dev,
						  np->rx_dma[entry])) {
				dev_kfree_skb_any(skb);
				np->rx_skbuff[entry] = NULL;
				break; /* Better luck next round. */
			}
			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
		}
		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
@ -2093,6 +2099,12 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
	np->tx_skbuff[entry] = skb;
	np->tx_dma[entry] = pci_map_single(np->pci_dev,
				skb->data,skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) {
		np->tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);

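
Both hunks add the same guard: pci_map_single() can fail (for instance under an IOMMU), and the returned handle must be validated before it is written into a descriptor the NIC will fetch. On the TX side the packet is dropped and NETDEV_TX_OK is returned so the stack does not requeue it. A kernel-style sketch of the map-then-check pattern (not tied to natsemi's fields, and compilable only in-kernel):

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Map an skb for transmit and validate the handle before use; on
 * failure the caller frees the skb and bumps tx_dropped. */
static int map_tx_buffer(struct pci_dev *pdev, struct sk_buff *skb,
			 dma_addr_t *handle)
{
	*handle = pci_map_single(pdev, skb->data, skb->len,
				 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, *handle))
		return -EIO; /* never hand an unchecked handle to the NIC */
	return 0;
}
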
@ -252,7 +252,7 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
		state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
	}

	if (!idc->vnic_wait_limit) {
	if (state != QLCNIC_DEV_NPAR_OPER) {
		dev_err(&adapter->pdev->dev,
			"vNIC mode not operational, state check timed out.\n");
		return -EIO;