commit 7a7368a5f2

    Merge branch 'perf/urgent' into perf/core, to pick up fixes

    Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -9,6 +9,7 @@ Contents:
    batman-adv
    kapi
    z8530book
+   msg_zerocopy

 .. only:: subproject

@@ -16,4 +17,3 @@ Contents:
    =======

 * :ref:`genindex`
@@ -72,6 +72,10 @@ this flag, a process must first signal intent by setting a socket option:
   if (setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one)))
 	error(1, errno, "setsockopt zerocopy");

+Setting the socket option only works when the socket is in its initial
+(TCP_CLOSED) state. Trying to set the option for a socket returned by accept(),
+for example, will lead to an EBUSY error. In this case, the option should be set
+to the listening socket and it will be inherited by the accepted sockets.
+
 Transmission
 ------------
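A minimal userspace sketch of the pattern the added paragraph describes (error
handling trimmed; the port number is arbitrary, and the SO_ZEROCOPY fallback
define is an assumption for libcs that predate the option):

    #include <netinet/in.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef SO_ZEROCOPY
    #define SO_ZEROCOPY 60  /* kernel uapi value; assumption for old libcs */
    #endif

    int listen_with_zerocopy(unsigned short port)
    {
        int one = 1;
        struct sockaddr_in addr;
        int lfd = socket(AF_INET, SOCK_STREAM, 0);

        /* Set the flag while the socket is still TCP_CLOSED ... */
        if (lfd < 0 || setsockopt(lfd, SOL_SOCKET, SO_ZEROCOPY,
                                  &one, sizeof(one)))
            return -1;

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(port);
        if (bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) ||
            listen(lfd, 1))
            return -1;

        /* ... the accepted socket inherits SO_ZEROCOPY; setting it
         * here instead would fail with EBUSY. */
        return accept(lfd, NULL, NULL);
    }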
@@ -10134,7 +10134,7 @@ F:	drivers/irqchip/irq-ompic.c
 F:	drivers/irqchip/irq-or1k-*

 OPENVSWITCH
-M:	Pravin Shelar <pshelar@nicira.com>
+M:	Pravin B Shelar <pshelar@ovn.org>
 L:	netdev@vger.kernel.org
 L:	dev@openvswitch.org
 W:	http://openvswitch.org
Makefile (2 lines changed)

@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Fearless Coyote

 # *DOCUMENTATION*
@@ -35,6 +35,14 @@
 			reg = <0x80 0x10>, <0x100 0x10>;
 			#clock-cells = <0>;
 			clocks = <&input_clk>;
+
+			/*
+			 * Set initial core pll output frequency to 90MHz.
+			 * It will be applied at the core pll driver probing
+			 * on early boot.
+			 */
+			assigned-clocks = <&core_clk>;
+			assigned-clock-rates = <90000000>;
 		};

 		core_intc: archs-intc@cpu {
@@ -35,6 +35,14 @@
 			reg = <0x80 0x10>, <0x100 0x10>;
 			#clock-cells = <0>;
 			clocks = <&input_clk>;
+
+			/*
+			 * Set initial core pll output frequency to 100MHz.
+			 * It will be applied at the core pll driver probing
+			 * on early boot.
+			 */
+			assigned-clocks = <&core_clk>;
+			assigned-clock-rates = <100000000>;
 		};

 		core_intc: archs-intc@cpu {
@@ -114,6 +114,14 @@
 			reg = <0x00 0x10>, <0x14B8 0x4>;
 			#clock-cells = <0>;
 			clocks = <&input_clk>;
+
+			/*
+			 * Set initial core pll output frequency to 1GHz.
+			 * It will be applied at the core pll driver probing
+			 * on early boot.
+			 */
+			assigned-clocks = <&core_clk>;
+			assigned-clock-rates = <1000000000>;
 		};

 		serial: serial@5000 {
@@ -49,10 +49,11 @@ CONFIG_SERIAL_8250_DW=y
 CONFIG_SERIAL_OF_PLATFORM=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
-CONFIG_FB=y
-CONFIG_FB_UDL=y
+CONFIG_DRM=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_DRM_UDL=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_USB=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
 CONFIG_USB_OHCI_HCD=y
@@ -668,6 +668,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
 		return 0;

 	__asm__ __volatile__(
+	"	mov	lp_count, %5		\n"
 	"	lp	3f			\n"
 	"1:	ldb.ab  %3, [%2, 1]		\n"
 	"	breq.d	%3, 0, 3f		\n"
@@ -684,8 +685,8 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count)
 	"	.word   1b, 4b			\n"
 	"	.previous			\n"
 	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
-	: "g"(-EFAULT), "l"(count)
-	: "memory");
+	: "g"(-EFAULT), "r"(count)
+	: "lp_count", "lp_start", "lp_end", "memory");

 	return res;
 }
@@ -199,7 +199,7 @@ static void read_arc_build_cfg_regs(void)
 		unsigned int exec_ctrl;

 		READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
-		cpu->extn.dual_enb = exec_ctrl & 1;
+		cpu->extn.dual_enb = !(exec_ctrl & 1);

 		/* dual issue always present for this core */
 		cpu->extn.dual = 1;
@@ -163,7 +163,7 @@ arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
  */
 static int __print_sym(unsigned int address, void *unused)
 {
-	__print_symbol("  %s\n", address);
+	printk("  %pS\n", (void *)address);
 	return 0;
 }
@@ -83,6 +83,7 @@ DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC)
 DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", __weak do_memory_error, BUS_ADRERR)
 DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT)
 DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN)
+DO_ERROR_INFO(SIGSEGV, "gcc generated __builtin_trap", do_trap5_error, 0)

 /*
  * Entry Point for Misaligned Data access Exception, for emulating in software
@@ -115,6 +116,8 @@ void do_machine_check_fault(unsigned long address, struct pt_regs *regs)
  * Thus TRAP_S <n> can be used for specific purpose
  *  -1 used for software breakpointing (gdb)
  *  -2 used by kprobes
+ *  -5 __builtin_trap() generated by gcc (2018.03 onwards) for toggle such as
+ *     -fno-isolate-erroneous-paths-dereference
  */
 void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
 {
@@ -134,6 +137,9 @@ void do_non_swi_trap(unsigned long address, struct pt_regs *regs)
 		kgdb_trap(regs);
 		break;

+	case 5:
+		do_trap5_error(address, regs);
+		break;
 	default:
 		break;
 	}
@@ -155,3 +161,11 @@ void do_insterror_or_kprobe(unsigned long address, struct pt_regs *regs)

 	insterror_is_error(address, regs);
 }
+
+/*
+ * abort() call generated by older gcc for __builtin_trap()
+ */
+void abort(void)
+{
+	__asm__ __volatile__("trap_s  5\n");
+}
@@ -163,6 +163,9 @@ static void show_ecr_verbose(struct pt_regs *regs)
 		else
 			pr_cont("Bus Error, check PRM\n");
 #endif
+	} else if (vec == ECR_V_TRAP) {
+		if (regs->ecr_param == 5)
+			pr_cont("gcc generated __builtin_trap\n");
 	} else {
 		pr_cont("Check Programmer's Manual\n");
 	}
@@ -317,25 +317,23 @@ static void __init axs103_early_init(void)
 	 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
 	 * of fudging the freq in DT
 	 */
+#define AXS103_QUAD_CORE_CPU_FREQ_HZ	50000000
+
 	unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
 	if (num_cores > 2) {
-		u32 freq = 50, orig;
-		/*
-		 * TODO: use cpu node "cpu-freq" param instead of platform-specific
-		 * "/cpu_card/core_clk" as it works only if we use fixed-clock for cpu.
-		 */
+		u32 freq;
 		int off = fdt_path_offset(initial_boot_params, "/cpu_card/core_clk");
 		const struct fdt_property *prop;

 		prop = fdt_get_property(initial_boot_params, off,
-					"clock-frequency", NULL);
-		orig = be32_to_cpu(*(u32*)(prop->data)) / 1000000;
+					"assigned-clock-rates", NULL);
+		freq = be32_to_cpu(*(u32 *)(prop->data));

 		/* Patching .dtb in-place with new core clock value */
-		if (freq != orig ) {
-			freq = cpu_to_be32(freq * 1000000);
+		if (freq != AXS103_QUAD_CORE_CPU_FREQ_HZ) {
+			freq = cpu_to_be32(AXS103_QUAD_CORE_CPU_FREQ_HZ);
 			fdt_setprop_inplace(initial_boot_params, off,
-					"clock-frequency", &freq, sizeof(freq));
+					"assigned-clock-rates", &freq, sizeof(freq));
 		}
 	}
 #endif
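The quirk above rewrites a property inside the flattened device tree before it
is unflattened. A hedged sketch of the same libfdt pattern (hypothetical
blob/path/property names; error handling trimmed) — in-place writes work only
because the new value has the same length as the old one:

    #include <libfdt.h>

    /* Patch a 32-bit property in place inside an FDT blob. */
    static int patch_u32(void *fdt, const char *path,
                         const char *prop, uint32_t hz)
    {
        int off = fdt_path_offset(fdt, path);
        fdt32_t val = cpu_to_fdt32(hz);

        if (off < 0)
            return off;
        return fdt_setprop_inplace(fdt, off, prop, &val, sizeof(val));
    }

Typical use, mirroring the code above:
patch_u32(blob, "/cpu_card/core_clk", "assigned-clock-rates", 50000000).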
@@ -38,42 +38,6 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
 #define CREG_PAE		(CREG_BASE + 0x180)
 #define CREG_PAE_UPDATE		(CREG_BASE + 0x194)

-#define CREG_CORE_IF_CLK_DIV	(CREG_BASE + 0x4B8)
-#define CREG_CORE_IF_CLK_DIV_2	0x1
-#define CGU_BASE		ARC_PERIPHERAL_BASE
-#define CGU_PLL_STATUS		(ARC_PERIPHERAL_BASE + 0x4)
-#define CGU_PLL_CTRL		(ARC_PERIPHERAL_BASE + 0x0)
-#define CGU_PLL_STATUS_LOCK	BIT(0)
-#define CGU_PLL_STATUS_ERR	BIT(1)
-#define CGU_PLL_CTRL_1GHZ	0x3A10
-#define HSDK_PLL_LOCK_TIMEOUT	500
-
-#define HSDK_PLL_LOCKED() \
-	!!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
-
-#define HSDK_PLL_ERR() \
-	!!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
-
-static void __init hsdk_set_cpu_freq_1ghz(void)
-{
-	u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
-
-	/*
-	 * As we set cpu clock which exceeds 500MHz, the divider for the interface
-	 * clock must be programmed to div-by-2.
-	 */
-	iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
-
-	/* Set cpu clock to 1GHz */
-	iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
-
-	while (!HSDK_PLL_LOCKED() && timeout--)
-		cpu_relax();
-
-	if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
-		pr_err("Failed to setup CPU frequency to 1GHz!");
-}
-
 #define SDIO_BASE		(ARC_PERIPHERAL_BASE + 0xA000)
 #define SDIO_UHS_REG_EXT	(SDIO_BASE + 0x108)
 #define SDIO_UHS_REG_EXT_DIV_2	(2 << 30)
@@ -98,12 +62,6 @@ static void __init hsdk_init_early(void)
 	 * minimum possible div-by-2.
 	 */
 	iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
-
-	/*
-	 * Setup CPU frequency to 1GHz.
-	 * TODO: remove it after smart hsdk pll driver will be introduced.
-	 */
-	hsdk_set_cpu_freq_1ghz();
 }

 static const char *hsdk_compat[] __initconst = {
@@ -235,6 +235,7 @@ LEAF(mips_cps_core_init)
 	 has_mt	t0, 3f

 	.set	push
+	.set	MIPS_ISA_LEVEL_RAW
 	.set	mt

 	/* Only allow 1 TC per VPE to execute... */
@@ -388,6 +389,7 @@ LEAF(mips_cps_boot_vpes)
 #elif defined(CONFIG_MIPS_MT)

 	.set	push
+	.set	MIPS_ISA_LEVEL_RAW
 	.set	mt

 	/* If the core doesn't support MT then return */
@@ -705,6 +705,18 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
 	struct task_struct *t;
 	int max_users;

+	/* If nothing to change, return right away, successfully. */
+	if (value == mips_get_process_fp_mode(task))
+		return 0;
+
+	/* Only accept a mode change if 64-bit FP enabled for o32. */
+	if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+		return -EOPNOTSUPP;
+
+	/* And only for o32 tasks. */
+	if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
+		return -EOPNOTSUPP;
+
 	/* Check the value is valid */
 	if (value & ~known_bits)
 		return -EOPNOTSUPP;
@@ -419,25 +419,38 @@ static int gpr64_set(struct task_struct *target,

 #endif /* CONFIG_64BIT */

-static int fpr_get(struct task_struct *target,
-		   const struct user_regset *regset,
-		   unsigned int pos, unsigned int count,
-		   void *kbuf, void __user *ubuf)
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * !CONFIG_CPU_HAS_MSA variant.  FP context's general register slots
+ * correspond 1:1 to buffer slots.  Only general registers are copied.
+ */
+static int fpr_get_fpa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       void **kbuf, void __user **ubuf)
 {
-	unsigned i;
-	int err;
+	return user_regset_copyout(pos, count, kbuf, ubuf,
+				   &target->thread.fpu,
+				   0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * CONFIG_CPU_HAS_MSA variant.  Only lower 64 bits of FP context's
+ * general register slots are copied to buffer slots.  Only general
+ * registers are copied.
+ */
+static int fpr_get_msa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       void **kbuf, void __user **ubuf)
+{
+	unsigned int i;
 	u64 fpr_val;
+	int err;

-	/* XXX fcr31  */
-
-	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
-		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-					   &target->thread.fpu,
-					   0, sizeof(elf_fpregset_t));
-
 	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
 	for (i = 0; i < NUM_FPU_REGS; i++) {
 		fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
-		err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+		err = user_regset_copyout(pos, count, kbuf, ubuf,
 					  &fpr_val, i * sizeof(elf_fpreg_t),
 					  (i + 1) * sizeof(elf_fpreg_t));
 		if (err)
@@ -447,27 +460,64 @@ static int fpr_get(struct task_struct *target,
 	return 0;
 }

-static int fpr_set(struct task_struct *target,
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ */
+static int fpr_get(struct task_struct *target,
 		   const struct user_regset *regset,
 		   unsigned int pos, unsigned int count,
-		   const void *kbuf, const void __user *ubuf)
+		   void *kbuf, void __user *ubuf)
 {
-	unsigned i;
+	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
 	int err;

+	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+		err = fpr_get_fpa(target, &pos, &count, &kbuf, &ubuf);
+	else
+		err = fpr_get_msa(target, &pos, &count, &kbuf, &ubuf);
+	if (err)
+		return err;
+
+	err = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+				  &target->thread.fpu.fcr31,
+				  fcr31_pos, fcr31_pos + sizeof(u32));
+
+	return err;
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * !CONFIG_CPU_HAS_MSA variant.  Buffer slots correspond 1:1 to FP
+ * context's general register slots.  Only general registers are copied.
+ */
+static int fpr_set_fpa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       const void **kbuf, const void __user **ubuf)
+{
+	return user_regset_copyin(pos, count, kbuf, ubuf,
+				  &target->thread.fpu,
+				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to lower 64
+ * bits only of FP context's general register slots.  Only general
+ * registers are copied.
+ */
+static int fpr_set_msa(struct task_struct *target,
+		       unsigned int *pos, unsigned int *count,
+		       const void **kbuf, const void __user **ubuf)
+{
+	unsigned int i;
 	u64 fpr_val;
+	int err;

-	/* XXX fcr31  */
-
-	init_fp_ctx(target);
-
-	if (sizeof(target->thread.fpu.fpr[i]) == sizeof(elf_fpreg_t))
-		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-					  &target->thread.fpu,
-					  0, sizeof(elf_fpregset_t));
-
 	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
-	for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
-		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+		err = user_regset_copyin(pos, count, kbuf, ubuf,
 					  &fpr_val, i * sizeof(elf_fpreg_t),
 					  (i + 1) * sizeof(elf_fpreg_t));
 		if (err)
@@ -478,6 +528,53 @@ static int fpr_set(struct task_struct *target,
 	return 0;
 }

+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately.
+ *
+ * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
+ * which is supposed to have been guaranteed by the kernel before
+ * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
+ * so that we can safely avoid preinitializing temporaries for
+ * partial register writes.
+ */
+static int fpr_set(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user *ubuf)
+{
+	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+	u32 fcr31;
+	int err;
+
+	BUG_ON(count % sizeof(elf_fpreg_t));
+
+	if (pos + count > sizeof(elf_fpregset_t))
+		return -EIO;
+
+	init_fp_ctx(target);
+
+	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
+	else
+		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
+	if (err)
+		return err;
+
+	if (count > 0) {
+		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+					 &fcr31,
+					 fcr31_pos, fcr31_pos + sizeof(u32));
+		if (err)
+			return err;
+
+		ptrace_setfcr31(target, fcr31);
+	}
+
+	return err;
+}
+
 enum mips_regset {
 	REGSET_GPR,
 	REGSET_FPR,
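For context, the regset these helpers fill is what a debugger sees through
PTRACE_GETREGSET with NT_PRFPREG: 32 doubleword FP registers followed by the
32-bit FCSR. A hedged userspace sketch (error handling trimmed; assumes a
tracee `pid` already stopped under ptrace):

    #include <elf.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>

    /* Layout implied by fpr_get() above: 32 FP general registers
     * (64-bit each) followed by the 32-bit FCSR. */
    struct mips_fpregs {
        uint64_t fpr[32];
        uint32_t fcr31;
    };

    void dump_fpu(pid_t pid)
    {
        struct mips_fpregs fp;
        struct iovec iov = { .iov_base = &fp, .iov_len = sizeof(fp) };

        if (ptrace(PTRACE_GETREGSET, pid, NT_PRFPREG, &iov) == 0)
            printf("f0=%016llx fcsr=%08x\n",
                   (unsigned long long)fp.fpr[0], fp.fcr31);
    }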
@@ -12,6 +12,7 @@
    for the semaphore.  */

 #define __PA_LDCW_ALIGNMENT	16
+#define __PA_LDCW_ALIGN_ORDER	4
 #define __ldcw_align(a) ({					\
 	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
 	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
@@ -29,6 +30,7 @@
    ldcd). */

 #define __PA_LDCW_ALIGNMENT	4
+#define __PA_LDCW_ALIGN_ORDER	2
 #define __ldcw_align(a) (&(a)->slock)
 #define __LDCW	"ldcw,co"
@@ -870,7 +870,7 @@ static void print_parisc_device(struct parisc_device *dev)
 	static int count;

 	print_pa_hwpath(dev, hw_path);
-	printk(KERN_INFO "%d. %s at 0x%p [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+	printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
 		++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
 		dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
@@ -35,6 +35,7 @@
 #include <asm/pgtable.h>
 #include <asm/signal.h>
 #include <asm/unistd.h>
+#include <asm/ldcw.h>
 #include <asm/thread_info.h>

 #include <linux/linkage.h>
@@ -46,6 +47,14 @@
 #endif

 	.import		pa_tlb_lock,data
+	.macro  load_pa_tlb_lock reg
+#if __PA_LDCW_ALIGNMENT > 4
+	load32	PA(pa_tlb_lock) + __PA_LDCW_ALIGNMENT-1, \reg
+	depi	0,31,__PA_LDCW_ALIGN_ORDER, \reg
+#else
+	load32	PA(pa_tlb_lock), \reg
+#endif
+	.endm

 	/* space_to_prot macro creates a prot id from a space id */

@@ -457,7 +466,7 @@
 	.macro		tlb_lock	spc,ptp,pte,tmp,tmp1,fault
 #ifdef CONFIG_SMP
 	cmpib,COND(=),n	0,\spc,2f
-	load32		PA(pa_tlb_lock),\tmp
+	load_pa_tlb_lock \tmp
 1:	LDCW		0(\tmp),\tmp1
 	cmpib,COND(=)	0,\tmp1,1b
 	nop
@@ -480,7 +489,7 @@
 	/* Release pa_tlb_lock lock. */
 	.macro		tlb_unlock1	spc,tmp
 #ifdef CONFIG_SMP
-	load32		PA(pa_tlb_lock),\tmp
+	load_pa_tlb_lock \tmp
 	tlb_unlock0	\spc,\tmp
 #endif
 	.endm
@@ -36,6 +36,7 @@
 #include <asm/assembly.h>
 #include <asm/pgtable.h>
 #include <asm/cache.h>
+#include <asm/ldcw.h>
 #include <linux/linkage.h>

 	.text
@@ -333,8 +334,12 @@ ENDPROC_CFI(flush_data_cache_local)

 	.macro	tlb_lock	la,flags,tmp
 #ifdef CONFIG_SMP
-	ldil		L%pa_tlb_lock,%r1
-	ldo		R%pa_tlb_lock(%r1),\la
+#if __PA_LDCW_ALIGNMENT > 4
+	load32		pa_tlb_lock + __PA_LDCW_ALIGNMENT-1, \la
+	depi		0,31,__PA_LDCW_ALIGN_ORDER, \la
+#else
+	load32		pa_tlb_lock, \la
+#endif
 	rsm		PSW_SM_I,\flags
 1:	LDCW		0(\la),\tmp
 	cmpib,<>,n	0,\tmp,3f
@@ -39,6 +39,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/personality.h>
 #include <linux/ptrace.h>
@@ -183,6 +184,44 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
 	return 1;
 }

+/*
+ * Idle thread support
+ *
+ * Detect when running on QEMU with SeaBIOS PDC Firmware and let
+ * QEMU idle the host too.
+ */
+
+int running_on_qemu __read_mostly;
+
+void __cpuidle arch_cpu_idle_dead(void)
+{
+	/* nop on real hardware, qemu will offline CPU. */
+	asm volatile("or %%r31,%%r31,%%r31\n":::);
+}
+
+void __cpuidle arch_cpu_idle(void)
+{
+	local_irq_enable();
+
+	/* nop on real hardware, qemu will idle sleep. */
+	asm volatile("or %%r10,%%r10,%%r10\n":::);
+}
+
+static int __init parisc_idle_init(void)
+{
+	const char *marker;
+
+	/* check QEMU/SeaBIOS marker in PAGE0 */
+	marker = (char *) &PAGE0->pad0;
+	running_on_qemu = (memcmp(marker, "SeaBIOS", 8) == 0);
+
+	if (!running_on_qemu)
+		cpu_idle_poll_ctrl(1);
+
+	return 0;
+}
+arch_initcall(parisc_idle_init);
+
 /*
  * Copy architecture-specific thread state
  */
@@ -631,11 +631,11 @@ void __init mem_init(void)
 	mem_init_print_info(NULL);
 #ifdef CONFIG_DEBUG_KERNEL /* double-sanity-check paranoia */
 	printk("virtual kernel memory layout:\n"
-	       "    vmalloc : 0x%p - 0x%p   (%4ld MB)\n"
-	       "    memory  : 0x%p - 0x%p   (%4ld MB)\n"
-	       "      .init : 0x%p - 0x%p   (%4ld kB)\n"
-	       "      .data : 0x%p - 0x%p   (%4ld kB)\n"
-	       "      .text : 0x%p - 0x%p   (%4ld kB)\n",
+	       "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
+	       "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
+	       "      .init : 0x%px - 0x%px   (%4ld kB)\n"
+	       "      .data : 0x%px - 0x%px   (%4ld kB)\n"
+	       "      .text : 0x%px - 0x%px   (%4ld kB)\n",

 	       (void*)VMALLOC_START, (void*)VMALLOC_END,
 	       (VMALLOC_END - VMALLOC_START) >> 20,
@@ -145,6 +145,11 @@ static noinline int bad_area(struct pt_regs *regs, unsigned long address)
 	return __bad_area(regs, address, SEGV_MAPERR);
 }

+static noinline int bad_access(struct pt_regs *regs, unsigned long address)
+{
+	return __bad_area(regs, address, SEGV_ACCERR);
+}
+
 static int do_sigbus(struct pt_regs *regs, unsigned long address,
 		     unsigned int fault)
 {
@@ -490,7 +495,7 @@ retry:

 good_area:
 	if (unlikely(access_error(is_write, is_exec, vma)))
-		return bad_area(regs, address);
+		return bad_access(regs, address);

 	/*
 	 * If for any reason at all we couldn't handle the fault,
@@ -0,0 +1,75 @@
+CONFIG_SMP=y
+CONFIG_PCI=y
+CONFIG_PCIE_XILINX=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_RAS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+# CONFIG_RCU_TRACE is not set
+CONFIG_CRYPTO_USER_API_HASH=y
@@ -17,10 +17,10 @@
 #include <linux/const.h>

 /* Status register flags */
-#define SR_IE   _AC(0x00000002, UL) /* Interrupt Enable */
-#define SR_PIE  _AC(0x00000020, UL) /* Previous IE */
-#define SR_PS   _AC(0x00000100, UL) /* Previously Supervisor */
-#define SR_SUM  _AC(0x00040000, UL) /* Supervisor may access User Memory */
+#define SR_SIE  _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_SPP  _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_SUM  _AC(0x00040000, UL) /* Supervisor may access User Memory */

 #define SR_FS           _AC(0x00006000, UL) /* Floating-point Status */
 #define SR_FS_OFF       _AC(0x00000000, UL)
@@ -21,8 +21,6 @@

 #include <linux/types.h>

-#ifdef CONFIG_MMU
-
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);

 /*
@@ -36,8 +34,6 @@ extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);

 extern void iounmap(volatile void __iomem *addr);

-#endif /* CONFIG_MMU */
-
 /* Generic IO read/write.  These perform native-endian accesses. */
 #define __raw_writeb __raw_writeb
 static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
@@ -27,25 +27,25 @@ static inline unsigned long arch_local_save_flags(void)
 /* unconditionally enable interrupts */
 static inline void arch_local_irq_enable(void)
 {
-	csr_set(sstatus, SR_IE);
+	csr_set(sstatus, SR_SIE);
 }

 /* unconditionally disable interrupts */
 static inline void arch_local_irq_disable(void)
 {
-	csr_clear(sstatus, SR_IE);
+	csr_clear(sstatus, SR_SIE);
 }

 /* get status and disable interrupts */
 static inline unsigned long arch_local_irq_save(void)
 {
-	return csr_read_clear(sstatus, SR_IE);
+	return csr_read_clear(sstatus, SR_SIE);
 }

 /* test flags */
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & SR_IE);
+	return !(flags & SR_SIE);
 }

 /* test hardware interrupt enable bit */
@@ -57,7 +57,7 @@ static inline int arch_irqs_disabled(void)
 /* set interrupt enabled status */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	csr_set(sstatus, flags & SR_IE);
+	csr_set(sstatus, flags & SR_SIE);
 }

 #endif /* _ASM_RISCV_IRQFLAGS_H */
@@ -20,8 +20,6 @@

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_MMU
-
 /* Page Upper Directory not used in RISC-V */
 #include <asm-generic/pgtable-nopud.h>
 #include <asm/page.h>
@@ -413,8 +411,6 @@ static inline void pgtable_cache_init(void)
 	/* No page table caches to initialize */
 }

-#endif /* CONFIG_MMU */
-
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
@@ -66,7 +66,7 @@ struct pt_regs {
 #define REG_FMT "%08lx"
 #endif

-#define user_mode(regs) (((regs)->sstatus & SR_PS) == 0)
+#define user_mode(regs) (((regs)->sstatus & SR_SPP) == 0)


 /* Helpers for working with the instruction pointer */
@@ -15,8 +15,6 @@
 #ifndef _ASM_RISCV_TLBFLUSH_H
 #define _ASM_RISCV_TLBFLUSH_H

-#ifdef CONFIG_MMU
-
 #include <linux/mm_types.h>

 /*
@@ -64,6 +62,4 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 	flush_tlb_all();
 }

-#endif /* CONFIG_MMU */
-
 #endif /* _ASM_RISCV_TLBFLUSH_H */
@@ -127,7 +127,6 @@ extern int fixup_exception(struct pt_regs *state);
  * call.
  */

-#ifdef CONFIG_MMU
 #define __get_user_asm(insn, x, ptr, err)			\
 do {								\
 	uintptr_t __tmp;					\
@@ -153,13 +152,11 @@ do {								\
 	__disable_user_access();				\
 	(x) = __x;						\
 } while (0)
-#endif /* CONFIG_MMU */
-

 #ifdef CONFIG_64BIT
 #define __get_user_8(x, ptr, err) \
 	__get_user_asm("ld", x, ptr, err)
 #else /* !CONFIG_64BIT */
-#ifdef CONFIG_MMU
 #define __get_user_8(x, ptr, err)				\
 do {								\
 	u32 __user *__ptr = (u32 __user *)(ptr);		\
@@ -193,7 +190,6 @@ do {								\
 	(x) = (__typeof__(x))((__typeof__((x)-(x)))(		\
 		(((u64)__hi << 32) | __lo)));			\
 } while (0)
-#endif /* CONFIG_MMU */
 #endif /* CONFIG_64BIT */


@@ -267,8 +263,6 @@ do {								\
 		((x) = 0, -EFAULT);				\
 })

-
-#ifdef CONFIG_MMU
 #define __put_user_asm(insn, x, ptr, err)			\
 do {								\
 	uintptr_t __tmp;					\
@@ -292,14 +286,11 @@ do {								\
 	: "rJ" (__x), "i" (-EFAULT));				\
 	__disable_user_access();				\
 } while (0)
-#endif /* CONFIG_MMU */
-
-

 #ifdef CONFIG_64BIT
 #define __put_user_8(x, ptr, err) \
 	__put_user_asm("sd", x, ptr, err)
 #else /* !CONFIG_64BIT */
-#ifdef CONFIG_MMU
 #define __put_user_8(x, ptr, err)				\
 do {								\
 	u32 __user *__ptr = (u32 __user *)(ptr);		\
@@ -329,7 +320,6 @@ do {								\
 	: "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT));		\
 	__disable_user_access();				\
 } while (0)
-#endif /* CONFIG_MMU */
 #endif /* CONFIG_64BIT */


@@ -438,7 +428,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
 * will set "err" to -EFAULT, while successful accesses return the previous
 * value.
 */
-#ifdef CONFIG_MMU
 #define __cmpxchg_user(ptr, old, new, err, size, lrb, scb)	\
 ({								\
 	__typeof__(ptr) __ptr = (ptr);				\
@@ -508,6 +497,5 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
 	(err) = __err;						\
 	__ret;							\
 })
-#endif /* CONFIG_MMU */

 #endif /* _ASM_RISCV_UACCESS_H */
@@ -14,3 +14,4 @@
 #define __ARCH_HAVE_MMU
 #define __ARCH_WANT_SYS_CLONE
 #include <uapi/asm/unistd.h>
+#include <uapi/asm/syscalls.h>
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2017 SiFive
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ASM_RISCV_VDSO_SYSCALLS_H
-#define _ASM_RISCV_VDSO_SYSCALLS_H
-
-#ifdef CONFIG_SMP
-
-/* These syscalls are only used by the vDSO and are not in the uapi. */
-#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
-__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
-
-#endif
-
-#endif /* _ASM_RISCV_VDSO_H */
arch/riscv/include/uapi/asm/syscalls.h (new file, 26 lines)

@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM__UAPI__SYSCALLS_H
+#define _ASM__UAPI__SYSCALLS_H
+
+/*
+ * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart.  There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller.  We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
+
+#endif
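A hedged userspace sketch of invoking this syscall directly (a libc wrapper
may not exist; the syscall number fallback assumes the asm-generic
arch-specific slot of 244, and flags == 0 meaning "flush for all threads" is
inferred from the comment above):

    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_riscv_flush_icache
    #define __NR_riscv_flush_icache (244 + 15)  /* assumption, see above */
    #endif

    /* Flush the icache after patching code in [start, end). */
    static long flush_icache(void *start, void *end)
    {
        return syscall(__NR_riscv_flush_icache, start, end, 0UL);
    }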
@@ -196,7 +196,7 @@ handle_syscall:
 	addi s2, s2, 0x4
 	REG_S s2, PT_SEPC(sp)
 	/* System calls run with interrupts enabled */
-	csrs sstatus, SR_IE
+	csrs sstatus, SR_SIE
 	/* Trace syscalls, but only if requested by the user. */
 	REG_L t0, TASK_TI_FLAGS(tp)
 	andi t0, t0, _TIF_SYSCALL_TRACE
@@ -224,8 +224,8 @@ ret_from_syscall:

 ret_from_exception:
 	REG_L s0, PT_SSTATUS(sp)
-	csrc sstatus, SR_IE
-	andi s0, s0, SR_PS
+	csrc sstatus, SR_SIE
+	andi s0, s0, SR_SPP
 	bnez s0, restore_all

 resume_userspace:
@@ -255,7 +255,7 @@ work_pending:
 	bnez s1, work_resched
 work_notifysig:
 	/* Handle pending signals and notify-resume requests */
-	csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */
+	csrs sstatus, SR_SIE /* Enable interrupts for do_notify_resume() */
 	move a0, sp /* pt_regs */
 	move a1, s0 /* current_thread_info->flags */
 	tail do_notify_resume
@@ -76,7 +76,7 @@ void show_regs(struct pt_regs *regs)
 void start_thread(struct pt_regs *regs, unsigned long pc,
 	unsigned long sp)
 {
-	regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL;
+	regs->sstatus = SR_SPIE /* User mode, irqs on */ | SR_FS_INITIAL;
 	regs->sepc = pc;
 	regs->sp = sp;
 	set_fs(USER_DS);
@@ -110,7 +110,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 		const register unsigned long gp __asm__ ("gp");
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gp = gp;
-		childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */
+		childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */

 		p->thread.ra = (unsigned long)ret_from_kernel_thread;
 		p->thread.s[0] = usp; /* fn */
@@ -23,5 +23,4 @@
 void *sys_call_table[__NR_syscalls] = {
 	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
 #include <asm/unistd.h>
-#include <asm/vdso-syscalls.h>
 };
@@ -13,7 +13,6 @@

 #include <linux/linkage.h>
 #include <asm/unistd.h>
-#include <asm/vdso-syscalls.h>

 	.text
 /* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
@@ -63,7 +63,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 		goto vmalloc_fault;

 	/* Enable interrupts if they were enabled in the parent context. */
-	if (likely(regs->sstatus & SR_PIE))
+	if (likely(regs->sstatus & SR_SPIE))
 		local_irq_enable();

 	/*
@@ -792,11 +792,12 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)

 	if (kvm->arch.use_cmma) {
 		/*
-		 * Get the last slot. They should be sorted by base_gfn, so the
-		 * last slot is also the one at the end of the address space.
-		 * We have verified above that at least one slot is present.
+		 * Get the first slot. They are reverse sorted by base_gfn, so
+		 * the first slot is also the one at the end of the address
+		 * space. We have verified above that at least one slot is
+		 * present.
 		 */
-		ms = slots->memslots + slots->used_slots - 1;
+		ms = slots->memslots;
 		/* round up so we only use full longs */
 		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
 		/* allocate enough bytes to store all the bits */
@@ -1006,7 +1006,7 @@ static inline int do_essa(struct kvm_vcpu *vcpu, const int orc)
 		cbrlo[entries] = gfn << PAGE_SHIFT;
 	}

-	if (orc) {
+	if (orc && gfn < ms->bitmap_size) {
 		/* increment only if we are really flipping the bit to 1 */
 		if (!test_and_set_bit(gfn, ms->pgste_bitmap))
 			atomic64_inc(&ms->dirty_pages);
@@ -9,6 +9,7 @@
  */
 #include <linux/init.h>
 #include <linux/platform_device.h>
+#include <linux/sh_eth.h>
 #include <mach-se/mach/se.h>
 #include <mach-se/mach/mrshpc.h>
 #include <asm/machvec.h>
@@ -115,13 +116,23 @@ static struct platform_device heartbeat_device = {
 #if defined(CONFIG_CPU_SUBTYPE_SH7710) ||\
 	defined(CONFIG_CPU_SUBTYPE_SH7712)
 /* SH771X Ethernet driver */
+static struct sh_eth_plat_data sh_eth_plat = {
+	.phy = PHY_ID,
+	.phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
 static struct resource sh_eth0_resources[] = {
 	[0] = {
 		.start = SH_ETH0_BASE,
-		.end = SH_ETH0_BASE + 0x1B8,
+		.end = SH_ETH0_BASE + 0x1B8 - 1,
 		.flags = IORESOURCE_MEM,
 	},
+	[1] = {
+		.start = SH_TSU_BASE,
+		.end = SH_TSU_BASE + 0x200 - 1,
+		.flags = IORESOURCE_MEM,
+	},
 	[2] = {
 		.start = SH_ETH0_IRQ,
 		.end = SH_ETH0_IRQ,
 		.flags = IORESOURCE_IRQ,
@@ -132,7 +143,7 @@ static struct platform_device sh_eth0_device = {
 	.name = "sh771x-ether",
 	.id = 0,
 	.dev = {
-		.platform_data = PHY_ID,
+		.platform_data = &sh_eth_plat,
 	},
 	.num_resources = ARRAY_SIZE(sh_eth0_resources),
 	.resource = sh_eth0_resources,
@@ -141,10 +152,15 @@ static struct platform_device sh_eth0_device = {
 static struct resource sh_eth1_resources[] = {
 	[0] = {
 		.start = SH_ETH1_BASE,
-		.end = SH_ETH1_BASE + 0x1B8,
+		.end = SH_ETH1_BASE + 0x1B8 - 1,
 		.flags = IORESOURCE_MEM,
 	},
+	[1] = {
+		.start = SH_TSU_BASE,
+		.end = SH_TSU_BASE + 0x200 - 1,
+		.flags = IORESOURCE_MEM,
+	},
 	[2] = {
 		.start = SH_ETH1_IRQ,
 		.end = SH_ETH1_IRQ,
 		.flags = IORESOURCE_IRQ,
@@ -155,7 +171,7 @@ static struct platform_device sh_eth1_device = {
 	.name = "sh771x-ether",
 	.id = 1,
 	.dev = {
-		.platform_data = PHY_ID,
+		.platform_data = &sh_eth_plat,
 	},
 	.num_resources = ARRAY_SIZE(sh_eth1_resources),
 	.resource = sh_eth1_resources,
@@ -100,6 +100,7 @@
 /* Base address */
 #define SH_ETH0_BASE 0xA7000000
 #define SH_ETH1_BASE 0xA7000400
+#define SH_TSU_BASE  0xA7000800
 /* PHY ID */
 #if defined(CONFIG_CPU_SUBTYPE_SH7710)
 # define PHY_ID 0x00
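Worth noting about the resource fix above: struct resource end addresses are
inclusive, so a region of size N must end at start + N - 1. A small sketch of
the idiom (hypothetical addresses) using DEFINE_RES_MEM, which encodes the
arithmetic for you:

    #include <linux/ioport.h>

    #define ETH_BASE 0xA7000000
    #define ETH_SIZE 0x1B8

    /* DEFINE_RES_MEM(start, size) expands to .end = start + size - 1,
     * avoiding the off-by-one the patch above corrects by hand. */
    static struct resource eth_res[] = {
        DEFINE_RES_MEM(ETH_BASE, ETH_SIZE),
    };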
@@ -755,14 +755,14 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,   snbep_rapl_init),

 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,  hsw_rapl_init),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,     hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,     hsx_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,   hsw_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,  hsw_rapl_init),

 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsx_rapl_init),
-	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsx_rapl_init),

 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),
 	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNM, knl_rapl_init),
@@ -4985,6 +4985,25 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%r13, %c[r13](%[svm]) \n\t"
 		"mov %%r14, %c[r14](%[svm]) \n\t"
 		"mov %%r15, %c[r15](%[svm]) \n\t"
 #endif
+		/*
+		 * Clear host registers marked as clobbered to prevent
+		 * speculative use.
+		 */
+		"xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
+		"xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
+		"xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
+		"xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
+		"xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
+#ifdef CONFIG_X86_64
+		"xor %%r8, %%r8 \n\t"
+		"xor %%r9, %%r9 \n\t"
+		"xor %%r10, %%r10 \n\t"
+		"xor %%r11, %%r11 \n\t"
+		"xor %%r12, %%r12 \n\t"
+		"xor %%r13, %%r13 \n\t"
+		"xor %%r14, %%r14 \n\t"
+		"xor %%r15, %%r15 \n\t"
+#endif
 		"pop %%" _ASM_BP
 		:
@@ -9415,6 +9415,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Save guest registers, load host registers, keep flags */
 		"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
 		"pop %0 \n\t"
+		"setbe %c[fail](%0)\n\t"
 		"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
 		"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
 		__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
@@ -9431,12 +9432,23 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 		"mov %%r13, %c[r13](%0) \n\t"
 		"mov %%r14, %c[r14](%0) \n\t"
 		"mov %%r15, %c[r15](%0) \n\t"
+		"xor %%r8d,  %%r8d \n\t"
+		"xor %%r9d,  %%r9d \n\t"
+		"xor %%r10d, %%r10d \n\t"
+		"xor %%r11d, %%r11d \n\t"
+		"xor %%r12d, %%r12d \n\t"
+		"xor %%r13d, %%r13d \n\t"
+		"xor %%r14d, %%r14d \n\t"
+		"xor %%r15d, %%r15d \n\t"
 #endif
 		"mov %%cr2, %%" _ASM_AX "   \n\t"
 		"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"

+		"xor %%eax, %%eax \n\t"
+		"xor %%ebx, %%ebx \n\t"
+		"xor %%esi, %%esi \n\t"
+		"xor %%edi, %%edi \n\t"
 		"pop  %%" _ASM_BP "; pop  %%" _ASM_DX " \n\t"
-		"setbe %c[fail](%0) \n\t"
 		".pushsection .rodata \n\t"
 		".global vmx_return \n\t"
 		"vmx_return: " _ASM_PTR " 2b \n\t"
@@ -562,6 +562,13 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
 	}
 }

+void blk_drain_queue(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	__blk_drain_queue(q, true);
+	spin_unlock_irq(q->queue_lock);
+}
+
 /**
  * blk_queue_bypass_start - enter queue bypass mode
  * @q: queue of interest
@@ -689,8 +696,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	 */
 	blk_freeze_queue(q);
 	spin_lock_irq(lock);
-	if (!q->mq_ops)
-		__blk_drain_queue(q, true);
 	queue_flag_set(QUEUE_FLAG_DEAD, q);
 	spin_unlock_irq(lock);

@@ -161,6 +161,8 @@ void blk_freeze_queue(struct request_queue *q)
 	 * exported to drivers as the only user for unfreeze is blk_mq.
 	 */
 	blk_freeze_queue_start(q);
+	if (!q->mq_ops)
+		blk_drain_queue(q);
 	blk_mq_freeze_queue_wait(q);
 }

@@ -330,4 +330,6 @@ static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 }
 #endif /* CONFIG_BOUNCE */

+extern void blk_drain_queue(struct request_queue *q);
+
 #endif /* BLK_INTERNAL_H */
@@ -1581,9 +1581,8 @@ out:
 	return err;
 }

-static void lo_release(struct gendisk *disk, fmode_t mode)
+static void __lo_release(struct loop_device *lo)
 {
-	struct loop_device *lo = disk->private_data;
 	int err;

 	if (atomic_dec_return(&lo->lo_refcnt))
@@ -1610,6 +1609,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
 	mutex_unlock(&lo->lo_ctl_mutex);
 }

+static void lo_release(struct gendisk *disk, fmode_t mode)
+{
+	mutex_lock(&loop_index_mutex);
+	__lo_release(disk->private_data);
+	mutex_unlock(&loop_index_mutex);
+}
+
 static const struct block_device_operations lo_fops = {
 	.owner =	THIS_MODULE,
 	.open =		lo_open,
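The loop fix above uses a common kernel refactoring: split a function into a
bare __helper that assumes a lock is held, plus a thin public wrapper that
takes the lock. A generic hedged sketch of the idiom (hypothetical names):

    #include <linux/mutex.h>

    struct obj;

    static DEFINE_MUTEX(obj_lock);

    /* Does the real work; caller must hold obj_lock. */
    static void __obj_teardown(struct obj *o)
    {
        /* ... body that relies on obj_lock being held ... */
    }

    /* Public entry point: takes the lock, then delegates. */
    static void obj_teardown(struct obj *o)
    {
        mutex_lock(&obj_lock);
        __obj_teardown(o);
        mutex_unlock(&obj_lock);
    }

The double-underscore prefix is the kernel's convention for "locking already
handled by the caller", which is exactly how __lo_release is used above.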
@@ -3047,13 +3047,21 @@ static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
 	mutex_unlock(&rbd_dev->watch_mutex);
 }

+static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
+{
+	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
+
+	strcpy(rbd_dev->lock_cookie, cookie);
+	rbd_set_owner_cid(rbd_dev, &cid);
+	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+}
+
 /*
  * lock_rwsem must be held for write
  */
 static int rbd_lock(struct rbd_device *rbd_dev)
 {
 	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
-	struct rbd_client_id cid = rbd_get_cid(rbd_dev);
 	char cookie[32];
 	int ret;

@@ -3068,9 +3076,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
 		return ret;

 	rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
-	strcpy(rbd_dev->lock_cookie, cookie);
-	rbd_set_owner_cid(rbd_dev, &cid);
-	queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
+	__rbd_lock(rbd_dev, cookie);
 	return 0;
 }

@@ -3856,7 +3862,7 @@ static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
 		queue_delayed_work(rbd_dev->task_wq,
 				   &rbd_dev->lock_dwork, 0);
 	} else {
-		strcpy(rbd_dev->lock_cookie, cookie);
+		__rbd_lock(rbd_dev, cookie);
 	}
 }

@@ -4381,7 +4387,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	segment_size = rbd_obj_bytes(&rbd_dev->header);
 	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
 	q->limits.max_sectors = queue_max_hw_sectors(q);
-	blk_queue_max_segments(q, segment_size / SECTOR_SIZE);
+	blk_queue_max_segments(q, USHRT_MAX);
 	blk_queue_max_segment_size(q, segment_size);
 	blk_queue_io_min(q, segment_size);
 	blk_queue_io_opt(q, segment_size);
@@ -2892,6 +2892,27 @@ void gpiod_set_raw_value(struct gpio_desc *desc, int value)
 }
 EXPORT_SYMBOL_GPL(gpiod_set_raw_value);

+/**
+ * gpiod_set_value_nocheck() - set a GPIO line value without checking
+ * @desc: the descriptor to set the value on
+ * @value: value to set
+ *
+ * This sets the value of a GPIO line backing a descriptor, applying
+ * different semantic quirks like active low and open drain/source
+ * handling.
+ */
+static void gpiod_set_value_nocheck(struct gpio_desc *desc, int value)
+{
+	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
+		value = !value;
+	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
+		gpio_set_open_drain_value_commit(desc, value);
+	else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
+		gpio_set_open_source_value_commit(desc, value);
+	else
+		gpiod_set_raw_value_commit(desc, value);
+}
+
 /**
  * gpiod_set_value() - assign a gpio's value
  * @desc: gpio whose value will be assigned
@@ -2906,16 +2927,8 @@ EXPORT_SYMBOL_GPL(gpiod_set_raw_value);
 void gpiod_set_value(struct gpio_desc *desc, int value)
 {
 	VALIDATE_DESC_VOID(desc);
 	/* Should be using gpiod_set_value_cansleep() */
 	WARN_ON(desc->gdev->chip->can_sleep);
-	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
-		value = !value;
-	if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
-		gpio_set_open_drain_value_commit(desc, value);
-	else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
-		gpio_set_open_source_value_commit(desc, value);
-	else
-		gpiod_set_raw_value_commit(desc, value);
+	gpiod_set_value_nocheck(desc, value);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_value);

@@ -3243,9 +3256,7 @@ void gpiod_set_value_cansleep(struct gpio_desc *desc, int value)
 {
 	might_sleep_if(extra_checks);
 	VALIDATE_DESC_VOID(desc);
-	if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
-		value = !value;
-	gpiod_set_raw_value_commit(desc, value);
+	gpiod_set_value_nocheck(desc, value);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_value_cansleep);
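From the consumer side the quirk handling stays invisible: a driver sets the
logical value and the core applies active-low and open-drain/open-source
semantics. A hedged sketch of typical consumer usage (hypothetical "reset"
function name from the device's firmware node):

    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int assert_reset(struct device *dev)
    {
        struct gpio_desc *reset =
            devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);

        if (IS_ERR(reset))
            return PTR_ERR(reset);

        /* Logical 1: the core inverts for active-low lines and applies
         * open-drain/source handling, as in gpiod_set_value_nocheck(). */
        gpiod_set_value(reset, 1);
        return 0;
    }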
@@ -314,7 +314,7 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map,
 }
 #endif

-struct ib_device *__ib_device_get_by_index(u32 ifindex);
+struct ib_device *ib_device_get_by_index(u32 ifindex);
 /* RDMA device netlink */
 void nldev_init(void);
 void nldev_exit(void);
@@ -134,7 +134,7 @@ static int ib_device_check_mandatory(struct ib_device *device)
 	return 0;
 }

-struct ib_device *__ib_device_get_by_index(u32 index)
+static struct ib_device *__ib_device_get_by_index(u32 index)
 {
 	struct ib_device *device;

@@ -145,6 +145,22 @@ struct ib_device *__ib_device_get_by_index(u32 index)
 	return NULL;
 }

+/*
+ * Caller is responsible to return refrerence count by calling put_device()
+ */
+struct ib_device *ib_device_get_by_index(u32 index)
+{
+	struct ib_device *device;
+
+	down_read(&lists_rwsem);
+	device = __ib_device_get_by_index(index);
+	if (device)
+		get_device(&device->dev);
+
+	up_read(&lists_rwsem);
+	return device;
+}
+
 static struct ib_device *__ib_device_get_by_name(const char *name)
 {
 	struct ib_device *device;
@@ -142,27 +142,34 @@ static int nldev_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,

 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);

-	device = __ib_device_get_by_index(index);
+	device = ib_device_get_by_index(index);
 	if (!device)
 		return -EINVAL;

 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!msg)
-		return -ENOMEM;
+	if (!msg) {
+		err = -ENOMEM;
+		goto err;
+	}

 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
 			0, 0);

 	err = fill_dev_info(msg, device);
-	if (err) {
-		nlmsg_free(msg);
-		return err;
-	}
+	if (err)
+		goto err_free;

 	nlmsg_end(msg, nlh);

+	put_device(&device->dev);
 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+	nlmsg_free(msg);
+err:
+	put_device(&device->dev);
+	return err;
 }

 static int _nldev_get_dumpit(struct ib_device *device,
@@ -220,31 +227,40 @@ static int nldev_port_get_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 		return -EINVAL;

 	index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-	device = __ib_device_get_by_index(index);
+	device = ib_device_get_by_index(index);
 	if (!device)
 		return -EINVAL;

 	port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
-	if (!rdma_is_port_valid(device, port))
-		return -EINVAL;
+	if (!rdma_is_port_valid(device, port)) {
+		err = -EINVAL;
+		goto err;
+	}

 	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
-	if (!msg)
-		return -ENOMEM;
+	if (!msg) {
+		err = -ENOMEM;
+		goto err;
+	}

 	nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
 			RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_GET),
 			0, 0);

 	err = fill_port_info(msg, device, port);
-	if (err) {
-		nlmsg_free(msg);
-		return err;
-	}
+	if (err)
+		goto err_free;

 	nlmsg_end(msg, nlh);
+	put_device(&device->dev);

 	return rdma_nl_unicast(msg, NETLINK_CB(skb).portid);
+
+err_free:
+	nlmsg_free(msg);
+err:
+	put_device(&device->dev);
+	return err;
 }

 static int nldev_port_get_dumpit(struct sk_buff *skb,
@@ -265,7 +281,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
 		return -EINVAL;

 	ifindex = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
-	device = __ib_device_get_by_index(ifindex);
+	device = ib_device_get_by_index(ifindex);
 	if (!device)
 		return -EINVAL;

@@ -299,7 +315,9 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
 		nlmsg_end(skb, nlh);
 	}

-out:	cb->args[0] = idx;
+out:
+	put_device(&device->dev);
+	cb->args[0] = idx;
 	return skb->len;
 }
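The netlink fix above converts early returns into the kernel's standard
goto-unwind pattern so the device reference is always dropped. A generic
hedged sketch of that pattern outside the kernel (hypothetical helpers):

    #include <errno.h>
    #include <stdlib.h>

    struct res { int dummy; };

    static struct res *acquire(void) { return malloc(sizeof(struct res)); }
    static void release(struct res *r) { free(r); }
    static int use(struct res *a, struct res *b) { (void)a; (void)b; return 0; }

    /* Each failure jumps to the label that releases everything
     * acquired so far — the same shape as the nldev fix above. */
    int do_request(void)
    {
        struct res *a, *b;
        int err;

        a = acquire();
        if (!a)
            return -ENOMEM;

        b = acquire();
        if (!b) {
            err = -ENOMEM;
            goto err_a;
        }

        err = use(a, b);
        if (err)
            goto err_b;

        release(b);
        release(a);
        return 0;

    err_b:
        release(b);
    err_a:
        release(a);
        return err;
    }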
@@ -642,7 +642,6 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
 		goto err_free_mr;

 	mr->max_pages = max_num_sg;
-
 	err = mlx4_mr_enable(dev->dev, &mr->mmr);
 	if (err)
 		goto err_free_pl;
@@ -653,6 +652,7 @@ struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
 	return &mr->ibmr;

 err_free_pl:
+	mr->ibmr.device = pd->device;
 	mlx4_free_priv_pages(mr);
 err_free_mr:
 	(void) mlx4_mr_free(dev->dev, &mr->mmr);
@@ -902,8 +902,8 @@ static int path_rec_start(struct net_device *dev,
 	return 0;
 }

-static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
-			   struct net_device *dev)
+static struct ipoib_neigh *neigh_add_path(struct sk_buff *skb, u8 *daddr,
+					  struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 	struct rdma_netdev *rn = netdev_priv(dev);
@@ -917,7 +917,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 		spin_unlock_irqrestore(&priv->lock, flags);
 		++dev->stats.tx_dropped;
 		dev_kfree_skb_any(skb);
-		return;
+		return NULL;
+	}
+
+	/* To avoid race condition, make sure that the
+	 * neigh will be added only once.
+	 */
+	if (unlikely(!list_empty(&neigh->list))) {
+		spin_unlock_irqrestore(&priv->lock, flags);
+		return neigh;
 	}

 	path = __path_find(dev, daddr + 4);
@@ -956,7 +964,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
 			path->ah->last_send = rn->send(dev, skb, path->ah->ah,
 						       IPOIB_QPN(daddr));
 			ipoib_neigh_put(neigh);
-			return;
+			return NULL;
 		}
 	} else {
 		neigh->ah = NULL;
@@ -973,7 +981,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,

 	spin_unlock_irqrestore(&priv->lock, flags);
 	ipoib_neigh_put(neigh);
-	return;
+	return NULL;

 err_path:
 	ipoib_neigh_free(neigh);
@@ -983,6 +991,8 @@ err_drop:

 	spin_unlock_irqrestore(&priv->lock, flags);
 	ipoib_neigh_put(neigh);
+
+	return NULL;
 }

 static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
@@ -1091,8 +1101,9 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	case htons(ETH_P_TIPC):
 		neigh = ipoib_neigh_get(dev, phdr->hwaddr);
 		if (unlikely(!neigh)) {
-			neigh_add_path(skb, phdr->hwaddr, dev);
-			return NETDEV_TX_OK;
+			neigh = neigh_add_path(skb, phdr->hwaddr, dev);
+			if (likely(!neigh))
+				return NETDEV_TX_OK;
 		}
 		break;
 	case htons(ETH_P_ARP):
@ -816,7 +816,10 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb)
spin_lock_irqsave(&priv->lock, flags);
if (!neigh) {
neigh = ipoib_neigh_alloc(daddr, dev);
if (neigh) {
/* Make sure that the neigh will be added only
* once to mcast list.
*/
if (neigh && list_empty(&neigh->list)) {
kref_get(&mcast->ah->ref);
neigh->ah = mcast->ah;
list_add_tail(&neigh->list, &mcast->neigh_list);
@ -1013,8 +1013,7 @@ static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
return -ENOMEM;

attr->qp_state = IB_QPS_INIT;
attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE;
attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE;
attr->port_num = ch->sport->port;
attr->pkey_index = 0;

@ -2078,7 +2077,7 @@ static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
goto destroy_ib;
}

guid = (__be16 *)&param->primary_path->sgid.global.interface_id;
guid = (__be16 *)&param->primary_path->dgid.global.interface_id;
snprintf(ch->ini_guid, sizeof(ch->ini_guid), "%04x:%04x:%04x:%04x",
be16_to_cpu(guid[0]), be16_to_cpu(guid[1]),
be16_to_cpu(guid[2]), be16_to_cpu(guid[3]));
@ -163,7 +163,7 @@ static unsigned int get_time_pit(void)
#define GET_TIME(x) do { x = (unsigned int)rdtsc(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "TSC"
#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_TILE)
#elif defined(__alpha__) || defined(CONFIG_MN10300) || defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV) || defined(CONFIG_TILE)
#define GET_TIME(x) do { x = get_cycles(); } while (0)
#define DELTA(x,y) ((y)-(x))
#define TIME_NAME "get_cycles"
@ -1651,7 +1651,7 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf)
return union_desc;

dev_err(&intf->dev,
"Union descriptor to short (%d vs %zd\n)",
"Union descriptor too short (%d vs %zd)\n",
union_desc->bLength, sizeof(*union_desc));
return NULL;
}
@ -326,8 +326,6 @@ static int xenkbd_probe(struct xenbus_device *dev,
0, width, 0, 0);
input_set_abs_params(mtouch, ABS_MT_POSITION_Y,
0, height, 0, 0);
input_set_abs_params(mtouch, ABS_MT_PRESSURE,
0, 255, 0, 0);

ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT);
if (ret) {
@ -1613,7 +1613,7 @@ static int elantech_set_properties(struct elantech_data *etd)
case 5:
etd->hw_version = 3;
break;
case 6 ... 14:
case 6 ... 15:
etd->hw_version = 4;
break;
default:
@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include <linux/async.h>
#include <linux/i2c.h>
@ -1261,10 +1262,13 @@ static int elants_i2c_probe(struct i2c_client *client,
}

/*
* Systems using device tree should set up interrupt via DTS,
* the rest will use the default falling edge interrupts.
* Platform code (ACPI, DTS) should normally set up interrupt
* for us, but in case it did not let's fall back to using falling
* edge to be compatible with older Chromebooks.
*/
irqflags = client->dev.of_node ? 0 : IRQF_TRIGGER_FALLING;
irqflags = irq_get_trigger_type(client->irq);
if (!irqflags)
irqflags = IRQF_TRIGGER_FALLING;

error = devm_request_threaded_irq(&client->dev, client->irq,
NULL, elants_i2c_irq,
@ -10,8 +10,7 @@
#include <linux/of.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
@ -1698,13 +1698,15 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
domain->geometry.aperture_end = (1UL << ias) - 1;
domain->geometry.force_aperture = true;
smmu_domain->pgtbl_ops = pgtbl_ops;

ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
if (ret < 0)
if (ret < 0) {
free_io_pgtable_ops(pgtbl_ops);
return ret;
}

return ret;
smmu_domain->pgtbl_ops = pgtbl_ops;
return 0;
}

static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
@ -1731,7 +1733,7 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)

static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
int i;
int i, j;
struct arm_smmu_master_data *master = fwspec->iommu_priv;
struct arm_smmu_device *smmu = master->smmu;

@ -1739,6 +1741,13 @@ static void arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
u32 sid = fwspec->ids[i];
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

/* Bridged PCI devices may end up with duplicated IDs */
for (j = 0; j < i; j++)
if (fwspec->ids[j] == sid)
break;
if (j < i)
continue;

arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
}
}
@ -186,8 +186,9 @@ void led_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_on,
unsigned long *delay_off)
{
led_stop_software_blink(led_cdev);
del_timer_sync(&led_cdev->blink_timer);

clear_bit(LED_BLINK_SW, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT, &led_cdev->work_flags);
clear_bit(LED_BLINK_ONESHOT_STOP, &led_cdev->work_flags);
@ -963,6 +963,7 @@ static void prepare_start_command(struct pxa3xx_nand_info *info, int command)

switch (command) {
case NAND_CMD_READ0:
case NAND_CMD_READOOB:
case NAND_CMD_PAGEPROG:
info->use_ecc = 1;
break;
@ -526,7 +526,7 @@ static int flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
data = be32_to_cpup((__be32 *)&cf->data[0]);
flexcan_write(data, &priv->tx_mb->data[0]);
}
if (cf->can_dlc > 3) {
if (cf->can_dlc > 4) {
data = be32_to_cpup((__be32 *)&cf->data[4]);
flexcan_write(data, &priv->tx_mb->data[1]);
}
@ -395,6 +395,7 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)

if (dev->can.state == CAN_STATE_ERROR_WARNING ||
dev->can.state == CAN_STATE_ERROR_PASSIVE) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = (txerr > rxerr) ?
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE;
}
@ -449,7 +449,7 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
rc);

return rc;
return (rc > 0) ? 0 : rc;
}

static void gs_usb_xmit_callback(struct urb *urb)
@ -194,7 +194,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,
tbp = peer_tb;
}

if (tbp[IFLA_IFNAME]) {
if (ifmp && tbp[IFLA_IFNAME]) {
nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
name_assign_type = NET_NAME_USER;
} else {
@ -1500,10 +1500,13 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds,
{
struct b53_device *dev = ds->priv;

/* Older models support a different tag format that we do not
* support in net/dsa/tag_brcm.c yet.
/* Older models (5325, 5365) support a different tag format that we do
* not support in net/dsa/tag_brcm.c yet. 539x and 531x5 require managed
* mode to be turned on which means we need to specifically manage ARL
* misses on multicast addresses (TBD).
*/
if (is5325(dev) || is5365(dev) || !b53_can_enable_brcm_tags(ds, port))
if (is5325(dev) || is5365(dev) || is539x(dev) || is531x5(dev) ||
!b53_can_enable_brcm_tags(ds, port))
return DSA_TAG_PROTO_NONE;

/* Broadcom BCM58xx chips have a flow accelerator on Port 8
@ -602,7 +602,7 @@ struct vortex_private {
struct sk_buff* rx_skbuff[RX_RING_SIZE];
struct sk_buff* tx_skbuff[TX_RING_SIZE];
unsigned int cur_rx, cur_tx; /* The next free ring entry */
unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
unsigned int dirty_tx; /* The ring entries to be free()ed. */
struct vortex_extra_stats xstats; /* NIC-specific extra stats */
struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
dma_addr_t tx_skb_dma; /* Allocated DMA address for bus master ctrl DMA. */
@ -618,7 +618,6 @@ struct vortex_private {

/* The remainder are related to chip state, mostly media selection. */
struct timer_list timer; /* Media selection timer. */
struct timer_list rx_oom_timer; /* Rx skb allocation retry timer */
int options; /* User-settable misc. driver options. */
unsigned int media_override:4, /* Passed-in media type. */
default_media:4, /* Read from the EEPROM/Wn3_Config. */
@ -760,7 +759,6 @@ static void mdio_sync(struct vortex_private *vp, int bits);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
static void vortex_timer(struct timer_list *t);
static void rx_oom_timer(struct timer_list *t);
static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
struct net_device *dev);
static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
@ -1601,7 +1599,6 @@ vortex_up(struct net_device *dev)

timer_setup(&vp->timer, vortex_timer, 0);
mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));
timer_setup(&vp->rx_oom_timer, rx_oom_timer, 0);

if (vortex_debug > 1)
pr_debug("%s: Initial media type %s.\n",
@ -1676,7 +1673,7 @@ vortex_up(struct net_device *dev)
window_write16(vp, 0x0040, 4, Wn4_NetDiag);

if (vp->full_bus_master_rx) { /* Boomerang bus master. */
vp->cur_rx = vp->dirty_rx = 0;
vp->cur_rx = 0;
/* Initialize the RxEarly register as recommended. */
iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
iowrite32(0x0020, ioaddr + PktStatus);
@ -1729,6 +1726,7 @@ vortex_open(struct net_device *dev)
struct vortex_private *vp = netdev_priv(dev);
int i;
int retval;
dma_addr_t dma;

/* Use the now-standard shared IRQ implementation. */
if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
@ -1753,7 +1751,11 @@ vortex_open(struct net_device *dev)
break; /* Bad news! */

skb_reserve(skb, NET_IP_ALIGN); /* Align IP on 16 byte boundaries */
vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
dma = pci_map_single(VORTEX_PCI(vp), skb->data,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, dma))
break;
vp->rx_ring[i].addr = cpu_to_le32(dma);
}
if (i != RX_RING_SIZE) {
pr_emerg("%s: no memory for rx ring\n", dev->name);
@ -2067,6 +2069,12 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
int len = (skb->len + 3) & ~3;
vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len,
PCI_DMA_TODEVICE);
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, vp->tx_skb_dma)) {
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}

spin_lock_irq(&vp->window_lock);
window_set(vp, 7);
iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
@ -2593,7 +2601,7 @@ boomerang_rx(struct net_device *dev)
int entry = vp->cur_rx % RX_RING_SIZE;
void __iomem *ioaddr = vp->ioaddr;
int rx_status;
int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
int rx_work_limit = RX_RING_SIZE;

if (vortex_debug > 5)
pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@ -2614,7 +2622,8 @@ boomerang_rx(struct net_device *dev)
} else {
/* The packet length: up to 4.5K!. */
int pkt_len = rx_status & 0x1fff;
struct sk_buff *skb;
struct sk_buff *skb, *newskb;
dma_addr_t newdma;
dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);

if (vortex_debug > 4)
@ -2633,9 +2642,27 @@ boomerang_rx(struct net_device *dev)
pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_copy++;
} else {
/* Pre-allocate the replacement skb. If it or its
* mapping fails then recycle the buffer thats already
* in place
*/
newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
if (!newskb) {
dev->stats.rx_dropped++;
goto clear_complete;
}
newdma = pci_map_single(VORTEX_PCI(vp), newskb->data,
PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&VORTEX_PCI(vp)->dev, newdma)) {
dev->stats.rx_dropped++;
consume_skb(newskb);
goto clear_complete;
}

/* Pass up the skbuff already on the Rx ring. */
skb = vp->rx_skbuff[entry];
vp->rx_skbuff[entry] = NULL;
vp->rx_skbuff[entry] = newskb;
vp->rx_ring[entry].addr = cpu_to_le32(newdma);
skb_put(skb, pkt_len);
pci_unmap_single(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
vp->rx_nocopy++;
@ -2653,55 +2680,15 @@ boomerang_rx(struct net_device *dev)
netif_rx(skb);
dev->stats.rx_packets++;
}
entry = (++vp->cur_rx) % RX_RING_SIZE;
}
/* Refill the Rx ring buffers. */
for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
struct sk_buff *skb;
entry = vp->dirty_rx % RX_RING_SIZE;
if (vp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
if (skb == NULL) {
static unsigned long last_jif;
if (time_after(jiffies, last_jif + 10 * HZ)) {
pr_warn("%s: memory shortage\n",
dev->name);
last_jif = jiffies;
}
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
break; /* Bad news! */
}

vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
vp->rx_skbuff[entry] = skb;
}
clear_complete:
vp->rx_ring[entry].status = 0; /* Clear complete bit. */
iowrite16(UpUnstall, ioaddr + EL3_CMD);
entry = (++vp->cur_rx) % RX_RING_SIZE;
}
return 0;
}

/*
* If we've hit a total OOM refilling the Rx ring we poll once a second
* for some memory. Otherwise there is no way to restart the rx process.
*/
static void
rx_oom_timer(struct timer_list *t)
{
struct vortex_private *vp = from_timer(vp, t, rx_oom_timer);
struct net_device *dev = vp->mii.dev;

spin_lock_irq(&vp->lock);
if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) /* This test is redundant, but makes me feel good */
boomerang_rx(dev);
if (vortex_debug > 1) {
pr_debug("%s: rx_oom_timer %s\n", dev->name,
((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
}
spin_unlock_irq(&vp->lock);
}

static void
vortex_down(struct net_device *dev, int final_down)
{
@ -2711,7 +2698,6 @@ vortex_down(struct net_device *dev, int final_down)
netdev_reset_queue(dev);
netif_stop_queue(dev);

del_timer_sync(&vp->rx_oom_timer);
del_timer_sync(&vp->timer);

/* Turn off statistics ASAP. We update dev->stats below. */
@ -75,6 +75,9 @@ static struct workqueue_struct *ena_wq;
MODULE_DEVICE_TABLE(pci, ena_pci_tbl);

static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter);
static int ena_restore_device(struct ena_adapter *adapter);

static void ena_tx_timeout(struct net_device *dev)
{
@ -1565,7 +1568,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)

static int ena_up_complete(struct ena_adapter *adapter)
{
int rc, i;
int rc;

rc = ena_rss_configure(adapter);
if (rc)
@ -1584,17 +1587,6 @@ static int ena_up_complete(struct ena_adapter *adapter)

ena_napi_enable_all(adapter);

/* Enable completion queues interrupt */
for (i = 0; i < adapter->num_queues; i++)
ena_unmask_interrupt(&adapter->tx_ring[i],
&adapter->rx_ring[i]);

/* schedule napi in case we had pending packets
* from the last time we disable napi
*/
for (i = 0; i < adapter->num_queues; i++)
napi_schedule(&adapter->ena_napi[i].napi);

return 0;
}

@ -1731,7 +1723,7 @@ create_err:

static int ena_up(struct ena_adapter *adapter)
{
int rc;
int rc, i;

netdev_dbg(adapter->netdev, "%s\n", __func__);

@ -1774,6 +1766,17 @@ static int ena_up(struct ena_adapter *adapter)

set_bit(ENA_FLAG_DEV_UP, &adapter->flags);

/* Enable completion queues interrupt */
for (i = 0; i < adapter->num_queues; i++)
ena_unmask_interrupt(&adapter->tx_ring[i],
&adapter->rx_ring[i]);

/* schedule napi in case we had pending packets
* from the last time we disable napi
*/
for (i = 0; i < adapter->num_queues; i++)
napi_schedule(&adapter->ena_napi[i].napi);

return rc;

err_up:
@ -1884,6 +1887,17 @@ static int ena_close(struct net_device *netdev)
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);

/* Check for device status and issue reset if needed*/
check_for_admin_com_state(adapter);
if (unlikely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
netif_err(adapter, ifdown, adapter->netdev,
"Destroy failure, restarting device\n");
ena_dump_stats_to_dmesg(adapter);
/* rtnl lock already obtained in dev_ioctl() layer */
ena_destroy_device(adapter);
ena_restore_device(adapter);
}

return 0;
}

@ -2544,11 +2558,12 @@ static void ena_destroy_device(struct ena_adapter *adapter)

ena_com_set_admin_running_state(ena_dev, false);

ena_close(netdev);
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);

/* Before releasing the ENA resources, a device reset is required.
* (to prevent the device from accessing them).
* In case the reset flag is set and the device is up, ena_close
* In case the reset flag is set and the device is up, ena_down()
* already perform the reset, so it can be skipped.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
@ -70,7 +70,7 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
netdev_err(bp->dev, "vf ndo called though sriov is disabled\n");
return -EINVAL;
}
if (vf_id >= bp->pf.max_vfs) {
if (vf_id >= bp->pf.active_vfs) {
netdev_err(bp->dev, "Invalid VF id %d\n", vf_id);
return -EINVAL;
}
@ -421,7 +421,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
}

/* If all IP and L4 fields are wildcarded then this is an L2 flow */
if (is_wildcard(&l3_mask, sizeof(l3_mask)) &&
if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
} else {
@ -344,7 +344,6 @@ struct adapter_params {

unsigned int sf_size; /* serial flash size in bytes */
unsigned int sf_nsec; /* # of flash sectors */
unsigned int sf_fw_start; /* start of FW image in flash */

unsigned int fw_vers; /* firmware version */
unsigned int bs_vers; /* bootstrap version */
@ -2844,8 +2844,6 @@ enum {
SF_RD_DATA_FAST = 0xb, /* read flash */
SF_RD_ID = 0x9f, /* read ID */
SF_ERASE_SECTOR = 0xd8, /* erase sector */

FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};

/**
@ -3558,8 +3556,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
const __be32 *p = (const __be32 *)fw_data;
const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
unsigned int fw_img_start = adap->params.sf_fw_start;
unsigned int fw_start_sec = fw_img_start / sf_sec_size;
unsigned int fw_start_sec = FLASH_FW_START_SEC;
unsigned int fw_size = FLASH_FW_MAX_SIZE;
unsigned int fw_start = FLASH_FW_START;

if (!size) {
dev_err(adap->pdev_dev, "FW image has no data\n");
@ -3575,9 +3574,9 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
"FW image size differs from size in FW header\n");
return -EINVAL;
}
if (size > FW_MAX_SIZE) {
if (size > fw_size) {
dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
FW_MAX_SIZE);
fw_size);
return -EFBIG;
}
if (!t4_fw_matches_chip(adap, hdr))
@ -3604,11 +3603,11 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
*/
memcpy(first_page, fw_data, SF_PAGE_SIZE);
((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
if (ret)
goto out;

addr = fw_img_start;
addr = fw_start;
for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
addr += SF_PAGE_SIZE;
fw_data += SF_PAGE_SIZE;
@ -3618,7 +3617,7 @@ int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
}

ret = t4_write_flash(adap,
fw_img_start + offsetof(struct fw_hdr, fw_ver),
fw_start + offsetof(struct fw_hdr, fw_ver),
sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
if (ret)
@ -3469,6 +3469,10 @@ fec_probe(struct platform_device *pdev)
goto failed_regulator;
}
} else {
if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto failed_regulator;
}
fep->reg_phy = NULL;
}

@ -3552,8 +3556,9 @@ failed_clk_ipg:
failed_clk:
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
failed_phy:
of_node_put(phy_node);
failed_phy:
dev_id--;
failed_ioremap:
free_netdev(ndev);
@ -319,11 +319,10 @@ static int ptp_gianfar_adjtime(struct ptp_clock_info *ptp, s64 delta)
now = tmr_cnt_read(etsects);
now += delta;
tmr_cnt_write(etsects, now);
set_fipers(etsects);

spin_unlock_irqrestore(&etsects->lock, flags);

set_fipers(etsects);

return 0;
}
@ -331,7 +331,8 @@ struct e1000_adapter {
enum e1000_state_t {
__E1000_TESTING,
__E1000_RESETTING,
__E1000_DOWN
__E1000_DOWN,
__E1000_DISABLED
};

#undef pr_fmt
@ -945,7 +945,7 @@ static int e1000_init_hw_struct(struct e1000_adapter *adapter,
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *netdev;
struct e1000_adapter *adapter;
struct e1000_adapter *adapter = NULL;
struct e1000_hw *hw;

static int cards_found;
@ -955,6 +955,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
u16 tmp = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
int bars, need_ioport;
bool disable_dev = false;

/* do not allocate ioport bars when not needed */
need_ioport = e1000_is_need_ioport(pdev);
@ -1259,11 +1260,13 @@ err_mdio_ioremap:
iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
pci_disable_device(pdev);
if (!adapter || disable_dev)
pci_disable_device(pdev);
return err;
}

@ -1281,6 +1284,7 @@ static void e1000_remove(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
bool disable_dev;

e1000_down_and_stop(adapter);
e1000_release_manageability(adapter);
@ -1299,9 +1303,11 @@ static void e1000_remove(struct pci_dev *pdev)
iounmap(hw->flash_address);
pci_release_selected_regions(pdev, adapter->bars);

disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags);
free_netdev(netdev);

pci_disable_device(pdev);
if (disable_dev)
pci_disable_device(pdev);
}

/**
@ -5156,7 +5162,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);

pci_disable_device(pdev);
if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
pci_disable_device(pdev);

return 0;
}
@ -5200,6 +5207,10 @@ static int e1000_resume(struct pci_dev *pdev)
pr_err("Cannot enable PCI device from suspend\n");
return err;
}

/* flush memory to make sure state is correct */
smp_mb__before_atomic();
clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);

pci_enable_wake(pdev, PCI_D3hot, 0);
@ -5274,7 +5285,9 @@ static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,

if (netif_running(netdev))
e1000_down(adapter);
pci_disable_device(pdev);

if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags))
pci_disable_device(pdev);

/* Request a slot slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@ -5302,6 +5315,10 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
pr_err("Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}

/* flush memory to make sure state is correct */
smp_mb__before_atomic();
clear_bit(__E1000_DISABLED, &adapter->flags);
pci_set_master(pdev);

pci_enable_wake(pdev, PCI_D3hot, 0);
@ -1367,6 +1367,9 @@ out:
* Checks to see of the link status of the hardware has changed. If a
* change in link status has been detected, then we read the PHY registers
* to get the current speed/duplex if link exists.
*
* Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
* up).
**/
static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
{
@ -1382,7 +1385,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* Change or Rx Sequence Error interrupt.
*/
if (!mac->get_link_status)
return 0;
return 1;

/* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
@ -1613,10 +1616,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
* different link partner.
*/
ret_val = e1000e_config_fc_after_link_up(hw);
if (ret_val)
if (ret_val) {
e_dbg("Error configuring flow control\n");
return ret_val;
}

return ret_val;
return 1;
}

static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);

/* Copy the address first, so that we avoid a possible race with
* .set_rx_mode(). If we copy after changing the address in the filter
* list, we might open ourselves to a narrow race window where
* .set_rx_mode could delete our dev_addr filter and prevent traffic
* from passing.
*/
ether_addr_copy(netdev->dev_addr, addr->sa_data);

spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
i40e_add_mac_filter(vsi, addr->sa_data);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;

@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;

/* Under some circumstances, we might receive a request to delete
* our own device address from our uc list. Because we store the
* device address in the VSI's MAC/VLAN filter list, we need to ignore
* such requests and not delete our device address from this list.
*/
if (ether_addr_equal(addr, netdev->dev_addr))
return 0;

i40e_del_mac_filter(vsi, addr);

return 0;
@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
/* Set Bit 7 to be valid */
mode = I40E_AQ_SET_SWITCH_BIT7_VALID;

/* Set L4type to both TCP and UDP support */
mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
/* Set L4type for TCP support */
mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;

/* Set cloud filter mode */
mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_valid_ether_addr(filter->src_mac)) ||
(is_multicast_ether_addr(filter->dst_mac) &&
is_multicast_ether_addr(filter->src_mac)))
return -EINVAL;
return -EOPNOTSUPP;

/* Make sure port is specified, otherwise bail out, for channel
* specific cloud filter needs 'L4 port' to be non-zero
/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
* ports are not supported via big buffer now.
*/
if (!filter->dst_port)
return -EINVAL;
if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
return -EOPNOTSUPP;

/* adding filter using src_port/src_ip is not supported at this stage */
if (filter->src_port || filter->src_ipv4 ||
!ipv6_addr_any(&filter->ip.v6.src_ip6))
return -EINVAL;
return -EOPNOTSUPP;

/* copy element needed to add cloud filter from filter */
i40e_set_cld_element(filter, &cld_filter.element);
@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
is_multicast_ether_addr(filter->src_mac)) {
/* MAC + IP : unsupported mode */
if (filter->dst_ipv4)
return -EINVAL;
return -EOPNOTSUPP;

/* since we validated that L4 port must be valid before
* we get here, start with respective "flags" value
@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,

if (tc < 0) {
dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
return -EINVAL;
return -EOPNOTSUPP;
}

if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
@ -3047,10 +3047,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
int stale_size = skb_frag_size(stale);

sum += skb_frag_size(frag++);

/* The stale fragment may present us with a smaller
* descriptor than the actual fragment size. To account
* for that we need to remove all the data on the front and
* figure out what the remainder would be in the last
* descriptor associated with the fragment.
*/
if (stale_size > I40E_MAX_DATA_PER_TXD) {
int align_pad = -(stale->page_offset) &
(I40E_MAX_READ_REQ_SIZE - 1);

sum -= align_pad;
stale_size -= align_pad;

do {
sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
} while (stale_size > I40E_MAX_DATA_PER_TXD);
}

/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@ -3058,7 +3078,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;

sum -= skb_frag_size(stale++);
sum -= stale_size;
}

return false;
@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
int stale_size = skb_frag_size(stale);

sum += skb_frag_size(frag++);

/* The stale fragment may present us with a smaller
* descriptor than the actual fragment size. To account
* for that we need to remove all the data on the front and
* figure out what the remainder would be in the last
* descriptor associated with the fragment.
*/
if (stale_size > I40E_MAX_DATA_PER_TXD) {
int align_pad = -(stale->page_offset) &
(I40E_MAX_READ_REQ_SIZE - 1);

sum -= align_pad;
stale_size -= align_pad;

do {
sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
} while (stale_size > I40E_MAX_DATA_PER_TXD);
}

/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
if (!nr_frags--)
break;

sum -= skb_frag_size(stale++);
sum -= stale_size;
}

return false;
@ -1643,7 +1643,12 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
return 0;
}

wmb(); /* reset needs to be written before we read control register */
/* Reset needs to be written before we read control register, and
* we must wait for the HW to become responsive once again
*/
wmb();
msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
do {
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
@ -59,6 +59,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 5000
#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
#define MLXSW_PCI_FW_READY_MAGIC 0x5E
@ -4376,7 +4376,10 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
}
if (!info->linking)
break;
if (netdev_has_any_upper_dev(upper_dev)) {
if (netdev_has_any_upper_dev(upper_dev) &&
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
NL_SET_ERR_MSG(extack,
"spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
@ -4504,6 +4507,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct netdev_notifier_changeupper_info *info = ptr;
struct netlink_ext_ack *extack;
struct net_device *upper_dev;
@ -4520,7 +4524,10 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
}
if (!info->linking)
break;
if (netdev_has_any_upper_dev(upper_dev)) {
if (netdev_has_any_upper_dev(upper_dev) &&
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
@ -365,6 +365,8 @@ int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *brport_dev,
struct net_device *br_dev);
bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev);

/* spectrum.c */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
@ -46,7 +46,8 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
u32 probability, bool is_ecn)
{
char cwtp_cmd[max_t(u8, MLXSW_REG_CWTP_LEN, MLXSW_REG_CWTPM_LEN)];
char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
char cwtp_cmd[MLXSW_REG_CWTP_LEN];
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int err;

@ -60,10 +61,10 @@ mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
if (err)
return err;

mlxsw_reg_cwtpm_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num,
mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);

return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtp_cmd);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static int
@ -3228,7 +3228,7 @@ static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
{
if (!removing)
nh->should_offload = 1;
else if (nh->offloaded)
else
nh->should_offload = 0;
nh->update = 1;
}
@ -152,6 +152,12 @@ mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
return NULL;
}

bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *br_dev)
{
return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
}

static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
struct net_device *br_dev)
@ -568,6 +568,7 @@ nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
return err;
}
nn_writeb(nn, ctrl_offset, entry->entry);
nfp_net_irq_unmask(nn, entry->entry);

return 0;
}
@ -582,6 +583,7 @@ static void nfp_net_aux_irq_free(struct nfp_net *nn, u32 ctrl_offset,
unsigned int vector_idx)
{
nn_writeb(nn, ctrl_offset, 0xff);
nn_pci_flush(nn);
free_irq(nn->irq_entries[vector_idx].vector, nn);
}
@ -147,7 +147,7 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
[TXALCR1] = 0x00a0,
[TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@ -399,7 +399,7 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[FWNLCR0] = 0x0090,
[FWALCR0] = 0x0094,
[TXNLCR1] = 0x00a0,
[TXALCR1] = 0x00a0,
[TXALCR1] = 0x00a4,
[RXNLCR1] = 0x00a8,
[RXALCR1] = 0x00ac,
[FWNLCR1] = 0x00b0,
@ -3225,18 +3225,37 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* ioremap the TSU registers */
if (mdp->cd->tsu) {
struct resource *rtsu;

rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
if (IS_ERR(mdp->tsu_addr)) {
ret = PTR_ERR(mdp->tsu_addr);
if (!rtsu) {
dev_err(&pdev->dev, "no TSU resource\n");
ret = -ENODEV;
goto out_release;
}
/* We can only request the TSU region for the first port
* of the two sharing this TSU for the probe to succeed...
*/
if (devno % 2 == 0 &&
!devm_request_mem_region(&pdev->dev, rtsu->start,
resource_size(rtsu),
dev_name(&pdev->dev))) {
dev_err(&pdev->dev, "can't request TSU resource.\n");
ret = -EBUSY;
goto out_release;
}
mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
resource_size(rtsu));
if (!mdp->tsu_addr) {
dev_err(&pdev->dev, "TSU region ioremap() failed.\n");
ret = -ENOMEM;
goto out_release;
}
mdp->port = devno % 2;
ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
}

/* initialize first or needed device */
if (!devno || pd->needs_init) {
/* Need to init only the first port of the two sharing a TSU */
if (devno % 2 == 0) {
if (mdp->cd->chip_reset)
mdp->cd->chip_reset(ndev);
@ -364,9 +364,15 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
bool stmmac_eee_init(struct stmmac_priv *priv)
{
struct net_device *ndev = priv->dev;
int interface = priv->plat->interface;
unsigned long flags;
bool ret = false;

if ((interface != PHY_INTERFACE_MODE_MII) &&
(interface != PHY_INTERFACE_MODE_GMII) &&
!phy_interface_mode_is_rgmii(interface))
goto out;

/* Using PCS we cannot dial with the phy registers at this stage
* so we do not support extra feature like EEE.
*/
@ -825,6 +825,13 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(rt))
return PTR_ERR(rt);

if (skb_dst(skb)) {
int mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr) -
GENEVE_BASE_HLEN - info->options_len - 14;

skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
}

sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
@ -864,6 +871,13 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (IS_ERR(dst))
return PTR_ERR(dst);

if (skb_dst(skb)) {
int mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) -
GENEVE_BASE_HLEN - info->options_len - 14;

skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
}

sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
if (geneve->collect_md) {
prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);