Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS fixes from Ralf Baechle:
 "Here's a final round of fixes for 4.12:

   - Fix misordered instructions in assembly code making kernel startup
     via UHI unreliable.

   - Fix special case of MADDF and MSUBF emulation.

   - Fix alignment issue in address calculation in pm-cps on 64 bit.

   - Fix IRQ tracing & lockdep when rescheduling.

   - Systems with MAARs require post-DMA cache flushes.

  The reordering fix and the MADDF/MSUBF fix have sat in linux-next for a
  number of days. The others haven't propagated from my pull tree to
  linux-next yet, but all have survived manual testing and Imagination's
  automated test system, and there are no pending bug reports."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: Avoid accidental raw backtrace
  MIPS: Perform post-DMA cache flushes on systems with MAARs
  MIPS: Fix IRQ tracing & lockdep when rescheduling
  MIPS: pm-cps: Drop manual cache-line alignment of ready_count
  MIPS: math-emu: Handle zero accumulator case in MADDF and MSUBF separately
  MIPS: head: Reorder instructions missing a delay slot
commit 79c4968169
arch/mips/kernel/entry.S

@@ -11,6 +11,7 @@
 #include <asm/asm.h>
 #include <asm/asmmacro.h>
 #include <asm/compiler.h>
+#include <asm/irqflags.h>
 #include <asm/regdef.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
@@ -119,6 +120,7 @@ work_pending:
 	andi	t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
 	beqz	t0, work_notifysig
 work_resched:
+	TRACE_IRQS_OFF
 	jal	schedule

 	local_irq_disable		# make sure need_resched and
@@ -155,6 +157,7 @@ syscall_exit_work:
 	beqz	t0, work_pending	# trace bit set?
 	local_irq_enable		# could let syscall_trace_leave()
 					# call schedule() instead
+	TRACE_IRQS_ON
 	move	a0, sp
 	jal	syscall_trace_leave
 	b	resume_userspace
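The two TRACE_IRQS_* additions above keep lockdep's notion of the interrupt state in sync with what the low-level entry code actually does. Below is a minimal C-level sketch of that pairing rule, illustrative only and not taken from this patch: raw_local_irq_*() changes the hardware state without telling lockdep, so trace_hardirqs_off()/on() has to be called explicitly, which is roughly what the TRACE_IRQS_OFF/ON asm macros do when CONFIG_TRACE_IRQFLAGS is enabled.

    #include <linux/irqflags.h>

    /* Hypothetical helper, for illustration only. */
    static void irq_off_section_annotated(void)
    {
            raw_local_irq_disable();        /* hardware IE bit cleared... */
            trace_hardirqs_off();           /* ...so tell lockdep about it */

            /* ... work done with IRQs hard-disabled ... */

            trace_hardirqs_on();            /* about to re-enable... */
            raw_local_irq_enable();         /* ...now actually re-enable */
    }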
arch/mips/kernel/head.S

@@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp)		# kernel entry point
 	beq	t0, t1, dtb_found
 #endif
-	li	t1, -2
-	beq	a0, t1, dtb_found
 	move	t2, a1
+	li	t1, -2
+	beq	a0, t1, dtb_found

 	li	t2, 0
 dtb_found:
arch/mips/kernel/pm-cps.c

@@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
  * state. Actually per-core rather than per-CPU.
  */
 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);

 /* Indicates online CPUs coupled with the current CPU */
 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu)
 {
 	enum cps_pm_state state;
 	unsigned core = cpu_data[cpu].core;
-	unsigned dlinesz = cpu_data[cpu].dcache.linesz;
 	void *entry_fn, *core_rc;

 	for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu)
 	}

 	if (!per_cpu(ready_count, core)) {
-		core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+		core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
 		if (!core_rc) {
 			pr_err("Failed allocate core %u ready_count\n", core);
 			return -ENOMEM;
 		}
-		per_cpu(ready_count_alloc, core) = core_rc;
-
-		/* Ensure ready_count is aligned to a cacheline boundary */
-		core_rc += dlinesz - 1;
-		core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
 		per_cpu(ready_count, core) = core_rc;
 	}

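For context on the hunk above: the removed lines over-allocated twice the d-cache line size and rounded the returned pointer up to a line boundary by hand, while the replacement simply kmalloc()s a single u32. A small user-space sketch of the manual align-up idiom being dropped, with made-up names and an assumed 32-byte line size, just to show the arithmetic:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            size_t line = 32;                 /* assumed cache-line size */
            void *buf = malloc(line * 2);     /* over-allocate a full extra line */
            uintptr_t p = (uintptr_t)buf;

            /* Round up to the next line boundary, as the removed code did. */
            p = (p + line - 1) & ~(uintptr_t)(line - 1);

            printf("raw %p, line-aligned %#lx\n", buf, (unsigned long)p);
            free(buf);
            return 0;
    }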
arch/mips/kernel/traps.c

@@ -201,6 +201,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 {
 	struct pt_regs regs;
 	mm_segment_t old_fs = get_fs();
+
+	regs.cp0_status = KSU_KERNEL;
 	if (sp) {
 		regs.regs[29] = (unsigned long)sp;
 		regs.regs[31] = 0;
arch/mips/math-emu/dp_maddf.c

@@ -54,7 +54,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 		return ieee754dp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		DPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}

 	switch (CLPAIR(xc, yc)) {
@@ -210,6 +210,9 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
 	}
 	assert(rm & (DP_HIDDEN_BIT << 3));

+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754dp_format(rs, re, rm);
+
 	/* And now the addition */
 	assert(zm & DP_HIDDEN_BIT);

arch/mips/math-emu/sp_maddf.c

@@ -54,7 +54,7 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 		return ieee754sp_nanxcpt(z);
 	case IEEE754_CLASS_DNORM:
 		SPDNORMZ;
-	/* QNAN is handled separately below */
+	/* QNAN and ZERO cases are handled separately below */
 	}

 	switch (CLPAIR(xc, yc)) {
@@ -203,6 +203,9 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
 	}
 	assert(rm & (SP_HIDDEN_BIT << 3));

+	if (zc == IEEE754_CLASS_ZERO)
+		return ieee754sp_format(rs, re, rm);
+
 	/* And now the addition */

 	assert(zm & SP_HIDDEN_BIT);
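The dp_maddf/sp_maddf hunks above special-case a zero accumulator: once the product x*y has been computed and normalised there is nothing left to add, so the emulator can format the product directly instead of falling into the wide-add path. A user-space sketch of the numerical point only (this is ordinary C fma(), not the emulator code; link with -lm): with z == 0, a fused multiply-add reduces to the rounded product, up to sign-of-zero corner cases.

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
            double x = 1.0000000000000002;  /* 1 + 2^-52 */
            double y = 3.0;
            double z = 0.0;

            /* With a zero accumulator both expressions round to the same value. */
            printf("fma(x, y, z) = %.17g\n", fma(x, y, z));
            printf("x * y        = %.17g\n", x * y);
            return 0;
    }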
arch/mips/mm/dma-default.c

@@ -68,12 +68,25 @@ static inline struct page *dma_addr_to_page(struct device *dev,
  * systems and only the R10000 and R12000 are used in such systems, the
  * SGI IP28 Indigo² rsp. SGI IP32 aka O2.
  */
-static inline int cpu_needs_post_dma_flush(struct device *dev)
+static inline bool cpu_needs_post_dma_flush(struct device *dev)
 {
-	return !plat_device_is_coherent(dev) &&
-	       (boot_cpu_type() == CPU_R10000 ||
-		boot_cpu_type() == CPU_R12000 ||
-		boot_cpu_type() == CPU_BMIPS5000);
+	if (plat_device_is_coherent(dev))
+		return false;
+
+	switch (boot_cpu_type()) {
+	case CPU_R10000:
+	case CPU_R12000:
+	case CPU_BMIPS5000:
+		return true;
+
+	default:
+		/*
+		 * Presence of MAARs suggests that the CPU supports
+		 * speculatively prefetching data, and therefore requires
+		 * the post-DMA flush/invalidate.
+		 */
+		return cpu_has_maar;
+	}
 }

 static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
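The switch above widens the post-DMA flush requirement from a fixed CPU list to any CPU advertising MAARs, since MAARs imply speculative prefetching that can pull stale lines over a buffer a device is still writing. A hedged driver-side sketch of where that flush actually bites, using the standard streaming-DMA API (the function and flow below are hypothetical, not from this patch):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Hypothetical receive path, illustrative only. */
    static int example_rx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... point the device at 'handle' and wait for it to finish ... */

            /*
             * On a non-coherent system this unmap is where the architecture
             * code invalidates the cache lines covering 'buf', i.e. the
             * post-DMA flush that cpu_needs_post_dma_flush() decides on.
             */
            dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
            return 0;
    }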