/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
* Author: Tony Xie <tony.xie@rock-chips.com>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/page.h>

	.data
/*
 * This code will be copied from DDR to SRAM for system resuming,
 * so it lives in the ".data" section.
 */
	.align 2
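/*
 * As a minimal sketch (not a verbatim copy of the kernel's pm code) of
 * how the comment above plays out: before suspend, the kernel memcpy()s
 * this blob into SRAM, using the rk3288_bootram_sz word emitted at the
 * end of this file as the length. `sram_base` is an assumed, already
 * ioremapped SRAM mapping, not something defined here:
 *
 *	extern char rockchip_slp_cpu_resume[];
 *	extern const unsigned long rk3288_bootram_sz;
 *
 *	static void copy_resume_blob(void __iomem *sram_base)
 *	{
 *		// the blob must run from SRAM because DDR is still in
 *		// self-refresh when the CPU is released on wake
 *		memcpy((void __force *)sram_base, rockchip_slp_cpu_resume,
 *		       rk3288_bootram_sz);
 *	}
 */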
ENTRY(rockchip_slp_cpu_resume)
setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set svc, irqs off
	mrc	p15, 0, r1, c0, c0, 5	@ read MPIDR
	and	r1, r1, #0xf		@ keep the CPU ID (affinity level 0)
cmp r1, #0
	/* only CPU0 may continue to run; the other CPUs halt here */
beq cpu0run
secondary_loop:
	wfe				@ wait here until an event wakes us
b secondary_loop
cpu0run:
	/* restore the saved L2CTLR value, if one was stored */
	ldr	r3, rkpm_bootdata_l2ctlr_f
cmp r3, #0
beq sp_set
ldr r3, rkpm_bootdata_l2ctlr
	mcr	p15, 1, r3, c9, c0, 2	@ write L2CTLR
sp_set:
	ldr	sp, rkpm_bootdata_cpusp	@ stack for the resume path
ldr r1, rkpm_bootdata_cpu_code
	bx	r1			@ jump to the kernel resume entry
ENDPROC(rockchip_slp_cpu_resume)

/* Parameters filled in by the kernel (see the sketch after these definitions) */
/* Flag for whether to restore L2CTLR on resume */
.global rkpm_bootdata_l2ctlr_f
rkpm_bootdata_l2ctlr_f:
	.long 0

/* Saved L2CTLR to restore on resume */
.global rkpm_bootdata_l2ctlr
rkpm_bootdata_l2ctlr:
	.long 0

/* CPU resume SP addr */
.globl rkpm_bootdata_cpusp
rkpm_bootdata_cpusp:
	.long 0

/* CPU resume function (physical address) */
.globl rkpm_bootdata_cpu_code
rkpm_bootdata_cpu_code:
	.long 0
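/*
 * A minimal sketch of the kernel-side code that fills in the four words
 * above before the blob is copied to SRAM (mirrors the mach-rockchip pm
 * code in spirit; `bootram_phys` and the 4 KiB SRAM size are assumptions
 * for illustration, not defined in this file):
 *
 *	extern unsigned long rkpm_bootdata_cpusp;
 *	extern unsigned long rkpm_bootdata_cpu_code;
 *	extern unsigned long rkpm_bootdata_l2ctlr_f;
 *	extern unsigned long rkpm_bootdata_l2ctlr;
 *
 *	static u32 read_l2ctlr(void)
 *	{
 *		u32 l2ctlr;
 *
 *		asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (l2ctlr));
 *		return l2ctlr;
 *	}
 *
 *	static void fill_bootdata(phys_addr_t bootram_phys)
 *	{
 *		// stack grows down from near the top of the SRAM copy
 *		rkpm_bootdata_cpusp = bootram_phys + SZ_4K - 8;
 *		// physical entry point the stub branches to via "bx r1"
 *		rkpm_bootdata_cpu_code = __pa_symbol(cpu_resume);
 *		// ask the stub to restore L2CTLR on the way back up
 *		rkpm_bootdata_l2ctlr_f = 1;
 *		rkpm_bootdata_l2ctlr = read_l2ctlr();
 *	}
 */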
ENTRY(rk3288_bootram_sz)
	.word	. - rockchip_slp_cpu_resume	@ size in bytes of the blob to copy to SRAM