0ea820cf9b
This follows the x86 xstate changes and implements a task_xstate slab cache that is dynamically sized to match one of hard FP/soft FP/FPU-less.

This also tidies up and consolidates some of the SH-2A/SH-4 FPU fragmentation. Now FPU state restorers are commonly defined, with the init_fpu()/fpu_init() mess reworked to follow the x86 convention. The fpu_init() register initialization has been replaced by xstate setup followed by writing out to hardware via the standard restore path.

As init_fpu() now performs a slab allocation, a secondary lighter-weight restorer is also introduced for the context switch. In the future the DSP state will be rolled in here, too.

More work remains for math emulation and the SH-5 FPU, which presently uses its own special (UP-only) interfaces.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
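For orientation, here is a minimal sketch of how such a dynamically sized task_xstate slab cache could be set up at boot, assuming xstate_size has already been fixed to the hard-FP or soft-FP state size during CPU probe. The helper name sh_task_xstate_cache_init and the union thread_xstate type are illustrative assumptions, not necessarily the exact code this commit adds:

#include <linux/init.h>
#include <linux/slab.h>

struct kmem_cache *task_xstate_cachep;

/*
 * Illustrative sketch: create one slab cache sized to whichever FPU
 * state (hard FP, soft FP, or none) this CPU actually needs.
 * xstate_size is assumed to have been set during CPU probe.
 */
static void __init sh_task_xstate_cache_init(void)
{
	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC, NULL);
}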
83 lines
1.5 KiB
C
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/fpu.h>

int init_fpu(struct task_struct *tsk)
{
	if (tsk_used_math(tsk)) {
		if ((boot_cpu_data.flags & CPU_HAS_FPU) && tsk == current)
			unlazy_fpu(tsk, task_pt_regs(tsk));
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	if (!tsk->thread.xstate) {
		tsk->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!tsk->thread.xstate)
			return -ENOMEM;
	}

	if (boot_cpu_data.flags & CPU_HAS_FPU) {
		struct sh_fpu_hard_struct *fp = &tsk->thread.xstate->hardfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	} else {
		struct sh_fpu_soft_struct *fp = &tsk->thread.xstate->softfpu;
		memset(fp, 0, xstate_size);
		fp->fpscr = FPSCR_INIT;
	}

	set_stopped_child_used_math(tsk);
	return 0;
}

#ifdef CONFIG_SH_FPU
void __fpu_state_restore(void)
{
	struct task_struct *tsk = current;

	restore_fpu(tsk);

	task_thread_info(tsk)->status |= TS_USEDFPU;
	tsk->fpu_counter++;
}

void fpu_state_restore(struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	if (unlikely(!user_mode(regs))) {
		printk(KERN_ERR "BUG: FPU is used in kernel mode.\n");
		BUG();
		return;
	}

	if (!tsk_used_math(tsk)) {
		/*
		 * does a slab alloc which can sleep
		 */
		if (init_fpu(tsk)) {
			/*
			 * ran out of memory!
			 */
			do_group_exit(SIGKILL);
			return;
		}
	}

	grab_fpu(regs);

	__fpu_state_restore();
}

BUILD_TRAP_HANDLER(fpu_state_restore)
{
	TRAP_HANDLER_DECL;

	fpu_state_restore(regs);
}
#endif /* CONFIG_SH_FPU */
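The lighter-weight __fpu_state_restore() above omits the user-mode check and the potentially sleeping slab allocation, which is what makes it usable from the context-switch path once the xstate buffer already exists. A hedged sketch of how it might be used there, mirroring the x86-style fpu_counter preload heuristic; the wrapper name and the threshold of 5 are assumptions, not taken from this file:

/*
 * Illustrative only (assumed name and threshold): late in the context
 * switch, once the incoming task is already `current`, eagerly restore
 * its FPU state if it has been using the FPU in recent timeslices
 * (fpu_counter is incremented in __fpu_state_restore() above), so it
 * does not immediately take the FPU-disabled trap after being scheduled.
 */
static inline void sh_fpu_preload(struct task_struct *next)
{
	if (next->fpu_counter > 5)
		__fpu_state_restore();
}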