/*
 *  linux/arch/arm/vfp/vfpmodule.c
 *
 *  Copyright (C) 2004 ARM Limited.
 *  Written by Deep Blue Solutions Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/user.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/system_info.h>
#include <asm/thread_notify.h>
#include <asm/vfp.h>

#include "vfpinstr.h"
#include "vfp.h"

/*
 * Our undef handlers (in entry.S)
 */
void vfp_testing_entry(void);
void vfp_support_entry(void);
void vfp_null_entry(void);

void (*vfp_vector)(void) = vfp_null_entry;

/*
 * Dual-use variable.
 * Used in startup: set to non-zero if VFP checks fail.
 * After startup, holds the VFP architecture.
 */
unsigned int VFP_arch;

/*
 * The pointer to the vfpstate structure of the thread which currently
 * owns the context held in the VFP hardware, or NULL if the hardware
 * context is invalid.
 *
 * For UP, this is sufficient to tell which thread owns the VFP context.
 * However, for SMP, we also need to check the CPU number stored in the
 * saved state to catch migrations.
 */
union vfp_state *vfp_current_hw_state[NR_CPUS];

/*
 * Is the thread's most up-to-date state stored in this CPU's hardware?
 * Must be called from non-preemptible context.
 */
static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
{
#ifdef CONFIG_SMP
	if (thread->vfpstate.hard.cpu != cpu)
		return false;
#endif
	return vfp_current_hw_state[cpu] == &thread->vfpstate;
}

/*
 * Force a reload of the VFP context from the thread structure.  We do
 * this by ensuring that access to the VFP hardware is disabled, and
 * clear vfp_current_hw_state.  Must be called from non-preemptible context.
 */
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
{
	if (vfp_state_in_hw(cpu, thread)) {
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
		vfp_current_hw_state[cpu] = NULL;
	}
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}
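/*
 * Usage sketch (illustrative only; the real callers live elsewhere in
 * the kernel, and the exact call sites are an assumption here).  Code
 * that is about to rewrite a thread's saved VFP state in memory must
 * first invalidate any live copy held in the hardware, so that the next
 * VFP use reloads the new values lazily:
 *
 *	unsigned int cpu = get_cpu();	// also disables preemption
 *	vfp_force_reload(cpu, thread);	// hw context now invalid
 *	// ... update thread->vfpstate.hard ...
 *	put_cpu();
 *
 * Without the forced reload, a later lazy save from the hardware could
 * overwrite the freshly written memory state.
 */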
/*
 * Per-thread VFP initialization.
 */
static void vfp_thread_flush(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu;

	/*
	 * Disable VFP to ensure we initialize it first.  We must ensure
	 * that the modification of vfp_current_hw_state[] and hardware
	 * disable are done for the same CPU and without preemption.
	 *
	 * Do this first to ensure that preemption won't overwrite our
	 * state saving should access to the VFP be enabled at this point.
	 */
	cpu = get_cpu();
	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	put_cpu();

	memset(vfp, 0, sizeof(union vfp_state));

	vfp->hard.fpexc = FPEXC_EN;
	vfp->hard.fpscr = FPSCR_ROUND_NEAREST;
#ifdef CONFIG_SMP
	vfp->hard.cpu = NR_CPUS;
#endif
}

/*
 * Per-thread VFP cleanup (the release case).
 */
static void vfp_thread_exit(struct thread_info *thread)
{
	union vfp_state *vfp = &thread->vfpstate;
	unsigned int cpu = get_cpu();

	if (vfp_current_hw_state[cpu] == vfp)
		vfp_current_hw_state[cpu] = NULL;
	put_cpu();
}

static void vfp_thread_copy(struct thread_info *thread)
{
	struct thread_info *parent = current_thread_info();

	vfp_sync_hwstate(parent);
	thread->vfpstate = parent->vfpstate;
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;
#endif
}

/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	u32 fpexc;
#ifdef CONFIG_SMP
	unsigned int cpu;
#endif

	switch (cmd) {
	case THREAD_NOTIFY_SWITCH:
		fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		break;

	case THREAD_NOTIFY_FLUSH:
		vfp_thread_flush(thread);
		break;

	case THREAD_NOTIFY_EXIT:
		vfp_thread_exit(thread);
		break;

	case THREAD_NOTIFY_COPY:
		vfp_thread_copy(thread);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block vfp_notifier_block = {
	.notifier_call	= vfp_notifier,
};
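/*
 * Registration sketch (for illustration; in the mainline file this is
 * done later, during VFP initialisation, so treat the exact call site
 * as an assumption).  The block above is hooked into the ARM thread
 * notifier chain, after which vfp_notifier() runs on every thread
 * switch/flush/exit/copy event:
 *
 *	thread_register_notifier(&vfp_notifier_block);
 */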
/*
 * Raise a SIGFPE for the current process.
 * sicode describes the signal being raised.
 */
static void vfp_raise_sigfpe(unsigned int sicode, struct pt_regs *regs)
{
	siginfo_t info;

	memset(&info, 0, sizeof(info));

	info.si_signo = SIGFPE;
	info.si_code = sicode;
	info.si_addr = (void __user *)(instruction_pointer(regs) - 4);

	/*
	 * This is the same as NWFPE, because it's not clear what
	 * this is used for.
	 */
	current->thread.error_code = 0;
	current->thread.trap_no = 6;

	send_sig_info(SIGFPE, &info, current);
}

/*
 * Dump the VFP state that accompanies an unrecoverable error.
 */
static void vfp_panic(char *reason, u32 inst)
{
	int i;

	pr_err("VFP: Error: %s\n", reason);
	pr_err("VFP: EXC 0x%08x SCR 0x%08x INST 0x%08x\n",
		fmrx(FPEXC), fmrx(FPSCR), inst);
	for (i = 0; i < 32; i += 2)
		pr_err("VFP: s%2u: 0x%08x s%2u: 0x%08x\n",
		       i, vfp_get_float(i), i+1, vfp_get_float(i+1));
}

/*
 * Process bitmask of exception conditions.
 */
static void vfp_raise_exceptions(u32 exceptions, u32 inst, u32 fpscr, struct pt_regs *regs)
{
	int si_code = 0;

	pr_debug("VFP: raising exceptions %08x\n", exceptions);

	if (exceptions == VFP_EXCEPTION_ERROR) {
		vfp_panic("unhandled bounce", inst);
		vfp_raise_sigfpe(0, regs);
		return;
	}

	/*
	 * If any of the status flags are set, update the FPSCR.
	 * Comparison instructions always return at least one of
	 * these flags set.
	 */
	if (exceptions & (FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V))
		fpscr &= ~(FPSCR_N|FPSCR_Z|FPSCR_C|FPSCR_V);

	fpscr |= exceptions;

	fmxr(FPSCR, fpscr);

#define RAISE(stat,en,sig)				\
	if (exceptions & stat && fpscr & en)		\
		si_code = sig;

	/*
	 * These are arranged in priority order, lowest to highest.
	 */
	RAISE(FPSCR_DZC, FPSCR_DZE, FPE_FLTDIV);
	RAISE(FPSCR_IXC, FPSCR_IXE, FPE_FLTRES);
	RAISE(FPSCR_UFC, FPSCR_UFE, FPE_FLTUND);
	RAISE(FPSCR_OFC, FPSCR_OFE, FPE_FLTOVF);
	RAISE(FPSCR_IOC, FPSCR_IOE, FPE_FLTINV);

	if (si_code)
		vfp_raise_sigfpe(si_code, regs);
}

/*
 * Emulate a VFP instruction.
 */
static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
{
	u32 exceptions = VFP_EXCEPTION_ERROR;

	pr_debug("VFP: emulate: INST=0x%08x SCR=0x%08x\n", inst, fpscr);

	if (INST_CPRTDO(inst)) {
		if (!INST_CPRT(inst)) {
			/*
			 * CPDO
			 */
			if (vfp_single(inst)) {
				exceptions = vfp_single_cpdo(inst, fpscr);
			} else {
				exceptions = vfp_double_cpdo(inst, fpscr);
			}
		} else {
			/*
			 * A CPRT instruction cannot appear in FPINST2, nor
			 * can it cause an exception.  Therefore, we do not