@@ -316,3 +316,181 @@ static u32 vfp_emulate_instruction(u32 inst, u32 fpscr, struct pt_regs *regs)
 			/*
 			 * A CPRT instruction can not appear in FPINST2, nor
 			 * can it cause an exception. Therefore, we do not
+			 * have to emulate it.
+			 */
+		}
+	} else {
+		/*
+		 * A CPDT instruction can not appear in FPINST2, nor can
+		 * it cause an exception. Therefore, we do not have to
+		 * emulate it.
+		 */
+	}
+	return exceptions & ~VFP_NAN_FLAG;
+}
+
+/*
+ * Package up a bounce condition.
+ */
+void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
+{
+	u32 fpscr, orig_fpscr, fpsid, exceptions;
+
+	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
+
+	/*
+	 * At this point, FPEXC can have the following configuration:
+	 *
+	 *  EX DEX IXE
+	 *  0   1   x   - synchronous exception
+	 *  1   x   0   - asynchronous exception
+	 *  1   x   1   - synchronous on VFP subarch 1 and asynchronous on later
+	 *  0   0   1   - synchronous on VFP9 (non-standard subarch 1
+	 *                implementation), undefined otherwise
+	 *
+	 * Clear various bits and enable access to the VFP so we can
+	 * handle the bounce.
+	 */
+	fmxr(FPEXC, fpexc & ~(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK));
+
+	fpsid = fmrx(FPSID);
+	orig_fpscr = fpscr = fmrx(FPSCR);
+
+	/*
+	 * Check for the special VFP subarch 1 and FPSCR.IXE bit case
+	 */
+	if ((fpsid & FPSID_ARCH_MASK) == (1 << FPSID_ARCH_BIT)
+	    && (fpscr & FPSCR_IXE)) {
+		/*
+		 * Synchronous exception, emulate the trigger instruction
+		 */
+		goto emulate;
+	}
+
+	if (fpexc & FPEXC_EX) {
+#ifndef CONFIG_CPU_FEROCEON
+		/*
+		 * Asynchronous exception. The instruction is read from FPINST
+		 * and the interrupted instruction has to be restarted.
+		 */
+		trigger = fmrx(FPINST);
+		regs->ARM_pc -= 4;
+#endif
+	} else if (!(fpexc & FPEXC_DEX)) {
+		/*
+		 * Illegal combination of bits. It can be caused by an
+		 * unallocated VFP instruction but with FPSCR.IXE set and not
+		 * on VFP subarch 1.
+		 */
+		vfp_raise_exceptions(VFP_EXCEPTION_ERROR, trigger, fpscr, regs);
+		goto exit;
+	}
+
+	/*
+	 * Modify fpscr to indicate the number of iterations remaining.
+	 * If FPEXC.EX is 0, FPEXC.DEX is 1 and the FPEXC.VV bit indicates
+	 * whether FPEXC.VECITR or FPSCR.LEN is used.
+	 */
+	if (fpexc & (FPEXC_EX | FPEXC_VV)) {
+		u32 len;
+
+		len = fpexc + (1 << FPEXC_LENGTH_BIT);
+
+		fpscr &= ~FPSCR_LENGTH_MASK;
+		fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
+	}
+
+	/*
+	 * Handle the first FP instruction. We used to take note of the
+	 * FPEXC bounce reason, but this appears to be unreliable.
+	 * Emulate the bounced instruction instead.
+	 */
+	exceptions = vfp_emulate_instruction(trigger, fpscr, regs);
+	if (exceptions)
+		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+
+	/*
+	 * If there isn't a second FP instruction, exit now. Note that
+	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
+	 */
+	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
+		goto exit;
+
+	/*
+	 * The barrier() here prevents fpinst2 being read
+	 * before the condition above.
+	 */
+	barrier();
+	trigger = fmrx(FPINST2);
+
+ emulate:
+	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
+	if (exceptions)
+		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
+ exit:
+	preempt_enable();
+}
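
The vector-length handling in VFP_bounce() above is easy to misread:
adding (1 << FPEXC_LENGTH_BIT) increments the VECITR field of the fpexc
copy by one, and the result is masked and shifted into the FPSCR.LEN
position so emulation resumes with the right iteration count. Below is a
minimal standalone sketch of that arithmetic, assuming the usual field
layouts (FPEXC[10:8] = VECITR, FPSCR[18:16] = LEN); the macro values are
illustrative, not quoted from this patch.

#include <stdint.h>
#include <stdio.h>

#define FPEXC_LENGTH_BIT	8
#define FPEXC_LENGTH_MASK	(7 << FPEXC_LENGTH_BIT)
#define FPSCR_LENGTH_BIT	16
#define FPSCR_LENGTH_MASK	(7 << FPSCR_LENGTH_BIT)

/* Increment VECITR and install it as FPSCR.LEN, as the patch does. */
static uint32_t transfer_len(uint32_t fpexc, uint32_t fpscr)
{
	uint32_t len = fpexc + (1 << FPEXC_LENGTH_BIT);

	fpscr &= ~FPSCR_LENGTH_MASK;
	fpscr |= (len & FPEXC_LENGTH_MASK) << (FPSCR_LENGTH_BIT - FPEXC_LENGTH_BIT);
	return fpscr;
}

int main(void)
{
	/* VECITR = 2 in FPEXC comes out as LEN = 3 in FPSCR. */
	uint32_t fpscr = transfer_len(2 << FPEXC_LENGTH_BIT, 0);

	printf("LEN = %u\n",
	       (unsigned)((fpscr & FPSCR_LENGTH_MASK) >> FPSCR_LENGTH_BIT));
	return 0;
}
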
+
+static void vfp_enable(void *unused)
+{
+	u32 access;
+
+	BUG_ON(preemptible());
+	access = get_copro_access();
+
+	/*
+	 * Enable full access to VFP (cp10 and cp11)
+	 */
+	set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
+}
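
The BUG_ON(preemptible()) in vfp_enable() is there because the
coprocessor access register is per-CPU state: the write must happen on
the CPU it is meant for. This hunk does not show the caller, but the
natural way to run it on every core is an IPI broadcast from the init
path; a hedged sketch (vfp_init_sketch is a hypothetical name, not part
of the patch):

/* Run vfp_enable() on each CPU so every core grants cp10/cp11 access.
 * on_each_cpu() invokes the callback with preemption disabled on each
 * core, satisfying the BUG_ON() above. */
static int __init vfp_init_sketch(void)
{
	on_each_cpu(vfp_enable, NULL, 1);	/* 1 = wait for all CPUs */
	return 0;
}
late_initcall(vfp_init_sketch);
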
+
+#ifdef CONFIG_CPU_PM
+static int vfp_pm_suspend(void)
+{
+	struct thread_info *ti = current_thread_info();
+	u32 fpexc = fmrx(FPEXC);
+
+	/* if vfp is on, then save state for resumption */
+	if (fpexc & FPEXC_EN) {
+		pr_debug("%s: saving vfp state\n", __func__);
+		vfp_save_state(&ti->vfpstate, fpexc);
+
+		/* disable, just in case */
+		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+	} else if (vfp_current_hw_state[ti->cpu]) {
+#ifndef CONFIG_SMP
+		fmxr(FPEXC, fpexc | FPEXC_EN);
+		vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
+		fmxr(FPEXC, fpexc);
+#endif
+	}
+
+	/* clear any information we had about last context state */
+	vfp_current_hw_state[ti->cpu] = NULL;
+
+	return 0;
+}
+
+static void vfp_pm_resume(void)
+{
+	/* ensure we have access to the vfp */
+	vfp_enable(NULL);
+
+	/* and disable it to ensure the next usage restores the state */
+	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+}
+
+static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
+	void *v)
+{
+	switch (cmd) {
+	case CPU_PM_ENTER:
+		vfp_pm_suspend();
+		break;
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		vfp_pm_resume();
+		break;
+	}
+	return NOTIFY_OK;
+}
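
The notifier above only takes effect once it is registered with the
cpu_pm framework; that registration is outside this excerpt. Presumably
the patch wires it up roughly as follows (the notifier_block and helper
names here are assumptions, not quoted from the patch):

static struct notifier_block vfp_cpu_pm_notifier_block = {
	.notifier_call = vfp_cpu_pm_notifier,
};

static void vfp_pm_init(void)
{
	/* From now on, CPU_PM_ENTER/CPU_PM_EXIT transitions invoke
	 * vfp_pm_suspend()/vfp_pm_resume() defined above. */
	cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}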