@@ -903,3 +903,142 @@ vfp_single_multiply_accumulate(int sd, int sn, s32 m, u32 fpscr, u32 negate, cha
 	vfp_single_unpack(&vsn, v);
 	if (vsn.exponent == 0 && vsn.significand)
 		vfp_single_normalise_denormal(&vsn);
+
+	vfp_single_unpack(&vsm, m);
+	if (vsm.exponent == 0 && vsm.significand)
+		vfp_single_normalise_denormal(&vsm);
+
+	exceptions = vfp_single_multiply(&vsp, &vsn, &vsm, fpscr);
+	if (negate & NEG_MULTIPLY)
+		vsp.sign = vfp_sign_negate(vsp.sign);
+
+	v = vfp_get_float(sd);
+	pr_debug("VFP: s%u = %08x\n", sd, v);
+	vfp_single_unpack(&vsn, v);
+	if (negate & NEG_SUBTRACT)
+		vsn.sign = vfp_sign_negate(vsn.sign);
+
+	exceptions |= vfp_single_add(&vsd, &vsn, &vsp, fpscr);
+
+	return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, func);
+}
+
+/*
+ * Standard operations
+ */
+
+/*
+ * sd = sd + (sn * sm)
+ */
+static u32 vfp_single_fmac(int sd, int sn, s32 m, u32 fpscr)
+{
+	return vfp_single_multiply_accumulate(sd, sn, m, fpscr, 0, "fmac");
+}
+
+/*
+ * sd = sd - (sn * sm)
+ */
+static u32 vfp_single_fnmac(int sd, int sn, s32 m, u32 fpscr)
+{
+	return vfp_single_multiply_accumulate(sd, sn, m, fpscr, NEG_MULTIPLY, "fnmac");
+}
+
+/*
+ * sd = -sd + (sn * sm)
+ */
+static u32 vfp_single_fmsc(int sd, int sn, s32 m, u32 fpscr)
+{
+	return vfp_single_multiply_accumulate(sd, sn, m, fpscr, NEG_SUBTRACT, "fmsc");
+}
+
+/*
+ * sd = -sd - (sn * sm)
+ */
+static u32 vfp_single_fnmsc(int sd, int sn, s32 m, u32 fpscr)
+{
+	return vfp_single_multiply_accumulate(sd, sn, m, fpscr, NEG_SUBTRACT | NEG_MULTIPLY, "fnmsc");
+}
+
+/*
+ * sd = sn * sm
+ */
+static u32 vfp_single_fmul(int sd, int sn, s32 m, u32 fpscr)
+{
+	struct vfp_single vsd, vsn, vsm;
+	u32 exceptions;
+	s32 n = vfp_get_float(sn);
+
+	pr_debug("VFP: s%u = %08x\n", sn, n);
+
+	vfp_single_unpack(&vsn, n);
+	if (vsn.exponent == 0 && vsn.significand)
+		vfp_single_normalise_denormal(&vsn);
+
+	vfp_single_unpack(&vsm, m);
+	if (vsm.exponent == 0 && vsm.significand)
+		vfp_single_normalise_denormal(&vsm);
+
+	exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+	return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fmul");
+}
+
+/*
+ * sd = -(sn * sm)
+ */
+static u32 vfp_single_fnmul(int sd, int sn, s32 m, u32 fpscr)
+{
+	struct vfp_single vsd, vsn, vsm;
+	u32 exceptions;
+	s32 n = vfp_get_float(sn);
+
+	pr_debug("VFP: s%u = %08x\n", sn, n);
+
+	vfp_single_unpack(&vsn, n);
+	if (vsn.exponent == 0 && vsn.significand)
+		vfp_single_normalise_denormal(&vsn);
+
+	vfp_single_unpack(&vsm, m);
+	if (vsm.exponent == 0 && vsm.significand)
+		vfp_single_normalise_denormal(&vsm);
+
+	exceptions = vfp_single_multiply(&vsd, &vsn, &vsm, fpscr);
+	vsd.sign = vfp_sign_negate(vsd.sign);
+	return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fnmul");
+}
+
+/*
+ * sd = sn + sm
+ */
+static u32 vfp_single_fadd(int sd, int sn, s32 m, u32 fpscr)
+{
+	struct vfp_single vsd, vsn, vsm;
+	u32 exceptions;
+	s32 n = vfp_get_float(sn);
+
+	pr_debug("VFP: s%u = %08x\n", sn, n);
+
+	/*
+	 * Unpack and normalise denormals.
+	 */
+	vfp_single_unpack(&vsn, n);
+	if (vsn.exponent == 0 && vsn.significand)
+		vfp_single_normalise_denormal(&vsn);
+
+	vfp_single_unpack(&vsm, m);
+	if (vsm.exponent == 0 && vsm.significand)
+		vfp_single_normalise_denormal(&vsm);
+
+	exceptions = vfp_single_add(&vsd, &vsn, &vsm, fpscr);
+
+	return vfp_single_normaliseround(sd, &vsd, fpscr, exceptions, "fadd");
+}
+
+/*
+ * sd = sn - sm
+ */
+static u32 vfp_single_fsub(int sd, int sn, s32 m, u32 fpscr)
+{
+	/*
+	 * Subtraction is addition with one sign inverted.
+	 */
+	return vfp_single_fadd(sd, sn, vfp_single_packed_negate(m), fpscr);