@@ -0,0 +1,179 @@
+/*
+ * linux/arch/arm/vfp/vfp.h
+ *
+ * Copyright (C) 2004 ARM Limited.
+ * Written by Deep Blue Solutions Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
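+/*
+ * Shift right with "jamming": any bits shifted out of the bottom are
+ * ORed into bit 0, so later rounding steps can still tell that they
+ * were non-zero.  The same scheme is used by the 64-bit variant below.
+ */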
+static inline u32 vfp_shiftright32jamming(u32 val, unsigned int shift)
+{
+	if (shift) {
+		if (shift < 32)
+			val = val >> shift | ((val << (32 - shift)) != 0);
+		else
+			val = val != 0;
+	}
+	return val;
+}
+
+static inline u64 vfp_shiftright64jamming(u64 val, unsigned int shift)
+{
+	if (shift) {
+		if (shift < 64)
+			val = val >> shift | ((val << (64 - shift)) != 0);
+		else
+			val = val != 0;
+	}
+	return val;
+}
+
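+/*
+ * Narrow a 64-bit value to 32 bits, jamming the low word into bit 0.
+ * %Q/%R name the least/most significant words of a 64-bit operand;
+ * "cmp %Q1, #1" sets carry iff the low word is non-zero, so the high
+ * word is returned as-is (movcc) or with bit 0 set (orrcs).
+ */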
+static inline u32 vfp_hi64to32jamming(u64 val)
+{
+	u32 v;
+
+	asm(
+	"cmp	%Q1, #1		@ vfp_hi64to32jamming\n\t"
+	"movcc	%0, %R1\n\t"
+	"orrcs	%0, %R1, #1"
+	: "=r" (v) : "r" (val) : "cc");
+
+	return v;
+}
+
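+/*
+ * 128-bit add: {*resh, *resl} = {nh, nl} + {mh, ml}, with the carry
+ * propagated through the four 32-bit halves by the adds/adcs chain.
+ */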
+static inline void add128(u64 *resh, u64 *resl, u64 nh, u64 nl, u64 mh, u64 ml)
+{
+	asm(	"adds	%Q0, %Q2, %Q4\n\t"
+		"adcs	%R0, %R2, %R4\n\t"
+		"adcs	%Q1, %Q3, %Q5\n\t"
+		"adc	%R1, %R3, %R5"
+	: "=r" (nl), "=r" (nh)
+	: "0" (nl), "1" (nh), "r" (ml), "r" (mh)
+	: "cc");
+	*resh = nh;
+	*resl = nl;
+}
+
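+/*
+ * 128-bit subtract: {*resh, *resl} = {nh, nl} - {mh, ml}, with the
+ * borrow propagated through the halves by the subs/sbcs chain.
+ */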
+static inline void sub128(u64 *resh, u64 *resl, u64 nh, u64 nl, u64 mh, u64 ml)
+{
+	asm(	"subs	%Q0, %Q2, %Q4\n\t"
+		"sbcs	%R0, %R2, %R4\n\t"
+		"sbcs	%Q1, %Q3, %Q5\n\t"
+		"sbc	%R1, %R3, %R5\n\t"
+	: "=r" (nl), "=r" (nh)
+	: "0" (nl), "1" (nh), "r" (ml), "r" (mh)
+	: "cc");
+	*resh = nh;
+	*resl = nl;
+}
+
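+/*
+ * Full 64x64->128 multiply from four 32x32->64 partial products:
+ * n * m = (nh*mh << 64) + ((nh*ml + nl*mh) << 32) + nl*ml.
+ * The "rma < rmb" and "rl < rma" comparisons detect the carries out
+ * of the middle-term and low-term additions.
+ */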
+static inline void mul64to128(u64 *resh, u64 *resl, u64 n, u64 m)
+{
+	u32 nh, nl, mh, ml;
+	u64 rh, rma, rmb, rl;
+
+	nl = n;
+	ml = m;
+	rl = (u64)nl * ml;
+
+	nh = n >> 32;
+	rma = (u64)nh * ml;
+
+	mh = m >> 32;
+	rmb = (u64)nl * mh;
+	rma += rmb;
+
+	rh = (u64)nh * mh;
+	rh += ((u64)(rma < rmb) << 32) + (rma >> 32);
+
+	rma <<= 32;
+	rl += rma;
+	rh += (rl < rma);
+
+	*resl = rl;
+	*resh = rh;
+}
+
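+/* Widen n to 128 bits, shifted left one place: {*resh, *resl} = n << 1. */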
+static inline void shift64left(u64 *resh, u64 *resl, u64 n)
+{
+	*resh = n >> 63;
+	*resl = n << 1;
+}
+
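+/*
+ * Top 64 bits of n * m, with any non-zero low bits jammed into bit 0.
+ */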
+static inline u64 vfp_hi64multiply64(u64 n, u64 m)
+{
+	u64 rh, rl;
+	mul64to128(&rh, &rl, n, m);
+	return rh | (rl != 0);
+}
+
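+/*
+ * Estimate the 64-bit quotient of {nh, nl} / m, apparently modelled
+ * on SoftFloat's estimateDiv128To64: guess a 32-bit quotient digit
+ * with do_div(), step the remainder back while it is negative, then
+ * repeat for the low digit.  m is presumably pre-normalised (bit 63
+ * set) by the caller, and the estimate may still be out by a small
+ * amount; returns all-ones if the quotient would overflow 64 bits.
+ */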
+static inline u64 vfp_estimate_div128to64(u64 nh, u64 nl, u64 m)
+{
+	u64 mh, ml, remh, reml, termh, terml, z;
+
+	if (nh >= m)
+		return ~0ULL;
+	mh = m >> 32;
+	if (mh << 32 <= nh) {
+		z = 0xffffffff00000000ULL;
+	} else {
+		z = nh;
+		do_div(z, mh);
+		z <<= 32;
+	}
+	mul64to128(&termh, &terml, m, z);
+	sub128(&remh, &reml, nh, nl, termh, terml);
+	ml = m << 32;
+	while ((s64)remh < 0) {
+		z -= 0x100000000ULL;
+		add128(&remh, &reml, remh, reml, mh, ml);
+	}
+	remh = (remh << 32) | (reml >> 32);
+	if (mh << 32 <= remh) {
+		z |= 0xffffffff;
+	} else {
+		do_div(remh, mh);
+		z |= remh;
+	}
+	return z;
+}