@@ -300,3 +300,114 @@ __asm__ __volatile__("1: stb %r2,%1\n" \
 "5:\n" \
 ".section __ex_table,\"a\"\n" \
 " .long 1b - .\n" \
+ " lda $31, 5b-1b(%0)\n" \
+ " .long 2b - .\n" \
+ " lda $31, 5b-2b(%0)\n" \
+ " .long 3b - .\n" \
+ " lda $31, 5b-3b(%0)\n" \
+ " .long 4b - .\n" \
+ " lda $31, 5b-4b(%0)\n" \
+ ".previous" \
+ : "=r"(__pu_err), "=&r"(__pu_tmp1), \
+ "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \
+ "=&r"(__pu_tmp4) \
+ : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
+}
+
+#define __put_user_8(x,addr) \
+{ \
+ long __pu_tmp1, __pu_tmp2; \
+ __asm__ __volatile__( \
+ "1: ldq_u %1,0(%4)\n" \
+ " insbl %3,%4,%2\n" \
+ " mskbl %1,%4,%1\n" \
+ " or %1,%2,%1\n" \
+ "2: stq_u %1,0(%4)\n" \
+ "3:\n" \
+ ".section __ex_table,\"a\"\n" \
+ " .long 1b - .\n" \
+ " lda $31, 3b-1b(%0)\n" \
+ " .long 2b - .\n" \
+ " lda $31, 3b-2b(%0)\n" \
+ ".previous" \
+ : "=r"(__pu_err), \
+ "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \
+ : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
+}
+#endif
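+/* Note: on Alpha CPUs without the BWX extension there is no byte
+   store, so __put_user_8 above does a read-modify-write: ldq_u loads
+   the aligned quadword containing the target byte, insbl shifts the
+   new byte into position, mskbl clears the old byte, or merges the
+   two, and stq_u writes the quadword back.  Both memory operations
+   get __ex_table entries; the lda there is never executed, it only
+   encodes the continuation label and %0 (__pu_err) as the register
+   that receives -EFAULT on a fault.  */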
+
+
+/*
+ * Complex access routines
+ */
+
+/* This little bit of silliness is to get the GP loaded for a function
+   that ordinarily wouldn't. Otherwise we could have it done by the macro
+   directly, which can be optimized by the linker. */
+#ifdef MODULE
+#define __module_address(sym) "r"(sym),
+#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym
+#else
+#define __module_address(sym)
+#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp"
+#endif
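+/* In the kernel image proper, the !samegp marker on the bsr promises
+   the linker that the callee shares our GP, so no GP reload is needed
+   at the call site.  A module cannot assume that, so it materializes
+   the target address as an extra asm input (__module_address) and
+   calls indirect through it with jsr.  */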
+
+extern void __copy_user(void);
+
+extern inline long
+__copy_tofrom_user_nocheck(void *to, const void *from, long len)
+{
+ register void * __cu_to __asm__("$6") = to;
+ register const void * __cu_from __asm__("$7") = from;
+ register long __cu_len __asm__("$0") = len;
+
+ __asm__ __volatile__(
+ __module_call(28, 3, __copy_user)
+ : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to)
+ : __module_address(__copy_user)
+ "0" (__cu_len), "1" (__cu_from), "2" (__cu_to)
+ : "$1","$2","$3","$4","$5","$28","memory");
+
+ return __cu_len;
+}
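+/* __copy_user follows a private calling convention rather than the C
+   ABI: length in $0, destination in $6, source in $7, return address
+   in $28.  On return $0 holds the count of bytes NOT copied, so
+   returning __cu_len gives the usual zero-on-success semantics.  */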
+
+extern inline long
+__copy_tofrom_user(void *to, const void *from, long len, const void __user *validate)
+{
+ if (__access_ok((unsigned long)validate, len, get_fs()))
+ len = __copy_tofrom_user_nocheck(to, from, len);
+ return len;
+}
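+/* Exactly one of to/from is the user pointer; the caller passes it
+   again as "validate" so __access_ok() checks the right side before
+   the unchecked copy runs.  */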
+
+#define __copy_to_user(to,from,n) \
+({ \
+ __chk_user_ptr(to); \
+ __copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \
+})
+#define __copy_from_user(to,from,n) \
+({ \
+ __chk_user_ptr(from); \
+ __copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \
+})
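+/* The double-underscore forms skip the __access_ok() range check;
+   the caller must already have validated the user pointer.
+   __chk_user_ptr() is a sparse-only annotation with no runtime
+   effect.  */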
+
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
+
+
+extern inline long
+copy_to_user(void __user *to, const void *from, long n)
+{
+ return __copy_tofrom_user((__force void *)to, from, n, to);
+}
+
+extern inline long
+copy_from_user(void *to, const void __user *from, long n)
+{
+ return __copy_tofrom_user(to, (__force void *)from, n, from);
+}
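+/* Typical use (names illustrative): a nonzero return is the number
+   of bytes left uncopied and is normally mapped to -EFAULT, e.g.
+       if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
+               return -EFAULT;  */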
+
+extern void __do_clear_user(void);
+
+extern inline long
+__clear_user(void __user *to, long len)
+{