@@ -0,0 +1,115 @@
+/*
+ * arch/arm/include/asm/uaccess.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_UACCESS_H
+#define _ASMARM_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/string.h>
+#include <linux/thread_info.h>
+#include <asm/errno.h>
+#include <asm/memory.h>
+#include <asm/domain.h>
+#include <asm/unified.h>
+#include <asm/compiler.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
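/*
 * Illustrative aside (not part of this patch): a minimal sketch of how a
 * faulting-instruction/fixup pair is typically emitted, assuming the usual
 * .fixup/__ex_table inline-asm idiom. The labels and register choices here
 * are made up for illustration only.
 *
 *	1:	ldrbt	r2, [r0]		@ instruction allowed to fault
 *	2:					@ execution continues here on success
 *	.pushsection .fixup, "ax"
 *	3:	mov	r2, #0			@ fixup: zero the result, then resume
 *		b	2b
 *	.popsection
 *	.pushsection __ex_table, "a"
 *		.long	1b, 3b			@ {insn, fixup} pair found by fixup_exception()
 *	.popsection
 */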
+
+/*
+ * These two are intentionally not defined anywhere - if the kernel
+ * code generates any references to them, that's a bug.
+ */
+extern int __get_user_bad(void);
+extern int __put_user_bad(void);
+
+/*
+ * Note that this is actually 0x1,0000,0000
+ */
+#define KERNEL_DS 0x00000000
+#define get_ds() (KERNEL_DS)
+
+#ifdef CONFIG_MMU
+
+#define USER_DS TASK_SIZE
+#define get_fs() (current_thread_info()->addr_limit)
+
+static inline void set_fs(mm_segment_t fs)
+{
+ current_thread_info()->addr_limit = fs;
+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
+}
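/*
 * Illustrative aside (not part of this patch): the classic save/override/
 * restore pattern for set_fs(), assuming the interface declared above.
 * Temporarily switching to KERNEL_DS makes the uaccess range checks accept
 * kernel addresses; the previous limit must always be restored afterwards.
 *
 *	mm_segment_t old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	... perform the access through the usual user accessors ...
 *	set_fs(old_fs);
 */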
+
+#define segment_eq(a,b) ((a) == (b))
+
+#define __addr_ok(addr) ({ \
+ unsigned long flag; \
+ __asm__("cmp %2, %0; movlo %0, #0" \
+ : "=&r" (flag) \
+ : "0" (current_thread_info()->addr_limit), "r" (addr) \
+ : "cc"); \
+ (flag == 0); })
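/*
 * Illustrative aside (not part of this patch): for a non-zero limit such as
 * USER_DS, __addr_ok(addr) above behaves roughly like the plain C test
 *
 *	(unsigned long)(addr) < current_thread_info()->addr_limit
 *
 * With KERNEL_DS (0, i.e. 2^32 in the 33-bit view) the flag starts out as
 * zero, so every address is accepted.
 */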
+
+/* We use 33-bit arithmetic here... */
+#define __range_ok(addr,size) ({ \
+ unsigned long flag, roksum; \
+ __chk_user_ptr(addr); \
+ __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
+ : "=&r" (flag), "=&r" (roksum) \
+ : "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
+ : "cc"); \
+ flag; })
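/*
 * Illustrative aside (not part of this patch): a rough plain C restatement
 * of the 33-bit check above for the usual non-zero limit (USER_DS). The
 * access [addr, addr + size) is accepted only when the 33-bit sum neither
 * wraps nor exceeds addr_limit; with KERNEL_DS (0, read as 2^32) the check
 * always succeeds.
 *
 *	u64 end = (u64)(unsigned long)(addr) + (size);
 *	ok = (end <= (u64)current_thread_info()->addr_limit);	// ok <=> flag == 0
 */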
+
+/*
+ * Single-value transfer routines. They automatically use the right
+ * size if we just have the right pointer type. Note that the functions
+ * which read from user space (*get_*) need to take care not to leak
+ * kernel data even if the calling code is buggy and fails to check
+ * the return value. This means zeroing out the destination variable
+ * or buffer on error. Normally this is done out of line by the
+ * fixup code, but there are a few places where it intrudes on the
+ * main code path. When we only write to user space, there is no
+ * problem.
+ */
+extern int __get_user_1(void *);
+extern int __get_user_2(void *);
+extern int __get_user_4(void *);
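/*
 * Illustrative aside (not part of this patch): these out-of-line helpers
 * back the get_user() interface. A hypothetical caller always checks the
 * return value; on fault the destination has already been zeroed, so no
 * stale kernel data can leak even if the check is forgotten:
 *
 *	unsigned int val;
 *	if (get_user(val, (unsigned int __user *)uptr))
 *		return -EFAULT;
 */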
+
+#define __GUP_CLOBBER_1 "lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2 "ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4 "lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s) \
+ __asm__ __volatile__ ( \
+ __asmeq("%0", "r0") __asmeq("%1", "r2") \
+ __asmeq("%3", "r1") \