- #ifndef _ASM_IA64_UACCESS_H
- #define _ASM_IA64_UACCESS_H
- /*
- * This file defines various macros to transfer memory areas across
- * the user/kernel boundary. This needs to be done carefully because
- * this code is executed in kernel mode and uses user-specified
- * addresses. Thus, we need to be careful not to let the user
- * trick us into accessing kernel memory that would normally be
- * inaccessible. This code is also fairly performance sensitive,
- * so we want to spend as little time doing safety checks as
- * possible.
- *
- * To make matters a bit more interesting, these macros are sometimes also
- * called from within the kernel itself, in which case the address
- * validity check must be skipped. The get_fs() macro tells us what
- * to do: if get_fs()==USER_DS, checking is performed; if
- * get_fs()==KERNEL_DS, checking is bypassed.
- *
- * Note that even if the memory area specified by the user is in a
- * valid address range, it is still possible that we'll get a page
- * fault while accessing it. This is handled by filling out an
- * exception handler fixup entry for each instruction that has the
- * potential to fault. When such a fault occurs, the page fault
- * handler checks to see whether the faulting instruction has a fixup
- * associated with it and, if so, sets r8 to -EFAULT, clears r9 to 0,
- * and resumes execution at the continuation point.
- *
- * Based on <asm-alpha/uaccess.h>.
- *
- * Copyright (C) 1998, 1999, 2001-2004 Hewlett-Packard Co
- * David Mosberger-Tang <davidm@hpl.hp.com>
- */
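To make the fixup contract above concrete, here is a hedged sketch (not part of the original header) of how a caller consumes the -EFAULT produced by that path; example_peek() and its arguments are invented names:

/* Hypothetical helper: read one word from user space.  On a fault the
 * fixup entry makes get_user() return -EFAULT and zeroes the value. */
#include <linux/errno.h>
#include <asm/uaccess.h>

static long example_peek(unsigned long __user *uaddr, unsigned long *out)
{
	unsigned long val;

	if (get_user(val, uaddr))	/* address check + fault fixup */
		return -EFAULT;
	*out = val;
	return 0;
}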
- #include <linux/compiler.h>
- #include <linux/errno.h>
- #include <linux/sched.h>
- #include <linux/page-flags.h>
- #include <linux/mm.h>
- #include <asm/intrinsics.h>
- #include <asm/pgtable.h>
- #include <asm/io.h>
- /*
- * For historical reasons, the following macros are grossly misnamed:
- */
- #define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */
- #define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */
- #define VERIFY_READ 0
- #define VERIFY_WRITE 1
- #define get_ds() (KERNEL_DS)
- #define get_fs() (current_thread_info()->addr_limit)
- #define set_fs(x) (current_thread_info()->addr_limit = (x))
- #define segment_eq(a, b) ((a).seg == (b).seg)
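The addr_limit in the thread info is what the classic get_fs()/set_fs() pattern of this kernel era manipulates: code that wants to push a kernel buffer through routines expecting __user pointers temporarily widens the limit to KERNEL_DS and restores it afterwards. A hedged sketch of that pattern (example_kernel_read() and the reader callback are invented names):

/* Hypothetical sketch of the historical set_fs(KERNEL_DS) idiom. */
static long example_kernel_read(long (*reader)(char __user *, size_t),
				char *kernel_buf, size_t len)
{
	mm_segment_t old_fs = get_fs();	/* remember the current limit */
	long err;

	set_fs(KERNEL_DS);	/* let kernel addresses pass access_ok() */
	err = reader((__force char __user *)kernel_buf, len);
	set_fs(old_fs);		/* always restore the saved limit */
	return err;
}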
- /*
- * When accessing user memory, we need to make sure that the entire area really is in
- * user-level space. To do this efficiently, we guarantee that the page at address
- * TASK_SIZE is never valid, so an access that runs past the limit faults there. We
- * also need to make sure that the address doesn't point inside the virtually mapped
- * linear page table.
- */
- #define __access_ok(addr, size, segment) \
- ({ \
- __chk_user_ptr(addr); \
- (likely((unsigned long) (addr) <= (segment).seg) \
- && ((segment).seg == KERNEL_DS.seg \
- || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \
- })
- #define access_ok(type, addr, size) __access_ok((addr), (size), get_fs())
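Note that __access_ok() never looks at its size argument: the permanently invalid page at TASK_SIZE (see the comment above) catches a buffer that starts in range but runs past the limit. A hedged usage sketch, with example_write() and its parameters invented for illustration:

/* Hypothetical write(2)-style path: the kernel only reads from the user
 * buffer, hence VERIFY_READ.  A buffer straddling TASK_SIZE is stopped by
 * the guard page at copy time rather than by an explicit size check. */
static ssize_t example_write(const char __user *ubuf, size_t count)
{
	if (!access_ok(VERIFY_READ, ubuf, count))
		return -EFAULT;
	/* ... ubuf may now be handed to the unchecked helpers below ... */
	return count;
}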
- /*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * Be careful not to
- * (a) re-use the arguments for side effects (sizeof/typeof is ok)
- * (b) require any knowledge of processes at this stage
- */
- #define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)), get_fs())
- #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs())
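A single-value round trip then looks like the hedged sketch below; each call expands to the checked variants above and evaluates to 0 on success or -EFAULT on a bad address or fault (example_double() is an invented name):

/* Hypothetical ioctl-style helper: read an int, double it, write it back. */
static long example_double(int __user *uptr)
{
	int v;

	if (get_user(v, uptr))
		return -EFAULT;
	if (put_user(v * 2, uptr))
		return -EFAULT;
	return 0;
}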
- /*
- * The "__xxx" versions do not do address space checking, useful when
- * doing multiple accesses to the same area (the programmer has to do the
- * checks by hand with "access_ok()")
- */
- #define __put_user(x, ptr) __put_user_nocheck((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr)))
- #define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
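The intended pattern is a single access_ok() call covering the whole object, followed by the unchecked accessors for its individual fields, as in this hedged sketch (structure and function names are invented):

/* Hypothetical request structure fetched from user space with one range
 * check up front and unchecked per-field reads afterwards. */
struct example_req {
	int cmd;
	int arg;
};

static long example_fetch(struct example_req __user *ureq, int *cmd, int *arg)
{
	if (!access_ok(VERIFY_READ, ureq, sizeof(*ureq)))
		return -EFAULT;
	if (__get_user(*cmd, &ureq->cmd) || __get_user(*arg, &ureq->arg))
		return -EFAULT;
	return 0;
}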
- extern long __put_user_unaligned_unknown (void);
- #define __put_user_unaligned(x, ptr) \
- ({ \
- long __ret; \
- switch (sizeof(*(ptr))) { \
- case 1: __ret = __put_user((x), (ptr)); break; \
- case 2: __ret = (__put_user((x), (u8 __user *)(ptr))) \
- | (__put_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
- case 4: __ret = (__put_user((x), (u16 __user *)(ptr))) \
- | (__put_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
- case 8: __ret = (__put_user((x), (u32 __user *)(ptr))) \
- | (__put_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
- default: __ret = __put_user_unaligned_unknown(); \
- } \
- __ret; \
- })
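The switch above splits a store that is only aligned to half its size into two naturally aligned halves: an 8-byte value at a 4-byte boundary is written as its low 32 bits at ptr and its high 32 bits at ptr + 4, which matches ia64's little-endian byte order. A hedged usage sketch with an invented packed structure:

/* Hypothetical: with the packed attribute, 'value' sits at offset 4, so it
 * is 4-byte but not 8-byte aligned; __put_user_unaligned() stores it as
 * two aligned 4-byte halves.  As with __put_user(), the caller is assumed
 * to have performed the access_ok() check already. */
struct example_hdr {
	u32 type;
	u64 value;
} __attribute__((packed));

static long example_store(struct example_hdr __user *up, u64 v)
{
	return __put_user_unaligned(v, &up->value);
}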
- extern long __get_user_unaligned_unknown (void);
- #define __get_user_unaligned(x, ptr) \
- ({ \
- long __ret; \
- switch (sizeof(*(ptr))) { \
- case 1: __ret = __get_user((x), (ptr)); break; \
- case 2: __ret = (__get_user((x), (u8 __user *)(ptr))) \
- | (__get_user((x) >> 8, ((u8 __user *)(ptr) + 1))); break; \
- case 4: __ret = (__get_user((x), (u16 __user *)(ptr))) \
- | (__get_user((x) >> 16, ((u16 __user *)(ptr) + 1))); break; \
- case 8: __ret = (__get_user((x), (u32 __user *)(ptr))) \
- | (__get_user((x) >> 32, ((u32 __user *)(ptr) + 1))); break; \
- default: __ret = __get_user_unaligned_unknown(); \
- } \
- __ret; \
- })
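The read side mirrors the write side: fetch two half-width pieces and recombine them, relying on little-endian byte order. The recombination itself, shown as a standalone hedged sketch in plain C (names invented, not kernel code), is just a shift and an OR:

#include <stdint.h>

/* Reassemble a 64-bit little-endian value from its two 32-bit halves;
 * the word read from the lower address is the low half. */
static uint64_t example_combine(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}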
- #ifdef ASM_SUPPORTED
- struct __large_struct { unsigned long buf[100]; };
- # define __m(x) (*(struct __large_struct __user *)(x))
- /* We need to declare the __ex_table section before we can use it in .xdata. */
- asm (".section \"__ex_table\", \"a\"\n\t.previous");
- # define __get_user_size(val, addr, n, err) \
- do { \