#ifndef __ALPHA_UACCESS_H
#define __ALPHA_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * Or at least it did once upon a time.  Nowadays it is a mask that
 * defines which bits of the address space are off limits.  This is a
 * wee bit faster than the above.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { -0x40000000000UL })

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define get_fs()  (current_thread_info()->addr_limit)
#define get_ds()  (KERNEL_DS)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * The address is valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 */
#define __access_ok(addr,size,segment) \
	(((segment).seg & (addr | size | (addr+size))) == 0)

#define access_ok(type,addr,size)				\
({								\
	__chk_user_ptr(addr);					\
	__access_ok(((unsigned long)(addr)),(size),get_fs());	\
})

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * As the alpha uses the same address space for kernel and user
 * data, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof/typeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr) \
  __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs())
#define get_user(x,ptr) \
  __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs())

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the programmer has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
  __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
  __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
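/*
 * Example (illustrative only, not part of this header): the intended
 * calling pattern, using a hypothetical function and user pointer.
 * get_user()/put_user() validate the pointer themselves; the "__"
 * variants are for batched accesses that are already covered by one
 * explicit access_ok() check:
 *
 *	long example(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *
 *		if (!access_ok(VERIFY_WRITE, uptr, 2 * sizeof(int)))
 *			return -EFAULT;
 *		__put_user(val, uptr);
 *		__put_user(val, uptr + 1);
 *		return 0;
 *	}
 */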
/*
 * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
 * encode the bits we need for resolving the exception.  See the
 * more extensive comments with fixup_inline_exception below for
 * more information.
 */

extern void __get_user_unknown(void);

#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err = 0;					\
	unsigned long __gu_val;					\
	__chk_user_ptr(ptr);					\
	switch (size) {						\
	  case 1: __get_user_8(ptr); break;			\
	  case 2: __get_user_16(ptr); break;			\
	  case 4: __get_user_32(ptr); break;			\
	  case 8: __get_user_64(ptr); break;			\
	  default: __get_user_unknown(); break;			\
	}							\
	(x) = (__typeof__(*(ptr))) __gu_val;			\
	__gu_err;						\
})

#define __get_user_check(x,ptr,size,segment)				\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (__access_ok((unsigned long)__gu_addr,size,segment)) {	\
		__gu_err = 0;						\
		switch (size) {						\
		  case 1: __get_user_8(__gu_addr); break;		\
		  case 2: __get_user_16(__gu_addr); break;		\
		  case 4: __get_user_32(__gu_addr); break;		\
		  case 8: __get_user_64(__gu_addr); break;		\
		  default: __get_user_unknown(); break;			\
		}							\
	}								\
	(x) = (__typeof__(*(ptr))) __gu_val;				\
	__gu_err;							\
})

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves.  */

#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	".section __ex_table,\"a\"\n"			\
	"	.long 1b - .\n"				\
	"	lda %0, 2b-1b(%1)\n"			\
	".previous"					\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation.  */
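/*
 * A minimal sketch of that general operation (assuming the standard
 * Alpha unaligned-access idiom; the real macros below also attach an
 * exception-table entry to each load).  For a 16-bit value:
 *
 *	ldq_u	t0, 0(addr)	# quadword holding the first byte
 *	ldq_u	t1, 1(addr)	# quadword holding the last byte
 *	extwl	t0, addr, t0	# low piece of the word
 *	extwh	t1, addr, t1	# high piece (zero when not split)
 *	or	t0, t1, val
 */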