#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define __ARCH_WANT_UNLOCKED_CTXSW
#define ARCH_HAS_PREFETCH_SWITCH_STACK

#define IA64_NUM_PHYS_STACK_REG	96
#define IA64_NUM_DBG_REGS	8

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE really is a misnomer.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		DEFAULT_TASK_SIZE

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration
							   sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)

/*
 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30

#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <linux/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};

union ia64_isr {
	__u64  val;
	struct {
		__u64 code : 16;
		__u64 vector : 8;
		__u64 reserved1 : 8;
		__u64 x : 1;
		__u64 w : 1;
		__u64 r : 1;
		__u64 na : 1;
		__u64 sp : 1;
		__u64 rs : 1;
		__u64 ir : 1;
		__u64 ni : 1;
		__u64 so : 1;
		__u64 ei : 2;
		__u64 ed : 1;
		__u64 reserved2 : 20;
	};
};

union ia64_lid {
	__u64 val;
	struct {
		__u64  rv  : 16;
		__u64  eid : 8;
		__u64  id  : 8;
		__u64  ig  : 32;
	};
};

union ia64_tpr {
	__u64 val;
	struct {
		__u64 ig0 : 4;
		__u64 mic : 4;
		__u64 rsv : 8;
		__u64 mmi : 1;
		__u64 ig1 : 47;
	};
};

union ia64_itir {
	__u64 val;
	struct {
		__u64 rv3  :  2; /* 0-1 */
		__u64 ps   :  6; /* 2-7 */
		__u64 key  : 24; /* 8-31 */
		__u64 rv4  : 32; /* 32-63 */
	};
};

union ia64_rr {
	__u64 val;
	struct {
		__u64  ve	:  1;  /* enable hw walker */
		__u64  reserved0:  1;  /* reserved */
		__u64  ps	:  6;  /* log page size */
		__u64  rid	: 24;  /* region id */
		__u64  reserved1: 32;  /* reserved */
	};
};
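/*
 * Illustrative sketch (not part of this header): the bit-field unions above
 * let raw system-register values be decoded without hand-written shifting
 * and masking.  For example, picking apart a region register value read via
 * the ia64_get_rr() intrinsic (the function below is hypothetical):
 */
#if 0
static inline void
example_decode_rr (unsigned long regnum)
{
	union ia64_rr rr;

	/* region number lives in the top 3 bits of a virtual address */
	rr.val = ia64_get_rr(regnum << 61);
	printk("rr%lu: rid=%u, log2(page size)=%u, hw walker %s\n",
	       regnum, (unsigned) rr.rid, (unsigned) rr.ps,
	       rr.ve ? "on" : "off");
}
#endif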
/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	unsigned int softirq_pending;
	unsigned long itm_delta;	/* # of clock cycles between clock ticks */
	unsigned long itm_next;		/* interval timer mask value to use for next clock tick */
	unsigned long nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	unsigned long unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	unsigned long unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	unsigned long itc_freq;		/* frequency of ITC counter */
	unsigned long proc_freq;	/* frequency of processor */
	unsigned long cyc_per_usec;	/* itc_freq/1000000 */
	unsigned long ptce_base;
	unsigned int ptce_count[2];
	unsigned int ptce_stride[2];
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	unsigned long loops_per_jiffy;
	int cpu;
	unsigned int socket_id;	/* physical processor socket id */
	unsigned short core_id;	/* core id */
	unsigned short thread_id; /* thread id */
	unsigned short num_log;	/* Total number of logical processors on
				 * this socket that were successfully booted */
	unsigned char cores_per_socket;	/* Cores per processor socket */
	unsigned char threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	unsigned long ppn;
	unsigned long features;
	unsigned char number;
	unsigned char revision;
	unsigned char model;
	unsigned char family;
	unsigned char archrev;
	char vendor[16];
	char *model_name;

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};

DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data		(&__ia64_per_cpu_var(ia64_cpu_info))
#define cpu_data(cpu)		(&per_cpu(ia64_cpu_info, cpu))

extern void print_cpu_info (struct cpuinfo_ia64 *);

typedef struct {
	unsigned long seg;
} mm_segment_t;

#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})

#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})

#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
			  | (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK));	\
	0;											\
})

#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
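/*
 * Illustrative note (not part of this header): the four macros above are
 * the arch hooks behind the generic prctl() unaligned-access and
 * fp-emulation controls, so a process picks its own policy for the
 * IA64_THREAD_UAC_*/IA64_THREAD_FPEMU_* bits, e.g. from userland:
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS);	// IA64_THREAD_UAC_SIGBUS
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);	// IA64_THREAD_UAC_NOPRINT
 *	prctl(PR_SET_FPEMU, PR_FPEMU_SIGFPE);		// IA64_THREAD_FPEMU_SIGFPE
 */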
struct thread_struct {
	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;			/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;			/* kernel stack pointer */
	__u64 map_base;			/* base address for get_unmapped_area() */
	__u64 rbs_bot;			/* the base address for the RBS */
	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_PERFMON
	void *pfm_context;		     /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
# define INIT_THREAD_PM		.pfm_context =		NULL,     \
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	unsigned long dbr[IA64_NUM_DBG_REGS];
	unsigned long ibr[IA64_NUM_DBG_REGS];
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};

#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.last_fph_cpu =  -1,					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}

#define start_thread(regs,new_ip,new_sp) do {							\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (unlikely(!get_dumpable(current->mm))) {						\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9  = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)

/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().
 */
#define release_thread(dead_task)

/* Get wait channel for task P.  */
extern unsigned long get_wchan (struct task_struct *p);

/* Return instruction pointer of blocked task TSK.  */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })
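/*
 * Illustrative note (not part of this header): an IA-64 instruction address
 * is a 16-byte-aligned bundle address plus a slot number (psr.ri, 0..2), so
 * KSTK_EIP() above can fold the slot into the unused low bits of cr_iip
 * without ambiguity:
 *
 *	ip = _regs->cr_iip + ia64_psr(_regs)->ri;	// bundle base | slot
 */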
/* Return stack pointer of blocked task TSK.  */
#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)

extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	    case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	    case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	    case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	    case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	    case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	    case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	    case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	    case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	    default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})

#define ia64_set_kr(regnum, r)					\
({								\
	switch (regnum) {					\
	    case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break;	\
	    case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break;	\
	    case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break;	\
	    case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break;	\
	    case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break;	\
	    case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break;	\
	    case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break;	\
	    case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break;	\
	    default: ia64_setreg_unknown_kr(); break;		\
	}							\
})

/*
 * The following three macros can't be inline functions because we don't have struct
 * task_struct at this point.
 */

/*
 * Return TRUE if task T owns the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_is_local_fpu_owner(t)								\
({												\
	struct task_struct *__ia64_islfo_task = (t);						\
	(__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id()				\
	 && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER));	\
})

/*
 * Mark task T as owning the fph partition of the CPU we're running on.
 * Must be called from code that has preemption disabled.
 */
#define ia64_set_local_fpu_owner(t) do {						\
	struct task_struct *__ia64_slfo_task = (t);					\
	__ia64_slfo_task->thread.last_fph_cpu = smp_processor_id();			\
	ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task);		\
} while (0)

/* Mark the fph partition of task T as being invalid on all CPUs.  */
#define ia64_drop_fpu(t)	((t)->thread.last_fph_cpu = -1)

extern void __ia64_init_fpu (void);
extern void __ia64_save_fpu (struct ia64_fpreg *fph);
extern void __ia64_load_fpu (struct ia64_fpreg *fph);
extern void ia64_save_debug_regs (unsigned long *save_area);
extern void ia64_load_debug_regs (unsigned long *save_area);

#define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
#define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)

/* load fp 0.0 into fph */
static inline void
ia64_init_fpu (void) {
	ia64_fph_enable();
	__ia64_init_fpu();
	ia64_fph_disable();
}

/* save f32-f127 at FPH */
static inline void
ia64_save_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_save_fpu(fph);
	ia64_fph_disable();
}

/* load f32-f127 from FPH */
static inline void
ia64_load_fpu (struct ia64_fpreg *fph) {
	ia64_fph_enable();
	__ia64_load_fpu(fph);
	ia64_fph_disable();
}

static inline __u64
ia64_clear_ic (void)
{
	__u64 psr;
	psr = ia64_getreg(_IA64_REG_PSR);
	ia64_stop();
	ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
	ia64_srlz_i();
	return psr;
}
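/*
 * Illustrative sketch (not part of this header, function name hypothetical):
 * the fph ownership macros above support lazy switching of the f32-f127
 * partition.  A task's high FP state is only written back to its thread
 * structure when another task needs the CPU's fph, roughly like this
 * (caller must have preemption disabled):
 */
#if 0
static void
example_give_up_fph (struct task_struct *task)
{
	if (ia64_is_local_fpu_owner(task)) {
		ia64_save_fpu(&task->thread.fph[0]);	/* spill live f32-f127 */
		task->thread.flags |= IA64_THREAD_FPH_VALID;
		ia64_drop_fpu(task);	/* no CPU holds task's fph any more */
	}
}
#endif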
/*
 * Restore the psr.
 */
static inline void
ia64_set_psr (__u64 psr)
{
	ia64_stop();
	ia64_setreg(_IA64_REG_PSR_L, psr);
	ia64_srlz_i();
}

/*
 * Insert a translation into an instruction and/or data translation
 * register.
 */
static inline void
ia64_itr (__u64 target_mask, __u64 tr_num,
	  __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	if (target_mask & 0x1)
		ia64_itri(tr_num, pte);
	if (target_mask & 0x2)
		ia64_itrd(tr_num, pte);
}

/*
 * Insert a translation into the instruction and/or data translation
 * cache.
 */
static inline void
ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
	  __u64 log_page_size)
{
	ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
	ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
	ia64_stop();
	/* as per EAS2.6, itc must be the last instruction in an instruction group */
	if (target_mask & 0x1)
		ia64_itci(pte);
	if (target_mask & 0x2)
		ia64_itcd(pte);
}

/*
 * Purge a range of addresses from instruction and/or data translation
 * register(s).
 */
static inline void
ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
{
	if (target_mask & 0x1)
		ia64_ptri(vmaddr, (log_size << 2));
	if (target_mask & 0x2)
		ia64_ptrd(vmaddr, (log_size << 2));
}

/* Set the interrupt vector address.  The address must be suitably aligned (32KB).  */
static inline void
ia64_set_iva (void *ivt_addr)
{
	ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
	ia64_srlz_i();
}
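/*
 * Illustrative sketch (not part of this header): translation-register
 * inserts are normally bracketed by ia64_clear_ic()/ia64_set_psr(), since
 * PSR.ic must be off while cr.itir/cr.ifa are live.  The TR slot constant
 * below is hypothetical:
 */
#if 0
static void
example_pin_dtr (unsigned long vaddr, unsigned long pteval)
{
	unsigned long psr;

	psr = ia64_clear_ic();
	ia64_itr(0x2, EXAMPLE_TR_SLOT, vaddr, pteval, PAGE_SHIFT);	/* dtr only */
	ia64_set_psr(psr);	/* restores PSR.ic */
	ia64_srlz_i();
}
#endif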
/* Set the page table address and control bits.  */
static inline void
ia64_set_pta (__u64 pta)
{
	/* Note: srlz.i implies srlz.d */
	ia64_setreg(_IA64_REG_CR_PTA, pta);
	ia64_srlz_i();
}
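/*
 * Illustrative note (not part of this header): the PTA value packs the VHPT
 * base address in the high bits, the table size (log2, in bytes) in bits
 * 7:2, and the enable bit in bit 0, so a caller composes it along the lines
 * of (names hypothetical):
 *
 *	ia64_set_pta(vhpt_base | (vhpt_size_log2 << 2) | 1);	// bit 0: VHPT enable
 */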