| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194 | #ifndef _ASM_IA64_PROCESSOR_H#define _ASM_IA64_PROCESSOR_H/* * Copyright (C) 1998-2004 Hewlett-Packard Co *	David Mosberger-Tang <davidm@hpl.hp.com> *	Stephane Eranian <eranian@hpl.hp.com> * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com> * Copyright (C) 1999 Don Dugger <don.dugger@intel.com> * * 11/24/98	S.Eranian	added ia64_set_iva() * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support */#include <asm/intrinsics.h>#include <asm/kregs.h>#include <asm/ptrace.h>#include <asm/ustack.h>#define __ARCH_WANT_UNLOCKED_CTXSW#define ARCH_HAS_PREFETCH_SWITCH_STACK#define IA64_NUM_PHYS_STACK_REG	96#define IA64_NUM_DBG_REGS	8#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)/* * TASK_SIZE really is a mis-named.  It really is the maximum user * space address (plus one).  On IA-64, there are five regions of 2TB * each (assuming 8KB page size), for a total of 8TB of user virtual * address space. */#define TASK_SIZE       	DEFAULT_TASK_SIZE/* * This decides where the kernel will search for a free chunk of vm * space during mmap's. */#define TASK_UNMAPPED_BASE	(current->thread.map_base)#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? 
*/#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration							   sync at ctx sw */#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */#define IA64_THREAD_UAC_SHIFT	3#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)#define IA64_THREAD_FPEMU_SHIFT	6#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)/* * This shift should be large enough to be able to represent 1000000000/itc_freq with good * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits * (this will give enough slack to represent 10 seconds worth of time as a scaled number). */#define IA64_NSEC_PER_CYC_SHIFT	30#ifndef __ASSEMBLY__#include <linux/cache.h>#include <linux/compiler.h>#include <linux/threads.h>#include <linux/types.h>#include <asm/fpu.h>#include <asm/page.h>#include <asm/percpu.h>#include <asm/rse.h>#include <asm/unwind.h>#include <linux/atomic.h>#ifdef CONFIG_NUMA#include <asm/nodedata.h>#endif/* like above but expressed as bitfields for more efficient access: */struct ia64_psr {	__u64 reserved0 : 1;	__u64 be : 1;	__u64 up : 1;	__u64 ac : 1;	__u64 mfl : 1;	__u64 mfh : 1;	__u64 reserved1 : 7;	__u64 ic : 1;	__u64 i : 1;	__u64 pk : 1;	__u64 reserved2 : 1;	__u64 dt : 1;	__u64 dfl : 1;	__u64 dfh : 1;	__u64 sp : 1;	__u64 pp : 1;	__u64 di : 1;	__u64 si : 1;	__u64 db : 1;	__u64 lp : 1;	__u64 tb : 1;	__u64 rt : 1;	__u64 reserved3 : 4;	__u64 cpl : 2;	__u64 is : 1;	__u64 mc : 1;	__u64 it : 1;	__u64 id : 1;	__u64 da : 1;	__u64 dd : 1;	__u64 ss : 1;	__u64 ri : 2;	__u64 ed : 1;	__u64 bn : 1;	__u64 reserved4 : 19;};union ia64_isr {	__u64  val;	struct {		__u64 code : 16;		__u64 vector : 8;		__u64 
reserved1 : 8;		__u64 x : 1;		__u64 w : 1;		__u64 r : 1;		__u64 na : 1;		__u64 sp : 1;		__u64 rs : 1;		__u64 ir : 1;		__u64 ni : 1;		__u64 so : 1;		__u64 ei : 2;		__u64 ed : 1;		__u64 reserved2 : 20;	};};union ia64_lid {	__u64 val;	struct {		__u64  rv  : 16;		__u64  eid : 8;		__u64  id  : 8;		__u64  ig  : 32;	};};union ia64_tpr {	__u64 val;	struct {		__u64 ig0 : 4;		__u64 mic : 4;		__u64 rsv : 8;		__u64 mmi : 1;		__u64 ig1 : 47;	};};union ia64_itir {	__u64 val;	struct {		__u64 rv3  :  2; /* 0-1 */		__u64 ps   :  6; /* 2-7 */		__u64 key  : 24; /* 8-31 */		__u64 rv4  : 32; /* 32-63 */	};};union  ia64_rr {	__u64 val;	struct {		__u64  ve	:  1;  /* enable hw walker */		__u64  reserved0:  1;  /* reserved */		__u64  ps	:  6;  /* log page size */		__u64  rid	: 24;  /* region id */		__u64  reserved1: 32;  /* reserved */	};};/* * CPU type, hardware bug flags, and per-CPU state.  Frequently used * state comes earlier: */struct cpuinfo_ia64 {	unsigned int softirq_pending;	unsigned long itm_delta;	/* # of clock cycles between clock ticks */	unsigned long itm_next;		/* interval timer mask value to use for next clock tick */