#ifndef _ASM_IA64_PROCESSOR_H
#define _ASM_IA64_PROCESSOR_H

/*
 * Copyright (C) 1998-2004 Hewlett-Packard Co
 *	David Mosberger-Tang
 *	Stephane Eranian
 * Copyright (C) 1999 Asit Mallick
 * Copyright (C) 1999 Don Dugger
 *
 * 11/24/98	S.Eranian	added ia64_set_iva()
 * 12/03/99	D. Mosberger	implement thread_saved_pc() via kernel unwind API
 * 06/16/00	A. Mallick	added csd/ssd/tssd for ia32 support
 */

#include <asm/intrinsics.h>
#include <asm/kregs.h>
#include <asm/ptrace.h>
#include <asm/ustack.h>

#define __ARCH_WANT_UNLOCKED_CTXSW
#define ARCH_HAS_PREFETCH_SWITCH_STACK

#define IA64_NUM_PHYS_STACK_REG	96
#define IA64_NUM_DBG_REGS	8

#define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
#define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)

/*
 * TASK_SIZE is really misnamed.  It really is the maximum user
 * space address (plus one).  On IA-64, there are five regions of 2TB
 * each (assuming 8KB page size), for a total of 8TB of user virtual
 * address space.
 */
#define TASK_SIZE		DEFAULT_TASK_SIZE

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE	(current->thread.map_base)

#define IA64_THREAD_FPH_VALID	(__IA64_UL(1) << 0)	/* floating-point high state valid? */
#define IA64_THREAD_DBG_VALID	(__IA64_UL(1) << 1)	/* debug registers valid? */
#define IA64_THREAD_PM_VALID	(__IA64_UL(1) << 2)	/* performance registers valid? */
#define IA64_THREAD_UAC_NOPRINT	(__IA64_UL(1) << 3)	/* don't log unaligned accesses */
#define IA64_THREAD_UAC_SIGBUS	(__IA64_UL(1) << 4)	/* generate SIGBUS on unaligned acc. */
#define IA64_THREAD_MIGRATION	(__IA64_UL(1) << 5)	/* require migration sync at ctx sw */
#define IA64_THREAD_FPEMU_NOPRINT (__IA64_UL(1) << 6)	/* don't log any fpswa faults */
#define IA64_THREAD_FPEMU_SIGFPE  (__IA64_UL(1) << 7)	/* send a SIGFPE for fpswa faults */

#define IA64_THREAD_UAC_SHIFT	3
#define IA64_THREAD_UAC_MASK	(IA64_THREAD_UAC_NOPRINT | IA64_THREAD_UAC_SIGBUS)
#define IA64_THREAD_FPEMU_SHIFT	6
#define IA64_THREAD_FPEMU_MASK	(IA64_THREAD_FPEMU_NOPRINT | IA64_THREAD_FPEMU_SIGFPE)

/*
 * This shift should be large enough to be able to represent 1000000000/itc_freq with good
 * accuracy while being small enough to fit 10*1000000000<<IA64_NSEC_PER_CYC_SHIFT in 64 bits
 * (this will give enough slack to represent 10 seconds worth of time as a scaled number).
 */
#define IA64_NSEC_PER_CYC_SHIFT	30

#ifndef __ASSEMBLY__

#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/threads.h>
#include <linux/types.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/percpu.h>
#include <asm/rse.h>
#include <asm/unwind.h>
#include <linux/atomic.h>
#ifdef CONFIG_NUMA
#include <asm/nodedata.h>
#endif

/* like above but expressed as bitfields for more efficient access: */
struct ia64_psr {
	__u64 reserved0 : 1;
	__u64 be : 1;
	__u64 up : 1;
	__u64 ac : 1;
	__u64 mfl : 1;
	__u64 mfh : 1;
	__u64 reserved1 : 7;
	__u64 ic : 1;
	__u64 i : 1;
	__u64 pk : 1;
	__u64 reserved2 : 1;
	__u64 dt : 1;
	__u64 dfl : 1;
	__u64 dfh : 1;
	__u64 sp : 1;
	__u64 pp : 1;
	__u64 di : 1;
	__u64 si : 1;
	__u64 db : 1;
	__u64 lp : 1;
	__u64 tb : 1;
	__u64 rt : 1;
	__u64 reserved3 : 4;
	__u64 cpl : 2;
	__u64 is : 1;
	__u64 mc : 1;
	__u64 it : 1;
	__u64 id : 1;
	__u64 da : 1;
	__u64 dd : 1;
	__u64 ss : 1;
	__u64 ri : 2;
	__u64 ed : 1;
	__u64 bn : 1;
	__u64 reserved4 : 19;
};

union ia64_isr {
	__u64 val;
	struct {
		__u64 code : 16;
		__u64 vector : 8;
		__u64 reserved1 : 8;
		__u64 x : 1;
		__u64 w : 1;
		__u64 r : 1;
		__u64 na : 1;
		__u64 sp : 1;
		__u64 rs : 1;
		__u64 ir : 1;
		__u64 ni : 1;
		__u64 so : 1;
		__u64 ei : 2;
		__u64 ed : 1;
		__u64 reserved2 : 20;
	};
};

union ia64_lid {
	__u64 val;
	struct {
		__u64 rv : 16;
		__u64 eid : 8;
		__u64 id : 8;
		__u64 ig : 32;
	};
};

union ia64_tpr {
	__u64 val;
	struct {
		__u64 ig0 : 4;
		__u64 mic : 4;
		__u64 rsv : 8;
		__u64 mmi : 1;
		__u64 ig1 : 47;
	};
};
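/*
 * Illustrative sketch, not part of the original header: the bitfield
 * and union types above are meant to be overlaid on raw register
 * images.  For example, the user-level psr saved in a struct pt_regs
 * can be inspected through struct ia64_psr.  The helper name below is
 * hypothetical.
 */
static inline int example_user_is_big_endian (struct pt_regs *regs)
{
	/* view the raw cr_ipsr image through the ia64_psr bitfield layout */
	struct ia64_psr *psr = (struct ia64_psr *) &regs->cr_ipsr;

	return psr->be;		/* psr.be == 1: user runs big-endian */
}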
union ia64_itir {
	__u64 val;
	struct {
		__u64 rv3  :  2; /* 0-1 */
		__u64 ps   :  6; /* 2-7 */
		__u64 key  : 24; /* 8-31 */
		__u64 rv4  : 32; /* 32-63 */
	};
};

union ia64_rr {
	__u64 val;
	struct {
		__u64 ve	:  1;  /* enable hw walker */
		__u64 reserved0	:  1;  /* reserved */
		__u64 ps	:  6;  /* log page size */
		__u64 rid	: 24;  /* region id */
		__u64 reserved1	: 32;  /* reserved */
	};
};

/*
 * CPU type, hardware bug flags, and per-CPU state.  Frequently used
 * state comes earlier:
 */
struct cpuinfo_ia64 {
	unsigned int softirq_pending;
	unsigned long itm_delta;	/* # of clock cycles between clock ticks */
	unsigned long itm_next;		/* interval timer mask value to use for next clock tick */
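	/*
	 * Illustrative note, not from the original source: itm_delta is
	 * (roughly) the ITC frequency divided by HZ, and itm_next is
	 * advanced by itm_delta on each timer tick.  Cycle counts are
	 * converted to nanoseconds with the fixed-point shift defined
	 * above, approximately (for a given itc_freq):
	 *
	 *   nsec_per_cyc ~= (1000000000UL << IA64_NSEC_PER_CYC_SHIFT) / itc_freq;
	 *   nsecs        ~= (cycles * nsec_per_cyc) >> IA64_NSEC_PER_CYC_SHIFT;
	 */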