|
@@ -192,3 +192,184 @@ struct cpuinfo_ia64 {
|
|
|
	/* Remainder of struct cpuinfo_ia64 (per-CPU processor info; struct opens above). */
	unsigned int softirq_pending;
	unsigned long itm_delta;	/* # of clock cycles between clock ticks */
	unsigned long itm_next;		/* interval timer mask value to use for next clock tick */
	unsigned long nsec_per_cyc;	/* (1000000000<<IA64_NSEC_PER_CYC_SHIFT)/itc_freq */
	unsigned long unimpl_va_mask;	/* mask of unimplemented virtual address bits (from PAL) */
	unsigned long unimpl_pa_mask;	/* mask of unimplemented physical address bits (from PAL) */
	unsigned long itc_freq;		/* frequency of ITC counter */
	unsigned long proc_freq;	/* frequency of processor */
	unsigned long cyc_per_usec;	/* itc_freq/1000000 */
	unsigned long ptce_base;	/* ptc.e purge parameters; presumably from PAL_PTCE_INFO — verify */
	unsigned int ptce_count[2];	/* outer/inner loop counts for ptc.e */
	unsigned int ptce_stride[2];	/* outer/inner address strides for ptc.e */
	struct task_struct *ksoftirqd;	/* kernel softirq daemon for this CPU */

#ifdef CONFIG_SMP
	unsigned long loops_per_jiffy;
	int cpu;
	unsigned int socket_id;	/* physical processor socket id */
	unsigned short core_id;	/* core id */
	unsigned short thread_id;	/* thread id */
	unsigned short num_log;	/* Total number of logical processors on
				 * this socket that were successfully booted */
	unsigned char cores_per_socket;	/* Cores per processor socket */
	unsigned char threads_per_core;	/* Threads per core */
#endif

	/* CPUID-derived information: */
	unsigned long ppn;
	unsigned long features;
	unsigned char number;
	unsigned char revision;
	unsigned char model;
	unsigned char family;
	unsigned char archrev;
	char vendor[16];
	char *model_name;

#ifdef CONFIG_NUMA
	struct ia64_node_data *node_data;
#endif
};
|
|
|
+
|
|
|
/* One cpuinfo_ia64 instance exists per CPU. */
DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);

/*
 * The "local" data variable.  It refers to the per-CPU data of the currently executing
 * CPU, much like "current" points to the per-task data of the currently executing task.
 * Do not use the address of local_cpu_data, since it will be different from
 * cpu_data(smp_processor_id())!
 */
#define local_cpu_data	(&__ia64_per_cpu_var(ia64_cpu_info))
#define cpu_data(cpu)	(&per_cpu(ia64_cpu_info, cpu))

extern void print_cpu_info (struct cpuinfo_ia64 *);
|
|
|
+
|
|
|
/* mm_segment_t: wraps a single segment/address-limit value. */
typedef struct {
	unsigned long seg;
} mm_segment_t;
|
|
|
+
|
|
|
/*
 * Replace the unaligned-access-control (UAC) bits in the task's thread flags
 * with `value` shifted into the IA64_THREAD_UAC field.  The statement
 * expression evaluates to 0 (success).
 */
#define SET_UNALIGN_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK)			\
				| (((value) << IA64_THREAD_UAC_SHIFT) & IA64_THREAD_UAC_MASK));	\
	0;											\
})
|
|
|
/*
 * Copy the task's unaligned-access-control (UAC) bits, shifted down to a
 * plain value, into the user-space int at `addr`.  Evaluates to
 * put_user()'s return value.
 */
#define GET_UNALIGN_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_UAC_MASK) >> IA64_THREAD_UAC_SHIFT,	\
		 (int __user *) (addr));							\
})
|
|
|
+
|
|
|
/*
 * Replace the floating-point-emulation control bits in the task's thread
 * flags with `value` shifted into the IA64_THREAD_FPEMU field.  The
 * statement expression evaluates to 0 (success).
 */
#define SET_FPEMU_CTL(task,value)								\
({												\
	(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_FPEMU_MASK)		\
				| (((value) << IA64_THREAD_FPEMU_SHIFT) & IA64_THREAD_FPEMU_MASK)); \
	0;											\
})
|
|
|
/*
 * Copy the task's floating-point-emulation control bits, shifted down to a
 * plain value, into the user-space int at `addr`.  Evaluates to
 * put_user()'s return value.
 */
#define GET_FPEMU_CTL(task,addr)								\
({												\
	put_user(((task)->thread.flags & IA64_THREAD_FPEMU_MASK) >> IA64_THREAD_FPEMU_SHIFT,	\
		 (int __user *) (addr));							\
})
|
|
|
+
|
|
|
/*
 * Architecture-specific per-thread state kept in the task structure.
 * NOTE(review): field offsets are likely consumed outside this file
 * (e.g. by generated asm offsets) — do not reorder without checking.
 */
struct thread_struct {
	__u32 flags;	/* various thread flags (see IA64_THREAD_*) */
	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
	__u8 on_ustack;	/* executing on user-stacks? */
	__u8 pad[3];
	__u64 ksp;	/* kernel stack pointer */
	__u64 map_base;	/* base address for get_unmapped_area() */
	__u64 rbs_bot;	/* the base address for the RBS */
	int last_fph_cpu;	/* CPU that may hold the contents of f32-f127 */

#ifdef CONFIG_PERFMON
	void *pfm_context;		  /* pointer to detailed PMU context */
	unsigned long pfm_needs_checking; /* when >0, pending perfmon work on kernel exit */
	/* perfmon members' piece of the INIT_THREAD initializer (see below). */
# define INIT_THREAD_PM		.pfm_context =		NULL,	\
				.pfm_needs_checking =	0UL,
#else
# define INIT_THREAD_PM
#endif
	unsigned long dbr[IA64_NUM_DBG_REGS];	/* data breakpoint registers */
	unsigned long ibr[IA64_NUM_DBG_REGS];	/* instruction breakpoint registers */
	struct ia64_fpreg fph[96];	/* saved/loaded on demand */
};
|
|
|
+
|
|
|
/*
 * Static initializer for struct thread_struct: everything zeroed except
 * map_base/rbs_bot (sensible user VM defaults) and last_fph_cpu = -1
 * (no CPU holds this thread's f32-f127 contents yet).
 */
#define INIT_THREAD {						\
	.flags =	0,					\
	.on_ustack =	0,					\
	.ksp =		0,					\
	.map_base =	DEFAULT_MAP_BASE,			\
	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
	.last_fph_cpu =  -1,					\
	INIT_THREAD_PM						\
	.dbr =		{0, },					\
	.ibr =		{0, },					\
	.fph =		{{{{0}}}, }				\
}
|
|
|
+
|
|
|
/*
 * Set up user register state for a freshly exec'd task: user-mode PSR
 * bits, entry IP, eager RSE mode at privilege level 3, the RBS base,
 * default FPSR, and a stack pointer with a 16-byte scratch area.  When
 * the task is not dumpable (privilege transition), scratch registers are
 * zeroed so no bits leak from the previous image.
 */
#define start_thread(regs,new_ip,new_sp) do {							\
	regs->cr_ipsr = ((regs->cr_ipsr | (IA64_PSR_BITS_TO_SET | IA64_PSR_CPL))		\
			 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_RI | IA64_PSR_IS));		\
	regs->cr_iip = new_ip;									\
	regs->ar_rsc = 0xf;		/* eager mode, privilege level 3 */			\
	regs->ar_rnat = 0;									\
	regs->ar_bspstore = current->thread.rbs_bot;						\
	regs->ar_fpsr = FPSR_DEFAULT;								\
	regs->loadrs = 0;									\
	regs->r8 = get_dumpable(current->mm);	/* set "don't zap registers" flag */		\
	regs->r12 = new_sp - 16;	/* allocate 16 byte scratch area */			\
	if (unlikely(!get_dumpable(current->mm))) {						\
		/*										\
		 * Zap scratch regs to avoid leaking bits between processes with different	\
		 * uid/privileges.								\
		 */										\
		regs->ar_pfs = 0; regs->b0 = 0; regs->pr = 0;					\
		regs->r1 = 0; regs->r9 = 0; regs->r11 = 0; regs->r13 = 0; regs->r15 = 0;	\
	}											\
} while (0)
|
|
|
+
|
|
|
/* Forward declarations, a strange C thing... */
struct mm_struct;
struct task_struct;

/*
 * Free all resources held by a thread. This is called after the
 * parent of DEAD_TASK has collected the exit status of the task via
 * wait().  Nothing to free here, so the macro expands to nothing.
 */
#define release_thread(dead_task)
|
|
|
+
|
|
|
/* Get wait channel for task P. */
extern unsigned long get_wchan (struct task_struct *p);

/*
 * Return instruction pointer of blocked task TSK: the bundle address
 * (cr_iip) plus the restart-instruction slot number (psr.ri) within it.
 */
#define KSTK_EIP(tsk)					\
  ({							\
	struct pt_regs *_regs = task_pt_regs(tsk);	\
	_regs->cr_iip + ia64_psr(_regs)->ri;		\
  })

/* Return stack pointer of blocked task TSK (its saved kernel stack pointer). */
#define KSTK_ESP(tsk)  ((tsk)->thread.ksp)
|
|
|
+
|
|
|
/*
 * Diagnostic hooks referenced from the default case of ia64_get_kr()
 * below; presumably never defined anywhere, so an out-of-range constant
 * register number turns into a link-time failure — verify.
 */
extern void ia64_getreg_unknown_kr (void);
extern void ia64_setreg_unknown_kr (void);

/*
 * Read kernel register AR.KR0..AR.KR7 selected by `regnum`.  The
 * statement expression evaluates to the register's value; the switch is
 * needed because ia64_getreg() requires a compile-time register selector.
 */
#define ia64_get_kr(regnum)					\
({								\
	unsigned long r = 0;					\
								\
	switch (regnum) {					\
	case 0: r = ia64_getreg(_IA64_REG_AR_KR0); break;	\
	case 1: r = ia64_getreg(_IA64_REG_AR_KR1); break;	\
	case 2: r = ia64_getreg(_IA64_REG_AR_KR2); break;	\
	case 3: r = ia64_getreg(_IA64_REG_AR_KR3); break;	\
	case 4: r = ia64_getreg(_IA64_REG_AR_KR4); break;	\
	case 5: r = ia64_getreg(_IA64_REG_AR_KR5); break;	\
	case 6: r = ia64_getreg(_IA64_REG_AR_KR6); break;	\
	case 7: r = ia64_getreg(_IA64_REG_AR_KR7); break;	\
	default: ia64_getreg_unknown_kr(); break;		\
	}							\
	r;							\
})
|