|
@@ -373,3 +373,165 @@ extern void ia64_setreg_unknown_kr (void);
|
|
|
} \
|
|
|
r; \
|
|
|
})
|
|
|
+
|
|
|
+/*
+ * Write value R into kernel register ar.k<REGNUM>.  REGNUM should be a
+ * compile-time constant in 0..7; any other value reaches the default
+ * arm, which calls the extern ia64_setreg_unknown_kr() — presumably
+ * left undefined so a bad constant fails at link time (NOTE(review):
+ * confirm no definition exists elsewhere).
+ */
+#define ia64_set_kr(regnum, r) \
|
|
|
+({ \
|
|
|
+ switch (regnum) { \
|
|
|
+ case 0: ia64_setreg(_IA64_REG_AR_KR0, r); break; \
|
|
|
+ case 1: ia64_setreg(_IA64_REG_AR_KR1, r); break; \
|
|
|
+ case 2: ia64_setreg(_IA64_REG_AR_KR2, r); break; \
|
|
|
+ case 3: ia64_setreg(_IA64_REG_AR_KR3, r); break; \
|
|
|
+ case 4: ia64_setreg(_IA64_REG_AR_KR4, r); break; \
|
|
|
+ case 5: ia64_setreg(_IA64_REG_AR_KR5, r); break; \
|
|
|
+ case 6: ia64_setreg(_IA64_REG_AR_KR6, r); break; \
|
|
|
+ case 7: ia64_setreg(_IA64_REG_AR_KR7, r); break; \
|
|
|
+ default: ia64_setreg_unknown_kr(); break; \
|
|
|
+ } \
|
|
|
+})
|
|
|
+
|
|
|
+/*
|
|
|
+ * The following three macros can't be inline functions because we don't have struct
|
|
|
+ * task_struct at this point.
|
|
|
+ */
|
|
|
+
|
|
|
+/*
|
|
|
+ * Return TRUE if task T owns the fph partition of the CPU we're running on.
|
|
|
+ * Must be called from code that has preemption disabled.
|
|
|
+ */
|
|
|
+/*
+ * Ownership requires BOTH conditions: task t last used fph on this CPU
+ * (thread.last_fph_cpu == smp_processor_id()) AND t is still recorded
+ * as the owner in the IA64_KR_FPU_OWNER kernel register of this CPU.
+ */
+#define ia64_is_local_fpu_owner(t) \
|
|
|
+({ \
|
|
|
+ struct task_struct *__ia64_islfo_task = (t); \
|
|
|
+ (__ia64_islfo_task->thread.last_fph_cpu == smp_processor_id() \
|
|
|
+ && __ia64_islfo_task == (struct task_struct *) ia64_get_kr(IA64_KR_FPU_OWNER)); \
|
|
|
+})
|
|
|
+
|
|
|
+/*
|
|
|
+ * Mark task T as owning the fph partition of the CPU we're running on.
|
|
|
+ * Must be called from code that has preemption disabled.
|
|
|
+ */
|
|
|
+/*
+ * Records both halves of the ownership test used by
+ * ia64_is_local_fpu_owner(): the per-task last_fph_cpu and this CPU's
+ * IA64_KR_FPU_OWNER kernel register.
+ */
+#define ia64_set_local_fpu_owner(t) do { \
|
|
|
+ struct task_struct *__ia64_slfo_task = (t); \
|
|
|
+ __ia64_slfo_task->thread.last_fph_cpu = smp_processor_id(); \
|
|
|
+ ia64_set_kr(IA64_KR_FPU_OWNER, (unsigned long) __ia64_slfo_task); \
|
|
|
+} while (0)
|
|
|
+
|
|
|
+/* Mark the fph partition of task T as being invalid on all CPUs. */
|
|
|
+/* -1 never matches smp_processor_id(), so ia64_is_local_fpu_owner(t)
+   subsequently fails on every CPU. */
+#define ia64_drop_fpu(t) ((t)->thread.last_fph_cpu = -1)
|
|
|
+
|
|
|
+extern void __ia64_init_fpu (void);
|
|
|
+extern void __ia64_save_fpu (struct ia64_fpreg *fph);
|
|
|
+extern void __ia64_load_fpu (struct ia64_fpreg *fph);
|
|
|
+extern void ia64_save_debug_regs (unsigned long *save_area);
|
|
|
+extern void ia64_load_debug_regs (unsigned long *save_area);
|
|
|
+
|
|
|
+/*
+ * psr.dfh gates access to the high FP partition (f32-f127, "fph").
+ * Clearing the bit (rsm) enables access, setting it (ssm) disables it;
+ * the data serialize (srlz.d) makes the psr change take effect before
+ * the next fph access.
+ */
+#define ia64_fph_enable() do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
|
|
|
+#define ia64_fph_disable() do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
|
|
|
+
|
|
|
+/* load fp 0.0 into fph */
+/* Temporarily enables fph access around the asm helper, then disables
+   it again so stray fph use keeps faulting. */
|
|
|
+static inline void
|
|
|
+ia64_init_fpu (void) {
|
|
|
+ ia64_fph_enable();
|
|
|
+ __ia64_init_fpu();
|
|
|
+ ia64_fph_disable();
|
|
|
+}
|
|
|
+
|
|
|
+/* save f32-f127 at FPH */
+/* Brackets the asm save helper with fph enable/disable; fph must be
+   accessible (psr.dfh clear) while the registers are read. */
|
|
|
+static inline void
|
|
|
+ia64_save_fpu (struct ia64_fpreg *fph) {
|
|
|
+ ia64_fph_enable();
|
|
|
+ __ia64_save_fpu(fph);
|
|
|
+ ia64_fph_disable();
|
|
|
+}
|
|
|
+
|
|
|
+/* load f32-f127 from FPH */
+/* Mirror of ia64_save_fpu(): enable fph, run the asm load helper,
+   disable fph again. */
|
|
|
+static inline void
|
|
|
+ia64_load_fpu (struct ia64_fpreg *fph) {
|
|
|
+ ia64_fph_enable();
|
|
|
+ __ia64_load_fpu(fph);
|
|
|
+ ia64_fph_disable();
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * Disable interrupts (psr.i) and interruption collection (psr.ic) on
+ * the local CPU.  Returns the psr value read BEFORE the rsm so the
+ * caller can restore it later with ia64_set_psr().  The ia64_stop()
+ * orders the psr read before the rsm; srlz.i makes the change take
+ * effect.
+ */
+static inline __u64
|
|
|
+ia64_clear_ic (void)
|
|
|
+{
|
|
|
+ __u64 psr;
|
|
|
+ psr = ia64_getreg(_IA64_REG_PSR);
|
|
|
+ ia64_stop();
|
|
|
+ ia64_rsm(IA64_PSR_I | IA64_PSR_IC);
|
|
|
+ ia64_srlz_i();
|
|
|
+ return psr;
|
|
|
+}
|
|
|
+
|
|
|
+/*
+ * Restore the psr.
+ * Writes only the lower psr bits (_IA64_REG_PSR_L); typically given
+ * the value returned by ia64_clear_ic().  srlz.i forces the new psr
+ * to take effect before subsequent instructions.
+ */
|
|
|
+static inline void
|
|
|
+ia64_set_psr (__u64 psr)
|
|
|
+{
|
|
|
+ ia64_stop();
|
|
|
+ ia64_setreg(_IA64_REG_PSR_L, psr);
|
|
|
+ ia64_srlz_i();
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Insert a translation into an instruction and/or data translation
|
|
|
+ * register.
|
|
|
+ */
|
|
|
+/*
+ * target_mask: bit 0 -> insert into instruction TR tr_num (itr.i),
+ *              bit 1 -> insert into data TR tr_num (itr.d); both bits
+ *              may be set.
+ * The page size is staged in cr.itir (ps field starts at bit 2, hence
+ * the << 2) and the virtual address in cr.ifa before the insert;
+ * ia64_stop() separates the staging writes from the itr itself.
+ */
+static inline void
|
|
|
+ia64_itr (__u64 target_mask, __u64 tr_num,
|
|
|
+ __u64 vmaddr, __u64 pte,
|
|
|
+ __u64 log_page_size)
|
|
|
+{
|
|
|
+ ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
|
|
|
+ ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
|
|
|
+ ia64_stop();
|
|
|
+ if (target_mask & 0x1)
|
|
|
+ ia64_itri(tr_num, pte);
|
|
|
+ if (target_mask & 0x2)
|
|
|
+ ia64_itrd(tr_num, pte);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Insert a translation into the instruction and/or data translation
|
|
|
+ * cache.
|
|
|
+ */
|
|
|
+/*
+ * target_mask: bit 0 -> insert into the instruction TC (itc.i),
+ *              bit 1 -> insert into the data TC (itc.d).
+ * Same staging protocol as ia64_itr(): page size into cr.itir
+ * (<< 2 to reach the ps field), address into cr.ifa, then a stop
+ * before the insert itself.
+ */
+static inline void
|
|
|
+ia64_itc (__u64 target_mask, __u64 vmaddr, __u64 pte,
|
|
|
+ __u64 log_page_size)
|
|
|
+{
|
|
|
+ ia64_setreg(_IA64_REG_CR_ITIR, (log_page_size << 2));
|
|
|
+ ia64_setreg(_IA64_REG_CR_IFA, vmaddr);
|
|
|
+ ia64_stop();
|
|
|
+ /* as per EAS2.6, itc must be the last instruction in an instruction group */
|
|
|
+ if (target_mask & 0x1)
|
|
|
+ ia64_itci(pte);
|
|
|
+ if (target_mask & 0x2)
|
|
|
+ ia64_itcd(pte);
|
|
|
+}
|
|
|
+
|
|
|
+/*
|
|
|
+ * Purge a range of addresses from instruction and/or data translation
|
|
|
+ * register(s).
|
|
|
+ */
|
|
|
+/*
+ * target_mask: bit 0 -> purge instruction TR(s) (ptr.i),
+ *              bit 1 -> purge data TR(s) (ptr.d).
+ * log_size is shifted into the page-size field (bits 7:2) of the
+ * purge operand, matching the itir encoding used on insertion.
+ */
+static inline void
|
|
|
+ia64_ptr (__u64 target_mask, __u64 vmaddr, __u64 log_size)
|
|
|
+{
|
|
|
+ if (target_mask & 0x1)
|
|
|
+ ia64_ptri(vmaddr, (log_size << 2));
|
|
|
+ if (target_mask & 0x2)
|
|
|
+ ia64_ptrd(vmaddr, (log_size << 2));
|
|
|
+}
|
|
|
+
|
|
|
+/* Set the interrupt vector address. The address must be suitably aligned (32KB). */
+/* Writes cr.iva; the instruction serialize (srlz.i) ensures later
+   interruptions observe the new vector table. */
|
|
|
+static inline void
|
|
|
+ia64_set_iva (void *ivt_addr)
|
|
|
+{
|
|
|
+ ia64_setreg(_IA64_REG_CR_IVA, (__u64) ivt_addr);
|
|
|
+ ia64_srlz_i();
|
|
|
+}
|
|
|
+
|
|
|
+/* Set the page table address and control bits. */
|
|
|
+static inline void
|
|
|
+ia64_set_pta (__u64 pta)
|
|
|
+{
|
|
|
+ /* Note: srlz.i implies srlz.d */
|