/******************************************************************************
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#ifndef _ASM_IA64_PARAVIRT_PRIVOP_H
#define _ASM_IA64_PARAVIRT_PRIVOP_H

#ifdef CONFIG_PARAVIRT

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <asm/kregs.h> /* for IA64_PSR_I */

/******************************************************************************
 * replacement of intrinsics operations.
 */

struct pv_cpu_ops {
	void (*fc)(void *addr);
	unsigned long (*thash)(unsigned long addr);
	unsigned long (*get_cpuid)(int index);
	unsigned long (*get_pmd)(int index);
	unsigned long (*getreg)(int reg);
	void (*setreg)(int reg, unsigned long val);
	void (*ptcga)(unsigned long addr, unsigned long size);
	unsigned long (*get_rr)(unsigned long index);
	void (*set_rr)(unsigned long index, unsigned long val);
	void (*set_rr0_to_rr4)(unsigned long val0, unsigned long val1,
			       unsigned long val2, unsigned long val3,
			       unsigned long val4);
	void (*ssm_i)(void);
	void (*rsm_i)(void);
	unsigned long (*get_psr_i)(void);
	void (*intrin_local_irq_restore)(unsigned long flags);
};

extern struct pv_cpu_ops pv_cpu_ops;

extern void ia64_native_setreg_func(int regnum, unsigned long val);
extern unsigned long ia64_native_getreg_func(int regnum);
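/*
 * Illustrative sketch only (not part of this header): a paravirtualized
 * platform is expected to point the pv_cpu_ops hooks above at its own
 * handlers during early boot.  The xyz_* names below are hypothetical;
 * only the field names and the ia64_native_*reg_func() declarations come
 * from this file.
 *
 *	static unsigned long xyz_getreg(int regnum)
 *	{
 *		// intercept privileged reads here; fall back to the
 *		// native accessor for everything else
 *		return ia64_native_getreg_func(regnum);
 *	}
 *
 *	static void xyz_init_pv_cpu_ops(void)
 *	{
 *		pv_cpu_ops.getreg = xyz_getreg;
 *		pv_cpu_ops.setreg = ia64_native_setreg_func;
 *	}
 */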
/************************************************/
/* Instructions paravirtualized for performance */
/************************************************/

#ifndef ASM_SUPPORTED
#define paravirt_ssm_i()	pv_cpu_ops.ssm_i()
#define paravirt_rsm_i()	pv_cpu_ops.rsm_i()
#define __paravirt_getreg(reg)	pv_cpu_ops.getreg(reg)
#endif

/* the mask for ia64_native_ssm/rsm() must be constant ("i" constraint).
 * a static inline function doesn't satisfy it. */
#define paravirt_ssm(mask)			\
	do {					\
		if ((mask) == IA64_PSR_I)	\
			paravirt_ssm_i();	\
		else				\
			ia64_native_ssm(mask);	\
	} while (0)

#define paravirt_rsm(mask)			\
	do {					\
		if ((mask) == IA64_PSR_I)	\
			paravirt_rsm_i();	\
		else				\
			ia64_native_rsm(mask);	\
	} while (0)

/* the returned ip value should be the one in the caller,
 * not the one in __paravirt_getreg() */
#define paravirt_getreg(reg)					\
	({							\
		unsigned long res;				\
		if ((reg) == _IA64_REG_IP)			\
			res = ia64_native_getreg(_IA64_REG_IP); \
		else						\
			res = __paravirt_getreg(reg);		\
		res;						\
	})

/******************************************************************************
 * replacement of hand written assembly codes.
 */
struct pv_cpu_asm_switch {
	unsigned long switch_to;
	unsigned long leave_syscall;
	unsigned long work_processed_syscall;
	unsigned long leave_kernel;
};
void paravirt_cpu_asm_init(const struct pv_cpu_asm_switch *cpu_asm_switch);

#endif /* __ASSEMBLY__ */

#define IA64_PARAVIRT_ASM_FUNC(name)	paravirt_ ## name

#else

/* fallback for native case */
#define IA64_PARAVIRT_ASM_FUNC(name)	ia64_native_ ## name

#endif /* CONFIG_PARAVIRT */

#if defined(CONFIG_PARAVIRT) && defined(ASM_SUPPORTED)
#define paravirt_dv_serialize_data()	ia64_dv_serialize_data()
#else
#define paravirt_dv_serialize_data()	/* nothing */
#endif

/* these routines utilize privilege-sensitive or performance-sensitive
 * privileged instructions, so the code must be replaced with
 * paravirtualized versions */
#define ia64_switch_to			IA64_PARAVIRT_ASM_FUNC(switch_to)
#define ia64_leave_syscall		IA64_PARAVIRT_ASM_FUNC(leave_syscall)
#define ia64_work_processed_syscall	\
	IA64_PARAVIRT_ASM_FUNC(work_processed_syscall)
#define ia64_leave_kernel		IA64_PARAVIRT_ASM_FUNC(leave_kernel)
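/*
 * For illustration (derived from the macros above): with CONFIG_PARAVIRT,
 * ia64_switch_to expands through IA64_PARAVIRT_ASM_FUNC(switch_to) to
 * paravirt_switch_to; without it, the native fallback yields
 * ia64_native_switch_to.  Callers keep using the historical ia64_*
 * names either way.
 */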
#if defined(CONFIG_PARAVIRT)
/******************************************************************************
 * binary patching infrastructure
 */
#define PARAVIRT_PATCH_TYPE_FC				1
#define PARAVIRT_PATCH_TYPE_THASH			2
#define PARAVIRT_PATCH_TYPE_GET_CPUID			3
#define PARAVIRT_PATCH_TYPE_GET_PMD			4
#define PARAVIRT_PATCH_TYPE_PTCGA			5
#define PARAVIRT_PATCH_TYPE_GET_RR			6
#define PARAVIRT_PATCH_TYPE_SET_RR			7
#define PARAVIRT_PATCH_TYPE_SET_RR0_TO_RR4		8
#define PARAVIRT_PATCH_TYPE_SSM_I			9
#define PARAVIRT_PATCH_TYPE_RSM_I			10
#define PARAVIRT_PATCH_TYPE_GET_PSR_I			11
#define PARAVIRT_PATCH_TYPE_INTRIN_LOCAL_IRQ_RESTORE	12

/* PARAVIRT_PATCH_TYPE_[GS]ETREG + _IA64_REG_xxx */
#define PARAVIRT_PATCH_TYPE_GETREG			0x10000000
#define PARAVIRT_PATCH_TYPE_SETREG			0x20000000

/*
 * struct task_struct* (*ia64_switch_to)(void* next_task);
 * void *ia64_leave_syscall;
 * void *ia64_work_processed_syscall;
 * void *ia64_leave_kernel;
 */
#define PARAVIRT_PATCH_TYPE_BR_START			0x30000000
#define PARAVIRT_PATCH_TYPE_BR_SWITCH_TO		\
	(PARAVIRT_PATCH_TYPE_BR_START + 0)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_SYSCALL		\
	(PARAVIRT_PATCH_TYPE_BR_START + 1)
#define PARAVIRT_PATCH_TYPE_BR_WORK_PROCESSED_SYSCALL	\
	(PARAVIRT_PATCH_TYPE_BR_START + 2)
#define PARAVIRT_PATCH_TYPE_BR_LEAVE_KERNEL		\
	(PARAVIRT_PATCH_TYPE_BR_START + 3)

#ifdef ASM_SUPPORTED
#include <asm/paravirt_patch.h>

/*
 * pv_cpu_ops calling stub.
 * the normal function calling convention can't be expressed with gcc
 * inline assembly.
 *
 * from the caller's point of view,
 * the following registers will be clobbered:
 * r2, r3
 * r8-r15
 * r16, r17
 * b6, b7
 * p6-p15
 * ar.ccv
 *
 * from the callee's point of view,
 * the following registers can be used:
 * r2, r3: scratch
 * r8: scratch, input argument0 and return value
 * r9-r15: scratch, input argument1-5
 * b6: return pointer
 * b7: scratch
 * p6-p15: scratch
 * ar.ccv: scratch
 *
 * other registers must not be changed, especially
 * b0: rp: preserved. gcc ignores b0 in the clobber list.
 * r16: saved gp
 */
/* 5 bundles */
#define __PARAVIRT_BR							\
	";;\n"								\
	"{ .mlx\n"							\
	"nop 0\n"							\
	"movl r2 = %[op_addr]\n"/* get function pointer address */	\
	";;\n"								\
	"}\n"								\
	"1:\n"								\
	"{ .mii\n"							\
	"ld8 r2 = [r2]\n"	/* load function descriptor address */	\
	"mov r17 = ip\n"	/* get ip to calc return address */	\
	"mov r16 = gp\n"	/* save gp */				\
	";;\n"								\
	"}\n"								\
	"{ .mii\n"							\
	"ld8 r3 = [r2], 8\n"	/* load entry address */		\
	"adds r17 =  1f - 1b, r17\n"	/* calculate return address */	\
	";;\n"								\
	"mov b7 = r3\n"		/* set entry address */			\
	"}\n"								\
	"{ .mib\n"							\
	"ld8 gp = [r2]\n"	/* load gp value */			\
	"mov b6 = r17\n"	/* set return address */		\
	"br.cond.sptk.few b7\n"	/* intrinsics are very short insns */	\
	"}\n"								\
	"1:\n"								\
	"{ .mii\n"							\
	"mov gp = r16\n"	/* restore gp value */			\
	"nop 0\n"							\
	"nop 0\n"							\
	";;\n"								\
	"}\n"

#define PARAVIRT_OP(op)				\
	[op_addr] "i"(&pv_cpu_ops.op)

#define PARAVIRT_TYPE(type)			\
	PARAVIRT_PATCH_TYPE_ ## type

#define PARAVIRT_REG_CLOBBERS0					\
	"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",	\
		"r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS1					\
	"r2", "r3", /*"r8",*/ "r9", "r10", "r11", "r14",	\
		"r15", "r16", "r17"

#define PARAVIRT_REG_CLOBBERS2					\