|  | @@ -183,3 +183,161 @@
 | 
	
		
			
				|  |  |  #  define v7wbi_always_flags	v7wbi_tlb_flags_up
 | 
	
		
			
				|  |  |  # endif
 | 
	
		
			
/*
 * If an earlier CONFIG_CPU_TLB_* block already chose a TLB model,
 * more than one model is configured, so force runtime dispatch
 * (MULTI_TLB); otherwise bind _TLB directly to the v7wbi ops.
 */
# ifdef _TLB
#  define MULTI_TLB 1
# else
#  define _TLB v7wbi
# endif
 | 
	
		
			
				|  |  | +#else
 | 
	
		
			
				|  |  | +# define v7wbi_possible_flags	0
 | 
	
		
			
				|  |  | +# define v7wbi_always_flags	(-1UL)
 | 
	
		
			
				|  |  | +#endif
 | 
	
		
			
				|  |  | +
 | 
	
		
			
/*
 * If none of the CONFIG_CPU_TLB_* blocks above selected a TLB model,
 * fail the build here rather than producing a kernel with no TLB ops.
 */
#ifndef _TLB
#error Unknown TLB model
#endif
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +#ifndef __ASSEMBLY__
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +#include <linux/sched.h>
 | 
	
		
			
				|  |  | +
 | 
	
		
			
/*
 * Per-TLB-model maintenance operations.  When MULTI_TLB is defined,
 * flush calls dispatch indirectly through the cpu_tlb instance of
 * this structure; otherwise they bind directly to the _TLB-prefixed
 * implementations (see the "Select the calling method" block below).
 */
struct cpu_tlb_fns {
	/* Flush user TLB entries in [start, end) for the given vma
	 * (start inclusive, end exclusive — see the interface notes below). */
	void (*flush_user_range)(unsigned long, unsigned long, struct vm_area_struct *);
	/* Flush kernel TLB entries in [start, end). */
	void (*flush_kern_range)(unsigned long, unsigned long);
	/* TLB_* capability flags for this TLB model; read via __cpu_tlb_flags. */
	unsigned long tlb_flags;
};
 | 
	
		
			
				|  |  | +
 | 
	
		
			
/*
 * Select the calling method
 */
#ifdef MULTI_TLB

/* Several TLB models configured: dispatch through the cpu_tlb struct. */
#define __cpu_flush_user_tlb_range	cpu_tlb.flush_user_range
#define __cpu_flush_kern_tlb_range	cpu_tlb.flush_kern_range

#else

/*
 * Single TLB model: glue the _TLB prefix onto the function names
 * (e.g. v7wbi_flush_user_tlb_range) so the calls are direct.
 */
#define __cpu_flush_user_tlb_range	__glue(_TLB,_flush_user_tlb_range)
#define __cpu_flush_kern_tlb_range	__glue(_TLB,_flush_kern_tlb_range)

extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *);
extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long);

#endif
 | 
	
		
			
				|  |  | +
 | 
	
		
			
/* The TLB ops/flags instance for the running CPU's TLB model. */
extern struct cpu_tlb_fns cpu_tlb;

/* Runtime TLB_* flags; functions below snapshot this into __tlb_flag. */
#define __cpu_tlb_flags			cpu_tlb.tlb_flags
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +/*
 | 
	
		
			
				|  |  | + *	TLB Management
 | 
	
		
			
				|  |  | + *	==============
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *	The arch/arm/mm/tlb-*.S files implement these methods.
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *	The TLB specific code is expected to perform whatever tests it
 | 
	
		
			
				|  |  | + *	needs to determine if it should invalidate the TLB for each
 | 
	
		
			
				|  |  | + *	call.  Start addresses are inclusive and end addresses are
 | 
	
		
			
				|  |  | + *	exclusive; it is safe to round these addresses down.
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *	flush_tlb_all()
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *		Invalidate the entire TLB.
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *	flush_tlb_mm(mm)
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *		Invalidate all TLB entries in a particular address
 | 
	
		
			
				|  |  | + *		space.
 | 
	
		
			
				|  |  | + *		- mm	- mm_struct describing address space
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *	flush_tlb_range(mm,start,end)
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *		Invalidate a range of TLB entries in the specified
 | 
	
		
			
				|  |  | + *		address space.
 | 
	
		
			
				|  |  | + *		- mm	- mm_struct describing address space
 | 
	
		
			
				|  |  | + *		- start - start address (may not be aligned)
 | 
	
		
			
				|  |  | + *		- end	- end address (exclusive, may not be aligned)
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *	flush_tlb_page(vaddr,vma)
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *		Invalidate the specified page in the specified address range.
 | 
	
		
			
				|  |  | + *		- vaddr - virtual address (may not be aligned)
 | 
	
		
			
				|  |  | + *		- vma	- vma_struct describing address range
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *	flush_kern_tlb_page(kaddr)
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + *		Invalidate the TLB entry for the specified page.  The address
 | 
	
		
			
				|  |  | + *		will be in the kernel's virtual memory space.  Current uses
 | 
	
		
			
				|  |  | + *		only require the D-TLB to be invalidated.
 | 
	
		
			
				|  |  | + *		- kaddr - Kernel virtual memory address
 | 
	
		
			
				|  |  | + */
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +/*
 | 
	
		
			
				|  |  | + * We optimise the code below by:
 | 
	
		
			
				|  |  | + *  - building a set of TLB flags that might be set in __cpu_tlb_flags
 | 
	
		
			
				|  |  | + *  - building a set of TLB flags that will always be set in __cpu_tlb_flags
 | 
	
		
			
				|  |  | + *  - if we're going to need __cpu_tlb_flags, access it once and only once
 | 
	
		
			
				|  |  | + *
 | 
	
		
			
				|  |  | + * This allows us to build optimal assembly for the single-CPU type case,
 | 
	
		
			
				|  |  | + * and as close to optimal given the compiler constraints for multi-CPU
 | 
	
		
			
				|  |  | + * case.  We could do better for the multi-CPU case if the compiler
 | 
	
		
			
				|  |  | + * implemented the "%?" method, but this has been discontinued due to too
 | 
	
		
			
				|  |  | + * many people getting it wrong.
 | 
	
		
			
				|  |  | + */
 | 
	
		
			
/*
 * OR of the flags any configured TLB model might set.  A flag outside
 * this mask can never appear in __cpu_tlb_flags, so tlb_flag()/tlb_op()
 * tests against it fold away at compile time.
 */
#define possible_tlb_flags	(v4_possible_flags | \
				 v4wbi_possible_flags | \
				 fr_possible_flags | \
				 v4wb_possible_flags | \
				 fa_possible_flags | \
				 v6wbi_possible_flags | \
				 v7wbi_possible_flags)
 | 
	
		
			
				|  |  | +
 | 
	
		
			
/*
 * AND of the flags every configured TLB model sets.  A flag inside
 * this mask is guaranteed present, so no runtime test is needed
 * (unconfigured models contribute -1UL, i.e. all bits set).
 */
#define always_tlb_flags	(v4_always_flags & \
				 v4wbi_always_flags & \
				 fr_always_flags & \
				 v4wb_always_flags & \
				 fa_always_flags & \
				 v6wbi_always_flags & \
				 v7wbi_always_flags)
 | 
	
		
			
				|  |  | +
 | 
	
		
			
/*
 * True if flag set f applies: either always set for every configured
 * model, or possibly set and present at runtime.  Relies on a local
 * __tlb_flag snapshot of __cpu_tlb_flags in the enclosing function.
 */
#define tlb_flag(f)	((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f)))
 | 
	
		
			
				|  |  | +
 | 
	
		
			
/*
 * Emit a cp15 TLB maintenance "mcr" guarded by flag set f.  If f is in
 * always_tlb_flags the mcr is emitted unconditionally; if it is merely
 * possible, the runtime flags are tested (tst) and the op is made
 * conditional (mcrne).  Otherwise nothing is emitted.  Requires a
 * local __tlb_flag variable in the enclosing function.
 */
#define __tlb_op(f, insnarg, arg)					\
	do {								\
		if (always_tlb_flags & (f))				\
			asm("mcr " insnarg				\
			    : : "r" (arg) : "cc");			\
		else if (possible_tlb_flags & (f))			\
			asm("tst %1, %2\n\t"				\
			    "mcrne " insnarg				\
			    : : "r" (arg), "r" (__tlb_flag), "Ir" (f)	\
			    : "cc");					\
	} while (0)
 | 
	
		
			
				|  |  | +
 | 
	
		
			
/* cp15 op with opcode1 = 0 (the standard TLB maintenance encoding). */
#define tlb_op(f, regs, arg)	__tlb_op(f, "p15, 0, %0, " regs, arg)
/* cp15 op with opcode1 = 1 — presumably the L2/outer TLB encoding; confirm per core. */
#define tlb_l2_op(f, regs, arg)	__tlb_op(f, "p15, 1, %0, " regs, arg)
 | 
	
		
			
				|  |  | +
 | 
	
		
			
/*
 * local_flush_tlb_all - invalidate the entire TLB on this CPU.
 *
 * On write-buffered TLBs (TLB_WB) the write buffer is drained first;
 * then the full-invalidate cp15 op matching the configured TLB model
 * is issued; finally, on TLB_BARRIER parts, dsb+isb ensure the
 * invalidation takes effect before subsequent instructions.
 */
static inline void local_flush_tlb_all(void)
{
	const int zero = 0;
	/* Snapshot flags once; tlb_flag()/tlb_op() read this local. */
	const unsigned int __tlb_flag = __cpu_tlb_flags;

	if (tlb_flag(TLB_WB))
		dsb();

	/* Only the op(s) matching the configured model(s) are emitted. */
	tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
	tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
	tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
	tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
	tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);

	if (tlb_flag(TLB_BARRIER)) {
		dsb();
		isb();
	}
}
 | 
	
		
			
				|  |  | +
 | 
	
		
			
				|  |  | +static inline void local_flush_tlb_mm(struct mm_struct *mm)
 | 
	
		
			
				|  |  | +{
 | 
	
		
			
				|  |  | +	const int zero = 0;
 | 
	
		
			
				|  |  | +	const int asid = ASID(mm);
 | 
	
		
			
				|  |  | +	const unsigned int __tlb_flag = __cpu_tlb_flags;
 |