@@ -222,3 +222,141 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_inc(v)		atomic_add(1, v)
 #define atomic_dec(v)		atomic_sub(1, v)
+
+#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)    (atomic_add_return(1, v))
+#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
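These helpers all reduce to the two value-returning primitives, so the test is made against the value the operation itself produced; re-reading the counter in a separate step would race with other CPUs. A minimal caller-side sketch of the classic refcount idiom (struct my_obj and its fields are illustrative assumptions, not part of this patch):

	/* Illustrative only: exactly one caller -- the one whose decrement
	 * takes the counter to zero -- sees true and frees the object. */
	static void my_obj_put(struct my_obj *obj)
	{
		if (atomic_dec_and_test(&obj->refcount))
			kfree(obj);
	}
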
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
+
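These barrier hooks exist because the plain atomic_inc()/atomic_dec() forms imply no memory ordering; only the value-returning variants are full barriers. Here all four hooks expand to a real smp_mb(). A hedged sketch of their intended use (the fields are hypothetical):

	/* Illustrative only: publish the flag before the unordered
	 * atomic_dec() that a polling reader is waiting on. */
	obj->status = DONE;		/* hypothetical flag */
	smp_mb__before_atomic_dec();
	atomic_dec(&obj->pending);	/* hypothetical counter */
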
+#ifndef CONFIG_GENERIC_ATOMIC64
+typedef struct {
+	u64 __aligned(8) counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
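The counter is forced to 8-byte alignment because ldrexd/strexd require a doubleword-aligned address. Declaring and statically initialising a counter then looks like this (the variable name is an assumption for illustration):

	/* Illustrative only: a compile-time-initialised 64-bit counter. */
	static atomic64_t bytes_seen = ATOMIC64_INIT(0);
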
+static inline u64 atomic64_read(const atomic64_t *v)
+{
+	u64 result;
+
+	__asm__ __volatile__("@ atomic64_read\n"
+"	ldrexd	%0, %H0, [%1]"
+	: "=&r" (result)
+	: "r" (&v->counter), "Qo" (v->counter)
+	);
+
+	return result;
+}
+
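A single ldrexd performs a single-copy atomic 64-bit load; on a 32-bit core, two ordinary word loads could tear against a concurrent writer and return one old and one new half. Caller-side sketch (bytes_seen as above):

	/* Illustrative only: always a consistent snapshot, never torn. */
	u64 snapshot = atomic64_read(&bytes_seen);
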
+static inline void atomic64_set(atomic64_t *v, u64 i)
+{
+	u64 tmp;
+
+	__asm__ __volatile__("@ atomic64_set\n"
+"1:	ldrexd	%0, %H0, [%2]\n"
+"	strexd	%0, %3, %H3, [%2]\n"
+"	teq	%0, #0\n"
+"	bne	1b"
+	: "=&r" (tmp), "=Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_add\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
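Every read-modify-write routine from here on shares the same exclusive-monitor retry shape; annotated once for reference:

	/*
	 * Shape of the loop above and below (illustrative annotation):
	 *
	 *   1: ldrexd  %0, %H0, [%3]    @ load both words, open exclusive access
	 *      adds/adc (or subs/sbc)   @ modify low word, carry/borrow into high
	 *      strexd  %1, %0, %H0      @ try to store; %1 == 0 only if still exclusive
	 *      teq %1, #0 ; bne 1b      @ another agent intervened -- retry
	 */
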
+static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_add_return\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	adds	%0, %0, %4\n"
+"	adc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
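Unlike the void atomic64_add(), the _return form brackets the exclusive sequence with smp_mb() on both sides, so it is a full barrier and hands back the post-operation value atomically. A hedged usage sketch (QUOTA and throttle() are hypothetical):

	/* Illustrative only: act on the new value; no other CPU can slip
	 * an update between the add and the comparison. */
	if (atomic64_add_return(len, &bytes_seen) > QUOTA)
		throttle();
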
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	__asm__ __volatile__("@ atomic64_sub\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+}
+
+static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+{
+	u64 result;
+	unsigned long tmp;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic64_sub_return\n"
+"1:	ldrexd	%0, %H0, [%3]\n"
+"	subs	%0, %0, %4\n"
+"	sbc	%H0, %H0, %H4\n"
+"	strexd	%1, %0, %H0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "r" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
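The compare-and-swap primitive defined next generalises all of the above: any pure 64-bit transformation can be made atomic by retrying whenever the value changed between the read and the swap. The canonical caller loop, as a sketch (the doubling transform is an arbitrary example):

	/* Illustrative only: atomically apply new = f(old). */
	u64 old, new;
	do {
		old = atomic64_read(&bytes_seen);
		new = old * 2;		/* any pure function of old */
	} while (atomic64_cmpxchg(&bytes_seen, old, new) != old);
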
+static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+{
+	u64 oldval;
+	unsigned long res;
+
+	smp_mb();
+
+	do {
+		__asm__ __volatile__("@ atomic64_cmpxchg\n"
+		"ldrexd		%1, %H1, [%3]\n"