/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
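
/*
 * Illustrative sketch (not part of this header): atomic_t is meant to be
 * initialized with ATOMIC_INIT() and then only touched through these
 * helpers.  The names below are hypothetical:
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 *	atomic_set(&example_count, 10);
 *	pr_info("count = %d\n", atomic_read(&example_count));
 */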
 
#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
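
/*
 * Illustrative sketch (not part of this header): the ldrex/strex pair
 * above is a load-linked/store-conditional retry loop.  In portable C,
 * assuming GCC/Clang __atomic builtins, the equivalent update pattern
 * would be:
 *
 *	void example_add(int i, int *counter)
 *	{
 *		int old = __atomic_load_n(counter, __ATOMIC_RELAXED);
 *
 *		while (!__atomic_compare_exchange_n(counter, &old, old + i,
 *						    false, __ATOMIC_RELAXED,
 *						    __ATOMIC_RELAXED))
 *			;
 *	}
 *
 * A failed strex corresponds to the compare-exchange failing (old is
 * refreshed automatically) and the loop retrying.
 */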
 
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
 
static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
 
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
 
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}
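
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * atomic_cmpxchg() is typically wrapped in a read/modify/retry loop.
 * For example, recording a running maximum:
 *
 *	static void example_track_max(atomic_t *max, int val)
 *	{
 *		int cur = atomic_read(max);
 *
 *		while (cur < val) {
 *			int old = atomic_cmpxchg(max, cur, val);
 *			if (old == cur)
 *				break;
 *			cur = old;
 *		}
 *	}
 *
 * If another CPU wins the race, the returned old value is used to
 * re-check the condition before retrying.
 */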
 
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}
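
/*
 * Illustrative sketch (hypothetical flags word, not part of this header):
 * atomic_clear_mask() clears bits in place with the same ldrex/strex
 * retry loop, so concurrent updates to other bits are not lost:
 *
 *	static unsigned long example_flags = 0xffUL;
 *
 *	atomic_clear_mask(0x0fUL, &example_flags);
 *
 * after which example_flags reads 0xf0, even if another CPU was
 * updating the high bits concurrently.
 */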
 
#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
 
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)
 
static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
 
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
 
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */
 
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c;
}
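
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * returning the old value lets __atomic_add_unless() implement
 * "increment unless zero" refcounting, where a zero count means the
 * object is already being torn down:
 *
 *	static int example_get_ref(atomic_t *refcnt)
 *	{
 *		return __atomic_add_unless(refcnt, 1, 0) != 0;
 *	}
 *
 * The caller gets 1 only if the count was non-zero and was bumped.
 */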
 
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
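
/*
 * Illustrative sketch (hypothetical structure, not part of this header):
 * the non-value-returning atomics above imply no memory barrier, so the
 * smp_mb__* helpers are used to order surrounding accesses, e.g.:
 *
 *	obj->status = EXAMPLE_DONE;
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->pending);
 *
 * guaranteeing the status store is visible before the count drops.
 */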
 
#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }
 
static inline u64 atomic64_read(const atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}
 
static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
 
static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
 
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}
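
/*
 * Illustrative sketch (hypothetical counter, not part of this header):
 * atomic64_t suits counters that would wrap a 32-bit atomic_t, such as
 * running byte totals:
 *
 *	static atomic64_t example_rx_bytes = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &example_rx_bytes);
 *	total = atomic64_read(&example_rx_bytes);
 */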
 
static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
 
static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}
 
static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"