@@ -23,3 +23,202 @@
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
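+/* The volatile cast makes each atomic_read() reload ->counter from memory. */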
+#define atomic_read(v)	(*(volatile int *)&(v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
+
+#if __LINUX_ARM_ARCH__ >= 6
+
+/*
+ * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
+ * store exclusive to ensure that these are atomic. We may loop
+ * to ensure that the update happens.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
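+	/*
+	 * ldrex marks &v->counter for exclusive access; strex stores the
+	 * updated value only while that monitor is still held, writing 0
+	 * to %1 on success, so teq/bne retries the update on contention.
+	 */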
+	__asm__ __volatile__("@ atomic_add\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+}
+
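+/*
+ * atomic_add() above returns nothing and so carries no ordering; the
+ * *_return variants below are bracketed by smp_mb() because atomics
+ * that return a value must behave as full memory barriers.
+ */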
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic_add_return\n"
+"1:	ldrex	%0, [%3]\n"
+"	add	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_sub\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	smp_mb();
+
+	__asm__ __volatile__("@ atomic_sub_return\n"
+"1:	ldrex	%0, [%3]\n"
+"	sub	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+
+	smp_mb();
+
+	return result;
+}
+
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+	unsigned long oldval, res;
+
+	smp_mb();
+
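+	/*
+	 * strexeq only executes when the old value matched (teq set Z);
+	 * on a mismatch res keeps the 0 from the mov, so the loop exits
+	 * either after a successful store or after a failed compare.
+	 */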
+	do {
+		__asm__ __volatile__("@ atomic_cmpxchg\n"
+		"ldrex	%1, [%3]\n"
+		"mov	%0, #0\n"
+		"teq	%1, %4\n"
+		"strexeq %0, %5, [%3]\n"
+		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
+		    : "cc");
+	} while (res);
+
+	smp_mb();
+
+	return oldval;
+}
+
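+/*
+ * Atomically clear the bits set in @mask from the word at @addr (bic),
+ * using the same ldrex/strex retry loop as the ops above.
+ */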
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long tmp, tmp2;
+
+	__asm__ __volatile__("@ atomic_clear_mask\n"
+"1:	ldrex	%0, [%3]\n"
+"	bic	%0, %0, %4\n"
+"	strex	%1, %0, [%3]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
+	: "r" (addr), "Ir" (mask)
+	: "cc");
+}
+
+#else /* ARM_ARCH_6 */
+
+#ifdef CONFIG_SMP
+#error SMP not supported on pre-ARMv6 CPUs
+#endif
+
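+/*
+ * Pre-ARMv6 cores are necessarily uniprocessor here (see the #error
+ * above), so briefly disabling interrupts around the read-modify-write
+ * is enough to make these operations atomic.
+ */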
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	raw_local_irq_save(flags);
+	val = v->counter;
+	v->counter = val += i;
+	raw_local_irq_restore(flags);
+
+	return val;
+}
+#define atomic_add(i, v)	(void) atomic_add_return(i, v)
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+	unsigned long flags;
+	int val;
+
+	raw_local_irq_save(flags);
+	val = v->counter;
+	v->counter = val -= i;
+	raw_local_irq_restore(flags);
+
+	return val;
+}
+#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	int ret;
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	ret = v->counter;
+	if (likely(ret == old))
+		v->counter = new;
+	raw_local_irq_restore(flags);
+
+	return ret;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+	unsigned long flags;
+
+	raw_local_irq_save(flags);
+	*addr &= ~mask;
+	raw_local_irq_restore(flags);
+}
+
+#endif /* __LINUX_ARM_ARCH__ */
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
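+/*
+ * Atomically add @a to @v so long as @v did not already hold @u;
+ * returns the value @v held on entry.
+ */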
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
+		c = old;
+	return c;
+}
+
+#define atomic_inc(v)		atomic_add(1, v)
+#define atomic_dec(v)		atomic_sub(1, v)