patch-2.4.2 linux/include/asm-s390x/atomic.h
- Lines: 237
- Date: Tue Feb 13 14:13:44 2001
- Orig file: v2.4.1/linux/include/asm-s390x/atomic.h
- Orig date: Wed Dec 31 16:00:00 1969
diff -u --recursive --new-file v2.4.1/linux/include/asm-s390x/atomic.h linux/include/asm-s390x/atomic.h
@@ -0,0 +1,236 @@
+#ifndef __ARCH_S390_ATOMIC__
+#define __ARCH_S390_ATOMIC__
+
+/*
+ * include/asm-s390x/atomic.h
+ *
+ * S390 version
+ * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Denis Joseph Barrow
+ *
+ * Derived from "include/asm-i386/bitops.h"
+ * Copyright (C) 1992, Linus Torvalds
+ *
+ */
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc.
+ * S390 uses 'Compare And Swap' (CS) for atomicity in an SMP
+ * environment.
+ */
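+/*
+ * Every read-modify-write primitive below follows the same pattern:
+ * load the old value, compute the new value, then let CS retry the
+ * update until no other CPU has modified the counter in between.
+ * Roughly, in illustrative C pseudo-code:
+ *
+ *   do {
+ *       old = v->counter;
+ *       new = old OP i;
+ *   } while (!compare_and_swap(&v->counter, old, new));
+ */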
+
+typedef struct { volatile int counter; } atomic_t __attribute__ ((aligned (4)));
+#define ATOMIC_INIT(i) { (i) }
+
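+/* "bcr 15,0" is a branch-never that forces serialization; it acts
+   as a full memory barrier on S/390. */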
+#define atomic_eieio() __asm__ __volatile__ ("BCR 15,0")
+
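+/* Serialize, then load the current counter value. */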
+static __inline__ int atomic_read(atomic_t *v)
+{
+ int retval;
+ __asm__ __volatile__("bcr 15,0\n\t"
+ "l %0,%1"
+ : "=d" (retval) : "m" (*v) );
+ return retval;
+}
+
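+/* Store the new counter value, then serialize. */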
+static __inline__ void atomic_set(atomic_t *v, int i)
+{
+ __asm__ __volatile__("st %1,%0\n\t"
+ "bcr 15,0"
+ : "=m" (*v) : "d" (i) );
+}
+
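+/* Atomically add i to *v; the CS loop retries until the update is
+   not raced by another CPU. */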
+static __inline__ void atomic_add(int i, atomic_t *v)
+{
+ __asm__ __volatile__(" la 2,%0\n"
+ " l 0,0(2)\n"
+ "0: lr 1,0\n"
+ " ar 1,%1\n"
+ " cs 0,1,0(2)\n"
+ " jl 0b"
+ : "+m" (*v) : "d" (i) : "0", "1", "2", "cc" );
+}
+
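+/* Atomically add i to *v and return the new value. */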
+static __inline__ int atomic_add_return (int i, atomic_t *v)
+{
+ int newval;
+ __asm__ __volatile__(" la 1,%0\n"
+ " l 0,0(1)\n"
+ "0: lr %1,0\n"
+ " ar %1,%2\n"
+ " cs 0,%1,0(1)\n"
+ " jl 0b"
+ : "+m" (*v), "=&d" (newval)
+ : "d" (i) : "0", "1", "cc" );
+ return newval;
+}
+
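+/* Atomically increment *v and return true if the result is zero. */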
+static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
+{
+ int i;
+
+ __asm__ __volatile__(" la 1,%0\n"
+ " l 0,0(1)\n"
+ "0: lr %1,0\n"
+ " ahi %1,1\n"
+ " cs 0,%1,0(1)\n"
+ " jl 0b"
+ : "+m" (*v), "=&d" (i) : : "0", "1", "cc" );
+ return i == 0;
+}
+
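+/* Atomically add i to *v and return true if the result is negative. */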
+static __inline__ int atomic_add_negative(int i, atomic_t *v)
+{
+ int newval;
+ __asm__ __volatile__(" la 1,%0\n"
+ " l 0,0(1)\n"
+ "0: lr %1,0\n"
+ " ar %1,%2\n"
+ " cs 0,%1,0(1)\n"
+ " jl 0b\n"
+ : "+m" (*v), "=&d" (newval)
+ : "d" (i) : "0", "1", "cc" );
+ return newval < 0;
+}
+
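+/* Atomically subtract i from *v. */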
+static __inline__ void atomic_sub(int i, atomic_t *v)
+{
+ __asm__ __volatile__(" la 2,%0\n"
+ " l 0,0(2)\n"
+ "0: lr 1,0\n"
+ " sr 1,%1\n"
+ " cs 0,1,0(2)\n"
+ " jl 0b"
+ : "+m" (*v) : "d" (i) : "0", "1", "2", "cc" );
+}
+
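+/* Atomically increment *v by 1. */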
+static __inline__ void atomic_inc(volatile atomic_t *v)
+{
+ __asm__ __volatile__(" la 2,%0\n"
+ " l 0,0(2)\n"
+ "0: lr 1,0\n"
+ " ahi 1,1\n"
+ " cs 0,1,0(2)\n"
+ " jl 0b"
+ : "+m" (*v) : : "0", "1", "2", "cc" );
+}
+
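+/* Atomically increment *v and return the new value. */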
+static __inline__ int atomic_inc_return(volatile atomic_t *v)
+{
+ int i;
+ __asm__ __volatile__(" la 1,%0\n"
+ " l 0,0(1)\n"
+ "0: lr %1,0\n"
+ " ahi %1,1\n"
+ " cs 0,%1,0(1)\n"
+ " jl 0b"
+ : "+m" (*v), "=&d" (i) : : "0", "1", "cc" );
+ return i;
+}
+
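+/* Atomically decrement *v by 1. */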
+static __inline__ void atomic_dec(volatile atomic_t *v)
+{
+ __asm__ __volatile__(" la 2,%0\n"
+ " l 0,0(2)\n"
+ "0: lr 1,0\n"
+ " ahi 1,-1\n"
+ " cs 0,1,0(2)\n"
+ " jl 0b"
+ : "+m" (*v) : : "0", "1", "2", "cc" );
+}
+
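+/* Atomically decrement *v and return the new value. */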
+static __inline__ int atomic_dec_return(volatile atomic_t *v)
+{
+ int i;
+ __asm__ __volatile__(" la 1,%0\n"
+ " l 0,0(1)\n"
+ "0: lr %1,0\n"
+ " ahi %1,-1\n"
+ " cs 0,%1,0(1)\n"
+ " jl 0b"
+ : "+m" (*v), "=&d" (i) : : "0", "1", "cc" );
+ return i;
+}
+
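+/* Atomically decrement *v and return true if the result is zero. */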
+static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
+{
+ int i;
+ __asm__ __volatile__(" la 1,%0\n"
+ " l 0,0(1)\n"
+ "0: lr %1,0\n"
+ " ahi %1,-1\n"
+ " cs 0,%1,0(1)\n"
+ " jl 0b"
+ : "+m" (*v), "=&d" (i) : : "0", "1", "cc");
+ return i == 0;
+}
+
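+/* Atomically clear the bits in mask from *v (AND with ~mask). */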
+static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
+{
+ __asm__ __volatile__(" la 2,%0\n"
+ " l 0,0(2)\n"
+ "0: lr 1,0\n"
+ " nr 1,%1\n"
+ " cs 0,1,0(2)\n"
+ " jl 0b"
+ : "+m" (*v) : "d" (~(mask))
+ : "0", "1", "2", "cc" );
+}
+
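+/* Atomically set the bits in mask in *v (OR with mask). */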
+static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
+{
+ __asm__ __volatile__(" la 2,%0\n"
+ " l 0,0(2)\n"
+ "0: lr 1,0\n"
+ " or 1,%1\n"
+ " cs 0,1,0(2)\n"
+ " jl 0b"
+ : "+m" (*v) : "d" (mask) : "0", "1", "2", "cc" );
+}
+
+/*
+ * Try to replace *v with new_val if it currently contains
+ * expected_oldval.  Returns 0 if the swap succeeded, 1 if it failed.
+ */
+static __inline__ int
+atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
+{
+ int retval;
+
+ __asm__ __volatile__(
+ " la 1,%1\n"
+ " lr 0,%2\n"
+ " cs 0,%3,0(1)\n"
+ " ipm %0\n"
+ " srl %0,28"
+ : "=&r" (retval), "+m" (*v)
+ : "d" (expected_oldval) , "d" (new_val)
+ : "0", "1", "cc");
+ return retval;
+}
+
+/*
+ * Spin until *v equals expected_oldval, then atomically replace it
+ * with new_val.
+ */
+static __inline__ void
+atomic_compare_and_swap_spin(int expected_oldval,int new_val,atomic_t *v)
+{
+ __asm__ __volatile__(
+ " la 1,%0\n"
+ "0: lr 0,%1\n"
+ " cs 0,%2,0(1)\n"
+ " jl 0b\n"
+ : "+m" (*v)
+ : "d" (expected_oldval) , "d" (new_val)
+ : "cc", "0", "1");
+}
+
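+/*
+ * Like atomic_compare_and_swap(), but print a warning and force the
+ * new value in if the swap fails.  Illustrative use (hypothetical
+ * counter):
+ *
+ *   atomic_compare_and_swap_debug(&my_counter, 0, 1);
+ */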
+#define atomic_compare_and_swap_debug(where,from,to) \
+do { \
+ if (atomic_compare_and_swap((from), (to), (where))) { \
+  printk(KERN_WARNING "%s/%d atomic counter:%s couldn't be changed from %d(%s) to %d(%s), was %d\n", \
+   __FILE__, __LINE__, #where, (from), #from, (to), #to, atomic_read(where)); \
+  atomic_set((where), (to)); \
+ } \
+} while (0)
+
+#endif /* __ARCH_S390_ATOMIC__ */
+