patch-2.3.31 linux/include/asm-i386/system.h
- Lines: 59
- Date: Tue Dec 7 14:10:48 1999
- Orig file: v2.3.30/linux/include/asm-i386/system.h
- Orig date: Tue Dec 7 09:32:49 1999
diff -u --recursive --new-file v2.3.30/linux/include/asm-i386/system.h linux/include/asm-i386/system.h
@@ -3,6 +3,7 @@
 
 #include <linux/kernel.h>
 #include <asm/segment.h>
+#include <linux/bitops.h> /* for LOCK_PREFIX */
 
 #ifdef __KERNEL__
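The new include exists purely for LOCK_PREFIX. On i386 in this era, <linux/bitops.h> pulls in <asm/bitops.h>, which defines the macro roughly as below (a sketch from memory of the 2.3.x headers, not part of this patch), so the bus-locking prefix is emitted only on SMP builds:

	#ifdef __SMP__
	#define LOCK_PREFIX "lock ; "	/* assert #LOCK so other CPUs can't interleave */
	#else
	#define LOCK_PREFIX ""		/* UP: a single CPU cannot race itself */
	#endif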
@@ -207,6 +208,50 @@
 	}
 	return x;
 }
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#if CPU != 386
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+ unsigned long new, int size)
+{
+ unsigned long prev;
+ switch (size) {
+ case 1:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 2:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ case 4:
+ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
+ : "=a"(prev)
+ : "q"(new), "m"(*__xg(ptr)), "0"(old)
+ : "memory");
+ return prev;
+ }
+ return old;
+}
+
+#define cmpxchg(ptr,o,n)\
+ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
+ (unsigned long)(n),sizeof(*(ptr))))
+
+#else
+/* Compiling for a 386 proper. Is it worth implementing via cli/sti? */
+#endif
 /*
  * Force strict CPU ordering.
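The new cmpxchg() is meant to be used in a retry loop: snapshot the old value, compute a new one, and retry if another CPU modified the word in between. A minimal caller sketch (the function name and types here are illustrative, not part of the patch):

	static inline void atomic_add_ulong(volatile unsigned long *v,
					    unsigned long delta)
	{
		unsigned long old, prev;

		do {
			old = *v;			/* snapshot current value */
			prev = cmpxchg(v, old, old + delta);
		} while (prev != old);			/* lost a race: retry */
	}

As the comment in the patch says, success is detected by comparing the returned value with OLD: if they match, the store happened.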
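As for the question in the #else branch: a plain 386 lacks the cmpxchg instruction, so the only fallback on that CPU is to make the compare-and-store atomic against interrupts. A hypothetical uniprocessor-only sketch using the save_flags/cli/restore_flags primitives of the day (not in the patch, and not safe on SMP):

	static inline unsigned long __cmpxchg_386(volatile unsigned long *ptr,
						  unsigned long old,
						  unsigned long new)
	{
		unsigned long flags, prev;

		save_flags(flags);
		cli();				/* block local interrupts */
		prev = *ptr;
		if (prev == old)
			*ptr = new;
		restore_flags(flags);		/* restore interrupt state */
		return prev;
	}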