[PATCH] PPC atomic.h PPC64 fixes


The current sysdeps/powerpc/bits/atomic.h fails the csu/tst-atomic.c and csu/tst-atomic-long.c tests on PPC64.

For tst-atomic (sizeof(mem) == 4) the __arch_compare_and_exchange_bool_32_acq macro must sign extend the value that lwarx loads from (mem) before the subtract. On PPC64 most register-to-register operations are 64-bit, even for (32-bit) int. In this case oldval was sign extended to 64-bit, but the lwarx result is zero extended, so an extend sign word (extsw) is required.
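
For illustration (not part of the patch), here is a small host-side C
program that models the mismatch; the same arithmetic happens in the
64-bit registers when lwarx zero extends the loaded word while oldval
was sign extended:

#include <stdio.h>

int
main (void)
{
  int oldval = -1;
  unsigned int word = 0xffffffffu;  /* the 32-bit word in memory */
  unsigned long loaded = word;      /* as lwarx: zero extended to 64-bit */
  long comparand = oldval;          /* as oldval: sign extended to 64-bit */

  /* Without extsw the 64-bit subtract is nonzero even though the
     32-bit values are equal, so the exchange is wrongly skipped.  */
  printf ("without extsw: %ld\n", (long) (loaded - comparand));

  /* extsw makes the loaded word signed before the subtract.  */
  printf ("with extsw:    %ld\n", (long) (int) loaded - comparand);
  return 0;
}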

The __arch_atomic_exchange_and_add_64 macro was causing constraint match warnings because an int variable was being passed to the "I" (16-bit signed constant) constraint. Change the addi to add and change the constraint to "r".

Finally, it is a good idea to use the "b" constraint for base registers: "b" excludes r0, which the hardware reads as the constant 0 when it appears in the base (RA) position of a load or store.
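
To show the corrected constraints together, here is a standalone
rendering of the fixed 64-bit exchange-and-add (illustration only; the
function name is made up and a powerpc64 compiler is assumed).  "r"
lets value live in any GPR instead of demanding a 16-bit signed
constant, and "b" keeps mem out of r0, which would be read as 0 in the
base position of ldarx/stdcx.:

static inline long
exchange_and_add_64_sketch (long *mem, long value)
{
  long val, tmp;
  __asm __volatile ("1:	ldarx	%0,0,%3\n"
		    "	add	%1,%0,%4\n"
		    "	stdcx.	%1,0,%3\n"
		    "	bne-	1b"
		    : "=&b" (val), "=&r" (tmp), "=m" (*mem)
		    : "b" (mem), "r" (value), "2" (*mem)
		    : "cr0");
  return val;
}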

For tst-atomic-long (sizeof(mem) == 8) the atomic_decrement_if_positive macro was not defined at all.
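
The intended semantics, written as plain (non-atomic) C for reference
only; the patch below implements this with larx/stcx. loops for both
the 4-byte and 8-byte cases:

/* Decrement *mem if it is > 0, and return the old value.  */
static long
decrement_if_positive_sketch (long *mem)
{
  long old = *mem;
  if (old > 0)
    *mem = old - 1;
  return old;
}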



2003-03-27  Steven Munroe  <sjmunroe@us.ibm.com>

	* sysdeps/powerpc/bits/atomic.h
	(__arch_compare_and_exchange_bool_32_acq): Move to
	[!__powerpc64__].
	[__powerpc64__] (__arch_compare_and_exchange_bool_32_acq):
	Define PPC64 specific version.
	[__powerpc64__] (__arch_compare_and_exchange_bool_64_acq):
	Change (mem) constraint to "b".
	[__powerpc64__] (__arch_atomic_exchange_and_add_64):
	Replace addi with add. Change (value) constraint to "r".
	Change (mem) constraint to "b".
	[__powerpc64__] (__arch_atomic_decrement_if_positive_64):
	New macro.
	(__arch_atomic_exchange_32): Change (mem) constraint to "b".
	(__arch_atomic_exchange_and_add_32): Change (mem) constraint
	to "b".
	(__arch_atomic_decrement_if_positive_32): New macro.
	(atomic_decrement_if_positive): Use __arch* macros.

diff -urN libc23-cvstip-20030327/sysdeps/powerpc/bits/atomic.h libc23/sysdeps/powerpc/bits/atomic.h
--- libc23-cvstip-20030327/sysdeps/powerpc/bits/atomic.h	2003-03-25 22:01:47.000000000 -0600
+++ libc23/sysdeps/powerpc/bits/atomic.h	2003-03-27 14:56:14.000000000 -0600
@@ -65,27 +65,34 @@
  * Ultimately we should do separate _acq and _rel versions.
  */
 
+#ifdef __powerpc64__
+
 /*
- * XXX this may not work properly on 64-bit if the register
- * containing oldval has the high half non-zero for some reason.
+ * The 32-bit exchange_bool is different on powerpc64 because the subf
+ * does signed 64-bit arithmetic while the lwarx is 32-bit unsigned
+ * (a load word and zero (high 32) form).
+ * In powerpc64 register values are 64-bit by default, including oldval.
+ * So we need to sign extend (extsw) the result of the lwarx to 64-bit
+ * so that the 64-bit subtract gives the expected result and sets the
+ * condition register correctly.
  */
-#define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
 ({									      \
   unsigned int __tmp;							      \
   __asm __volatile (__ARCH_REL_INSTR "\n"				      \
 		    "1:	lwarx	%0,0,%1\n"				      \
+		    "	extsw	%0,%0\n"				      \
 		    "	subf.	%0,%2,%0\n"				      \
 		    "	bne	2f\n"					      \
 		    "	stwcx.	%3,0,%1\n"				      \
 		    "	bne-	1b\n"					      \
 		    "2:	" __ARCH_ACQ_INSTR				      \
 		    : "=&r" (__tmp)					      \
-		    : "r" (mem), "r" (oldval), "r" (newval)		      \
+		    : "b" (mem), "r" (oldval), "r" (newval)		      \
 		    : "cr0", "memory");					      \
   __tmp != 0;								      \
 })
 
-#ifdef __powerpc64__
 # define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
 ({									      \
   unsigned long	__tmp;							      \
@@ -97,7 +104,7 @@
 		    "	bne-	1b\n"					      \
 		    "2:	" __ARCH_ACQ_INSTR				      \
 		    : "=&r" (__tmp)					      \
-		    : "r" (mem), "r" (oldval), "r" (newval)		      \
+		    : "b" (mem), "r" (oldval), "r" (newval)		      \
 		    : "cr0", "memory");					      \
   __tmp != 0;								      \
 })
@@ -110,7 +117,7 @@
 			"	stdcx.	%3,0,%2\n"			      \
 			"	bne-	1b"				      \
 			: "=&r" (__val), "=m" (*mem)			      \
-			: "r" (mem), "r" (value), "1" (*mem)		      \
+			: "b" (mem), "r" (value), "1" (*mem)		      \
 			: "cr0");					      \
       __val;								      \
     })
@@ -119,16 +126,47 @@
     ({									      \
       __typeof (*mem) __val, __tmp;					      \
       __asm __volatile ("1:	ldarx	%0,0,%3\n"			      \
-			"	addi	%1,%0,%4\n"			      \
+			"	add	%1,%0,%4\n"			      \
 			"	stdcx.	%1,0,%3\n"			      \
 			"	bne-	1b"				      \
 			: "=&b" (__val), "=&r" (__tmp), "=m" (*mem)	      \
-			: "r" (mem), "I" (value), "2" (*mem)		      \
+			: "b" (mem), "r" (value), "2" (*mem)		      \
 			: "cr0");					      \
       __val;								      \
     })
+
+# define __arch_atomic_decrement_if_positive_64(mem) \
+  ({ int __val, __tmp;							      \
+     __asm __volatile ("1:	ldarx	%0,0,%3\n"			      \
+		       "	cmpdi	0,%0,0\n"			      \
+		       "	addi	%1,%0,-1\n"			      \
+		       "	ble	2f\n"				      \
+		       "	stdcx.	%1,0,%3\n"			      \
+		       "	bne-	1b\n"				      \
+		       "2:	" __ARCH_ACQ_INSTR			      \
+		       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)	      \
+		       : "b" (mem), "2" (*mem)				      \
+		       : "cr0");					      \
+     __val;								      \
+  })
 
 #else /* powerpc32 */
+# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
+({									      \
+  unsigned int __tmp;							      \
+  __asm __volatile (__ARCH_REL_INSTR "\n"				      \
+		    "1:	lwarx	%0,0,%1\n"				      \
+		    "	subf.	%0,%2,%0\n"				      \
+		    "	bne	2f\n"					      \
+		    "	stwcx.	%3,0,%1\n"				      \
+		    "	bne-	1b\n"					      \
+		    "2:	" __ARCH_ACQ_INSTR				      \
+		    : "=&r" (__tmp)					      \
+		    : "b" (mem), "r" (oldval), "r" (newval)		      \
+		    : "cr0", "memory");					      \
+  __tmp != 0;								      \
+})
+
 # define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
   (abort (), 0)
 
@@ -136,6 +174,8 @@
     ({ abort (); (*mem) = (value); })
 # define __arch_atomic_exchange_and_add_64(mem, value) \
     ({ abort (); (*mem) = (value); })
+# define __arch_atomic_decrement_if_positive_64(mem) \
+    ({ abort (); (*mem)--; })
 #endif
 
 #define __arch_atomic_exchange_32(mem, value)				      \
@@ -146,7 +186,7 @@
 		      "		stwcx.	%3,0,%2\n"			      \
 		      "		bne-	1b"				      \
 		      : "=&r" (__val), "=m" (*mem)			      \
-		      : "r" (mem), "r" (value), "1" (*mem)		      \
+		      : "b" (mem), "r" (value), "1" (*mem)		      \
 		      : "cr0");						      \
     __val;								      \
   })
@@ -159,10 +199,26 @@
 		      "		stwcx.	%1,0,%3\n"			      \
 		      "		bne-	1b"				      \
 		      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)	      \
-		      : "r" (mem), "r" (value), "2" (*mem)		      \
+		      : "b" (mem), "r" (value), "2" (*mem)		      \
 		      : "cr0");						      \
     __val;								      \
   })
+
+#define __arch_atomic_decrement_if_positive_32(mem) \
+  ({ int __val, __tmp;							      \
+     __asm __volatile ("1:	lwarx	%0,0,%3\n"			      \
+		       "	cmpwi	0,%0,0\n"			      \
+		       "	addi	%1,%0,-1\n"			      \
+		       "	ble	2f\n"				      \
+		       "	stwcx.	%1,0,%3\n"			      \
+		       "	bne-	1b\n"				      \
+		       "2:	" __ARCH_ACQ_INSTR			      \
+		       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)	      \
+		       : "b" (mem), "2" (*mem)				      \
+		       : "cr0");					      \
+     __val;								      \
+  })
+
 
 #define atomic_exchange(mem, value)					      \
   ({									      \
@@ -191,20 +247,14 @@
 
 /* Decrement *MEM if it is > 0, and return the old value.  */
 #define atomic_decrement_if_positive(mem) \
-  ({ if (sizeof (*mem) != 4)						      \
+  ({ __typeof (*(mem)) __result;						      \
+    if (sizeof (*mem) == 4)						      \
+      __result = __arch_atomic_decrement_if_positive_32 (mem);	      \
+    else if (sizeof (*mem) == 8)					      \
+      __result = __arch_atomic_decrement_if_positive_64 (mem);	      \
+    else 								      \
        abort ();							      \
-     int __val, __tmp;							      \
-     __asm __volatile ("1:	lwarx	%0,0,%3\n"			      \
-		       "	cmpwi	0,%0,0\n"			      \
-		       "	addi	%1,%0,-1\n"			      \
-		       "	ble	2f\n"				      \
-		       "	stwcx.	%1,0,%3\n"			      \
-		       "	bne-	1b\n"				      \
-		       "2:	" __ARCH_ACQ_INSTR			      \
-		       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)	      \
-		       : "r" (mem), "2" (*mem)				      \
-		       : "cr0");					      \
-     __val;								      \
+    __result;								      \
   })
 
 
