This is the mail archive of the glibc-cvs@sourceware.org mailing list for the glibc project.


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]
Other format: [Raw text]

GNU C Library master sources branch, master, updated. glibc-2.10-173-gf8b6cd2


This is an automated email from the git hooks/post-receive script. It was
generated because a ref change was pushed to the repository containing
the project "GNU C Library master sources".

The branch, master, has been updated
       via  f8b6cd2182494252fdddafe72331b631afac08d8 (commit)
       via  e88726b483a275824e852f64476087568dbae7bb (commit)
       via  92618c954ff3c729c490f93bd15e621930656a47 (commit)
       via  30b1954abb02aa7ba8136fe728820cd769052efb (commit)
      from  f351f2b756e634fde2482a8f9790944db868162b (commit)

The revisions listed above that are new to this repository have not
appeared in any other notification email, so we list them in full
below.

- Log -----------------------------------------------------------------
http://sources.redhat.com/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=f8b6cd2182494252fdddafe72331b631afac08d8

commit f8b6cd2182494252fdddafe72331b631afac08d8
Author: Ulrich Drepper <drepper@redhat.com>
Date:   Sat Jul 18 12:45:27 2009 -0700

    Extend pthread_cond_timedwait tests.

diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index c7e1d04..d5b812e 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,5 +1,8 @@
 2009-07-18  Ulrich Drepper  <drepper@redhat.com>
 
+	* tst-cond11.c (run_test): Add test to check that the timeout is
+	long enough.
+
 	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
 	(__pthread_cond_timedwait): If possible use FUTEX_WAIT_BITSET to
 	directly use absolute timeout.
diff --git a/nptl/tst-cond11.c b/nptl/tst-cond11.c
index 0de4d56..4d0c7dd 100644
--- a/nptl/tst-cond11.c
+++ b/nptl/tst-cond11.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2003, 2004 Free Software Foundation, Inc.
+/* Copyright (C) 2003, 2004, 2009 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
    Contributed by Ulrich Drepper <drepper@redhat.com>, 2003.
 
@@ -130,6 +130,20 @@ run_test (clockid_t cl)
       return 1;
     }
 
+  struct timespec ts2;
+  if (clock_gettime (cl, &ts2) != 0)
+    {
+      puts ("second clock_gettime failed");
+      return 1;
+    }
+
+  if (ts2.tv_sec < ts.tv_sec
+      || (ts2.tv_sec == ts.tv_sec && ts2.tv_nsec < ts.tv_nsec))
+    {
+      puts ("timeout too short");
+      return 1;
+    }
+
   if (pthread_mutex_unlock (&mut) != 0)
     {
       puts ("mutex_unlock failed");

http://sources.redhat.com/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=e88726b483a275824e852f64476087568dbae7bb

commit e88726b483a275824e852f64476087568dbae7bb
Author: Ulrich Drepper <drepper@redhat.com>
Date:   Sat Jul 18 12:44:12 2009 -0700

    Extend x86-64 pthread_cond_timedwait to use futex syscall with absolute timeout.

diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index 1ee3b19..c7e1d04 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,6 +1,10 @@
 2009-07-18  Ulrich Drepper  <drepper@redhat.com>
 
 	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+	(__pthread_cond_timedwait): If possible use FUTEX_WAIT_BITSET to
+	directly use absolute timeout.
+
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
 	(__pthread_cond_wait): Convert to using exception handler instead of
 	registered unwind buffer.
 	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
index a2ebfec..21115fd 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
@@ -58,21 +58,25 @@ __pthread_cond_timedwait:
 	pushq	%r14
 	cfi_adjust_cfa_offset(8)
 	cfi_rel_offset(%r14, 0)
-#define FRAME_SIZE 48
+#ifdef __ASSUME_FUTEX_CLOCK_REALTIME
+# define FRAME_SIZE 32
+#else
+# define FRAME_SIZE 48
+#endif
 	subq	$FRAME_SIZE, %rsp
 	cfi_adjust_cfa_offset(FRAME_SIZE)
 
 	cmpq	$1000000000, 8(%rdx)
 	movl	$EINVAL, %eax
-	jae	18f
+	jae	48f
 
 	/* Stack frame:
 
 	   rsp + 48
 	            +--------------------------+
-	   rsp + 40 | old wake_seq value       |
+	   rsp + 32 | timeout value            |
 	            +--------------------------+
-	   rsp + 24 | timeout value            |
+	   rsp + 24 | old wake_seq value       |
 	            +--------------------------+
 	   rsp + 16 | mutex pointer            |
 	            +--------------------------+
@@ -94,8 +98,18 @@ __pthread_cond_timedwait:
 	je	22f
 	movq	%rsi, dep_mutex(%rdi)
 
+22:
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+#  ifdef PIC
+	cmpl	$0, __have_futex_clock_realtime(%rip)
+#  else
+	cmpl	$0, __have_futex_clock_realtime
+#  endif
+	je	.Lreltmo
+#endif
+
 	/* Get internal lock.  */
-22:	movl	$1, %esi
+	movl	$1, %esi
 	xorl	%eax, %eax
 	LOCK
 #if cond_lock == 0
@@ -103,15 +117,15 @@ __pthread_cond_timedwait:
 #else
 	cmpxchgl %esi, cond_lock(%rdi)
 #endif
-	jnz	1f
+	jnz	31f
 
 	/* Unlock the mutex.  */
-2:	movq	16(%rsp), %rdi
+32:	movq	16(%rsp), %rdi
 	xorl	%esi, %esi
 	callq	__pthread_mutex_unlock_usercnt
 
 	testl	%eax, %eax
-	jne	16f
+	jne	46f
 
 	movq	8(%rsp), %rdi
 	incq	total_seq(%rdi)
@@ -122,69 +136,10 @@ __pthread_cond_timedwait:
 	movq	8(%rsp), %rdi
 	movq	wakeup_seq(%rdi), %r9
 	movl	broadcast_seq(%rdi), %edx
-	movq	%r9, 40(%rsp)
+	movq	%r9, 24(%rsp)
 	movl	%edx, 4(%rsp)
 
-	/* Get the current time.  */
-8:
-#ifdef __NR_clock_gettime
-	/* Get the clock number.  Note that the field in the condvar
-	   structure stores the number minus 1.  */
-	movq	8(%rsp), %rdi
-	movl	cond_nwaiters(%rdi), %edi
-	andl	$((1 << nwaiters_shift) - 1), %edi
-	/* Only clocks 0 and 1 are allowed so far.  Both are handled in the
-	   kernel.  */
-	leaq	24(%rsp), %rsi
-# ifdef SHARED
-	movq	__vdso_clock_gettime@GOTPCREL(%rip), %rax
-	movq	(%rax), %rax
-	PTR_DEMANGLE (%rax)
-	jz	26f
-	call	*%rax
-	jmp	27f
-# endif
-26:	movl	$__NR_clock_gettime, %eax
-	syscall
-27:
-# ifndef __ASSUME_POSIX_TIMERS
-	cmpq	$-ENOSYS, %rax
-	je	19f
-# endif
-
-	/* Compute relative timeout.  */
-	movq	(%r13), %rcx
-	movq	8(%r13), %rdx
-	subq	24(%rsp), %rcx
-	subq	32(%rsp), %rdx
-#else
-	leaq	24(%rsp), %rdi
-	xorl	%esi, %esi
-	movq	$VSYSCALL_ADDR_vgettimeofday, %rax
-	callq	*%rax
-
-	/* Compute relative timeout.  */
-	movq	32(%rsp), %rax
-	movl	$1000, %edx
-	mul	%rdx		/* Milli seconds to nano seconds.  */
-	movq	(%r13), %rcx
-	movq	8(%r13), %rdx
-	subq	24(%rsp), %rcx
-	subq	%rax, %rdx
-#endif
-	jns	12f
-	addq	$1000000000, %rdx
-	decq	%rcx
-12:	testq	%rcx, %rcx
-	movq	8(%rsp), %rdi
-	movq	$-ETIMEDOUT, %r14
-	js	6f
-
-	/* Store relative timeout.  */
-21:	movq	%rcx, 24(%rsp)
-	movq	%rdx, 32(%rsp)
-
-	movl	cond_futex(%rdi), %r12d
+38:	movl	cond_futex(%rdi), %r12d
 
 	/* Unlock.  */
 	LOCK
@@ -193,27 +148,26 @@ __pthread_cond_timedwait:
 #else
 	decl	cond_lock(%rdi)
 #endif
-	jne	3f
+	jne	33f
 
-.LcleanupSTART:
-4:	callq	__pthread_enable_asynccancel
+.LcleanupSTART1:
+34:	callq	__pthread_enable_asynccancel
 	movl	%eax, (%rsp)
 
-	leaq	24(%rsp), %r10
+	movq	%r13, %r10
 	cmpq	$-1, dep_mutex(%rdi)
-	movq	%r12, %rdx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	movl	$FUTEX_WAIT, %eax
-	movl	$(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+	movl	$FUTEX_WAIT_BITSET, %eax
+	movl	$(FUTEX_WAIT_BITSET|FUTEX_PRIVATE_FLAG), %esi
 	cmove	%eax, %esi
-#else
-	movl	$0, %eax
-	movl	%fs:PRIVATE_FUTEX, %esi
-	cmove	%eax, %esi
-# if FUTEX_WAIT != 0
-	orl	$FUTEX_WAIT, %esi
-# endif
-#endif
+	/* The following only works like this because we only support
+	   two clocks, represented using a single bit.  */
+	xorl	%eax, %eax
+	testl	$1, cond_nwaiters(%rdi)
+	movl	$FUTEX_CLOCK_REALTIME, %edx
+	movl	$0xffffffff, %r9d
+	cmove	%edx, %eax
+	orl	%eax, %esi
+	movq	%r12, %rdx
 	addq	$cond_futex, %rdi
 	movl	$SYS_futex, %eax
 	syscall
@@ -221,7 +175,7 @@ __pthread_cond_timedwait:
 
 	movl	(%rsp), %edi
 	callq	__pthread_disable_asynccancel
-.LcleanupEND:
+.LcleanupEND1:
 
 	/* Lock.  */
 	movq	8(%rsp), %rdi
@@ -233,45 +187,45 @@ __pthread_cond_timedwait:
 #else
 	cmpxchgl %esi, cond_lock(%rdi)
 #endif
-	jne	5f
+	jne	35f
 
-6:	movl	broadcast_seq(%rdi), %edx
+36:	movl	broadcast_seq(%rdi), %edx
 
 	movq	woken_seq(%rdi), %rax
 
 	movq	wakeup_seq(%rdi), %r9
 
 	cmpl	4(%rsp), %edx
-	jne	23f
+	jne	53f
 
-	cmpq	40(%rsp), %r9
-	jbe	15f
+	cmpq	24(%rsp), %r9
+	jbe	45f
 
 	cmpq	%rax, %r9
-	ja	9f
+	ja	39f
 
-15:	cmpq	$-ETIMEDOUT, %r14
-	jne	8b
+45:	cmpq	$-ETIMEDOUT, %r14
+	jne	38b
 
-13:	incq	wakeup_seq(%rdi)
+99:	incq	wakeup_seq(%rdi)
 	incl	cond_futex(%rdi)
 	movl	$ETIMEDOUT, %r14d
-	jmp	14f
+	jmp	44f
 
-23:	xorq	%r14, %r14
-	jmp	24f
+53:	xorq	%r14, %r14
+	jmp	54f
 
-9:	xorq	%r14, %r14
-14:	incq	woken_seq(%rdi)
+39:	xorq	%r14, %r14
+44:	incq	woken_seq(%rdi)
 
-24:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+54:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
 
 	/* Wake up a thread which wants to destroy the condvar object.  */
 	cmpq	$0xffffffffffffffff, total_seq(%rdi)
-	jne	25f
+	jne	55f
 	movl	cond_nwaiters(%rdi), %eax
 	andl	$~((1 << nwaiters_shift) - 1), %eax
-	jne	25f
+	jne	55f
 
 	addq	$cond_nwaiters, %rdi
 	cmpq	$-1, dep_mutex-cond_nwaiters(%rdi)
@@ -290,21 +244,21 @@ __pthread_cond_timedwait:
 	syscall
 	subq	$cond_nwaiters, %rdi
 
-25:	LOCK
+55:	LOCK
 #if cond_lock == 0
 	decl	(%rdi)
 #else
 	decl	cond_lock(%rdi)
 #endif
-	jne	10f
+	jne	40f
 
-11:	movq	16(%rsp), %rdi
+41:	movq	16(%rsp), %rdi
 	callq	__pthread_mutex_cond_lock
 
 	testq	%rax, %rax
 	cmoveq	%r14, %rax
 
-18:	addq	$FRAME_SIZE, %rsp
+48:	addq	$FRAME_SIZE, %rsp
 	cfi_adjust_cfa_offset(-FRAME_SIZE)
 	popq	%r14
 	cfi_adjust_cfa_offset(-8)
@@ -319,8 +273,7 @@ __pthread_cond_timedwait:
 	retq
 
 	/* Initial locking failed.  */
-1:
-	cfi_adjust_cfa_offset(3 * 8 + FRAME_SIZE)
+31:	cfi_adjust_cfa_offset(3 * 8 + FRAME_SIZE)
 	cfi_rel_offset(%r12, FRAME_SIZE + 16)
 	cfi_rel_offset(%r13, FRAME_SIZE + 8)
 	cfi_rel_offset(%r14, FRAME_SIZE)
@@ -332,10 +285,10 @@ __pthread_cond_timedwait:
 	movl	$LLL_SHARED, %esi
 	cmovne	%eax, %esi
 	callq	__lll_lock_wait
-	jmp	2b
+	jmp	32b
 
 	/* Unlock in loop requires wakeup.  */
-3:
+33:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
@@ -344,10 +297,10 @@ __pthread_cond_timedwait:
 	movl	$LLL_SHARED, %esi
 	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
-	jmp	4b
+	jmp	34b
 
 	/* Locking in loop failed.  */
-5:
+35:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
@@ -359,10 +312,10 @@ __pthread_cond_timedwait:
 #if cond_lock != 0
 	subq	$cond_lock, %rdi
 #endif
-	jmp	6b
+	jmp	36b
 
 	/* Unlock after loop requires wakeup.  */
-10:
+40:
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
 #endif
@@ -371,10 +324,10 @@ __pthread_cond_timedwait:
 	movl	$LLL_SHARED, %esi
 	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
-	jmp	11b
+	jmp	41b
 
 	/* The initial unlocking of the mutex failed.  */
-16:	movq	8(%rsp), %rdi
+46:	movq	8(%rsp), %rdi
 	movq	%rax, (%rsp)
 	LOCK
 #if cond_lock == 0
@@ -382,7 +335,7 @@ __pthread_cond_timedwait:
 #else
 	decl	cond_lock(%rdi)
 #endif
-	jne	17f
+	jne	47f
 
 #if cond_lock != 0
 	addq	$cond_lock, %rdi
@@ -393,23 +346,229 @@ __pthread_cond_timedwait:
 	cmovne	%eax, %esi
 	callq	__lll_unlock_wake
 
-17:	movq	(%rsp), %rax
-	jmp	18b
+47:	movq	(%rsp), %rax
+	jmp	48b
+
+
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+.Lreltmo:
+	/* Get internal lock.  */
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+# if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+# else
+	cmpxchgl %esi, cond_lock(%rdi)
+# endif
+	jnz	1f
+
+	/* Unlock the mutex.  */
+2:	movq	16(%rsp), %rdi
+	xorl	%esi, %esi
+	callq	__pthread_mutex_unlock_usercnt
+
+	testl	%eax, %eax
+	jne	46b
+
+	movq	8(%rsp), %rdi
+	incq	total_seq(%rdi)
+	incl	cond_futex(%rdi)
+	addl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Get and store current wakeup_seq value.  */
+	movq	8(%rsp), %rdi
+	movq	wakeup_seq(%rdi), %r9
+	movl	broadcast_seq(%rdi), %edx
+	movq	%r9, 24(%rsp)
+	movl	%edx, 4(%rsp)
 
-#if defined __NR_clock_gettime && !defined __ASSUME_POSIX_TIMERS
+	/* Get the current time.  */
+8:
+# ifdef __NR_clock_gettime
+	/* Get the clock number.  Note that the field in the condvar
+	   structure stores the number minus 1.  */
+	movq	8(%rsp), %rdi
+	movl	cond_nwaiters(%rdi), %edi
+	andl	$((1 << nwaiters_shift) - 1), %edi
+	/* Only clocks 0 and 1 are allowed so far.  Both are handled in the
+	   kernel.  */
+	leaq	32(%rsp), %rsi
+#  ifdef SHARED
+	movq	__vdso_clock_gettime@GOTPCREL(%rip), %rax
+	movq	(%rax), %rax
+	PTR_DEMANGLE (%rax)
+	jz	26f
+	call	*%rax
+	jmp	27f
+#  endif
+26:	movl	$__NR_clock_gettime, %eax
+	syscall
+27:
+#  ifndef __ASSUME_POSIX_TIMERS
+	cmpq	$-ENOSYS, %rax
+	je	19f
+#  endif
+
+	/* Compute relative timeout.  */
+	movq	(%r13), %rcx
+	movq	8(%r13), %rdx
+	subq	32(%rsp), %rcx
+	subq	40(%rsp), %rdx
+# else
+	leaq	24(%rsp), %rdi
+	xorl	%esi, %esi
+	movq	$VSYSCALL_ADDR_vgettimeofday, %rax
+	callq	*%rax
+
+	/* Compute relative timeout.  */
+	movq	40(%rsp), %rax
+	movl	$1000, %edx
+	mul	%rdx		/* Milli seconds to nano seconds.  */
+	movq	(%r13), %rcx
+	movq	8(%r13), %rdx
+	subq	32(%rsp), %rcx
+	subq	%rax, %rdx
+# endif
+	jns	12f
+	addq	$1000000000, %rdx
+	decq	%rcx
+12:	testq	%rcx, %rcx
+	movq	8(%rsp), %rdi
+	movq	$-ETIMEDOUT, %r14
+	js	6f
+
+	/* Store relative timeout.  */
+21:	movq	%rcx, 32(%rsp)
+	movq	%rdx, 40(%rsp)
+
+	movl	cond_futex(%rdi), %r12d
+
+	/* Unlock.  */
+	LOCK
+# if cond_lock == 0
+	decl	(%rdi)
+# else
+	decl	cond_lock(%rdi)
+# endif
+	jne	3f
+
+.LcleanupSTART2:
+4:	callq	__pthread_enable_asynccancel
+	movl	%eax, (%rsp)
+
+	leaq	32(%rsp), %r10
+	cmpq	$-1, dep_mutex(%rdi)
+	movq	%r12, %rdx
+# ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAIT, %eax
+	movl	$(FUTEX_WAIT|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+# else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+#  if FUTEX_WAIT != 0
+	orl	$FUTEX_WAIT, %esi
+#  endif
+# endif
+	addq	$cond_futex, %rdi
+	movl	$SYS_futex, %eax
+	syscall
+	movq	%rax, %r14
+
+	movl	(%rsp), %edi
+	callq	__pthread_disable_asynccancel
+.LcleanupEND2:
+
+	/* Lock.  */
+	movq	8(%rsp), %rdi
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+# if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+# else
+	cmpxchgl %esi, cond_lock(%rdi)
+# endif
+	jne	5f
+
+6:	movl	broadcast_seq(%rdi), %edx
+
+	movq	woken_seq(%rdi), %rax
+
+	movq	wakeup_seq(%rdi), %r9
+
+	cmpl	4(%rsp), %edx
+	jne	53b
+
+	cmpq	24(%rsp), %r9
+	jbe	45b
+
+	cmpq	%rax, %r9
+	ja	39b
+
+	cmpq	$-ETIMEDOUT, %r14
+	jne	8b
+
+	jmp	99b
+
+	/* Initial locking failed.  */
+1:	cfi_adjust_cfa_offset(3 * 8 + FRAME_SIZE)
+	cfi_rel_offset(%r12, FRAME_SIZE + 16)
+	cfi_rel_offset(%r13, FRAME_SIZE + 8)
+	cfi_rel_offset(%r14, FRAME_SIZE)
+# if cond_lock != 0
+	addq	$cond_lock, %rdi
+# endif
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+	jmp	2b
+
+	/* Unlock in loop requires wakeup.  */
+3:
+# if cond_lock != 0
+	addq	$cond_lock, %rdi
+# endif
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+	jmp	4b
+
+	/* Locking in loop failed.  */
+5:
+# if cond_lock != 0
+	addq	$cond_lock, %rdi
+# endif
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+# if cond_lock != 0
+	subq	$cond_lock, %rdi
+# endif
+	jmp	6b
+
+# if defined __NR_clock_gettime && !defined __ASSUME_POSIX_TIMERS
 	/* clock_gettime not available.  */
-19:	leaq	24(%rsp), %rdi
+19:	leaq	32(%rsp), %rdi
 	xorl	%esi, %esi
 	movq	$VSYSCALL_ADDR_vgettimeofday, %rax
 	callq	*%rax
 
 	/* Compute relative timeout.  */
-	movq	32(%rsp), %rax
+	movq	40(%rsp), %rax
 	movl	$1000, %edx
 	mul	%rdx		/* Milli seconds to nano seconds.  */
 	movq	(%r13), %rcx
 	movq	8(%r13), %rdx
-	subq	24(%rsp), %rcx
+	subq	32(%rsp), %rcx
 	subq	%rax, %rdx
 	jns	20f
 	addq	$1000000000, %rdx
@@ -419,6 +578,7 @@ __pthread_cond_timedwait:
 	movq	$-ETIMEDOUT, %r14
 	js	6b
 	jmp	21b
+# endif
 #endif
 	.size	__pthread_cond_timedwait, .-__pthread_cond_timedwait
 versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
@@ -575,10 +735,16 @@ __condvar_cleanup2:
 	.byte	DW_EH_PE_uleb128		# call-site format
 	.uleb128 .Lcstend-.Lcstbegin
 .Lcstbegin:
-	.uleb128 .LcleanupSTART-.LSTARTCODE
-	.uleb128 .LcleanupEND-.LcleanupSTART
+	.uleb128 .LcleanupSTART1-.LSTARTCODE
+	.uleb128 .LcleanupEND1-.LcleanupSTART1
 	.uleb128 __condvar_cleanup2-.LSTARTCODE
 	.uleb128  0
+#ifndef __ASSUME_FUTEX_CLOCK_REALTIME
+	.uleb128 .LcleanupSTART2-.LSTARTCODE
+	.uleb128 .LcleanupEND2-.LcleanupSTART2
+	.uleb128 __condvar_cleanup2-.LSTARTCODE
+	.uleb128  0
+#endif
 	.uleb128 .LcallUR-.LSTARTCODE
 	.uleb128 .LENDCODE-.LcallUR
 	.uleb128 0

http://sources.redhat.com/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=92618c954ff3c729c490f93bd15e621930656a47

commit 92618c954ff3c729c490f93bd15e621930656a47
Author: Ulrich Drepper <drepper@redhat.com>
Date:   Sat Jul 18 08:53:18 2009 -0700

    Optimize x86-64 pthread_cond_timedwait.
    
    Instead of actively registering an unwind buffer we now use the
    exception handling functionality of the gcc runtime.

diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index 33fc8a1..1ee3b19 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -3,6 +3,8 @@
 	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
 	(__pthread_cond_wait): Convert to using exception handler instead of
 	registered unwind buffer.
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+	(__pthread_cond_timedwait): Likewise.
 
 2009-07-17  Ulrich Drepper  <drepper@redhat.com>
 
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
index 2b53591..a2ebfec 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
@@ -32,137 +32,23 @@
 	.text
 
 
-	.align	16
-	.type	__condvar_cleanup, @function
-	.globl	__condvar_cleanup
-	.hidden	__condvar_cleanup
-__condvar_cleanup:
-	cfi_startproc
-	pushq	%r12
-	cfi_adjust_cfa_offset(8)
-	cfi_rel_offset(%r12, 0)
-
-	/* Get internal lock.  */
-	movq	%rdi, %r8
-	movq	8(%rdi), %rdi
-	movl	$1, %esi
-	xorl	%eax, %eax
-	LOCK
-#if cond_lock == 0
-	cmpxchgl %esi, (%rdi)
-#else
-	cmpxchgl %esi, cond_lock(%rdi)
-#endif
-	jz	1f
-
-#if cond_lock != 0
-	addq	$cond_lock, %rdi
-#endif
-	cmpq	$-1, dep_mutex-cond_lock(%rdi)
-	movl	$LLL_PRIVATE, %eax
-	movl	$LLL_SHARED, %esi
-	cmovne	%eax, %esi
-	callq	__lll_lock_wait
-#if cond_lock != 0
-	subq	$cond_lock, %rdi
-#endif
-
-1:	movl	broadcast_seq(%rdi), %edx
-	cmpl	4(%r8), %edx
-	jne	3f
-
-	/* We increment the wakeup_seq counter only if it is lower than
-	   total_seq.  If this is not the case the thread was woken and
-	   then canceled.  In this case we ignore the signal.  */
-	movq	total_seq(%rdi), %rax
-	cmpq	wakeup_seq(%rdi), %rax
-	jbe	6f
-	incq	wakeup_seq(%rdi)
-	incl	cond_futex(%rdi)
-6:	incq	woken_seq(%rdi)
-
-3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
-
-	/* Wake up a thread which wants to destroy the condvar object.  */
-	xorq	%r12, %r12
-	cmpq	$0xffffffffffffffff, total_seq(%rdi)
-	jne	4f
-	movl	cond_nwaiters(%rdi), %eax
-	andl	$~((1 << nwaiters_shift) - 1), %eax
-	jne	4f
-
-	addq	$cond_nwaiters, %rdi
-	cmpq	$-1, dep_mutex-cond_nwaiters(%rdi)
-	movl	$1, %edx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	movl	$FUTEX_WAKE, %eax
-	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
-	cmove	%eax, %esi
-#else
-	movl	$0, %eax
-	movl	%fs:PRIVATE_FUTEX, %esi
-	cmove	%eax, %esi
-	orl	$FUTEX_WAKE, %esi
-#endif
-	movl	$SYS_futex, %eax
-	syscall
-	subq	$cond_nwaiters, %rdi
-	movl	$1, %r12d
-
-4:	LOCK
-#if cond_lock == 0
-	decl	(%rdi)
-#else
-	decl	cond_lock(%rdi)
-#endif
-	je	2f
-#if cond_lock != 0
-	addq	$cond_lock, %rdi
-#endif
-	cmpq	$-1, dep_mutex-cond_lock(%rdi)
-	movl	$LLL_PRIVATE, %eax
-	movl	$LLL_SHARED, %esi
-	cmovne	%eax, %esi
-	callq	__lll_unlock_wake
-
-	/* Wake up all waiters to make sure no signal gets lost.  */
-2:	testq	%r12, %r12
-	jnz	5f
-	addq	$cond_futex, %rdi
-	cmpq	$-1, dep_mutex-cond_futex(%rdi)
-	movl	$0x7fffffff, %edx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	movl	$FUTEX_WAKE, %eax
-	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
-	cmove	%eax, %esi
-#else
-	movl	$0, %eax
-	movl	%fs:PRIVATE_FUTEX, %esi
-	cmove	%eax, %esi
-	orl	$FUTEX_WAKE, %esi
-#endif
-	movl	$SYS_futex, %eax
-	syscall
-
-5:	movq	16(%r8), %rdi
-	callq	__pthread_mutex_cond_lock
-
-	popq	%r12
-	cfi_adjust_cfa_offset(-8)
-	cfi_restore(%r12)
-
-	retq
-	cfi_endproc
-	.size	__condvar_cleanup, .-__condvar_cleanup
-
-
 /* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
 			       const struct timespec *abstime)  */
 	.globl	__pthread_cond_timedwait
 	.type	__pthread_cond_timedwait, @function
 	.align	16
 __pthread_cond_timedwait:
+.LSTARTCODE:
 	cfi_startproc
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
 	pushq	%r12
 	cfi_adjust_cfa_offset(8)
 	cfi_rel_offset(%r12, 0)
@@ -172,7 +58,7 @@ __pthread_cond_timedwait:
 	pushq	%r14
 	cfi_adjust_cfa_offset(8)
 	cfi_rel_offset(%r14, 0)
-#define FRAME_SIZE 80
+#define FRAME_SIZE 48
 	subq	$FRAME_SIZE, %rsp
 	cfi_adjust_cfa_offset(FRAME_SIZE)
 
@@ -182,9 +68,7 @@ __pthread_cond_timedwait:
 
 	/* Stack frame:
 
-	   rsp + 80
-	            +--------------------------+
-	   rsp + 48 | cleanup buffer           |
+	   rsp + 48
 	            +--------------------------+
 	   rsp + 40 | old wake_seq value       |
 	            +--------------------------+
@@ -234,16 +118,6 @@ __pthread_cond_timedwait:
 	incl	cond_futex(%rdi)
 	addl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
 
-	/* Install cancellation handler.  */
-#ifdef PIC
-	leaq	__condvar_cleanup(%rip), %rsi
-#else
-	leaq	__condvar_cleanup, %rsi
-#endif
-	leaq	48(%rsp), %rdi
-	movq	%rsp, %rdx
-	callq	__pthread_cleanup_push
-
 	/* Get and store current wakeup_seq value.  */
 	movq	8(%rsp), %rdi
 	movq	wakeup_seq(%rdi), %r9
@@ -321,6 +195,7 @@ __pthread_cond_timedwait:
 #endif
 	jne	3f
 
+.LcleanupSTART:
 4:	callq	__pthread_enable_asynccancel
 	movl	%eax, (%rsp)
 
@@ -346,6 +221,7 @@ __pthread_cond_timedwait:
 
 	movl	(%rsp), %edi
 	callq	__pthread_disable_asynccancel
+.LcleanupEND:
 
 	/* Lock.  */
 	movq	8(%rsp), %rdi
@@ -422,11 +298,7 @@ __pthread_cond_timedwait:
 #endif
 	jne	10f
 
-	/* Remove cancellation handler.  */
-11:	movq	48+CLEANUP_PREV(%rsp), %rdx
-	movq	%rdx, %fs:CLEANUP
-
-	movq	16(%rsp), %rdi
+11:	movq	16(%rsp), %rdi
 	callq	__pthread_mutex_cond_lock
 
 	testq	%rax, %rax
@@ -548,7 +420,179 @@ __pthread_cond_timedwait:
 	js	6b
 	jmp	21b
 #endif
-	cfi_endproc
 	.size	__pthread_cond_timedwait, .-__pthread_cond_timedwait
 versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait,
 		  GLIBC_2_3_2)
+
+
+	.align	16
+	.type	__condvar_cleanup2, @function
+__condvar_cleanup2:
+	/* Stack frame:
+
+	   rsp + 72
+		    +--------------------------+
+	   rsp + 64 | %r12                     |
+		    +--------------------------+
+	   rsp + 56 | %r13                     |
+		    +--------------------------+
+	   rsp + 48 | %r14                     |
+		    +--------------------------+
+	   rsp + 24 | unused                   |
+	            +--------------------------+
+	   rsp + 16 | mutex pointer            |
+	            +--------------------------+
+	   rsp +  8 | condvar pointer          |
+	            +--------------------------+
+	   rsp +  4 | old broadcast_seq value  |
+	            +--------------------------+
+	   rsp +  0 | old cancellation mode    |
+	            +--------------------------+
+	*/
+
+	movq	%rax, 24(%rsp)
+
+	/* Get internal lock.  */
+	movq	8(%rsp), %rdi
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jz	1f
+
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+#if cond_lock != 0
+	subq	$cond_lock, %rdi
+#endif
+
+1:	movl	broadcast_seq(%rdi), %edx
+	cmpl	4(%rsp), %edx
+	jne	3f
+
+	/* We increment the wakeup_seq counter only if it is lower than
+	   total_seq.  If this is not the case the thread was woken and
+	   then canceled.  In this case we ignore the signal.  */
+	movq	total_seq(%rdi), %rax
+	cmpq	wakeup_seq(%rdi), %rax
+	jbe	6f
+	incq	wakeup_seq(%rdi)
+	incl	cond_futex(%rdi)
+6:	incq	woken_seq(%rdi)
+
+3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	xorq	%r12, %r12
+	cmpq	$0xffffffffffffffff, total_seq(%rdi)
+	jne	4f
+	movl	cond_nwaiters(%rdi), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	4f
+
+	cmpq	$-1, dep_mutex(%rdi)
+	leaq	cond_nwaiters(%rdi), %rdi
+	movl	$1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+	subq	$cond_nwaiters, %rdi
+	movl	$1, %r12d
+
+4:	LOCK
+#if cond_lock == 0
+	decl	(%rdi)
+#else
+	decl	cond_lock(%rdi)
+#endif
+	je	2f
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+
+	/* Wake up all waiters to make sure no signal gets lost.  */
+2:	testq	%r12, %r12
+	jnz	5f
+	addq	$cond_futex, %rdi
+	cmpq	$-1, dep_mutex-cond_futex(%rdi)
+	movl	$0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+
+5:	movq	16(%rsp), %rdi
+	callq	__pthread_mutex_cond_lock
+
+	movq	24(%rsp), %rdi
+	movq	FRAME_SIZE(%rsp), %r14
+	movq	FRAME_SIZE+8(%rsp), %r13
+	movq	FRAME_SIZE+16(%rsp), %r12
+.LcallUR:
+	call	_Unwind_Resume@PLT
+	hlt
+.LENDCODE:
+	cfi_endproc
+	.size	__condvar_cleanup2, .-__condvar_cleanup2
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			# @LPStart format
+	.byte	DW_EH_PE_omit			# @TType format
+	.byte	DW_EH_PE_uleb128		# call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART-.LSTARTCODE
+	.uleb128 .LcleanupEND-.LcleanupSTART
+	.uleb128 __condvar_cleanup2-.LSTARTCODE
+	.uleb128  0
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+	.hidden	DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	8
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+	.quad	__gcc_personality_v0
+#endif

http://sources.redhat.com/git/gitweb.cgi?p=glibc.git;a=commitdiff;h=30b1954abb02aa7ba8136fe728820cd769052efb

commit 30b1954abb02aa7ba8136fe728820cd769052efb
Author: Ulrich Drepper <drepper@redhat.com>
Date:   Sat Jul 18 08:09:39 2009 -0700

    Optimize x86-64 pthread_cond_wait.
    
    Instead of actively registering an unwind buffer we now use the
    exception handling functionality of the gcc runtime.

diff --git a/nptl/ChangeLog b/nptl/ChangeLog
index 98d3a4d..33fc8a1 100644
--- a/nptl/ChangeLog
+++ b/nptl/ChangeLog
@@ -1,3 +1,9 @@
+2009-07-18  Ulrich Drepper  <drepper@redhat.com>
+
+	* sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+	(__pthread_cond_wait): Convert to using exception handler instead of
+	registered unwind buffer.
+
 2009-07-17  Ulrich Drepper  <drepper@redhat.com>
 
 	* sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S (sem_timedwait):
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
index ddcf106..2b53591 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_timedwait.S
@@ -31,6 +31,131 @@
 
 	.text
 
+
+	.align	16
+	.type	__condvar_cleanup, @function
+	.globl	__condvar_cleanup
+	.hidden	__condvar_cleanup
+__condvar_cleanup:
+	cfi_startproc
+	pushq	%r12
+	cfi_adjust_cfa_offset(8)
+	cfi_rel_offset(%r12, 0)
+
+	/* Get internal lock.  */
+	movq	%rdi, %r8
+	movq	8(%rdi), %rdi
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jz	1f
+
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+#if cond_lock != 0
+	subq	$cond_lock, %rdi
+#endif
+
+1:	movl	broadcast_seq(%rdi), %edx
+	cmpl	4(%r8), %edx
+	jne	3f
+
+	/* We increment the wakeup_seq counter only if it is lower than
+	   total_seq.  If this is not the case the thread was woken and
+	   then canceled.  In this case we ignore the signal.  */
+	movq	total_seq(%rdi), %rax
+	cmpq	wakeup_seq(%rdi), %rax
+	jbe	6f
+	incq	wakeup_seq(%rdi)
+	incl	cond_futex(%rdi)
+6:	incq	woken_seq(%rdi)
+
+3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	xorq	%r12, %r12
+	cmpq	$0xffffffffffffffff, total_seq(%rdi)
+	jne	4f
+	movl	cond_nwaiters(%rdi), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	4f
+
+	addq	$cond_nwaiters, %rdi
+	cmpq	$-1, dep_mutex-cond_nwaiters(%rdi)
+	movl	$1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+	subq	$cond_nwaiters, %rdi
+	movl	$1, %r12d
+
+4:	LOCK
+#if cond_lock == 0
+	decl	(%rdi)
+#else
+	decl	cond_lock(%rdi)
+#endif
+	je	2f
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+
+	/* Wake up all waiters to make sure no signal gets lost.  */
+2:	testq	%r12, %r12
+	jnz	5f
+	addq	$cond_futex, %rdi
+	cmpq	$-1, dep_mutex-cond_futex(%rdi)
+	movl	$0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+
+5:	movq	16(%r8), %rdi
+	callq	__pthread_mutex_cond_lock
+
+	popq	%r12
+	cfi_adjust_cfa_offset(-8)
+	cfi_restore(%r12)
+
+	retq
+	cfi_endproc
+	.size	__condvar_cleanup, .-__condvar_cleanup
+
+
 /* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex,
 			       const struct timespec *abstime)  */
 	.globl	__pthread_cond_timedwait
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
index 146a414..c3c879c 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S
@@ -28,148 +28,32 @@
 
 	.text
 
-	.align	16
-	.type	__condvar_cleanup, @function
-	.globl	__condvar_cleanup
-	.hidden	__condvar_cleanup
-__condvar_cleanup:
-	cfi_startproc
-	pushq	%r12
-	cfi_adjust_cfa_offset(8)
-	cfi_rel_offset(%r12, 0)
-
-	/* Get internal lock.  */
-	movq	%rdi, %r8
-	movq	8(%rdi), %rdi
-	movl	$1, %esi
-	xorl	%eax, %eax
-	LOCK
-#if cond_lock == 0
-	cmpxchgl %esi, (%rdi)
-#else
-	cmpxchgl %esi, cond_lock(%rdi)
-#endif
-	jz	1f
-
-#if cond_lock != 0
-	addq	$cond_lock, %rdi
-#endif
-	cmpq	$-1, dep_mutex-cond_lock(%rdi)
-	movl	$LLL_PRIVATE, %eax
-	movl	$LLL_SHARED, %esi
-	cmovne	%eax, %esi
-	callq	__lll_lock_wait
-#if cond_lock != 0
-	subq	$cond_lock, %rdi
-#endif
-
-1:	movl	broadcast_seq(%rdi), %edx
-	cmpl	4(%r8), %edx
-	jne	3f
-
-	/* We increment the wakeup_seq counter only if it is lower than
-	   total_seq.  If this is not the case the thread was woken and
-	   then canceled.  In this case we ignore the signal.  */
-	movq	total_seq(%rdi), %rax
-	cmpq	wakeup_seq(%rdi), %rax
-	jbe	6f
-	incq	wakeup_seq(%rdi)
-	incl	cond_futex(%rdi)
-6:	incq	woken_seq(%rdi)
-
-3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
-
-	/* Wake up a thread which wants to destroy the condvar object.  */
-	xorq	%r12, %r12
-	cmpq	$0xffffffffffffffff, total_seq(%rdi)
-	jne	4f
-	movl	cond_nwaiters(%rdi), %eax
-	andl	$~((1 << nwaiters_shift) - 1), %eax
-	jne	4f
-
-	addq	$cond_nwaiters, %rdi
-	cmpq	$-1, dep_mutex-cond_nwaiters(%rdi)
-	movl	$1, %edx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	movl	$FUTEX_WAKE, %eax
-	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
-	cmove	%eax, %esi
-#else
-	movl	$0, %eax
-	movl	%fs:PRIVATE_FUTEX, %esi
-	cmove	%eax, %esi
-	orl	$FUTEX_WAKE, %esi
-#endif
-	movl	$SYS_futex, %eax
-	syscall
-	subq	$cond_nwaiters, %rdi
-	movl	$1, %r12d
-
-4:	LOCK
-#if cond_lock == 0
-	decl	(%rdi)
-#else
-	decl	cond_lock(%rdi)
-#endif
-	je	2f
-#if cond_lock != 0
-	addq	$cond_lock, %rdi
-#endif
-	cmpq	$-1, dep_mutex-cond_lock(%rdi)
-	movl	$LLL_PRIVATE, %eax
-	movl	$LLL_SHARED, %esi
-	cmovne	%eax, %esi
-	callq	__lll_unlock_wake
-
-	/* Wake up all waiters to make sure no signal gets lost.  */
-2:	testq	%r12, %r12
-	jnz	5f
-	addq	$cond_futex, %rdi
-	cmpq	$-1, dep_mutex-cond_futex(%rdi)
-	movl	$0x7fffffff, %edx
-#ifdef __ASSUME_PRIVATE_FUTEX
-	movl	$FUTEX_WAKE, %eax
-	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
-	cmove	%eax, %esi
-#else
-	movl	$0, %eax
-	movl	%fs:PRIVATE_FUTEX, %esi
-	cmove	%eax, %esi
-	orl	$FUTEX_WAKE, %esi
-#endif
-	movl	$SYS_futex, %eax
-	syscall
-
-5:	movq	16(%r8), %rdi
-	callq	__pthread_mutex_cond_lock
-
-	popq	%r12
-	cfi_adjust_cfa_offset(-8)
-	cfi_restore(%r12)
-
-	retq
-	cfi_endproc
-	.size	__condvar_cleanup, .-__condvar_cleanup
-
-
 /* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex)  */
 	.globl	__pthread_cond_wait
 	.type	__pthread_cond_wait, @function
 	.align	16
 __pthread_cond_wait:
+.LSTARTCODE:
 	cfi_startproc
+#ifdef SHARED
+	cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect,
+			DW.ref.__gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART)
+#else
+	cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0)
+	cfi_lsda(DW_EH_PE_udata4, .LexceptSTART)
+#endif
+
 	pushq	%r12
 	cfi_adjust_cfa_offset(8)
 	cfi_rel_offset(%r12, 0)
-#define FRAME_SIZE 64
+#define FRAME_SIZE 32
 	subq	$FRAME_SIZE, %rsp
 	cfi_adjust_cfa_offset(FRAME_SIZE)
 
 	/* Stack frame:
 
-	   rsp + 64
-	            +--------------------------+
-	   rsp + 32 | cleanup buffer           |
+	   rsp + 32
 		    +--------------------------+
 	   rsp + 24 | old wake_seq value       |
 	            +--------------------------+
@@ -216,16 +100,6 @@ __pthread_cond_wait:
 	incl	cond_futex(%rdi)
 	addl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
 
-	/* Install cancellation handler.  */
-#ifdef PIC
-	leaq	__condvar_cleanup(%rip), %rsi
-#else
-	leaq	__condvar_cleanup, %rsi
-#endif
-	leaq	32(%rsp), %rdi
-	movq	%rsp, %rdx
-	callq	__pthread_cleanup_push
-
 	/* Get and store current wakeup_seq value.  */
 	movq	8(%rsp), %rdi
 	movq	wakeup_seq(%rdi), %r9
@@ -243,6 +117,7 @@ __pthread_cond_wait:
 #endif
 	jne	3f
 
+.LcleanupSTART:
 4:	callq	__pthread_enable_asynccancel
 	movl	%eax, (%rsp)
 
@@ -268,6 +143,7 @@ __pthread_cond_wait:
 
 	movl	(%rsp), %edi
 	callq	__pthread_disable_asynccancel
+.LcleanupEND:
 
 	/* Lock.  */
 	movq	8(%rsp), %rdi
@@ -333,11 +209,7 @@ __pthread_cond_wait:
 #endif
 	jne	10f
 
-	/* Remove cancellation handler.  */
-11:	movq	32+CLEANUP_PREV(%rsp), %rdx
-	movq	%rdx, %fs:CLEANUP
-
-	movq	16(%rsp), %rdi
+11:	movq	16(%rsp), %rdi
 	callq	__pthread_mutex_cond_lock
 14:	addq	$FRAME_SIZE, %rsp
 	cfi_adjust_cfa_offset(-FRAME_SIZE)
@@ -424,7 +296,175 @@ __pthread_cond_wait:
 
 13:	movq	%r10, %rax
 	jmp	14b
-	cfi_endproc
 	.size	__pthread_cond_wait, .-__pthread_cond_wait
 versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait,
 		  GLIBC_2_3_2)
+
+
+	.align	16
+	.type	__condvar_cleanup1, @function
+	.globl	__condvar_cleanup1
+	.hidden	__condvar_cleanup1
+__condvar_cleanup1:
+	/* Stack frame:
+
+	   rsp + 40
+		    +--------------------------+
+	   rsp + 32 | %r12                     |
+		    +--------------------------+
+	   rsp + 24 | unused                   |
+	            +--------------------------+
+	   rsp + 16 | mutex pointer            |
+	            +--------------------------+
+	   rsp +  8 | condvar pointer          |
+	            +--------------------------+
+	   rsp +  4 | old broadcast_seq value  |
+	            +--------------------------+
+	   rsp +  0 | old cancellation mode    |
+	            +--------------------------+
+	*/
+
+	movq	%rax, 24(%rsp)
+
+	/* Get internal lock.  */
+	movq	8(%rsp), %rdi
+	movl	$1, %esi
+	xorl	%eax, %eax
+	LOCK
+#if cond_lock == 0
+	cmpxchgl %esi, (%rdi)
+#else
+	cmpxchgl %esi, cond_lock(%rdi)
+#endif
+	jz	1f
+
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_lock_wait
+#if cond_lock != 0
+	subq	$cond_lock, %rdi
+#endif
+
+1:	movl	broadcast_seq(%rdi), %edx
+	cmpl	4(%rsp), %edx
+	jne	3f
+
+	/* We increment the wakeup_seq counter only if it is lower than
+	   total_seq.  If this is not the case the thread was woken and
+	   then canceled.  In this case we ignore the signal.  */
+	movq	total_seq(%rdi), %rax
+	cmpq	wakeup_seq(%rdi), %rax
+	jbe	6f
+	incq	wakeup_seq(%rdi)
+	incl	cond_futex(%rdi)
+6:	incq	woken_seq(%rdi)
+
+3:	subl	$(1 << nwaiters_shift), cond_nwaiters(%rdi)
+
+	/* Wake up a thread which wants to destroy the condvar object.  */
+	xorq	%r12, %r12
+	cmpq	$0xffffffffffffffff, total_seq(%rdi)
+	jne	4f
+	movl	cond_nwaiters(%rdi), %eax
+	andl	$~((1 << nwaiters_shift) - 1), %eax
+	jne	4f
+
+	cmpq	$-1, dep_mutex(%rdi)
+	leaq	cond_nwaiters(%rdi), %rdi
+	movl	$1, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+	subq	$cond_nwaiters, %rdi
+	movl	$1, %r12d
+
+4:	LOCK
+#if cond_lock == 0
+	decl	(%rdi)
+#else
+	decl	cond_lock(%rdi)
+#endif
+	je	2f
+#if cond_lock != 0
+	addq	$cond_lock, %rdi
+#endif
+	cmpq	$-1, dep_mutex-cond_lock(%rdi)
+	movl	$LLL_PRIVATE, %eax
+	movl	$LLL_SHARED, %esi
+	cmovne	%eax, %esi
+	callq	__lll_unlock_wake
+
+	/* Wake up all waiters to make sure no signal gets lost.  */
+2:	testq	%r12, %r12
+	jnz	5f
+	addq	$cond_futex, %rdi
+	cmpq	$-1, dep_mutex-cond_futex(%rdi)
+	movl	$0x7fffffff, %edx
+#ifdef __ASSUME_PRIVATE_FUTEX
+	movl	$FUTEX_WAKE, %eax
+	movl	$(FUTEX_WAKE|FUTEX_PRIVATE_FLAG), %esi
+	cmove	%eax, %esi
+#else
+	movl	$0, %eax
+	movl	%fs:PRIVATE_FUTEX, %esi
+	cmove	%eax, %esi
+	orl	$FUTEX_WAKE, %esi
+#endif
+	movl	$SYS_futex, %eax
+	syscall
+
+5:	movq	16(%rsp), %rdi
+	callq	__pthread_mutex_cond_lock
+
+	movq	24(%rsp), %rdi
+	movq	32(%rsp), %r12
+.LcallUR:
+	call	_Unwind_Resume@PLT
+	hlt
+.LENDCODE:
+	cfi_endproc
+	.size	__condvar_cleanup1, .-__condvar_cleanup1
+
+
+	.section .gcc_except_table,"a",@progbits
+.LexceptSTART:
+	.byte	DW_EH_PE_omit			# @LPStart format
+	.byte	DW_EH_PE_omit			# @TType format
+	.byte	DW_EH_PE_uleb128		# call-site format
+	.uleb128 .Lcstend-.Lcstbegin
+.Lcstbegin:
+	.uleb128 .LcleanupSTART-.LSTARTCODE
+	.uleb128 .LcleanupEND-.LcleanupSTART
+	.uleb128 __condvar_cleanup1-.LSTARTCODE
+	.uleb128  0
+	.uleb128 .LcallUR-.LSTARTCODE
+	.uleb128 .LENDCODE-.LcallUR
+	.uleb128 0
+	.uleb128  0
+.Lcstend:
+
+
+#ifdef SHARED
+	.hidden	DW.ref.__gcc_personality_v0
+	.weak	DW.ref.__gcc_personality_v0
+	.section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits
+	.align	8
+	.type	DW.ref.__gcc_personality_v0, @object
+	.size	DW.ref.__gcc_personality_v0, 8
+DW.ref.__gcc_personality_v0:
+	.quad	__gcc_personality_v0
+#endif

-----------------------------------------------------------------------

Summary of changes:
 nptl/ChangeLog                                     |   15 +
 .../sysv/linux/x86_64/pthread_cond_timedwait.S     |  615 +++++++++++++++-----
 .../unix/sysv/linux/x86_64/pthread_cond_wait.S     |  328 ++++++-----
 nptl/tst-cond11.c                                  |   16 +-
 4 files changed, 689 insertions(+), 285 deletions(-)


hooks/post-receive
-- 
GNU C Library master sources


Index Nav: [Date Index] [Subject Index] [Author Index] [Thread Index]
Message Nav: [Date Prev] [Date Next] [Thread Prev] [Thread Next]