This is the mail archive of the pthreads-win32@sourceware.org mailing list for the pthreads-win32 project.



Re: semaphores and handle leaks


Hi Morgan,

Could you try your sample code below with version 2.8.0 of the library? I believe the leak has been plugged. Sergey Fokin reported a race in sem_destroy() that, in your code below, may result in semaphores not being destroyed.

Where you init and destroy semaphores in thread1 ...

sem_init(E.synchLock, 0, 0);
 ...
sem_destroy(E.synchLock);

... if you were to check the return value from sem_destroy(), I believe you would find that it sometimes fails with errno set to EBUSY. This bug has been fixed in 2.8.0.
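
Something like the following (untested) would show it, assuming sem_destroy() follows the POSIX convention of returning -1 and setting errno on failure:

if (sem_destroy(E.synchLock) != 0 && errno == EBUSY) /* needs <errno.h> */
    printf("sem_destroy: semaphore %d still busy\n", E.num);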

For prior versions of the library, the following modification should provide a workaround (untested):-

while (sem_destroy(E.synchLock) != 0 && errno == EBUSY)
{
    // Assumes we can busy-wait on SMP systems. In pthreads-win32,
    // pthread_num_processors_np() returns the number of processors
    // assigned to the process, which may be <= the number in the
    // system. Not portable.
    if (pthread_num_processors_np() < 2)
        sched_yield();
}


Regards.
Ross

Morgan McLeod wrote:
Hello again.

Below is C++ code for a fairly simple program which exhibits the apparent handle leaks I described in my previous posting. I linked it with the standard STL rather than STLPort, and it made no difference. This is compiled to an EXE, not a DLL like my real application.

Again, please feel free to point out if I'm doing something wrong.

Thanks

-Morgan McLeod
Software Engineer
National Radio Astronomy Observatory
Charlottesville, Va


#include <stdio.h>
#include <windows.h>
#include <pthread.h>
#include <semaphore.h>
#include <list>

struct listElem {
    int num;
    sem_t *synchLock;

    listElem(int _num, sem_t *_synchLock)
      : num(_num),
        synchLock(_synchLock)
    {}

    ~listElem()
    {}
};

typedef std::list<listElem> semList_t;
semList_t list1;
semList_t list2;

// mutexes to protect the lists:
pthread_mutex_t mutex1;
pthread_mutex_t mutex2;

// flags to tell the threads to stop:
bool shutdownNow;
bool shutdownDone1;
bool shutdownDone2;

// thread 1 processes list1:
void *thread1(void *arg) {
    while (true) {
        if (shutdownNow) {
            shutdownDone1 = true;
            pthread_exit(NULL);
        }
        pthread_mutex_lock(&mutex1);
        if (list1.empty())
            pthread_mutex_unlock(&mutex1);
        else { // remove the front element from the list:
            listElem E = list1.front();
            list1.pop_front();
            pthread_mutex_unlock(&mutex1);
            // save the original semaphore:
            sem_t *sem1 = E.synchLock;
            // create and initialize a new semaphore.
            // substitute it for the original:
            sem_t sem2;
            E.synchLock = &sem2;
            sem_init(E.synchLock, 0, 0);
            // put the item in list2 for processing by thread2:
            pthread_mutex_lock(&mutex2);
            list2.push_back(E);
            pthread_mutex_unlock(&mutex2);
            // Wait on, then destroy the substitute semaphore:
            sem_wait(E.synchLock);
            sem_destroy(E.synchLock);
            // put back and post on the original semaphore:
            E.synchLock = sem1;
            sem_post(E.synchLock);

            printf("thread1: %d done\n", E.num);
        }
        Sleep(10);
    }
}

// thread2 processes list2:
void *thread2(void *arg) {
    while (true) {
        if (shutdownNow) {
            shutdownDone2 = true;
            pthread_exit(NULL);
        }
        pthread_mutex_lock(&mutex2);
        if (list2.empty())
            pthread_mutex_unlock(&mutex2);
        else {
            listElem E = list2.front();
            list2.pop_front();
            pthread_mutex_unlock(&mutex2);
            sem_post(E.synchLock);

            printf("thread2: %d done\n", E.num);
        }
        Sleep(10);
    }
}

const int COUNT = 1000;

int main(int, char*[]) {
    // Initialize flags:
    shutdownNow = shutdownDone1 = shutdownDone2 = false;

    // Pause to look at Task Manager.  Handles = 8:
    Sleep(5000);

    pthread_mutex_init(&mutex1, NULL);
    pthread_mutex_init(&mutex2, NULL);

    sem_t synchLocks[COUNT];
    for (int index = 0; index < COUNT; ++index) {
        sem_init(&synchLocks[index], 0, 0);
        listElem E(index, &synchLocks[index]);
        list1.push_back(E);
    }

    // Handles = 2019.  Starts to leak...

    pthread_t T1;
    pthread_create(&T1, NULL, thread1, NULL);
    pthread_t T2;
    pthread_create(&T2, NULL, thread2, NULL);

    while (!list1.empty() || !list2.empty())
        Sleep(10);

    // Pause to look at Task Manager.  Handles = 2261 (varies):
    Sleep(5000);

    shutdownNow = true;
    while (!shutdownDone1 && !shutdownDone2)
        Sleep(10);

    for (int index = 0; index < COUNT; ++index)
        sem_destroy(&synchLocks[index]);

    pthread_mutex_destroy(&mutex1);
    pthread_mutex_destroy(&mutex2);

    // Pause to look at Task Manager.  Handles = 264 (varies):
    Sleep(5000);

    printf("done\n");
    return 0;
}








