It's defined as 0, so this diff should not introduce any functional changes. Index: lib/librthread//rthread.c =================================================================== RCS file: /cvs/src/lib/librthread/rthread.c,v retrieving revision 1.92 diff -u -p -r1.92 rthread.c --- lib/librthread//rthread.c 1 Sep 2016 10:41:02 -0000 1.92 +++ lib/librthread//rthread.c 3 Sep 2016 15:32:42 -0000 @@ -658,8 +658,7 @@ _rthread_dl_lock(int what) } else if (owner != self) { TAILQ_INSERT_TAIL(&lockers, self, waiting); while (owner != self) { - __thrsleep(self, 0 | _USING_TICKETS, NULL, - &lock.ticket, NULL); + __thrsleep(self, 0, NULL, &lock.ticket, NULL); _spinlock(&lock); } } Index: lib/librthread//rthread.h =================================================================== RCS file: /cvs/src/lib/librthread/rthread.h,v retrieving revision 1.58 diff -u -p -r1.58 rthread.h --- lib/librthread//rthread.h 7 May 2016 19:05:22 -0000 1.58 +++ lib/librthread//rthread.h 3 Sep 2016 15:32:42 -0000 @@ -37,7 +37,6 @@ #define RTHREAD_STACK_SIZE_DEF (256 * 1024) #endif -#define _USING_TICKETS 0 /* * tickets don't work yet?
(or seem much slower, with lots of system time) * until then, keep the struct around to avoid excessive changes going Index: lib/librthread//rthread_file.c =================================================================== RCS file: /cvs/src/lib/librthread/rthread_file.c,v retrieving revision 1.8 diff -u -p -r1.8 rthread_file.c --- lib/librthread//rthread_file.c 7 May 2016 19:05:22 -0000 1.8 +++ lib/librthread//rthread_file.c 3 Sep 2016 15:32:42 -0000 @@ -205,8 +205,7 @@ _thread_flockfile(FILE * fp) */ TAILQ_INSERT_TAIL(&p->lockers,self,waiting); while (p->owner != self) { - __thrsleep(self, 0 | _USING_TICKETS, NULL, - &hash_lock.ticket, NULL); + __thrsleep(self, 0, NULL, &hash_lock.ticket, NULL); _spinlock(&hash_lock); } } Index: lib/librthread//rthread_rwlock.c =================================================================== RCS file: /cvs/src/lib/librthread/rthread_rwlock.c,v retrieving revision 1.6 diff -u -p -r1.6 rthread_rwlock.c --- lib/librthread//rthread_rwlock.c 2 Apr 2016 19:56:53 -0000 1.6 +++ lib/librthread//rthread_rwlock.c 3 Sep 2016 15:32:42 -0000 @@ -117,8 +117,8 @@ _rthread_rwlock_rdlock(pthread_rwlock_t error = EDEADLK; else { do { - if (__thrsleep(lock, CLOCK_REALTIME | _USING_TICKETS, - abstime, &lock->lock.ticket, NULL) == EWOULDBLOCK) + if (__thrsleep(lock, CLOCK_REALTIME, abstime, + &lock->lock.ticket, NULL) == EWOULDBLOCK) return (ETIMEDOUT); _spinlock(&lock->lock); } while (lock->owner != NULL || !TAILQ_EMPTY(&lock->writers)); @@ -180,8 +180,7 @@ _rthread_rwlock_wrlock(pthread_rwlock_t /* gotta block */ TAILQ_INSERT_TAIL(&lock->writers, thread, waiting); do { - do_wait = __thrsleep(thread, CLOCK_REALTIME | - _USING_TICKETS, abstime, + do_wait = __thrsleep(thread, CLOCK_REALTIME, abstime, &lock->lock.ticket, NULL) != EWOULDBLOCK; _spinlock(&lock->lock); } while (lock->owner != thread && do_wait); Index: lib/librthread//rthread_sem.c =================================================================== RCS file: 
/cvs/src/lib/librthread/rthread_sem.c,v retrieving revision 1.23 diff -u -p -r1.23 rthread_sem.c --- lib/librthread//rthread_sem.c 7 May 2016 19:05:22 -0000 1.23 +++ lib/librthread//rthread_sem.c 3 Sep 2016 15:32:42 -0000 @@ -71,9 +71,8 @@ _sem_wait(sem_t sem, int tryonly, const } else { sem->waitcount++; do { - r = __thrsleep(ident, CLOCK_REALTIME | - _USING_TICKETS, abstime, &sem->lock.ticket, - delayed_cancel); + r = __thrsleep(ident, CLOCK_REALTIME, abstime, + &sem->lock.ticket, delayed_cancel); _spinlock(&sem->lock); /* ignore interruptions other than cancelation */ if (r == EINTR && (delayed_cancel == NULL || Index: lib/librthread//rthread_sync.c =================================================================== RCS file: /cvs/src/lib/librthread/rthread_sync.c,v retrieving revision 1.42 diff -u -p -r1.42 rthread_sync.c --- lib/librthread//rthread_sync.c 7 May 2016 19:05:22 -0000 1.42 +++ lib/librthread//rthread_sync.c 3 Sep 2016 15:32:42 -0000 @@ -130,8 +130,7 @@ _rthread_mutex_lock(pthread_mutex_t *mut abort(); /* self-deadlock, possibly until timeout */ - while (__thrsleep(self, CLOCK_REALTIME | - _USING_TICKETS, abstime, + while (__thrsleep(self, CLOCK_REALTIME, abstime, &mutex->lock.ticket, NULL) != EWOULDBLOCK) _spinlock(&mutex->lock); return (ETIMEDOUT); @@ -148,8 +147,8 @@ _rthread_mutex_lock(pthread_mutex_t *mut /* add to the wait queue and block until at the head */ TAILQ_INSERT_TAIL(&mutex->lockers, self, waiting); while (mutex->owner != self) { - ret = __thrsleep(self, CLOCK_REALTIME | _USING_TICKETS, - abstime, &mutex->lock.ticket, NULL); + ret = __thrsleep(self, CLOCK_REALTIME, abstime, + &mutex->lock.ticket, NULL); _spinlock(&mutex->lock); assert(mutex->owner != NULL); if (ret == EWOULDBLOCK) { @@ -360,7 +359,7 @@ pthread_cond_timedwait(pthread_cond_t *c /* wait until we're the owner of the mutex again */ while (mutex->owner != self) { - error = __thrsleep(self, cond->clock | _USING_TICKETS, abstime, + error = __thrsleep(self, cond->clock, 
abstime, &mutex->lock.ticket, &self->delayed_cancel); /* @@ -510,8 +509,8 @@ pthread_cond_wait(pthread_cond_t *condp, /* wait until we're the owner of the mutex again */ while (mutex->owner != self) { - error = __thrsleep(self, 0 | _USING_TICKETS, NULL, - &mutex->lock.ticket, &self->delayed_cancel); + error = __thrsleep(self, 0, NULL, &mutex->lock.ticket, + &self->delayed_cancel); /* * If we took a normal signal (not from
-- Michal Mazurek