61
65
/* ********** basic thread control API ************
 *
 * Many thread cases have an X amount of jobs, and only an Y amount of
 * threads are useful (typically amount of cpus)
 *
 * This code can be used to start a maximum amount of 'thread slots', which
 * then can be filled in a loop with an idle timer.
 *
 * A sample loop can look like this (pseudo c);
 *
 *     BLI_init_threads(&lb, do_something_func, maxthreads);
 *
 *     if (BLI_available_threads(&lb) && !(escape loop event)) {
 *         // get new job (data pointer)
 *         // tag job 'processed'
 *         BLI_insert_thread(&lb, job);
 *     }
 *     else PIL_sleep_ms(50);
 *
 *     // find if a job is ready; the do_something_func() should write in job somewhere
 *     for (go over all jobs)
 *         if (job was not removed) {
 *             BLI_remove_thread(&lb, job);
 *         }
 *
 *     // conditions to exit loop
 *     if (escape loop event) {
 *         if (BLI_available_threadslots(&lb) == maxthreads)
 *             break;
 *     }
 *
 *     BLI_end_threads(&lb);
 *
 ************************************************ */
105
109
static pthread_mutex_t _malloc_lock = PTHREAD_MUTEX_INITIALIZER;
106
110
static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
109
113
static pthread_mutex_t _custom1_lock = PTHREAD_MUTEX_INITIALIZER;
110
114
static pthread_mutex_t _rcache_lock = PTHREAD_MUTEX_INITIALIZER;
111
115
static pthread_mutex_t _opengl_lock = PTHREAD_MUTEX_INITIALIZER;
116
static pthread_mutex_t _nodes_lock = PTHREAD_MUTEX_INITIALIZER;
117
static pthread_mutex_t _movieclip_lock = PTHREAD_MUTEX_INITIALIZER;
112
118
static pthread_t mainid;
113
119
static int thread_levels= 0; /* threads can be invoked inside threads */
141
147
/* tot = 0 only initializes malloc mutex in a safe way (see sequence.c)
 * problem otherwise: scene render will kill off the mutex!
 */
145
151
void BLI_init_threads(ListBase *threadbase, void *(*do_thread)(void *), int tot)
149
if(threadbase != NULL && tot > 0) {
155
if (threadbase != NULL && tot > 0) {
150
156
threadbase->first= threadbase->last= NULL;
152
if(tot>RE_MAX_THREAD) tot= RE_MAX_THREAD;
153
else if(tot<1) tot= 1;
158
if (tot>RE_MAX_THREAD) tot= RE_MAX_THREAD;
159
else if (tot<1) tot= 1;
155
for(a=0; a<tot; a++) {
161
for (a=0; a<tot; a++) {
156
162
ThreadSlot *tslot= MEM_callocN(sizeof(ThreadSlot), "threadslot");
157
163
BLI_addtail(threadbase, tslot);
158
164
tslot->do_thread= do_thread;
163
if(thread_levels == 0) {
169
if (thread_levels == 0) {
164
170
MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
166
172
#if defined(__APPLE__) && (PARALLEL == 1) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2)
167
173
/* workaround for Apple gcc 4.2.1 omp vs background thread bug,
 * we copy the gomp thread local storage pointer so we can set it again
 * inside the thread that we start */
170
176
thread_tls_data = pthread_getspecific(gomp_tls_key);
207
213
#if defined(__APPLE__) && (PARALLEL == 1) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2)
208
214
/* workaround for Apple gcc 4.2.1 omp vs background thread bug,
 * set the gomp thread local storage pointer which was copied beforehand */
210
216
pthread_setspecific (gomp_tls_key, thread_tls_data);
213
219
return tslot->do_thread(tslot->callerdata);
216
int BLI_thread_is_main(void) {
222
int BLI_thread_is_main(void)
217
224
return pthread_equal(pthread_self(), mainid);
222
229
ThreadSlot *tslot;
224
for(tslot= threadbase->first; tslot; tslot= tslot->next) {
231
for (tslot= threadbase->first; tslot; tslot= tslot->next) {
227
234
tslot->callerdata= callerdata;
228
235
pthread_create(&tslot->pthread, NULL, tslot_thread_start, tslot);
237
244
ThreadSlot *tslot;
239
for(tslot= threadbase->first; tslot; tslot= tslot->next) {
240
if(tslot->callerdata==callerdata) {
246
for (tslot= threadbase->first; tslot; tslot= tslot->next) {
247
if (tslot->callerdata==callerdata) {
241
248
pthread_join(tslot->pthread, NULL);
242
249
tslot->callerdata= NULL;
250
257
ThreadSlot *tslot;
253
for(tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
260
for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
254
261
if (counter == index && tslot->avail == 0) {
255
262
pthread_join(tslot->pthread, NULL);
256
263
tslot->callerdata = NULL;
281
288
* this way we don't end up decrementing thread_levels on an empty threadbase
283
290
if (threadbase && threadbase->first != NULL) {
284
for(tslot= threadbase->first; tslot; tslot= tslot->next) {
285
if(tslot->avail==0) {
291
for (tslot= threadbase->first; tslot; tslot= tslot->next) {
292
if (tslot->avail==0) {
286
293
pthread_join(tslot->pthread, NULL);
313
320
mib[1] = HW_NCPU;
315
322
sysctl(mib, 2, &t, &len, NULL, 0);
316
# elif defined(__sgi)
317
t = sysconf(_SC_NPROC_ONLN);
319
324
t = (int)sysconf(_SC_NPROCESSORS_ONLN);
344
349
pthread_mutex_lock(&_rcache_lock);
345
350
else if (type==LOCK_OPENGL)
346
351
pthread_mutex_lock(&_opengl_lock);
352
else if (type==LOCK_NODES)
353
pthread_mutex_lock(&_nodes_lock);
354
else if (type==LOCK_MOVIECLIP)
355
pthread_mutex_lock(&_movieclip_lock);
349
358
void BLI_unlock_thread(int type)
354
363
pthread_mutex_unlock(&_preview_lock);
355
364
else if (type==LOCK_VIEWER)
356
365
pthread_mutex_unlock(&_viewer_lock);
357
else if(type==LOCK_CUSTOM1)
366
else if (type==LOCK_CUSTOM1)
358
367
pthread_mutex_unlock(&_custom1_lock);
359
else if(type==LOCK_RCACHE)
368
else if (type==LOCK_RCACHE)
360
369
pthread_mutex_unlock(&_rcache_lock);
361
else if(type==LOCK_OPENGL)
370
else if (type==LOCK_OPENGL)
362
371
pthread_mutex_unlock(&_opengl_lock);
372
else if (type==LOCK_NODES)
373
pthread_mutex_unlock(&_nodes_lock);
374
else if (type==LOCK_MOVIECLIP)
375
pthread_mutex_unlock(&_movieclip_lock);
365
378
/* Mutex Locks */
394
407
void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode)
396
if(mode == THREAD_LOCK_READ)
409
if (mode == THREAD_LOCK_READ)
397
410
pthread_rwlock_rdlock(mutex);
399
412
pthread_rwlock_wrlock(mutex);
478
491
WorkParam *p = MEM_callocN(sizeof(WorkParam), "workparam");
481
if (BLI_available_threads(&worker->threadbase) == 0)
494
if (BLI_available_threads(&worker->threadbase) == 0) {
483
495
index = worker->total;
484
while(index == worker->total)
496
while (index == worker->total) {
486
497
PIL_sleep_ms(worker->sleep_time);
488
for (index = 0; index < worker->total; index++)
490
if (worker->busy[index] == 0)
499
for (index = 0; index < worker->total; index++) {
500
if (worker->busy[index] == 0) {
492
501
BLI_remove_thread_index(&worker->threadbase, index);
500
508
index = BLI_available_thread_index(&worker->threadbase);
559
567
/* wait until there is work */
560
568
pthread_mutex_lock(&queue->mutex);
561
while(BLI_gsqueue_is_empty(queue->queue) && !queue->nowait)
569
while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait)
562
570
pthread_cond_wait(&queue->cond, &queue->mutex);
564
572
/* if we have something, pop it */
565
if(!BLI_gsqueue_is_empty(queue->queue))
573
if (!BLI_gsqueue_is_empty(queue->queue))
566
574
BLI_gsqueue_pop(queue->queue, &work);
568
576
pthread_mutex_unlock(&queue->mutex);
617
625
/* wait until there is work */
618
626
pthread_mutex_lock(&queue->mutex);
619
while(BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
620
if(pthread_cond_timedwait(&queue->cond, &queue->mutex, &timeout) == ETIMEDOUT)
627
while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
628
if (pthread_cond_timedwait(&queue->cond, &queue->mutex, &timeout) == ETIMEDOUT)
622
else if(PIL_check_seconds_timer() - t >= ms*0.001)
630
else if (PIL_check_seconds_timer() - t >= ms*0.001)
626
634
/* if we have something, pop it */
627
if(!BLI_gsqueue_is_empty(queue->queue))
635
if (!BLI_gsqueue_is_empty(queue->queue))
628
636
BLI_gsqueue_pop(queue->queue, &work);
630
638
pthread_mutex_unlock(&queue->mutex);
654
662
pthread_mutex_unlock(&queue->mutex);
665
void BLI_begin_threaded_malloc(void)
667
if (thread_levels == 0) {
668
MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
673
void BLI_end_threaded_malloc(void)
676
if (thread_levels==0)
677
MEM_set_lock_callback(NULL, NULL);