~siretart/ubuntu/utopic/blender/libav10

« back to all changes in this revision

Viewing changes to source/blender/blenlib/intern/threads.c

  • Committer: Package Import Robot
  • Author(s): Matteo F. Vescovi
  • Date: 2012-07-23 08:54:18 UTC
  • mfrom: (14.2.16 sid)
  • mto: (14.2.19 sid)
  • mto: This revision was merged to the branch mainline in revision 42.
  • Revision ID: package-import@ubuntu.com-20120723085418-9foz30v6afaf5ffs
Tags: 2.63a-2
* debian/: Cycles support added (Closes: #658075)
  For now, this top feature has been enabled only
  on [any-amd64 any-i386] architectures because
  of OpenImageIO failing on all others
* debian/: scripts installation path changed
  from /usr/lib to /usr/share:
  + debian/patches/: patchset re-worked for path changing
  + debian/control: "Breaks" field added on yafaray-exporter

Show diffs side-by-side

added added

removed removed

Lines of Context:
1
 
/**
 
1
/*
2
2
 *
3
 
 * $Id: threads.c 29622 2010-06-22 15:17:12Z blendix $
4
3
 *
5
4
 * ***** BEGIN GPL LICENSE BLOCK *****
6
5
 *
28
27
 * ***** END GPL LICENSE BLOCK *****
29
28
 */
30
29
 
 
30
/** \file blender/blenlib/intern/threads.c
 
31
 *  \ingroup bli
 
32
 */
 
33
 
 
34
 
31
35
#include <errno.h>
32
36
#include <string.h>
33
37
 
59
63
#endif
60
64
 
61
65
/* ********** basic thread control API ************ 
62
 
 
63
 
Many thread cases have an X amount of jobs, and only an Y amount of
64
 
threads are useful (typically amount of cpus)
65
 
 
66
 
This code can be used to start a maximum amount of 'thread slots', which
67
 
then can be filled in a loop with an idle timer. 
68
 
 
69
 
A sample loop can look like this (pseudo c);
70
 
 
71
 
        ListBase lb;
72
 
        int maxthreads= 2;
73
 
        int cont= 1;
74
 
 
75
 
        BLI_init_threads(&lb, do_something_func, maxthreads);
76
 
 
77
 
        while(cont) {
78
 
                if(BLI_available_threads(&lb) && !(escape loop event)) {
79
 
                        // get new job (data pointer)
80
 
                        // tag job 'processed'
81
 
                        BLI_insert_thread(&lb, job);
82
 
                }
83
 
                else PIL_sleep_ms(50);
84
 
                
85
 
                // find if a job is ready; the do_something_func() should write into the job somewhere
86
 
                cont= 0;
87
 
                for(go over all jobs)
88
 
                        if(job is ready) {
89
 
                                if(job was not removed) {
90
 
                                        BLI_remove_thread(&lb, job);
91
 
                                }
92
 
                        }
93
 
                        else cont= 1;
94
 
                }
95
 
                // conditions to exit loop 
96
 
                if(escape loop event) {
97
 
                        if(BLI_available_threadslots(&lb)==maxthreads)
98
 
                                break;
99
 
                }
100
 
        }
101
 
 
102
 
        BLI_end_threads(&lb);
103
 
 
 
66
 * 
 
67
 * Many thread cases have an X amount of jobs, and only an Y amount of
 
68
 * threads are useful (typically amount of cpus)
 
69
 *
 
70
 * This code can be used to start a maximum amount of 'thread slots', which
 
71
 * then can be filled in a loop with an idle timer. 
 
72
 *
 
73
 * A sample loop can look like this (pseudo c);
 
74
 *
 
75
 *     ListBase lb;
 
76
 *     int maxthreads= 2;
 
77
 *     int cont= 1;
 
78
 * 
 
79
 *     BLI_init_threads(&lb, do_something_func, maxthreads);
 
80
 * 
 
81
 *     while (cont) {
 
82
 *         if (BLI_available_threads(&lb) && !(escape loop event)) {
 
83
 *             // get new job (data pointer)
 
84
 *             // tag job 'processed'
 
85
 *             BLI_insert_thread(&lb, job);
 
86
 *         }
 
87
 *         else PIL_sleep_ms(50);
 
88
 *         
 
89
 *         // find if a job is ready; the do_something_func() should write into the job somewhere
 
90
 *         cont= 0;
 
91
 *         for (go over all jobs)
 
92
 *             if (job is ready) {
 
93
 *                 if (job was not removed) {
 
94
 *                     BLI_remove_thread(&lb, job);
 
95
 *                 }
 
96
 *             }
 
97
 *             else cont= 1;
 
98
 *         }
 
99
 *         // conditions to exit loop 
 
100
 *         if (escape loop event) {
 
101
 *             if (BLI_available_threadslots(&lb)==maxthreads)
 
102
 *                 break;
 
103
 *         }
 
104
 *     }
 
105
 * 
 
106
 *     BLI_end_threads(&lb);
 
107
 *
104
108
 ************************************************ */
105
109
static pthread_mutex_t _malloc_lock = PTHREAD_MUTEX_INITIALIZER;
106
110
static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
109
113
static pthread_mutex_t _custom1_lock = PTHREAD_MUTEX_INITIALIZER;
110
114
static pthread_mutex_t _rcache_lock = PTHREAD_MUTEX_INITIALIZER;
111
115
static pthread_mutex_t _opengl_lock = PTHREAD_MUTEX_INITIALIZER;
 
116
static pthread_mutex_t _nodes_lock = PTHREAD_MUTEX_INITIALIZER;
 
117
static pthread_mutex_t _movieclip_lock = PTHREAD_MUTEX_INITIALIZER;
112
118
static pthread_t mainid;
113
119
static int thread_levels= 0;    /* threads can be invoked inside threads */
114
120
 
139
145
}
140
146
 
141
147
/* tot = 0 only initializes malloc mutex in a safe way (see sequence.c)
142
 
   problem otherwise: scene render will kill off the mutex!
143
 
*/
 
148
 * problem otherwise: scene render will kill off the mutex!
 
149
 */
144
150
 
145
151
void BLI_init_threads(ListBase *threadbase, void *(*do_thread)(void *), int tot)
146
152
{
147
153
        int a;
148
154
 
149
 
        if(threadbase != NULL && tot > 0) {
 
155
        if (threadbase != NULL && tot > 0) {
150
156
                threadbase->first= threadbase->last= NULL;
151
157
        
152
 
                if(tot>RE_MAX_THREAD) tot= RE_MAX_THREAD;
153
 
                else if(tot<1) tot= 1;
 
158
                if (tot>RE_MAX_THREAD) tot= RE_MAX_THREAD;
 
159
                else if (tot<1) tot= 1;
154
160
        
155
 
                for(a=0; a<tot; a++) {
 
161
                for (a=0; a<tot; a++) {
156
162
                        ThreadSlot *tslot= MEM_callocN(sizeof(ThreadSlot), "threadslot");
157
163
                        BLI_addtail(threadbase, tslot);
158
164
                        tslot->do_thread= do_thread;
160
166
                }
161
167
        }
162
168
        
163
 
        if(thread_levels == 0) {
 
169
        if (thread_levels == 0) {
164
170
                MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
165
171
 
166
172
#if defined(__APPLE__) && (PARALLEL == 1) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2)
167
173
                /* workaround for Apple gcc 4.2.1 omp vs background thread bug,
168
 
                   we copy gomp thread local storage pointer to setting it again
169
 
                   inside the thread that we start */
 
174
                 * we copy gomp thread local storage pointer to setting it again
 
175
                 * inside the thread that we start */
170
176
                thread_tls_data = pthread_getspecific(gomp_tls_key);
171
177
#endif
172
178
        }
180
186
        ThreadSlot *tslot;
181
187
        int counter=0;
182
188
        
183
 
        for(tslot= threadbase->first; tslot; tslot= tslot->next) {
184
 
                if(tslot->avail)
 
189
        for (tslot= threadbase->first; tslot; tslot= tslot->next) {
 
190
                if (tslot->avail)
185
191
                        counter++;
186
192
        }
187
193
        return counter;
193
199
        ThreadSlot *tslot;
194
200
        int counter=0;
195
201
        
196
 
        for(tslot= threadbase->first; tslot; tslot= tslot->next, counter++) {
197
 
                if(tslot->avail)
 
202
        for (tslot= threadbase->first; tslot; tslot= tslot->next, counter++) {
 
203
                if (tslot->avail)
198
204
                        return counter;
199
205
        }
200
206
        return 0;
206
212
 
207
213
#if defined(__APPLE__) && (PARALLEL == 1) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2)
208
214
        /* workaround for Apple gcc 4.2.1 omp vs background thread bug,
209
 
           set gomp thread local storage pointer which was copied beforehand */
 
215
         * set gomp thread local storage pointer which was copied beforehand */
210
216
        pthread_setspecific (gomp_tls_key, thread_tls_data);
211
217
#endif
212
218
 
213
219
        return tslot->do_thread(tslot->callerdata);
214
220
}
215
221
 
216
 
int BLI_thread_is_main(void) {
 
222
int BLI_thread_is_main(void)
 
223
{
217
224
        return pthread_equal(pthread_self(), mainid);
218
225
}
219
226
 
221
228
{
222
229
        ThreadSlot *tslot;
223
230
        
224
 
        for(tslot= threadbase->first; tslot; tslot= tslot->next) {
225
 
                if(tslot->avail) {
 
231
        for (tslot= threadbase->first; tslot; tslot= tslot->next) {
 
232
                if (tslot->avail) {
226
233
                        tslot->avail= 0;
227
234
                        tslot->callerdata= callerdata;
228
235
                        pthread_create(&tslot->pthread, NULL, tslot_thread_start, tslot);
236
243
{
237
244
        ThreadSlot *tslot;
238
245
        
239
 
        for(tslot= threadbase->first; tslot; tslot= tslot->next) {
240
 
                if(tslot->callerdata==callerdata) {
 
246
        for (tslot= threadbase->first; tslot; tslot= tslot->next) {
 
247
                if (tslot->callerdata==callerdata) {
241
248
                        pthread_join(tslot->pthread, NULL);
242
249
                        tslot->callerdata= NULL;
243
250
                        tslot->avail= 1;
250
257
        ThreadSlot *tslot;
251
258
        int counter=0;
252
259
        
253
 
        for(tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
 
260
        for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) {
254
261
                if (counter == index && tslot->avail == 0) {
255
262
                        pthread_join(tslot->pthread, NULL);
256
263
                        tslot->callerdata = NULL;
264
271
{
265
272
        ThreadSlot *tslot;
266
273
        
267
 
        for(tslot = threadbase->first; tslot; tslot = tslot->next) {
 
274
        for (tslot = threadbase->first; tslot; tslot = tslot->next) {
268
275
                if (tslot->avail == 0) {
269
276
                        pthread_join(tslot->pthread, NULL);
270
277
                        tslot->callerdata = NULL;
281
288
         * this way we don't end up decrementing thread_levels on an empty threadbase 
282
289
         * */
283
290
        if (threadbase && threadbase->first != NULL) {
284
 
                for(tslot= threadbase->first; tslot; tslot= tslot->next) {
285
 
                        if(tslot->avail==0) {
 
291
                for (tslot= threadbase->first; tslot; tslot= tslot->next) {
 
292
                        if (tslot->avail==0) {
286
293
                                pthread_join(tslot->pthread, NULL);
287
294
                        }
288
295
                }
290
297
        }
291
298
 
292
299
        thread_levels--;
293
 
        if(thread_levels==0)
 
300
        if (thread_levels==0)
294
301
                MEM_set_lock_callback(NULL, NULL);
295
302
}
296
303
 
313
320
        mib[1] = HW_NCPU;
314
321
        len = sizeof(t);
315
322
        sysctl(mib, 2, &t, &len, NULL, 0);
316
 
#       elif defined(__sgi)
317
 
        t = sysconf(_SC_NPROC_ONLN);
318
323
#       else
319
324
        t = (int)sysconf(_SC_NPROCESSORS_ONLN);
320
325
#       endif
344
349
                pthread_mutex_lock(&_rcache_lock);
345
350
        else if (type==LOCK_OPENGL)
346
351
                pthread_mutex_lock(&_opengl_lock);
 
352
        else if (type==LOCK_NODES)
 
353
                pthread_mutex_lock(&_nodes_lock);
 
354
        else if (type==LOCK_MOVIECLIP)
 
355
                pthread_mutex_lock(&_movieclip_lock);
347
356
}
348
357
 
349
358
void BLI_unlock_thread(int type)
354
363
                pthread_mutex_unlock(&_preview_lock);
355
364
        else if (type==LOCK_VIEWER)
356
365
                pthread_mutex_unlock(&_viewer_lock);
357
 
        else if(type==LOCK_CUSTOM1)
 
366
        else if (type==LOCK_CUSTOM1)
358
367
                pthread_mutex_unlock(&_custom1_lock);
359
 
        else if(type==LOCK_RCACHE)
 
368
        else if (type==LOCK_RCACHE)
360
369
                pthread_mutex_unlock(&_rcache_lock);
361
 
        else if(type==LOCK_OPENGL)
 
370
        else if (type==LOCK_OPENGL)
362
371
                pthread_mutex_unlock(&_opengl_lock);
 
372
        else if (type==LOCK_NODES)
 
373
                pthread_mutex_unlock(&_nodes_lock);
 
374
        else if (type==LOCK_MOVIECLIP)
 
375
                pthread_mutex_unlock(&_movieclip_lock);
363
376
}
364
377
 
365
378
/* Mutex Locks */
393
406
 
394
407
void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode)
395
408
{
396
 
        if(mode == THREAD_LOCK_READ)
 
409
        if (mode == THREAD_LOCK_READ)
397
410
                pthread_rwlock_rdlock(mutex);
398
411
        else
399
412
                pthread_rwlock_wrlock(mutex);
442
455
{
443
456
        ThreadedWorker *worker;
444
457
        
 
458
        (void)sleep_time; /* unused */
 
459
        
445
460
        worker = MEM_callocN(sizeof(ThreadedWorker), "threadedworker");
446
461
        
447
 
        if (tot > RE_MAX_THREAD)
448
 
        {
 
462
        if (tot > RE_MAX_THREAD) {
449
463
                tot = RE_MAX_THREAD;
450
464
        }
451
 
        else if (tot < 1)
452
 
        {
 
465
        else if (tot < 1) {
453
466
                tot= 1;
454
467
        }
455
468
        
478
491
        WorkParam *p = MEM_callocN(sizeof(WorkParam), "workparam");
479
492
        int index;
480
493
        
481
 
        if (BLI_available_threads(&worker->threadbase) == 0)
482
 
        {
 
494
        if (BLI_available_threads(&worker->threadbase) == 0) {
483
495
                index = worker->total;
484
 
                while(index == worker->total)
485
 
                {
 
496
                while (index == worker->total) {
486
497
                        PIL_sleep_ms(worker->sleep_time);
487
498
                        
488
 
                        for (index = 0; index < worker->total; index++)
489
 
                        {
490
 
                                if (worker->busy[index] == 0)
491
 
                                {
 
499
                        for (index = 0; index < worker->total; index++) {
 
500
                                if (worker->busy[index] == 0) {
492
501
                                        BLI_remove_thread_index(&worker->threadbase, index);
493
502
                                        break;
494
503
                                }
495
504
                        }
496
505
                }
497
506
        }
498
 
        else
499
 
        {
 
507
        else {
500
508
                index = BLI_available_thread_index(&worker->threadbase);
501
509
        }
502
510
        
518
526
        int nowait;
519
527
};
520
528
 
521
 
ThreadQueue *BLI_thread_queue_init()
 
529
ThreadQueue *BLI_thread_queue_init(void)
522
530
{
523
531
        ThreadQueue *queue;
524
532
 
558
566
 
559
567
        /* wait until there is work */
560
568
        pthread_mutex_lock(&queue->mutex);
561
 
        while(BLI_gsqueue_is_empty(queue->queue) && !queue->nowait)
 
569
        while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait)
562
570
                pthread_cond_wait(&queue->cond, &queue->mutex);
563
571
 
564
572
        /* if we have something, pop it */
565
 
        if(!BLI_gsqueue_is_empty(queue->queue))
 
573
        if (!BLI_gsqueue_is_empty(queue->queue))
566
574
                BLI_gsqueue_pop(queue->queue, &work);
567
575
 
568
576
        pthread_mutex_unlock(&queue->mutex);
616
624
 
617
625
        /* wait until there is work */
618
626
        pthread_mutex_lock(&queue->mutex);
619
 
        while(BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
620
 
                if(pthread_cond_timedwait(&queue->cond, &queue->mutex, &timeout) == ETIMEDOUT)
 
627
        while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) {
 
628
                if (pthread_cond_timedwait(&queue->cond, &queue->mutex, &timeout) == ETIMEDOUT)
621
629
                        break;
622
 
                else if(PIL_check_seconds_timer() - t >= ms*0.001)
 
630
                else if (PIL_check_seconds_timer() - t >= ms*0.001)
623
631
                        break;
624
632
        }
625
633
 
626
634
        /* if we have something, pop it */
627
 
        if(!BLI_gsqueue_is_empty(queue->queue))
 
635
        if (!BLI_gsqueue_is_empty(queue->queue))
628
636
                BLI_gsqueue_pop(queue->queue, &work);
629
637
 
630
638
        pthread_mutex_unlock(&queue->mutex);
654
662
        pthread_mutex_unlock(&queue->mutex);
655
663
}
656
664
 
 
665
void BLI_begin_threaded_malloc(void)
 
666
{
 
667
        if (thread_levels == 0) {
 
668
                MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
 
669
        }
 
670
        thread_levels++;
 
671
}
 
672
 
 
673
void BLI_end_threaded_malloc(void)
 
674
{
 
675
        thread_levels--;
 
676
        if (thread_levels==0)
 
677
                MEM_set_lock_callback(NULL, NULL);
 
678
}