/* $Id: os_core_linux_kernel.c 3553 2011-05-05 06:14:19Z nanang $ */
/*
 * Copyright (C) 2008-2011 Teluu Inc. (http://www.teluu.com)
 * Copyright (C) 2003-2008 Benny Prijono <benny@prijono.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307  USA
 */
21
#include <pj/os.h>
#include <pj/assert.h>
#include <pj/pool.h>
#include <pj/log.h>
#include <pj/except.h>
#include <pj/errno.h>
#include <pj/string.h>
#include <pj/compat/high_precision.h>
#include <pj/compat/sprintf.h>

#include <linux/config.h>
#include <linux/version.h>
#if defined(MODVERSIONS)
#include <linux/modversions.h>
#endif
#include <linux/kernel.h>
#include <linux/sched.h>
//#include <linux/tqueue.h>
#include <linux/wait.h>
#include <linux/signal.h>

#include <asm/atomic.h>
#include <asm/unistd.h>
#include <asm/semaphore.h>
45
#define THIS_FILE "oslinuxkern"
50
char obj_name[PJ_MAX_OBJ_NAME];
52
/** Linux task structure for thread. */
53
struct task_struct *thread;
55
/** Flags (specified in pj_thread_create) */
58
/** Task queue needed to launch thread. */
59
//struct tq_struct tq;
61
/** Semaphore needed to control thread startup. */
62
struct semaphore startstop_sem;
64
/** Semaphore to suspend thread during startup. */
65
struct semaphore suspend_sem;
67
/** Queue thread is waiting on. Gets initialized by
68
thread_initialize, can be used by thread itself.
70
wait_queue_head_t queue;
72
/** Flag to tell thread whether to die or not.
73
When the thread receives a signal, it must check
74
the value of terminate and call thread_deinitialize and terminate
79
/** Thread's entry. */
101
struct semaphore sem;
105
* Static global variables.
107
#define MAX_TLS_ID 32
108
static void *tls_values[MAX_TLS_ID];
110
static long thread_tls_id;
111
static spinlock_t critical_section = SPIN_LOCK_UNLOCKED;
112
static unsigned long spinlock_flags;
113
static pj_thread_t main_thread;
115
/* private functions */
116
//#define TRACE_(expr) PJ_LOG(3,expr)
120
/* This must be called in the context of the new thread. */
121
static void thread_initialize( pj_thread_t *thread )
123
TRACE_((THIS_FILE, "---new thread initializing..."));
126
pj_thread_local_set(thread_tls_id, thread);
128
/* fill in thread structure */
129
thread->thread = current;
130
pj_assert(thread->thread != NULL);
132
/* set signal mask to what we want to respond */
133
siginitsetinv(¤t->blocked,
134
sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGTERM));
136
/* initialise wait queue */
137
init_waitqueue_head(&thread->queue);
139
/* initialise termination flag */
140
thread->terminate = 0;
142
/* set name of this process (making sure obj_name is null
145
thread->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
146
sprintf(current->comm, thread->obj_name);
148
/* tell the creator that we are ready and let him continue */
149
up(&thread->startstop_sem);
152
/* cleanup of thread. Called by the exiting thread. */
153
static void thread_deinitialize(pj_thread_t *thread)
155
/* we are terminating */
157
/* lock the kernel, the exit will unlock it */
158
thread->thread = NULL;
161
/* notify the stop_kthread() routine that we are terminating. */
162
up(&thread->startstop_sem);
164
/* the kernel_thread that called clone() does a do_exit here. */
166
/* there is no race here between execution of the "killer" and
167
real termination of the thread (race window between up and do_exit),
168
since both the thread and the "killer" function are running with
169
the kernel lock held.
170
The kernel lock will be freed after the thread exited, so the code
171
is really not executed anymore as soon as the unload functions gets
172
the kernel lock back.
173
The init process may not have made the cleanup of the process here,
174
but the cleanup can be done safely with the module unloaded.
179
static int thread_proc(void *arg)
181
pj_thread_t *thread = arg;
183
TRACE_((THIS_FILE, "---new thread starting!"));
185
/* Initialize thread. */
186
thread_initialize( thread );
188
/* Wait if created suspended. */
189
if (thread->flags & PJ_THREAD_SUSPENDED) {
190
TRACE_((THIS_FILE, "---new thread suspended..."));
191
down(&thread->suspend_sem);
194
TRACE_((THIS_FILE, "---new thread running..."));
196
pj_assert(thread->func != NULL);
198
/* Call thread's entry. */
199
(*thread->func)(thread->arg);
201
TRACE_((THIS_FILE, "---thread exiting..."));
203
/* Cleanup thread. */
204
thread_deinitialize(thread);
209
/* The very task entry. */
210
static void kthread_launcher(void *arg)
212
TRACE_((THIS_FILE, "...launching thread!..."));
213
kernel_thread(&thread_proc, arg, 0);
216
PJ_DEF(pj_status_t) pj_init(void)
220
PJ_LOG(5, ("pj_init", "Initializing PJ Library.."));
222
rc = pj_thread_init();
223
if (rc != PJ_SUCCESS)
226
/* Initialize exception ID for the pool.
227
* Must do so after critical section is configured.
229
rc = pj_exception_id_alloc("PJLIB/No memory", &PJ_NO_MEMORY_EXCEPTION);
230
if (rc != PJ_SUCCESS)
236
PJ_DEF(pj_uint32_t) pj_getpid(void)
241
PJ_DEF(pj_status_t) pj_thread_register ( const char *cstr_thread_name,
243
pj_thread_t **ptr_thread)
246
pj_thread_t *thread = (pj_thread_t *)desc;
247
pj_str_t thread_name = pj_str((char*)cstr_thread_name);
249
/* Size sanity check. */
250
if (sizeof(pj_thread_desc) < sizeof(pj_thread_t)) {
251
pj_assert(!"Not enough pj_thread_desc size!");
255
/* If a thread descriptor has been registered before, just return it. */
256
if (pj_thread_local_get (thread_tls_id) != 0) {
257
// 2006-02-26 bennylp:
258
// This wouldn't work in all cases!.
259
// If thread is created by external module (e.g. sound thread),
260
// thread may be reused while the pool used for the thread descriptor
261
// has been deleted by application.
262
//*thread_ptr = (pj_thread_t*)pj_thread_local_get (thread_tls_id);
266
/* Initialize and set the thread entry. */
267
pj_bzero(desc, sizeof(struct pj_thread_t));
269
if(cstr_thread_name && pj_strlen(&thread_name) < sizeof(thread->obj_name)-1)
270
pj_sprintf(thread->obj_name, cstr_thread_name, thread->thread);
272
pj_snprintf(thread->obj_name, sizeof(thread->obj_name),
273
"thr%p", (void*)thread->thread);
276
thread_initialize(thread);
279
down(&thread->startstop_sem);
281
#if defined(PJ_OS_HAS_CHECK_STACK) && PJ_OS_HAS_CHECK_STACK!=0
282
thread->stk_start = &stack_ptr;
283
thread->stk_size = 0xFFFFFFFFUL;
284
thread->stk_max_usage = 0;
289
*ptr_thread = thread;
294
pj_status_t pj_thread_init(void)
299
rc = pj_thread_local_alloc(&thread_tls_id);
300
if (rc != PJ_SUCCESS)
303
return pj_thread_register("pjlib-main", (long*)&main_thread, &dummy);
306
PJ_DEF(pj_status_t) pj_thread_create( pj_pool_t *pool, const char *thread_name,
307
pj_thread_proc *proc, void *arg,
308
pj_size_t stack_size, unsigned flags,
309
pj_thread_t **ptr_thread)
313
TRACE_((THIS_FILE, "pj_thread_create()"));
315
PJ_ASSERT_RETURN(pool && proc && ptr_thread, PJ_EINVAL);
317
thread = pj_pool_zalloc(pool, sizeof(pj_thread_t));
321
PJ_UNUSED_ARG(stack_size);
325
thread_name = "thr%p";
327
if (strchr(thread_name, '%')) {
328
pj_snprintf(thread->obj_name, PJ_MAX_OBJ_NAME, thread_name, thread);
330
strncpy(thread->obj_name, thread_name, PJ_MAX_OBJ_NAME);
331
thread->obj_name[PJ_MAX_OBJ_NAME-1] = '\0';
334
/* Init thread's semaphore. */
335
TRACE_((THIS_FILE, "...init semaphores..."));
336
init_MUTEX_LOCKED(&thread->startstop_sem);
337
init_MUTEX_LOCKED(&thread->suspend_sem);
339
thread->flags = flags;
341
if ((flags & PJ_THREAD_SUSPENDED) == 0) {
342
up(&thread->suspend_sem);
345
/* Store the functions and argument. */
349
/* Save return value. */
350
*ptr_thread = thread;
352
/* Create the new thread by running a task through keventd. */
355
/* Initialize the task queue struct. */
357
INIT_LIST_HEAD(&thread->tq.list);
358
thread->tq.routine = kthread_launcher;
359
thread->tq.data = thread;
361
/* and schedule it for execution. */
362
schedule_task(&thread->tq);
364
kthread_launcher(thread);
366
/* Wait until thread has reached the setup_thread routine. */
367
TRACE_((THIS_FILE, "...wait for the new thread..."));
368
down(&thread->startstop_sem);
370
TRACE_((THIS_FILE, "...main thread resumed..."));
374
PJ_DEF(const char*) pj_thread_get_name(pj_thread_t *thread)
376
return thread->obj_name;
379
PJ_DEF(pj_status_t) pj_thread_resume(pj_thread_t *thread)
381
up(&thread->suspend_sem);
385
PJ_DEF(pj_thread_t*) pj_thread_this(void)
387
return (pj_thread_t*)pj_thread_local_get(thread_tls_id);
390
PJ_DEF(pj_status_t) pj_thread_join(pj_thread_t *p)
392
TRACE_((THIS_FILE, "pj_thread_join()"));
393
down(&p->startstop_sem);
394
TRACE_((THIS_FILE, " joined!"));
398
PJ_DEF(pj_status_t) pj_thread_destroy(pj_thread_t *thread)
400
PJ_ASSERT_RETURN(thread != NULL, PJ_EINVALIDOP);
404
PJ_DEF(pj_status_t) pj_thread_sleep(unsigned msec)
407
pj_thread_t *thread = pj_thread_this();
409
PJ_ASSERT_RETURN(thread != NULL, PJ_EBUG);
411
/* Use high precision calculation to make sure we don't
414
* ticks = HZ * msec / 1000
417
pj_highprec_mul(ticks, msec);
418
pj_highprec_div(ticks, 1000);
420
TRACE_((THIS_FILE, "this thread will sleep for %u ticks", ticks));
421
interruptible_sleep_on_timeout( &thread->queue, ticks);
426
///////////////////////////////////////////////////////////////////////////////
427
PJ_DEF(pj_status_t) pj_atomic_create( pj_pool_t *pool,
428
pj_atomic_value_t value,
429
pj_atomic_t **ptr_var)
431
pj_atomic_t *t = pj_pool_calloc(pool, 1, sizeof(pj_atomic_t));
432
if (!t) return PJ_ENOMEM;
434
atomic_set(&t->atom, value);
440
PJ_DEF(pj_status_t) pj_atomic_destroy( pj_atomic_t *var )
445
PJ_DEF(void) pj_atomic_set(pj_atomic_t *var, pj_atomic_value_t value)
447
atomic_set(&var->atom, value);
450
PJ_DEF(pj_atomic_value_t) pj_atomic_get(pj_atomic_t *var)
452
return atomic_read(&var->atom);
455
PJ_DEF(void) pj_atomic_inc(pj_atomic_t *var)
457
atomic_inc(&var->atom);
460
PJ_DEF(void) pj_atomic_dec(pj_atomic_t *var)
462
atomic_dec(&var->atom);
465
PJ_DEF(void) pj_atomic_add( pj_atomic_t *var, pj_atomic_value_t value )
467
atomic_add(value, &var->atom);
471
///////////////////////////////////////////////////////////////////////////////
472
PJ_DEF(pj_status_t) pj_thread_local_alloc(long *index)
474
if (tls_id >= MAX_TLS_ID)
482
PJ_DEF(void) pj_thread_local_free(long index)
484
pj_assert(index >= 0 && index < MAX_TLS_ID);
487
PJ_DEF(pj_status_t) pj_thread_local_set(long index, void *value)
489
pj_assert(index >= 0 && index < MAX_TLS_ID);
490
tls_values[index] = value;
494
PJ_DEF(void*) pj_thread_local_get(long index)
496
pj_assert(index >= 0 && index < MAX_TLS_ID);
497
return tls_values[index];
501
///////////////////////////////////////////////////////////////////////////////
502
PJ_DEF(void) pj_enter_critical_section(void)
504
spin_lock_irqsave(&critical_section, spinlock_flags);
507
PJ_DEF(void) pj_leave_critical_section(void)
509
spin_unlock_irqrestore(&critical_section, spinlock_flags);
513
///////////////////////////////////////////////////////////////////////////////
514
PJ_DEF(pj_status_t) pj_mutex_create( pj_pool_t *pool,
517
pj_mutex_t **ptr_mutex)
523
mutex = pj_pool_alloc(pool, sizeof(pj_mutex_t));
527
init_MUTEX(&mutex->sem);
529
mutex->recursive = (type == PJ_MUTEX_RECURSE);
531
mutex->own_count = 0;
538
PJ_DEF(pj_status_t) pj_mutex_create_simple( pj_pool_t *pool, const char *name,
541
return pj_mutex_create(pool, name, PJ_MUTEX_SIMPLE, mutex);
544
PJ_DEF(pj_status_t) pj_mutex_create_recursive( pj_pool_t *pool,
548
return pj_mutex_create( pool, name, PJ_MUTEX_RECURSE, mutex);
551
PJ_DEF(pj_status_t) pj_mutex_lock(pj_mutex_t *mutex)
553
PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
555
if (mutex->recursive) {
556
pj_thread_t *this_thread = pj_thread_this();
557
if (mutex->owner == this_thread) {
561
pj_assert(mutex->own_count == 0);
562
mutex->owner = this_thread;
563
mutex->own_count = 1;
571
PJ_DEF(pj_status_t) pj_mutex_trylock(pj_mutex_t *mutex)
575
PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
577
if (mutex->recursive) {
578
pj_thread_t *this_thread = pj_thread_this();
579
if (mutex->owner == this_thread) {
582
rc = down_interruptible(&mutex->sem);
584
return PJ_RETURN_OS_ERROR(-rc);
585
pj_assert(mutex->own_count == 0);
586
mutex->owner = this_thread;
587
mutex->own_count = 1;
590
int rc = down_trylock(&mutex->sem);
592
return PJ_RETURN_OS_ERROR(-rc);
597
PJ_DEF(pj_status_t) pj_mutex_unlock(pj_mutex_t *mutex)
599
PJ_ASSERT_RETURN(mutex, PJ_EINVAL);
601
if (mutex->recursive) {
602
pj_thread_t *this_thread = pj_thread_this();
603
if (mutex->owner == this_thread) {
604
pj_assert(mutex->own_count > 0);
606
if (mutex->own_count == 0) {
611
pj_assert(!"Not owner!");
612
return PJ_EINVALIDOP;
620
PJ_DEF(pj_status_t) pj_mutex_destroy(pj_mutex_t *mutex)
622
PJ_ASSERT_RETURN(mutex != NULL, PJ_EINVAL);
627
#if defined(PJ_DEBUG) && PJ_DEBUG != 0
628
PJ_DEF(pj_bool_t) pj_mutex_is_locked(pj_mutex_t *mutex)
630
if (mutex->recursive)
631
return mutex->owner == pj_thread_this();
635
#endif /* PJ_DEBUG */
638
#if defined(PJ_HAS_SEMAPHORE) && PJ_HAS_SEMAPHORE != 0
640
PJ_DEF(pj_status_t) pj_sem_create( pj_pool_t *pool,
650
PJ_ASSERT_RETURN(pool && sem, PJ_EINVAL);
652
sem = pj_pool_alloc(pool, sizeof(pj_sem_t));
653
sema_init(&sem->sem, initial);
657
PJ_DEF(pj_status_t) pj_sem_wait(pj_sem_t *sem)
659
PJ_ASSERT_RETURN(pool && sem, PJ_EINVAL);
665
PJ_DEF(pj_status_t) pj_sem_trywait(pj_sem_t *sem)
669
PJ_ASSERT_RETURN(pool && sem, PJ_EINVAL);
671
rc = down_trylock(&sem->sem);
673
return PJ_RETURN_OS_ERROR(-rc);
679
PJ_DEF(pj_status_t) pj_sem_post(pj_sem_t *sem)
681
PJ_ASSERT_RETURN(pool && sem, PJ_EINVAL);
687
PJ_DEF(pj_status_t) pj_sem_destroy(pj_sem_t *sem)
689
PJ_ASSERT_RETURN(pool && sem, PJ_EINVAL);
694
#endif /* PJ_HAS_SEMAPHORE */