#include <sys/time.h>

#include <common/config.h>
#include <common/eb32tree.h>
#include <common/memory.h>
#include <common/mini-clist.h>
#include <common/standard.h>
#include <common/ticks.h>

#include <types/task.h>
/* Principle of the wait queue.
 *
 * We want to be able to tell whether an expiration date is before or after the
 * current time <now>. We KNOW that expiration dates are never too far apart,
 * because they are measured in ticks (milliseconds). We also know that almost
 * all dates will be in the future, and that a very small part of them will be
 * in the past: they are the ones which have expired since the last time we
 * checked them. Using ticks, we know if a date is in the future or in the
 * past, but we cannot use that to store sorted information because that
 * reference changes all the time.
 *
 * We'll use the fact that the time wraps to sort timers. Timers above <now>
 * are in the future, timers below <now> are in the past. Here, "above" and
 * "below" are to be considered modulo 2^31.
 *
 * Timers are stored sorted in an ebtree. We use the new ability for ebtrees to
 * lookup values starting from X to only expire tasks between <now> - 2^31 and
 * <now>. If the end of the tree is reached while walking over it, we simply
 * loop back to the beginning. That way, we have no problem keeping sorted
 * wrapping timers in a tree, between (now - 24 days) and (now + 24 days). The
 * keys in the tree always reflect their real position, none can be infinite.
 * This reduces the number of checks to be performed.
 *
 * Another nice optimisation is to allow a timer to stay at an old place in the
 * queue as long as it's not further than the real expiration date. That way,
 * we use the tree as a place holder for a minorant of the real expiration
 * date. Since we have a very low chance of hitting a timeout anyway, we can
 * bounce the nodes to their right place when we scan the tree if we encounter
 * a misplaced node once in a while. This even allows us not to remove the
 * infinite timers from the wait queue.
 *
 * So, to summarize, we have:
 *   - node->key always defines the current position in the wait queue
 *   - timer is the real expiration date (possibly infinite)
 *   - node->key is always before or equal to timer
 *
 * The run queue works similarly to the wait queue, except that the current
 * date is replaced by an insertion counter which can also wrap without any
 * problem.
 */
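/* Editor's note: a minimal, self-contained sketch of the wrapping comparison
 * described above. my_tick_is_lt() is a hypothetical stand-in for the real
 * tick_is_lt() from <common/ticks.h>: a date t1 sorts before t2 when their
 * difference, interpreted as a signed 32-bit value, is negative, which keeps
 * the ordering correct across the wrap point.
 */
static inline int my_tick_is_lt(unsigned int t1, unsigned int t2)
{
	return (int)(t1 - t2) < 0;
}
/* For instance, with now = 0xFFFFFFF0 and soon = now + 0x20 (which wraps past
 * zero), my_tick_is_lt(now, soon) is 1: <soon> still sorts "in the future"
 * even though it is numerically smaller.
 */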
/* The farthest we can look back in a timer tree */
#define TIMER_LOOK_BACK (1U << 31)
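/* Editor's sketch of how the look-back window is meant to be used; this is an
 * illustration under assumptions, not the real wake_expired_tasks(). It walks
 * the timer tree starting from the oldest position still considered "in the
 * past", and wraps to the beginning of the tree when it falls off the end.
 */
static inline void scan_timers_sketch(struct eb32_root *timers, unsigned int now)
{
	struct eb32_node *eb = eb32_lookup_ge(timers, now - TIMER_LOOK_BACK);

	while (1) {
		if (unlikely(!eb)) {
			/* end of tree reached: the remaining timers wrapped,
			 * restart from the smallest key */
			eb = eb32_first(timers);
			if (!eb)
				break;
		}
		if (tick_is_lt(now, eb->key))
			break;	/* every following key is in the future */
		/* a real scanner would expire or requeue (i.e. remove) the
		 * task carrying this node here, which is what guarantees the
		 * walk terminates */
		eb = eb32_next(eb);
	}
}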
/* a few exported variables */
extern unsigned int nb_tasks;     /* total number of tasks */
extern unsigned int run_queue;    /* run queue size */
extern unsigned int run_queue_cur;
extern unsigned int nb_tasks_cur;
extern unsigned int niced_tasks;  /* number of niced tasks in the run queue */
extern struct pool_head *pool2_task;
extern struct eb32_node *last_timer; /* optimization: last queued timer */
/* returns non-zero if task <t> is in the run queue, otherwise zero */
static inline int task_in_rq(struct task *t)
{
	return t->rq.node.leaf_p != NULL;
}

/* returns non-zero if task <t> is in the wait queue, otherwise zero */
static inline int task_in_wq(struct task *t)
{
	return t->wq.node.leaf_p != NULL;
}
/* puts the task <t> in run queue with reason flags <f>, and returns <t> */
struct task *__task_wakeup(struct task *t);
static inline struct task *task_wakeup(struct task *t, unsigned int f)
{
	if (likely(!task_in_rq(t)))
		__task_wakeup(t);
	t->state |= f;
	return t;
}
/*
 * Unlink the task from the wait queue, and possibly update the last_timer
 * pointer. A pointer to the task itself is returned. The task *must* already
 * be in the wait queue before calling this function. If unsure, use the safer
 * task_unlink_wq() function.
 */
static inline struct task *__task_unlink_wq(struct task *t)
{
	eb32_delete(&t->wq);
	if (last_timer == &t->wq)
		last_timer = NULL;
	return t;
}
static inline struct task *task_unlink_wq(struct task *t)
{
	if (likely(task_in_wq(t)))
		__task_unlink_wq(t);
	return t;
}
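/* Editor's usage sketch: how an expiration path might wake a task with a
 * reason flag. TASK_WOKEN_TIMER is assumed to be one of the wake reason flags
 * from <types/task.h>, and tick_is_expired() is assumed to be available from
 * <common/ticks.h>. This is an illustration, not an exported API.
 */
static inline void wake_if_expired_sketch(struct task *t, unsigned int now)
{
	if (tick_is_expired(t->expire, now)) {
		task_unlink_wq(t);                /* leave the wait queue */
		task_wakeup(t, TASK_WOKEN_TIMER); /* run it, noting the reason */
	}
}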
/*
 * Unlink the task from the run queue. The run_queue size and number of niced
 * tasks are updated too. A pointer to the task itself is returned. The task
 * *must* already be in the run queue before calling this function. If unsure,
 * use the safer task_unlink_rq() function.
 */
static inline struct task *__task_unlink_rq(struct task *t)
{
	eb32_delete(&t->rq);
	run_queue--;
	if (likely(t->nice))
		niced_tasks--;
	return t;
}

static inline struct task *task_unlink_rq(struct task *t)
{
	if (likely(task_in_rq(t)))
		__task_unlink_rq(t);
	return t;
}
/*
 * Unlinks the task from both queues and adjusts run queue stats.
 * A pointer to the task itself is returned.
 */
static inline struct task *task_delete(struct task *t)
{
	task_unlink_wq(t);
	task_unlink_rq(t);
	return t;
}
/*
 * Initialize a new task. The bare minimum is performed (queue pointers and
 * state). The task is returned. This function should not be used outside of
 * task_new().
 */
static inline struct task *task_init(struct task *t)
{
	t->wq.node.leaf_p = NULL;
	t->rq.node.leaf_p = NULL;
	t->state = TASK_SLEEPING;
	t->nice = 0;
	return t;
}
/*
 * Allocate and initialise a new task. The new task is returned, or NULL in
 * case of lack of memory. The task count is incremented. Tasks should only
 * be allocated this way, and must be freed using task_free().
 */
static inline struct task *task_new(void)
{
	struct task *t = pool_alloc2(pool2_task);
	if (t) {
		nb_tasks++;
		task_init(t);
	}
	return t;
}
/*
 * Free a task. Its context must have been freed since it will be lost.
 * The task count is decremented.
 */
static inline void task_free(struct task *t)
{
	pool_free2(pool2_task, t);
	nb_tasks--;
}
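/* Editor's usage sketch of the allocation lifecycle. The <process> and
 * <context> fields, my_handler() and my_ctx are assumptions made for the sake
 * of the example; tick_add() and the global now_ms are assumed to come from
 * the time subsystem. A task is allocated once, queued or woken as needed,
 * then deleted and freed:
 *
 *	struct task *t = task_new();
 *	if (t) {
 *		t->process = my_handler;
 *		t->context = my_ctx;
 *		t->expire = tick_add(now_ms, 1000);  // fire in about 1s
 *		task_queue(t);                       // see below
 *	}
 *	...
 *	task_delete(t);
 *	task_free(t);
 */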
/* Place <task> into the wait queue, where it may already be. If the expiration
 * timer is infinite, do nothing and rely on wake_expired_tasks() to clean up.
 */
void __task_queue(struct task *task);
static inline void task_queue(struct task *task)
{
	/* If we already have a place in the wait queue no later than the
	 * timeout we're trying to set, we'll stay there, because it is very
	 * unlikely that we will reach the timeout anyway. If the timeout
	 * has been disabled, it's useless to leave the queue as well. We'll
	 * rely on wake_expired_tasks() to catch the node and move it to the
	 * proper place should it ever happen. Finally we only add the task
	 * to the queue if it was not there or if it was further than what
	 * we want.
	 */
	if (!tick_isset(task->expire))
		return;

	if (!task_in_wq(task) || tick_is_lt(task->expire, task->wq.key))
		__task_queue(task);
}
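/* Editor's illustration of the "minorant" optimisation above: repeatedly
 * pushing the expiration date back does not move the node each time, while an
 * earlier date does. Values are illustrative; tick_add() and now_ms are
 * assumed from the time subsystem.
 *
 *	t->expire = tick_add(now_ms, 1000);
 *	task_queue(t);   // queued at a key near now + 1s
 *	t->expire = tick_add(now_ms, 2000);
 *	task_queue(t);   // stays put: the old key is a minorant of expire
 *	t->expire = tick_add(now_ms, 500);
 *	task_queue(t);   // moved: the new date is earlier than the key
 */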
/*
 * This does 4 things: