~ubuntu-branches/ubuntu/hardy/ndiswrapper/hardy

« back to all changes in this revision

Viewing changes to driver/workqueue.c

  • Committer: Bazaar Package Importer
  • Author(s): Matthias Klose
  • Date: 2007-12-07 20:42:35 UTC
  • mfrom: (1.2.9 upstream)
  • Revision ID: james.westby@ubuntu.com-20071207204235-s43f889d3h1u6vrl
Tags: 1.50-1ubuntu1
* Merge with Debian; remaining changes:
  - Build for lpia.
  - debian/control:
    + Update description to point out that the kernel source package is
      not required with the standard Ubuntu kernel.
    + Change the Maintainer address.
  - debian/control:
    + Drop ndiswrapper-source.

Show diffs side-by-side

added added

removed removed

Lines of Context:
15
15
 
16
16
#include "ntoskernel.h"
17
17
 
18
 
/* workqueue implementation for 2.4 kernels */
 
18
struct workq_thread_data {
 
19
        workqueue_struct_t *workq;
 
20
        int index;
 
21
};
19
22
 
20
23
static int workq_thread(void *data)
21
24
{
22
 
        workqueue_struct_t *workq = data;
 
25
        struct workq_thread_data *thread_data = data;
 
26
        struct workqueue_thread *thread;
 
27
        workqueue_struct_t *workq;
23
28
        work_struct_t *work;
24
 
        unsigned long flags;
 
29
 
 
30
        workq = thread_data->workq;
 
31
        thread = &workq->threads[thread_data->index];
 
32
        WORKTRACE("%p, %d, %p", workq, thread_data->index, thread);
 
33
        strncpy(thread->name, current->comm, sizeof(thread->name));
25
34
 
26
35
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)
27
 
        strncpy(current->comm, workq->name, sizeof(current->comm));
28
 
        current->comm[sizeof(current->comm) - 1] = 0;
29
36
        daemonize();
30
37
        reparent_to_init();
31
38
        current->nice -= 5;
 
39
        sigfillset(&current->blocked);
32
40
#else
33
 
        daemonize(workq->name);
 
41
        daemonize(thread->name);
34
42
        set_user_nice(current, -5);
35
43
#endif
36
44
 
37
 
#ifdef PF_NOFREEZE
38
 
        current->flags |= PF_NOFREEZE;
39
 
#else
40
 
        sigfillset(&current->blocked);
41
 
#endif
42
 
 
43
 
        workq->task = current;
44
 
        complete(xchg(&workq->completion, NULL));
45
 
        WORKTRACE("%s (%d) started", workq->name, workq->pid);
46
 
        while (workq->pending >= 0) {
47
 
                if (wait_condition(workq->pending, 0, TASK_INTERRUPTIBLE) < 0) {
 
45
        if (thread->task != current) {
 
46
                WARNING("invalid task: %p, %p", thread->task, current);
 
47
                thread->task = current;
 
48
        }
 
49
        thread->pid = current->pid;
 
50
        complete(xchg(&thread->completion, NULL));
 
51
        WORKTRACE("%s (%d) started", thread->name, thread->pid);
 
52
        while (1) {
 
53
                if (wait_condition(thread->pending, 0, TASK_INTERRUPTIBLE) < 0) {
48
54
                        /* TODO: deal with signal */
49
55
                        WARNING("signal not blocked?");
50
56
                        flush_signals(current);
52
58
                }
53
59
                while (1) {
54
60
                        struct list_head *entry;
 
61
                        unsigned long flags;
55
62
 
56
 
                        spin_lock_irqsave(&workq->lock, flags);
57
 
                        if (list_empty(&workq->work_list)) {
 
63
                        spin_lock_irqsave(&thread->lock, flags);
 
64
                        if (list_empty(&thread->work_list)) {
58
65
                                struct completion *completion;
59
 
                                if (workq->pending > 0)
60
 
                                        workq->pending = 0;
61
 
                                completion = workq->completion;
62
 
                                workq->completion = NULL;
63
 
                                spin_unlock_irqrestore(&workq->lock, flags);
 
66
                                if (thread->pending < 0) {
 
67
                                        spin_unlock_irqrestore(&thread->lock,
 
68
                                                               flags);
 
69
                                        goto out;
 
70
                                }
 
71
                                thread->pending = 0;
 
72
                                completion = thread->completion;
 
73
                                thread->completion = NULL;
 
74
                                spin_unlock_irqrestore(&thread->lock, flags);
64
75
                                if (completion)
65
76
                                        complete(completion);
66
77
                                break;
67
78
                        }
68
 
                        entry = workq->work_list.next;
 
79
                        entry = thread->work_list.next;
69
80
                        work = list_entry(entry, work_struct_t, list);
70
 
                        if (xchg(&work->workq, NULL))
 
81
                        if (xchg(&work->thread, NULL))
71
82
                                list_del(entry);
72
83
                        else
73
84
                                work = NULL;
74
 
                        spin_unlock_irqrestore(&workq->lock, flags);
 
85
                        spin_unlock_irqrestore(&thread->lock, flags);
 
86
                        DBG_BLOCK(4) {
 
87
                                WORKTRACE("%p, %p", work, thread);
 
88
                        }
75
89
                        if (work)
76
90
                                work->func(work->data);
77
91
                }
78
92
        }
79
93
 
80
 
        WORKTRACE("%s exiting", workq->name);
81
 
        workq->pid = 0;
 
94
out:
 
95
        WORKTRACE("%s exiting", thread->name);
 
96
        thread->pid = 0;
82
97
        return 0;
83
98
}
84
99
 
85
 
wfastcall void wrap_queue_work(workqueue_struct_t *workq, work_struct_t *work)
 
100
wfastcall int wrap_queue_work_on(workqueue_struct_t *workq, work_struct_t *work,
 
101
                                 int cpu)
86
102
{
 
103
        struct workqueue_thread *thread = &workq->threads[cpu];
87
104
        unsigned long flags;
88
 
 
89
 
        spin_lock_irqsave(&workq->lock, flags);
90
 
        if (!work->workq) {
91
 
                work->workq = workq;
92
 
                list_add_tail(&work->list, &workq->work_list);
93
 
                workq->pending++;
94
 
                wake_up_process(workq->task);
95
 
        }
96
 
        spin_unlock_irqrestore(&workq->lock, flags);
 
105
        int ret;
 
106
 
 
107
        assert(thread->pid > 0);
 
108
        DBG_BLOCK(4) {
 
109
                WORKTRACE("%p, %d", workq, cpu);
 
110
        }
 
111
        spin_lock_irqsave(&thread->lock, flags);
 
112
        if (work->thread)
 
113
                ret = 0;
 
114
        else {
 
115
                work->thread = thread;
 
116
                list_add_tail(&work->list, &thread->work_list);
 
117
                thread->pending = 1;
 
118
                wake_up_process(thread->task);
 
119
                ret = 1;
 
120
        }
 
121
        spin_unlock_irqrestore(&thread->lock, flags);
 
122
        return ret;
 
123
}
 
124
 
 
125
/*
 * Queue 'work' on 'workq', spreading work across the per-CPU threads in
 * round-robin order.  Returns 1 if newly queued, 0 if already pending.
 */
wfastcall int wrap_queue_work(workqueue_struct_t *workq, work_struct_t *work)
{
	typeof(workq->qon) qon;

	if (NR_CPUS == 1 || workq->singlethread)
		return wrap_queue_work_on(workq, work, 0);

	/* pick the next live thread in round-robin fashion; slots with
	 * pid == 0 (thread not running) are skipped */
	do {
		qon = workq->qon % workq->num_cpus;
		atomic_inc_var(workq->qon);
	} while (!workq->threads[qon].pid);
	return wrap_queue_work_on(workq, work, qon);
}
98
139
 
99
140
/*
 * Remove 'work' from its thread's pending list, if it is still queued.
 * The xchg on work->thread races with the worker: whichever side clears
 * the pointer first performs the list_del, so no double-removal occurs.
 * If the worker already claimed the work, this is a no-op (it may still
 * be running).
 */
void wrap_cancel_work(work_struct_t *work)
{
	struct workqueue_thread *owner;
	unsigned long flags;

	WORKTRACE("%p", work);
	owner = xchg(&work->thread, NULL);
	if (!owner)
		return;
	WORKTRACE("%p", owner);
	spin_lock_irqsave(&owner->lock, flags);
	list_del(&work->list);
	spin_unlock_irqrestore(&owner->lock, flags);
}
110
153
 
111
 
workqueue_struct_t *wrap_create_wq(const char *name)
 
154
workqueue_struct_t *wrap_create_wq(const char *name, u8 singlethread, u8 freeze)
112
155
{
113
156
        struct completion started;
114
 
        workqueue_struct_t *workq = kmalloc(sizeof(*workq), GFP_KERNEL);
 
157
        workqueue_struct_t *workq;
 
158
        int i, n;
 
159
 
 
160
        if (singlethread)
 
161
                n = 1;
 
162
        else
 
163
                n = NR_CPUS;
 
164
        workq = kmalloc(sizeof(*workq) + n * sizeof(workq->threads[0]),
 
165
                        GFP_KERNEL);
115
166
        if (!workq) {
116
167
                WARNING("couldn't allocate memory");
117
168
                return NULL;
118
169
        }
119
 
        memset(workq, 0, sizeof(*workq));
120
 
        spin_lock_init(&workq->lock);
121
 
        strncpy(workq->name, name, sizeof(workq->name));
122
 
        workq->name[sizeof(workq->name) - 1] = 0;
123
 
        INIT_LIST_HEAD(&workq->work_list);
 
170
        memset(workq, 0, sizeof(*workq) + n * sizeof(workq->threads[0]));
 
171
        WORKTRACE("%p", workq);
124
172
        init_completion(&started);
125
 
        workq->completion = &started;
126
 
        workq->pid = kernel_thread(workq_thread, workq, 0);
127
 
        if (workq->pid <= 0) {
128
 
                kfree(workq);
129
 
                WARNING("couldn't start thread %s", name);
130
 
                return NULL;
 
173
        for_each_online_cpu(i) {
 
174
                struct workq_thread_data thread_data;
 
175
                spin_lock_init(&workq->threads[i].lock);
 
176
                INIT_LIST_HEAD(&workq->threads[i].work_list);
 
177
                INIT_COMPLETION(started);
 
178
                workq->threads[i].completion = &started;
 
179
                thread_data.workq = workq;
 
180
                thread_data.index = i;
 
181
                WORKTRACE("%p, %d, %p", workq, i, &workq->threads[i]);
 
182
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)
 
183
                workq->threads[i].pid =
 
184
                        kernel_thread(workq_thread, &thread_data, CLONE_SIGHAND);
 
185
                if (workq->threads[i].pid < 0)
 
186
                        workq->threads[i].task = (void *)-ENOMEM;
 
187
                else
 
188
                        workq->threads[i].task =
 
189
                                find_task_by_pid(workq->threads[i].pid);
 
190
#else
 
191
                workq->threads[i].task =
 
192
                        kthread_create(workq_thread, &thread_data,
 
193
                                       "%s/%d", name, i);
 
194
#endif
 
195
                if (IS_ERR(workq->threads[i].task)) {
 
196
                        int j;
 
197
                        for (j = 0; j < i; j++)
 
198
                                wrap_destroy_wq_on(workq, j);
 
199
                        kfree(workq);
 
200
                        WARNING("couldn't start thread %s", name);
 
201
                        return NULL;
 
202
                }
 
203
#ifdef PF_NOFREEZE
 
204
                if (!freeze)
 
205
                        workq->threads[i].task->flags |= PF_NOFREEZE;
 
206
#endif
 
207
                kthread_bind(workq->threads[i].task, i);
 
208
                workq->num_cpus = max(workq->num_cpus, i);
 
209
                wake_up_process(workq->threads[i].task);
 
210
                wait_for_completion(&started);
 
211
                WORKTRACE("%s, %d: %p, %d", name, i,
 
212
                          workq, workq->threads[i].pid);
 
213
                if (singlethread)
 
214
                        break;
131
215
        }
132
 
        wait_for_completion(&started);
 
216
        workq->num_cpus++;
133
217
        return workq;
134
218
}
135
219
 
136
 
void wrap_flush_wq(workqueue_struct_t *workq)
 
220
/*
 * Wait until the worker thread for 'cpu' has emptied its work list.
 * No-op if that thread was never started (e.g. the CPU was offline at
 * creation time) or has already exited.
 */
void wrap_flush_wq_on(workqueue_struct_t *workq, int cpu)
{
	struct workqueue_thread *thread = &workq->threads[cpu];
	struct completion done;

	WORKTRACE("%p: %d, %s", workq, cpu, thread->name);
	/* guard against zeroed slots (same check as wrap_destroy_wq_on);
	 * without it, wake_up_process(NULL) oopses for offline CPUs */
	if (!thread->pid)
		return;
	init_completion(&done);
	thread->completion = &done;
	thread->pending = 1;
	wake_up_process(thread->task);
	/* workq_thread completes 'done' once its list is empty */
	wait_for_completion(&done);
	return;
}
146
233
 
 
234
/*
 * Flush every worker thread of 'workq': returns once all per-CPU work
 * lists have drained.
 */
void wrap_flush_wq(workqueue_struct_t *workq)
{
	int cpu, count;

	WORKTRACE("%p", workq);
	count = workq->singlethread ? 1 : NR_CPUS;
	for (cpu = 0; cpu < count; cpu++)
		wrap_flush_wq_on(workq, cpu);
}
 
246
 
 
247
/*
 * Stop the worker thread for 'cpu' and wait for it to exit.
 * No-op if that thread was never started or already exited (pid == 0).
 */
void wrap_destroy_wq_on(workqueue_struct_t *workq, int cpu)
{
	struct workqueue_thread *thread = &workq->threads[cpu];

	WORKTRACE("%p: %d, %s", workq, cpu, thread->name);
	if (thread->pid == 0)
		return;
	/* negative pending tells workq_thread to exit its loop */
	thread->pending = -1;
	wake_up_process(thread->task);
	/* busy-wait: the thread clears its pid just before returning */
	for (;;) {
		if (!thread->pid)
			break;
		WORKTRACE("%d", thread->pid);
		schedule();
	}
}
 
261
 
147
262
/*
 * Tear down 'workq': stop every worker thread, then free the structure.
 */
void wrap_destroy_wq(workqueue_struct_t *workq)
{
	int cpu, count;

	WORKTRACE("%p", workq);
	count = workq->singlethread ? 1 : NR_CPUS;
	for (cpu = 0; cpu < count; cpu++)
		wrap_destroy_wq_on(workq, cpu);
	kfree(workq);
}