~ubuntu-branches/ubuntu/jaunty/ndiswrapper/jaunty

Viewing changes to driver/ntoskernel.c

  • Committer: Bazaar Package Importer
  • Author(s): Tollef Fog Heen
  • Date: 2006-07-10 14:06:05 UTC
  • mfrom: (1.2.4 upstream)
  • Revision ID: james.westby@ubuntu.com-20060710140605-56bdqtz5m7ze7aw3
Tags: 1.18-1ubuntu1
Sync with Debian.

@@ -27 +27 @@
  * maximum range used by a driver is CACHE_MDL_PAGES; if a driver
  * requests an MDL for a bigger region, we allocate it with kmalloc;
  * otherwise, we allocate from the pool */
-#define CACHE_MDL_PAGES 2
-#define CACHE_MDL_SIZE (sizeof(struct mdl) + (sizeof(ULONG) * CACHE_MDL_PAGES))
+
+#define CACHE_MDL_PAGES 3
+#define CACHE_MDL_SIZE (sizeof(struct mdl) + \
+                        (sizeof(PFN_NUMBER) * CACHE_MDL_PAGES))
 struct wrap_mdl {
         struct nt_list list;
         char mdl[CACHE_MDL_SIZE];
 };
 
+struct thread_event_wait {
+        wait_queue_head_t wq_head;
+        BOOLEAN done;
+#ifdef EVENT_DEBUG
+        struct task_struct *task;
+#endif
+        struct thread_event_wait *next;
+};
+
 /* everything here is for all drivers/devices - not per driver/device */
-static KSPIN_LOCK nt_event_lock;
-KSPIN_LOCK ntoskernel_lock;
+static NT_SPIN_LOCK dispatcher_lock;
+static struct thread_event_wait *thread_event_wait_pool;
+
+NT_SPIN_LOCK ntoskernel_lock;
 static kmem_cache_t *mdl_cache;
 static struct nt_list wrap_mdl_list;
 
-static KSPIN_LOCK inter_lock;
-
 struct work_struct kdpc_work;
 static struct nt_list kdpc_list;
-static KSPIN_LOCK kdpc_list_lock;
+static NT_SPIN_LOCK kdpc_list_lock;
 static void kdpc_worker(void *data);
 
 static struct nt_list callback_objects;
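The comment in this hunk describes a size-based split: MDLs covering at most CACHE_MDL_PAGES come from a fixed-size cache (mdl_cache), while anything larger falls back to kmalloc. A minimal single-threaded user-space sketch of that policy, assuming invented names (pool_alloc, POOL_BUF_SIZE) and a plain free list standing in for the kernel's kmem_cache:

#include <stdlib.h>

#define POOL_BUF_SIZE 64                /* analogue of CACHE_MDL_SIZE */

struct pool_buf { struct pool_buf *next; };
static struct pool_buf *pool_free_list; /* stand-in for mdl_cache */

static void *pool_alloc(size_t size, int *from_pool)
{
        if (size <= POOL_BUF_SIZE && pool_free_list) {
                struct pool_buf *buf = pool_free_list;
                pool_free_list = buf->next;     /* small: reuse cached buffer */
                *from_pool = 1;
                return buf;
        }
        *from_pool = 0;                         /* large, or pool empty: heap */
        return malloc(size);
}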
@@ -60 +71 @@
 static struct nt_list bus_driver_list;
 static void del_bus_drivers(void);
 
-struct work_struct wrap_work_item_work;
-struct nt_list wrap_work_item_list;
-KSPIN_LOCK wrap_work_item_list_lock;
-static void wrap_work_item_worker(void *data);
-
-KSPIN_LOCK irp_cancel_lock;
-
-extern KSPIN_LOCK loader_lock;
+static struct work_struct ntos_work_item_work;
+static struct nt_list ntos_work_item_list;
+static NT_SPIN_LOCK ntos_work_item_list_lock;
+static void ntos_work_item_worker(void *data);
+
+NT_SPIN_LOCK irp_cancel_lock;
+
+extern NT_SPIN_LOCK loader_lock;
 extern struct nt_list wrap_drivers;
 static struct nt_list wrap_timer_list;
-KSPIN_LOCK timer_lock;
+NT_SPIN_LOCK timer_lock;
 
 /* compute ticks (100ns) since 1601 until when system booted into
  * wrap_ticks_to_boot */
@@ -85 +96 @@
 static int add_bus_driver(const char *name);
 static void free_all_objects(void);
 static BOOLEAN queue_kdpc(struct kdpc *kdpc);
+BOOLEAN dequeue_kdpc(struct kdpc *kdpc);
 
 WRAP_EXPORT_MAP("KeTickCount", &jiffies);
 
 WRAP_EXPORT_MAP("NlsMbCodePageTag", FALSE);
 
+#ifdef USE_OWN_NTOS_WORKQUEUE
+struct workqueue_struct *ntos_wq;
+#endif
+
 int ntoskernel_init(void)
 {
         struct timeval now;
 
-        kspin_lock_init(&nt_event_lock);
-        kspin_lock_init(&ntoskernel_lock);
-        kspin_lock_init(&wrap_work_item_list_lock);
-        kspin_lock_init(&kdpc_list_lock);
-        kspin_lock_init(&irp_cancel_lock);
-        kspin_lock_init(&inter_lock);
+        nt_spin_lock_init(&dispatcher_lock);
+        nt_spin_lock_init(&ntoskernel_lock);
+        nt_spin_lock_init(&ntos_work_item_list_lock);
+        nt_spin_lock_init(&kdpc_list_lock);
+        nt_spin_lock_init(&irp_cancel_lock);
         InitializeListHead(&wrap_mdl_list);
         InitializeListHead(&kdpc_list);
         InitializeListHead(&callback_objects);
         InitializeListHead(&bus_driver_list);
         InitializeListHead(&object_list);
-        InitializeListHead(&wrap_work_item_list);
+        InitializeListHead(&ntos_work_item_list);
 
         INIT_WORK(&kdpc_work, kdpc_worker, NULL);
-        INIT_WORK(&wrap_work_item_work, wrap_work_item_worker, NULL);
+        INIT_WORK(&ntos_work_item_work, ntos_work_item_worker, NULL);
 
-        kspin_lock_init(&timer_lock);
+        nt_spin_lock_init(&timer_lock);
         InitializeListHead(&wrap_timer_list);
 
+        thread_event_wait_pool = NULL;
         do_gettimeofday(&now);
         wrap_ticks_to_boot = (u64)now.tv_sec * TICKSPERSEC;
         wrap_ticks_to_boot += now.tv_usec * 10;
         wrap_ticks_to_boot -= jiffies * TICKSPERSEC / HZ;
         wrap_ticks_to_boot += TICKS_1601_TO_1970;
 
+#ifdef USE_OWN_NTOS_WORKQUEUE
+        ntos_wq = create_singlethread_workqueue("ntos_wq");
+#endif
+
         if (add_bus_driver("PCI")
 #ifdef CONFIG_USB
             || add_bus_driver("USB")
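The wrap_ticks_to_boot arithmetic above converts the current wall-clock time to 100 ns ticks, subtracts how long the system has already been up (jiffies/HZ seconds), and rebases from the UNIX epoch to the Windows epoch of 1601. The same calculation in isolation, runnable with illustrative inputs (the interval 1601-1970 spans 369 years including 89 leap days, which matches the usual 100 ns constant):

#include <assert.h>
#include <stdint.h>

#define TICKSPERSEC 10000000ULL         /* 100 ns ticks per second */
#define TICKS_1601_TO_1970 ((369ULL * 365 + 89) * 24 * 3600 * TICKSPERSEC)

int main(void)
{
        uint64_t now_sec = 1152540365;                  /* illustrative "now" */
        uint64_t jiffies = 250 * 3600, hz = 250;        /* up for one hour */

        uint64_t ticks_to_boot = now_sec * TICKSPERSEC
                                 - jiffies * TICKSPERSEC / hz
                                 + TICKS_1601_TO_1970;
        /* boot happened one hour before "now" */
        assert(ticks_to_boot == (now_sec - 3600) * TICKSPERSEC
                                + TICKS_1601_TO_1970);
        return 0;
}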
@@ -145 +165 @@
 int ntoskernel_init_device(struct wrap_device *wd)
 {
         InitializeListHead(&wd->timer_list);
-        kspin_lock_init(&wd->timer_lock);
+        nt_spin_lock_init(&wd->timer_lock);
 #if defined(CONFIG_X86_64)
         *((ULONG64 *)&kuser_shared_data.system_time) = ticks_1601();
         shared_data_timer.data = (unsigned long)0;
@@ -158 +178 @@
 
 void ntoskernel_exit_device(struct wrap_device *wd)
 {
+        struct nt_list *ent;
         KIRQL irql;
 
         TRACEENTER2("");
@@ -166 +187 @@
         /* cancel any timers left by bugyy windows driver; also free
          * the memory for timers */
         while (1) {
-                struct nt_list *ent;
                 struct wrap_timer *wrap_timer;
 
-                irql = kspin_lock_irql(&timer_lock, DISPATCH_LEVEL);
+                irql = nt_spin_lock_irql(&timer_lock, DISPATCH_LEVEL);
                 ent = RemoveHeadList(&wd->timer_list);
-                kspin_unlock_irql(&timer_lock, irql);
+                nt_spin_unlock_irql(&timer_lock, irql);
                 if (!ent)
                         break;
                 wrap_timer = container_of(ent, struct wrap_timer, list);
@@ -182 +202 @@
                         WARNING("Buggy Windows driver left timer %p running",
                                 &wrap_timer->timer);
                 memset(wrap_timer, 0, sizeof(*wrap_timer));
-                wrap_kfree(wrap_timer);
+                slack_kfree(wrap_timer);
         }
         TRACEEXIT2(return);
 }
@@ -198 +218 @@
         while (1) {
                 struct wrap_timer *wrap_timer;
 
-                irql = kspin_lock_irql(&timer_lock, DISPATCH_LEVEL);
+                irql = nt_spin_lock_irql(&timer_lock, DISPATCH_LEVEL);
                 cur = RemoveTailList(&wrap_timer_list);
-                kspin_unlock_irql(&timer_lock, irql);
+                nt_spin_unlock_irql(&timer_lock, irql);
                 if (!cur)
                         break;
                 wrap_timer = container_of(cur, struct wrap_timer, list);
@@ -208 +228 @@
                         WARNING("Buggy Windows driver left timer %p running",
                                 &wrap_timer->timer);
                 memset(wrap_timer, 0, sizeof(*wrap_timer));
-                wrap_kfree(wrap_timer);
+                slack_kfree(wrap_timer);
         }
 
         DBGTRACE2("freeing MDLs");
         if (mdl_cache) {
-                irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+                irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
                 if (!IsListEmpty(&wrap_mdl_list)) {
                         ERROR("Windows driver didn't free all MDLs; "
                               "freeing them now");
@@ -228 +248 @@
                                         kfree(p);
                         }
                 }
-                kspin_unlock_irql(&ntoskernel_lock, irql);
+                nt_spin_unlock_irql(&ntoskernel_lock, irql);
                 kmem_cache_destroy(mdl_cache);
                 mdl_cache = NULL;
         }
 
         DBGTRACE2("freeing callbacks");
-        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
         while ((cur = RemoveHeadList(&callback_objects))) {
                 struct callback_object *object;
                 struct nt_list *ent;
@@ -246 +266 @@
                 }
                 kfree(object);
         }
-        kspin_unlock_irql(&ntoskernel_lock, irql);
+        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+
+        irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
+        DBGTRACE2("freeing thread event pool");
+        while (thread_event_wait_pool) {
+                struct thread_event_wait *next;
+                next = thread_event_wait_pool->next;
+                kfree(thread_event_wait_pool);
+                thread_event_wait_pool = next;
+        }
+        nt_spin_unlock_irql(&dispatcher_lock, irql);
 
         del_bus_drivers();
         free_all_objects();
 #if defined(CONFIG_X86_64)
         del_timer_sync(&shared_data_timer);
 #endif
+#ifdef USE_OWN_NTOS_WORKQUEUE
+        if (ntos_wq)
+                destroy_workqueue(ntos_wq);
+#endif
         TRACEEXIT2(return);
 }
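The thread_event_wait_pool teardown added above is the standard walk of a singly linked free list: capture the next pointer before freeing the node you stand on. The same pattern stands alone as a short sketch:

#include <stdlib.h>

struct node { struct node *next; };

static void free_list(struct node *head)
{
        while (head) {
                struct node *next = head->next; /* save before freeing */
                free(head);
                head = next;
        }
}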
 
@@ -272 +306 @@
 #endif
 
 void *allocate_object(ULONG size, enum common_object_type type, char *name)
-{                                                                       
+{
         struct common_object_header *hdr;
-        KIRQL irql;
         void *body;
+        KIRQL irql;
 
         /* we pad header as prefix to body */
         hdr = ExAllocatePoolWithTag(NonPagedPool, OBJECT_SIZE(size), 0);
@@ -287 +321 @@
         hdr->type = type;
         hdr->ref_count = 1;
         hdr->name = name;
-        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
         /* threads are looked up often (in KeWaitForXXx), so optimize
          * for fast lookups of threads */
         if (type == OBJECT_TYPE_NT_THREAD)
                 InsertHeadList(&object_list, &hdr->list);
         else
                 InsertTailList(&object_list, &hdr->list);
-        kspin_unlock_irql(&ntoskernel_lock, irql);
+        nt_spin_unlock_irql(&ntoskernel_lock, irql);
         body = HEADER_TO_OBJECT(hdr);
         DBGTRACE3("allocated hdr: %p, body: %p", hdr, body);
         return body;
 }
 
 void free_object(void *object)
-{                                                                       
+{
         struct common_object_header *hdr;
         KIRQL irql;
+
         hdr = OBJECT_TO_HEADER(object);
-        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
         RemoveEntryList(&hdr->list);
-        kspin_unlock_irql(&ntoskernel_lock, irql);
+        nt_spin_unlock_irql(&ntoskernel_lock, irql);
         DBGTRACE3("freed hdr: %p, body: %p", hdr, object);
         ExFreePool(hdr);
 }
@@ -315 +350 @@
 static void free_all_objects(void)
 {
         struct nt_list *cur;
-        KIRQL irql;
 
         TRACEENTER2("freeing objects");
         /* delete all objects */
         while (1) {
                 struct common_object_header *hdr;
+                KIRQL irql;
 
-                irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+                irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
                 cur = RemoveHeadList(&object_list);
-                kspin_unlock_irql(&ntoskernel_lock, irql);
+                nt_spin_unlock_irql(&ntoskernel_lock, irql);
                 if (!cur)
                         break;
                 hdr = container_of(cur, struct common_object_header, list);
@@ -347 +382 @@
         }
         memset(bus_driver, 0, sizeof(*bus_driver));
         strncpy(bus_driver->name, name, sizeof(bus_driver->name));
-        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
         InsertTailList(&bus_driver_list, &bus_driver->list);
-        kspin_unlock_irql(&ntoskernel_lock, irql);
+        nt_spin_unlock_irql(&ntoskernel_lock, irql);
         DBGTRACE1("bus driver %s is at %p", name, &bus_driver->drv_obj);
         return STATUS_SUCCESS;
 }
@@ -359 +394 @@
         struct nt_list *ent;
         KIRQL irql;
 
-        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
         while ((ent = RemoveHeadList(&bus_driver_list))) {
                 struct bus_driver *bus_driver;
                 bus_driver = container_of(ent, struct bus_driver, list);
                 /* TODO: make sure all all drivers are shutdown/removed */
                 kfree(bus_driver);
         }
-        kspin_unlock_irql(&ntoskernel_lock, irql);
+        nt_spin_unlock_irql(&ntoskernel_lock, irql);
 }
 
 struct driver_object *find_bus_driver(const char *name)
@@ -375 +410 @@
         struct driver_object *drv_obj;
         KIRQL irql;
 
-        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
         drv_obj = NULL;
         nt_list_for_each_entry(bus_driver, &bus_driver_list, list) {
                 if (strcmp(bus_driver->name, name) == 0)
                         drv_obj = &bus_driver->drv_obj;
         }
-        kspin_unlock_irql(&ntoskernel_lock, irql);
+        nt_spin_unlock_irql(&ntoskernel_lock, irql);
         return drv_obj;
 }
 
-_FASTCALL struct nt_list *WRAP_EXPORT(ExfInterlockedInsertHeadList)
-        (FASTCALL_DECL_3(struct nt_list *head, struct nt_list *entry,
-                         KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExfInterlockedInsertHeadList)
+        (struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
 {
         struct nt_list *first;
-        KIRQL irql;
+        unsigned long flags;
 
         TRACEENTER5("head = %p, entry = %p", head, entry);
-        irql = kspin_lock_irql(lock, DISPATCH_LEVEL);
+        nt_spin_lock_irqsave(lock, flags);
         first = InsertHeadList(head, entry);
-        kspin_unlock_irql(lock, irql);
+        nt_spin_unlock_irqrestore(lock, flags);
         DBGTRACE5("head = %p, old = %p", head, first);
         return first;
 }
 
-_FASTCALL struct nt_list *WRAP_EXPORT(ExInterlockedInsertHeadList)
-        (FASTCALL_DECL_3(struct nt_list *head, struct nt_list *entry,
-                         KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExInterlockedInsertHeadList)
+        (struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
 {
         TRACEENTER5("%p", head);
-        return ExfInterlockedInsertHeadList(FASTCALL_ARGS_3(head, entry,
-                                                            lock));
+        return ExfInterlockedInsertHeadList(head, entry, lock);
 }
 
-_FASTCALL struct nt_list *WRAP_EXPORT(ExfInterlockedInsertTailList)
-        (FASTCALL_DECL_3(struct nt_list *head, struct nt_list *entry,
-                         KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExfInterlockedInsertTailList)
+        (struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
 {
         struct nt_list *last;
-        KIRQL irql;
+        unsigned long flags;
 
         TRACEENTER5("head = %p, entry = %p", head, entry);
-        irql = kspin_lock_irql(lock, DISPATCH_LEVEL);
+        nt_spin_lock_irqsave(lock, flags);
         last = InsertTailList(head, entry);
-        kspin_unlock_irql(lock, irql);
+        nt_spin_unlock_irqrestore(lock, flags);
         DBGTRACE5("head = %p, old = %p", head, last);
         return last;
 }
 
-_FASTCALL struct nt_list *WRAP_EXPORT(ExInterlockedInsertTailList)
-        (FASTCALL_DECL_3(struct nt_list *head, struct nt_list *entry,
-                         KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExInterlockedInsertTailList)
+        (struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
 {
         TRACEENTER5("%p", head);
-        return ExfInterlockedInsertTailList(FASTCALL_ARGS_3(head, entry,
-                                                            lock));
+        return ExfInterlockedInsertTailList(head, entry, lock);
 }
 
-_FASTCALL struct nt_list *WRAP_EXPORT(ExfInterlockedRemoveHeadList)
-        (FASTCALL_DECL_2(struct nt_list *head, KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExfInterlockedRemoveHeadList)
+        (struct nt_list *head, NT_SPIN_LOCK *lock)
 {
         struct nt_list *ret;
-        KIRQL irql;
+        unsigned long flags;
 
         TRACEENTER5("head = %p", head);
-        irql = kspin_lock_irql(lock, DISPATCH_LEVEL);
+        nt_spin_lock_irqsave(lock, flags);
         ret = RemoveHeadList(head);
-        kspin_unlock_irql(lock, irql);
+        nt_spin_unlock_irqrestore(lock, flags);
         DBGTRACE5("head = %p, ret = %p", head, ret);
         return ret;
 }
 
-_FASTCALL struct nt_list *WRAP_EXPORT(ExInterlockedRemoveHeadList)
-        (FASTCALL_DECL_2(struct nt_list *head, KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExInterlockedRemoveHeadList)
+        (struct nt_list *head, NT_SPIN_LOCK *lock)
 {
         TRACEENTER5("%p", head);
-        return ExfInterlockedRemoveHeadList(FASTCALL_ARGS_2(head, lock));
+        return ExfInterlockedRemoveHeadList(head, lock);
 }
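All of the Ex*Interlocked*List wrappers above serialize the same circular doubly linked list primitives under a spinlock. A self-contained sketch of those primitives (lowercase invented names; the return conventions mirror what the DBGTRACE calls log, but this is an illustration, not the driver's own definitions):

#include <stddef.h>

struct nt_list { struct nt_list *next, *prev; };

static void init_list_head(struct nt_list *head)
{
        head->next = head->prev = head;
}

/* insert at front; returns the previous first entry, NULL if list was empty */
static struct nt_list *insert_head_list(struct nt_list *head,
                                        struct nt_list *entry)
{
        struct nt_list *old = (head->next == head) ? NULL : head->next;

        entry->next = head->next;
        entry->prev = head;
        head->next->prev = entry;
        head->next = entry;
        return old;
}

/* unlink and return the first entry, NULL if the list is empty */
static struct nt_list *remove_head_list(struct nt_list *head)
{
        struct nt_list *entry = head->next;

        if (entry == head)
                return NULL;
        head->next = entry->next;
        entry->next->prev = head;
        return entry;
}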
 
-_FASTCALL struct nt_list *WRAP_EXPORT(ExfInterlockedRemoveTailList)
-        (FASTCALL_DECL_2(struct nt_list *head, KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExfInterlockedRemoveTailList)
+        (struct nt_list *head, NT_SPIN_LOCK *lock)
 {
         struct nt_list *ret;
-        KIRQL irql;
+        unsigned long flags;
 
         TRACEENTER5("head = %p", head);
-        irql = kspin_lock_irql(lock, DISPATCH_LEVEL);
+        nt_spin_lock_irqsave(lock, flags);
         ret = RemoveTailList(head);
-        kspin_unlock_irql(lock, irql);
-        DBGTRACE5("head = %p, ret = %p", head, ret);
-        return ret;
-}
-
-_FASTCALL struct nt_list *WRAP_EXPORT(ExInterlockedRemoveTailList)
-        (FASTCALL_DECL_2(struct nt_list *head, KSPIN_LOCK *lock))
-{
-        TRACEENTER5("%p", head);
-        return ExfInterlockedRemoveTailList(FASTCALL_ARGS_2(head, lock));
-}
-
-STDCALL struct nt_slist *WRAP_EXPORT(ExpInterlockedPushEntrySList)
-        (union nt_slist_head *head, struct nt_slist *entry)
-{
-        struct nt_slist *ret;
-        KIRQL irql;
-
-        TRACEENTER5("head = %p", head);
-        irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-        ret = PushEntryList(head, entry);
-        kspin_unlock_irql(&inter_lock, irql);
-        DBGTRACE5("head = %p, ret = %p", head, ret);
-        return ret;
-}
-
-_FASTCALL struct nt_slist *WRAP_EXPORT(ExInterlockedPushEntrySList)
-        (FASTCALL_DECL_3(union nt_slist_head *head, struct nt_slist *entry,
-                         KSPIN_LOCK *lock))
-{
-        TRACEENTER5("%p", head);
-        return ExpInterlockedPushEntrySList(head, entry);
-}
-
-_FASTCALL struct nt_slist *WRAP_EXPORT(InterlockedPushEntrySList)
-        (FASTCALL_DECL_2(union nt_slist_head *head, struct nt_slist *entry))
-{
-        TRACEENTER5("%p", head);
-        return ExpInterlockedPushEntrySList(head, entry);
-}
-
-STDCALL struct nt_slist *WRAP_EXPORT(ExpInterlockedPopEntrySList)
-        (union nt_slist_head *head)
-{
-        struct nt_slist *ret;
-        KIRQL irql;
-
-        TRACEENTER5("head = %p", head);
-        irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-        ret = PopEntryList(head);
-        kspin_unlock_irql(&inter_lock, irql);
-        DBGTRACE5("head = %p, ret = %p", head, ret);
-        return ret;
-}
-
-_FASTCALL struct nt_slist *WRAP_EXPORT(ExInterlockedPopEntrySList)
-        (FASTCALL_DECL_2(union nt_slist_head *head, KSPIN_LOCK *lock))
-{
-        TRACEENTER5("%p", head);
-        return ExpInterlockedPopEntrySList(head);
-}
-
-_FASTCALL struct nt_slist *WRAP_EXPORT(InterlockedPopEntrySList)
-        (FASTCALL_DECL_1(union nt_slist_head *head))
-{
-        TRACEENTER5("%p", head);
-        return ExpInterlockedPopEntrySList(head);
-}
-
-STDCALL USHORT WRAP_EXPORT(ExQueryDepthSList)
-        (union nt_slist_head *head)
-{
-        TRACEENTER5("%p", head);
-        return head->list.depth;
-}
-
-/* should be called with nt_event_lock held at DISPATCH_LEVEL */
-static void initialize_dh(struct dispatch_header *dh, enum event_type type,
-                          int state, enum dh_type dh_type)
+        nt_spin_unlock_irqrestore(lock, flags);
+        DBGTRACE5("head = %p, ret = %p", head, ret);
+        return ret;
+}
+
+wfastcall struct nt_list *WRAP_EXPORT(ExInterlockedRemoveTailList)
+        (struct nt_list *head, NT_SPIN_LOCK *lock)
+{
+        TRACEENTER5("%p", head);
+        return ExfInterlockedRemoveTailList(head, lock);
+}
+
+wfastcall struct nt_slist *WRAP_EXPORT(ExInterlockedPushEntrySList)
+        (nt_slist_header *head, struct nt_slist *entry, NT_SPIN_LOCK *lock)
+{
+        struct nt_slist *ret;
+
+        TRACEENTER5("head = %p", head);
+        ret = PushEntrySList(head, entry, lock);
+        DBGTRACE5("head = %p, ret = %p", head, ret);
+        return ret;
+}
+
+wstdcall struct nt_slist *WRAP_EXPORT(ExpInterlockedPushEntrySList)
+        (nt_slist_header *head, struct nt_slist *entry)
+{
+        struct nt_slist *ret;
+
+        TRACEENTER5("head = %p", head);
+        ret = PushEntrySList(head, entry, &ntoskernel_lock);
+        DBGTRACE5("head = %p, ret = %p", head, ret);
+        return ret;
+}
+
+wfastcall struct nt_slist *WRAP_EXPORT(InterlockedPushEntrySList)
+        (nt_slist_header *head, struct nt_slist *entry)
+{
+        struct nt_slist *ret;
+
+        TRACEENTER5("head = %p", head);
+        ret = PushEntrySList(head, entry, &ntoskernel_lock);
+        DBGTRACE5("head = %p, ret = %p", head, ret);
+        return ret;
+}
+
+wfastcall struct nt_slist *WRAP_EXPORT(ExInterlockedPopEntrySList)
+        (nt_slist_header *head, NT_SPIN_LOCK *lock)
+{
+        struct nt_slist *ret;
+
+        TRACEENTER5("head = %p", head);
+        ret = PopEntrySList(head, lock);
+        DBGTRACE5("head = %p, ret = %p", head, ret);
+        return ret;
+}
+
+wstdcall struct nt_slist *WRAP_EXPORT(ExpInterlockedPopEntrySList)
+        (nt_slist_header *head)
+{
+        struct nt_slist *ret;
+
+        TRACEENTER5("head = %p", head);
+        ret = PopEntrySList(head, &ntoskernel_lock);
+        DBGTRACE5("head = %p, ret = %p", head, ret);
+        return ret;
+}
+
+wfastcall struct nt_slist *WRAP_EXPORT(InterlockedPopEntrySList)
+        (nt_slist_header *head)
+{
+        struct nt_slist *ret;
+
+        TRACEENTER5("head = %p", head);
+        ret = PopEntrySList(head, &ntoskernel_lock);
+        DBGTRACE5("head = %p, ret = %p", head, ret);
+        return ret;
+}
+
+wstdcall USHORT WRAP_EXPORT(ExQueryDepthSList)
+        (nt_slist_header *head)
+{
+        TRACEENTER5("%p", head);
+#ifdef CONFIG_X86_64
+        return head->align & 0xffff;
+#else
+        return head->depth;
+#endif
+}
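ExQueryDepthSList above depends on the SList header layout: on x86-64 the entry count sits in the low 16 bits of the leading 8-byte word, so masking with 0xffff recovers it (x86 being little-endian). A self-contained illustration of just the masking trick, using an invented union layout rather than the driver's real nt_slist_header:

#include <assert.h>
#include <stdint.h>

union slist_header {
        uint64_t align;
        struct {
                uint16_t depth;         /* low 16 bits of align */
                uint16_t sequence;      /* ABA counter */
                uint32_t rest;          /* remainder of the word */
        } fields;
};

int main(void)
{
        union slist_header h = { .align = 0 };

        h.fields.depth = 5;
        h.fields.sequence = 99;
        assert((h.align & 0xffff) == 5);        /* the mask used above */
        return 0;
}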
 
+
+wfastcall LONG WRAP_EXPORT(InterlockedIncrement)
+        (LONG volatile *val)
+{
+        LONG ret;
+
+        TRACEENTER5("%p, %d", val, *val);
+        __asm__ __volatile__(
+                "\n"
+                LOCK_PREFIX "xadd %1, %2\n\t"
+                "inc %1\n\t"
+                : "=r" (ret)
+                : "0" (1), "m" (*val)
+                : "memory");
+        TRACEEXIT5(return ret);
+}
+
+wfastcall LONG WRAP_EXPORT(InterlockedDecrement)
+        (LONG volatile *val)
+{
+        LONG ret;
+
+        TRACEENTER5("%p, %d", val, *val);
+        __asm__ __volatile__(
+                "\n"
+                LOCK_PREFIX "xadd %1, %2\n\t"
+                "dec %1\n\t"
+                : "=r" (ret)
+                : "0" (-1), "m" (*val)
+                : "memory");
+        TRACEEXIT5(return ret);
+}
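In both routines above, xadd leaves the old value in the register, so the trailing inc/dec converts it into the new value that the Windows Interlocked contract returns. The same idiom expressed with GCC's atomic builtins, runnable in user space:

#include <assert.h>

static long interlocked_increment(volatile long *val)
{
        long old = __sync_fetch_and_add(val, 1);        /* LOCK xadd */
        return old + 1;                                 /* the "inc" */
}

static long interlocked_decrement(volatile long *val)
{
        long old = __sync_fetch_and_add(val, -1);       /* LOCK xadd */
        return old - 1;                                 /* the "dec" */
}

int main(void)
{
        volatile long v = 41;

        assert(interlocked_increment(&v) == 42 && v == 42);
        assert(interlocked_decrement(&v) == 41 && v == 41);
        return 0;
}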
 
+
+wfastcall LONG WRAP_EXPORT(InterlockedExchange)
+        (LONG volatile *target, LONG val)
+{
+        LONG ret;
+        TRACEENTER5("");
+        ret = xchg(target, val);
+        TRACEEXIT5(return ret);
+}
+
+wfastcall LONG WRAP_EXPORT(InterlockedCompareExchange)
+        (LONG volatile *dest, LONG xchg, LONG comperand)
+{
+        LONG ret;
+
+        TRACEENTER5("");
+        ret = cmpxchg(dest, comperand, xchg);
+        TRACEEXIT5(return ret);
+}
+
+wfastcall void WRAP_EXPORT(ExInterlockedAddLargeStatistic)
+        (LARGE_INTEGER volatile *plint, ULONG n)
+{
+        unsigned long flags;
+        save_local_irq(flags);
+#ifdef CONFIG_X86_64
+        __asm__ __volatile__(
+                "\n"
+                LOCK_PREFIX "add %0, %1\n\t"
+                :
+                : "r" (n), "m" (*plint)
+                : "memory");
+#else
+        __asm__ __volatile__(
+                "\n"
+                "1:\t"
+                "   movl %1, %%ebx\n\t"
+                "   movl %%edx, %%ecx\n\t"
+                "   addl %%eax, %%ebx\n\t"
+                "   adcl $0, %%ecx\n\t"
+                    LOCK_PREFIX "cmpxchg8b (%0)\n\t"
+                "   jnz 1b\n\t"
+                : "+r" (plint)
+                : "m" (n), "a" (ll_low(*plint)), "d" (ll_high(*plint))
+                : "ebx", "ecx", "cc", "memory");
+#endif
+        restore_local_irq(flags);
+}
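The i386 branch of ExInterlockedAddLargeStatistic is a classic cmpxchg8b retry loop: snapshot the 64-bit counter in edx:eax, compute the sum in ecx:ebx, and attempt an atomic 8-byte compare-exchange, retrying if another CPU updated the counter in between. A compact equivalent with a compiler builtin in place of the inline assembly:

#include <stdint.h>

static void add_large_statistic(volatile uint64_t *stat, uint32_t n)
{
        uint64_t old, sum;

        do {
                old = *stat;            /* snapshot both 32-bit halves */
                sum = old + n;          /* 64-bit add with carry */
        } while (__sync_val_compare_and_swap(stat, old, sum) != old);
}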
 
+
+/* should be called with dispatcher_lock held at DISPATCH_LEVEL */
+static void initialize_dh(struct dispatcher_header *dh, enum dh_type type,
+                          int state)
 {
         memset(dh, 0, sizeof(*dh));
-        dh->type = type;
+        set_dh_type(dh, type);
         dh->signal_state = state;
-        set_dh_type(dh, dh_type);
         InitializeListHead(&dh->wait_blocks);
 }
 
@@ -555 +676 @@
         struct nt_timer *nt_timer = (struct nt_timer *)data;
         struct wrap_timer *wrap_timer;
         struct kdpc *kdpc;
-        KIRQL irql;
 
         wrap_timer = nt_timer->wrap_timer;
         TRACEENTER5("%p(%p), %lu", wrap_timer, nt_timer, jiffies);
@@ -563 +683 @@
         BUG_ON(wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
         BUG_ON(nt_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
 #endif
+        KeSetEvent((struct nt_event *)nt_timer, 0, FALSE);
+
         kdpc = nt_timer->kdpc;
-        KeSetEvent((struct nt_event *)nt_timer, 0, FALSE);
-        if (kdpc && kdpc->func) {
-                DBGTRACE5("kdpc %p (%p)", kdpc, kdpc->func);
-                LIN2WIN4(kdpc->func, kdpc, kdpc->ctx,
-                         kdpc->arg1, kdpc->arg2);
-        }
+        if (kdpc && kdpc->func)
+//              queue_kdpc(kdpc);
+                LIN2WIN4(kdpc->func, kdpc, kdpc->ctx, kdpc->arg1, kdpc->arg2);
 
-        irql = kspin_lock_irql(&timer_lock, DISPATCH_LEVEL);
+        nt_spin_lock(&timer_lock);
         if (wrap_timer->repeat)
                 mod_timer(&wrap_timer->timer, jiffies + wrap_timer->repeat);
-        kspin_unlock_irql(&timer_lock, irql);
+        nt_spin_unlock(&timer_lock);
+
 
         TRACEEXIT5(return);
 }
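The handler above makes a timer periodic by re-arming itself: after each expiry it calls mod_timer with jiffies + repeat, under timer_lock so KeCancelTimer cannot race the re-arm. The control flow reduced to a runnable user-space skeleton (tick and ptimer are invented names; a loop stands in for the kernel timer wheel):

#include <stdio.h>

struct ptimer { unsigned long expires, repeat; };

static void tick(struct ptimer *t, unsigned long now)
{
        if (now < t->expires)
                return;
        printf("timer fired at %lu\n", now);    /* the DPC would run here */
        if (t->repeat)
                t->expires = now + t->repeat;   /* the mod_timer re-arm */
}

int main(void)
{
        struct ptimer t = { .expires = 2, .repeat = 3 };

        for (unsigned long now = 0; now < 10; now++)
                tick(&t, now);                  /* fires at 2, 5, 8 */
        return 0;
}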
@@ -591 +711 @@
          * set and not allocate, but it is not guaranteed always to be
          * safe */
         TRACEENTER5("%p", nt_timer);
-        /* we allocate memory for wrap_timer behind driver's back
-         * and there is no NDIS/DDK function where this memory can be
-         * freed, so we use wrap_kmalloc so it gets freed when driver
+        /* we allocate memory for wrap_timer behind driver's back and
+         * there is no NDIS/DDK function where this memory can be
+         * freed, so we use slack_kmalloc so it gets freed when driver
          * is unloaded */
-        wrap_timer = wrap_kmalloc(sizeof(*wrap_timer));
+        wrap_timer = slack_kmalloc(sizeof(*wrap_timer));
         if (!wrap_timer) {
                 ERROR("couldn't allocate memory for timer");
                 return;
@@ -611 +731 @@
 #endif
         nt_timer->wrap_timer = wrap_timer;
         nt_timer->kdpc = NULL;
-        initialize_dh(&nt_timer->dh, type, 0, DH_NT_TIMER);
+        initialize_dh(&nt_timer->dh, type, 0);
         nt_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
         if (wd) {
-                irql = kspin_lock_irql(&wd->timer_lock, DISPATCH_LEVEL);
+                irql = nt_spin_lock_irql(&wd->timer_lock, DISPATCH_LEVEL);
                 InsertTailList(&wd->timer_list, &wrap_timer->list);
-                kspin_unlock_irql(&wd->timer_lock, irql);
+                nt_spin_unlock_irql(&wd->timer_lock, irql);
         } else {
-                irql = kspin_lock_irql(&timer_lock, DISPATCH_LEVEL);
+                irql = nt_spin_lock_irql(&timer_lock, DISPATCH_LEVEL);
                 InsertTailList(&wrap_timer_list, &wrap_timer->list);
-                kspin_unlock_irql(&timer_lock, irql);
+                nt_spin_unlock_irql(&timer_lock, irql);
         }
         DBGTRACE5("timer %p (%p)", wrap_timer, nt_timer);
         TRACEEXIT5(return);
 }
 
-STDCALL void WRAP_EXPORT(KeInitializeTimerEx)
+wstdcall void WRAP_EXPORT(KeInitializeTimerEx)
         (struct nt_timer *nt_timer, enum timer_type type)
 {
         TRACEENTER5("%p", nt_timer);
         wrap_init_timer(nt_timer, type, NULL);
 }
 
-STDCALL void WRAP_EXPORT(KeInitializeTimer)
+wstdcall void WRAP_EXPORT(KeInitializeTimer)
         (struct nt_timer *nt_timer)
 {
         TRACEENTER5("%p", nt_timer);
@@ -645 +765 @@
                        unsigned long repeat_hz, struct kdpc *kdpc)
 {
         BOOLEAN ret;
+        struct wrap_timer *wrap_timer;
         KIRQL irql;
-        struct wrap_timer *wrap_timer;
 
         TRACEENTER4("%p, %lu, %lu, %p, %lu",
                     nt_timer, expires_hz, repeat_hz, kdpc, jiffies);
@@ -666 +786 @@
                 wrap_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
         }
 #endif
-        irql = kspin_lock_irql(&timer_lock, DISPATCH_LEVEL);
+        irql = nt_spin_lock_irql(&timer_lock, DISPATCH_LEVEL);
         if (kdpc)
                 nt_timer->kdpc = kdpc;
         wrap_timer->repeat = repeat_hz;
+        nt_spin_unlock_irql(&timer_lock, irql);
         if (mod_timer(&wrap_timer->timer, jiffies + expires_hz))
                 ret = TRUE;
         else
                 ret = FALSE;
-        kspin_unlock_irql(&timer_lock, irql);
         TRACEEXIT5(return ret);
 }
 
-STDCALL BOOLEAN WRAP_EXPORT(KeSetTimerEx)
+wstdcall BOOLEAN WRAP_EXPORT(KeSetTimerEx)
         (struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
          LONG period_ms, struct kdpc *kdpc)
 {
@@ -690 +810 @@
         return wrap_set_timer(nt_timer, expires_hz, repeat_hz, kdpc);
 }
 
-STDCALL BOOLEAN WRAP_EXPORT(KeSetTimer)
+wstdcall BOOLEAN WRAP_EXPORT(KeSetTimer)
         (struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
          struct kdpc *kdpc)
 {
@@ -698 +818 @@
         return KeSetTimerEx(nt_timer, duetime_ticks, 0, kdpc);
 }
 
-STDCALL BOOLEAN WRAP_EXPORT(KeCancelTimer)
+wstdcall BOOLEAN WRAP_EXPORT(KeCancelTimer)
         (struct nt_timer *nt_timer)
 {
         BOOLEAN canceled;
-        KIRQL irql;
         struct wrap_timer *wrap_timer;
 
         TRACEENTER5("%p", nt_timer);
@@ -717 +836 @@
 #endif
         /* del_timer_sync may not be called here, as this function can
          * be called at DISPATCH_LEVEL */
-        irql = kspin_lock_irql(&timer_lock, DISPATCH_LEVEL);
+        nt_spin_lock(&timer_lock);
         DBGTRACE5("deleting timer %p(%p)", wrap_timer, nt_timer);
         /* disable timer before deleting so it won't be re-armed after
          * deleting */
@@ -726 +845 @@
                 canceled = TRUE;
         else
                 canceled = FALSE;
-        kspin_unlock_irql(&timer_lock, irql);
+        nt_spin_unlock(&timer_lock);
         DBGTRACE5("canceled (%p): %d", wrap_timer, canceled);
         TRACEEXIT5(return canceled);
 }
 
-STDCALL void WRAP_EXPORT(KeInitializeDpc)
+wstdcall void WRAP_EXPORT(KeInitializeDpc)
         (struct kdpc *kdpc, void *func, void *ctx)
 {
         TRACEENTER3("%p, %p, %p", kdpc, func, ctx);
         memset(kdpc, 0, sizeof(*kdpc));
-        kdpc->number = 0;
+        kdpc->nr_cpu = FALSE;
         kdpc->func = func;
         kdpc->ctx  = ctx;
         InitializeListHead(&kdpc->list);
@@ -749 +868 @@
         KIRQL irql;
 
         while (1) {
-                irql = kspin_lock_irql(&kdpc_list_lock, DISPATCH_LEVEL);
+                irql = nt_spin_lock_irql(&kdpc_list_lock, DISPATCH_LEVEL);
                 entry = RemoveHeadList(&kdpc_list);
                 if (!entry) {
-                        kspin_unlock_irql(&kdpc_list_lock, irql);
+                        nt_spin_unlock_irql(&kdpc_list_lock, irql);
                         break;
                 }
                 kdpc = container_of(entry, struct kdpc, list);
-                kdpc->number = 0;
-                kspin_unlock_irql(&kdpc_list_lock, irql);
+                kdpc->nr_cpu = FALSE;
+                nt_spin_unlock_irql(&kdpc_list_lock, irql);
                 DBGTRACE5("%p, %p, %p, %p, %p", kdpc, kdpc->func, kdpc->ctx,
                           kdpc->arg1, kdpc->arg2);
                 irql = raise_irql(DISPATCH_LEVEL);
@@ -766 +885 @@
         }
 }
 
-STDCALL void KeFlushQueuedDpcs(void)
+wstdcall void KeFlushQueuedDpcs(void)
 {
         kdpc_worker(NULL);
 }
 
 static BOOLEAN queue_kdpc(struct kdpc *kdpc)
 {
+        BOOLEAN ret;
         KIRQL irql;
-        BOOLEAN ret;
 
         TRACEENTER5("%p", kdpc);
         if (!kdpc)
                 return FALSE;
-        irql = kspin_lock_irql(&kdpc_list_lock, DISPATCH_LEVEL);
-        if (kdpc->number) {
-                if (kdpc->number != 1)
-                        ERROR("kdpc->number: %d", kdpc->number);
+        irql = nt_spin_lock_irql(&kdpc_list_lock, DISPATCH_LEVEL);
+        if (kdpc->nr_cpu) {
+                if (kdpc->nr_cpu != TRUE)
+                        ERROR("kdpc->nr_cpu: %d", kdpc->nr_cpu);
                 ret = FALSE;
         } else {
-                kdpc->number = 1;
+                ret = kdpc->nr_cpu = TRUE;
                 InsertTailList(&kdpc_list, &kdpc->list);
-                ret = TRUE;
+                schedule_ntos_work(&kdpc_work);
         }
-        kspin_unlock_irql(&kdpc_list_lock, irql);
-        if (ret == TRUE)
-                schedule_work(&kdpc_work);
+        nt_spin_unlock_irql(&kdpc_list_lock, irql);
         TRACEEXIT5(return ret);
 }
 
 BOOLEAN dequeue_kdpc(struct kdpc *kdpc)
 {
+        BOOLEAN ret;
         KIRQL irql;
-        BOOLEAN ret;
 
+        TRACEENTER5("%p", kdpc);
         if (!kdpc)
                 return FALSE;
-        irql = kspin_lock_irql(&kdpc_list_lock, DISPATCH_LEVEL);
-        if (kdpc->number) {
-                if (kdpc->number != 1)
-                        ERROR("kdpc->number: %d", kdpc->number);
+        irql = nt_spin_lock_irql(&kdpc_list_lock, DISPATCH_LEVEL);
+        if (kdpc->nr_cpu) {
+                if (kdpc->nr_cpu != TRUE)
+                        ERROR("kdpc->nr_cpu: %d", kdpc->nr_cpu);
+                kdpc->nr_cpu = FALSE;
                 RemoveEntryList(&kdpc->list);
-                kdpc->number = 0;
                 ret = TRUE;
         } else
                 ret = FALSE;
-        kspin_unlock_irql(&kdpc_list_lock, irql);
+        nt_spin_unlock_irql(&kdpc_list_lock, irql);
         return ret;
 }
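queue_kdpc and dequeue_kdpc above use nr_cpu as a simple "already queued" flag, tested and flipped under kdpc_list_lock so a DPC can sit on the list at most once. The discipline in miniature, with a pthread mutex in place of the spinlock (dpc, queue_once, and the list are invented for the example):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct dpc { bool queued; struct dpc *next; };

static struct dpc *dpc_list;
static pthread_mutex_t dpc_lock = PTHREAD_MUTEX_INITIALIZER;

/* returns true only if this call actually queued the DPC */
static bool queue_once(struct dpc *d)
{
        bool ret = false;

        pthread_mutex_lock(&dpc_lock);
        if (!d->queued) {               /* not pending yet */
                d->queued = true;
                d->next = dpc_list;
                dpc_list = d;
                ret = true;
        }
        pthread_mutex_unlock(&dpc_lock);
        return ret;
}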
 
-STDCALL BOOLEAN WRAP_EXPORT(KeInsertQueueDpc)
+wstdcall BOOLEAN WRAP_EXPORT(KeInsertQueueDpc)
         (struct kdpc *kdpc, void *arg1, void *arg2)
 {
         BOOLEAN ret;
@@ -827 +945 @@
         TRACEEXIT5(return ret);
 }
 
-STDCALL BOOLEAN WRAP_EXPORT(KeRemoveQueueDpc)
+wstdcall BOOLEAN WRAP_EXPORT(KeRemoveQueueDpc)
         (struct kdpc *kdpc)
 {
         BOOLEAN ret;
@@ -837 +955 @@
         TRACEEXIT3(return ret);
 }
 
-static void wrap_work_item_worker(void *data)
+static void ntos_work_item_worker(void *data)
 {
-        struct wrap_work_item *wrap_work_item;
+        struct ntos_work_item *ntos_work_item;
         struct nt_list *cur;
-        void (*func)(void *arg1, void *arg2) STDCALL;
+        typeof(ntos_work_item->func) func;
         KIRQL irql;
 
         while (1) {
-                irql = kspin_lock_irql(&wrap_work_item_list_lock,
-                                       DISPATCH_LEVEL);
-                cur = RemoveHeadList(&wrap_work_item_list);
-                kspin_unlock_irql(&wrap_work_item_list_lock, irql);
+                irql = nt_spin_lock_irql(&ntos_work_item_list_lock,
+                                         DISPATCH_LEVEL);
+                cur = RemoveHeadList(&ntos_work_item_list);
+                nt_spin_unlock_irql(&ntos_work_item_list_lock, irql);
                 if (!cur)
                         break;
-                wrap_work_item = container_of(cur, struct wrap_work_item,
-                                              list);
-                func = wrap_work_item->func;
-                DBGTRACE4("%p, %p, %p", func, wrap_work_item->arg1,
-                          wrap_work_item->arg2);
-                if (wrap_work_item->win_func == TRUE)
-                        LIN2WIN2(func, wrap_work_item->arg1,
-                                 wrap_work_item->arg2);
+                ntos_work_item = container_of(cur, struct ntos_work_item, list);
+                func = ntos_work_item->func;
+                WORKTRACE("%p: executing %p, %p, %p", current, func,
+                          ntos_work_item->arg1, ntos_work_item->arg2);
+                if (ntos_work_item->func_type == WORKER_FUNC_WIN)
+                        LIN2WIN2(func, ntos_work_item->arg1,
+                                 ntos_work_item->arg2);
                 else
-                        func(wrap_work_item->arg1, wrap_work_item->arg2);
-                kfree(wrap_work_item);
+                        func(ntos_work_item->arg1, ntos_work_item->arg2);
+                kfree(ntos_work_item);
         }
         return;
 }
 
-int schedule_wrap_work_item(WRAP_WORK_FUNC func, void *arg1, void *arg2,
-                            BOOLEAN win_func)
+int schedule_ntos_work_item(NTOS_WORK_FUNC func, void *arg1, void *arg2,
+                            enum worker_func_type func_type)
 {
-        struct wrap_work_item *wrap_work_item;
+        struct ntos_work_item *ntos_work_item;
         KIRQL irql;
 
-        wrap_work_item = kmalloc(sizeof(*wrap_work_item), GFP_ATOMIC);
-        if (!wrap_work_item) {
+        WORKENTER("adding work: %p, %p, %p", func, arg1, arg2);
+        ntos_work_item = kmalloc(sizeof(*ntos_work_item), GFP_ATOMIC);
+        if (!ntos_work_item) {
                 ERROR("couldn't allocate memory");
                 return -ENOMEM;
         }
-        wrap_work_item->func = func;
-        wrap_work_item->arg1 = arg1;
-        wrap_work_item->arg2 = arg2;
-        wrap_work_item->win_func = win_func;
-        irql = kspin_lock_irql(&wrap_work_item_list_lock, DISPATCH_LEVEL);
-        InsertTailList(&wrap_work_item_list, &wrap_work_item->list);
-        kspin_unlock_irql(&wrap_work_item_list_lock, irql);
-        schedule_work(&wrap_work_item_work);
-        return 0;
-}
-
-STDCALL void WRAP_EXPORT(KeInitializeSpinLock)
-        (KSPIN_LOCK *lock)
-{
-        TRACEENTER6("%p", lock);
-        kspin_lock_init(lock);
-}
-
-STDCALL void WRAP_EXPORT(KeAcquireSpinLock)
-        (KSPIN_LOCK *lock, KIRQL *irql)
-{
-        TRACEENTER6("%p", lock);
-        *irql = kspin_lock_irql(lock, DISPATCH_LEVEL);
-}
-
-STDCALL void WRAP_EXPORT(KeReleaseSpinLock)
-        (KSPIN_LOCK *lock, KIRQL oldirql)
-{
-        TRACEENTER6("%p", lock);
-        kspin_unlock_irql(lock, oldirql);
-}
-
-STDCALL void WRAP_EXPORT(KeAcquireSpinLockAtDpcLevel)
-        (KSPIN_LOCK *lock)
-{
-        TRACEENTER6("%p", lock);
-        kspin_lock(lock);
-}
-
-STDCALL void WRAP_EXPORT(KeRaiseIrql)
+        ntos_work_item->func = func;
+        ntos_work_item->arg1 = arg1;
+        ntos_work_item->arg2 = arg2;
+        ntos_work_item->func_type = func_type;
+        irql = nt_spin_lock_irql(&ntos_work_item_list_lock, DISPATCH_LEVEL);
+        InsertTailList(&ntos_work_item_list, &ntos_work_item->list);
+        nt_spin_unlock_irql(&ntos_work_item_list_lock, irql);
+        schedule_ntos_work(&ntos_work_item_work);
+        WORKEXIT(return 0);
+}
+
+wstdcall void WRAP_EXPORT(KeInitializeSpinLock)
+        (NT_SPIN_LOCK *lock)
+{
+        TRACEENTER6("%p", lock);
+        nt_spin_lock_init(lock);
+}
+
+wstdcall void WRAP_EXPORT(KeAcquireSpinLock)
+        (NT_SPIN_LOCK *lock, KIRQL *irql)
+{
+        TRACEENTER6("%p", lock);
+        *irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
+}
+
+wstdcall void WRAP_EXPORT(KeReleaseSpinLock)
+        (NT_SPIN_LOCK *lock, KIRQL oldirql)
+{
+        TRACEENTER6("%p", lock);
+        nt_spin_unlock_irql(lock, oldirql);
+}
+
+wstdcall void WRAP_EXPORT(KeAcquireSpinLockAtDpcLevel)
+        (NT_SPIN_LOCK *lock)
+{
+        TRACEENTER6("%p", lock);
+        nt_spin_lock(lock);
+}
+
+wstdcall void WRAP_EXPORT(KeReleaseSpinLockFromDpcLevel)
+        (NT_SPIN_LOCK *lock)
+{
+        TRACEENTER6("%p", lock);
+        nt_spin_unlock(lock);
+}
+
+wstdcall void WRAP_EXPORT(KeRaiseIrql)
1045
        (KIRQL newirql, KIRQL *oldirql)
921
1046
{
922
1047
        TRACEENTER6("%d", newirql);
923
1048
        *oldirql = raise_irql(newirql);
924
1049
}
925
1050
 
926
 
STDCALL KIRQL WRAP_EXPORT(KeRaiseIrqlToDpcLevel)
 
1051
wstdcall KIRQL WRAP_EXPORT(KeRaiseIrqlToDpcLevel)
927
1052
        (void)
928
1053
{
929
1054
        return raise_irql(DISPATCH_LEVEL);
930
1055
}
931
1056
 
932
 
STDCALL void WRAP_EXPORT(KeLowerIrql)
 
1057
wstdcall void WRAP_EXPORT(KeLowerIrql)
933
1058
        (KIRQL irql)
934
1059
{
935
1060
        TRACEENTER6("%d", irql);
936
1061
        lower_irql(irql);
937
1062
}
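KeAcquireSpinLock and KeReleaseSpinLock, together with KeRaiseIrql/KeLowerIrql above, bundle two operations: raise the IRQL to DISPATCH_LEVEL (remembering the caller's level) and take the lock, then undo both in reverse order on release. A minimal single-threaded model of that protocol, assuming plain ints stand in for the per-CPU IRQL state and the lock word:

#include <assert.h>

enum { PASSIVE_LEVEL = 0, DISPATCH_LEVEL = 2 };

static int cur_irql = PASSIVE_LEVEL;    /* per-CPU IRQL in the real thing */

static int lock_acquire(int *lock)      /* returns the previous IRQL */
{
        int old = cur_irql;

        cur_irql = DISPATCH_LEVEL;      /* raise first: no preemption */
        *lock = 1;                      /* stands in for the atomic spin */
        return old;
}

static void lock_release(int *lock, int old_irql)
{
        *lock = 0;                      /* drop the lock first */
        cur_irql = old_irql;            /* then restore the caller's level */
}

int main(void)
{
        int lock = 0;
        int irql = lock_acquire(&lock);

        assert(cur_irql == DISPATCH_LEVEL && lock == 1);
        lock_release(&lock, irql);
        assert(cur_irql == PASSIVE_LEVEL && lock == 0);
        return 0;
}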
 
-STDCALL KIRQL WRAP_EXPORT(KeAcquireSpinLockRaiseToDpc)
-        (KSPIN_LOCK *lock)
-{
-        TRACEENTER6("%p", lock);
-        return kspin_lock_irql(lock, DISPATCH_LEVEL);
-}
-
-STDCALL void WRAP_EXPORT(KeAcquireSpinLockdAtDpcLevel)
-        (KSPIN_LOCK *lock)
-{
-        TRACEENTER6("%p", lock);
-        kspin_lock(lock);
-}
-
-STDCALL void WRAP_EXPORT(KeReleaseSpinLockFromDpcLevel)
-        (KSPIN_LOCK *lock)
-{
-        TRACEENTER6("%p", lock);
-        kspin_unlock(lock);
-}
-
-_FASTCALL LONG WRAP_EXPORT(InterlockedDecrement)
-        (FASTCALL_DECL_1(LONG volatile *val))
-{
-        LONG x;
-        KIRQL irql;
-
-        TRACEENTER5("");
-        irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-        (*val)--;
-        x = *val;
-        kspin_unlock_irql(&inter_lock, irql);
-        TRACEEXIT5(return x);
-}
-
-_FASTCALL LONG WRAP_EXPORT(InterlockedIncrement)
-        (FASTCALL_DECL_1(LONG volatile *val))
-{
-        LONG x;
-        KIRQL irql;
-
-        TRACEENTER5("");
-        irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-        (*val)++;
-        x = *val;
-        kspin_unlock_irql(&inter_lock, irql);
-        TRACEEXIT5(return x);
-}
-
-_FASTCALL LONG WRAP_EXPORT(InterlockedExchange)
-        (FASTCALL_DECL_2(LONG volatile *target, LONG val))
-{
-        LONG x;
-        KIRQL irql;
-
-        TRACEENTER5("");
-        irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-        x = *target;
-        *target = val;
-        kspin_unlock_irql(&inter_lock, irql);
-        TRACEEXIT5(return x);
-}
-
-_FASTCALL LONG WRAP_EXPORT(InterlockedCompareExchange)
-        (FASTCALL_DECL_3(LONG volatile *dest, LONG xchg, LONG comperand))
-{
-        LONG x;
-        KIRQL irql;
-
-        TRACEENTER5("");
-        irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-        x = *dest;
-        if (*dest == comperand)
-                *dest = xchg;
-        kspin_unlock_irql(&inter_lock, irql);
-        TRACEEXIT5(return x);
-}
-
-_FASTCALL void WRAP_EXPORT(ExInterlockedAddLargeStatistic)
-        (FASTCALL_DECL_2(LARGE_INTEGER *plint, ULONG n))
-{
-        unsigned long flags;
-
-        TRACEENTER5("%p = %llu, n = %u", plint, *plint, n);
-        kspin_lock_irqsave(&inter_lock, flags);
-        *plint += n;
-        kspin_unlock_irqrestore(&inter_lock, flags);
-}
-
-STDCALL void *WRAP_EXPORT(ExAllocatePoolWithTag)
+wstdcall KIRQL WRAP_EXPORT(KeAcquireSpinLockRaiseToDpc)
+        (NT_SPIN_LOCK *lock)
+{
+        TRACEENTER6("%p", lock);
+        return nt_spin_lock_irql(lock, DISPATCH_LEVEL);
+}
+
+#undef ExAllocatePoolWithTag
+
+wstdcall void *WRAP_EXPORT(ExAllocatePoolWithTag)
1074
        (enum pool_type pool_type, SIZE_T size, ULONG tag)
1030
1075
{
1031
1076
        void *addr;
1032
1077
 
1033
1078
        TRACEENTER4("pool_type: %d, size: %lu, tag: %u", pool_type,
1034
1079
                    size, tag);
1035
 
 
1036
1080
        if (size <= KMALLOC_THRESHOLD) {
1037
1081
                if (current_irql() < DISPATCH_LEVEL)
1038
1082
                        addr = kmalloc(size, GFP_KERNEL);
1039
1083
                else
1040
1084
                        addr = kmalloc(size, GFP_ATOMIC);
1041
1085
        } else {
1042
 
                if (current_irql() > PASSIVE_LEVEL)
1043
 
                        ERROR("Windows driver allocating %ld bytes in "
1044
 
                              "atomic context", size);
1045
 
                addr = vmalloc(size);
 
1086
                if (current_irql() < DISPATCH_LEVEL)
 
1087
                        addr = vmalloc(size);
 
1088
                else
 
1089
                        addr = __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM,
 
1090
                                         PAGE_KERNEL);
1046
1091
        }
 
1092
        DBGTRACE4("addr: %p, %lu", addr, size);
1047
1093
        TRACEEXIT4(return addr);
1048
1094
}
1049
1095
 
1050
 
STDCALL void wrap_vfree(void *addr, void *ctx)
 
1096
static wstdcall void vfree_nonatomic(void *addr, void *ctx)
1051
1097
{
1052
1098
        vfree(addr);
1053
1099
}
1054
1100
 
1055
 
STDCALL void WRAP_EXPORT(ExFreePool)
 
1101
wstdcall void WRAP_EXPORT(ExFreePool)
1056
1102
        (void *addr)
1057
1103
{
1058
1104
        DBGTRACE4("addr: %p", addr);
1061
1107
                kfree(addr);
1062
1108
        else {
1063
1109
                if (in_interrupt())
1064
 
                        schedule_wrap_work_item(wrap_vfree, addr, NULL, FALSE);
 
1110
                        schedule_ntos_work_item(vfree_nonatomic, addr,
 
1111
                                                NULL, WORKER_FUNC_LINUX);
1065
1112
                else
1066
1113
                        vfree(addr);
1067
1114
        }
1068
 
        return;
 
1115
        TRACEEXIT4(return);
1069
1116
}
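ExFreePool above cannot call vfree from interrupt context, so it hands the pointer to a work item that frees it later in process context. The shape of that deferred-free pattern in user space (deferred_free and drain_deferred are invented names, and locking is omitted for brevity):

#include <stdlib.h>

struct deferred { void *ptr; struct deferred *next; };

static struct deferred *free_queue;     /* pending frees */

static void deferred_free(void *ptr)    /* called where freeing is unsafe */
{
        struct deferred *d = malloc(sizeof(*d));

        if (!d)
                return;                 /* leak rather than crash */
        d->ptr = ptr;
        d->next = free_queue;
        free_queue = d;
}

static void drain_deferred(void)        /* called later, in a safe context */
{
        while (free_queue) {
                struct deferred *d = free_queue;

                free_queue = d->next;
                free(d->ptr);
                free(d);
        }
}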
 
-STDCALL void WRAP_EXPORT(ExFreePoolWithTag)
+wstdcall void WRAP_EXPORT(ExFreePoolWithTag)
         (void *addr, ULONG tag)
 {
         ExFreePool(addr);
@@ -1077 +1124 @@
 WRAP_FUNC_PTR_DECL(ExAllocatePoolWithTag)
 WRAP_FUNC_PTR_DECL(ExFreePool)
 
-STDCALL void WRAP_EXPORT(ExInitializeNPagedLookasideList)
+wstdcall void WRAP_EXPORT(ExInitializeNPagedLookasideList)
         (struct npaged_lookaside_list *lookaside,
          LOOKASIDE_ALLOC_FUNC *alloc_func, LOOKASIDE_FREE_FUNC *free_func,
          ULONG flags, SIZE_T size, ULONG tag, USHORT depth)
 {
         TRACEENTER3("lookaside: %p, size: %lu, flags: %u, head: %p, "
                     "alloc: %p, free: %p", lookaside, size, flags,
-                    lookaside->head.list.next, alloc_func, free_func);
+                    lookaside, alloc_func, free_func);
 
         memset(lookaside, 0, sizeof(*lookaside));
 
@@ -1105 +1152 @@
                 lookaside->free_func = (LOOKASIDE_FREE_FUNC *)
                         WRAP_FUNC_PTR(ExFreePool);
 
-#ifndef X86_64
+#ifndef CONFIG_X86_64
         DBGTRACE3("lock: %p", &lookaside->obsolete);
-        kspin_lock_init(&lookaside->obsolete);
+        nt_spin_lock_init(&lookaside->obsolete);
 #endif
         TRACEEXIT3(return);
 }
 
-STDCALL void WRAP_EXPORT(ExDeleteNPagedLookasideList)
+wstdcall void WRAP_EXPORT(ExDeleteNPagedLookasideList)
         (struct npaged_lookaside_list *lookaside)
 {
         struct nt_slist *entry;
@@ -1120 +1167 @@
 
         TRACEENTER3("lookaside = %p", lookaside);
         irql = raise_irql(DISPATCH_LEVEL);
-        while ((entry = ExpInterlockedPopEntrySList(&lookaside->head)))
-                ExFreePool(entry);
+        while ((entry = ExpInterlockedPopEntrySList(&lookaside->head))) {
+                if (lookaside->free_func == (LOOKASIDE_FREE_FUNC *)
+                    WRAP_FUNC_PTR(ExFreePool))
+                        ExFreePool(entry);
+                else
+                        LIN2WIN1(lookaside->free_func, entry);
+        }
         lower_irql(irql);
-        TRACEEXIT5(return);
+        TRACEEXIT3(return);
 }
1180
 
1129
 
STDCALL NTSTATUS WRAP_EXPORT(ExCreateCallback)
 
1181
#if defined(ALLOC_DEBUG) && ALLOC_DEBUG > 1
 
1182
#define ExAllocatePoolWithTag(pool_type, size, tag)                     \
 
1183
        wrap_ExAllocatePoolWithTag(pool_type, size, tag, __FILE__, __LINE__)
 
1184
#endif
 
1185
 
 
1186
wstdcall NTSTATUS WRAP_EXPORT(ExCreateCallback)
1130
1187
        (struct callback_object **object, struct object_attributes *attributes,
1131
1188
         BOOLEAN create, BOOLEAN allow_multiple_callbacks)
1132
1189
{
1134
1191
        KIRQL irql;
1135
1192
 
1136
1193
        TRACEENTER2("");
1137
 
        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
 
1194
        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
1138
1195
        nt_list_for_each_entry(obj, &callback_objects, callback_funcs) {
1139
1196
                if (obj->attributes == attributes) {
1140
 
                        kspin_unlock_irql(&ntoskernel_lock, irql);
 
1197
                        nt_spin_unlock_irql(&ntoskernel_lock, irql);
1141
1198
                        *object = obj;
1142
1199
                        return STATUS_SUCCESS;
1143
1200
                }
1144
1201
        }
1145
 
        kspin_unlock_irql(&ntoskernel_lock, irql);
 
1202
        nt_spin_unlock_irql(&ntoskernel_lock, irql);
1146
1203
        obj = allocate_object(sizeof(struct callback_object),
1147
1204
                              OBJECT_TYPE_CALLBACK, NULL);
1148
1205
        if (!obj)
1149
1206
                TRACEEXIT2(return STATUS_INSUFFICIENT_RESOURCES);
1150
1207
        InitializeListHead(&obj->callback_funcs);
1151
 
        kspin_lock_init(&obj->lock);
 
1208
        nt_spin_lock_init(&obj->lock);
1152
1209
        obj->allow_multiple_callbacks = allow_multiple_callbacks;
1153
1210
        obj->attributes = attributes;
1154
1211
        *object = obj;
1155
1212
        TRACEEXIT2(return STATUS_SUCCESS);
1156
1213
}
1157
1214
 
1158
 
STDCALL void *WRAP_EXPORT(ExRegisterCallback)
 
1215
wstdcall void *WRAP_EXPORT(ExRegisterCallback)
1159
1216
        (struct callback_object *object, PCALLBACK_FUNCTION func,
1160
1217
         void *context)
1161
1218
{
1163
1220
        KIRQL irql;
1164
1221
 
1165
1222
        TRACEENTER2("");
1166
 
        irql = kspin_lock_irql(&object->lock, DISPATCH_LEVEL);
 
1223
        irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
1167
1224
        if (object->allow_multiple_callbacks == FALSE &&
1168
1225
            !IsListEmpty(&object->callback_funcs)) {
1169
 
                kspin_unlock_irql(&object->lock, irql);
 
1226
                nt_spin_unlock_irql(&object->lock, irql);
1170
1227
                TRACEEXIT2(return NULL);
1171
1228
        }
1172
 
        kspin_unlock_irql(&ntoskernel_lock, irql);
 
1229
        nt_spin_unlock_irql(&object->lock, irql);
1173
1230
        callback = kmalloc(sizeof(*callback), GFP_KERNEL);
1174
1231
        if (!callback) {
1175
1232
                ERROR("couldn't allocate memory");
1178
1235
        callback->func = func;
1179
1236
        callback->context = context;
1180
1237
        callback->object = object;
1181
 
        irql = kspin_lock_irql(&object->lock, DISPATCH_LEVEL);
 
1238
        irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
1182
1239
        InsertTailList(&object->callback_funcs, &callback->list);
1183
 
        kspin_unlock_irql(&object->lock, irql);
 
1240
        nt_spin_unlock_irql(&object->lock, irql);
1184
1241
        TRACEEXIT2(return callback);
1185
1242
}
1186
1243
 
1187
 
STDCALL void WRAP_EXPORT(ExUnregisterCallback)
 
1244
wstdcall void WRAP_EXPORT(ExUnregisterCallback)
1188
1245
        (struct callback_func *callback)
1189
1246
{
1190
1247
        struct callback_object *object;
1194
1251
        if (!callback)
1195
1252
                return;
1196
1253
        object = callback->object;
1197
 
        irql = kspin_lock_irql(&object->lock, DISPATCH_LEVEL);
 
1254
        irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
1198
1255
        RemoveEntryList(&callback->list);
1199
 
        kspin_unlock_irql(&object->lock, irql);
 
1256
        nt_spin_unlock_irql(&object->lock, irql);
1200
1257
        kfree(callback);
1201
1258
        return;
1202
1259
}
1203
1260
 
1204
 
STDCALL void WRAP_EXPORT(ExNotifyCallback)
 
1261
wstdcall void WRAP_EXPORT(ExNotifyCallback)
1205
1262
        (struct callback_object *object, void *arg1, void *arg2)
1206
1263
{
1207
1264
        struct callback_func *callback;
1208
1265
        KIRQL irql;
1209
1266
 
1210
1267
        TRACEENTER3("%p", object);
1211
 
        irql = kspin_lock_irql(&object->lock, DISPATCH_LEVEL);
 
1268
        irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
1212
1269
        nt_list_for_each_entry(callback, &object->callback_funcs, list) {
1213
1270
                LIN2WIN3(callback->func, callback->context, arg1, arg2);
1214
1271
        }
1215
 
        kspin_unlock_irql(&object->lock, irql);
 
1272
        nt_spin_unlock_irql(&object->lock, irql);
1216
1273
        return;
1217
1274
}
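
/*
 * How the four callback routines above fit together, sketched as a driver
 * would use them; 'on_power_change' and the attributes pointer are
 * hypothetical, the exported names and signatures are the ones above:
 */
static void wstdcall on_power_change(void *context, void *arg1, void *arg2)
{
        /* invoked from ExNotifyCallback through the LIN2WIN3 thunk */
}

static void callback_example(struct object_attributes *attr)
{
        struct callback_object *obj;
        void *handle;

        if (ExCreateCallback(&obj, attr, TRUE, TRUE) != STATUS_SUCCESS)
                return;
        handle = ExRegisterCallback(obj, on_power_change, NULL);
        if (!handle)
                return;
        ExNotifyCallback(obj, NULL, NULL);      /* runs on_power_change */
        ExUnregisterCallback(handle);
}
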
1218
1275
 
1219
 
/* check and set signaled state; should be called with nt_event_lock held */
1220
 
/* @reset indicates if the event should be reset to not-signaled state
 
1276
/* check and set signaled state; should be called with dispatcher_lock held */
 
1277
/* @grab indicates if the event should be put in not-signaled state
1221
1278
 * - note that a semaphore may stay in signaled state for multiple
1222
 
 * 'resets' if the count is > 1 */
1223
 
static int inline check_reset_signaled_state(void *object,
1224
 
                                             struct nt_thread *thread,
1225
 
                                             int reset)
 
1279
 * 'grabs' if the count is > 1 */
 
1280
static int check_grab_signaled_state(struct dispatcher_header *dh,
 
1281
                                     struct task_struct *thread, int grab)
1226
1282
{
1227
 
        struct dispatch_header *dh;
1228
 
        struct nt_mutex *nt_mutex;
1229
 
 
1230
 
        dh = object;
1231
 
        nt_mutex = container_of(object, struct nt_mutex, dh);
1232
 
 
 
1283
        EVENTTRACE("%p, %p, %d, %d", dh, thread, grab, dh->signal_state);
1233
1284
        if (is_mutex_dh(dh)) {
 
1285
                struct nt_mutex *nt_mutex;
1234
1286
                /* either no thread owns the mutex or this thread owns
1235
1287
                 * it */
1236
 
                if (nt_mutex->owner_thread == NULL ||
1237
 
                    nt_mutex->owner_thread == thread) {
1238
 
                        assert(nt_mutex->owner_thread == NULL &&
1239
 
                               dh->signal_state == 1);
1240
 
                        if (reset) {
 
1288
                nt_mutex = container_of(dh, struct nt_mutex, dh);
 
1289
                EVENTTRACE("%p, %p", nt_mutex, nt_mutex->owner_thread);
 
1290
                assert(dh->signal_state <= 1);
 
1291
                assert(nt_mutex->owner_thread == NULL &&
 
1292
                       dh->signal_state == 1);
 
1293
                if (dh->signal_state > 0 || nt_mutex->owner_thread == thread) {
 
1294
                        if (grab) {
1241
1295
                                dh->signal_state--;
1242
1296
                                nt_mutex->owner_thread = thread;
1243
1297
                        }
1244
 
                        return 1;
 
1298
                        EVENTEXIT(return 1);
1245
1299
                }
1246
1300
        } else if (dh->signal_state > 0) {
1247
 
                /* if resetting, decrement signal_state for
 
1301
                /* if grab, decrement signal_state for
1248
1302
                 * synchronization or semaphore objects */
1249
 
                if (reset && (dh->type == SynchronizationEvent ||
1250
 
                              is_semaphore_dh(dh)))
 
1303
                if (grab && (dh->type == SynchronizationObject ||
 
1304
                             is_semaphore_dh(dh)))
1251
1305
                        dh->signal_state--;
1252
 
                return 1;
 
1306
                EVENTEXIT(return 1);
1253
1307
        }
1254
 
        return 0;
 
1308
        EVENTEXIT(return 0);
1255
1309
}
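
/*
 * A worked example of the grab rules above, for a semaphore created with
 * count 2 (caller holds dispatcher_lock, as required); mutexes differ in
 * that the owning thread may grab recursively, driving signal_state below
 * zero, while notification-type objects are never decremented:
 */
static void grab_semantics_example(struct nt_semaphore *s)
{
        /* dh.signal_state == 2 after KeInitializeSemaphore(s, 2, limit) */
        check_grab_signaled_state(&s->dh, current, 1);  /* 2 -> 1, returns 1 */
        check_grab_signaled_state(&s->dh, current, 1);  /* 1 -> 0, returns 1 */
        check_grab_signaled_state(&s->dh, current, 1);  /* stays 0, returns 0 */
}
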
1256
1310
 
1257
 
/* this function should be called holding nt_event_lock spinlock at
 
1311
/* this function should be called holding dispatcher_lock spinlock at
1258
1312
 * DISPATCH_LEVEL */
1259
 
static void wakeup_threads(struct dispatch_header *dh)
 
1313
static void wakeup_threads(struct dispatcher_header *dh)
1260
1314
{
1261
 
        struct wait_block *wb;
 
1315
        struct nt_list *cur, *next;
 
1316
        struct wait_block *wb = NULL;
1262
1317
 
1263
 
        EVENTENTER("dh: %p", dh);
1264
 
        nt_list_for_each_entry(wb, &dh->wait_blocks, list) {
1265
 
                EVENTTRACE("wait block: %p, thread: %p", wb, wb->thread);
1266
 
                assert(wb->thread != NULL && wb->object == dh);
 
1318
        nt_list_for_each_safe(cur, next, &dh->wait_blocks) {
 
1319
                wb = container_of(cur, struct wait_block, list);
 
1320
                EVENTTRACE("%p: wait block: %p, thread: %p",
 
1321
                           dh, wb, wb->thread);
 
1322
                assert(wb->thread != NULL);
 
1323
                assert(wb->object == NULL);
1267
1324
                if (wb->thread &&
1268
 
                    check_reset_signaled_state(dh, wb->thread, 0)) {
1269
 
                        EVENTTRACE("waking up task: %p", wb->thread->task);
1270
 
                        wb->thread->event_wait_done = 1;
1271
 
                        wake_up(&wb->thread->event_wq);
1272
 
#if 0
1273
 
                        /* DDK says only one thread will be woken up,
1274
 
                         * but we let each waking thread to check if
1275
 
                         * the object is in signaled state anyway */
1276
 
                        if (dh->type == SynchronizationEvent)
 
1325
                    check_grab_signaled_state(dh, wb->thread, 1)) {
 
1326
                        struct thread_event_wait *thread_wait = wb->thread_wait;
 
1327
                        EVENTTRACE("%p: waking up task %p for %p", thread_wait,
 
1328
                                   wb->thread, dh);
 
1329
                        RemoveEntryList(&wb->list);
 
1330
                        wb->object = dh;
 
1331
                        thread_wait->done = 1;
 
1332
                        wake_up(&thread_wait->wq_head);
 
1333
                        if (dh->type == SynchronizationObject)
1277
1334
                                break;
1278
 
#endif
1279
1335
                } else
1280
 
                        EVENTTRACE("not waking up task: %p",
1281
 
                                   wb->thread->task);
 
1336
                        EVENTTRACE("not waking up task: %p", wb->thread);
1282
1337
        }
1283
1338
        EVENTEXIT(return);
1284
1339
}
1285
1340
 
1286
 
STDCALL NTSTATUS WRAP_EXPORT(KeWaitForMultipleObjects)
 
1341
/* We need a wait queue to implement the KeWaitFor routines
 
1342
 * below. (get/put)_thread_event_wait give out/take back a wait queue. Both
 
1343
 * of these are called holding the dispatcher spinlock, so no locking here */
 
1344
static inline struct thread_event_wait *get_thread_event_wait(void)
 
1345
{
 
1346
        struct thread_event_wait *thread_event_wait;
 
1347
 
 
1348
        if (thread_event_wait_pool) {
 
1349
                thread_event_wait = thread_event_wait_pool;
 
1350
                thread_event_wait_pool = thread_event_wait_pool->next;
 
1351
        } else {
 
1352
                thread_event_wait = kmalloc(sizeof(*thread_event_wait),
 
1353
                                            GFP_ATOMIC);
 
1354
                if (!thread_event_wait) {
 
1355
                        WARNING("couldn't allocate memory");
 
1356
                        return NULL;
 
1357
                }
 
1358
                EVENTTRACE("allocated wq: %p", thread_event_wait);
 
1359
                init_waitqueue_head(&thread_event_wait->wq_head);
 
1360
        }
 
1361
#ifdef EVENT_DEBUG
 
1362
        thread_event_wait->task = current;
 
1363
#endif
 
1364
        EVENTTRACE("%p, %p, %p", thread_event_wait, current,
 
1365
                   thread_event_wait_pool);
 
1366
        thread_event_wait->done = 0;
 
1367
        return thread_event_wait;
 
1368
}
 
1369
 
 
1370
static void put_thread_event_wait(struct thread_event_wait *thread_event_wait)
 
1371
{
 
1372
        EVENTENTER("%p, %p", thread_event_wait, current);
 
1373
#ifdef EVENT_DEBUG
 
1374
        if (thread_event_wait->task != current)
 
1375
                ERROR("argh, task %p should be %p",
 
1376
                      current, thread_event_wait->task);
 
1377
        thread_event_wait->task = NULL;
 
1378
#endif
 
1379
        thread_event_wait->next = thread_event_wait_pool;
 
1380
        thread_event_wait_pool = thread_event_wait;
 
1381
        thread_event_wait->done = 0;
 
1382
}
 
1383
 
 
1384
wstdcall NTSTATUS WRAP_EXPORT(KeWaitForMultipleObjects)
1287
1385
        (ULONG count, void *object[], enum wait_type wait_type,
1288
1386
         KWAIT_REASON wait_reason, KPROCESSOR_MODE wait_mode,
1289
1387
         BOOLEAN alertable, LARGE_INTEGER *timeout,
1292
1390
        int i, res = 0, wait_count;
1293
1391
        long wait_jiffies = 0;
1294
1392
        struct wait_block *wb, wb_array[THREAD_WAIT_OBJECTS];
1295
 
        struct dispatch_header *dh;
 
1393
        struct dispatcher_header *dh;
 
1394
        struct task_struct *thread;
 
1395
        struct thread_event_wait *thread_wait;
1296
1396
        KIRQL irql;
1297
 
        struct nt_thread *thread;
1298
 
        struct task_struct *task;
1299
 
 
1300
 
        task = get_current();
1301
 
        EVENTENTER("task: %p, count = %d, type: %d, reason = %u, "
1302
 
                   "waitmode = %u, alertable = %u, timeout = %p", task, count,
1303
 
                   wait_type, wait_reason, wait_mode, alertable, timeout);
1304
 
 
1305
 
        thread = KeGetCurrentThread();
1306
 
        EVENTTRACE("thread: %p", thread);
1307
 
        if (thread == NULL)
1308
 
                EVENTEXIT(return STATUS_RESOURCES);
 
1397
 
 
1398
        thread = current;
 
1399
        EVENTTRACE("thread: %p count = %d, type: %d, reason = %u, "
 
1400
                   "waitmode = %u, alertable = %u, timeout = %p", thread,
 
1401
                   count, wait_type, wait_reason, wait_mode, alertable,
 
1402
                   timeout);
1309
1403
 
1310
1404
        if (count > MAX_WAIT_OBJECTS)
1311
1405
                EVENTEXIT(return STATUS_INVALID_PARAMETER);
1319
1413
 
1320
1414
        /* TODO: should we allow threads to wait in non-alertable state? */
1321
1415
        alertable = TRUE;
1322
 
        irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
1323
 
        /* first check if the wait can be satisfied or not, without
1324
 
         * grabbing the objects, except in the case of WaitAny */
 
1416
        irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
 
1417
        /* If *timeout == 0: In the case of WaitAny, if an object can
 
1418
         * be grabbed (object is in signaled state), grab and
 
1419
         * return. In the case of WaitAll, we have to first make sure
 
1420
         * all objects can be grabbed. If any/some of them can't be
 
1421
         * grabbed, either we return STATUS_TIMEOUT or wait for them,
 
1422
         * depending on how to satisfy wait. If all of them can be
 
1423
         * grabbed, we will grab them in the next loop below */
 
1424
 
1325
1425
        for (i = wait_count = 0; i < count; i++) {
1326
1426
                dh = object[i];
1327
1427
                EVENTTRACE("%p: event %p state: %d",
1328
 
                           task, dh, dh->signal_state);
 
1428
                           thread, dh, dh->signal_state);
1329
1429
                /* wait_type == 1 for WaitAny, 0 for WaitAll */
1330
 
                if (check_reset_signaled_state(dh, thread, wait_type)) {
 
1430
                if (check_grab_signaled_state(dh, thread, wait_type)) {
1331
1431
                        if (wait_type == WaitAny) {
1332
 
                                kspin_unlock_irql(&nt_event_lock, irql);
 
1432
                                nt_spin_unlock_irql(&dispatcher_lock, irql);
1333
1433
                                if (count > 1)
1334
1434
                                        EVENTEXIT(return STATUS_WAIT_0 + i);
1335
1435
                                else
1336
1436
                                        EVENTEXIT(return STATUS_SUCCESS);
1337
1437
                        }
1338
 
                } else
 
1438
                } else {
 
1439
                        EVENTTRACE("%p: wait for %p", thread, dh);
1339
1440
                        wait_count++;
1340
 
        }
1341
 
        if (timeout && *timeout == 0 && wait_count) {
1342
 
                kspin_unlock_irql(&nt_event_lock, irql);
1343
 
                EVENTEXIT(return STATUS_TIMEOUT);
1344
 
        }
1345
 
 
1346
 
        /* get the list of objects the thread (task) needs to wait on
1347
 
         * and add the thread on the wait list for each such object */
1348
 
        /* if *timeout == 0, this step will grab the objects */
1349
 
        thread->event_wait_done = 0;
1350
 
        for (i = wait_count = 0; i < count; i++) {
 
1441
                }
 
1442
        }
 
1443
 
 
1444
        if (wait_count) {
 
1445
                if (timeout && *timeout == 0) {
 
1446
                        nt_spin_unlock_irql(&dispatcher_lock, irql);
 
1447
                        EVENTEXIT(return STATUS_TIMEOUT);
 
1448
                }
 
1449
                thread_wait = get_thread_event_wait();
 
1450
                if (!thread_wait) {
 
1451
                        nt_spin_unlock_irql(&dispatcher_lock, irql);
 
1452
                        EVENTEXIT(return STATUS_RESOURCES);
 
1453
                }
 
1454
        } else
 
1455
                thread_wait = NULL;
 
1456
 
 
1457
        /* get the list of objects the thread needs to wait on and add
 
1458
         * the thread on the wait list for each such object */
 
1459
        /* if *timeout == 0, this step will grab all the objects */
 
1460
        for (i = 0; i < count; i++) {
1351
1461
                dh = object[i];
1352
1462
                EVENTTRACE("%p: event %p state: %d",
1353
 
                           task, dh, dh->signal_state);
1354
 
                if (check_reset_signaled_state(dh, thread, 1)) {
 
1463
                           thread, dh, dh->signal_state);
 
1464
                wb[i].object = NULL;
 
1465
                wb[i].thread_wait = thread_wait;
 
1466
                if (check_grab_signaled_state(dh, thread, 1)) {
1355
1467
                        EVENTTRACE("%p: event %p already signaled: %d",
1356
 
                                   task, dh, dh->signal_state);
 
1468
                                   thread, dh, dh->signal_state);
1357
1469
                        /* mark that we are not waiting on this object */
1358
1470
                        wb[i].thread = NULL;
1359
 
                        wb[i].object = NULL;
1360
1471
                } else {
1361
1472
                        assert(timeout == NULL || *timeout != 0);
 
1473
                        assert(thread_wait != NULL);
1362
1474
                        wb[i].thread = thread;
1363
 
                        wb[i].object = dh;
 
1475
                        EVENTTRACE("%p: need to wait on event %p", thread, dh);
1364
1476
                        InsertTailList(&dh->wait_blocks, &wb[i].list);
1365
 
                        wait_count++;
1366
 
                        EVENTTRACE("%p: waiting on event %p", task, dh);
1367
1477
                }
1368
1478
        }
1369
 
        kspin_unlock_irql(&nt_event_lock, irql);
1370
 
 
1371
 
        if (wait_count == 0)
 
1479
        nt_spin_unlock_irql(&dispatcher_lock, irql);
 
1480
        if (wait_count == 0) {
 
1481
                assert(thread_wait == NULL);
1372
1482
                EVENTEXIT(return STATUS_SUCCESS);
 
1483
        }
 
1484
        assert(thread_wait);
1373
1485
 
1374
1486
        assert(timeout == NULL || *timeout != 0);
1375
1487
        if (timeout == NULL)
1376
1488
                wait_jiffies = 0;
1377
1489
        else
1378
1490
                wait_jiffies = SYSTEM_TIME_TO_HZ(*timeout) + 1;
1379
 
        EVENTTRACE("%p: sleeping for %ld", task, wait_jiffies);
 
1491
        EVENTTRACE("%p: sleeping for %ld on %p",
 
1492
                   thread, wait_jiffies, thread_wait);
1380
1493
 
1381
1494
        while (wait_count) {
1382
1495
                if (wait_jiffies) {
1383
 
                        if (alertable)
1384
 
                                res = wait_event_interruptible_timeout(
1385
 
                                        thread->event_wq,
1386
 
                                        (thread->event_wait_done == 1),
1387
 
                                        wait_jiffies);
1388
 
                        else
1389
 
                                res = wait_event_timeout(
1390
 
                                        thread->event_wq,
1391
 
                                        (thread->event_wait_done == 1),
1392
 
                                        wait_jiffies);
 
1496
                        res = wait_event_interruptible_timeout(
 
1497
                                thread_wait->wq_head, (thread_wait->done == 1),
 
1498
                                wait_jiffies);
1393
1499
                } else {
1394
 
                        if (alertable)
1395
 
                                wait_event_interruptible(thread->event_wq,
1396
 
                                           (thread->event_wait_done == 1));
1397
 
                        else
1398
 
                                wait_event(thread->event_wq,
1399
 
                                           (thread->event_wait_done == 1));
 
1500
                        wait_event_interruptible(
 
1501
                                thread_wait->wq_head, (thread_wait->done == 1));
1400
1502
                        /* mark that it didn't timeout */
1401
1503
                        res = 1;
1402
1504
                }
1403
 
                irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
1404
 
                thread->event_wait_done = 0;
 
1505
                thread_wait->done = 0;
 
1506
                irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
1405
1507
                if (signal_pending(current))
1406
1508
                        res = -ERESTARTSYS;
1407
 
                EVENTTRACE("%p: woke up, res = %d", task, res);
 
1509
                EVENTTRACE("%p woke up on %p, res = %d, done: %d", thread,
 
1510
                           thread_wait, res, thread_wait->done);
 
1511
#ifdef EVENT_DEBUG
 
1512
                if (thread_wait->task != current)
 
1513
                        ERROR("%p: argh, task %p should be %p", thread_wait,
 
1514
                              thread_wait->task, current);
 
1515
#endif
1408
1516
//              assert(res < 0 && alertable);
1409
1517
                if (res <= 0) {
1410
1518
                        /* timed out or interrupted; remove from wait list */
1411
1519
                        for (i = 0; i < count; i++) {
1412
1520
                                if (!wb[i].thread)
1413
1521
                                        continue;
1414
 
                                EVENTTRACE("%p: timedout, deq'ing %p",
1415
 
                                           task, wb[i].object);
 
1522
                                EVENTTRACE("%p: timedout, deq'ing %p (%p)",
 
1523
                                           thread, object[i], wb[i].object);
1416
1524
                                RemoveEntryList(&wb[i].list);
1417
 
                                wb[i].thread = NULL;
1418
 
                                wb[i].object = NULL;
1419
1525
                        }
1420
 
                        kspin_unlock_irql(&nt_event_lock, irql);
 
1526
                        put_thread_event_wait(thread_wait);
 
1527
                        nt_spin_unlock_irql(&dispatcher_lock, irql);
1421
1528
                        if (res < 0)
1422
1529
                                EVENTEXIT(return STATUS_ALERTED);
1423
1530
                        else
1427
1534
                for (i = 0; wait_count && i < count; i++) {
1428
1535
                        if (!wb[i].thread)
1429
1536
                                continue;
1430
 
                        dh = object[i];
1431
 
                        if (!check_reset_signaled_state(dh, thread, 1))
 
1537
                        EVENTTRACE("object: %p, %p", object[i], wb[i].object);
 
1538
                        if (!wb[i].object) {
 
1539
                                EVENTTRACE("not woken for %p", object[i]);
1432
1540
                                continue;
1433
 
                        RemoveEntryList(&wb[i].list);
 
1541
                        }
 
1542
                        DBG_BLOCK(1) {
 
1543
                                if (wb[i].object != object[i]) {
 
1544
                                        ERROR("argh, event not signalled? "
 
1545
                                              "%p, %p", wb[i].object,
 
1546
                                              object[i]);
 
1547
                                        continue;
 
1548
                                }
 
1549
                        }
 
1550
                        wb[i].object = NULL;
 
1551
                        wb[i].thread = NULL;
1434
1552
                        wait_count--;
1435
1553
                        if (wait_type == WaitAny) {
1436
1554
                                int j;
1437
1555
                                /* done; remove from rest of wait list */
1438
1556
                                for (j = i; j < count; j++)
1439
 
                                        if (wb[j].thread && wb[j].object)
 
1557
                                        if (wb[j].thread)
1440
1558
                                                RemoveEntryList(&wb[j].list);
1441
 
                                kspin_unlock_irql(&nt_event_lock, irql);
1442
 
                                if (count > 1)
1443
 
                                        EVENTEXIT(return STATUS_WAIT_0 + i);
1444
 
                                else
1445
 
                                        EVENTEXIT(return STATUS_SUCCESS);
 
1559
                                put_thread_event_wait(thread_wait);
 
1560
                                nt_spin_unlock_irql(&dispatcher_lock, irql);
 
1561
                                EVENTEXIT(return STATUS_WAIT_0 + i);
1446
1562
                        }
1447
1563
                }
1448
1564
                if (wait_count == 0) {
1449
 
                        kspin_unlock_irql(&nt_event_lock, irql);
 
1565
                        put_thread_event_wait(thread_wait);
 
1566
                        nt_spin_unlock_irql(&dispatcher_lock, irql);
1450
1567
                        EVENTEXIT(return STATUS_SUCCESS);
1451
1568
                }
1452
1569
                /* this thread is still waiting for more objects, so
1457
1574
                        wait_jiffies = res;
1458
1575
                else
1459
1576
                        wait_jiffies = 0;
1460
 
                kspin_unlock_irql(&nt_event_lock, irql);
 
1577
                nt_spin_unlock_irql(&dispatcher_lock, irql);
1461
1578
        }
1462
1579
        /* this should never be reached, but the compiler wants a return value */
1463
 
        ERROR("%p: wait_jiffies: %ld", task, wait_jiffies);
 
1580
        ERROR("%p: wait_jiffies: %ld", thread, wait_jiffies);
1464
1581
        EVENTEXIT(return STATUS_SUCCESS);
1465
1582
}
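
/*
 * The return-value convention of the wait loop above, sketched for WaitAny
 * over two objects: the status encodes which object satisfied the wait
 * ('ev_a' and 'ev_b' are hypothetical, already-initialized events; 0 is
 * passed for the unused wait_reason/wait_mode arguments):
 */
static void wait_any_example(struct nt_event *ev_a, struct nt_event *ev_b)
{
        void *objs[2] = { ev_a, ev_b };
        NTSTATUS status;

        status = KeWaitForMultipleObjects(2, objs, WaitAny, 0, 0, TRUE,
                                          NULL /* no timeout */, NULL);
        if (status == STATUS_WAIT_0) {
                /* objs[0] was signaled */
        } else if (status == STATUS_WAIT_0 + 1) {
                /* objs[1] was signaled */
        }
}
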
1466
1583
 
1467
 
STDCALL NTSTATUS WRAP_EXPORT(KeWaitForSingleObject)
 
1584
wstdcall NTSTATUS WRAP_EXPORT(KeWaitForSingleObject)
1468
1585
        (void *object, KWAIT_REASON wait_reason, KPROCESSOR_MODE wait_mode,
1469
1586
         BOOLEAN alertable, LARGE_INTEGER *timeout)
1470
1587
{
1472
1589
                                        wait_mode, alertable, timeout, NULL);
1473
1590
}
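
/*
 * Typical single-object use of the wrapper above: block on an event for at
 * most 100 ms. Following the NT convention, a negative timeout is a
 * relative interval in 100 ns units (LARGE_INTEGER is a plain 64-bit value
 * here); *timeout == 0 would poll and return STATUS_TIMEOUT if unsignaled:
 */
static NTSTATUS wait_event_100ms(struct nt_event *event)
{
        LARGE_INTEGER timeout = -100 * 10000;   /* 100 ms, relative */

        return KeWaitForSingleObject(event, 0, 0, TRUE, &timeout);
}
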
1474
1591
 
1475
 
STDCALL void WRAP_EXPORT(KeInitializeEvent)
 
1592
wstdcall void WRAP_EXPORT(KeInitializeEvent)
1476
1593
        (struct nt_event *nt_event, enum event_type type, BOOLEAN state)
1477
1594
{
1478
1595
        KIRQL irql;
1479
1596
 
1480
1597
        EVENTENTER("event = %p, type = %d, state = %d", nt_event, type, state);
1481
 
//      dump_bytes(__FUNCTION__, __builtin_return_address(0), 20);
1482
 
        irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
1483
 
        initialize_dh(&nt_event->dh, type, state, DH_NT_EVENT);
1484
 
        kspin_unlock_irql(&nt_event_lock, irql);
 
1598
        irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
 
1599
        initialize_dh(&nt_event->dh, type, state);
 
1600
        nt_spin_unlock_irql(&dispatcher_lock, irql);
1485
1601
        EVENTEXIT(return);
1486
1602
}
1487
1603
 
1488
 
STDCALL LONG WRAP_EXPORT(KeSetEvent)
 
1604
wstdcall LONG WRAP_EXPORT(KeSetEvent)
1489
1605
        (struct nt_event *nt_event, KPRIORITY incr, BOOLEAN wait)
1490
1606
{
1491
1607
        LONG old_state;
1495
1611
                   nt_event, nt_event->dh.type, wait);
1496
1612
        if (wait == TRUE)
1497
1613
                WARNING("wait = %d, not yet implemented", wait);
1498
 
 
1499
 
        irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
 
1614
        irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
1500
1615
        old_state = nt_event->dh.signal_state;
1501
1616
        nt_event->dh.signal_state = 1;
1502
 
        wakeup_threads(&nt_event->dh);
1503
 
        kspin_unlock_irql(&nt_event_lock, irql);
 
1617
        if (old_state == 0)
 
1618
                wakeup_threads(&nt_event->dh);
 
1619
        nt_spin_unlock_irql(&dispatcher_lock, irql);
1504
1620
        EVENTEXIT(return old_state);
1505
1621
}
1506
1622
 
1507
 
STDCALL void WRAP_EXPORT(KeClearEvent)
 
1623
wstdcall void WRAP_EXPORT(KeClearEvent)
1508
1624
        (struct nt_event *nt_event)
1509
1625
{
1510
 
        KIRQL irql;
1511
 
 
1512
1626
        EVENTENTER("event = %p", nt_event);
1513
 
        irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
1514
 
        nt_event->dh.signal_state = 0;
1515
 
        kspin_unlock_irql(&nt_event_lock, irql);
 
1627
        (void)xchg(&nt_event->dh.signal_state, 0);
1516
1628
        EVENTEXIT(return);
1517
1629
}
1518
1630
 
1519
 
STDCALL LONG WRAP_EXPORT(KeResetEvent)
 
1631
wstdcall LONG WRAP_EXPORT(KeResetEvent)
1520
1632
        (struct nt_event *nt_event)
1521
1633
{
1522
1634
        LONG old_state;
1523
 
        KIRQL irql;
1524
 
 
1525
 
        EVENTENTER("event = %p", nt_event);
1526
 
 
1527
 
        irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
1528
 
        old_state = nt_event->dh.signal_state;
1529
 
        nt_event->dh.signal_state = 0;
1530
 
        kspin_unlock_irql(&nt_event_lock, irql);
1531
 
 
 
1635
        old_state = xchg(&nt_event->dh.signal_state, 0);
 
1636
        EVENTTRACE("old state: %d", old_state);
1532
1637
        EVENTEXIT(return old_state);
1533
1638
}
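
/*
 * The event routines above reduce to signal_state manipulation: KeSetEvent
 * raises it, waking waiters only on a 0 -> 1 transition, while KeClearEvent
 * and KeResetEvent clear it with an atomic xchg, so no dispatcher_lock is
 * needed on that path. A minimal producer/consumer sketch over one
 * notification event ('done_event' is hypothetical):
 */
static struct nt_event done_event;

static void producer(void)
{
        KeSetEvent(&done_event, 0, FALSE);      /* 0 -> 1 wakes all waiters */
}

static void consumer(void)
{
        KeInitializeEvent(&done_event, NotificationEvent, FALSE);
        KeWaitForSingleObject(&done_event, 0, 0, TRUE, NULL);
        KeResetEvent(&done_event);      /* re-arm; returns the prior state */
}
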
1534
1639
 
1535
 
STDCALL void WRAP_EXPORT(KeInitializeMutex)
1536
 
        (struct nt_mutex *mutex, BOOLEAN wait)
 
1640
wstdcall void WRAP_EXPORT(KeInitializeMutex)
 
1641
        (struct nt_mutex *mutex, ULONG level)
1537
1642
{
1538
1643
        KIRQL irql;
1539
1644
 
1540
1645
        EVENTENTER("%p", mutex);
1541
 
        irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
1542
 
        initialize_dh(&mutex->dh, SynchronizationEvent, 1, DH_NT_MUTEX);
1543
 
        kspin_unlock_irql(&nt_event_lock, irql);
 
1646
        irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
 
1647
        initialize_dh(&mutex->dh, MutexObject, 1);
1544
1648
        mutex->dh.size = sizeof(*mutex);
1545
1649
        InitializeListHead(&mutex->list);
1546
1650
        mutex->abandoned = FALSE;
1547
1651
        mutex->apc_disable = 1;
1548
1652
        mutex->owner_thread = NULL;
 
1653
        nt_spin_unlock_irql(&dispatcher_lock, irql);
1549
1654
        EVENTEXIT(return);
1550
1655
}
1551
1656
 
1552
 
STDCALL LONG WRAP_EXPORT(KeReleaseMutex)
 
1657
wstdcall LONG WRAP_EXPORT(KeReleaseMutex)
1553
1658
        (struct nt_mutex *mutex, BOOLEAN wait)
1554
1659
{
1555
1660
        LONG ret;
1556
1661
        KIRQL irql;
 
1662
        struct task_struct *thread;
1557
1663
 
1558
 
        EVENTENTER("%p", mutex);
 
1664
        EVENTENTER("%p, %d, %p", mutex, wait, current);
1559
1665
        if (wait == TRUE)
1560
1666
                WARNING("wait: %d", wait);
1561
 
        irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
1562
 
        ret = mutex->dh.signal_state++;
1563
 
        if (mutex->dh.signal_state > 0) {
1564
 
                mutex->owner_thread = NULL;
1565
 
                wakeup_threads(&mutex->dh);
1566
 
        }
1567
 
        kspin_unlock_irql(&nt_event_lock, irql);
 
1667
        thread = current;
 
1668
        irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
 
1669
        EVENTTRACE("%p, %p, %d", thread, mutex->owner_thread,
 
1670
                   mutex->dh.signal_state);
 
1671
        if ((mutex->owner_thread == thread) && (mutex->dh.signal_state <= 0)) {
 
1672
                if ((ret = mutex->dh.signal_state++) == 0) {
 
1673
                        mutex->owner_thread = NULL;
 
1674
                        wakeup_threads(&mutex->dh);
 
1675
                }
 
1676
        } else
 
1677
                ret = STATUS_MUTANT_NOT_OWNED;
 
1678
        nt_spin_unlock_irql(&dispatcher_lock, irql);
 
1679
        EVENTTRACE("ret: %08X", ret);
1568
1680
        EVENTEXIT(return ret);
1569
1681
}
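
/*
 * Mutex acquisition goes through the generic wait path; release is
 * explicit, and per the ownership check above, releasing a mutex the
 * current task does not own fails with STATUS_MUTANT_NOT_OWNED:
 */
static void with_mutex(struct nt_mutex *mutex)
{
        /* blocks until the mutex is free, then records us as owner */
        KeWaitForSingleObject(mutex, 0, 0, FALSE, NULL);
        /* ... critical section ... */
        KeReleaseMutex(mutex, FALSE);   /* hands off to one waiter, if any */
}
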
1570
1682
 
1571
 
STDCALL void WRAP_EXPORT(KeInitializeSemaphore)
 
1683
wstdcall void WRAP_EXPORT(KeInitializeSemaphore)
1572
1684
        (struct nt_semaphore *semaphore, LONG count, LONG limit)
1573
1685
{
1574
1686
        KIRQL irql;
1577
1689
        /* if limit > 1, we need to satisfy as many waits as possible (until
1578
1690
         * the count becomes 0); so we keep decrementing the count every time a
1579
1691
         * wait is satisfied */
1580
 
        irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
1581
 
        initialize_dh(&semaphore->dh, NotificationEvent, count,
1582
 
                      DH_NT_SEMAPHORE);
1583
 
        kspin_unlock_irql(&nt_event_lock, irql);
 
1692
        irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
 
1693
        initialize_dh(&semaphore->dh, SemaphoreObject, count);
 
1694
        nt_spin_unlock_irql(&dispatcher_lock, irql);
1584
1695
        semaphore->dh.size = sizeof(*semaphore);
1585
1696
        semaphore->limit = limit;
1586
1697
        EVENTEXIT(return);
1587
1698
}
1588
1699
 
1589
 
STDCALL LONG WRAP_EXPORT(KeReleaseSemaphore)
 
1700
wstdcall LONG WRAP_EXPORT(KeReleaseSemaphore)
1590
1701
        (struct nt_semaphore *semaphore, KPRIORITY incr, LONG adjustment,
1591
1702
         BOOLEAN wait)
1592
1703
{
1594
1705
        KIRQL irql;
1595
1706
 
1596
1707
        EVENTENTER("%p", semaphore);
1597
 
        irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
 
1708
        irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
1598
1709
        ret = semaphore->dh.signal_state;
1599
1710
        assert(ret >= 0);
1600
1711
        if (semaphore->dh.signal_state + adjustment <= semaphore->limit)
1602
1713
        /* else raise exception */
1603
1714
        if (semaphore->dh.signal_state > 0)
1604
1715
                wakeup_threads(&semaphore->dh);
1605
 
        kspin_unlock_irql(&nt_event_lock, irql);
 
1716
        nt_spin_unlock_irql(&dispatcher_lock, irql);
1606
1717
        EVENTEXIT(return ret);
1607
1718
}
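
/*
 * Counting semantics of the semaphore routines above, worked through (the
 * limit check keeps signal_state from ever exceeding 'limit'):
 */
static void semaphore_example(struct nt_semaphore *sema)
{
        KeInitializeSemaphore(sema, 2, 5);      /* signal_state 2, limit 5 */
        KeReleaseSemaphore(sema, 0, 3, FALSE);  /* 2 + 3 <= 5 -> state 5 */
        /* each satisfied wait decrements the count: 5 -> 4 */
        KeWaitForSingleObject(sema, 0, 0, FALSE, NULL);
}
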
1608
1719
 
1609
 
STDCALL NTSTATUS WRAP_EXPORT(KeDelayExecutionThread)
 
1720
wstdcall NTSTATUS WRAP_EXPORT(KeDelayExecutionThread)
1610
1721
        (KPROCESSOR_MODE wait_mode, BOOLEAN alertable, LARGE_INTEGER *interval)
1611
1722
{
1612
1723
        int res;
1617
1728
 
1618
1729
        timeout = SYSTEM_TIME_TO_HZ(*interval) + 1;
1619
1730
        EVENTTRACE("thread: %p, interval: %Ld, timeout: %ld",
1620
 
                    get_current(), *interval, timeout);
 
1731
                    current, *interval, timeout);
1621
1732
        if (timeout <= 0)
1622
1733
                EVENTEXIT(return STATUS_SUCCESS);
1623
1734
 
1628
1739
                set_current_state(TASK_UNINTERRUPTIBLE);
1629
1740
 
1630
1741
        res = schedule_timeout(timeout);
1631
 
        EVENTTRACE("thread: %p, res: %d", get_current(), res);
 
1742
        EVENTTRACE("thread: %p, res: %d", current, res);
1632
1743
        if (res == 0)
1633
1744
                EVENTEXIT(return STATUS_SUCCESS);
1634
1745
        else
1635
1746
                EVENTEXIT(return STATUS_ALERTED);
1636
1747
}
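
/*
 * The delay above hinges on SYSTEM_TIME_TO_HZ converting an NT interval
 * (100 ns units, TICKSPERSEC of them per second, negative meaning
 * relative) into jiffies. Conceptually, under those assumptions and using
 * do_div() for the 32-bit-safe 64-bit division:
 */
static inline long nt_interval_to_jiffies(LARGE_INTEGER interval)
{
        u64 ticks = (interval < 0) ? -interval : interval;

        ticks *= HZ;                    /* e.g. 100 ms = 1,000,000 ticks */
        do_div(ticks, TICKSPERSEC);     /* -> HZ / 10 jiffies */
        return (long)ticks;
}
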
1637
1748
 
1638
 
STDCALL KPRIORITY WRAP_EXPORT(KeQueryPriorityThread)
1639
 
        (struct nt_thread *thread)
 
1749
wstdcall KPRIORITY WRAP_EXPORT(KeQueryPriorityThread)
 
1750
        (struct task_struct *task)
1640
1751
{
1641
1752
        KPRIORITY prio;
1642
1753
 
1643
 
        EVENTENTER("thread: %p, task: %p", thread, thread->task);
 
1754
        EVENTENTER("task: %p", task);
1644
1755
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
1645
1756
        prio = 1;
1646
1757
#else
1647
 
        if (rt_task(thread->task))
 
1758
        if (rt_task(task))
1648
1759
                prio = LOW_REALTIME_PRIORITY;
1649
1760
        else
1650
1761
                prio = MAXIMUM_PRIORITY;
1652
1763
        EVENTEXIT(return prio);
1653
1764
}
1654
1765
 
1655
 
STDCALL ULONGLONG WRAP_EXPORT(KeQueryInterruptTime)
 
1766
wstdcall ULONGLONG WRAP_EXPORT(KeQueryInterruptTime)
1656
1767
        (void)
1657
1768
{
1658
1769
        TRACEEXIT5(return jiffies * TICKSPERSEC / HZ);
1659
1770
}
1660
1771
 
1661
 
STDCALL ULONG WRAP_EXPORT(KeQueryTimeIncrement)
 
1772
wstdcall ULONG WRAP_EXPORT(KeQueryTimeIncrement)
1662
1773
        (void)
1663
1774
{
1664
1775
        TRACEEXIT5(return TICKSPERSEC / HZ);
1665
1776
}
1666
1777
 
1667
 
STDCALL void WRAP_EXPORT(KeQuerySystemTime)
 
1778
wstdcall void WRAP_EXPORT(KeQuerySystemTime)
1668
1779
        (LARGE_INTEGER *time)
1669
1780
{
1670
1781
        *time = ticks_1601();
1671
1782
        return;
1672
1783
}
1673
1784
 
1674
 
STDCALL void WRAP_EXPORT(KeQUeryTickCount)
 
1785
wstdcall void WRAP_EXPORT(KeQueryTickCount)
1675
1786
        (LARGE_INTEGER *j)
1676
1787
{
1677
1788
        *j = jiffies;
1678
1789
}
1679
1790
 
1680
 
STDCALL LARGE_INTEGER WRAP_EXPORT(KeQueryPerformanceCounter)
 
1791
wstdcall LARGE_INTEGER WRAP_EXPORT(KeQueryPerformanceCounter)
1681
1792
        (LARGE_INTEGER *counter)
1682
1793
{
1683
1794
        if (counter)
1685
1796
        return jiffies;
1686
1797
}
1687
1798
 
1688
 
STDCALL struct nt_thread *WRAP_EXPORT(KeGetCurrentThread)
 
1799
wstdcall struct task_struct *WRAP_EXPORT(KeGetCurrentThread)
1689
1800
        (void)
1690
1801
{
1691
 
        KIRQL irql;
1692
 
        struct task_struct *task = get_current();
1693
 
        struct nt_thread *ret;
1694
 
        struct common_object_header *header;
 
1802
        struct task_struct *task = current;
1695
1803
 
1696
1804
        DBGTRACE5("task: %p", task);
1697
 
        ret = NULL;
1698
 
        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
1699
 
        nt_list_for_each_entry(header, &object_list, list) {
1700
 
                struct nt_thread *thread;
1701
 
                DBGTRACE5("header: %p, type: %d", header, header->type);
1702
 
                if (header->type != OBJECT_TYPE_NT_THREAD)
1703
 
                        continue;
1704
 
                thread = HEADER_TO_OBJECT(header);
1705
 
                DBGTRACE5("thread: %p, task: %p", thread, thread->task);
1706
 
                if (thread->task == task) {
1707
 
                        ret = thread;
1708
 
                        break;
1709
 
                }
1710
 
        }
1711
 
        kspin_unlock_irql(&ntoskernel_lock, irql);
1712
 
        if (ret == NULL)
1713
 
                DBGTRACE1("couldn't find thread for task %p", task);
1714
 
        DBGTRACE5("current thread = %p", ret);
1715
 
        return ret;
 
1805
        return task;
1716
1806
}
1717
1807
 
1718
 
STDCALL KPRIORITY WRAP_EXPORT(KeSetPriorityThread)
1719
 
        (struct nt_thread *thread, KPRIORITY priority)
 
1808
wstdcall KPRIORITY WRAP_EXPORT(KeSetPriorityThread)
 
1809
        (struct task_struct *task, KPRIORITY priority)
1720
1810
{
1721
1811
        KPRIORITY old_prio;
1722
1812
 
1723
 
        TRACEENTER3("thread: %p, priority = %u", thread, priority);
 
1813
        TRACEENTER3("task: %p, priority = %u", task, priority);
1724
1814
 
1725
1815
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
1726
1816
        /* FIXME: is there a way to set kernel thread prio on 2.4? */
1727
1817
        old_prio = LOW_PRIORITY;
1728
1818
#else
1729
 
        if (rt_task(thread->task))
 
1819
        if (rt_task(task))
1730
1820
                old_prio = LOW_REALTIME_PRIORITY;
1731
1821
        else
1732
1822
                old_prio = MAXIMUM_PRIORITY;
1733
1823
#if 0
1734
1824
        if (priority == LOW_REALTIME_PRIORITY)
1735
 
                set_user_nice(thread->task, -20);
 
1825
                set_user_nice(task, -20);
1736
1826
        else
1737
 
                set_user_nice(thread->task, 10);
 
1827
                set_user_nice(task, 10);
1738
1828
#endif
1739
1829
#endif
1740
1830
        return old_prio;
1741
1831
}
1742
1832
 
1743
1833
struct trampoline_context {
1744
 
        void (*start_routine)(void *) STDCALL;
 
1834
        void (*start_routine)(void *) wstdcall;
1745
1835
        void *context;
1746
1836
        struct nt_thread *thread;
1747
1837
};
1754
1844
        memcpy(&ctx, data, sizeof(ctx));
1755
1845
        kfree(data);
1756
1846
        thread = ctx.thread;
1757
 
        thread->task = get_current();
 
1847
        thread->task = current;
1758
1848
        thread->pid = thread->task->pid;
1759
 
 
 
1849
#ifdef PF_NOFREEZE
 
1850
        current->flags |= PF_NOFREEZE;
 
1851
#endif
 
1852
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 
1853
        strncpy(current->comm, "windisdrvr", sizeof(current->comm));
 
1854
        current->comm[sizeof(current->comm) - 1] = 0;
 
1855
#endif
1760
1856
        DBGTRACE2("thread: %p, task: %p (%d)", thread, thread->task,
1761
1857
                  thread->pid);
1762
1858
        ctx.start_routine(ctx.context);
1764
1860
        return 0;
1765
1861
}
1766
1862
 
1767
 
struct nt_thread *wrap_create_thread(struct task_struct *task)
 
1863
static struct nt_thread *create_nt_thread(struct task_struct *task)
1768
1864
{
1769
1865
        struct nt_thread *thread;
1770
1866
        KIRQL irql;
1776
1872
                        thread->pid = task->pid;
1777
1873
                else
1778
1874
                        thread->pid = 0;
1779
 
                kspin_lock_init(&thread->lock);
1780
 
                init_waitqueue_head(&thread->event_wq);
 
1875
                nt_spin_lock_init(&thread->lock);
1781
1876
                InitializeListHead(&thread->irps);
1782
 
                irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
1783
 
                initialize_dh(&thread->dh, NotificationEvent, 0, DH_NT_THREAD);
1784
 
                kspin_unlock_irql(&nt_event_lock, irql);
 
1877
                irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
 
1878
                initialize_dh(&thread->dh, ThreadObject, 0);
1785
1879
                thread->dh.size = sizeof(*thread);
1786
 
 
1787
 
                DBGTRACE1("thread: %p, task: %p, pid: %d",
 
1880
                nt_spin_unlock_irql(&dispatcher_lock, irql);
 
1881
                DBGTRACE2("thread: %p, task: %p, pid: %d",
1788
1882
                          thread, thread->task, thread->pid);
1789
1883
        } else
1790
1884
                ERROR("couldn't allocate thread object");
1791
1885
        return thread;
1792
1886
}
1793
1887
 
1794
 
void wrap_remove_thread(struct nt_thread *thread)
 
1888
static void remove_nt_thread(struct nt_thread *thread)
1795
1889
{
1796
 
        KIRQL irql;
1797
1890
        struct nt_list *ent;
1798
 
 
1799
 
        if (thread) {
1800
 
                DBGTRACE1("terminating thread: %p, task: %p, pid: %d",
1801
 
                          thread, thread->task, thread->task->pid);
1802
 
                /* TODO: make sure waitqueue is empty and destroy it */
1803
 
                while (1) {
1804
 
                        struct irp *irp;
1805
 
                        irql = kspin_lock_irql(&thread->lock, DISPATCH_LEVEL);
1806
 
                        ent = RemoveHeadList(&thread->irps);
1807
 
                        kspin_unlock_irql(&thread->lock, irql);
1808
 
                        if (!ent)
1809
 
                                break;
1810
 
                        irp = container_of(ent, struct irp, threads);
1811
 
                        IoCancelIrp(irp);
 
1891
        KIRQL irql;
 
1892
 
 
1893
        if (!thread) {
 
1894
                ERROR("couldn't find thread for task: %p", current);
 
1895
                return;
 
1896
        }
 
1897
        DBGTRACE1("terminating thread: %p, task: %p, pid: %d",
 
1898
                  thread, thread->task, thread->task->pid);
 
1899
        /* TODO: make sure waitqueue is empty and destroy it */
 
1900
        while (1) {
 
1901
                struct irp *irp;
 
1902
                irql = nt_spin_lock_irql(&thread->lock, DISPATCH_LEVEL);
 
1903
                ent = RemoveHeadList(&thread->irps);
 
1904
                nt_spin_unlock_irql(&thread->lock, irql);
 
1905
                if (!ent)
 
1906
                        break;
 
1907
                irp = container_of(ent, struct irp, threads);
 
1908
                IoCancelIrp(irp);
 
1909
        }
 
1910
        ObDereferenceObject(thread);
 
1911
}
 
1912
 
 
1913
struct nt_thread *get_current_nt_thread(void)
 
1914
{
 
1915
        struct task_struct *task = current;
 
1916
        struct nt_thread *ret;
 
1917
        struct common_object_header *header;
 
1918
        KIRQL irql;
 
1919
 
 
1920
        DBGTRACE5("task: %p", task);
 
1921
        ret = NULL;
 
1922
        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
 
1923
        nt_list_for_each_entry(header, &object_list, list) {
 
1924
                struct nt_thread *thread;
 
1925
                DBGTRACE5("header: %p, type: %d", header, header->type);
 
1926
                if (header->type != OBJECT_TYPE_NT_THREAD)
 
1927
                        break;
 
1928
                thread = HEADER_TO_OBJECT(header);
 
1929
                DBGTRACE5("thread: %p, task: %p", thread, thread->task);
 
1930
                if (thread->task == task) {
 
1931
                        ret = thread;
 
1932
                        break;
1812
1933
                }
1813
 
                ObDereferenceObject(thread);
1814
 
        } else
1815
 
                ERROR("couldn't find thread for task: %p", get_current());
1816
 
        return;
 
1934
        }
 
1935
        nt_spin_unlock_irql(&ntoskernel_lock, irql);
 
1936
        if (ret == NULL)
 
1937
                DBGTRACE3("couldn't find thread for task %p, %d",
 
1938
                          task, current->pid);
 
1939
        DBGTRACE5("current thread = %p", ret);
 
1940
        return ret;
1817
1941
}
1818
1942
 
1819
 
STDCALL NTSTATUS WRAP_EXPORT(PsCreateSystemThread)
 
1943
wstdcall NTSTATUS WRAP_EXPORT(PsCreateSystemThread)
1820
1944
        (void **phandle, ULONG access, void *obj_attr, void *process,
1821
 
         void *client_id, void (*start_routine)(void *) STDCALL, void *context)
 
1945
         void *client_id, void (*start_routine)(void *) wstdcall, void *context)
1822
1946
{
1823
1947
        struct trampoline_context *ctx;
1824
1948
        struct nt_thread *thread;
1837
1961
                TRACEEXIT2(return STATUS_RESOURCES);
1838
1962
        ctx->start_routine = start_routine;
1839
1963
        ctx->context = context;
1840
 
        thread = wrap_create_thread(NULL);
 
1964
        thread = create_nt_thread(NULL);
1841
1965
        if (!thread) {
1842
1966
                kfree(ctx);
1843
1967
                TRACEEXIT2(return STATUS_RESOURCES);
1846
1970
 
1847
1971
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)
1848
1972
        pid = kernel_thread(thread_trampoline, ctx,
1849
 
                CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
 
1973
                            CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
1850
1974
        DBGTRACE2("pid = %d", pid);
1851
1975
        if (pid < 0) {
1852
1976
                kfree(ctx);
1854
1978
                TRACEEXIT2(return STATUS_FAILURE);
1855
1979
        }
1856
1980
        task = NULL;
1857
 
        DBGTRACE2("created task: %p (%d)", find_task_by_pid(pid), pid);
 
1981
        DBGTRACE2("created task: %p (%d)", task, pid);
1858
1982
#else
1859
 
        task = KTHREAD_RUN(thread_trampoline, ctx, DRIVER_NAME);
 
1983
        task = KTHREAD_RUN(thread_trampoline, ctx, "windisdrvr");
1860
1984
        if (IS_ERR(task)) {
1861
1985
                kfree(ctx);
1862
1986
                free_object(thread);
1864
1988
        }
1865
1989
        DBGTRACE2("created task: %p (%d)", task, task->pid);
1866
1990
#endif
1867
 
        *phandle = thread;
1868
 
        DBGTRACE2("created thread: %p", thread);
 
1991
        *phandle = OBJECT_TO_HEADER(thread);
 
1992
        DBGTRACE2("created thread: %p, %p", thread, *phandle);
1869
1993
        TRACEEXIT2(return STATUS_SUCCESS);
1870
1994
}
1871
1995
 
1872
 
STDCALL NTSTATUS WRAP_EXPORT(PsTerminateSystemThread)
 
1996
wstdcall NTSTATUS WRAP_EXPORT(PsTerminateSystemThread)
1873
1997
        (NTSTATUS status)
1874
1998
{
1875
1999
        struct nt_thread *thread;
1876
2000
 
1877
 
        thread = KeGetCurrentThread();
 
2001
        DBGTRACE2("%p, %08X", current, status);
 
2002
        thread = get_current_nt_thread();
1878
2003
        if (thread) {
1879
2004
                DBGTRACE2("setting event for thread: %p", thread);
1880
2005
                KeSetEvent((struct nt_event *)&thread->dh, 0, FALSE);
1881
2006
                DBGTRACE2("set event for thread: %p", thread);
1882
 
                wrap_remove_thread(thread);
 
2007
                remove_nt_thread(thread);
1883
2008
                complete_and_exit(NULL, status);
1884
2009
                ERROR("oops: %p, %d", thread->task, thread->pid);
1885
2010
        } else
1886
 
                ERROR("couldn't find thread for task: %p", get_current);
 
2011
                ERROR("couldn't find thread for task: %p", current);
1887
2012
        return STATUS_FAILURE;
1888
2013
}
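
/*
 * The driver-side pattern for the two routines above: spawn a worker thread
 * and let it terminate itself ('worker_routine' and 'ctx' are
 * hypothetical):
 */
static void wstdcall worker_routine(void *ctx)
{
        /* ... thread body ... */
        PsTerminateSystemThread(STATUS_SUCCESS);        /* does not return */
}

static NTSTATUS spawn_worker(void *ctx)
{
        void *handle;

        return PsCreateSystemThread(&handle, 0, NULL, NULL, NULL,
                                    worker_routine, ctx);
}
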
1889
2014
 
1890
 
STDCALL BOOLEAN WRAP_EXPORT(KeRemoveEntryDeviceQueue)
 
2015
wstdcall BOOLEAN WRAP_EXPORT(KeRemoveEntryDeviceQueue)
1891
2016
        (struct kdevice_queue *dev_queue, struct kdevice_queue_entry *entry)
1892
2017
{
1893
2018
        struct kdevice_queue_entry *e;
1894
2019
        KIRQL irql;
1895
2020
 
1896
 
        irql = kspin_lock_irql(&dev_queue->lock, DISPATCH_LEVEL);
 
2021
        irql = nt_spin_lock_irql(&dev_queue->lock, DISPATCH_LEVEL);
1897
2022
        nt_list_for_each_entry(e, &dev_queue->list, list) {
1898
2023
                if (e == entry) {
1899
2024
                        RemoveEntryList(&e->list);
1900
 
                        kspin_unlock_irql(&dev_queue->lock, irql);
 
2025
                        nt_spin_unlock_irql(&dev_queue->lock, irql);
1901
2026
                        return TRUE;
1902
2027
                }
1903
2028
        }
1904
 
        kspin_unlock_irql(&dev_queue->lock, irql);
 
2029
        nt_spin_unlock_irql(&dev_queue->lock, irql);
1905
2030
        return FALSE;
1906
2031
}
1907
2032
 
1908
 
STDCALL BOOLEAN WRAP_EXPORT(KeSynchronizeExecution)
 
2033
wstdcall BOOLEAN WRAP_EXPORT(KeSynchronizeExecution)
1909
2034
        (struct kinterrupt *interrupt, PKSYNCHRONIZE_ROUTINE synch_routine,
1910
2035
         void *synch_context)
1911
2036
{
1912
 
        KSPIN_LOCK *spinlock;
 
2037
        NT_SPIN_LOCK *spinlock;
1913
2038
        BOOLEAN ret;
1914
 
        KIRQL irql = PASSIVE_LEVEL;
 
2039
        unsigned long flags;
1915
2040
 
1916
2041
        if (interrupt->actual_lock)
1917
2042
                spinlock = interrupt->actual_lock;
1918
2043
        else
1919
2044
                spinlock = &interrupt->lock;
1920
 
        if (interrupt->synch_irql == DISPATCH_LEVEL)
1921
 
                irql = kspin_lock_irql(spinlock, interrupt->synch_irql);
1922
 
        else
1923
 
                kspin_lock(spinlock);
 
2045
        nt_spin_lock_irqsave(spinlock, flags);
1924
2046
        ret = synch_routine(synch_context);
1925
 
        if (interrupt->synch_irql == DISPATCH_LEVEL)
1926
 
                kspin_unlock_irql(spinlock, irql);
1927
 
        else
1928
 
                kspin_unlock(spinlock);
 
2047
        nt_spin_unlock_irqrestore(spinlock, flags);
1929
2048
        return ret;
1930
2049
}
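
/*
 * Typical use of KeSynchronizeExecution above: run a short routine that
 * touches state shared with the ISR, serialized by the interrupt's spin
 * lock. 'struct my_dev' and its fields are hypothetical:
 */
struct my_dev {
        struct kinterrupt *kinterrupt;
        int intr_mask;
};

static BOOLEAN wstdcall mask_device_interrupts(void *ctx)
{
        struct my_dev *dev = ctx;

        dev->intr_mask = 0;     /* safe: the ISR serializes on this lock */
        return TRUE;
}
/* caller: KeSynchronizeExecution(dev->kinterrupt,
 *                                mask_device_interrupts, dev); */
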
1931
2050
 
 
2051
wstdcall void *WRAP_EXPORT(MmAllocateContiguousMemorySpecifyCache)
 
2052
        (SIZE_T size, PHYSICAL_ADDRESS lowest, PHYSICAL_ADDRESS highest,
 
2053
         PHYSICAL_ADDRESS boundary, enum memory_caching_type cache_type)
 
2054
{
 
2055
        void *addr;
 
2056
        DBGTRACE2("%lu, %Lu, %Lu, %Lu, %d", size, lowest, highest, boundary,
 
2057
                  cache_type);
 
2058
        addr = ExAllocatePoolWithTag(NonPagedPool, size, 0);
 
2059
        DBGTRACE2("%p", addr);
 
2060
        return addr;
 
2061
}
 
2062
 
 
2063
wstdcall void WRAP_EXPORT(MmFreeContiguousMemorySpecifyCache)
 
2064
        (void *base, SIZE_T size, enum memory_caching_type cache_type)
 
2065
{
 
2066
        DBGTRACE2("%p", base);
 
2067
        ExFreePool(base);
 
2068
}
 
2069
 
 
2070
wstdcall PHYSICAL_ADDRESS WRAP_EXPORT(MmGetPhysicalAddress)
 
2071
        (void *base)
 
2072
{
 
2073
        DBGTRACE2("%p", base);
 
2074
        return virt_to_phys(base);
 
2075
}
 
2076
 
1932
2077
/* Atheros card with pciid 168C:0014 calls this function with 0xf0000
1933
2078
 * and 0xf6ef0 addresses, and then checks for things that seem to be
1934
2079
 * related to ACPI: "_SM_" and "_DMI_". This may be the hack they do
1936
2081
 * probably get this device to work if we create a buffer with the
1937
2082
 * strings as required by the driver and return a virtual address for
1938
2083
 * that address instead */
1939
 
STDCALL void *WRAP_EXPORT(MmMapIoSpace)
 
2084
wstdcall void *WRAP_EXPORT(MmMapIoSpace)
1940
2085
        (PHYSICAL_ADDRESS phys_addr, SIZE_T size,
1941
2086
         enum memory_caching_type cache)
1942
2087
{
1950
2095
        return virt;
1951
2096
}
1952
2097
 
1953
 
STDCALL void WRAP_EXPORT(MmUnmapIoSpace)
 
2098
wstdcall void WRAP_EXPORT(MmUnmapIoSpace)
1954
2099
        (void *addr, SIZE_T size)
1955
2100
{
1956
2101
        TRACEENTER1("%p, %lu", addr, size);
1958
2103
        return;
1959
2104
}
1960
2105
 
1961
 
STDCALL ULONG WRAP_EXPORT(MmSizeOfMdl)
 
2106
wstdcall ULONG WRAP_EXPORT(MmSizeOfMdl)
1962
2107
        (void *base, ULONG length)
1963
2108
{
1964
2109
        return (sizeof(struct mdl) +
1965
 
                SPAN_PAGES((ULONG_PTR)base, length) * sizeof(ULONG));
 
2110
                (sizeof(PFN_NUMBER) * SPAN_PAGES(base, length)));
1966
2111
}
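
/*
 * A worked example for MmSizeOfMdl above, assuming 4 KB pages: a 5000-byte
 * buffer starting 100 bytes into a page covers bytes 100..5099, i.e. two
 * pages, so SPAN_PAGES(base, 5000) == 2 and
 *
 *      MmSizeOfMdl(base, 5000) == sizeof(struct mdl) + 2 * sizeof(PFN_NUMBER)
 */
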
1967
2112
 
1968
2113
struct mdl *allocate_init_mdl(void *virt, ULONG length)
1979
2124
                        alloc_flags = GFP_KERNEL;
1980
2125
                else
1981
2126
                        alloc_flags = GFP_ATOMIC;
1982
 
                
1983
2127
                wrap_mdl = kmem_cache_alloc(mdl_cache, alloc_flags);
1984
2128
                if (!wrap_mdl)
1985
2129
                        return NULL;
1986
 
                irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
 
2130
                irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
1987
2131
                InsertHeadList(&wrap_mdl_list, &wrap_mdl->list);
1988
 
                kspin_unlock_irql(&ntoskernel_lock, irql);
 
2132
                nt_spin_unlock_irql(&ntoskernel_lock, irql);
1989
2133
                mdl = (struct mdl *)wrap_mdl->mdl;
1990
 
                DBGTRACE3("allocated mdl cache: %p(%p)", wrap_mdl, mdl);
 
2134
                DBGTRACE3("allocated mdl from cache: %p(%p), %p(%d)",
 
2135
                          wrap_mdl, mdl, virt, length);
1991
2136
                memset(mdl, 0, CACHE_MDL_SIZE);
1992
2137
                MmInitializeMdl(mdl, virt, length);
1993
2138
                /* mark the MDL as allocated from cache pool so when
2000
2145
                if (!wrap_mdl)
2001
2146
                        return NULL;
2002
2147
                mdl = (struct mdl *)wrap_mdl->mdl;
2003
 
                DBGTRACE3("allocated mdl: %p (%p)", wrap_mdl, mdl);
2004
 
                irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
 
2148
                DBGTRACE3("allocated mdl from memory: %p(%p), %p(%d)",
 
2149
                          wrap_mdl, mdl, virt, length);
 
2150
                irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
2005
2151
                InsertHeadList(&wrap_mdl_list, &wrap_mdl->list);
2006
 
                kspin_unlock_irql(&ntoskernel_lock, irql);
 
2152
                nt_spin_unlock_irql(&ntoskernel_lock, irql);
2007
2153
                memset(mdl, 0, mdl_size);
2008
2154
                MmInitializeMdl(mdl, virt, length);
2009
2155
        }
2010
 
        MmBuildMdlForNonPagedPool(mdl);
2011
2156
        return mdl;
2012
2157
}

@@ -2028 +2173 @@
                 struct wrap_mdl *wrap_mdl;
                 wrap_mdl = (struct wrap_mdl *)
                         ((char *)mdl - offsetof(struct wrap_mdl, mdl));
-                irql = kspin_lock_irql(&ntoskernel_lock,
-                                       DISPATCH_LEVEL);
+                irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
                 RemoveEntryList(&wrap_mdl->list);
-                kspin_unlock_irql(&ntoskernel_lock, irql);
-
+                nt_spin_unlock_irql(&ntoskernel_lock, irql);
+
                 if (mdl->flags & MDL_CACHE_ALLOCATED) {
-                        DBGTRACE3("freeing mdl cache: %p (%hu)",
-                                  wrap_mdl, mdl->flags);
+                        DBGTRACE3("freeing mdl cache: %p, %p, %p",
+                                  wrap_mdl, mdl, mdl->mappedsystemva);
                         kmem_cache_free(mdl_cache, wrap_mdl);
                 } else {
-                        DBGTRACE3("freeing mdl: %p (%hu)",
-                                  wrap_mdl, mdl->flags);
+                        DBGTRACE3("freeing mdl: %p, %p, %p",
+                                  wrap_mdl, mdl, mdl->mappedsystemva);
                         kfree(wrap_mdl);
                 }
         }
         return;
 }
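
The free path above gets back only the inner mdl pointer and recovers
the enclosing struct wrap_mdl with offsetof() arithmetic, the idiom
the Linux kernel wraps as container_of(). A self-contained sketch of
the pattern, with hypothetical types:

#include <stddef.h>

/* hypothetical container, for illustration only */
struct outer {
        int bookkeeping;
        char payload[64];
};

/* step back from a pointer to the embedded member to the enclosing
 * object, as done above for struct wrap_mdl */
static struct outer *payload_to_outer(void *payload)
{
        return (struct outer *)((char *)payload -
                                offsetof(struct outer, payload));
}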

-STDCALL void WRAP_EXPORT(IoBuildPartialMdl)
+wstdcall void WRAP_EXPORT(IoBuildPartialMdl)
         (struct mdl *source, struct mdl *target, void *virt, ULONG length)
 {
         MmInitializeMdl(target, virt, length);
+        target->flags |= MDL_PARTIAL;
 }
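
With the added MDL_PARTIAL flag, MmMapLockedPages() below can now
recognize a partial MDL built here. A sketch of carving the second
page out of an existing MDL (full_mdl and part are hypothetical, and
part must already provide room for the pages it will span):

char *va = MmGetMdlVirtualAddress(full_mdl);

/* re-describe one page of full_mdl; nothing is locked or copied,
 * the target is only re-initialized and tagged MDL_PARTIAL */
IoBuildPartialMdl(full_mdl, part, va + PAGE_SIZE, PAGE_SIZE);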

 /* FIXME: We don't update MDL to physical page mapping, since in Linux
  * the pages are in memory anyway; if a driver treats an MDL as
  * opaque, we should be safe; otherwise, the driver may break */
-STDCALL void WRAP_EXPORT(MmBuildMdlForNonPagedPool)
+wstdcall void WRAP_EXPORT(MmBuildMdlForNonPagedPool)
         (struct mdl *mdl)
 {
+        PFN_NUMBER *mdl_pages;
+        int i, n;
+        TRACEENTER4("%p", mdl);
+        /* already mapped */
+//      mdl->mappedsystemva = MmGetMdlVirtualAddress(mdl);
         mdl->flags |= MDL_SOURCE_IS_NONPAGED_POOL;
-        mdl->mappedsystemva = MmGetMdlVirtualAddress(mdl);
+        DBGTRACE4("%p, %p, %p, %d, %d", mdl, mdl->mappedsystemva, mdl->startva,
+                  mdl->byteoffset, mdl->bytecount);
+        n = SPAN_PAGES(MmGetSystemAddressForMdl(mdl), MmGetMdlByteCount(mdl));
+        if (n > CACHE_MDL_PAGES)
+                WARNING("%p, %d, %d", MmGetSystemAddressForMdl(mdl),
+                        MmGetMdlByteCount(mdl), n);
+        mdl_pages = MmGetMdlPfnArray(mdl);
+        for (i = 0; i < n; i++)
+                mdl_pages[i] = (ULONG_PTR)mdl->startva + (i * PAGE_SIZE);
         return;
 }
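
Note what the new loop stores: each "PFN" slot receives the
page-aligned virtual address of its page, not a real page-frame
number, in line with the FIXME above. A hypothetical driver-side
check makes the invariant explicit:

PFN_NUMBER *pages = MmGetMdlPfnArray(mdl);
int i, n;

n = SPAN_PAGES(MmGetSystemAddressForMdl(mdl), MmGetMdlByteCount(mdl));
for (i = 0; i < n; i++) {
        /* entries are virtual addresses, as stored by the loop above */
        if (pages[i] != (ULONG_PTR)mdl->startva + i * PAGE_SIZE)
                WARNING("unexpected entry %d in mdl %p", i, mdl);
}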

-STDCALL void *WRAP_EXPORT(MmMapLockedPages)
+wstdcall void *WRAP_EXPORT(MmMapLockedPages)
         (struct mdl *mdl, KPROCESSOR_MODE access_mode)
 {
+        /* already mapped */
+//      mdl->mappedsystemva = MmGetMdlVirtualAddress(mdl);
         mdl->flags |= MDL_MAPPED_TO_SYSTEM_VA;
-        return MmGetMdlVirtualAddress(mdl);
+        /* what is the need for MDL_PARTIAL_HAS_BEEN_MAPPED? */
+        if (mdl->flags & MDL_PARTIAL)
+                mdl->flags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
+        return mdl->mappedsystemva;
 }

-STDCALL void *WRAP_EXPORT(MmMapLockedPagesSpecifyCache)
+wstdcall void *WRAP_EXPORT(MmMapLockedPagesSpecifyCache)
         (struct mdl *mdl, KPROCESSOR_MODE access_mode,
          enum memory_caching_type cache_type, void *base_address,
          ULONG bug_check, enum mm_page_priority priority)
@@ -2078 +2241 @@
         return MmMapLockedPages(mdl, access_mode);
 }

-STDCALL void WRAP_EXPORT(MmUnmapLockedPages)
+wstdcall void WRAP_EXPORT(MmUnmapLockedPages)
         (void *base, struct mdl *mdl)
 {
         mdl->flags &= ~MDL_MAPPED_TO_SYSTEM_VA;
         return;
 }

-STDCALL void WRAP_EXPORT(MmProbeAndLockPages)
+wstdcall void WRAP_EXPORT(MmProbeAndLockPages)
         (struct mdl *mdl, KPROCESSOR_MODE access_mode,
          enum lock_operation operation)
 {
+        /* already locked */
         mdl->flags |= MDL_PAGES_LOCKED;
         return;
 }

-STDCALL void WRAP_EXPORT(MmUnlockPages)
+wstdcall void WRAP_EXPORT(MmUnlockPages)
         (struct mdl *mdl)
 {
         mdl->flags &= ~MDL_PAGES_LOCKED;
         return;
 }
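
Together with MmMapLockedPages() above, these exports back the
standard NT probe/lock -> map -> unmap -> unlock sequence; in this
wrapper each step only toggles mdl->flags, since the buffer is
non-paged Linux memory to begin with. A sketch of the driver-side
sequence (KernelMode and IoReadAccess per the NT DDK; mdl is
hypothetical):

void *va;

MmProbeAndLockPages(mdl, KernelMode, IoReadAccess);
va = MmMapLockedPages(mdl, KernelMode);
/* ... access the buffer through va ... */
MmUnmapLockedPages(va, mdl);
MmUnlockPages(mdl);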

-STDCALL BOOLEAN WRAP_EXPORT(MmIsAddressValid)
+wstdcall BOOLEAN WRAP_EXPORT(MmIsAddressValid)
         (void *virt_addr)
 {
         if (virt_addr_valid(virt_addr))
@@ -2109 +2273 @@
                 return FALSE;
 }

-STDCALL void *WRAP_EXPORT(MmLockPagableDataSection)
+wstdcall void *WRAP_EXPORT(MmLockPagableDataSection)
         (void *address)
 {
         return address;
 }

-STDCALL void WRAP_EXPORT(MmUnlockPagableImageSection)
+wstdcall void WRAP_EXPORT(MmUnlockPagableImageSection)
         (void *handle)
 {
         return;
 }

-STDCALL NTSTATUS WRAP_EXPORT(ObReferenceObjectByHandle)
+wstdcall NTSTATUS WRAP_EXPORT(ObReferenceObjectByHandle)
         (void *handle, ACCESS_MASK desired_access, void *obj_type,
          KPROCESSOR_MODE access_mode, void **object, void *handle_info)
 {
         struct common_object_header *hdr;
         KIRQL irql;

-        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        DBGTRACE2("%p", handle);
+        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
         hdr = HANDLE_TO_HEADER(handle);
         hdr->ref_count++;
         *object = HEADER_TO_OBJECT(hdr);
-        kspin_unlock_irql(&ntoskernel_lock, irql);
+        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        DBGTRACE2("%p, %p, %d, %p", hdr, object, hdr->ref_count, *object);
         return STATUS_SUCCESS;
 }

 /* DDK doesn't say if return value should be before incrementing or
  * after incrementing reference count, but according to #reactos
  * devels, it should be return value after incrementing */
-_FASTCALL LONG WRAP_EXPORT(ObfReferenceObject)
-        (FASTCALL_DECL_1(void *object))
+wfastcall LONG WRAP_EXPORT(ObfReferenceObject)
+        (void *object)
 {
         struct common_object_header *hdr;
+        LONG ret;
         KIRQL irql;
-        LONG ret;

-        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
         hdr = OBJECT_TO_HEADER(object);
         ret = ++hdr->ref_count;
-        kspin_unlock_irql(&ntoskernel_lock, irql);
+        DBGTRACE2("%p, %d, %p", hdr, hdr->ref_count, object);
+        nt_spin_unlock_irql(&ntoskernel_lock, irql);
         return ret;
 }

-_FASTCALL void WRAP_EXPORT(ObfDereferenceObject)
-        (FASTCALL_DECL_1(void *object))
+wfastcall void WRAP_EXPORT(ObfDereferenceObject)
+        (void *object)
 {
         struct common_object_header *hdr;
-        KIRQL irql;
         int ref_count;
+        KIRQL irql;

         TRACEENTER2("object: %p", object);
-        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
         hdr = OBJECT_TO_HEADER(object);
         DBGTRACE2("hdr: %p", hdr);
         hdr->ref_count--;
         ref_count = hdr->ref_count;
-        kspin_unlock_irql(&ntoskernel_lock, irql);
+        DBGTRACE2("object: %p, %d", object, ref_count);
+        nt_spin_unlock_irql(&ntoskernel_lock, irql);
         if (ref_count < 0)
                 ERROR("invalid object: %p (%d)", object, ref_count);
         if (ref_count <= 0) {
@@ -2174 +2342 @@
         }
 }
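
Per the comment before ObfReferenceObject(), the value handed back is
the count after incrementing, so the first extra reference on a
singly-owned object yields 2. A sketch of a balanced take/drop pair
(object is hypothetical):

LONG count;

count = ObfReferenceObject(object);     /* post-increment value */
DBGTRACE2("object %p now has %d references", object, count);
/* ... use object ... */
ObfDereferenceObject(object);   /* may free it once the count hits 0 */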

-STDCALL NTSTATUS WRAP_EXPORT(ZwCreateFile)
+wstdcall NTSTATUS WRAP_EXPORT(ZwCreateFile)
         (void **handle, ULONG access_mask, struct object_attr *obj_attr,
          struct io_status_block *iosb, LARGE_INTEGER *size,
          ULONG file_attr, ULONG share_access, ULONG create_disposition,
@@ -2184 +2352 @@
         struct object_attr *oa;
         struct ansi_string ansi;
         struct wrap_bin_file *bin_file;
-        KIRQL irql;
         char *file_basename;
+        KIRQL irql;

-        TRACEENTER2("");
+        TRACEENTER2("%p", *handle);
         if (RtlUnicodeStringToAnsiString(&ansi, obj_attr->name, TRUE) !=
             STATUS_SUCCESS)
                 TRACEEXIT2(return STATUS_INSUFFICIENT_RESOURCES);
         DBGTRACE2("Filename: %s", ansi.buf);

-        irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
         nt_list_for_each_entry(header, &object_list, list) {
                 if (header->type != OBJECT_TYPE_FILE)
                         continue;
@@ -2203 +2371 @@
                         bin_file = oa->file;
                         *handle = header;
                         iosb->status = FILE_OPENED;
-                        iosb->status_info = bin_file->size;
-                        kspin_unlock_irql(&ntoskernel_lock, irql);
+                        iosb->info = bin_file->size;
+                        nt_spin_unlock_irql(&ntoskernel_lock, irql);
                         TRACEEXIT2(return STATUS_SUCCESS);
                 }
         }
-        kspin_unlock_irql(&ntoskernel_lock, irql);
+        nt_spin_unlock_irql(&ntoskernel_lock, irql);

         oa = allocate_object(sizeof(struct object_attr), OBJECT_TYPE_FILE,
                              ansi.buf);
         if (!oa) {
                 iosb->status = FILE_DOES_NOT_EXIST;
-                iosb->status_info = 0;
+                iosb->info = 0;
                 RtlFreeAnsiString(&ansi);
                 TRACEEXIT2(return STATUS_FAILURE);
         }
@@ -2230 +2398 @@
         if (bin_file) {
                 oa->file = bin_file;
                 iosb->status = FILE_OPENED;
-                iosb->status_info = bin_file->size;
+                iosb->info = bin_file->size;
                 RtlFreeAnsiString(&ansi);
                 TRACEEXIT2(return STATUS_SUCCESS);
         } else {
                 iosb->status = FILE_DOES_NOT_EXIST;
-                iosb->status_info = 0;
+                iosb->info = 0;
                 RtlFreeAnsiString(&ansi);
                 TRACEEXIT2(return STATUS_FAILURE);
         }
 }

-STDCALL NTSTATUS WRAP_EXPORT(ZwReadFile)
+wstdcall NTSTATUS WRAP_EXPORT(ZwReadFile)
         (void *handle, struct nt_event *event, void *apc_routine,
          void *apc_context, struct io_status_block *iosb, void *buffer,
          ULONG length, LARGE_INTEGER *byte_offset, ULONG *key)
 {
         struct object_attr *oa;
+        struct common_object_header *coh;
         ULONG count;
         size_t offset;
         struct wrap_bin_file *file;

-        oa = HANDLE_TO_OBJECT(handle);
+        DBGTRACE2("%p", handle);
+        coh = handle;
+        if (coh->type != OBJECT_TYPE_FILE) {
+                ERROR("handle %p is not for a file: %d", handle, coh->type);
+                TRACEEXIT2(return STATUS_FAILURE);
+        }
+        oa = HANDLE_TO_OBJECT(coh);
         file = oa->file;
         DBGTRACE2("file: %s (%d)", file->name, file->size);
         if (byte_offset)
@@ -2262 +2437 @@
         DBGTRACE2("count: %u, offset: %zu, length: %u", count, offset, length);
         memcpy(buffer, ((void *)file->data) + offset, count);
         iosb->status = STATUS_SUCCESS;
-        iosb->status_info = count;
+        iosb->info = count;
         TRACEEXIT2(return STATUS_SUCCESS);
 }

-STDCALL NTSTATUS WRAP_EXPORT(ZwClose)
+wstdcall NTSTATUS WRAP_EXPORT(ZwClose)
         (void *handle)
 {
         struct object_attr *oa;
+        struct common_object_header *coh;
         struct wrap_bin_file *bin_file;
-        struct common_object_header *coh;

-        coh = handle;
-        if (coh == NULL) {
+        DBGTRACE2("%p", handle);
+        if (handle == NULL) {
                 DBGTRACE1("");
                 TRACEEXIT2(return STATUS_SUCCESS);
         }
+        coh = handle;
+        oa = HANDLE_TO_OBJECT(handle);
         if (coh->type == OBJECT_TYPE_FILE) {
-                oa = HANDLE_TO_OBJECT(handle);
                 bin_file = oa->file;
                 free_bin_file(bin_file);
                 ObDereferenceObject(oa);
-        } else
-                WARNING("object type %d not implemented", coh->type);
+        } else if (coh->type == OBJECT_TYPE_NT_THREAD) {
+                DBGTRACE1("thread: %p", handle);
+        }
+        else
+                WARNING("closing handle %p not implemented", handle);
         TRACEEXIT2(return STATUS_SUCCESS);
 }
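
ZwCreateFile() above resolves the name against firmware images the
wrapper has already loaded rather than a real filesystem, and
ZwReadFile()/ZwClose() then operate on the returned handle. A hedged
sketch of the usual driver-side sequence; obj_attr, fw_buf and fw_len
are hypothetical, and the trailing ZwCreateFile arguments follow the
NT prototype whose tail is elided in the hunk above:

struct io_status_block iosb;
void *handle;

if (ZwCreateFile(&handle, GENERIC_READ, obj_attr, &iosb, NULL,
                 0, 0, FILE_OPEN, 0, NULL, 0) == STATUS_SUCCESS) {
        ZwReadFile(handle, NULL, NULL, NULL, &iosb,
                   fw_buf, fw_len, NULL, NULL);
        /* on success, iosb.info holds the number of bytes copied */
        ZwClose(handle);
}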

-STDCALL NTSTATUS WRAP_EXPORT(ZwQueryInformationFile)
+wstdcall NTSTATUS WRAP_EXPORT(ZwQueryInformationFile)
         (void *handle, struct io_status_block *iosb, void *info,
          ULONG length, enum file_info_class class)
 {
@@ -2296 +2475 @@
         struct file_name_info *fni;
         struct file_std_info *fsi;
         struct wrap_bin_file *file;
+        struct common_object_header *coh;

         TRACEENTER2("%p", handle);
+        coh = handle;
+        if (coh->type != OBJECT_TYPE_FILE) {
+                ERROR("handle %p is not for a file: %d", handle, coh->type);
+                TRACEEXIT2(return STATUS_FAILURE);
+        }
         oa = HANDLE_TO_OBJECT(handle);
         DBGTRACE2("attr: %p", oa);
         switch (class) {
@@ -2322 +2507 @@
         TRACEEXIT2(return STATUS_SUCCESS);
 }

-STDCALL NTSTATUS WRAP_EXPORT(ZwCreateKey)
+wstdcall NTSTATUS WRAP_EXPORT(ZwCreateKey)
         (void **handle, ACCESS_MASK desired_access, struct object_attr *attr,
          ULONG title_index, struct unicode_string *class,
          ULONG create_options, ULONG *disposition)
@@ -2337 +2522 @@
         return STATUS_SUCCESS;
 }

-STDCALL NTSTATUS WRAP_EXPORT(ZwOpenKey)
+wstdcall NTSTATUS WRAP_EXPORT(ZwOpenKey)
         (void **handle, ACCESS_MASK desired_access, struct object_attr *attr)
 {
         struct ansi_string ansi;
@@ -2350 +2535 @@
         return STATUS_SUCCESS;
 }

-STDCALL NTSTATUS WRAP_EXPORT(ZwSetValueKey)
+wstdcall NTSTATUS WRAP_EXPORT(ZwSetValueKey)
         (void *handle, struct unicode_string *name, ULONG title_index,
          ULONG type, void *data, ULONG data_size)
 {
@@ -2363 +2548 @@
         return STATUS_SUCCESS;
 }

-STDCALL NTSTATUS WRAP_EXPORT(ZwQueryValueKey)
+wstdcall NTSTATUS WRAP_EXPORT(ZwQueryValueKey)
         (void *handle, struct unicode_string *name,
          enum key_value_information_class class, void *info,
          ULONG length, ULONG *res_length)
 {
         struct ansi_string ansi;
-        if (RtlUnicodeStringToAnsiString(&ansi, name, TRUE) ==
-            STATUS_SUCCESS) {
+        if (RtlUnicodeStringToAnsiString(&ansi, name, TRUE) == STATUS_SUCCESS) {
                 DBGTRACE1("key: %s", ansi.buf);
                 RtlFreeAnsiString(&ansi);
         }
@@ -2378 +2562 @@
         return STATUS_INVALID_PARAMETER;
 }
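
The registry stubs above report success for creates, opens and writes,
while ZwQueryValueKey() only logs the key name and always fails with
STATUS_INVALID_PARAMETER, so a driver reading its configuration must
fall back to built-in defaults. A sketch of what a driver-side lookup
sees (attr, name, kvpi, kvpi_len and use_default_config are
hypothetical; KEY_READ and KeyValuePartialInformation per the NT DDK):

void *key;
ULONG res_length;
NTSTATUS status;

status = ZwOpenKey(&key, KEY_READ, attr);       /* stub: succeeds */
if (status == STATUS_SUCCESS)
        status = ZwQueryValueKey(key, name, KeyValuePartialInformation,
                                 kvpi, kvpi_len, &res_length);
if (status != STATUS_SUCCESS)
        use_default_config();   /* always taken with these stubs */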

-STDCALL NTSTATUS WRAP_EXPORT(WmiSystemControl)
+wstdcall NTSTATUS WRAP_EXPORT(WmiSystemControl)
         (struct wmilib_context *info, struct device_object *dev_obj,
          struct irp *irp, void *irp_disposition)
 {
@@ -2386 +2570 @@
         return STATUS_SUCCESS;
 }

-STDCALL NTSTATUS WRAP_EXPORT(WmiCompleteRequest)
+wstdcall NTSTATUS WRAP_EXPORT(WmiCompleteRequest)
         (struct device_object *dev_obj, struct irp *irp, NTSTATUS status,
          ULONG buffer_used, CCHAR priority_boost)
 {
@@ -2394 +2578 @@
         return STATUS_SUCCESS;
 }

-NOREGPARM NTSTATUS WRAP_EXPORT(WmiTraceMessage)
+noregparm NTSTATUS WRAP_EXPORT(WmiTraceMessage)
         (void *tracehandle, ULONG message_flags,
          void *message_guid, USHORT message_no, ...)
 {
@@ -2402 +2586 @@
         TRACEEXIT2(return STATUS_SUCCESS);
 }

-STDCALL NTSTATUS WRAP_EXPORT(WmiQueryTraceInformation)
+wstdcall NTSTATUS WRAP_EXPORT(WmiQueryTraceInformation)
         (enum trace_information_class trace_info_class, void *trace_info,
          ULONG *req_length, void *buf)
 {
@@ -2410 +2594 @@
         TRACEEXIT2(return STATUS_SUCCESS);
 }
2596
 
2413
 
/* this function can't be STDCALL as it takes variable number of args */
2414
 
NOREGPARM ULONG WRAP_EXPORT(DbgPrint)
 
2597
/* this function can't be wstdcall as it takes variable number of args */
 
2598
noregparm ULONG WRAP_EXPORT(DbgPrint)
2415
2599
        (char *format, ...)
2416
2600
{
2417
2601
#ifdef DEBUG
2418
2602
        va_list args;
2419
 
        static char buf[1024];
 
2603
        static char buf[100];
2420
2604
 
2421
2605
        va_start(args, format);
2422
2606
        vsnprintf(buf, sizeof(buf), format, args);
2426
2610
        return STATUS_SUCCESS;
2427
2611
}
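
DbgPrint() funnels a Windows driver's printf-style diagnostics through
vsnprintf() into a fixed static buffer (shrunk here to 100 bytes), so
longer messages are truncated and, the buffer being static, concurrent
callers share it. Typical driver-side use (unit and fw_version are
hypothetical):

DbgPrint("wlan%d: firmware version %s\n", unit, fw_version);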

-STDCALL void WRAP_EXPORT(KeBugCheckEx)
+wstdcall void WRAP_EXPORT(KeBugCheckEx)
         (ULONG code, ULONG_PTR param1, ULONG_PTR param2,
          ULONG_PTR param3, ULONG_PTR param4)
 {
@@ -2434 +2618 @@
         return;
 }

-STDCALL void WRAP_EXPORT(ExSystemTimeToLocalTime)
+wstdcall void WRAP_EXPORT(ExSystemTimeToLocalTime)
         (LARGE_INTEGER *system_time, LARGE_INTEGER *local_time)
 {
         *local_time = *system_time;
 }

-STDCALL ULONG WRAP_EXPORT(ExSetTimerResolution)
+wstdcall ULONG WRAP_EXPORT(ExSetTimerResolution)
         (ULONG time, BOOLEAN set)
 {
         /* yet another "innovation"! */
         return time;
 }

-STDCALL void WRAP_EXPORT(DbgBreakPoint)(void){UNIMPL();}
-STDCALL void WRAP_EXPORT(_except_handler3)(void){UNIMPL();}
-STDCALL void WRAP_EXPORT(__C_specific_handler)(void){UNIMPL();}
+wstdcall void WRAP_EXPORT(DbgBreakPoint)(void){UNIMPL();}
+wstdcall void WRAP_EXPORT(_except_handler3)(void){UNIMPL();}
+wstdcall void WRAP_EXPORT(__C_specific_handler)(void){UNIMPL();}
 void WRAP_EXPORT(_purecall)(void) { UNIMPL(); }

 #include "ntoskernel_exports.h"