~ubuntu-branches/ubuntu/hardy/ndiswrapper/hardy


Viewing changes to driver/ntoskernel.c

  • Committer: Bazaar Package Importer
  • Author(s): Matthias Klose
  • Date: 2007-12-07 20:42:35 UTC
  • mfrom: (1.2.9 upstream)
  • Revision ID: james.westby@ubuntu.com-20071207204235-s43f889d3h1u6vrl
Tags: 1.50-1ubuntu1
* Merge with Debian; remaining changes:
  - Build for lpia.
  - debian/control:
    + Update description to point out that the kernel source package is
      not required with the standard Ubuntu kernel.
    + Change the Maintainer address.
  - debian/control:
    + Drop ndiswrapper-source.

--- a/driver/ntoskernel.c
+++ b/driver/ntoskernel.c
@@ -37,8 +37,8 @@
 };
 
 /* everything here is for all drivers/devices - not per driver/device */
-static NT_SPIN_LOCK dispatcher_lock;
-NT_SPIN_LOCK ntoskernel_lock;
+static spinlock_t dispatcher_lock;
+spinlock_t ntoskernel_lock;
 static void *mdl_cache;
 static struct nt_list wrap_mdl_list;
 
@@ -46,7 +46,7 @@
 static void kdpc_worker(worker_param_t dummy);
 
 static struct nt_list kdpc_list;
-static NT_SPIN_LOCK kdpc_list_lock;
+static spinlock_t kdpc_list_lock;
 
 static struct nt_list callback_objects;
 
@@ -62,15 +62,14 @@
 
 static work_struct_t ntos_work;
 static struct nt_list ntos_work_list;
-static NT_SPIN_LOCK ntos_work_lock;
+static spinlock_t ntos_work_lock;
 static void ntos_work_worker(worker_param_t dummy);
 static struct nt_thread *ntos_worker_thread;
-
-NT_SPIN_LOCK irp_cancel_lock;
+spinlock_t irp_cancel_lock;
+static NT_SPIN_LOCK nt_list_lock;
 
 extern struct nt_list wrap_drivers;
-static struct nt_list wrap_timer_list;
-NT_SPIN_LOCK timer_lock;
+static struct nt_slist wrap_timer_slist;
 
 /* compute ticks (100ns) since 1601 until when system booted into
  * wrap_ticks_to_boot */
@@ -86,13 +85,12 @@
 
 WIN_SYMBOL_MAP("NlsMbCodePageTag", FALSE)
 
-#ifdef USE_NTOS_WQ
+#ifdef NTOS_WQ
 workqueue_struct_t *ntos_wq;
 #endif
 
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && \
-        !defined(preempt_enable) && !defined(CONFIG_PREEMPT)
-volatile int preempt_count;
+#ifdef WRAP_PREEMPT
+DEFINE_PER_CPU(irql_info_t, irql_info);
 #endif
 
 #if defined(CONFIG_X86_64)
@@ -114,7 +112,6 @@
 {
         struct common_object_header *hdr;
         void *body;
-        KIRQL irql;
 
         /* we pad header as prefix to body */
         hdr = ExAllocatePoolWithTag(NonPagedPool, OBJECT_SIZE(size), 0);
@@ -136,14 +133,14 @@
         }
         hdr->type = type;
         hdr->ref_count = 1;
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         /* threads are looked up often (in KeWaitForXXX), so optimize
          * for fast lookups of threads */
         if (type == OBJECT_TYPE_NT_THREAD)
                 InsertHeadList(&object_list, &hdr->list);
         else
                 InsertTailList(&object_list, &hdr->list);
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
         body = HEADER_TO_OBJECT(hdr);
         TRACE3("allocated hdr: %p, body: %p", hdr, body);
         return body;
@@ -152,12 +149,11 @@
 void free_object(void *object)
 {
         struct common_object_header *hdr;
-        KIRQL irql;
 
         hdr = OBJECT_TO_HEADER(object);
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         RemoveEntryList(&hdr->list);
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
         TRACE3("freed hdr: %p, body: %p", hdr, object);
         if (hdr->name.buf)
                 ExFreePool(hdr->name.buf);
@@ -167,19 +163,17 @@
 static int add_bus_driver(const char *name)
 {
         struct bus_driver *bus_driver;
-        KIRQL irql;
 
-        bus_driver = kmalloc(sizeof(*bus_driver), GFP_KERNEL);
+        bus_driver = kzalloc(sizeof(*bus_driver), GFP_KERNEL);
         if (!bus_driver) {
                 ERROR("couldn't allocate memory");
                 return -ENOMEM;
         }
-        memset(bus_driver, 0, sizeof(*bus_driver));
         strncpy(bus_driver->name, name, sizeof(bus_driver->name));
         bus_driver->name[sizeof(bus_driver->name)-1] = 0;
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         InsertTailList(&bus_driver_list, &bus_driver->list);
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
         TRACE1("bus driver %s is at %p", name, &bus_driver->drv_obj);
         return STATUS_SUCCESS;
 }
@@ -188,15 +182,16 @@
 {
         struct bus_driver *bus_driver;
         struct driver_object *drv_obj;
-        KIRQL irql;
 
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         drv_obj = NULL;
         nt_list_for_each_entry(bus_driver, &bus_driver_list, list) {
-                if (strcmp(bus_driver->name, name) == 0)
+                if (strcmp(bus_driver->name, name) == 0) {
                         drv_obj = &bus_driver->drv_obj;
+                        break;
+                }
         }
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
         return drv_obj;
 }
 
@@ -284,6 +279,12 @@
         return ExfInterlockedRemoveTailList(head, lock);
 }
 
+wfastcall void WIN_FUNC(InitializeSListHead,1)
+        (nt_slist_header *head)
+{
+        memset(head, 0, sizeof(*head));
+}
+
 wfastcall struct nt_slist *WIN_FUNC(ExInterlockedPushEntrySList,3)
         (nt_slist_header *head, struct nt_slist *entry, NT_SPIN_LOCK *lock)
 {
@@ -298,7 +299,7 @@
 {
         struct nt_slist *ret;
 
-        ret = PushEntrySList(head, entry, &ntoskernel_lock);
+        ret = PushEntrySList(head, entry, &nt_list_lock);
         return ret;
 }
 
@@ -307,7 +308,7 @@
 {
         struct nt_slist *ret;
 
-        ret = PushEntrySList(head, entry, &ntoskernel_lock);
+        ret = PushEntrySList(head, entry, &nt_list_lock);
         return ret;
 }
 
@@ -325,7 +326,7 @@
 {
         struct nt_slist *ret;
 
-        ret = PopEntrySList(head, &ntoskernel_lock);
+        ret = PopEntrySList(head, &nt_list_lock);
         return ret;
 }
 
@@ -334,7 +335,7 @@
 {
         struct nt_slist *ret;
 
-        ret = PopEntrySList(head, &ntoskernel_lock);
+        ret = PopEntrySList(head, &nt_list_lock);
         return ret;
 }
 
@@ -376,7 +377,8 @@
         (LARGE_INTEGER volatile *plint, ULONG n)
 {
         unsigned long flags;
-        save_local_irq(flags);
+
+        local_irq_save(flags);
 #ifdef CONFIG_X86_64
         __asm__ __volatile__(
                 "\n"
@@ -396,7 +398,7 @@
                 : "m" (n), "A" (*plint)
                 : "ebx", "ecx");
 #endif
-        restore_local_irq(flags);
+        local_irq_restore(flags);
 }
 
 static void initialize_object(struct dispatcher_header *dh, enum dh_type type,
@@ -420,21 +422,19 @@
         BUG_ON(wrap_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
         BUG_ON(nt_timer->wrap_timer_magic != WRAP_TIMER_MAGIC);
 #endif
+        kdpc = nt_timer->kdpc;
+        if (kdpc)
+                queue_kdpc(kdpc);
+        KeSetEvent((struct nt_event *)nt_timer, 0, FALSE);
         if (wrap_timer->repeat)
                 mod_timer(&wrap_timer->timer, jiffies + wrap_timer->repeat);
-        KeSetEvent((struct nt_event *)nt_timer, 0, FALSE);
-        kdpc = nt_timer->kdpc;
-        if (kdpc && kdpc->func)
-                queue_kdpc(kdpc);
-
         TIMEREXIT(return);
 }
 
 void wrap_init_timer(struct nt_timer *nt_timer, enum timer_type type,
-                     struct kdpc *kdpc, struct ndis_miniport_block *nmb)
+                     struct ndis_mp_block *nmb)
 {
         struct wrap_timer *wrap_timer;
-        KIRQL irql;
 
         /* TODO: if a timer is initialized more than once, we allocate
          * memory for wrap_timer more than once for the same nt_timer,
@@ -447,7 +447,7 @@
          * freed, so we use slack_kmalloc so it gets freed when driver
          * is unloaded */
         if (nmb)
-                wrap_timer = kmalloc(sizeof(*wrap_timer), gfp_irql());
+                wrap_timer = kmalloc(sizeof(*wrap_timer), irql_gfp());
         else
                 wrap_timer = slack_kmalloc(sizeof(*wrap_timer));
         if (!wrap_timer) {
@@ -464,16 +464,19 @@
         wrap_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
 #endif
         nt_timer->wrap_timer = wrap_timer;
-        nt_timer->kdpc = kdpc;
+        nt_timer->kdpc = NULL;
         initialize_object(&nt_timer->dh, type, 0);
         nt_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
-        irql = nt_spin_lock_irql(&timer_lock, DISPATCH_LEVEL);
-        if (nmb)
-                InsertTailList(&nmb->wnd->wrap_timer_list, &wrap_timer->list);
-        else
-                InsertTailList(&wrap_timer_list, &wrap_timer->list);
-        nt_spin_unlock_irql(&timer_lock, irql);
         TIMERTRACE("timer %p (%p)", wrap_timer, nt_timer);
+        spin_lock_bh(&ntoskernel_lock);
+        if (nmb) {
+                wrap_timer->slist.next = nmb->wnd->wrap_timer_slist.next;
+                nmb->wnd->wrap_timer_slist.next = &wrap_timer->slist;
+        } else {
+                wrap_timer->slist.next = wrap_timer_slist.next;
+                wrap_timer_slist.next = &wrap_timer->slist;
+        }
+        spin_unlock_bh(&ntoskernel_lock);
         TIMEREXIT(return);
 }
 
@@ -481,14 +484,14 @@
         (struct nt_timer *nt_timer, enum timer_type type)
 {
         TIMERENTER("%p", nt_timer);
-        wrap_init_timer(nt_timer, type, NULL, NULL);
+        wrap_init_timer(nt_timer, type, NULL);
 }
 
 wstdcall void WIN_FUNC(KeInitializeTimer,1)
         (struct nt_timer *nt_timer)
 {
         TIMERENTER("%p", nt_timer);
-        wrap_init_timer(nt_timer, NotificationTimer, NULL, NULL);
+        wrap_init_timer(nt_timer, NotificationTimer, NULL);
 }
 
 /* expires and repeat are in HZ */
@@ -500,7 +503,6 @@
         TIMERENTER("%p, %lu, %lu, %p, %lu",
                    nt_timer, expires_hz, repeat_hz, kdpc, jiffies);
 
-        KeClearEvent((struct nt_event *)nt_timer);
         wrap_timer = nt_timer->wrap_timer;
         TIMERTRACE("%p", wrap_timer);
 #ifdef TIMER_DEBUG
@@ -508,7 +510,7 @@
                 WARNING("bad timers: %p, %p, %p", wrap_timer, nt_timer,
                         wrap_timer->nt_timer);
         if (nt_timer->wrap_timer_magic != WRAP_TIMER_MAGIC) {
-                WARNING("Buggy Windows timer didn't initialize timer %p",
+                WARNING("buggy Windows timer didn't initialize timer %p",
                         nt_timer);
                 return FALSE;
         }
@@ -518,8 +520,8 @@
                 wrap_timer->wrap_timer_magic = WRAP_TIMER_MAGIC;
         }
 #endif
-        if (kdpc)
-                nt_timer->kdpc = kdpc;
+        KeClearEvent((struct nt_event *)nt_timer);
+        nt_timer->kdpc = kdpc;
         wrap_timer->repeat = repeat_hz;
         if (mod_timer(&wrap_timer->timer, jiffies + expires_hz))
                 TIMEREXIT(return TRUE);
@@ -534,7 +536,7 @@
         unsigned long expires_hz, repeat_hz;
 
         TIMERENTER("%p, %Ld, %d", nt_timer, duetime_ticks, period_ms);
-        expires_hz = SYSTEM_TIME_TO_HZ(duetime_ticks) + 1;
+        expires_hz = SYSTEM_TIME_TO_HZ(duetime_ticks);
         repeat_hz = MSEC_TO_HZ(period_ms);
         return wrap_set_timer(nt_timer, expires_hz, repeat_hz, kdpc);
 }
@@ -567,8 +569,8 @@
          * won't be re-armed after deleting */
         wrap_timer->repeat = 0;
         ret = del_timer(&wrap_timer->timer);
-        if (nt_timer->kdpc)
-                dequeue_kdpc(nt_timer->kdpc);
+        /* the documentation for KeCancelTimer suggests the DPC is
+         * deqeued, but actually DPC is left to run */
         if (ret)
                 TIMEREXIT(return TRUE);
         else
@@ -591,6 +593,7 @@
         memset(kdpc, 0, sizeof(*kdpc));
         kdpc->func = func;
         kdpc->ctx  = ctx;
+        InitializeListHead(&kdpc->list);
 }
 
 static void kdpc_worker(worker_param_t dummy)
@@ -601,8 +604,9 @@
         KIRQL irql;
 
         WORKENTER("");
+        irql = raise_irql(DISPATCH_LEVEL);
         while (1) {
-                nt_spin_lock_irqsave(&kdpc_list_lock, flags);
+                spin_lock_irqsave(&kdpc_list_lock, flags);
                 entry = RemoveHeadList(&kdpc_list);
                 if (entry) {
                         kdpc = container_of(entry, struct kdpc, list);
@@ -610,15 +614,16 @@
                         kdpc->queued = 0;
                 } else
                         kdpc = NULL;
-                nt_spin_unlock_irqrestore(&kdpc_list_lock, flags);
+                spin_unlock_irqrestore(&kdpc_list_lock, flags);
                 if (!kdpc)
                         break;
-                irql = raise_irql(DISPATCH_LEVEL);
                 WORKTRACE("%p, %p, %p, %p, %p", kdpc, kdpc->func, kdpc->ctx,
                           kdpc->arg1, kdpc->arg2);
+                assert_irql(_irql_ == DISPATCH_LEVEL);
                 LIN2WIN4(kdpc->func, kdpc, kdpc->ctx, kdpc->arg1, kdpc->arg2);
-                lower_irql(irql);
+                assert_irql(_irql_ == DISPATCH_LEVEL);
         }
+        lower_irql(irql);
         WORKEXIT(return);
 }
 
@@ -634,7 +639,7 @@
         unsigned long flags;
 
         WORKENTER("%p", kdpc);
-        nt_spin_lock_irqsave(&kdpc_list_lock, flags);
+        spin_lock_irqsave(&kdpc_list_lock, flags);
         if (kdpc->queued)
                 ret = FALSE;
         else {
@@ -645,7 +650,7 @@
                 kdpc->queued = 1;
                 ret = TRUE;
         }
-        nt_spin_unlock_irqrestore(&kdpc_list_lock, flags);
+        spin_unlock_irqrestore(&kdpc_list_lock, flags);
         if (ret == TRUE)
                 schedule_ntos_work(&kdpc_work);
         WORKTRACE("%d", ret);
@@ -658,14 +663,14 @@
         unsigned long flags;
 
         WORKENTER("%p", kdpc);
-        nt_spin_lock_irqsave(&kdpc_list_lock, flags);
+        spin_lock_irqsave(&kdpc_list_lock, flags);
         if (kdpc->queued) {
                 RemoveEntryList(&kdpc->list);
                 kdpc->queued = 0;
                 ret = TRUE;
         } else
                 ret = FALSE;
-        nt_spin_unlock_irqrestore(&kdpc_list_lock, flags);
+        spin_unlock_irqrestore(&kdpc_list_lock, flags);
         WORKTRACE("%d", ret);
         return ret;
 }
@@ -695,12 +700,11 @@
 {
         struct ntos_work_item *ntos_work_item;
         struct nt_list *cur;
-        KIRQL irql;
 
         while (1) {
-                irql = nt_spin_lock_irql(&ntos_work_lock, DISPATCH_LEVEL);
+                spin_lock_bh(&ntos_work_lock);
                 cur = RemoveHeadList(&ntos_work_list);
-                nt_spin_unlock_irql(&ntos_work_lock, irql);
+                spin_unlock_bh(&ntos_work_lock);
                 if (!cur)
                         break;
                 ntos_work_item = container_of(cur, struct ntos_work_item, list);
@@ -717,10 +721,9 @@
 int schedule_ntos_work_item(NTOS_WORK_FUNC func, void *arg1, void *arg2)
 {
         struct ntos_work_item *ntos_work_item;
-        KIRQL irql;
 
         WORKENTER("adding work: %p, %p, %p", func, arg1, arg2);
-        ntos_work_item = kmalloc(sizeof(*ntos_work_item), gfp_irql());
+        ntos_work_item = kmalloc(sizeof(*ntos_work_item), irql_gfp());
         if (!ntos_work_item) {
                 ERROR("couldn't allocate memory");
                 return -ENOMEM;
@@ -728,9 +731,9 @@
         ntos_work_item->func = func;
         ntos_work_item->arg1 = arg1;
         ntos_work_item->arg2 = arg2;
-        irql = nt_spin_lock_irql(&ntos_work_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntos_work_lock);
         InsertTailList(&ntos_work_list, &ntos_work_item->list);
-        nt_spin_unlock_irql(&ntos_work_lock, irql);
+        spin_unlock_bh(&ntos_work_lock);
         schedule_ntos_work(&ntos_work);
         WORKEXIT(return 0);
 }
@@ -805,47 +808,25 @@
         void *addr;
 
         ENTER4("pool_type: %d, size: %lu, tag: 0x%x", pool_type, size, tag);
+        assert_irql(_irql_ <= DISPATCH_LEVEL);
         if (size < PAGE_SIZE)
-                addr = kmalloc(size, gfp_irql());
+                addr = kmalloc(size, irql_gfp());
         else {
-                KIRQL irql = current_irql();
-
-                if (irql < DISPATCH_LEVEL) {
+                if (irql_gfp() & GFP_ATOMIC) {
+                        addr = __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM,
+                                         PAGE_KERNEL);
+                        TRACE1("%p, %lu", addr, size);
+                } else {
                         addr = vmalloc(size);
                         TRACE1("%p, %lu", addr, size);
-                } else if (irql == DISPATCH_LEVEL) {
-                        addr = __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM,
-                                         PAGE_KERNEL);
-                        TRACE1("%p, %lu", addr, size);
-                } else {
-                        TRACE1("Windows driver allocating %lu bytes in "
-                               "interrupt context: 0x%x", size, preempt_count());
-                        DBG_BLOCK(2) {
-                                dump_stack();
-                        }
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
-                        /* 2.6.19+ kernels don't allow __vmalloc (even
-                         * with GFP_ATOMIC) in interrupt context */
-                        addr = NULL;
-#else
-                        addr = __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM,
-                                         PAGE_KERNEL);
-#endif
-                        if (addr)
-                                TRACE1("%p, %lu", addr, size);
-                        else
-                                WARNING("couldn't allocate %lu bytes of memory "
-                                        "at %d", size, irql);
                 }
         }
-
         DBG_BLOCK(1) {
                 if (addr)
                         TRACE4("addr: %p, %lu", addr, size);
                 else
                         TRACE1("failed: %lu", size);
         }
-
         return addr;
 }
 WIN_FUNC_DECL(ExAllocatePoolWithTag,3)
@@ -854,11 +835,11 @@
         (void *addr, ULONG tag)
 {
         TRACE4("%p", addr);
-        if ((unsigned long)addr >= VMALLOC_START &&
-            (unsigned long)addr < VMALLOC_END)
+        if ((unsigned long)addr < VMALLOC_START ||
+            (unsigned long)addr >= VMALLOC_END)
+                kfree(addr);
+        else
                 vfree(addr);
-        else
-                kfree(addr);
 
         EXIT4(return);
 }
@@ -923,18 +904,17 @@
          BOOLEAN create, BOOLEAN allow_multiple_callbacks)
 {
         struct callback_object *obj;
-        KIRQL irql;
 
         ENTER2("");
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         nt_list_for_each_entry(obj, &callback_objects, callback_funcs) {
                 if (obj->attributes == attributes) {
-                        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+                        spin_unlock_bh(&ntoskernel_lock);
                         *object = obj;
                         return STATUS_SUCCESS;
                 }
         }
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
         obj = allocate_object(sizeof(struct callback_object),
                               OBJECT_TYPE_CALLBACK, NULL);
         if (!obj)
@@ -960,7 +940,7 @@
                 nt_spin_unlock_irql(&object->lock, irql);
                 EXIT2(return NULL);
         }
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        nt_spin_unlock_irql(&object->lock, irql);
         callback = kmalloc(sizeof(*callback), GFP_KERNEL);
         if (!callback) {
                 ERROR("couldn't allocate memory");
@@ -1095,14 +1075,14 @@
          * depending on how to satisfy wait. If all of them can be
          * grabbed, we will grab them in the next loop below */
 
-        nt_spin_lock_bh(&dispatcher_lock);
+        spin_lock_bh(&dispatcher_lock);
         for (i = wait_count = 0; i < count; i++) {
                 dh = object[i];
                 EVENTTRACE("%p: event %p (%d)", current, dh, dh->signal_state);
                 /* wait_type == 1 for WaitAny, 0 for WaitAll */
                 if (grab_object(dh, current, wait_type)) {
                         if (wait_type == WaitAny) {
-                                nt_spin_unlock_bh(&dispatcher_lock);
+                                spin_unlock_bh(&dispatcher_lock);
                                 EVENTEXIT(return STATUS_WAIT_0 + i);
                         }
                 } else {
@@ -1112,7 +1092,7 @@
         }
 
         if (timeout && *timeout == 0 && wait_count) {
-                nt_spin_unlock_bh(&dispatcher_lock);
+                spin_unlock_bh(&dispatcher_lock);
                 EVENTEXIT(return STATUS_TIMEOUT);
         }
 
@@ -1136,7 +1116,7 @@
                         InsertTailList(&dh->wait_blocks, &wb[i].list);
                 }
         }
-        nt_spin_unlock_bh(&dispatcher_lock);
+        spin_unlock_bh(&dispatcher_lock);
         if (wait_count == 0)
                 EVENTEXIT(return STATUS_SUCCESS);
 
@@ -1144,16 +1124,23 @@
         if (timeout == NULL)
                 wait_hz = 0;
         else
-                wait_hz = SYSTEM_TIME_TO_HZ(*timeout) + 1;
+                wait_hz = SYSTEM_TIME_TO_HZ(*timeout);
 
-        assert(current_irql() < DISPATCH_LEVEL);
+        DBG_BLOCK(2) {
+                KIRQL irql = current_irql();
+                if (irql >= DISPATCH_LEVEL) {
+                        TRACE2("wait in atomic context: %Lu, %lu, %d, %ld",
+                               *timeout, wait_hz, in_atomic(), in_interrupt());
+                }
+        }
+        assert_irql(_irql_ < DISPATCH_LEVEL);
         EVENTTRACE("%p: sleep for %ld on %p", current, wait_hz, &wait_done);
         /* we don't honor 'alertable' - according to decription for
          * this, even if waiting in non-alertable state, thread may be
          * alerted in some circumstances */
         while (wait_count) {
                 res = wait_condition(wait_done, wait_hz, TASK_INTERRUPTIBLE);
-                nt_spin_lock_bh(&dispatcher_lock);
+                spin_lock_bh(&dispatcher_lock);
                 EVENTTRACE("%p woke up: %d, %d", current, res, wait_done);
                 /* the event may have been set by the time
                  * wrap_wait_event returned and spinlock obtained, so
@@ -1166,9 +1153,10 @@
                                         continue;
                                 EVENTTRACE("%p: timedout, dequeue %p (%p)",
                                            current, object[i], wb[i].object);
+                                assert(wb[i].object == NULL);
                                 RemoveEntryList(&wb[i].list);
                         }
-                        nt_spin_unlock_bh(&dispatcher_lock);
+                        spin_unlock_bh(&dispatcher_lock);
                         if (res < 0)
                                 EVENTEXIT(return STATUS_ALERTED);
                         else
@@ -1194,12 +1182,12 @@
                                         if (wb[j].thread && !wb[j].object)
                                                 RemoveEntryList(&wb[j].list);
                                 }
-                                nt_spin_unlock_bh(&dispatcher_lock);
+                                spin_unlock_bh(&dispatcher_lock);
                                 EVENTEXIT(return STATUS_WAIT_0 + i);
                         }
                 }
                 wait_done = 0;
-                nt_spin_unlock_bh(&dispatcher_lock);
+                spin_unlock_bh(&dispatcher_lock);
                 if (wait_count == 0)
                         EVENTEXIT(return STATUS_SUCCESS);
 
@@ -1239,12 +1227,12 @@
         EVENTENTER("%p, %d", nt_event, nt_event->dh.type);
         if (wait == TRUE)
                 WARNING("wait = %d, not yet implemented", wait);
-        nt_spin_lock_bh(&dispatcher_lock);
+        spin_lock_bh(&dispatcher_lock);
         old_state = nt_event->dh.signal_state;
         nt_event->dh.signal_state = 1;
         if (old_state == 0)
                 object_signalled(&nt_event->dh);
-        nt_spin_unlock_bh(&dispatcher_lock);
+        spin_unlock_bh(&dispatcher_lock);
         EVENTEXIT(return old_state);
 }
 
@@ -1252,9 +1240,7 @@
         (struct nt_event *nt_event)
 {
         EVENTENTER("%p", nt_event);
-        nt_spin_lock_bh(&dispatcher_lock);
         nt_event->dh.signal_state = 0;
-        nt_spin_unlock_bh(&dispatcher_lock);
         EVENTEXIT(return);
 }
 
@@ -1264,11 +1250,7 @@
         LONG old_state;
 
         EVENTENTER("%p", nt_event);
-        nt_spin_lock_bh(&dispatcher_lock);
-        old_state = nt_event->dh.signal_state;
-        nt_event->dh.signal_state = 0;
-        nt_spin_unlock_bh(&dispatcher_lock);
-        EVENTTRACE("%d", old_state);
+        old_state = xchg(&nt_event->dh.signal_state, 0);
         EVENTEXIT(return old_state);
 }
 
@@ -1277,9 +1259,7 @@
 {
         LONG state;
 
-        nt_spin_lock_bh(&dispatcher_lock);
         state = nt_event->dh.signal_state;
-        nt_spin_unlock_bh(&dispatcher_lock);
         EVENTTRACE("%d", state);
         return state;
 }
@@ -1288,14 +1268,12 @@
         (struct nt_mutex *mutex, ULONG level)
 {
         EVENTENTER("%p", mutex);
-        nt_spin_lock_bh(&dispatcher_lock);
         initialize_object(&mutex->dh, MutexObject, 1);
         mutex->dh.size = sizeof(*mutex);
         InitializeListHead(&mutex->list);
         mutex->abandoned = FALSE;
         mutex->apc_disable = 1;
         mutex->owner_thread = NULL;
-        nt_spin_unlock_bh(&dispatcher_lock);
         EVENTEXIT(return);
 }
 
@@ -1309,7 +1287,7 @@
         if (wait == TRUE)
                 WARNING("wait: %d", wait);
         thread = current;
-        nt_spin_lock_bh(&dispatcher_lock);
+        spin_lock_bh(&dispatcher_lock);
         EVENTTRACE("%p, %p, %p, %d", mutex, thread, mutex->owner_thread,
                    mutex->dh.signal_state);
         if ((mutex->owner_thread == thread) && (mutex->dh.signal_state <= 0)) {
@@ -1325,7 +1303,7 @@
         }
         EVENTTRACE("%p, %p, %p, %d", mutex, thread, mutex->owner_thread,
                    mutex->dh.signal_state);
-        nt_spin_unlock_bh(&dispatcher_lock);
+        spin_unlock_bh(&dispatcher_lock);
         EVENTEXIT(return ret);
 }
 
@@ -1349,7 +1327,7 @@
         LONG ret;
 
         EVENTENTER("%p", semaphore);
-        nt_spin_lock_bh(&dispatcher_lock);
+        spin_lock_bh(&dispatcher_lock);
         ret = semaphore->dh.signal_state;
         assert(ret >= 0);
         if (semaphore->dh.signal_state + adjustment <= semaphore->limit)
@@ -1361,7 +1339,7 @@
         }
         if (semaphore->dh.signal_state > 0)
                 object_signalled(&semaphore->dh);
-        nt_spin_unlock_bh(&dispatcher_lock);
+        spin_unlock_bh(&dispatcher_lock);
         EVENTEXIT(return ret);
 }
 
@@ -1374,7 +1352,7 @@
         if (wait_mode != 0)
                 ERROR("invalid wait_mode %d", wait_mode);
 
-        timeout = SYSTEM_TIME_TO_HZ(*interval) + 1;
+        timeout = SYSTEM_TIME_TO_HZ(*interval);
         EVENTTRACE("%p, %Ld, %ld", current, *interval, timeout);
         if (timeout <= 0)
                 EVENTEXIT(return STATUS_SUCCESS);
@@ -1425,16 +1403,30 @@
         return jiffies;
 }
 
+wstdcall KAFFINITY WIN_FUNC(KeQueryActiveProcessors,0)
+        (void)
+{
+        int i, n;
+        KAFFINITY bits = 0;
+#ifdef num_online_cpus
+        n = num_online_cpus();
+#else
+        n = NR_CPUS;
+#endif
+        for (i = 0; i < n; i++)
+                bits = (bits << 1) | 1;
+        return bits;
+}
+
 struct nt_thread *get_current_nt_thread(void)
 {
         struct task_struct *task = current;
         struct nt_thread *thread;
         struct common_object_header *header;
-        KIRQL irql;
 
         TRACE6("task: %p", task);
         thread = NULL;
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         nt_list_for_each_entry(header, &object_list, list) {
                 TRACE6("%p, %d", header, header->type);
                 if (header->type != OBJECT_TYPE_NT_THREAD)
@@ -1446,7 +1438,7 @@
                 else
                         thread = NULL;
         }
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
         if (thread == NULL)
                 TRACE4("couldn't find thread for task %p, %d", task, task->pid);
         TRACE6("%p", thread);
@@ -1457,11 +1449,10 @@
 {
         struct task_struct *task;
         struct common_object_header *header;
-        KIRQL irql;
 
         TRACE6("%p", thread);
         task = NULL;
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         nt_list_for_each_entry(header, &object_list, list) {
                 TRACE6("%p, %d", header, header->type);
                 if (header->type != OBJECT_TYPE_NT_THREAD)
@@ -1471,7 +1462,7 @@
                         break;
                 }
         }
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
         if (task == NULL)
                 TRACE2("%p: couldn't find task for %p", current, thread);
         return task;
@@ -1480,8 +1471,7 @@
 static struct nt_thread *create_nt_thread(struct task_struct *task)
 {
         struct nt_thread *thread;
-        thread = allocate_object(sizeof(*thread),
-                                 OBJECT_TYPE_NT_THREAD, NULL);
+        thread = allocate_object(sizeof(*thread), OBJECT_TYPE_NT_THREAD, NULL);
         if (!thread) {
                 ERROR("couldn't allocate thread object");
                 EXIT2(return NULL);
@@ -1513,15 +1503,21 @@
         struct task_struct *task;
 
         TRACE2("%p", thread);
+#ifdef CONFIG_X86_64
         /* sis163u driver for amd64 passes 0x1f from thread created by
          * PsCreateSystemThread - no idea what is 0x1f */
         if (thread == (void *)0x1f)
                 thread = get_current_nt_thread();
-        if (!thread)
+#endif
+        if (!thread) {
+                TRACE2("invalid thread");
                 EXIT2(return LOW_REALTIME_PRIORITY);
+        }
         task = get_nt_thread_task(thread);
-        if (!task)
+        if (!task) {
+                TRACE2("couldn't find task for thread: %p", thread);
                 EXIT2(return LOW_REALTIME_PRIORITY);
+        }
 
         if (thread_priority(thread->task) <= 0)
                 prio = LOW_PRIORITY;
@@ -1541,13 +1537,19 @@
         struct task_struct *task;
 
         TRACE2("thread: %p, priority = %u", thread, prio);
+#ifdef CONFIG_X86_64
         if (thread == (void *)0x1f)
                 thread = get_current_nt_thread();
-        if (!thread)
+#endif
+        if (!thread) {
+                TRACE2("invalid thread");
                 EXIT2(return LOW_REALTIME_PRIORITY);
+        }
         task = get_nt_thread_task(thread);
-        if (!task)
+        if (!task) {
+                TRACE2("couldn't find task for thread: %p", thread);
                 EXIT2(return LOW_REALTIME_PRIORITY);
+        }
 
         if (thread_priority(task) <= 0)
                 old_prio = LOW_PRIORITY;
@@ -1563,81 +1565,82 @@
         else if (prio == HIGH_PRIORITY)
                 set_thread_priority(task, -10);
 
-        TRACE2("%d, %d", old_prio, thread_priority(task));
+        TRACE2("%d, %d", old_prio, (int)thread_priority(task));
         return old_prio;
 }
 
-struct thread_trampoline_info {
+struct thread_trampoline {
         void (*func)(void *) wstdcall;
         void *ctx;
         struct nt_thread *thread;
         struct completion started;
 };
 
-static int thread_trampoline(void *data)
+static int ntdriver_thread(void *data)
 {
-        struct thread_trampoline_info *thread_info = data;
+        struct thread_trampoline *thread_tramp = data;
+        /* yes, a tramp! */
+        typeof(thread_tramp->func) func = thread_tramp->func;
+        typeof(thread_tramp->ctx) ctx = thread_tramp->ctx;
 
-        thread_info->thread->task = current;
-        thread_info->thread->pid = current->pid;
-        TRACE2("thread: %p, task: %p (%d)", thread_info->thread,
+        thread_tramp->thread->task = current;
+        thread_tramp->thread->pid = current->pid;
+        TRACE2("thread: %p, task: %p (%d)", thread_tramp->thread,
                current, current->pid);
-        complete(&thread_info->started);
+        complete(&thread_tramp->started);
 
 #ifdef PF_NOFREEZE
         current->flags |= PF_NOFREEZE;
 #endif
-        strncpy(current->comm, "windisdrvr", sizeof(current->comm));
+        strncpy(current->comm, "ntdriver", sizeof(current->comm));
         current->comm[sizeof(current->comm)-1] = 0;
-        LIN2WIN1(thread_info->func, thread_info->ctx);
+        LIN2WIN1(func, ctx);
         ERROR("task: %p", current);
         return 0;
 }
 
 wstdcall NTSTATUS WIN_FUNC(PsCreateSystemThread,7)
-        (void **phandle, ULONG access, void *obj_attr, void *process,
+        (void **handle, ULONG access, void *obj_attr, void *process,
          void *client_id, void (*func)(void *) wstdcall, void *ctx)
 {
-        struct nt_thread *thread;
-        struct thread_trampoline_info thread_info;
-        no_warn_unused struct task_struct *task;
-        no_warn_unused int pid;
+        struct thread_trampoline thread_tramp;
 
-        ENTER2("phandle = %p, access = %u, obj_attr = %p, process = %p, "
-               "client_id = %p, func = %p, context = %p", phandle, access,
+        ENTER2("handle = %p, access = %u, obj_attr = %p, process = %p, "
+               "client_id = %p, func = %p, context = %p", handle, access,
                obj_attr, process, client_id, func, ctx);
 
-        thread = create_nt_thread(NULL);
-        if (!thread) {
+        thread_tramp.thread = create_nt_thread(NULL);
+        if (!thread_tramp.thread) {
                 ERROR("couldn't allocate thread object");
                 EXIT2(return STATUS_RESOURCES);
         }
-        TRACE2("thread: %p", thread);
-        thread_info.thread = thread;
-        thread_info.func = func;
-        thread_info.ctx = ctx;
-        init_completion(&thread_info.started);
+        TRACE2("thread: %p", thread_tramp.thread);
+        thread_tramp.func = func;
+        thread_tramp.ctx = ctx;
+        init_completion(&thread_tramp.started);
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7)
-        pid = kernel_thread(thread_trampoline, &thread_info, CLONE_SIGHAND);
-        TRACE2("pid = %d", pid);
-        if (pid < 0) {
-                free_object(thread_info.thread);
+        thread_tramp.thread->pid = kernel_thread(ntdriver_thread, &thread_tramp,
+                                                 CLONE_SIGHAND);
+        TRACE2("pid = %d", thread_tramp.thread->pid);
+        if (thread_tramp.thread->pid < 0) {
+                free_object(thread_tramp.thread);
                 EXIT2(return STATUS_FAILURE);
         }
-        TRACE2("created task: %d", pid);
+        TRACE2("created task: %d", thread_tramp.thread->pid);
 #else
-        task = kthread_run(thread_trampoline, &thread_info, "windisdrvr");
-        if (IS_ERR(task)) {
-                free_object(thread_info.thread);
+        thread_tramp.thread->task = kthread_run(ntdriver_thread,
                                                &thread_tramp, "ntdriver");
+        if (IS_ERR(thread_tramp.thread->task)) {
+                free_object(thread_tramp.thread);
                 EXIT2(return STATUS_FAILURE);
         }
-        TRACE2("created task: %p (%d)", task, task->pid);
+        TRACE2("created task: %p", thread_tramp.thread->task);
 #endif
 
-        wait_for_completion(&thread_info.started);
-        *phandle = OBJECT_TO_HEADER(thread_info.thread);
-        TRACE2("created thread: %p, %p", thread_info.thread, *phandle);
+        wait_for_completion(&thread_tramp.started);
+        *handle = OBJECT_TO_HEADER(thread_tramp.thread);
+        TRACE2("created thread: %p, %p", thread_tramp.thread, *handle);
         EXIT2(return STATUS_SUCCESS);
 }
 
@@ -1649,27 +1652,27 @@
         TRACE2("%p, %08X", current, status);
         thread = get_current_nt_thread();
         TRACE2("%p", thread);
-        if (!thread) {
+        if (thread) {
+                KeSetEvent((struct nt_event *)&thread->dh, 0, FALSE);
+                while (1) {
+                        struct nt_list *ent;
+                        struct irp *irp;
+                        KIRQL irql;
+                        irql = nt_spin_lock_irql(&thread->lock, DISPATCH_LEVEL);
+                        ent = RemoveHeadList(&thread->irps);
+                        nt_spin_unlock_irql(&thread->lock, irql);
+                        if (!ent)
+                                break;
+                        irp = container_of(ent, struct irp, thread_list);
+                        IOTRACE("%p", irp);
+                        IoCancelIrp(irp);
+                }
+                /* the driver may later query this status with
+                 * ZwQueryInformationThread */
+                thread->status = status;
+        } else
                 ERROR("couldn't find thread for task: %p", current);
-                return STATUS_FAILURE;
-        }
-        KeSetEvent((struct nt_event *)&thread->dh, 0, FALSE);
-        while (1) {
-                struct nt_list *ent;
-                struct irp *irp;
-                KIRQL irql;
-                irql = nt_spin_lock_irql(&thread->lock, DISPATCH_LEVEL);
-                ent = RemoveHeadList(&thread->irps);
-                nt_spin_unlock_irql(&thread->lock, irql);
-                if (!ent)
-                        break;
-                irp = container_of(ent, struct irp, thread_list);
-                IOTRACE("%p", irp);
-                IoCancelIrp(irp);
-        }
-        /* the driver may later query this status with
-         * ZwQueryInformationThread */
-        thread->status = status;
+
         complete_and_exit(NULL, status);
         ERROR("oops: %p, %d", thread->task, thread->pid);
         return STATUS_FAILURE;
@@ -1700,9 +1703,10 @@
         BOOLEAN ret;
         unsigned long flags;
 
-        nt_spin_lock_irqsave(&interrupt->lock, flags);
+        nt_spin_lock_irqsave(interrupt->actual_lock, flags);
         ret = LIN2WIN1(synch_routine, ctx);
-        nt_spin_unlock_irqrestore(&interrupt->lock, flags);
+        nt_spin_unlock_irqrestore(interrupt->actual_lock, flags);
+        TRACE6("%d", ret);
         return ret;
 }
 
@@ -1711,23 +1715,46 @@
          PHYSICAL_ADDRESS boundary, enum memory_caching_type cache_type)
 {
         void *addr;
-        addr = wrap_get_free_pages(gfp_irql(), size);
-        TRACE4("%p, %lu", addr, size);
+        unsigned int flags;
+
+        ENTER2("%lu, 0x%lx, 0x%lx, 0x%lx, %d", size, (long)lowest,
+               (long)highest, (long)boundary, cache_type);
+        flags = irql_gfp();
+        addr = wrap_get_free_pages(flags, size);
+        TRACE2("%p, %lu, 0x%x", addr, size, flags);
+        if (addr && ((virt_to_phys(addr) + size) <= highest))
+                EXIT2(return addr);
+#ifdef CONFIG_X86_64
+        /* GFP_DMA is really only 16MB even on x86-64, but there is no
+         * other zone available */
+        if (highest <= DMA_31BIT_MASK)
+                flags |= __GFP_DMA;
+        else if (highest <= DMA_32BIT_MASK)
+                flags |= __GFP_DMA32;
+#else
+        if (highest <= DMA_24BIT_MASK)
+                flags |= __GFP_DMA;
+        else if (highest > DMA_30BIT_MASK)
+                flags |= __GFP_HIGHMEM;
+#endif
+        addr = wrap_get_free_pages(flags, size);
+        TRACE2("%p, %lu, 0x%x", addr, size, flags);
         return addr;
 }
 
 wstdcall void WIN_FUNC(MmFreeContiguousMemorySpecifyCache,3)
         (void *base, SIZE_T size, enum memory_caching_type cache_type)
 {
-        TRACE4("%p, %lu", base, size);
+        TRACE2("%p, %lu", base, size);
         free_pages((unsigned long)base, get_order(size));
 }
 
 wstdcall PHYSICAL_ADDRESS WIN_FUNC(MmGetPhysicalAddress,1)
         (void *base)
 {
-        TRACE2("%p", base);
-        return virt_to_phys(base);
+        unsigned long phy = virt_to_phys(base);
+        TRACE2("%p, %p", base, (void *)phy);
+        return phy;
 }
 
 /* Atheros card with pciid 168C:0014 calls this function with 0xf0000
@@ -1773,12 +1800,12 @@
         int mdl_size = MmSizeOfMdl(virt, length);
 
         if (mdl_size <= MDL_CACHE_SIZE) {
-                wrap_mdl = kmem_cache_alloc(mdl_cache, gfp_irql());
+                wrap_mdl = kmem_cache_alloc(mdl_cache, irql_gfp());
                 if (!wrap_mdl)
                         return NULL;
-                nt_spin_lock_bh(&dispatcher_lock);
+                spin_lock_bh(&dispatcher_lock);
                 InsertHeadList(&wrap_mdl_list, &wrap_mdl->list);
-                nt_spin_unlock_bh(&dispatcher_lock);
+                spin_unlock_bh(&dispatcher_lock);
                 mdl = wrap_mdl->mdl;
                 TRACE3("allocated mdl from cache: %p(%p), %p(%d)",
                        wrap_mdl, mdl, virt, length);
@@ -1789,15 +1816,15 @@
                 mdl->flags = MDL_ALLOCATED_FIXED_SIZE | MDL_CACHE_ALLOCATED;
         } else {
                 wrap_mdl =
-                        kmalloc(sizeof(*wrap_mdl) + mdl_size, gfp_irql());
+                        kmalloc(sizeof(*wrap_mdl) + mdl_size, irql_gfp());
                 if (!wrap_mdl)
                         return NULL;
                 mdl = wrap_mdl->mdl;
                 TRACE3("allocated mdl from memory: %p(%p), %p(%d)",
                        wrap_mdl, mdl, virt, length);
-                nt_spin_lock_bh(&dispatcher_lock);
+                spin_lock_bh(&dispatcher_lock);
                 InsertHeadList(&wrap_mdl_list, &wrap_mdl->list);
-                nt_spin_unlock_bh(&dispatcher_lock);
+                spin_unlock_bh(&dispatcher_lock);
                 memset(mdl, 0, mdl_size);
                 MmInitializeMdl(mdl, virt, length);
                 mdl->flags = MDL_ALLOCATED_FIXED_SIZE;
@@ -1819,9 +1846,9 @@
         else {
                 struct wrap_mdl *wrap_mdl = (struct wrap_mdl *)
                         ((char *)mdl - offsetof(struct wrap_mdl, mdl));
-                nt_spin_lock_bh(&dispatcher_lock);
+                spin_lock_bh(&dispatcher_lock);
                 RemoveEntryList(&wrap_mdl->list);
-                nt_spin_unlock_bh(&dispatcher_lock);
+                spin_unlock_bh(&dispatcher_lock);
 
                 if (mdl->flags & MDL_CACHE_ALLOCATED) {
                         TRACE3("freeing mdl cache: %p, %p, %p",
@@ -1986,7 +2013,7 @@
 
 wstdcall NTSTATUS WIN_FUNC(ZwCreateFile,11)
         (void **handle, ACCESS_MASK access_mask,
-         struct object_attributes *obj_attr,struct io_status_block *iosb,
+         struct object_attributes *obj_attr, struct io_status_block *iosb,
          LARGE_INTEGER *size, ULONG file_attr, ULONG share_access,
          ULONG create_disposition, ULONG create_options, void *ea_buffer,
          ULONG ea_length)
@@ -1996,10 +2023,9 @@
         struct ansi_string ansi;
         struct wrap_bin_file *bin_file;
         char *file_basename;
-        KIRQL irql;
         NTSTATUS status;
 
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         nt_list_for_each_entry(coh, &object_list, list) {
                 if (coh->type != OBJECT_TYPE_FILE)
                         continue;
@@ -2008,14 +2034,14 @@
                         fo = HEADER_TO_OBJECT(coh);
                         bin_file = fo->wrap_bin_file;
                         *handle = coh;
-                        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+                        spin_unlock_bh(&ntoskernel_lock);
                         ObReferenceObject(fo);
                         iosb->status = FILE_OPENED;
                         iosb->info = bin_file->size;
                         EXIT2(return STATUS_SUCCESS);
                 }
         }
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
 
         if (RtlUnicodeStringToAnsiString(&ansi, obj_attr->name, TRUE) !=
             STATUS_SUCCESS)
@@ -2042,9 +2068,8 @@
                 TRACE2("%s, %s", bin_file->name, file_basename);
                 fo->flags = FILE_OPENED;
         } else if (access_mask & FILE_WRITE_DATA) {
-                bin_file = kmalloc(sizeof(*bin_file), GFP_KERNEL);
+                bin_file = kzalloc(sizeof(*bin_file), GFP_KERNEL);
                 if (bin_file) {
-                        memset(bin_file, 0, sizeof(*bin_file));
                         strncpy(bin_file->name, file_basename,
                                 sizeof(bin_file->name));
                         bin_file->name[sizeof(bin_file->name)-1] = 0;
@@ -2083,6 +2108,16 @@
         EXIT2(return status);
 }
 
+wstdcall NTSTATUS WIN_FUNC(ZwOpenFile,6)
+        (void **handle, ACCESS_MASK access_mask,
+         struct object_attributes *obj_attr, struct io_status_block *iosb,
+         ULONG share_access, ULONG open_options)
+{
+        LARGE_INTEGER size;
+        return ZwCreateFile(handle, access_mask, obj_attr, iosb, &size, 0,
+                            share_access, 0, open_options, NULL, 0);
+}
+
 wstdcall NTSTATUS WIN_FUNC(ZwReadFile,9)
         (void *handle, struct nt_event *event, void *apc_routine,
          void *apc_context, struct io_status_block *iosb, void *buffer,
@@ -2093,7 +2128,6 @@
         ULONG count;
         size_t offset;
         struct wrap_bin_file *file;
-        KIRQL irql;
 
         TRACE2("%p", handle);
         coh = handle;
@@ -2104,7 +2138,7 @@
         fo = HANDLE_TO_OBJECT(coh);
         file = fo->wrap_bin_file;
         TRACE2("file: %s (%zu)", file->name, file->size);
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         if (byte_offset)
                 offset = *byte_offset;
         else
@@ -2113,7 +2147,7 @@
         TRACE2("count: %u, offset: %zu, length: %u", count, offset, length);
         memcpy(buffer, ((void *)file->data) + offset, count);
         fo->current_byte_offset = offset + count;
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
         iosb->status = STATUS_SUCCESS;
         iosb->info = count;
         EXIT2(return STATUS_SUCCESS);
@@ -2128,7 +2162,6 @@
         struct common_object_header *coh;
         struct wrap_bin_file *file;
         unsigned long offset;
-        KIRQL irql;
 
         TRACE2("%p", handle);
         coh = handle;
@@ -2139,7 +2172,7 @@
         fo = HANDLE_TO_OBJECT(coh);
         file = fo->wrap_bin_file;
         TRACE2("file: %zu, %u", file->size, length);
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         if (byte_offset)
                 offset = *byte_offset;
         else
@@ -2155,7 +2188,7 @@
                 iosb->info = length;
                 fo->current_byte_offset = offset + length;
         }
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
         EXIT2(return iosb->status);
 }
 
@@ -2437,6 +2470,12 @@
         TODO();
 }
 
+void WIN_FUNC(__chkstk,0)
+        (void)
+{
+        TODO();
+}
+
 #include "ntoskernel_exports.h"
 
 struct worker_init_struct {
@@ -2480,11 +2519,11 @@
 {
         struct timeval now;
 
-        nt_spin_lock_init(&dispatcher_lock);
-        nt_spin_lock_init(&ntoskernel_lock);
-        nt_spin_lock_init(&ntos_work_lock);
-        nt_spin_lock_init(&kdpc_list_lock);
-        nt_spin_lock_init(&irp_cancel_lock);
+        spin_lock_init(&dispatcher_lock);
+        spin_lock_init(&ntoskernel_lock);
+        spin_lock_init(&ntos_work_lock);
+        spin_lock_init(&kdpc_list_lock);
+        spin_lock_init(&irp_cancel_lock);
         InitializeListHead(&wrap_mdl_list);
         InitializeListHead(&kdpc_list);
         InitializeListHead(&callback_objects);
@@ -2492,10 +2531,11 @@
         InitializeListHead(&object_list);
         InitializeListHead(&ntos_work_list);
 
+        nt_spin_lock_init(&nt_list_lock);
+
         initialize_work(&kdpc_work, kdpc_worker, NULL);
         initialize_work(&ntos_work, ntos_work_worker, NULL);
-        nt_spin_lock_init(&timer_lock);
-        InitializeListHead(&wrap_timer_list);
+        wrap_timer_slist.next = NULL;
 
         do_gettimeofday(&now);
         wrap_ticks_to_boot = TICKS_1601_TO_1970;
@@ -2503,12 +2543,21 @@
         wrap_ticks_to_boot += now.tv_usec * 10;
         wrap_ticks_to_boot -= jiffies * TICKSPERJIFFY;
         TRACE2("%Lu", wrap_ticks_to_boot);
-#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)) && \
-        !defined(preempt_enable) && !defined(CONFIG_PREEMPT)
-        preempt_count = 0;
+
+#ifdef WRAP_PREEMPT
+        do {
+                int cpu;
+                for_each_possible_cpu(cpu) {
+                        irql_info_t *info;
+                        info = &per_cpu(irql_info, cpu);
+                        mutex_init(&(info->lock));
+                        info->task = NULL;
+                        info->count = 0;
+                }
+        } while (0);
 #endif
 
-#ifdef USE_NTOS_WQ
+#ifdef NTOS_WQ
         ntos_wq = create_singlethread_workqueue("ntos_wq");
         if (!ntos_wq) {
                 WARNING("couldn't create ntos_wq thread");
@@ -2528,9 +2577,10 @@
                 ntoskernel_exit();
                 return -ENOMEM;
         }
-        mdl_cache = kmem_cache_create("wrap_mdl",
-                                      sizeof(struct wrap_mdl) + MDL_CACHE_SIZE,
-                                      0, 0, NULL, NULL);
+        mdl_cache =
+                wrap_kmem_cache_create("wrap_mdl",
+                                       sizeof(struct wrap_mdl) + MDL_CACHE_SIZE,
+                                       0, 0);
         TRACE2("%p", mdl_cache);
         if (!mdl_cache) {
                 ERROR("couldn't allocate MDL cache");
@@ -2544,13 +2594,16 @@
         init_timer(&shared_data_timer);
         shared_data_timer.function = update_user_shared_data_proc;
         shared_data_timer.data = (unsigned long)0;
-        mod_timer(&shared_data_timer, jiffies + MSEC_TO_HZ(30));
 #endif
         return 0;
 }
 
 int ntoskernel_init_device(struct wrap_device *wd)
 {
+#if defined(CONFIG_X86_64)
+        if (kuser_shared_data.reserved1)
+                mod_timer(&shared_data_timer, jiffies + MSEC_TO_HZ(30));
+#endif
         return 0;
 }
 
@@ -2565,7 +2618,6 @@
 void ntoskernel_exit(void)
 {
         struct nt_list *cur;
-        KIRQL irql;
 
         ENTER2("");
 
@@ -2573,14 +2625,16 @@
         TRACE2("freeing timers");
         while (1) {
                 struct wrap_timer *wrap_timer;
-                KIRQL irql;
+                struct nt_slist *slist;
 
-                irql = nt_spin_lock_irql(&timer_lock, DISPATCH_LEVEL);
-                cur = RemoveTailList(&wrap_timer_list);
-                nt_spin_unlock_irql(&timer_lock, irql);
-                if (!cur)
+                spin_lock_bh(&ntoskernel_lock);
+                if ((slist = wrap_timer_slist.next))
+                        wrap_timer_slist.next = slist->next;
+                spin_unlock_bh(&ntoskernel_lock);
+                TIMERTRACE("%p", slist);
+                if (!slist)
                         break;
-                wrap_timer = container_of(cur, struct wrap_timer, list);
+                wrap_timer = container_of(slist, struct wrap_timer, slist);
                 if (del_timer_sync(&wrap_timer->timer))
                         WARNING("Buggy Windows driver left timer %p running",
                                 wrap_timer->nt_timer);
@@ -2590,7 +2644,7 @@
 
         TRACE2("freeing MDLs");
         if (mdl_cache) {
-                irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+                spin_lock_bh(&ntoskernel_lock);
                 if (!IsListEmpty(&wrap_mdl_list))
                         ERROR("Windows driver didn't free all MDLs; "
                               "freeing them now");
@@ -2602,13 +2656,13 @@
                         else
                                 kfree(wrap_mdl);
                 }
-                nt_spin_unlock_irql(&ntoskernel_lock, irql);
+                spin_unlock_bh(&ntoskernel_lock);
                 kmem_cache_destroy(mdl_cache);
                 mdl_cache = NULL;
         }
 
         TRACE2("freeing callbacks");
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         while ((cur = RemoveHeadList(&callback_objects))) {
                 struct callback_object *object;
                 struct nt_list *ent;
@@ -2620,21 +2674,21 @@
                 }
                 kfree(object);
         }
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
 
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         while ((cur = RemoveHeadList(&bus_driver_list))) {
                 struct bus_driver *bus_driver;
                 bus_driver = container_of(cur, struct bus_driver, list);
                 /* TODO: make sure all all drivers are shutdown/removed */
                 kfree(bus_driver);
         }
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
 
 #if defined(CONFIG_X86_64)
         del_timer_sync(&shared_data_timer);
 #endif
-#ifdef USE_NTOS_WQ
+#ifdef NTOS_WQ
         if (ntos_wq)
                 destroy_workqueue(ntos_wq);
 #endif
@@ -2642,15 +2696,19 @@
         if (ntos_worker_thread)
                 ObDereferenceObject(ntos_worker_thread);
         ENTER2("freeing objects");
-        irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+        spin_lock_bh(&ntoskernel_lock);
         while ((cur = RemoveHeadList(&object_list))) {
                 struct common_object_header *hdr;
                 hdr = container_of(cur, struct common_object_header, list);
-                WARNING("object %p type %d was not freed, freeing it now",
-                        HEADER_TO_OBJECT(hdr), hdr->type);
+                if (hdr->type == OBJECT_TYPE_NT_THREAD)
+                        TRACE1("object %p(%d) was not freed, freeing it now",
+                               HEADER_TO_OBJECT(hdr), hdr->type);
+                else
+                        WARNING("object %p(%d) was not freed, freeing it now",
+                                HEADER_TO_OBJECT(hdr), hdr->type);
                 ExFreePool(hdr);
         }
-        nt_spin_unlock_irql(&ntoskernel_lock, irql);
+        spin_unlock_bh(&ntoskernel_lock);
 
         EXIT2(return);
 }