 	struct driver_object *drv_obj;

-	irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+	irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
 	nt_list_for_each_entry(bus_driver, &bus_driver_list, list) {
 		if (strcmp(bus_driver->name, name) == 0)
 			drv_obj = &bus_driver->drv_obj;
-	kspin_unlock_irql(&ntoskernel_lock, irql);
+	nt_spin_unlock_irql(&ntoskernel_lock, irql);
-_FASTCALL struct nt_list *WRAP_EXPORT(ExfInterlockedInsertHeadList)
-	(FASTCALL_DECL_3(struct nt_list *head, struct nt_list *entry,
-			 KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExfInterlockedInsertHeadList)
+	(struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
 	struct nt_list *first;
 	TRACEENTER5("head = %p, entry = %p", head, entry);
-	irql = kspin_lock_irql(lock, DISPATCH_LEVEL);
+	nt_spin_lock_irqsave(lock, flags);
 	first = InsertHeadList(head, entry);
-	kspin_unlock_irql(lock, irql);
+	nt_spin_unlock_irqrestore(lock, flags);
 	DBGTRACE5("head = %p, old = %p", head, first);

-_FASTCALL struct nt_list *WRAP_EXPORT(ExInterlockedInsertHeadList)
-	(FASTCALL_DECL_3(struct nt_list *head, struct nt_list *entry,
-			 KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExInterlockedInsertHeadList)
+	(struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
 	TRACEENTER5("%p", head);
-	return ExfInterlockedInsertHeadList(FASTCALL_ARGS_3(head, entry,
-							    lock));
+	return ExfInterlockedInsertHeadList(head, entry, lock);
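The recurring substitution in these hunks deserves a note: the old code faked Windows' fastcall convention with FASTCALL_DECL_n/FASTCALL_ARGS_n argument-shuffling macros, while the new code annotates the function itself so the compiler emits the convention directly. A minimal sketch of the idea, assuming gcc on x86; the macro definition and demo function below are illustrative, not the project's exact ones:

/* sketch: attribute-based fastcall instead of argument-shuffling macros */
#include <stdio.h>

#ifdef __i386__
#define wfastcall __attribute__((fastcall))	/* first two args in ECX, EDX */
#else
#define wfastcall				/* convention only matters on i386 here */
#endif

static wfastcall int demo_insert(int a, int b, int c)
{
	/* a and b arrive in registers on i386; c comes on the stack */
	return a + b + c;
}

int main(void)
{
	printf("%d\n", demo_insert(1, 2, 3));
	return 0;
}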
-_FASTCALL struct nt_list *WRAP_EXPORT(ExfInterlockedInsertTailList)
-	(FASTCALL_DECL_3(struct nt_list *head, struct nt_list *entry,
-			 KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExfInterlockedInsertTailList)
+	(struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
 	struct nt_list *last;
 	TRACEENTER5("head = %p, entry = %p", head, entry);
-	irql = kspin_lock_irql(lock, DISPATCH_LEVEL);
+	nt_spin_lock_irqsave(lock, flags);
 	last = InsertTailList(head, entry);
-	kspin_unlock_irql(lock, irql);
+	nt_spin_unlock_irqrestore(lock, flags);
 	DBGTRACE5("head = %p, old = %p", head, last);

-_FASTCALL struct nt_list *WRAP_EXPORT(ExInterlockedInsertTailList)
-	(FASTCALL_DECL_3(struct nt_list *head, struct nt_list *entry,
-			 KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExInterlockedInsertTailList)
+	(struct nt_list *head, struct nt_list *entry, NT_SPIN_LOCK *lock)
 	TRACEENTER5("%p", head);
-	return ExfInterlockedInsertTailList(FASTCALL_ARGS_3(head, entry,
-							    lock));
+	return ExfInterlockedInsertTailList(head, entry, lock);
-_FASTCALL struct nt_list *WRAP_EXPORT(ExfInterlockedRemoveHeadList)
-	(FASTCALL_DECL_2(struct nt_list *head, KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExfInterlockedRemoveHeadList)
+	(struct nt_list *head, NT_SPIN_LOCK *lock)
 	struct nt_list *ret;
 	TRACEENTER5("head = %p", head);
-	irql = kspin_lock_irql(lock, DISPATCH_LEVEL);
+	nt_spin_lock_irqsave(lock, flags);
 	ret = RemoveHeadList(head);
-	kspin_unlock_irql(lock, irql);
+	nt_spin_unlock_irqrestore(lock, flags);
 	DBGTRACE5("head = %p, ret = %p", head, ret);

-_FASTCALL struct nt_list *WRAP_EXPORT(ExInterlockedRemoveHeadList)
-	(FASTCALL_DECL_2(struct nt_list *head, KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExInterlockedRemoveHeadList)
+	(struct nt_list *head, NT_SPIN_LOCK *lock)
 	TRACEENTER5("%p", head);
-	return ExfInterlockedRemoveHeadList(FASTCALL_ARGS_2(head, lock));
+	return ExfInterlockedRemoveHeadList(head, lock);
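For readers without the headers at hand, the list primitives these wrappers guard are the usual circular doubly-linked Windows LIST_ENTRY operations. A standalone sketch of plausible semantics; the return conventions here (previous last entry or NULL on insert, head itself when removing from an empty list) are assumptions, not copied from the project headers:

#include <stdio.h>
#include <stddef.h>

struct nt_list {
	struct nt_list *next, *prev;
};

static void InitializeListHead(struct nt_list *head)
{
	head->next = head->prev = head;
}

/* insert at tail; returns the previous last entry, or NULL if empty */
static struct nt_list *InsertTailList(struct nt_list *head,
				      struct nt_list *entry)
{
	struct nt_list *last = (head->prev == head) ? NULL : head->prev;
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
	return last;
}

/* remove and return first entry; returns head itself if list is empty */
static struct nt_list *RemoveHeadList(struct nt_list *head)
{
	struct nt_list *entry = head->next;
	head->next = entry->next;
	entry->next->prev = head;
	return entry;
}

int main(void)
{
	struct nt_list head, a, b;
	InitializeListHead(&head);
	InsertTailList(&head, &a);
	InsertTailList(&head, &b);
	printf("%d\n", RemoveHeadList(&head) == &a);	/* prints 1 */
	return 0;
}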
-_FASTCALL struct nt_list *WRAP_EXPORT(ExfInterlockedRemoveTailList)
-	(FASTCALL_DECL_2(struct nt_list *head, KSPIN_LOCK *lock))
+wfastcall struct nt_list *WRAP_EXPORT(ExfInterlockedRemoveTailList)
+	(struct nt_list *head, NT_SPIN_LOCK *lock)
 	struct nt_list *ret;
 	TRACEENTER5("head = %p", head);
-	irql = kspin_lock_irql(lock, DISPATCH_LEVEL);
+	nt_spin_lock_irqsave(lock, flags);
 	ret = RemoveTailList(head);
-	kspin_unlock_irql(lock, irql);
-	DBGTRACE5("head = %p, ret = %p", head, ret);

-_FASTCALL struct nt_list *WRAP_EXPORT(ExInterlockedRemoveTailList)
-	(FASTCALL_DECL_2(struct nt_list *head, KSPIN_LOCK *lock))
-	TRACEENTER5("%p", head);
-	return ExfInterlockedRemoveTailList(FASTCALL_ARGS_2(head, lock));

-STDCALL struct nt_slist *WRAP_EXPORT(ExpInterlockedPushEntrySList)
-	(union nt_slist_head *head, struct nt_slist *entry)
-	struct nt_slist *ret;
-	TRACEENTER5("head = %p", head);
-	irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-	ret = PushEntryList(head, entry);
-	kspin_unlock_irql(&inter_lock, irql);
-	DBGTRACE5("head = %p, ret = %p", head, ret);

-_FASTCALL struct nt_slist *WRAP_EXPORT(ExInterlockedPushEntrySList)
-	(FASTCALL_DECL_3(union nt_slist_head *head, struct nt_slist *entry,
-			 KSPIN_LOCK *lock))
-	TRACEENTER5("%p", head);
-	return ExpInterlockedPushEntrySList(head, entry);

-_FASTCALL struct nt_slist *WRAP_EXPORT(InterlockedPushEntrySList)
-	(FASTCALL_DECL_2(union nt_slist_head *head, struct nt_slist *entry))
-	TRACEENTER5("%p", head);
-	return ExpInterlockedPushEntrySList(head, entry);

-STDCALL struct nt_slist *WRAP_EXPORT(ExpInterlockedPopEntrySList)
-	(union nt_slist_head *head)
-	struct nt_slist *ret;
-	TRACEENTER5("head = %p", head);
-	irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-	ret = PopEntryList(head);
-	kspin_unlock_irql(&inter_lock, irql);
-	DBGTRACE5("head = %p, ret = %p", head, ret);

-_FASTCALL struct nt_slist *WRAP_EXPORT(ExInterlockedPopEntrySList)
-	(FASTCALL_DECL_2(union nt_slist_head *head, KSPIN_LOCK *lock))
-	TRACEENTER5("%p", head);
-	return ExpInterlockedPopEntrySList(head);

-_FASTCALL struct nt_slist *WRAP_EXPORT(InterlockedPopEntrySList)
-	(FASTCALL_DECL_1(union nt_slist_head *head))
-	TRACEENTER5("%p", head);
-	return ExpInterlockedPopEntrySList(head);

-STDCALL USHORT WRAP_EXPORT(ExQueryDepthSList)
-	(union nt_slist_head *head)
-	TRACEENTER5("%p", head);
-	return head->list.depth;

-/* should be called with nt_event_lock held at DISPATCH_LEVEL */
-static void initialize_dh(struct dispatch_header *dh, enum event_type type,
-			  int state, enum dh_type dh_type)
+	nt_spin_unlock_irqrestore(lock, flags);
+	DBGTRACE5("head = %p, ret = %p", head, ret);

+wfastcall struct nt_list *WRAP_EXPORT(ExInterlockedRemoveTailList)
+	(struct nt_list *head, NT_SPIN_LOCK *lock)
+	TRACEENTER5("%p", head);
+	return ExfInterlockedRemoveTailList(head, lock);

+wfastcall struct nt_slist *WRAP_EXPORT(ExInterlockedPushEntrySList)
+	(nt_slist_header *head, struct nt_slist *entry, NT_SPIN_LOCK *lock)
+	struct nt_slist *ret;
+	TRACEENTER5("head = %p", head);
+	ret = PushEntrySList(head, entry, lock);
+	DBGTRACE5("head = %p, ret = %p", head, ret);

+wstdcall struct nt_slist *WRAP_EXPORT(ExpInterlockedPushEntrySList)
+	(nt_slist_header *head, struct nt_slist *entry)
+	struct nt_slist *ret;
+	TRACEENTER5("head = %p", head);
+	ret = PushEntrySList(head, entry, &ntoskernel_lock);
+	DBGTRACE5("head = %p, ret = %p", head, ret);

+wfastcall struct nt_slist *WRAP_EXPORT(InterlockedPushEntrySList)
+	(nt_slist_header *head, struct nt_slist *entry)
+	struct nt_slist *ret;
+	TRACEENTER5("head = %p", head);
+	ret = PushEntrySList(head, entry, &ntoskernel_lock);
+	DBGTRACE5("head = %p, ret = %p", head, ret);

+wfastcall struct nt_slist *WRAP_EXPORT(ExInterlockedPopEntrySList)
+	(nt_slist_header *head, NT_SPIN_LOCK *lock)
+	struct nt_slist *ret;
+	TRACEENTER5("head = %p", head);
+	ret = PopEntrySList(head, lock);
+	DBGTRACE5("head = %p, ret = %p", head, ret);

+wstdcall struct nt_slist *WRAP_EXPORT(ExpInterlockedPopEntrySList)
+	(nt_slist_header *head)
+	struct nt_slist *ret;
+	TRACEENTER5("head = %p", head);
+	ret = PopEntrySList(head, &ntoskernel_lock);
+	DBGTRACE5("head = %p, ret = %p", head, ret);

+wfastcall struct nt_slist *WRAP_EXPORT(InterlockedPopEntrySList)
+	(nt_slist_header *head)
+	struct nt_slist *ret;
+	TRACEENTER5("head = %p", head);
+	ret = PopEntrySList(head, &ntoskernel_lock);
+	DBGTRACE5("head = %p, ret = %p", head, ret);

+wstdcall USHORT WRAP_EXPORT(ExQueryDepthSList)
+	(nt_slist_header *head)
+	TRACEENTER5("%p", head);
+	return head->align & 0xffff;
+wfastcall LONG WRAP_EXPORT(InterlockedIncrement)
+	TRACEENTER5("%p, %d", val, *val);
+	__asm__ __volatile__(
+		LOCK_PREFIX "xadd %1, %2\n\t"
+		: "0" (1), "m" (*val)
+	TRACEEXIT5(return ret);

+wfastcall LONG WRAP_EXPORT(InterlockedDecrement)
+	TRACEENTER5("%p, %d", val, *val);
+	__asm__ __volatile__(
+		LOCK_PREFIX "xadd %1, %2\n\t"
+		: "0" (-1), "m" (*val)
+	TRACEEXIT5(return ret);

+wfastcall LONG WRAP_EXPORT(InterlockedExchange)
+	(LONG volatile *target, LONG val)
+	ret = xchg(target, val);
+	TRACEEXIT5(return ret);

+wfastcall LONG WRAP_EXPORT(InterlockedCompareExchange)
+	(LONG volatile *dest, LONG xchg, LONG comperand)
+	ret = cmpxchg(dest, comperand, xchg);
+	TRACEEXIT5(return ret);

+wfastcall void WRAP_EXPORT(ExInterlockedAddLargeStatistic)
+	(LARGE_INTEGER volatile *plint, ULONG n)
+	save_local_irq(flags);
+	__asm__ __volatile__(
+		LOCK_PREFIX "add %0, %1\n\t"
+		: "r" (n), "m" (*plint)
+	__asm__ __volatile__(
+		"  movl %1, %%ebx\n\t"
+		"  movl %%edx, %%ecx\n\t"
+		"  addl %%eax, %%ebx\n\t"
+		"  adcl $0, %%ecx\n\t"
+		LOCK_PREFIX "cmpxchg8b (%0)\n\t"
+		: "m" (n), "a" (ll_low(*plint)), "d" (ll_high(*plint))
+		: "ebx", "ecx", "cc", "memory");
+	restore_local_irq(flags);
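The second asm block is a 64-bit atomic add for i386, where no single add instruction covers 8 bytes atomically: it loads the current value into EDX:EAX, builds the incremented pair in ECX:EBX, and cmpxchg8b publishes it only if memory still matches the snapshot, retrying otherwise. A portable C sketch of the same compare-and-swap retry loop, using a gcc builtin rather than the project's macros:

#include <stdint.h>

/* sketch: 64-bit atomic add as a compare-and-swap retry loop */
static void atomic_add64(volatile uint64_t *v, uint32_t n)
{
	uint64_t old, new;

	do {
		old = *v;		/* snapshot both words */
		new = old + n;		/* compute the replacement */
		/* publish only if nobody changed *v since the snapshot */
	} while (!__sync_bool_compare_and_swap(v, old, new));
}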
+/* should be called with dispatcher_lock held at DISPATCH_LEVEL */
+static void initialize_dh(struct dispatcher_header *dh, enum dh_type type,
+			  int state)
 	memset(dh, 0, sizeof(*dh));
+	set_dh_type(dh, type);
 	dh->signal_state = state;
-	set_dh_type(dh, dh_type);
 	InitializeListHead(&dh->wait_blocks);
 	TRACEEXIT3(return ret);

-static void wrap_work_item_worker(void *data)
+static void ntos_work_item_worker(void *data)
-	struct wrap_work_item *wrap_work_item;
+	struct ntos_work_item *ntos_work_item;
 	struct nt_list *cur;
-	void (*func)(void *arg1, void *arg2) STDCALL;
+	typeof(ntos_work_item->func) func;
-	irql = kspin_lock_irql(&wrap_work_item_list_lock,
-			       DISPATCH_LEVEL);
-	cur = RemoveHeadList(&wrap_work_item_list);
-	kspin_unlock_irql(&wrap_work_item_list_lock, irql);
+	irql = nt_spin_lock_irql(&ntos_work_item_list_lock,
+				 DISPATCH_LEVEL);
+	cur = RemoveHeadList(&ntos_work_item_list);
+	nt_spin_unlock_irql(&ntos_work_item_list_lock, irql);
-	wrap_work_item = container_of(cur, struct wrap_work_item,
-				      list);
-	func = wrap_work_item->func;
-	DBGTRACE4("%p, %p, %p", func, wrap_work_item->arg1,
-		  wrap_work_item->arg2);
-	if (wrap_work_item->win_func == TRUE)
-		LIN2WIN2(func, wrap_work_item->arg1,
-			 wrap_work_item->arg2);
+	ntos_work_item = container_of(cur, struct ntos_work_item, list);
+	func = ntos_work_item->func;
+	WORKTRACE("%p: executing %p, %p, %p", current, func,
+		  ntos_work_item->arg1, ntos_work_item->arg2);
+	if (ntos_work_item->func_type == WORKER_FUNC_WIN)
+		LIN2WIN2(func, ntos_work_item->arg1,
+			 ntos_work_item->arg2);
-		func(wrap_work_item->arg1, wrap_work_item->arg2);
-	kfree(wrap_work_item);
+		func(ntos_work_item->arg1, ntos_work_item->arg2);
+	kfree(ntos_work_item);
-int schedule_wrap_work_item(WRAP_WORK_FUNC func, void *arg1, void *arg2,
-			    BOOLEAN win_func)
+int schedule_ntos_work_item(NTOS_WORK_FUNC func, void *arg1, void *arg2,
+			    enum worker_func_type func_type)
-	struct wrap_work_item *wrap_work_item;
+	struct ntos_work_item *ntos_work_item;
-	wrap_work_item = kmalloc(sizeof(*wrap_work_item), GFP_ATOMIC);
-	if (!wrap_work_item) {
+	WORKENTER("adding work: %p, %p, %p", func, arg1, arg2);
+	ntos_work_item = kmalloc(sizeof(*ntos_work_item), GFP_ATOMIC);
+	if (!ntos_work_item) {
 		ERROR("couldn't allocate memory");
-	wrap_work_item->func = func;
-	wrap_work_item->arg1 = arg1;
-	wrap_work_item->arg2 = arg2;
-	wrap_work_item->win_func = win_func;
-	irql = kspin_lock_irql(&wrap_work_item_list_lock, DISPATCH_LEVEL);
-	InsertTailList(&wrap_work_item_list, &wrap_work_item->list);
-	kspin_unlock_irql(&wrap_work_item_list_lock, irql);
-	schedule_work(&wrap_work_item_work);
-STDCALL void WRAP_EXPORT(KeInitializeSpinLock)
-	TRACEENTER6("%p", lock);
-	kspin_lock_init(lock);

-STDCALL void WRAP_EXPORT(KeAcquireSpinLock)
-	(KSPIN_LOCK *lock, KIRQL *irql)
-	TRACEENTER6("%p", lock);
-	*irql = kspin_lock_irql(lock, DISPATCH_LEVEL);

-STDCALL void WRAP_EXPORT(KeReleaseSpinLock)
-	(KSPIN_LOCK *lock, KIRQL oldirql)
-	TRACEENTER6("%p", lock);
-	kspin_unlock_irql(lock, oldirql);

-STDCALL void WRAP_EXPORT(KeAcquireSpinLockAtDpcLevel)
-	TRACEENTER6("%p", lock);

-STDCALL void WRAP_EXPORT(KeRaiseIrql)
+	ntos_work_item->func = func;
+	ntos_work_item->arg1 = arg1;
+	ntos_work_item->arg2 = arg2;
+	ntos_work_item->func_type = func_type;
+	irql = nt_spin_lock_irql(&ntos_work_item_list_lock, DISPATCH_LEVEL);
+	InsertTailList(&ntos_work_item_list, &ntos_work_item->list);
+	nt_spin_unlock_irql(&ntos_work_item_list_lock, irql);
+	schedule_ntos_work(&ntos_work_item_work);
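On both sides of this hunk the pattern is a classic deferred-work queue: producers allocate an item with GFP_ATOMIC (so they may run at raised IRQL), append it under the list lock, and kick a Linux work struct; the worker later drains the list and dispatches each function according to its calling-convention tag. A hedged usage sketch; the names follow the new API in this diff, but my_callback and dev are invented for illustration:

/* sketch: queueing a Windows-convention callback for deferred execution */
static wstdcall void my_callback(void *arg1, void *arg2)
{
	/* runs later in the worker; dispatched via LIN2WIN2() because
	 * the item is tagged WORKER_FUNC_WIN */
}

/* ... from atomic context (the allocation inside uses GFP_ATOMIC): */
schedule_ntos_work_item(my_callback, dev, NULL, WORKER_FUNC_WIN);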
+wstdcall void WRAP_EXPORT(KeInitializeSpinLock)
+	(NT_SPIN_LOCK *lock)
+	TRACEENTER6("%p", lock);
+	nt_spin_lock_init(lock);

+wstdcall void WRAP_EXPORT(KeAcquireSpinLock)
+	(NT_SPIN_LOCK *lock, KIRQL *irql)
+	TRACEENTER6("%p", lock);
+	*irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);

+wstdcall void WRAP_EXPORT(KeReleaseSpinLock)
+	(NT_SPIN_LOCK *lock, KIRQL oldirql)
+	TRACEENTER6("%p", lock);
+	nt_spin_unlock_irql(lock, oldirql);

+wstdcall void WRAP_EXPORT(KeAcquireSpinLockAtDpcLevel)
+	(NT_SPIN_LOCK *lock)
+	TRACEENTER6("%p", lock);

+wstdcall void WRAP_EXPORT(KeReleaseSpinLockFromDpcLevel)
+	(NT_SPIN_LOCK *lock)
+	TRACEENTER6("%p", lock);
+	nt_spin_unlock(lock);

+wstdcall void WRAP_EXPORT(KeRaiseIrql)
 	(KIRQL newirql, KIRQL *oldirql)
 	TRACEENTER6("%d", newirql);
 	*oldirql = raise_irql(newirql);

-STDCALL KIRQL WRAP_EXPORT(KeRaiseIrqlToDpcLevel)
+wstdcall KIRQL WRAP_EXPORT(KeRaiseIrqlToDpcLevel)
 	return raise_irql(DISPATCH_LEVEL);

-STDCALL void WRAP_EXPORT(KeLowerIrql)
+wstdcall void WRAP_EXPORT(KeLowerIrql)
 	TRACEENTER6("%d", irql);
 	lower_irql(irql);
-STDCALL KIRQL WRAP_EXPORT(KeAcquireSpinLockRaiseToDpc)
-	TRACEENTER6("%p", lock);
-	return kspin_lock_irql(lock, DISPATCH_LEVEL);

-STDCALL void WRAP_EXPORT(KeAcquireSpinLockdAtDpcLevel)
-	TRACEENTER6("%p", lock);

-STDCALL void WRAP_EXPORT(KeReleaseSpinLockFromDpcLevel)
-	TRACEENTER6("%p", lock);

-_FASTCALL LONG WRAP_EXPORT(InterlockedDecrement)
-	(FASTCALL_DECL_1(LONG volatile *val))
-	irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-	kspin_unlock_irql(&inter_lock, irql);
-	TRACEEXIT5(return x);

-_FASTCALL LONG WRAP_EXPORT(InterlockedIncrement)
-	(FASTCALL_DECL_1(LONG volatile *val))
-	irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-	kspin_unlock_irql(&inter_lock, irql);
-	TRACEEXIT5(return x);

-_FASTCALL LONG WRAP_EXPORT(InterlockedExchange)
-	(FASTCALL_DECL_2(LONG volatile *target, LONG val))
-	irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-	kspin_unlock_irql(&inter_lock, irql);
-	TRACEEXIT5(return x);

-_FASTCALL LONG WRAP_EXPORT(InterlockedCompareExchange)
-	(FASTCALL_DECL_3(LONG volatile *dest, LONG xchg, LONG comperand))
-	irql = kspin_lock_irql(&inter_lock, DISPATCH_LEVEL);
-	if (*dest == comperand)
-	kspin_unlock_irql(&inter_lock, irql);
-	TRACEEXIT5(return x);

-_FASTCALL void WRAP_EXPORT(ExInterlockedAddLargeStatistic)
-	(FASTCALL_DECL_2(LARGE_INTEGER *plint, ULONG n))
-	unsigned long flags;
-	TRACEENTER5("%p = %llu, n = %u", plint, *plint, n);
-	kspin_lock_irqsave(&inter_lock, flags);
-	kspin_unlock_irqrestore(&inter_lock, flags);
-STDCALL void *WRAP_EXPORT(ExAllocatePoolWithTag)

+wstdcall KIRQL WRAP_EXPORT(KeAcquireSpinLockRaiseToDpc)
+	(NT_SPIN_LOCK *lock)
+	TRACEENTER6("%p", lock);
+	return nt_spin_lock_irql(lock, DISPATCH_LEVEL);

+#undef ExAllocatePoolWithTag
+wstdcall void *WRAP_EXPORT(ExAllocatePoolWithTag)
 	(enum pool_type pool_type, SIZE_T size, ULONG tag)
 	TRACEENTER4("pool_type: %d, size: %lu, tag: %u", pool_type,
 	if (size <= KMALLOC_THRESHOLD) {
 		if (current_irql() < DISPATCH_LEVEL)
 			addr = kmalloc(size, GFP_KERNEL);
 			addr = kmalloc(size, GFP_ATOMIC);
-		if (current_irql() > PASSIVE_LEVEL)
-			ERROR("Windows driver allocating %ld bytes in "
-			      "atomic context", size);
-		addr = vmalloc(size);
+		if (current_irql() < DISPATCH_LEVEL)
+			addr = vmalloc(size);
+			addr = __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM,
+					 PAGE_KERNEL);
+	DBGTRACE4("addr: %p, %lu", addr, size);
 	TRACEEXIT4(return addr);
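The new allocator picks its strategy from the caller's context instead of complaining about it: small requests use kmalloc with GFP_KERNEL or GFP_ATOMIC depending on IRQL, and large requests use vmalloc when sleeping is allowed or an atomic __vmalloc otherwise. The same decision tree in plain Linux terms; this is a sketch, the cutoff value is a placeholder, and the three-argument __vmalloc matches kernels of this document's era (it lost the pgprot argument in later kernels):

#include <linux/slab.h>
#include <linux/vmalloc.h>

#define EXAMPLE_KMALLOC_THRESHOLD (16 * 1024)	/* placeholder cutoff */

/* sketch: pick an allocator based on size and whether we may sleep */
static void *alloc_by_context(size_t size, bool atomic)
{
	if (size <= EXAMPLE_KMALLOC_THRESHOLD)
		return kmalloc(size, atomic ? GFP_ATOMIC : GFP_KERNEL);
	if (!atomic)
		return vmalloc(size);	/* may sleep; virtually contiguous */
	/* atomic large allocation: vmalloc variant with atomic flags */
	return __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM, PAGE_KERNEL);
}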
-STDCALL void wrap_vfree(void *addr, void *ctx)
+static wstdcall void vfree_nonatomic(void *addr, void *ctx)

-STDCALL void WRAP_EXPORT(ExFreePool)
+wstdcall void WRAP_EXPORT(ExFreePool)
 	DBGTRACE4("addr: %p", addr);
 	object = callback->object;
-	irql = kspin_lock_irql(&object->lock, DISPATCH_LEVEL);
+	irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
 	RemoveEntryList(&callback->list);
-	kspin_unlock_irql(&object->lock, irql);
+	nt_spin_unlock_irql(&object->lock, irql);
 	kfree(callback);

-STDCALL void WRAP_EXPORT(ExNotifyCallback)
+wstdcall void WRAP_EXPORT(ExNotifyCallback)
 	(struct callback_object *object, void *arg1, void *arg2)
 	struct callback_func *callback;
 	TRACEENTER3("%p", object);
-	irql = kspin_lock_irql(&object->lock, DISPATCH_LEVEL);
+	irql = nt_spin_lock_irql(&object->lock, DISPATCH_LEVEL);
 	nt_list_for_each_entry(callback, &object->callback_funcs, list){
 		LIN2WIN3(callback->func, callback->context, arg1, arg2);
-	kspin_unlock_irql(&object->lock, irql);
+	nt_spin_unlock_irql(&object->lock, irql);
-/* check and set signaled state; should be called with nt_event_lock held */
-/* @reset indicates if the event should be reset to not-signaled state
+/* check and set signaled state; should be called with dispatcher_lock held */
+/* @grab indicates if the event should be put in not-signaled state
  * - note that a semaphore may stay in signaled state for multiple
- * 'resets' if the count is > 1 */
-static int inline check_reset_signaled_state(void *object,
-					     struct nt_thread *thread,
-					     int reset)
+ * 'grabs' if the count is > 1 */
+static int check_grab_signaled_state(struct dispatcher_header *dh,
+				     struct task_struct *thread, int grab)
-	struct dispatch_header *dh;
-	struct nt_mutex *nt_mutex;
-	nt_mutex = container_of(object, struct nt_mutex, dh);
+	EVENTTRACE("%p, %p, %d, %d", dh, thread, grab, dh->signal_state);
 	if (is_mutex_dh(dh)) {
+		struct nt_mutex *nt_mutex;
 		/* either no thread owns the mutex or this thread owns
-		if (nt_mutex->owner_thread == NULL ||
-		    nt_mutex->owner_thread == thread) {
-			assert(nt_mutex->owner_thread == NULL &&
-			       dh->signal_state == 1);
+		nt_mutex = container_of(dh, struct nt_mutex, dh);
+		EVENTTRACE("%p, %p", nt_mutex, nt_mutex->owner_thread);
+		assert(dh->signal_state <= 1);
+		assert(nt_mutex->owner_thread == NULL &&
+		       dh->signal_state == 1);
+		if (dh->signal_state > 0 || nt_mutex->owner_thread == thread) {
 			dh->signal_state--;
 			nt_mutex->owner_thread = thread;
+			EVENTEXIT(return 1);
 	} else if (dh->signal_state > 0) {
-		/* if resetting, decrement signal_state for
+		/* if grab, decrement signal_state for
 		 * synchronization or semaphore objects */
-		if (reset && (dh->type == SynchronizationEvent ||
-			      is_semaphore_dh(dh)))
+		if (grab && (dh->type == SynchronizationObject ||
+			     is_semaphore_dh(dh)))
 			dh->signal_state--;
+		EVENTEXIT(return 1);
+	EVENTEXIT(return 0);
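The grab rules differ by object class: a notification event stays signaled for every waiter, a synchronization event or semaphore gives up one unit of signal_state per grab, and a mutex admits its current owner regardless of state, with recursive acquisition driving signal_state below zero. A compact restatement of those rules, simplified and without locking, using the types this diff introduces:

/* sketch: grab rules per dispatcher object class (no locking) */
static int try_grab(struct dispatcher_header *dh, struct task_struct *me,
		    struct nt_mutex *mtx /* NULL unless dh is a mutex */)
{
	if (mtx) {
		/* mutex: free, or recursive re-entry by the owner */
		if (dh->signal_state > 0 || mtx->owner_thread == me) {
			dh->signal_state--;	/* goes below 0 on recursion */
			mtx->owner_thread = me;
			return 1;
		}
		return 0;
	}
	if (dh->signal_state <= 0)
		return 0;
	/* semaphores and auto-reset ("synchronization") events consume a
	 * unit; notification events stay signaled for every waiter */
	if (is_semaphore_dh(dh) || dh->type == SynchronizationObject)
		dh->signal_state--;
	return 1;
}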
-/* this function should be called holding nt_event_lock spinlock at
+/* this function should be called holding dispatcher_lock spinlock at
  * DISPATCH_LEVEL */
-static void wakeup_threads(struct dispatch_header *dh)
+static void wakeup_threads(struct dispatcher_header *dh)
-	struct wait_block *wb;
+	struct nt_list *cur, *next;
+	struct wait_block *wb = NULL;
-	EVENTENTER("dh: %p", dh);
-	nt_list_for_each_entry(wb, &dh->wait_blocks, list) {
-		EVENTTRACE("wait block: %p, thread: %p", wb, wb->thread);
-		assert(wb->thread != NULL && wb->object == dh);
+	nt_list_for_each_safe(cur, next, &dh->wait_blocks) {
+		wb = container_of(cur, struct wait_block, list);
+		EVENTTRACE("%p: wait block: %p, thread: %p",
+			   dh, wb, wb->thread);
+		assert(wb->thread != NULL);
+		assert(wb->object == NULL);
 		if (wb->thread &&
-		    check_reset_signaled_state(dh, wb->thread, 0)) {
-			EVENTTRACE("waking up task: %p", wb->thread->task);
-			wb->thread->event_wait_done = 1;
-			wake_up(&wb->thread->event_wq);
-			/* DDK says only one thread will be woken up,
-			 * but we let each waking thread to check if
-			 * the object is in signaled state anyway */
-			if (dh->type == SynchronizationEvent)
+		    check_grab_signaled_state(dh, wb->thread, 1)) {
+			struct thread_event_wait *thread_wait = wb->thread_wait;
+			EVENTTRACE("%p: waking up task %p for %p", thread_wait,
+			RemoveEntryList(&wb->list);
+			thread_wait->done = 1;
+			wake_up(&thread_wait->wq_head);
+			if (dh->type == SynchronizationObject)
-			EVENTTRACE("not waking up task: %p",
+			EVENTTRACE("not waking up task: %p", wb->thread);
 	EVENTEXIT(return);
-STDCALL NTSTATUS WRAP_EXPORT(KeWaitForMultipleObjects)

+/* We need workqueue to implement KeWaitFor routines
+ * below. (get/put)_thread_event_wait give/take back a workqueue. Both
+ * these are called holding dispatcher spinlock, so no locking here */
+static inline struct thread_event_wait *get_thread_event_wait(void)
+	struct thread_event_wait *thread_event_wait;
+	if (thread_event_wait_pool) {
+		thread_event_wait = thread_event_wait_pool;
+		thread_event_wait_pool = thread_event_wait_pool->next;
+		thread_event_wait = kmalloc(sizeof(*thread_event_wait),
+					    GFP_ATOMIC);
+		if (!thread_event_wait) {
+			WARNING("couldn't allocate memory");
+	EVENTTRACE("allocated wq: %p", thread_event_wait);
+	init_waitqueue_head(&thread_event_wait->wq_head);
+	thread_event_wait->task = current;
+	EVENTTRACE("%p, %p, %p", thread_event_wait, current,
+		   thread_event_wait_pool);
+	thread_event_wait->done = 0;
+	return thread_event_wait;

+static void put_thread_event_wait(struct thread_event_wait *thread_event_wait)
+	EVENTENTER("%p, %p", thread_event_wait, current);
+	if (thread_event_wait->task != current)
+		ERROR("argh, task %p should be %p",
+		      current, thread_event_wait->task);
+	thread_event_wait->task = NULL;
+	thread_event_wait->next = thread_event_wait_pool;
+	thread_event_wait_pool = thread_event_wait;
+	thread_event_wait->done = 0;
+wstdcall NTSTATUS WRAP_EXPORT(KeWaitForMultipleObjects)
 	(ULONG count, void *object[], enum wait_type wait_type,
 	 KWAIT_REASON wait_reason, KPROCESSOR_MODE wait_mode,
 	 BOOLEAN alertable, LARGE_INTEGER *timeout,
 	/* TODO: should we allow threads to wait in non-alertable state? */
 	alertable = TRUE;
-	irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
-	/* first check if the wait can be satisfied or not, without
-	 * grabbing the objects, except in the case of WaitAny */
+	irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
+	/* If *timeout == 0: In the case of WaitAny, if an object can
+	 * be grabbed (object is in signaled state), grab and
+	 * return. In the case of WaitAll, we have to first make sure
+	 * all objects can be grabbed. If any/some of them can't be
+	 * grabbed, either we return STATUS_TIMEOUT or wait for them,
+	 * depending on how to satisfy wait. If all of them can be
+	 * grabbed, we will grab them in the next loop below */
 	for (i = wait_count = 0; i < count; i++) {
 		dh = object[i];
 		EVENTTRACE("%p: event %p state: %d",
-			   task, dh, dh->signal_state);
+			   thread, dh, dh->signal_state);
 		/* wait_type == 1 for WaitAny, 0 for WaitAll */
-		if (check_reset_signaled_state(dh, thread, wait_type)) {
+		if (check_grab_signaled_state(dh, thread, wait_type)) {
 			if (wait_type == WaitAny) {
-				kspin_unlock_irql(&nt_event_lock, irql);
+				nt_spin_unlock_irql(&dispatcher_lock, irql);
 					EVENTEXIT(return STATUS_WAIT_0 + i);
 					EVENTEXIT(return STATUS_SUCCESS);
+			EVENTTRACE("%p: wait for %p", thread, dh);
-	if (timeout && *timeout == 0 && wait_count) {
-		kspin_unlock_irql(&nt_event_lock, irql);
-		EVENTEXIT(return STATUS_TIMEOUT);
-	/* get the list of objects the thread (task) needs to wait on
-	 * and add the thread on the wait list for each such object */
-	/* if *timeout == 0, this step will grab the objects */
-	thread->event_wait_done = 0;
-	for (i = wait_count = 0; i < count; i++) {
+	if (timeout && *timeout == 0) {
+		nt_spin_unlock_irql(&dispatcher_lock, irql);
+		EVENTEXIT(return STATUS_TIMEOUT);
+	thread_wait = get_thread_event_wait();
+		nt_spin_unlock_irql(&dispatcher_lock, irql);
+		EVENTEXIT(return STATUS_RESOURCES);
+	/* get the list of objects the thread needs to wait on and add
+	 * the thread on the wait list for each such object */
+	/* if *timeout == 0, this step will grab all the objects */
+	for (i = 0; i < count; i++) {
 		dh = object[i];
 		EVENTTRACE("%p: event %p state: %d",
-			   task, dh, dh->signal_state);
-		if (check_reset_signaled_state(dh, thread, 1)) {
+			   thread, dh, dh->signal_state);
+		wb[i].object = NULL;
+		wb[i].thread_wait = thread_wait;
+		if (check_grab_signaled_state(dh, thread, 1)) {
 			EVENTTRACE("%p: event %p already signaled: %d",
-				   task, dh, dh->signal_state);
+				   thread, dh, dh->signal_state);
 			/* mark that we are not waiting on this object */
 			wb[i].thread = NULL;
-			wb[i].object = NULL;
 			assert(timeout == NULL || *timeout != 0);
+			assert(thread_wait != NULL);
 			wb[i].thread = thread;
+			EVENTTRACE("%p: need to wait on event %p", thread, dh);
 			InsertTailList(&dh->wait_blocks, &wb[i].list);
-			EVENTTRACE("%p: waiting on event %p", task, dh);
-	kspin_unlock_irql(&nt_event_lock, irql);
-	if (wait_count == 0)
+	nt_spin_unlock_irql(&dispatcher_lock, irql);
+	if (wait_count == 0) {
+		assert(thread_wait == NULL);
 		EVENTEXIT(return STATUS_SUCCESS);
+	assert(thread_wait);
 	assert(timeout == NULL || *timeout != 0);
 	if (timeout == NULL)
 		wait_jiffies = 0;
 		wait_jiffies = SYSTEM_TIME_TO_HZ(*timeout) + 1;
-	EVENTTRACE("%p: sleeping for %ld", task, wait_jiffies);
+	EVENTTRACE("%p: sleeping for %ld on %p",
+		   thread, wait_jiffies, thread_wait);
 	while (wait_count) {
 		if (wait_jiffies) {
-			res = wait_event_interruptible_timeout(
-				(thread->event_wait_done == 1),
-			res = wait_event_timeout(
-				(thread->event_wait_done == 1),
+			res = wait_event_interruptible_timeout(
+				thread_wait->wq_head, (thread_wait->done == 1),
-			wait_event_interruptible(thread->event_wq,
-					(thread->event_wait_done == 1));
-			wait_event(thread->event_wq,
-				   (thread->event_wait_done == 1));
+			wait_event_interruptible(
+				thread_wait->wq_head,(thread_wait->done == 1));
 			/* mark that it didn't timeout */
-		irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
-		thread->event_wait_done = 0;
+		thread_wait->done = 0;
+		irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
 		if (signal_pending(current))
 			res = -ERESTARTSYS;
-		EVENTTRACE("%p: woke up, res = %d", task, res);
+		EVENTTRACE("%p woke up on %p, res = %d, done: %d", thread,
+			   thread_wait, res, thread_wait->done);
+		if (thread_wait->task != current)
+			ERROR("%p: argh, task %p should be %p", thread_wait,
+			      thread_wait->task, current);
 		// assert(res < 0 && alertable);
 		if (res <= 0) {
 			/* timed out or interrupted; remove from wait list */
 			for (i = 0; i < count; i++) {
 				if (!wb[i].thread)
-				EVENTTRACE("%p: timedout, deq'ing %p",
-					   task, wb[i].object);
+				EVENTTRACE("%p: timedout, deq'ing %p (%p)",
+					   thread, object[i], wb[i].object);
 				RemoveEntryList(&wb[i].list);
-				wb[i].thread = NULL;
-				wb[i].object = NULL;
-			kspin_unlock_irql(&nt_event_lock, irql);
+			put_thread_event_wait(thread_wait);
+			nt_spin_unlock_irql(&dispatcher_lock, irql);
 			EVENTEXIT(return STATUS_ALERTED);
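SYSTEM_TIME_TO_HZ converts a Windows timeout, expressed in 100 ns units with negative values meaning "relative to now", into jiffies; the +1 added above guards against a short timeout rounding down to zero ticks. A plausible expansion of that conversion; the macro body is assumed here, not copied from the headers:

#include <stdint.h>

#define HZ 250				/* example kernel tick rate */
#define TICKS_PER_SEC 10000000LL	/* 100 ns units per second */

/* sketch: relative NT timeout (negative, 100 ns units) to jiffies */
static long system_time_to_hz(int64_t nt_time)
{
	int64_t t = (nt_time < 0) ? -nt_time : nt_time;
	return (long)(t * HZ / TICKS_PER_SEC);
}

/* e.g. -500000 (a 50 ms relative wait) gives 12 jiffies at HZ=250;
 * the caller adds 1 so the wait never truncates to zero */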
 		   nt_event, nt_event->dh.type, wait);
 	if (wait == TRUE)
 		WARNING("wait = %d, not yet implemented", wait);
-	irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
+	irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
 	old_state = nt_event->dh.signal_state;
 	nt_event->dh.signal_state = 1;
-	wakeup_threads(&nt_event->dh);
-	kspin_unlock_irql(&nt_event_lock, irql);
+	wakeup_threads(&nt_event->dh);
+	nt_spin_unlock_irql(&dispatcher_lock, irql);
 	EVENTEXIT(return old_state);
-STDCALL void WRAP_EXPORT(KeClearEvent)
+wstdcall void WRAP_EXPORT(KeClearEvent)
 	(struct nt_event *nt_event)
 	EVENTENTER("event = %p", nt_event);
-	irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
-	nt_event->dh.signal_state = 0;
-	kspin_unlock_irql(&nt_event_lock, irql);
+	(void)xchg(&nt_event->dh.signal_state, 0);
 	EVENTEXIT(return);

-STDCALL LONG WRAP_EXPORT(KeResetEvent)
+wstdcall LONG WRAP_EXPORT(KeResetEvent)
 	(struct nt_event *nt_event)
 	LONG old_state;
-	EVENTENTER("event = %p", nt_event);
-	irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
-	old_state = nt_event->dh.signal_state;
-	nt_event->dh.signal_state = 0;
-	kspin_unlock_irql(&nt_event_lock, irql);
+	old_state = xchg(&nt_event->dh.signal_state, 0);
+	EVENTTRACE("old state: %d", old_state);
 	EVENTEXIT(return old_state);
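Both event functions trade a lock/modify/unlock sequence for a single atomic exchange: xchg stores 0 and returns the previous signal_state in one indivisible step, which is exactly what KeResetEvent needs. The same idea with a compiler builtin, as a sketch rather than the kernel's xchg macro:

/* sketch: reset-and-fetch with one atomic exchange instead of a lock */
static int reset_event_state(volatile int *signal_state)
{
	/* atomically store 0, return the value that was there before;
	 * note __sync_lock_test_and_set is only an acquire barrier,
	 * weaker than x86 xchg's full barrier, but shows the shape */
	return __sync_lock_test_and_set(signal_state, 0);
}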
-STDCALL void WRAP_EXPORT(KeInitializeMutex)
-	(struct nt_mutex *mutex, BOOLEAN wait)
+wstdcall void WRAP_EXPORT(KeInitializeMutex)
+	(struct nt_mutex *mutex, ULONG level)
 	EVENTENTER("%p", mutex);
-	irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
-	initialize_dh(&mutex->dh, SynchronizationEvent, 1, DH_NT_MUTEX);
-	kspin_unlock_irql(&nt_event_lock, irql);
+	irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
+	initialize_dh(&mutex->dh, MutexObject, 1);
 	mutex->dh.size = sizeof(*mutex);
 	InitializeListHead(&mutex->list);
 	mutex->abandoned = FALSE;
 	mutex->apc_disable = 1;
 	mutex->owner_thread = NULL;
+	nt_spin_unlock_irql(&dispatcher_lock, irql);
 	EVENTEXIT(return);
-STDCALL LONG WRAP_EXPORT(KeReleaseMutex)
+wstdcall LONG WRAP_EXPORT(KeReleaseMutex)
 	(struct nt_mutex *mutex, BOOLEAN wait)
+	struct task_struct *thread;
-	EVENTENTER("%p", mutex);
+	EVENTENTER("%p, %d, %p", mutex, wait, current);
 	if (wait == TRUE)
 		WARNING("wait: %d", wait);
-	irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
-	ret = mutex->dh.signal_state++;
-	if (mutex->dh.signal_state > 0) {
-		mutex->owner_thread = NULL;
-		wakeup_threads(&mutex->dh);
-	kspin_unlock_irql(&nt_event_lock, irql);
+	irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
+	EVENTTRACE("%p, %p, %d", thread, mutex->owner_thread,
+		   mutex->dh.signal_state);
+	if ((mutex->owner_thread == thread) && (mutex->dh.signal_state <= 0)) {
+		if ((ret = mutex->dh.signal_state++) == 0) {
+			mutex->owner_thread = NULL;
+			wakeup_threads(&mutex->dh);
+		ret = STATUS_MUTANT_NOT_OWNED;
+	nt_spin_unlock_irql(&dispatcher_lock, irql);
+	EVENTTRACE("ret: %08X", ret);
 	EVENTEXIT(return ret);
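The new release path also encodes the ownership check Windows requires: signal_state is 1 when the mutex is free, 0 when held once, and negative for recursive acquisition, so only the release that brings it back to 0 actually frees the mutex, and a release from a non-owner fails. A worked trace of that state machine, under the semantics shown in this diff:

/* sketch: mutex signal_state over a recursive acquire/release sequence
 *
 *   state  owner   operation
 *     1    NULL    KeInitializeMutex()
 *     0    T       wait/grab by thread T          (1 -> 0)
 *    -1    T       recursive grab by T            (0 -> -1)
 *     0    T       KeReleaseMutex                 (-1 -> 0, still owned)
 *     1    NULL    KeReleaseMutex                 (0 -> 1, waiters woken)
 *
 * a release from any other thread returns STATUS_MUTANT_NOT_OWNED */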
-STDCALL void WRAP_EXPORT(KeInitializeSemaphore)
+wstdcall void WRAP_EXPORT(KeInitializeSemaphore)
 	(struct nt_semaphore *semaphore, LONG count, LONG limit)

 	return jiffies;
-STDCALL struct nt_thread *WRAP_EXPORT(KeGetCurrentThread)
+wstdcall struct task_struct *WRAP_EXPORT(KeGetCurrentThread)
-	struct task_struct *task = get_current();
-	struct nt_thread *ret;
-	struct common_object_header *header;
+	struct task_struct *task = current;
 	DBGTRACE5("task: %p", task);
-	irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
-	nt_list_for_each_entry(header, &object_list, list) {
-		struct nt_thread *thread;
-		DBGTRACE5("header: %p, type: %d", header, header->type);
-		if (header->type != OBJECT_TYPE_NT_THREAD)
-		thread = HEADER_TO_OBJECT(header);
-		DBGTRACE5("thread: %p, task: %p", thread, thread->task);
-		if (thread->task == task) {
-	kspin_unlock_irql(&ntoskernel_lock, irql);
-	DBGTRACE1("couldn't find thread for task %p", task);
-	DBGTRACE5("current thread = %p", ret);
-STDCALL KPRIORITY WRAP_EXPORT(KeSetPriorityThread)
-	(struct nt_thread *thread, KPRIORITY priority)
+wstdcall KPRIORITY WRAP_EXPORT(KeSetPriorityThread)
+	(struct task_struct *task, KPRIORITY priority)
 	KPRIORITY old_prio;
-	TRACEENTER3("thread: %p, priority = %u", thread, priority);
+	TRACEENTER3("task: %p, priority = %u", task, priority);
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
 	/* FIXME: is there a way to set kernel thread prio on 2.4? */
 	old_prio = LOW_PRIORITY;
-	if (rt_task(thread->task))
 		old_prio = LOW_REALTIME_PRIORITY;
 		old_prio = MAXIMUM_PRIORITY;
 	if (priority == LOW_REALTIME_PRIORITY)
-		set_user_nice(thread->task, -20);
+		set_user_nice(task, -20);
-		set_user_nice(thread->task, 10);
+		set_user_nice(task, 10);
 	return old_prio;

 struct trampoline_context {
-	void (*start_routine)(void *) STDCALL;
+	void (*start_routine)(void *) wstdcall;
 	struct nt_thread *thread;
 		thread->pid = task->pid;
 		thread->pid = 0;
-	kspin_lock_init(&thread->lock);
-	init_waitqueue_head(&thread->event_wq);
+	nt_spin_lock_init(&thread->lock);
 	InitializeListHead(&thread->irps);
-	irql = kspin_lock_irql(&nt_event_lock, DISPATCH_LEVEL);
-	initialize_dh(&thread->dh, NotificationEvent, 0, DH_NT_THREAD);
-	kspin_unlock_irql(&nt_event_lock, irql);
+	irql = nt_spin_lock_irql(&dispatcher_lock, DISPATCH_LEVEL);
+	initialize_dh(&thread->dh, ThreadObject, 0);
 	thread->dh.size = sizeof(*thread);
-	DBGTRACE1("thread: %p, task: %p, pid: %d",
+	nt_spin_unlock_irql(&dispatcher_lock, irql);
+	DBGTRACE2("thread: %p, task: %p, pid: %d",
 		  thread, thread->task, thread->pid);
 	ERROR("couldn't allocate thread object");
-void wrap_remove_thread(struct nt_thread *thread)
+static void remove_nt_thread(struct nt_thread *thread)
 	struct nt_list *ent;
-	DBGTRACE1("terminating thread: %p, task: %p, pid: %d",
-		  thread, thread->task, thread->task->pid);
-	/* TODO: make sure waitqueue is empty and destroy it */
-	irql = kspin_lock_irql(&thread->lock, DISPATCH_LEVEL);
-	ent = RemoveHeadList(&thread->irps);
-	kspin_unlock_irql(&thread->lock, irql);
-	irp = container_of(ent, struct irp, threads);
+		ERROR("couldn't find thread for task: %p", current);
+	DBGTRACE1("terminating thread: %p, task: %p, pid: %d",
+		  thread, thread->task, thread->task->pid);
+	/* TODO: make sure waitqueue is empty and destroy it */
+	irql = nt_spin_lock_irql(&thread->lock, DISPATCH_LEVEL);
+	ent = RemoveHeadList(&thread->irps);
+	nt_spin_unlock_irql(&thread->lock, irql);
+	irp = container_of(ent, struct irp, threads);
+	ObDereferenceObject(thread);
+struct nt_thread *get_current_nt_thread(void)
+	struct task_struct *task = current;
+	struct nt_thread *ret;
+	struct common_object_header *header;
+	DBGTRACE5("task: %p", task);
+	irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+	nt_list_for_each_entry(header, &object_list, list) {
+		struct nt_thread *thread;
+		DBGTRACE5("header: %p, type: %d", header, header->type);
+		if (header->type != OBJECT_TYPE_NT_THREAD)
+		thread = HEADER_TO_OBJECT(header);
+		DBGTRACE5("thread: %p, task: %p", thread, thread->task);
+		if (thread->task == task) {
-	ObDereferenceObject(thread);
-	ERROR("couldn't find thread for task: %p", get_current());
+	nt_spin_unlock_irql(&ntoskernel_lock, irql);
+	DBGTRACE3("couldn't find thread for task %p, %d",
+		  task, current->pid);
+	DBGTRACE5("current thread = %p", ret);
-STDCALL NTSTATUS WRAP_EXPORT(PsCreateSystemThread)
+wstdcall NTSTATUS WRAP_EXPORT(PsCreateSystemThread)
 	(void **phandle, ULONG access, void *obj_attr, void *process,
-	 void *client_id, void (*start_routine)(void *) STDCALL, void *context)
+	 void *client_id, void (*start_routine)(void *) wstdcall, void *context)
 	struct trampoline_context *ctx;
 	struct nt_thread *thread;
 	DBGTRACE2("created task: %p (%d)", task, task->pid);
-	DBGTRACE2("created thread: %p", thread);
+	*phandle = OBJECT_TO_HEADER(thread);
+	DBGTRACE2("created thread: %p, %p", thread, *phandle);
 	TRACEEXIT2(return STATUS_SUCCESS);
-STDCALL NTSTATUS WRAP_EXPORT(PsTerminateSystemThread)
+wstdcall NTSTATUS WRAP_EXPORT(PsTerminateSystemThread)
 	(NTSTATUS status)
 	struct nt_thread *thread;
-	thread = KeGetCurrentThread();
+	DBGTRACE2("%p, %08X", current, status);
+	thread = get_current_nt_thread();
 	DBGTRACE2("setting event for thread: %p", thread);
 	KeSetEvent((struct nt_event *)&thread->dh, 0, FALSE);
 	DBGTRACE2("set event for thread: %p", thread);
-	wrap_remove_thread(thread);
+	remove_nt_thread(thread);
 	complete_and_exit(NULL, status);
 	ERROR("oops: %p, %d", thread->task, thread->pid);
-	ERROR("couldn't find thread for task: %p", get_current);
+	ERROR("couldn't find thread for task: %p", current);
 	return STATUS_FAILURE;
-STDCALL BOOLEAN WRAP_EXPORT(KeRemoveEntryDeviceQueue)
+wstdcall BOOLEAN WRAP_EXPORT(KeRemoveEntryDeviceQueue)
 	(struct kdevice_queue *dev_queue, struct kdevice_queue_entry *entry)
 	struct kdevice_queue_entry *e;
-	irql = kspin_lock_irql(&dev_queue->lock, DISPATCH_LEVEL);
+	irql = nt_spin_lock_irql(&dev_queue->lock, DISPATCH_LEVEL);
 	nt_list_for_each_entry(e, &dev_queue->list, list) {
 		if (e == entry) {
 			RemoveEntryList(&e->list);
-			kspin_unlock_irql(&dev_queue->lock, irql);
+			nt_spin_unlock_irql(&dev_queue->lock, irql);
-	kspin_unlock_irql(&dev_queue->lock, irql);
+	nt_spin_unlock_irql(&dev_queue->lock, irql);
-STDCALL BOOLEAN WRAP_EXPORT(KeSynchronizeExecution)
+wstdcall BOOLEAN WRAP_EXPORT(KeSynchronizeExecution)
 	(struct kinterrupt *interrupt, PKSYNCHRONIZE_ROUTINE synch_routine,
 	 void *synch_context)
-	KSPIN_LOCK *spinlock;
+	NT_SPIN_LOCK *spinlock;
-	KIRQL irql = PASSIVE_LEVEL;
+	unsigned long flags;
 	if (interrupt->actual_lock)
 		spinlock = interrupt->actual_lock;
 		spinlock = &interrupt->lock;
-	if (interrupt->synch_irql == DISPATCH_LEVEL)
-		irql = kspin_lock_irql(spinlock, interrupt->synch_irql);
-		kspin_lock(spinlock);
+	nt_spin_lock_irqsave(spinlock, flags);
 	ret = synch_routine(synch_context);
-	if (interrupt->synch_irql == DISPATCH_LEVEL)
-		kspin_unlock_irql(spinlock, irql);
-		kspin_unlock(spinlock);
+	nt_spin_unlock_irqrestore(spinlock, flags);
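The simplification here is telling: instead of choosing between two lock modes based on synch_irql, the new code always takes the interrupt's lock with local interrupts disabled, which is the Linux-native way to exclude an ISR. The equivalent pattern against a plain kernel spinlock, as a sketch of the idiom rather than the project's nt_spin API:

#include <linux/spinlock.h>

/* sketch: run fn() mutually exclusive with an interrupt handler that
 * takes the same lock */
static int sync_with_isr(spinlock_t *lock, int (*fn)(void *), void *ctx)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(lock, flags);	/* blocks the ISR on this CPU+lock */
	ret = fn(ctx);
	spin_unlock_irqrestore(lock, flags);
	return ret;
}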
+wstdcall void *WRAP_EXPORT(MmAllocateContiguousMemorySpecifyCache)
+	(SIZE_T size, PHYSICAL_ADDRESS lowest, PHYSICAL_ADDRESS highest,
+	 PHYSICAL_ADDRESS boundary, enum memory_caching_type cache_type)
+	DBGTRACE2("%lu, %Lu, %Lu, %Lu, %d", size, lowest, highest, boundary,
+	addr = ExAllocatePoolWithTag(NonPagedPool, size, 0);
+	DBGTRACE2("%p", addr);

+wstdcall void WRAP_EXPORT(MmFreeContiguousMemorySpecifyCache)
+	(void *base, SIZE_T size, enum memory_caching_type cache_type)
+	DBGTRACE2("%p", base);

+wstdcall PHYSICAL_ADDRESS WRAP_EXPORT(MmGetPhysicalAddress)
+	DBGTRACE2("%p", base);
+	return virt_to_phys(base);

 /* Atheros card with pciid 168C:0014 calls this function with 0xf0000
  * and 0xf6ef0 address, and then check for things that seem to be
  * related to ACPI: "_SM_" and "_DMI_". This may be the hack they do
 	struct wrap_mdl *wrap_mdl;
 	wrap_mdl = (struct wrap_mdl *)
 		((char *)mdl - offsetof(struct wrap_mdl, mdl));
-	irql = kspin_lock_irql(&ntoskernel_lock,
-			       DISPATCH_LEVEL);
+	irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
 	RemoveEntryList(&wrap_mdl->list);
-	kspin_unlock_irql(&ntoskernel_lock, irql);
+	nt_spin_unlock_irql(&ntoskernel_lock, irql);
 	if (mdl->flags & MDL_CACHE_ALLOCATED) {
-		DBGTRACE3("freeing mdl cache: %p (%hu)",
-			  wrap_mdl, mdl->flags);
+		DBGTRACE3("freeing mdl cache: %p, %p, %p",
+			  wrap_mdl, mdl, mdl->mappedsystemva);
 		kmem_cache_free(mdl_cache, wrap_mdl);
-		DBGTRACE3("freeing mdl: %p (%hu)",
-			  wrap_mdl, mdl->flags);
+		DBGTRACE3("freeing mdl: %p, %p, %p",
+			  wrap_mdl, mdl, mdl->mappedsystemva);
 		kfree(wrap_mdl);
-STDCALL void WRAP_EXPORT(IoBuildPartialMdl)
+wstdcall void WRAP_EXPORT(IoBuildPartialMdl)
 	(struct mdl *source, struct mdl *target, void *virt, ULONG length)
 	MmInitializeMdl(target, virt, length);
+	target->flags |= MDL_PARTIAL;

 /* FIXME: We don't update MDL to physical page mapping, since in Linux
  * the pages are in memory anyway; if a driver treats an MDL as
  * opaque, we should be safe; otherwise, the driver may break */
-STDCALL void WRAP_EXPORT(MmBuildMdlForNonPagedPool)
+wstdcall void WRAP_EXPORT(MmBuildMdlForNonPagedPool)
 	(struct mdl *mdl)
+	PFN_NUMBER *mdl_pages;
+	TRACEENTER4("%p", mdl);
+	/* already mapped */
+	// mdl->mappedsystemva = MmGetMdlVirtualAddress(mdl);
 	mdl->flags |= MDL_SOURCE_IS_NONPAGED_POOL;
-	mdl->mappedsystemva = MmGetMdlVirtualAddress(mdl);
+	DBGTRACE4("%p, %p, %p, %d, %d", mdl, mdl->mappedsystemva, mdl->startva,
+		  mdl->byteoffset, mdl->bytecount);
+	n = SPAN_PAGES(MmGetSystemAddressForMdl(mdl), MmGetMdlByteCount(mdl));
+	if (n > CACHE_MDL_PAGES)
+		WARNING("%p, %d, %d", MmGetSystemAddressForMdl(mdl),
+			MmGetMdlByteCount(mdl), n);
+	mdl_pages = MmGetMdlPfnArray(mdl);
+	for (i = 0; i < n; i++)
+		mdl_pages[i] = (ULONG_PTR)mdl->startva + (i * PAGE_SIZE);
-STDCALL void *WRAP_EXPORT(MmMapLockedPages)
+wstdcall void *WRAP_EXPORT(MmMapLockedPages)
 	(struct mdl *mdl, KPROCESSOR_MODE access_mode)
+	/* already mapped */
+	// mdl->mappedsystemva = MmGetMdlVirtualAddress(mdl);
 	mdl->flags |= MDL_MAPPED_TO_SYSTEM_VA;
-	return MmGetMdlVirtualAddress(mdl);
+	/* what is the need for MDL_PARTIAL_HAS_BEEN_MAPPED? */
+	if (mdl->flags & MDL_PARTIAL)
+		mdl->flags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
+	return mdl->mappedsystemva;
-STDCALL void *WRAP_EXPORT(MmMapLockedPagesSpecifyCache)
+wstdcall void *WRAP_EXPORT(MmMapLockedPagesSpecifyCache)
 	(struct mdl *mdl, KPROCESSOR_MODE access_mode,
 	 enum memory_caching_type cache_type, void *base_address,
 	 ULONG bug_check, enum mm_page_priority priority)
-STDCALL void *WRAP_EXPORT(MmLockPagableDataSection)
+wstdcall void *WRAP_EXPORT(MmLockPagableDataSection)
 	(void *address)
 	return address;

-STDCALL void WRAP_EXPORT(MmUnlockPagableImageSection)
+wstdcall void WRAP_EXPORT(MmUnlockPagableImageSection)
-STDCALL NTSTATUS WRAP_EXPORT(ObReferenceObjectByHandle)
+wstdcall NTSTATUS WRAP_EXPORT(ObReferenceObjectByHandle)
 	(void *handle, ACCESS_MASK desired_access, void *obj_type,
 	 KPROCESSOR_MODE access_mode, void **object, void *handle_info)
 	struct common_object_header *hdr;
-	irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+	DBGTRACE2("%p", handle);
+	irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
 	hdr = HANDLE_TO_HEADER(handle);
 	hdr->ref_count++;
 	*object = HEADER_TO_OBJECT(hdr);
-	kspin_unlock_irql(&ntoskernel_lock, irql);
+	nt_spin_unlock_irql(&ntoskernel_lock, irql);
+	DBGTRACE2("%p, %p, %d, %p", hdr, object, hdr->ref_count, *object);
 	return STATUS_SUCCESS;
 /* DDK doesn't say if return value should be before incrementing or
  * after incrementing reference count, but according to #reactos
  * devels, it should be return value after incrementing */
-_FASTCALL LONG WRAP_EXPORT(ObfReferenceObject)
-	(FASTCALL_DECL_1(void *object))
+wfastcall LONG WRAP_EXPORT(ObfReferenceObject)
 	struct common_object_header *hdr;
-	irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+	irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
 	hdr = OBJECT_TO_HEADER(object);
 	ret = ++hdr->ref_count;
-	kspin_unlock_irql(&ntoskernel_lock, irql);
+	DBGTRACE2("%p, %d, %p", hdr, hdr->ref_count, object);
+	nt_spin_unlock_irql(&ntoskernel_lock, irql);
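Both Obf routines rely on every managed object being prefixed by a common header, so OBJECT_TO_HEADER and HEADER_TO_OBJECT reduce to pointer arithmetic around the header. A sketch of that layout trick; the field set is reduced to what these hunks use, and the linkage field is simplified from the project's list type:

#include <stddef.h>

/* sketch: header placed immediately before the object it manages */
struct common_object_header {
	struct common_object_header *next;	/* object_list linkage (simplified) */
	int type;				/* e.g. OBJECT_TYPE_NT_THREAD */
	int ref_count;
};

#define HEADER_TO_OBJECT(hdr) ((void *)((hdr) + 1))
#define OBJECT_TO_HEADER(obj) (((struct common_object_header *)(obj)) - 1)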
-_FASTCALL void WRAP_EXPORT(ObfDereferenceObject)
-	(FASTCALL_DECL_1(void *object))
+wfastcall void WRAP_EXPORT(ObfDereferenceObject)
 	struct common_object_header *hdr;
 	TRACEENTER2("object: %p", object);
-	irql = kspin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
+	irql = nt_spin_lock_irql(&ntoskernel_lock, DISPATCH_LEVEL);
 	hdr = OBJECT_TO_HEADER(object);
 	DBGTRACE2("hdr: %p", hdr);
 	hdr->ref_count--;
 	ref_count = hdr->ref_count;
-	kspin_unlock_irql(&ntoskernel_lock, irql);
+	DBGTRACE2("object: %p, %d", object, ref_count);
+	nt_spin_unlock_irql(&ntoskernel_lock, irql);
 	if (ref_count < 0)
 		ERROR("invalid object: %p (%d)", object, ref_count);
 	if (ref_count <= 0) {