/*
 * Copyright (C) 2003-2005 Pontus Fuchs, Giridhar Pemmasani
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
16
#ifndef _NTOSKERNEL_H_
17
#define _NTOSKERNEL_H_
19
#include <linux/types.h>
20
#include <linux/timer.h>
21
#include <linux/time.h>
22
#include <linux/module.h>
23
#include <linux/kmod.h>
25
#include <linux/netdevice.h>
26
#include <linux/wireless.h>
27
#include <linux/pci.h>
28
#include <linux/wait.h>
30
#include <linux/delay.h>
32
#include <linux/random.h>
33
#include <linux/ctype.h>
34
#include <linux/list.h>
35
#include <linux/sched.h>
36
#include <linux/usb.h>
37
#include <linux/spinlock.h>
39
#include <linux/version.h>
40
#include <linux/etherdevice.h>
41
#include <net/iw_handler.h>
42
#include <linux/ethtool.h>
43
#include <linux/if_arp.h>
44
#include <linux/rtnetlink.h>
45
#include <linux/highmem.h>
46
#include <linux/percpu.h>
47
#include <linux/kthread.h>
48
#include <linux/workqueue.h>
50
#if !defined(CONFIG_X86) && !defined(CONFIG_X86_64)
51
#error "this module is for x86 or x86_64 architectures only"
54
/* Linux < 2.6.14 has no kzalloc(); emulate it with kmalloc + memset */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
#define gfp_t unsigned int __nocast

static inline void *_kzalloc(size_t size, gfp_t flags)
{
	void *p = kmalloc(size, flags);
	if (likely(p != NULL))
		memset(p, 0, size);
	return p;
}

#define kzalloc(size, flags) _kzalloc(size, flags)
#endif
68
/* Interrupt backwards compatibility stuff */
69
#include <linux/interrupt.h>
70
/* NOTE(review): compat mapping of irqreturn_t for old kernels; the
 * 2.6.29 cutoff is taken from the visible source — confirm upstream */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29)
#define irqreturn_t void
#endif /* Linux < 2.6.29 */
78
/* Linux < 2.6.16 has no mutex API; emulate mutexes with semaphores */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
#define mutex semaphore
#define mutex_init(m) sema_init(m, 1)
#define mutex_lock(m) down(m)
#define mutex_trylock(m) (!down_trylock(m))
#define mutex_unlock(m) up(m)
/* m is a pointer; atomic_read() needs the address of the count field
 * (the original "atomic_read(m.count)" would not even compile) */
#define mutex_is_locked(m) (atomic_read(&(m)->count) == 0)
#endif /* Linux < 2.6.16 */
89
/* Linux < 2.6.26: set_cpus_allowed() takes the cpumask by value */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
#define set_cpus_allowed_ptr(task, mask) set_cpus_allowed(task, *mask)
#endif /* Linux < 2.6.26 */
94
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
95
#define cpumask_copy(dst, src) do { *dst = *src; } while (0)
96
#define cpumask_equal(mask1, mask2) cpus_equal(*mask1, *mask2)
97
#define cpumask_setall(mask) cpus_setall(*mask)
98
static cpumask_t cpumasks[NR_CPUS];
99
#define cpumask_of(cpu) \
101
cpumasks[cpu] = cpumask_of_cpu(cpu); \
104
#endif /* Linux < 2.6.28 */
105
#endif /* CONFIG_SMP */
107
/* older kernels expose the allowed-cpus mask as a plain struct field */
#ifndef tsk_cpus_allowed
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
#endif
112
#define __packed __attribute__((packed))
115
/* pci functions in 2.6 kernels have problems allocating dma buffers,
116
* but seem to work fine with dma functions
118
#include <asm/dma-mapping.h>
120
#define PCI_DMA_ALLOC_COHERENT(pci_dev,size,dma_handle) \
121
dma_alloc_coherent(&pci_dev->dev,size,dma_handle, \
122
GFP_KERNEL | __GFP_REPEAT)
123
#define PCI_DMA_FREE_COHERENT(pci_dev,size,cpu_addr,dma_handle) \
124
dma_free_coherent(&pci_dev->dev,size,cpu_addr,dma_handle)
125
#define PCI_DMA_MAP_SINGLE(pci_dev,addr,size,direction) \
126
dma_map_single(&pci_dev->dev,addr,size,direction)
127
#define PCI_DMA_UNMAP_SINGLE(pci_dev,dma_handle,size,direction) \
128
dma_unmap_single(&pci_dev->dev,dma_handle,size,direction)
129
#define MAP_SG(pci_dev, sglist, nents, direction) \
130
dma_map_sg(&pci_dev->dev, sglist, nents, direction)
131
#define UNMAP_SG(pci_dev, sglist, nents, direction) \
132
dma_unmap_sg(&pci_dev->dev, sglist, nents, direction)
133
#define PCI_DMA_MAP_ERROR(dma_addr) dma_mapping_error(dma_addr)
136
/* older kernels used CONFIG_NET_RADIO for wireless extensions */
#if defined(CONFIG_NET_RADIO) && !defined(CONFIG_WIRELESS_EXT)
#define CONFIG_WIRELESS_EXT
#endif
140
#define prepare_wait_condition(task, var, value) \
147
/* Wait in wait_state (e.g., TASK_INTERRUPTIBLE) for condition to
148
* become true; timeout is either jiffies (> 0) to wait or 0 to wait
150
* When timeout == 0, return value is
151
* > 0 if condition becomes true, or
152
* < 0 if signal is pending on the thread.
153
* When timeout > 0, return value is
154
* > 0 if condition becomes true before timeout,
155
* < 0 if signal is pending on the thread before timeout, or
156
* 0 if timedout (condition may have become true at the same time)
159
#define wait_condition(condition, timeout, wait_state) \
161
long ret = timeout ? timeout : 1; \
163
if (signal_pending(current)) { \
164
ret = -ERESTARTSYS; \
167
set_current_state(wait_state); \
169
__set_current_state(TASK_RUNNING); \
173
ret = schedule_timeout(ret); \
184
struct wrap_workqueue_struct;
186
struct wrap_work_struct {
187
struct list_head list;
188
void (*func)(struct wrap_work_struct *data);
190
/* whether/on which thread scheduled */
191
struct workqueue_thread *thread;
194
#define work_struct wrap_work_struct
195
#define workqueue_struct wrap_workqueue_struct
198
#define INIT_WORK(work, pfunc) \
200
(work)->func = (pfunc); \
201
(work)->data = (work); \
202
(work)->thread = NULL; \
205
/* route the kernel workqueue API to the wrapper implementations;
 * the #undef for queue_work is reconstructed to match the pattern */
#undef create_singlethread_workqueue
#define create_singlethread_workqueue(wq) wrap_create_wq(wq, 1, 0)
#undef create_workqueue
#define create_workqueue(wq) wrap_create_wq(wq, 0, 0)
#undef destroy_workqueue
#define destroy_workqueue(wq) wrap_destroy_wq(wq)
#undef queue_work
#define queue_work(wq, work) wrap_queue_work(wq, work)
#undef flush_workqueue
#define flush_workqueue(wq) wrap_flush_wq(wq)
216
struct workqueue_struct *wrap_create_wq(const char *name, u8 singlethread,
218
void wrap_destroy_wq(struct workqueue_struct *workq);
219
int wrap_queue_work(struct workqueue_struct *workq, struct work_struct *work);
220
void wrap_cancel_work(struct work_struct *work);
221
void wrap_flush_wq(struct workqueue_struct *workq);
225
/* Compatibility for Linux before 2.6.20 where INIT_WORK takes 3 arguments */
226
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) && \
227
!defined(INIT_WORK_NAR) && \
228
!defined(INIT_DELAYED_WORK_DEFERRABLE)
229
typedef void (*compat_work_func_t)(void *work);
230
typedef void (*work_func_t)(struct work_struct *work);
231
static inline void (INIT_WORK)(struct work_struct *work, work_func_t func)
233
INIT_WORK(work, (compat_work_func_t)func, work);
240
/* Linux > 2.6.18 dropped the pt_regs argument from interrupt handlers */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
#define ISR_PT_REGS_PARAM_DECL
#else
#define ISR_PT_REGS_PARAM_DECL , struct pt_regs *regs
#endif
246
/* Linux <= 2.6.16 names this iterator for_each_cpu */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,16)
#define for_each_possible_cpu(_cpu) for_each_cpu(_cpu)
#endif
250
/* CHECKSUM_HW was renamed CHECKSUM_PARTIAL in later kernels */
#ifndef CHECKSUM_PARTIAL
#define CHECKSUM_PARTIAL CHECKSUM_HW
#endif
255
#define IRQF_SHARED SA_SHIRQ
258
/* fallback for kernels without the call_usermodehelper wait constants */
#ifndef UMH_WAIT_PROC
#define UMH_WAIT_PROC 1
#endif
262
/* append 'length' bytes from 'from' to the skb; note that 'length'
 * is evaluated twice — avoid side effects in that argument */
#define memcpy_skb(skb, from, length) \
	memcpy(skb_put(skb, length), from, length)
265
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
267
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
272
#define __GFP_DMA32 GFP_DMA
275
/* Linux <= 2.6.22 kmem_cache_create() takes an extra (destructor) argument */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)
#define wrap_kmem_cache_create(name, size, align, flags) \
	kmem_cache_create(name, size, align, flags, NULL, NULL)
#else
#define wrap_kmem_cache_create(name, size, align, flags) \
	kmem_cache_create(name, size, align, flags, NULL)
#endif
283
/* Linux < 2.6.34 compat: old names for mc count and USB coherent buffers */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
#define netdev_mc_count(dev) ((dev)->mc_count)
#define usb_alloc_coherent(dev, size, mem_flags, dma) (usb_buffer_alloc((dev), (size), (mem_flags), (dma)))
#define usb_free_coherent(dev, size, addr, dma) (usb_buffer_free((dev), (size), (addr), (dma)))
#endif /* Linux < 2.6.34 */
289
/* daemonize() was removed in Linux 3.8; make it a no-op */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
#define daemonize(name, ...) do {} while (0)
#endif
293
/* Linux < 3.9 add_taint() has no lockdep_ok argument; drop it
 * (self-referential macro: the inner add_taint is not re-expanded) */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
#define add_taint(flag, lockdep_ok) add_taint(flag)
#endif
297
#include "winnt_types.h"
298
#include "ndiswrapper.h"
299
#include "pe_linker.h"
304
/* Linux < 2.6.18 has no netif_tx_lock helpers; use xmit_lock directly */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
static inline void netif_tx_lock(struct net_device *dev)
{
	spin_lock(&dev->xmit_lock);
}

static inline void netif_tx_unlock(struct net_device *dev)
{
	spin_unlock(&dev->xmit_lock);
}

static inline void netif_tx_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->xmit_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->xmit_lock);
}
#endif /* Linux < 2.6.18 */
323
/* netif_poll_* disappeared with the 2.6.24 NAPI rework; bodies were
 * not visible in the mangled source — reconstructed as no-ops, verify */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
static inline void netif_poll_enable(struct net_device *dev)
{
}

static inline void netif_poll_disable(struct net_device *dev)
{
}
#endif
332
/* Linux >= 2.6.24 moved /proc/net under the init network namespace */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
#define proc_net_root init_net.proc_net
#else
#define proc_net_root proc_net
#endif
338
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) && \
339
(LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0))) || \
340
(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,42))
341
/* before the skb_frag_page() accessor, the page was a plain field */
#ifndef skb_frag_page
#define skb_frag_page(frag) ((frag)->page)
#endif
346
/* Linux < 3.11 passed the net_device directly to notifier callbacks */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
#define netdev_notifier_info_to_dev(x) ((struct net_device *)(x))
#endif
350
#ifdef INIT_COMPLETION
351
static inline void reinit_completion(struct completion *x)
358
/* Windows time units: 100 ns ticks since 1601-01-01 */
#define TICKSPERSEC 10000000
#define TICKSPERMSEC 10000
#define SECSPERDAY 86400
#define TICKSPERJIFFY ((TICKSPERSEC + HZ - 1) / HZ)

/* integer division rounding up; every argument use is parenthesized
 * so operands like ternary expressions group correctly */
#define int_div_round(x, y) (((x) + ((y) - 1)) / (y))

/* 1601 to 1970 is 369 years plus 89 leap days */
#define SECS_1601_TO_1970 ((369 * 365 + 89) * (u64)SECSPERDAY)
#define TICKS_1601_TO_1970 (SECS_1601_TO_1970 * TICKSPERSEC)

/* 100ns units to HZ; if sys_time is negative, relative to current
 * clock, otherwise from year 1601 */
#define SYSTEM_TIME_TO_HZ(sys_time) \
	(((sys_time) <= 0) ? \
	 int_div_round(((u64)HZ * (-(sys_time))), TICKSPERSEC) : \
	 int_div_round(((s64)HZ * ((sys_time) - ticks_1601())), TICKSPERSEC))

#define MSEC_TO_HZ(ms) int_div_round(((ms) * HZ), 1000)
#define USEC_TO_HZ(us) int_div_round(((us) * HZ), 1000000)
379
extern u64 wrap_ticks_to_boot;
381
static inline u64 ticks_1601(void)
383
return wrap_ticks_to_boot + (u64)jiffies * TICKSPERJIFFY;
386
typedef void (*generic_func)(void);
395
#define WIN_SYMBOL(name, argc) \
396
{#name, (generic_func) win2lin_ ## name ## _ ## argc}
397
#define WIN_WIN_SYMBOL(name, argc) \
398
{#name, (generic_func) win2lin__win_ ## name ## _ ## argc}
399
#define WIN_FUNC_DECL(name, argc) \
400
extern typeof(name) win2lin_ ## name ## _ ## argc;
401
#define WIN_FUNC_PTR(name, argc) win2lin_ ## name ## _ ## argc
405
#define WIN_SYMBOL(name, argc) {#name, (generic_func)name}
406
#define WIN_WIN_SYMBOL(name, argc) {#name, (generic_func)_win_ ## name}
407
#define WIN_FUNC_DECL(name, argc)
408
#define WIN_FUNC_PTR(name, argc) name
412
#define WIN_FUNC(name, argc) (name)
413
/* map name s to f - if f is different from s */
414
#define WIN_SYMBOL_MAP(s, f)
416
/* pack four characters into a little-endian Windows pool tag */
#define POOL_TAG(A, B, C, D) \
	((ULONG)((A) + ((B) << 8) + ((C) << 16) + ((D) << 24)))
420
char name[MAX_DRIVER_NAME_LEN];
421
UINT (*entry)(struct driver_object *, struct unicode_string *) wstdcall;
426
IMAGE_NT_HEADERS *nt_hdr;
427
IMAGE_OPTIONAL_HEADER *opt_hdr;
430
struct ndis_mp_block;
433
struct nt_slist slist;
434
struct timer_list timer;
435
struct nt_timer *nt_timer;
438
unsigned long wrap_timer_magic;
442
struct ntos_work_item {
449
struct wrap_device_setting {
451
char name[MAX_SETTING_NAME_LEN];
452
char value[MAX_SETTING_VALUE_LEN];
456
struct wrap_bin_file {
457
char name[MAX_DRIVER_NAME_LEN];
462
#define WRAP_DRIVER_CLIENT_ID 1
466
struct driver_object *drv_obj;
467
char name[MAX_DRIVER_NAME_LEN];
468
char version[MAX_SETTING_VALUE_LEN];
469
unsigned short num_pe_images;
470
struct pe_image pe_images[MAX_DRIVER_PE_IMAGES];
471
unsigned short num_bin_files;
472
struct wrap_bin_file *bin_files;
473
struct nt_list settings;
475
struct ndis_driver *ndis_driver;
479
HW_INITIALIZED = 1, HW_SUSPENDED, HW_HALTED, HW_DISABLED,
483
/* first part is (de)initialized once by loader */
490
char conf_file_name[MAX_DRIVER_NAME_LEN];
491
char driver_name[MAX_DRIVER_NAME_LEN];
492
struct wrap_driver *driver;
493
struct nt_list settings;
495
/* rest should be (de)initialized when a device is
497
struct cm_resource_list *resource_list;
498
unsigned long hw_status;
499
struct device_object *pdo;
502
struct pci_dev *pdev;
503
enum device_power_state wake_state;
506
struct usb_device *udev;
507
struct usb_interface *intf;
509
struct nt_list wrap_urb_list;
514
/* true for devices on PCI or PCMCIA buses */
#define wrap_is_pci_bus(dev_bus) \
	(WRAP_BUS(dev_bus) == WRAP_PCI_BUS || \
	 WRAP_BUS(dev_bus) == WRAP_PCMCIA_BUS)
518
/* earlier versions of ndiswrapper used 0 as USB_BUS */
#define wrap_is_usb_bus(dev_bus) \
	(WRAP_BUS(dev_bus) == WRAP_USB_BUS || \
	 WRAP_BUS(dev_bus) == WRAP_INTERNAL_BUS)
523
#define wrap_is_usb_bus(dev_bus) 0
525
/* true for either of the two recognized bluetooth device classes */
#define wrap_is_bluetooth_device(dev_bus) \
	(WRAP_DEVICE(dev_bus) == WRAP_BLUETOOTH_DEVICE1 || \
	 WRAP_DEVICE(dev_bus) == WRAP_BLUETOOTH_DEVICE2)
529
extern struct workqueue_struct *ntos_wq;
530
extern struct workqueue_struct *ndis_wq;
531
extern struct workqueue_struct *wrapndis_wq;
533
#define atomic_unary_op(var, size, oper) \
536
__asm__ __volatile__( \
537
LOCK_PREFIX oper "b %b0\n\t" : "+m" (var)); \
538
else if (size == 2) \
539
__asm__ __volatile__( \
540
LOCK_PREFIX oper "w %w0\n\t" : "+m" (var)); \
541
else if (size == 4) \
542
__asm__ __volatile__( \
543
LOCK_PREFIX oper "l %0\n\t" : "+m" (var)); \
544
else if (size == 8) \
545
__asm__ __volatile__( \
546
LOCK_PREFIX oper "q %q0\n\t" : "+m" (var)); \
548
extern void _invalid_op_size_(void); \
549
_invalid_op_size_(); \
553
#define atomic_inc_var_size(var, size) atomic_unary_op(var, size, "inc")
555
#define atomic_inc_var(var) atomic_inc_var_size(var, sizeof(var))
557
#define atomic_dec_var_size(var, size) atomic_unary_op(var, size, "dec")
559
#define atomic_dec_var(var) atomic_dec_var_size(var, sizeof(var))
561
#define pre_atomic_add(var, i) \
564
__asm__ __volatile__( \
565
LOCK_PREFIX "xadd %0, %1\n\t" \
566
: "=r"(pre), "+m"(var) \
571
#define post_atomic_add(var, i) (pre_atomic_add(var, i) + i)
573
//#define DEBUG_IRQL 1
576
#define assert_irql(cond) \
578
KIRQL _irql_ = current_irql(); \
580
WARNING("assertion '%s' failed: %d", #cond, _irql_); \
587
#define assert_irql(cond) do { } while (0)
590
/* When preempt is enabled, we should preempt_disable to raise IRQL to
591
* DISPATCH_LEVEL, to be consistent with the semantics. However, using
592
* a mutex instead, so that only ndiswrapper threads run one at a time
593
* on a processor when at DISPATCH_LEVEL seems to be enough. So that
594
* is what we will use until we learn otherwise. If
595
* preempt_(en|dis)able is required for some reason, comment out
596
* following #define. */
598
#define WRAP_PREEMPT 1
600
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_RT)
602
#define WRAP_PREEMPT 1
606
//#undef WRAP_PREEMPT
614
cpumask_t cpus_allowed;
616
struct task_struct *task;
619
DECLARE_PER_CPU(struct irql_info, irql_info);
621
static inline KIRQL raise_irql(KIRQL newirql)
623
struct irql_info *info;
625
assert(newirql == DISPATCH_LEVEL);
626
info = &get_cpu_var(irql_info);
627
if (info->task == current) {
628
assert(info->count > 0);
629
assert(mutex_is_locked(&info->lock));
630
#if defined(CONFIG_SMP) && DEBUG >= 1
631
assert(cpumask_equal(tsk_cpus_allowed(current),
632
cpumask_of(smp_processor_id())));
635
put_cpu_var(irql_info);
636
return DISPATCH_LEVEL;
638
/* TODO: is this enough to pin down to current cpu? */
640
assert(task_cpu(current) == smp_processor_id());
641
cpumask_copy(&info->cpus_allowed, tsk_cpus_allowed(current));
642
set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
644
put_cpu_var(irql_info);
645
mutex_lock(&info->lock);
646
assert(info->count == 0);
647
assert(info->task == NULL);
649
info->task = current;
650
return PASSIVE_LEVEL;
653
static inline void lower_irql(KIRQL oldirql)
655
struct irql_info *info;
657
assert(oldirql <= DISPATCH_LEVEL);
658
info = &get_cpu_var(irql_info);
659
assert(info->task == current);
660
assert(mutex_is_locked(&info->lock));
661
assert(info->count > 0);
662
if (--info->count == 0) {
665
set_cpus_allowed_ptr(current, &info->cpus_allowed);
667
mutex_unlock(&info->lock);
669
put_cpu_var(irql_info);
672
static inline KIRQL current_irql(void)
675
if (in_irq() || irqs_disabled())
677
if (in_atomic() || in_interrupt())
678
EXIT4(return SOFT_IRQL);
679
count = get_cpu_var(irql_info).count;
680
put_cpu_var(irql_info);
682
EXIT6(return DISPATCH_LEVEL);
684
EXIT6(return PASSIVE_LEVEL);
689
static inline KIRQL current_irql(void)
691
if (in_irq() || irqs_disabled())
694
EXIT4(return SOFT_IRQL);
696
EXIT6(return DISPATCH_LEVEL);
698
EXIT6(return PASSIVE_LEVEL);
701
static inline KIRQL raise_irql(KIRQL newirql)
703
KIRQL ret = in_atomic() ? DISPATCH_LEVEL : PASSIVE_LEVEL;
704
assert(newirql == DISPATCH_LEVEL);
705
assert(current_irql() <= DISPATCH_LEVEL);
710
static inline void lower_irql(KIRQL oldirql)
712
assert(current_irql() == DISPATCH_LEVEL);
718
#define irql_gfp() (in_atomic() ? GFP_ATOMIC : GFP_KERNEL)
720
/* Windows spinlocks are of type ULONG_PTR which is not big enough to
721
* store Linux spinlocks; so we implement Windows spinlocks using
722
* ULONG_PTR space with our own functions/macros */
724
/* Windows seems to use 0 for unlocked state of spinlock - if Linux
725
* convention of 1 for unlocked state is used, at least prism54 driver
728
#define NT_SPIN_LOCK_UNLOCKED 0
729
#define NT_SPIN_LOCK_LOCKED 1
731
/* set a Windows spinlock to its unlocked (0) state */
static inline void nt_spin_lock_init(NT_SPIN_LOCK *lock)
{
	*lock = NT_SPIN_LOCK_UNLOCKED;
}
738
/* spin until the lock's previous value was "unlocked"; loop structure
 * reconstructed from the mangled source — verify against upstream */
static inline void nt_spin_lock(NT_SPIN_LOCK *lock)
{
	while (1) {
		unsigned long lockval = xchg(lock, NT_SPIN_LOCK_LOCKED);

		if (likely(lockval == NT_SPIN_LOCK_UNLOCKED))
			break;
		if (unlikely(lockval > NT_SPIN_LOCK_LOCKED)) {
			ERROR("bad spinlock: 0x%lx at %p", lockval, lock);
			return;
		}
		/* "rep; nop" doesn't change cx register, it's a "pause" */
		__asm__ __volatile__("rep; nop");
	}
}
754
/* release the lock; warn if it wasn't actually held */
static inline void nt_spin_unlock(NT_SPIN_LOCK *lock)
{
	unsigned long lockval = xchg(lock, NT_SPIN_LOCK_UNLOCKED);

	if (likely(lockval == NT_SPIN_LOCK_LOCKED))
		return;
	WARNING("unlocking unlocked spinlock: 0x%lx at %p", lockval, lock);
}
765
#define nt_spin_lock(lock) do { } while (0)
767
#define nt_spin_unlock(lock) do { } while (0)
771
/* When kernel would've disabled preempt (e.g., in interrupt
772
* handlers), we need to fake preempt so driver thinks it is running
775
/* raise IRQL to given (higher) IRQL if necessary before locking */
776
static inline KIRQL nt_spin_lock_irql(NT_SPIN_LOCK *lock, KIRQL newirql)
778
KIRQL oldirql = raise_irql(newirql);
783
/* lower IRQL to given (lower) IRQL if necessary after unlocking */
784
static inline void nt_spin_unlock_irql(NT_SPIN_LOCK *lock, KIRQL oldirql)
786
nt_spin_unlock(lock);
790
/* disable irqs and preemption, then take the lock; preempt_disable()
 * inferred from the matching unlock macro below — verify upstream */
#define nt_spin_lock_irqsave(lock, flags)		\
do {							\
	local_irq_save(flags);				\
	preempt_disable();				\
	nt_spin_lock(lock);				\
} while (0)
797
/* release the lock, re-enable preemption and irqs, then reschedule
 * if needed */
#define nt_spin_unlock_irqrestore(lock, flags)		\
do {							\
	nt_spin_unlock(lock);				\
	preempt_enable_no_resched();			\
	local_irq_restore(flags);			\
	preempt_check_resched();			\
} while (0)
805
/* number of pages spanned by [ptr, ptr + length); the trailing
 * page-shift was missing in the mangled source — verify upstream */
static inline ULONG SPAN_PAGES(void *ptr, SIZE_T length)
{
	return PAGE_ALIGN(((unsigned long)ptr & (PAGE_SIZE - 1)) + length)
		>> PAGE_SHIFT;
}
813
/* TODO: can these be implemented without using spinlock? */
815
static inline struct nt_slist *PushEntrySList(nt_slist_header *head,
816
struct nt_slist *entry,
819
KIRQL irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
820
entry->next = head->next;
823
nt_spin_unlock_irql(lock, irql);
824
TRACE4("%p, %p, %p", head, entry, entry->next);
828
static inline struct nt_slist *PopEntrySList(nt_slist_header *head,
831
struct nt_slist *entry;
832
KIRQL irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
835
head->next = entry->next;
838
nt_spin_unlock_irql(lock, irql);
839
TRACE4("%p, %p", head, entry);
845
/* halves of a 64-bit value; arguments parenthesized so expressions
 * (e.g. "x + 0") shift/cast as a whole */
#define u64_low_32(x) ((u32)(x))
#define u64_high_32(x) ((u32)((x) >> 32))
848
/* atomic 64-bit compare-and-exchange via cmpxchg8b (32-bit x86);
 * returns the previous value of *ptr; prolog/epilog reconstructed
 * from the mangled source — verify upstream */
static inline u64 nt_cmpxchg8b(volatile u64 *ptr, u64 old, u64 new)
{
	u64 prev;

	__asm__ __volatile__(
		LOCK_PREFIX "cmpxchg8b %0\n"
		: "+m" (*ptr), "=A" (prev)
		: "A" (old), "b" (u64_low_32(new)), "c" (u64_high_32(new)));
	return prev;
}
860
/* slist routines below update slist atomically - no need for
863
static inline struct nt_slist *PushEntrySList(nt_slist_header *head,
864
struct nt_slist *entry,
867
nt_slist_header old, new;
869
old.align = head->align;
870
entry->next = old.next;
872
new.depth = old.depth + 1;
873
} while (nt_cmpxchg8b(&head->align, old.align, new.align) != old.align);
874
TRACE4("%p, %p, %p", head, entry, old.next);
878
static inline struct nt_slist *PopEntrySList(nt_slist_header *head,
881
struct nt_slist *entry;
882
nt_slist_header old, new;
884
old.align = head->align;
888
new.next = entry->next;
889
new.depth = old.depth - 1;
890
} while (nt_cmpxchg8b(&head->align, old.align, new.align) != old.align);
891
TRACE4("%p, %p", head, entry);
897
/* sleep for n jiffies in interruptible state */
#define sleep_hz(n)					\
do {							\
	set_current_state(TASK_INTERRUPTIBLE);		\
	schedule_timeout(n);				\
} while (0)
903
int ntoskernel_init(void);
904
void ntoskernel_exit(void);
905
int ntoskernel_init_device(struct wrap_device *wd);
906
void ntoskernel_exit_device(struct wrap_device *wd);
907
void *allocate_object(ULONG size, enum common_object_type type,
908
struct unicode_string *name);
914
static inline int usb_init(void) { return 0; }
915
static inline void usb_exit(void) {}
917
int usb_init_device(struct wrap_device *wd);
918
void usb_exit_device(struct wrap_device *wd);
920
int wrap_procfs_init(void);
921
void wrap_procfs_remove(void);
923
int link_pe_images(struct pe_image *pe_image, unsigned short n);
925
int stricmp(const char *s1, const char *s2);
926
void dump_bytes(const char *name, const u8 *from, int len);
927
struct mdl *allocate_init_mdl(void *virt, ULONG length);
928
void free_mdl(struct mdl *mdl);
929
struct driver_object *find_bus_driver(const char *name);
930
void free_custom_extensions(struct driver_extension *drv_obj_ext);
931
struct nt_thread *get_current_nt_thread(void);
932
u64 ticks_1601(void);
933
int schedule_ntos_work_item(NTOS_WORK_FUNC func, void *arg1, void *arg2);
934
void wrap_init_timer(struct nt_timer *nt_timer, enum timer_type type,
935
struct ndis_mp_block *nmb);
936
BOOLEAN wrap_set_timer(struct nt_timer *nt_timer, unsigned long expires_hz,
937
unsigned long repeat_hz, struct kdpc *kdpc);
939
LONG InterlockedDecrement(LONG volatile *val) wfastcall;
940
LONG InterlockedIncrement(LONG volatile *val) wfastcall;
941
struct nt_list *ExInterlockedInsertHeadList
942
(struct nt_list *head, struct nt_list *entry,
943
NT_SPIN_LOCK *lock) wfastcall;
944
struct nt_list *ExInterlockedInsertTailList
945
(struct nt_list *head, struct nt_list *entry,
946
NT_SPIN_LOCK *lock) wfastcall;
947
struct nt_list *ExInterlockedRemoveHeadList
948
(struct nt_list *head, NT_SPIN_LOCK *lock) wfastcall;
949
NTSTATUS IofCallDriver(struct device_object *dev_obj, struct irp *irp) wfastcall;
950
KIRQL KfRaiseIrql(KIRQL newirql) wfastcall;
951
void KfLowerIrql(KIRQL oldirql) wfastcall;
952
KIRQL KfAcquireSpinLock(NT_SPIN_LOCK *lock) wfastcall;
953
void KfReleaseSpinLock(NT_SPIN_LOCK *lock, KIRQL oldirql) wfastcall;
954
void IofCompleteRequest(struct irp *irp, CHAR prio_boost) wfastcall;
955
void KefReleaseSpinLockFromDpcLevel(NT_SPIN_LOCK *lock) wfastcall;
957
LONG ObfReferenceObject(void *object) wfastcall;
958
void ObfDereferenceObject(void *object) wfastcall;
960
#define ObReferenceObject(object) ObfReferenceObject(object)
961
#define ObDereferenceObject(object) ObfDereferenceObject(object)
963
/* prevent expansion of ExAllocatePoolWithTag macro */
964
void *(ExAllocatePoolWithTag)(enum pool_type pool_type, SIZE_T size,
967
void ExFreePool(void *p) wstdcall;
968
ULONG MmSizeOfMdl(void *base, ULONG length) wstdcall;
969
void __iomem *MmMapIoSpace(PHYSICAL_ADDRESS phys_addr, SIZE_T size,
970
enum memory_caching_type cache) wstdcall;
971
void MmUnmapIoSpace(void __iomem *addr, SIZE_T size) wstdcall;
972
void MmProbeAndLockPages(struct mdl *mdl, KPROCESSOR_MODE access_mode,
973
enum lock_operation operation) wstdcall;
974
void MmUnlockPages(struct mdl *mdl) wstdcall;
975
void KeInitializeEvent(struct nt_event *nt_event,
976
enum event_type type, BOOLEAN state) wstdcall;
977
LONG KeSetEvent(struct nt_event *nt_event, KPRIORITY incr,
978
BOOLEAN wait) wstdcall;
979
LONG KeResetEvent(struct nt_event *nt_event) wstdcall;
980
BOOLEAN queue_kdpc(struct kdpc *kdpc);
981
BOOLEAN dequeue_kdpc(struct kdpc *kdpc);
983
NTSTATUS IoConnectInterrupt(struct kinterrupt **kinterrupt,
984
PKSERVICE_ROUTINE service_routine,
985
void *service_context, NT_SPIN_LOCK *lock,
986
ULONG vector, KIRQL irql, KIRQL synch_irql,
987
enum kinterrupt_mode interrupt_mode,
988
BOOLEAN shareable, KAFFINITY processor_enable_mask,
989
BOOLEAN floating_save) wstdcall;
990
void IoDisconnectInterrupt(struct kinterrupt *interrupt) wstdcall;
991
BOOLEAN KeSynchronizeExecution(struct kinterrupt *interrupt,
992
PKSYNCHRONIZE_ROUTINE synch_routine,
995
NTSTATUS KeWaitForSingleObject(void *object, KWAIT_REASON reason,
996
KPROCESSOR_MODE waitmode, BOOLEAN alertable,
997
LARGE_INTEGER *timeout) wstdcall;
998
void MmBuildMdlForNonPagedPool(struct mdl *mdl) wstdcall;
999
NTSTATUS IoCreateDevice(struct driver_object *driver, ULONG dev_ext_length,
1000
struct unicode_string *dev_name, DEVICE_TYPE dev_type,
1001
ULONG dev_chars, BOOLEAN exclusive,
1002
struct device_object **dev_obj) wstdcall;
1003
NTSTATUS IoCreateSymbolicLink(struct unicode_string *link,
1004
struct unicode_string *dev_name) wstdcall;
1005
void IoDeleteDevice(struct device_object *dev) wstdcall;
1006
void IoDetachDevice(struct device_object *topdev) wstdcall;
1007
struct device_object *IoGetAttachedDevice(struct device_object *dev) wstdcall;
1008
struct device_object *IoGetAttachedDeviceReference
1009
(struct device_object *dev) wstdcall;
1010
NTSTATUS IoAllocateDriverObjectExtension
1011
(struct driver_object *drv_obj, void *client_id, ULONG extlen,
1012
void **ext) wstdcall;
1013
void *IoGetDriverObjectExtension(struct driver_object *drv,
1014
void *client_id) wstdcall;
1015
struct device_object *IoAttachDeviceToDeviceStack
1016
(struct device_object *src, struct device_object *dst) wstdcall;
1017
BOOLEAN IoCancelIrp(struct irp *irp) wstdcall;
1018
struct irp *IoBuildSynchronousFsdRequest
1019
(ULONG major_func, struct device_object *dev_obj, void *buf,
1020
ULONG length, LARGE_INTEGER *offset, struct nt_event *event,
1021
struct io_status_block *status) wstdcall;
1023
NTSTATUS IoPassIrpDown(struct device_object *dev_obj, struct irp *irp) wstdcall;
1024
WIN_FUNC_DECL(IoPassIrpDown,2);
1025
NTSTATUS IoSyncForwardIrp(struct device_object *dev_obj,
1026
struct irp *irp) wstdcall;
1027
NTSTATUS IoAsyncForwardIrp(struct device_object *dev_obj,
1028
struct irp *irp) wstdcall;
1029
NTSTATUS IoInvalidDeviceRequest(struct device_object *dev_obj,
1030
struct irp *irp) wstdcall;
1032
void KeInitializeSpinLock(NT_SPIN_LOCK *lock) wstdcall;
1033
void IoAcquireCancelSpinLock(KIRQL *irql) wstdcall;
1034
void IoReleaseCancelSpinLock(KIRQL irql) wstdcall;
1036
NTSTATUS RtlUnicodeStringToAnsiString
1037
(struct ansi_string *dst, const struct unicode_string *src,
1038
BOOLEAN dup) wstdcall;
1039
NTSTATUS RtlAnsiStringToUnicodeString
1040
(struct unicode_string *dst, const struct ansi_string *src,
1041
BOOLEAN dup) wstdcall;
1042
void RtlInitAnsiString(struct ansi_string *dst, const char *src) wstdcall;
1043
void RtlInitUnicodeString(struct unicode_string *dest,
1044
const wchar_t *src) wstdcall;
1045
void RtlFreeUnicodeString(struct unicode_string *string) wstdcall;
1046
void RtlFreeAnsiString(struct ansi_string *string) wstdcall;
1047
LONG RtlCompareUnicodeString(const struct unicode_string *s1,
1048
const struct unicode_string *s2,
1049
BOOLEAN case_insensitive) wstdcall;
1050
NTSTATUS RtlUpcaseUnicodeString(struct unicode_string *dst,
1051
struct unicode_string *src,
1052
BOOLEAN alloc) wstdcall;
1053
BOOLEAN KeCancelTimer(struct nt_timer *nt_timer) wstdcall;
1054
void KeInitializeDpc(struct kdpc *kdpc, void *func, void *ctx) wstdcall;
1056
extern spinlock_t ntoskernel_lock;
1057
extern spinlock_t irp_cancel_lock;
1058
extern struct nt_list object_list;
1059
extern CCHAR cpu_count;
1060
#ifdef CONFIG_X86_64
1061
extern struct kuser_shared_data kuser_shared_data;
1064
#define IoCompleteRequest(irp, prio) IofCompleteRequest(irp, prio)
1065
#define IoCallDriver(dev, irp) IofCallDriver(dev, irp)
1067
#if defined(IO_DEBUG)
1068
#define DUMP_IRP(_irp) \
1070
struct io_stack_location *_irp_sl; \
1071
_irp_sl = IoGetCurrentIrpStackLocation(_irp); \
1072
IOTRACE("irp: %p, stack size: %d, cl: %d, sl: %p, dev_obj: %p, " \
1073
"mj_fn: %d, minor_fn: %d, nt_urb: %p, event: %p", \
1074
_irp, _irp->stack_count, (_irp)->current_location, \
1075
_irp_sl, _irp_sl->dev_obj, _irp_sl->major_fn, \
1076
_irp_sl->minor_fn, IRP_URB(_irp), \
1077
(_irp)->user_event); \
1080
#define DUMP_IRP(_irp) do { } while (0)
1083
#endif // _NTOSKERNEL_H_