~ubuntu-branches/debian/squeeze/ndiswrapper/squeeze


Viewing changes to driver/ntoskernel.h

  • Committer: Bazaar Package Importer
  • Author(s): Julian Andres Klode, Kel Modderman, Julian Andres Klode
  • Date: 2008-07-14 21:56:42 UTC
  • mfrom: (1.2.2 upstream)
  • Revision ID: james.westby@ubuntu.com-20080714215642-e3l2gjhq31gkmdfj
Tags: 1.53-1
[ Kel Modderman ]
* New upstream release. (Closes: #484019)
* Include driver/*.sh in debianized ndiswrapper module tarball.
* The distclean Makefile target no longer exists; use clean instead.
* No longer any need to define KSRC in debian/rules/modules; the upstream
  Makefile requires only the KBUILD variable.
* Major behaviour change in ndiswrapper-common wrappers for loadndisdriver
  and ndiswrapper utils:
  - check the required utils_version attribute of the currently installed
    ndiswrapper kernel module and use that version instead of the latest
    version when called interactively
  - fall back to detecting the latest API version when the information
    cannot be determined from the kernel module (e.g. not yet installed)
* Remove dilinger from Uploaders as per his request.
* Remove commented out quilt related code from debian/rules.

[ Julian Andres Klode ]
* debian/control{,.modules.in}:
  - Make me the new maintainer and move Kel to Uploaders
  - Add DM-Upload-Allowed
  - Remove the VCS-* fields for now, until the new repository has been created
  - Note in the description that pre-compiled modules may be available
  - Update Standards-Version to 3.8.0
* debian/copyright: Follow the machine-interpretable copyright format
* debian/patches/modules-build.diff: build correctly when only the modules
  target is built, or when building in parallel (LP: #241547) (backported
  from SVN r2670)
* Build-Depend on quilt and add README.source

--- driver/ntoskernel.h (before)
+++ driver/ntoskernel.h (after)
@@ -19,6 +19,8 @@
 #include <linux/types.h>
 #include <linux/timer.h>
 #include <linux/time.h>
+#include <linux/module.h>
+#include <linux/kmod.h>
 
 #include <linux/netdevice.h>
 #include <linux/wireless.h>
@@ -29,314 +31,413 @@
 #include <linux/mm.h>
 #include <linux/random.h>
 #include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/sched.h>
 #include <linux/usb.h>
-
 #include <linux/spinlock.h>
 #include <asm/mman.h>
-
 #include <linux/version.h>
-
-#include "winnt_types.h"
-#include "ndiswrapper.h"
-#include "pe_linker.h"
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7)
+#include <linux/etherdevice.h>
+#include <net/iw_handler.h>
+#include <linux/ethtool.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/highmem.h>
+#include <linux/percpu.h>
 #include <linux/kthread.h>
-#endif
-
-#if !defined(CONFIG_USB) && defined(CONFIG_USB_MODULE)
-#define CONFIG_USB 1
-#endif
-
-#define addr_offset(driver) (__builtin_return_address(0) - (driver)->entry)
-
-/* Workqueue / task queue backwards compatibility stuff */
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41)
 #include <linux/workqueue.h>
+
+#if !defined(CONFIG_X86) && !defined(CONFIG_X86_64)
+#error "this module is for x86 or x86_64 architectures only"
+#endif
+
+/* Interrupt backwards compatibility stuff */
+#include <linux/interrupt.h>
+#ifndef IRQ_HANDLED
+#define IRQ_HANDLED
+#define IRQ_NONE
+#define irqreturn_t void
+#endif
+
 /* pci functions in 2.6 kernels have problems allocating dma buffers,
  * but seem to work fine with dma functions
  */
-typedef struct workqueue_struct *workqueue;
 #include <asm/dma-mapping.h>
 
-#define PCI_DMA_ALLOC_COHERENT(pci_dev,size,dma_handle) \
-        dma_alloc_coherent(&pci_dev->dev,size,dma_handle, \
+#define PCI_DMA_ALLOC_COHERENT(pci_dev,size,dma_handle)                 \
+        dma_alloc_coherent(&pci_dev->dev,size,dma_handle,               \
                            GFP_KERNEL | __GFP_REPEAT)
-#define PCI_DMA_FREE_COHERENT(pci_dev,size,cpu_addr,dma_handle) \
+#define PCI_DMA_FREE_COHERENT(pci_dev,size,cpu_addr,dma_handle)         \
         dma_free_coherent(&pci_dev->dev,size,cpu_addr,dma_handle)
-#define PCI_DMA_MAP_SINGLE(pci_dev,addr,size,direction) \
+#define PCI_DMA_MAP_SINGLE(pci_dev,addr,size,direction)         \
         dma_map_single(&pci_dev->dev,addr,size,direction)
-#define PCI_DMA_UNMAP_SINGLE(pci_dev,dma_handle,size,direction) \
+#define PCI_DMA_UNMAP_SINGLE(pci_dev,dma_handle,size,direction)         \
         dma_unmap_single(&pci_dev->dev,dma_handle,size,direction)
-#define MAP_SG(pci_dev, sglist, nents, direction) \
+#define MAP_SG(pci_dev, sglist, nents, direction)               \
         dma_map_sg(&pci_dev->dev, sglist, nents, direction)
-#define UNMAP_SG(pci_dev, sglist, nents, direction) \
+#define UNMAP_SG(pci_dev, sglist, nents, direction)             \
         dma_unmap_sg(&pci_dev->dev, sglist, nents, direction)
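The PCI_DMA_* wrappers above simply forward to the generic dma_* API using the
struct device embedded in the pci_dev. A minimal usage sketch, not part of this
diff; pdev and buf_size are hypothetical:

    /* allocate a coherent buffer the device can DMA into */
    dma_addr_t dma_handle;
    void *buf = PCI_DMA_ALLOC_COHERENT(pdev, buf_size, &dma_handle);
    if (!buf)
            return -ENOMEM;
    /* ... hand dma_handle to the hardware, access buf from the CPU ... */
    PCI_DMA_FREE_COHERENT(pdev, buf_size, buf, dma_handle);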
-
-#else // linux version <= 2.5.41
-
-#define PCI_DMA_ALLOC_COHERENT(dev,size,dma_handle) \
-        pci_alloc_consistent(dev,size,dma_handle)
-#define PCI_DMA_FREE_COHERENT(dev,size,cpu_addr,dma_handle) \
-        pci_free_consistent(dev,size,cpu_addr,dma_handle)
-#define PCI_DMA_MAP_SINGLE(dev,addr,size,direction) \
-        pci_map_single(dev,addr,size,direction)
-#define PCI_DMA_UNMAP_SINGLE(dev,dma_handle,size,direction) \
-        pci_unmap_single(dev,dma_handle,size,direction)
-#define MAP_SG(dev, sglist, nents, direction) \
-        pci_map_sg(dev, sglist, nents, direction)
-#define UNMAP_SG(dev, sglist, nents, direction) \
-        pci_unmap_sg(dev, sglist, nents, direction)
-#include <linux/tqueue.h>
-#define work_struct tq_struct
-#define INIT_WORK INIT_TQUEUE
-#define DECLARE_WORK(n, f, d) struct tq_struct n = { \
-                list: LIST_HEAD_INIT(n.list),        \
-                sync: 0,                             \
-                routine: f,                          \
-                data: d                              \
-}
-#define schedule_work schedule_task
-#define flush_scheduled_work flush_scheduled_tasks
-typedef task_queue workqueue;
-#include <linux/smp_lock.h>
-
-/* RedHat kernels #define irqs_disabled this way */
-#ifndef irqs_disabled
-#define irqs_disabled()                \
-({                                     \
-        unsigned long flags;           \
-       __save_flags(flags);            \
-       !(flags & (1<<9));              \
+#define PCI_DMA_MAP_ERROR(dma_addr) dma_mapping_error(dma_addr)
+
+
+#if defined(CONFIG_NET_RADIO) && !defined(CONFIG_WIRELESS_EXT)
+#define CONFIG_WIRELESS_EXT
+#endif
+
+#define prepare_wait_condition(task, var, value)        \
+do {                                                    \
+        var = value;                                    \
+        task = current;                                 \
+        barrier();                                      \
+} while (0)
+
+/* Wait in wait_state (e.g., TASK_INTERRUPTIBLE) for condition to
+ * become true; timeout is either jiffies (> 0) to wait or 0 to wait
+ * forever.
+ * When timeout == 0, return value is
+ *    > 0 if condition becomes true, or
+ *    < 0 if signal is pending on the thread.
+ * When timeout > 0, return value is
+ *    > 0 if condition becomes true before timeout,
+ *    < 0 if signal is pending on the thread before timeout, or
+ *    0 if timedout (condition may have become true at the same time)
+ */
+
+#define wait_condition(condition, timeout, wait_state)          \
+({                                                              \
+        long ret = timeout ? timeout : 1;                       \
+        while (1) {                                             \
+                if (signal_pending(current)) {                  \
+                        ret = -ERESTARTSYS;                     \
+                        break;                                  \
+                }                                               \
+                set_current_state(wait_state);                  \
+                if (condition) {                                \
+                        __set_current_state(TASK_RUNNING);      \
+                        break;                                  \
+                }                                               \
+                if (timeout) {                                  \
+                        ret = schedule_timeout(ret);            \
+                        if (!ret)                               \
+                                break;                          \
+                } else                                          \
+                        schedule();                             \
+        }                                                       \
+        ret;                                                    \
 })
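These helpers implement an open-coded wait loop with the semantics spelled out
in the comment above. A sketch of the intended calling pattern, with a
hypothetical done flag; it assumes the completing side sets the flag and calls
wake_up_process() on the recorded task:

    static struct task_struct *waiter;
    static int done;

    long ret;
    prepare_wait_condition(waiter, done, 0);
    /* ... kick off the operation; its completion path does:
     *     done = 1; wake_up_process(waiter);                   */
    ret = wait_condition(done, msecs_to_jiffies(1000), TASK_INTERRUPTIBLE);
    if (ret < 0)
            ;  /* signal pending (-ERESTARTSYS) */
    else if (ret == 0)
            ;  /* timed out; done may still have raced to 1 */
    else
            ;  /* condition became true */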
-#endif
-
-#ifndef in_atomic
-#ifdef CONFIG_PREEMPT
-#define in_atomic() ((preempt_get_count() & ~PREEMPT_ACTIVE) != kernel_locked())
-#else
-#define in_atomic() (in_interrupt())
-#endif // CONFIG_PREEMPT
-#endif // in_atomic
-
-#define __GFP_NOWARN 0
-
-#endif // LINUX_VERSION_CODE
+
+#ifdef WRAP_WQ
+
+struct workqueue_struct;
+
+struct workqueue_thread {
+        spinlock_t lock;
+        struct task_struct *task;
+        struct completion *completion;
+        char name[16];
+        int pid;
+        /* whether any work_structs pending? <0 implies quit */
+        s8 pending;
+        /* list of work_structs pending */
+        struct list_head work_list;
+};
+
+typedef struct workqueue_struct {
+        u8 singlethread;
+        u8 qon;
+        int num_cpus;
+        struct workqueue_thread threads[0];
+} workqueue_struct_t;
+
+typedef struct {
+        struct list_head list;
+        void (*func)(void *data);
+        void *data;
+        /* whether/on which thread scheduled */
+        struct workqueue_thread *thread;
+} work_struct_t;
+
+#define initialize_work(work, pfunc, pdata)                     \
+        do {                                                    \
+                (work)->func = (pfunc);                         \
+                (work)->data = (pdata);                         \
+                (work)->thread = NULL;                          \
+        } while (0)
+
+#undef create_singlethread_workqueue
+#define create_singlethread_workqueue(name) wrap_create_wq(name, 1, 0)
+#undef create_workqueue
+#define create_workqueue(name) wrap_create_wq(name, 0, 0)
+#undef destroy_workqueue
+#define destroy_workqueue wrap_destroy_wq
+#undef queue_work
+#define queue_work wrap_queue_work
+#undef flush_workqueue
+#define flush_workqueue wrap_flush_wq
+
+workqueue_struct_t *wrap_create_wq(const char *name, u8 singlethread, u8 freeze);
+void wrap_destroy_wq_on(workqueue_struct_t *workq, int cpu);
+void wrap_destroy_wq(workqueue_struct_t *workq);
+int wrap_queue_work_on(workqueue_struct_t *workq, work_struct_t *work,
+                       int cpu);
+int wrap_queue_work(workqueue_struct_t *workq, work_struct_t *work);
+void wrap_cancel_work(work_struct_t *work);
+void wrap_flush_wq_on(workqueue_struct_t *workq, int cpu);
+void wrap_flush_wq(workqueue_struct_t *workq);
+typedef void *worker_param_t;
+#define worker_param_data(param, type, member) param
+
+#else // WRAP_WQ
+
+typedef struct workqueue_struct workqueue_struct_t;
+typedef struct work_struct work_struct_t;
+
+#if defined(INIT_WORK_NAR) || defined(INIT_DELAYED_WORK_DEFERRABLE)
+#define initialize_work(work, func, data) INIT_WORK(work, func)
+typedef struct work_struct *worker_param_t;
+#define worker_param_data(param, type, member)  \
+        container_of(param, type, member)
+#else
+#define initialize_work(work, func, data) INIT_WORK(work, func, data)
+typedef void *worker_param_t;
+#define worker_param_data(param, type, member) param
+#endif // INIT_WORK_NAR
+
+#endif // WRAP_WQ
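The worker_param_t/worker_param_data() pair papers over the workqueue API
change around 2.6.20, where INIT_WORK() lost its data argument and handlers
began receiving the work_struct pointer itself. A hedged sketch of a worker
written against this abstraction; struct my_ctx and my_work_handler are
hypothetical:

    struct my_ctx {
            work_struct_t work;
            int value;
    };

    static void my_work_handler(worker_param_t param)
    {
            /* either the raw data pointer (old API) or
             * container_of() of the work_struct (new API) */
            struct my_ctx *ctx = worker_param_data(param, struct my_ctx, work);
            /* ... process ctx->value ... */
    }

    /* setup: initialize_work(&ctx->work, my_work_handler, ctx); */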
 
+
+struct nt_thread *wrap_worker_init(workqueue_struct_t *wq);
+
+#ifdef module_param
+#define WRAP_MODULE_PARM_INT(name, perm) module_param(name, int, perm)
+#define WRAP_MODULE_PARM_STRING(name, perm) module_param(name, charp, perm)
+#else
+#define WRAP_MODULE_PARM_INT(name, perm) MODULE_PARM(name, "i")
+#define WRAP_MODULE_PARM_STRING(name, perm) MODULE_PARM(name, "s")
+#endif
+
+#ifndef LOCK_PREFIX
+#ifdef LOCK
+#define LOCK_PREFIX LOCK
+#else
+#ifdef CONFIG_SMP
+#define LOCK_PREFIX "lock ; "
+#else
+#define LOCK_PREFIX ""
+#endif
+#endif
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+
+#ifndef CHECKSUM_HW
+#define CHECKSUM_HW CHECKSUM_PARTIAL
+#endif
 
 #ifndef offset_in_page
 #define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
 #endif
 
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
-#include <linux/scatterlist.h>
-#else
-#define sg_init_one(sg, addr, len) do {                          \
-                (sg)->page = virt_to_page(addr);                 \
-                (sg)->offset = offset_in_page(addr);             \
-                (sg)->length = len;                              \
-        } while (0)
-#endif // KERNEL_VERSION(2,6,9)
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,23)
-#define HAVE_ETHTOOL 1
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
-
-#ifndef preempt_enable
-#define preempt_enable()  do { } while (0)
-#endif
-#ifndef preempt_disable
-#define preempt_disable() do { } while (0)
-#endif
-
-#ifndef container_of
-#define container_of(ptr, type, member)                                 \
-({                                                                      \
-        const typeof( ((type *)0)->member ) *__mptr = (ptr);            \
-        (type *)( (char *)__mptr - offsetof(type,member) );             \
-})
-#endif
-
-#ifndef virt_addr_valid
-#define virt_addr_valid(addr) VALID_PAGE(virt_to_page(addr))
-#endif
-
-#ifndef SET_NETDEV_DEV
-#define SET_NETDEV_DEV(net,pdev) do { } while (0)
-#endif
-
-#endif // LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
-
-#ifdef CONFIG_SOFTWARE_SUSPEND2
-#define KTHREAD_RUN(a,b,c) kthread_run(a,b,0,c)
-#else
-#define KTHREAD_RUN(a,b,c) kthread_run(a,b,c)
-#endif
-
-#ifdef CONFIG_X86_64
-#define LIN2WIN1(func, arg1)                    \
-        lin_to_win1(func, (unsigned long)arg1)
-#define LIN2WIN2(func, arg1, arg2)                                      \
-        lin_to_win2(func, (unsigned long)arg1, (unsigned long)arg2)
-#define LIN2WIN3(func, arg1, arg2, arg3)                                \
-        lin_to_win3(func, (unsigned long)arg1, (unsigned long)arg2,     \
-                    (unsigned long)arg3)
-#define LIN2WIN4(func, arg1, arg2, arg3, arg4)                          \
-        lin_to_win4(func, (unsigned long)arg1, (unsigned long)arg2,     \
-                    (unsigned long)arg3, (unsigned long)arg4)
-#define LIN2WIN5(func, arg1, arg2, arg3, arg4, arg5)                    \
-        lin_to_win5(func, (unsigned long)arg1, (unsigned long)arg2,     \
-                    (unsigned long)arg3, (unsigned long)arg4,           \
-                    (unsigned long)arg5)
-#define LIN2WIN6(func, arg1, arg2, arg3, arg4, arg5, arg6)              \
-        lin_to_win6(func, (unsigned long)arg1, (unsigned long)arg2,     \
-                    (unsigned long)arg3, (unsigned long)arg4,           \
-                    (unsigned long)arg5, (unsigned long)arg6)
-#else
-#define LIN2WIN1(func, arg1) func(arg1)
-#define LIN2WIN2(func, arg1, arg2) func(arg1, arg2)
-#define LIN2WIN3(func, arg1, arg2, arg3) func(arg1, arg2, arg3)
-#define LIN2WIN4(func, arg1, arg2, arg3, arg4) func(arg1, arg2, arg3, arg4)
-#define LIN2WIN5(func, arg1, arg2, arg3, arg4, arg5)    \
-        func(arg1, arg2, arg3, arg4, arg5)
-#define LIN2WIN6(func, arg1, arg2, arg3, arg4, arg5, arg6)      \
-        func(arg1, arg2, arg3, arg4, arg5, arg6)
-#endif
-
-#ifndef __wait_event_interruptible_timeout
-#define __wait_event_interruptible_timeout(wq, condition, ret)          \
-do {                                                                    \
-        wait_queue_t __wait;                                            \
-        init_waitqueue_entry(&__wait, current);                         \
-                                                                        \
-        add_wait_queue(&wq, &__wait);                                   \
-        for (;;) {                                                      \
-                set_current_state(TASK_INTERRUPTIBLE);                  \
-                if (condition)                                          \
-                        break;                                          \
-                if (!signal_pending(current)) {                         \
-                        ret = schedule_timeout(ret);                    \
-                        if (!ret)                                       \
-                                break;                                  \
-                        continue;                                       \
-                }                                                       \
-                ret = -ERESTARTSYS;                                     \
-                break;                                                  \
-        }                                                               \
-        current->state = TASK_RUNNING;                                  \
-        remove_wait_queue(&wq, &__wait);                                \
-} while (0)
-#endif
-
-#ifndef wait_event_interruptible_timeout
-#define wait_event_interruptible_timeout(wq, condition, timeout)        \
-({                                                                      \
-        long __ret = timeout;                                           \
-        if (!(condition))                                               \
-                __wait_event_interruptible_timeout(wq, condition, __ret); \
-        __ret;                                                          \
-})
-#endif
-
-#ifndef __wait_event_timeout
-#define __wait_event_timeout(wq, condition, ret)                        \
-do {                                                                    \
-        wait_queue_t __wait;                                            \
-        init_waitqueue_entry(&__wait, current);                         \
-                                                                        \
-        add_wait_queue(&wq, &__wait);                                   \
-        for (;;) {                                                      \
-                set_current_state(TASK_UNINTERRUPTIBLE);                \
-                if (condition)                                          \
-                        break;                                          \
-                ret = schedule_timeout(ret);                            \
-                if (!ret)                                               \
-                        break;                                          \
-        }                                                               \
-        current->state = TASK_RUNNING;                                  \
-        remove_wait_queue(&wq, &__wait);                                \
-} while (0)
-#endif
-
-#ifndef wait_event_timeout
-#define wait_event_timeout(wq, condition, timeout)                      \
-({                                                                      \
-        long __ret = timeout;                                           \
-        if (!(condition))                                               \
-                __wait_event_timeout(wq, condition, __ret);             \
-         __ret;                                                         \
-})
-#endif
-
-/* Interrupt backwards compatibility stuff */
-#include <linux/interrupt.h>
-#ifndef IRQ_HANDLED
-#define IRQ_HANDLED
-#define IRQ_NONE
-#define irqreturn_t void
-#endif
-
-#ifndef free_netdev
-#define free_netdev kfree
-#endif
-
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
-#define NW_MODULE_PARM_INT(name, perm) module_param(name, int, perm)
-#define NW_MODULE_PARM_STRING(name, perm) module_param(name, charp, perm)
-#else
-#define NW_MODULE_PARM_INT(name, perm) MODULE_PARM(name, "i")
-#define NW_MODULE_PARM_STRING(name, perm) MODULE_PARM(name, "s")
-#endif
-
-/* this ugly hack is to handle RH kernels; I don't know any better,
- * but this has to be fixed soon */
-#ifndef rt_task
-#define rt_task(p) ((p)->prio < MAX_RT_PRIO)
-#endif
-
-#define KMALLOC_THRESHOLD 131072
+#ifndef PMSG_SUSPEND
+#ifdef PM_SUSPEND
+/* this is not correct - the value of PM_SUSPEND is different from
+ * PMSG_SUSPEND, but ndiswrapper doesn't care about the value when
+ * suspending */
+#define PMSG_SUSPEND PM_SUSPEND
+#define PSMG_ON PM_ON
+#else
+typedef u32 pm_message_t;
+#define PMSG_SUSPEND 2
+#define PMSG_ON 0
+#endif
+#endif
+
+#ifndef PCI_D0
+#define PCI_D0 0
+#endif
+
+#ifndef PCI_D3hot
+#define PCI_D3hot 3
+#endif
+
+#ifndef PCI_D3cold
+#define PCI_D3cold 3
+#endif
+
+#ifndef PM_EVENT_SUSPEND
+#define PM_EVENT_SUSPEND 2
+#endif
+
+#if !defined(HAVE_NETDEV_PRIV)
+#define netdev_priv(dev)  ((dev)->priv)
+#endif
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18)
+#define ISR_PT_REGS_PARAM_DECL
+#define ISR_PT_REGS_ARG
+#else
+#define ISR_PT_REGS_PARAM_DECL , struct pt_regs *regs
+#define ISR_PT_REGS_ARG , NULL
+#endif
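These macros absorb the 2.6.19 removal of the pt_regs argument from interrupt
handlers, so one handler definition compiles on both sides of the change.
Sketch; the handler name and context pointer are hypothetical:

    static irqreturn_t my_isr(int irq, void *data ISR_PT_REGS_PARAM_DECL)
    {
            /* ... acknowledge and handle the device interrupt ... */
            return IRQ_HANDLED;
    }

    /* a direct invocation supplies the extra argument the same way:
     *     my_isr(irq, data ISR_PT_REGS_ARG);                         */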
 
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,16)
+#define for_each_possible_cpu(_cpu) for_each_cpu(_cpu)
+#endif
+
+#ifndef flush_icache_range
+#define flush_icache_range(start, end) do { } while (0)
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#endif
+
+#ifndef IRQF_SHARED
+#define IRQF_SHARED SA_SHIRQ
+#endif
+
+#define memcpy_skb(skb, from, length)                   \
+        memcpy(skb_put(skb, length), from, length)
+
+#ifndef DMA_24BIT_MASK
+#define DMA_24BIT_MASK 0x0000000000ffffffULL
+#endif
+
+#ifndef DMA_30BIT_MASK
+#define DMA_30BIT_MASK 0x000000003fffffffULL
+#endif
+
+#ifndef DMA_31BIT_MASK
+#define DMA_31BIT_MASK 0x000000007fffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK 0x00000000ffffffffULL
+#endif
+
+#ifndef __GFP_DMA32
+#define __GFP_DMA32 GFP_DMA
+#endif
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,22)
+#define wrap_kmem_cache_create(name, size, align, flags)        \
+        kmem_cache_create(name, size, align, flags, NULL, NULL)
+#else
+#define wrap_kmem_cache_create(name, size, align, flags)        \
+        kmem_cache_create(name, size, align, flags, NULL)
+#endif
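kmem_cache_create() dropped its destructor parameter in 2.6.23, which is all
this wrapper hides; usage is identical on both sides. Sketch with a
hypothetical cache name and object type:

    static struct kmem_cache *pkt_cache;

    pkt_cache = wrap_kmem_cache_create("wrap_pkt",
                                       sizeof(struct my_packet), 0, 0);
    if (!pkt_cache)
            return -ENOMEM;
    /* ... kmem_cache_alloc(pkt_cache, GFP_KERNEL) / kmem_cache_free() ... */
    kmem_cache_destroy(pkt_cache);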
 
+
+#include "winnt_types.h"
+#include "ndiswrapper.h"
+#include "pe_linker.h"
+#include "wrapmem.h"
+#include "lin2win.h"
+#include "loader.h"
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
+static inline void netif_tx_lock(struct net_device *dev)
+{
+        spin_lock(&dev->xmit_lock);
+}
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+        spin_unlock(&dev->xmit_lock);
+}
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+        spin_lock_bh(&dev->xmit_lock);
+}
+static inline void netif_tx_unlock_bh(struct net_device *dev)
+{
+        spin_unlock_bh(&dev->xmit_lock);
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+static inline void netif_poll_enable(struct net_device *dev)
+{
+}
+static inline void netif_poll_disable(struct net_device *dev)
+{
+}
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
+#define proc_net_root init_net.proc_net
+#else
+#define proc_net_root proc_net
+#endif
 
 /* TICK is 100ns */
-#define TICKSPERSEC             10000000
-#define SECSPERDAY              86400
+#define TICKSPERSEC             10000000
+#define TICKSPERMSEC            10000
+#define SECSPERDAY              86400
+#define TICKSPERJIFFY           ((TICKSPERSEC + HZ - 1) / HZ)
+
+#define int_div_round(x, y) (((x) + (y - 1)) / (y))
 
 /* 1601 to 1970 is 369 years plus 89 leap days */
-#define SECS_1601_TO_1970       ((369 * 365 + 89) * (u64)SECSPERDAY)
-#define TICKS_1601_TO_1970      (SECS_1601_TO_1970 * TICKSPERSEC)
-
-#define UNIMPL() do {                                                   \
-                printk(KERN_ERR "%s --UNIMPLEMENTED--\n", __FUNCTION__ ); \
-        } while (0)
-
-typedef void (*WRAP_EXPORT_FUNC)(void);
+#define SECS_1601_TO_1970       ((369 * 365 + 89) * (u64)SECSPERDAY)
+#define TICKS_1601_TO_1970      (SECS_1601_TO_1970 * TICKSPERSEC)
+
+/* 100ns units to HZ; if sys_time is negative, relative to current
+ * clock, otherwise from year 1601 */
+#define SYSTEM_TIME_TO_HZ(sys_time)                                     \
+        (((sys_time) <= 0) ? \
+         int_div_round(((u64)HZ * (-(sys_time))), TICKSPERSEC) :        \
+         int_div_round(((s64)HZ * ((sys_time) - ticks_1601())), TICKSPERSEC))
+
+#define MSEC_TO_HZ(ms) int_div_round((ms * HZ), 1000)
+#define USEC_TO_HZ(us) int_div_round((us * HZ), 1000000)
+
+extern u64 wrap_ticks_to_boot;
+
+static inline u64 ticks_1601(void)
+{
+        return wrap_ticks_to_boot + (u64)jiffies * TICKSPERJIFFY;
+}
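Worked through with concrete numbers (HZ = 250 assumed for illustration): a
jiffy is TICKSPERJIFFY = (10000000 + 249) / 250 = 40000 ticks of 100 ns, and a
relative NT timeout of -10000000 (one second; negative means relative, per the
comment above) converts as

    SYSTEM_TIME_TO_HZ(-10000000)
        = int_div_round(250 * 10000000, 10000000)
        = 250 jiffies            /* one second at HZ = 250 */

A positive (absolute) value is first rebased against ticks_1601(), so only the
interval still remaining is converted to jiffies.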
 
+
+typedef void (*generic_func)(void);
 
 struct wrap_export {
         const char *name;
-        WRAP_EXPORT_FUNC func;
+        generic_func func;
 };
 
 #ifdef CONFIG_X86_64
-#define WRAP_EXPORT_SYMBOL(f) {#f, (WRAP_EXPORT_FUNC)x86_64_ ## f}
-#define WRAP_EXPORT_WIN_FUNC(f) {#f, (WRAP_EXPORT_FUNC)x86_64__win_ ## f}
-#define WRAP_FUNC_PTR(f) &x86_64_ ## f
+
+#define WIN_SYMBOL(name, argc)                                  \
+        {#name, (generic_func) win2lin_ ## name ## _ ## argc}
+#define WIN_WIN_SYMBOL(name, argc)                                      \
+        {#name, (generic_func) win2lin__win_ ## name ## _ ## argc}
+#define WIN_FUNC_DECL(name, argc)                       \
+        extern typeof(name) win2lin_ ## name ## _ ## argc;
+#define WIN_FUNC_PTR(name, argc) win2lin_ ## name ## _ ## argc
+
 #else
-#define WRAP_EXPORT_SYMBOL(f) {#f, (WRAP_EXPORT_FUNC)f}
-#define WRAP_EXPORT_WIN_FUNC(f) {#f, (WRAP_EXPORT_FUNC)_win_ ## f}
-#define WRAP_FUNC_PTR(f) &f
+
+#define WIN_SYMBOL(name, argc) {#name, (generic_func)name}
+#define WIN_WIN_SYMBOL(name, argc) {#name, (generic_func)_win_ ## name}
+#define WIN_FUNC_DECL(name, argc)
+#define WIN_FUNC_PTR(name, argc) name
+
 #endif
-/* map name s to function f - if f is different from s */
-#define WRAP_EXPORT_MAP(s,f)
-#define WRAP_EXPORT(x) x
-
-struct wrap_alloc {
-        struct list_head list;
-        void *ptr;
-};
+
+#define WIN_FUNC(name, argc) name
+/* map name s to f - if f is different from s */
+#define WIN_SYMBOL_MAP(s, f)
+
+#define POOL_TAG(A, B, C, D)                                    \
+        ((ULONG)((A) + ((B) << 8) + ((C) << 16) + ((D) << 24)))
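POOL_TAG() packs four ASCII characters into a ULONG in little-endian order,
matching Windows pool-tag conventions. For example, with a hypothetical tag
'NDSW':

    POOL_TAG('N', 'D', 'S', 'W')
        = 0x4e + (0x44 << 8) + (0x53 << 16) + (0x57 << 24)
        = 0x5753444e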
 
 struct pe_image {
         char name[MAX_DRIVER_NAME_LEN];
-        void *entry;
+        UINT (*entry)(struct driver_object *, struct unicode_string *) wstdcall;
         void *image;
         int size;
         int type;
@@ -345,92 +446,418 @@
         IMAGE_OPTIONAL_HEADER *opt_hdr;
 };
 
-extern KSPIN_LOCK atomic_lock;
-extern KSPIN_LOCK cancel_lock;
-
-#define DEBUG_IRQL 1
-
-#define WRAPPER_TIMER_MAGIC 47697249
-struct wrapper_timer {
-        struct list_head list;
+struct ndis_mp_block;
+
+struct wrap_timer {
+        struct nt_slist slist;
         struct timer_list timer;
-#ifdef DEBUG_TIMER
-        unsigned long wrapper_timer_magic;
-#endif
+        struct nt_timer *nt_timer;
         long repeat;
-        int active;
-        struct ktimer *ktimer;
-        struct kdpc *kdpc;
-        KSPIN_LOCK lock;
-};
-
-typedef struct mdl ndis_buffer;
-
-int ntoskernel_init(void);
-void ntoskernel_exit(void);
-STDCALL void *ExAllocatePoolWithTag(enum pool_type pool_type, SIZE_T size,
-                                    ULONG tag);
-STDCALL void ExFreePool(void *p);
-STDCALL ULONG MmSizeOfMdl(void *base, ULONG length);
-STDCALL void KeInitializeEvent(struct kevent *kevent,
-                               enum event_type type, BOOLEAN state);
-STDCALL LONG KeSetEvent(struct kevent *kevent, KPRIORITY incr, BOOLEAN wait);
-STDCALL LONG KeResetEvent(struct kevent *kevent);
-STDCALL NTSTATUS KeWaitForSingleObject(void *object, KWAIT_REASON reason,
-                                        KPROCESSOR_MODE waitmode,
-                                        BOOLEAN alertable,
-                                        LARGE_INTEGER *timeout);
-struct mdl *allocate_init_mdl(void *virt, ULONG length);
-void free_mdl(struct mdl *mdl);
-STDCALL struct mdl *IoAllocateMdl(void *virt, ULONG length, BOOLEAN second_buf,
-                                  BOOLEAN charge_quota, struct irp *irp);
-STDCALL void IoFreeMdl(struct mdl *mdl);
-STDCALL void NdisFreeBuffer(ndis_buffer *buffer);
-ULONGLONG ticks_1601(void);
-
-STDCALL KIRQL KeGetCurrentIrql(void);
-STDCALL void KeInitializeSpinLock(KSPIN_LOCK *lock);
-STDCALL void KeAcquireSpinLock(KSPIN_LOCK *lock, KIRQL *irql);
-STDCALL void KeReleaseSpinLock(KSPIN_LOCK *lock, KIRQL oldirql);
-STDCALL KIRQL KeAcquireSpinLockRaiseToDpc(KSPIN_LOCK *lock);
-
-_FASTCALL KIRQL KfRaiseIrql(FASTCALL_DECL_1(KIRQL newirql));
-_FASTCALL void KfLowerIrql(FASTCALL_DECL_1(KIRQL oldirql));
-_FASTCALL KIRQL KfAcquireSpinLock(FASTCALL_DECL_1(KSPIN_LOCK *lock));
-_FASTCALL void
-KfReleaseSpinLock(FASTCALL_DECL_2(KSPIN_LOCK *lock, KIRQL oldirql));
-_FASTCALL void
-IofCompleteRequest(FASTCALL_DECL_2(struct irp *irp, CHAR prio_boost));
-_FASTCALL void
-KefReleaseSpinLockFromDpcLevel(FASTCALL_DECL_1(KSPIN_LOCK *lock));
-STDCALL NTSTATUS RtlUnicodeStringToAnsiString(struct ansi_string *dst,
-                                               struct unicode_string *src,
-                                               BOOLEAN dup);
-STDCALL NTSTATUS RtlAnsiStringToUnicodeString(struct unicode_string *dst,
-                                               struct ansi_string *src,
-                                               BOOLEAN dup);
-STDCALL void RtlInitAnsiString(struct ansi_string *dst, CHAR *src);
-STDCALL void RtlInitString(struct ansi_string *dst, CHAR *src);
-STDCALL void RtlFreeUnicodeString(struct unicode_string *string);
-STDCALL void RtlFreeAnsiString(struct ansi_string *string);
-
-unsigned long lin_to_win1(void *func, unsigned long);
-unsigned long lin_to_win2(void *func, unsigned long, unsigned long);
-unsigned long lin_to_win3(void *func, unsigned long, unsigned long,
-                          unsigned long);
-unsigned long lin_to_win4(void *func, unsigned long, unsigned long,
-                          unsigned long, unsigned long);
-unsigned long lin_to_win5(void *func, unsigned long, unsigned long,
-                          unsigned long, unsigned long, unsigned long);
-unsigned long lin_to_win6(void *func, unsigned long, unsigned long,
-                          unsigned long, unsigned long, unsigned long,
-                          unsigned long);
-
-#define MSG(level, fmt, ...)                            \
-        printk(level "ndiswrapper (%s:%d): " fmt "\n",  \
-               __FUNCTION__, __LINE__ , ## __VA_ARGS__)
-#define WARNING(fmt, ...) MSG(KERN_WARNING, fmt, ## __VA_ARGS__)
-#define ERROR(fmt, ...) MSG(KERN_ERR, fmt , ## __VA_ARGS__)
-#define INFO(fmt, ...) MSG(KERN_INFO, fmt , ## __VA_ARGS__)
-
-static inline KIRQL current_irql(void)
+#ifdef TIMER_DEBUG
+        unsigned long wrap_timer_magic;
+#endif
+};
+
+struct ntos_work_item {
+        struct nt_list list;
+        void *arg1;
+        void *arg2;
+        NTOS_WORK_FUNC func;
+};
+
+struct wrap_device_setting {
+        struct nt_list list;
+        char name[MAX_SETTING_NAME_LEN];
+        char value[MAX_SETTING_VALUE_LEN];
+        void *encoded;
+};
+
+struct wrap_bin_file {
+        char name[MAX_DRIVER_NAME_LEN];
+        size_t size;
+        void *data;
+};
+
+#define WRAP_DRIVER_CLIENT_ID 1
+
+struct wrap_driver {
+        struct nt_list list;
+        struct driver_object *drv_obj;
+        char name[MAX_DRIVER_NAME_LEN];
+        char version[MAX_SETTING_VALUE_LEN];
+        unsigned short num_pe_images;
+        struct pe_image pe_images[MAX_DRIVER_PE_IMAGES];
+        unsigned short num_bin_files;
+        struct wrap_bin_file *bin_files;
+        struct nt_list wrap_devices;
+        struct nt_list settings;
+        int dev_type;
+        struct ndis_driver *ndis_driver;
+};
+
+enum hw_status {
+        HW_INITIALIZED = 1, HW_SUSPENDED, HW_HALTED, HW_PRESENT,
+};
+
+struct wrap_device {
+        /* first part is (de)initialized once by loader */
+        struct nt_list list;
+        int dev_bus;
+        int vendor;
+        int device;
+        int subvendor;
+        int subdevice;
+        char conf_file_name[MAX_DRIVER_NAME_LEN];
+        char driver_name[MAX_DRIVER_NAME_LEN];
+        struct wrap_driver *driver;
+        struct nt_list settings;
+
+        /* rest should be (de)initialized when a device is
+         * (un)plugged */
+        struct cm_resource_list *resource_list;
+        unsigned long hw_status;
+        struct device_object *pdo;
+        union {
+                struct {
+                        struct pci_dev *pdev;
+                        enum device_power_state wake_state;
+                } pci;
+                struct {
+                        struct usb_device *udev;
+                        struct usb_interface *intf;
+                        int num_alloc_urbs;
+                        struct nt_list wrap_urb_list;
+                } usb;
+        };
+        union {
+                struct ndis_device *wnd;
+        };
+};
+
+#define wrap_is_pci_bus(dev_bus)                        \
+        (WRAP_BUS(dev_bus) == WRAP_PCI_BUS ||           \
+         WRAP_BUS(dev_bus) == WRAP_PCMCIA_BUS)
+#ifdef ENABLE_USB
+/* earlier versions of ndiswrapper used 0 as USB_BUS */
+#define wrap_is_usb_bus(dev_bus)                        \
+        (WRAP_BUS(dev_bus) == WRAP_USB_BUS ||           \
+         WRAP_BUS(dev_bus) == WRAP_INTERNAL_BUS)
+#else
+#define wrap_is_usb_bus(dev_bus) 0
+#endif
+#define wrap_is_bluetooth_device(dev_bus)                       \
+        (WRAP_DEVICE(dev_bus) == WRAP_BLUETOOTH_DEVICE1 ||      \
+         WRAP_DEVICE(dev_bus) == WRAP_BLUETOOTH_DEVICE2)
+
+extern workqueue_struct_t *ntos_wq;
+#define schedule_ntos_work(work_struct) queue_work(ntos_wq, work_struct)
+#define schedule_work(work_struct) queue_work(ntos_wq, work_struct)
+
+extern workqueue_struct_t *ndis_wq;
+#define schedule_ndis_work(work_struct) queue_work(ndis_wq, work_struct)
+
+extern workqueue_struct_t *wrapndis_wq;
+#define schedule_wrapndis_work(work_struct) queue_work(wrapndis_wq, work_struct)
+
+#define atomic_unary_op(var, size, oper)                                \
+do {                                                                    \
+        if (size == 1)                                                  \
+                __asm__ __volatile__(                                   \
+                        LOCK_PREFIX oper "b %b0\n\t" : "+m" (var));     \
+        else if (size == 2)                                             \
+                __asm__ __volatile__(                                   \
+                        LOCK_PREFIX oper "w %w0\n\t" : "+m" (var));     \
+        else if (size == 4)                                             \
+                __asm__ __volatile__(                                   \
+                        LOCK_PREFIX oper "l %0\n\t" : "+m" (var));      \
+        else if (size == 8)                                             \
+                __asm__ __volatile__(                                   \
+                        LOCK_PREFIX oper "q %q0\n\t" : "+m" (var));     \
+        else {                                                          \
+                extern void _invalid_op_size_(void);                    \
+                _invalid_op_size_();                                    \
+        }                                                               \
+} while (0)
+
+#define atomic_inc_var_size(var, size) atomic_unary_op(var, size, "inc")
+
+#define atomic_inc_var(var) atomic_inc_var_size(var, sizeof(var))
+
+#define atomic_dec_var_size(var, size) atomic_unary_op(var, size, "dec")
+
+#define atomic_dec_var(var) atomic_dec_var_size(var, sizeof(var))
+
+#define pre_atomic_add(var, i)                                  \
+({                                                              \
+        typeof(var) pre;                                        \
+        __asm__ __volatile__(                                   \
+                LOCK_PREFIX "xadd %0, %1\n\t"                   \
+                : "=r"(pre), "+m"(var)                          \
+                : "0"(i));                                      \
+        pre;                                                    \
+})
+
+#define post_atomic_add(var, i) (pre_atomic_add(var, i) + i)
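pre_atomic_add() compiles to a single lock xadd: it atomically adds i to var
and evaluates to var's value from before the addition; post_atomic_add() adds
i to that result to report the value afterwards. Illustrated with a
hypothetical refcount:

    s32 refcount;

    refcount = 5;
    pre_atomic_add(refcount, 2);   /* evaluates to 5; refcount is now 7 */

    refcount = 5;
    post_atomic_add(refcount, 2);  /* evaluates to 7; refcount is now 7 */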
 
+
+#ifndef in_atomic
+#define in_atomic() in_interrupt()
+#endif
+
+#ifndef preempt_enable_no_resched
+#define preempt_enable_no_resched() preempt_enable()
+#endif
+
+//#define DEBUG_IRQL 1
+
+#ifdef DEBUG_IRQL
+#define assert_irql(cond)                                               \
+do {                                                                    \
+        KIRQL _irql_ = current_irql();                                  \
+        if (!(cond)) {                                                  \
+                WARNING("assertion '%s' failed: %d", #cond, _irql_);    \
+                DBG_BLOCK(4) {                                          \
+                        dump_stack();                                   \
+                }                                                       \
+        }                                                               \
+} while (0)
+#else
+#define assert_irql(cond) do { } while (0)
+#endif
+
+/* When preempt is enabled, we should preempt_disable to raise IRQL to
+ * DISPATCH_LEVEL, to be consistent with the semantics. However, using
+ * a mutex instead, so that only ndiswrapper threads run one at a time
+ * on a processor when at DISPATCH_LEVEL seems to be enough. So that
+ * is what we will use until we learn otherwise. If
+ * preempt_(en|dis)able is required for some reason, comment out
+ * following #define. */
+
+#define WRAP_PREEMPT 1
+
+#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PREEMPT_RT)
+#ifndef WRAP_PREEMPT
+#define WRAP_PREEMPT 1
+#endif
+#endif
+
+//#undef WRAP_PREEMPT
+
+#ifdef WRAP_PREEMPT
+
+typedef struct {
+        int count;
+        struct mutex lock;
+#ifdef CONFIG_SMP
+        typeof(current->cpus_allowed) cpus_allowed;
+#endif
+        struct task_struct *task;
+} irql_info_t;
+
+DECLARE_PER_CPU(irql_info_t, irql_info);
+
+static inline KIRQL raise_irql(KIRQL newirql)
 {
-        if (in_atomic() || irqs_disabled())
+        irql_info_t *info;
+
+        assert(newirql == DISPATCH_LEVEL);
+        info = &get_cpu_var(irql_info);
+        if (info->task == current) {
+                assert(info->count > 0);
+                assert(mutex_is_locked(&info->lock));
+#if defined(CONFIG_SMP) && defined(DEBUG)
+                do {
+                        cpumask_t cpumask;
+                        cpumask = cpumask_of_cpu(smp_processor_id());
+                        cpus_xor(cpumask, cpumask, current->cpus_allowed);
+                        assert(cpus_empty(cpumask));
+                } while (0);
+#endif
+                info->count++;
+                put_cpu_var(irql_info);
                 return DISPATCH_LEVEL;
-        else
-                return PASSIVE_LEVEL;
+        }
+        /* TODO: is this enough to pin down to current cpu? */
+#ifdef CONFIG_SMP
+        assert(task_cpu(current) == smp_processor_id());
+        info->cpus_allowed = current->cpus_allowed;
+        current->cpus_allowed = cpumask_of_cpu(smp_processor_id());
+#endif
+        put_cpu_var(irql_info);
+        mutex_lock(&info->lock);
+        assert(info->count == 0);
+        assert(info->task == NULL);
+        info->count = 1;
+        info->task = current;
+        return PASSIVE_LEVEL;
+}
+
+static inline void lower_irql(KIRQL oldirql)
+{
+        irql_info_t *info;
+
+        assert(oldirql <= DISPATCH_LEVEL);
+        info = &get_cpu_var(irql_info);
+        assert(info->task == current);
+        assert(mutex_is_locked(&info->lock));
+        assert(info->count > 0);
+        if (--info->count == 0) {
+                info->task = NULL;
+#ifdef CONFIG_SMP
+                current->cpus_allowed = info->cpus_allowed;
+#endif
+                mutex_unlock(&info->lock);
+        }
+        put_cpu_var(irql_info);
+}
+
+static inline KIRQL current_irql(void)
+{
+        int count;
+        if (in_irq() || irqs_disabled())
+                EXIT4(return DIRQL);
+        if (in_atomic() || in_interrupt())
+                EXIT4(return SOFT_IRQL);
+        count = get_cpu_var(irql_info).count;
+        put_cpu_var(irql_info);
+        if (count)
+                EXIT6(return DISPATCH_LEVEL);
+        else
+                EXIT6(return PASSIVE_LEVEL);
+}
+
+#else
+
+static inline KIRQL current_irql(void)
+{
+        if (in_irq() || irqs_disabled())
+                EXIT4(return DIRQL);
+        if (in_interrupt())
+                EXIT4(return SOFT_IRQL);
+        if (in_atomic())
+                EXIT6(return DISPATCH_LEVEL);
+        else
+                EXIT6(return PASSIVE_LEVEL);
 }
 
 static inline KIRQL raise_irql(KIRQL newirql)
 {
-        KIRQL irql = current_irql();
-        if (irql < DISPATCH_LEVEL && newirql == DISPATCH_LEVEL) {
-                local_bh_disable();
-                preempt_disable();
-        }
-        return irql;
+        KIRQL ret = in_atomic() ? DISPATCH_LEVEL : PASSIVE_LEVEL;
+        assert(newirql == DISPATCH_LEVEL);
+        assert(current_irql() <= DISPATCH_LEVEL);
+        preempt_disable();
+        return ret;
 }
 
 static inline void lower_irql(KIRQL oldirql)
 {
-        KIRQL irql = current_irql();
-        if (oldirql < DISPATCH_LEVEL && irql == DISPATCH_LEVEL) {
-                preempt_enable();
-                local_bh_enable();
-        }
+        assert(current_irql() == DISPATCH_LEVEL);
+        preempt_enable();
 }
 
+#endif
+
+#define irql_gfp() (in_atomic() ? GFP_ATOMIC : GFP_KERNEL)
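Whichever branch is compiled in, the pairing contract is the same:
raise_irql(DISPATCH_LEVEL) returns the previous level, which must be handed
back to lower_irql(). A sketch of the calling pattern; the body is
hypothetical:

    KIRQL irql;

    irql = raise_irql(DISPATCH_LEVEL);
    /* ... code that Windows drivers expect to run at DISPATCH_LEVEL,
     * e.g. delivering a DPC; allocate with irql_gfp() in here ...    */
    lower_irql(irql);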
 
760
 
463
761
/* Windows spinlocks are of type ULONG_PTR which is not big enough to
464
762
 * store Linux spinlocks; so we implement Windows spinlocks using
465
763
 * ULONG_PTR space with our own functions/macros */
466
764
 
467
 
/* the reason for value of unlocked spinlock to be 0, instead of 1
468
 
 * (which is what linux spinlocks use), is that some drivers don't
469
 
 * first call to initialize spinlock; in those case, the value of the
470
 
 * lock seems to be 0 (presumably in Windows value of unlocked
471
 
 * spinlock is 0).
472
 
 */
473
 
#define KSPIN_LOCK_UNLOCKED 0
474
 
#define KSPIN_LOCK_LOCKED 1
475
 
 
476
 
#define kspin_lock_init(lock) *(lock) = KSPIN_LOCK_UNLOCKED
 
765
/* Windows seems to use 0 for unlocked state of spinlock - if Linux
 
766
 * convention of 1 for unlocked state is used, at least prism54 driver
 
767
 * crashes */
 
768
 
 
769
#define NT_SPIN_LOCK_UNLOCKED 0
 
770
#define NT_SPIN_LOCK_LOCKED 1
 
771
 
 
772
static inline void  nt_spin_lock_init(NT_SPIN_LOCK *lock)
 
773
{
 
774
        *lock = NT_SPIN_LOCK_UNLOCKED;
 
775
}
477
776
 
478
777
#ifdef CONFIG_SMP
479
778
 
480
 
#ifdef __HAVE_ARCH_CMPXCHG
481
 
 
482
 
#define kspin_lock(lock)                                                \
483
 
        while (cmpxchg(lock, KSPIN_LOCK_UNLOCKED, KSPIN_LOCK_LOCKED) != \
484
 
               KSPIN_LOCK_UNLOCKED)
485
 
 
486
 
#else
487
 
 
488
 
extern spinlock_t spinlock_kspin_lock;
489
 
#define kspin_lock(lock)                                \
490
 
do {                                                    \
491
 
        while (1) {                                     \
492
 
                spin_lock(&spinlock_kspin_lock);        \
493
 
                if (*(lock) == KSPIN_LOCK_UNLOCKED)     \
494
 
                        break;                          \
495
 
                spin_unlock(&spinlock_kspin_lock);      \
496
 
        }                                               \
497
 
        *(lock) = KSPIN_LOCK_LOCKED;                    \
498
 
        spin_unlock(&spinlock_kspin_lock);              \
499
 
} while (0)
500
 
                
501
 
#endif // __HAVE_ARCH_CMPXCHG
502
 
 
503
 
#define kspin_unlock(lock) xchg(lock, KSPIN_LOCK_UNLOCKED)
504
 
 
505
 
#else
506
 
 
507
 
#define kspin_lock(lock) *(lock) = KSPIN_LOCK_LOCKED
508
 
#define kspin_unlock(lock) *(lock) = KSPIN_LOCK_UNLOCKED
 
779
static inline void nt_spin_lock(NT_SPIN_LOCK *lock)
 
780
{
 
781
        __asm__ __volatile__(
 
782
                "1:\t"
 
783
                "  xchgl %1, %0\n\t"
 
784
                "  testl %1, %1\n\t"
 
785
                "  jz 3f\n"
 
786
                "2:\t"
 
787
                "  rep; nop\n\t"
 
788
                "  cmpl %2, %0\n\t"
 
789
                "  je 1b\n\t"
 
790
                "  jmp 2b\n"
 
791
                "3:\n\t"
 
792
                : "+m" (*lock)
 
793
                : "r" (NT_SPIN_LOCK_LOCKED), "i" (NT_SPIN_LOCK_UNLOCKED));
 
794
}
 
795
 
 
796
static inline void nt_spin_unlock(NT_SPIN_LOCK *lock)
 
797
{
 
798
        *lock = NT_SPIN_LOCK_UNLOCKED;
 
799
}
 
800
 
 
801
#else // CONFIG_SMP
 
802
 
 
803
#define nt_spin_lock(lock) do { } while (0)
 
804
 
 
805
#define nt_spin_unlock(lock)  do { } while (0)
509
806
 
510
807
#endif // CONFIG_SMP
511
808
 
512
 
/* raise IRQL to given (higher) IRQL if necessary after locking */
513
 
#define kspin_lock_irql(lock, newirql)                                  \
514
 
({                                                                      \
515
 
        KIRQL _cur_irql_ = current_irql();                              \
516
 
        KSPIN_LOCK _val_ = *(lock);                                     \
517
 
        if (_val_ > KSPIN_LOCK_LOCKED)                                  \
518
 
                ERROR("illegal spinlock: %p(%lu)", lock, _val_);        \
519
 
        if (_cur_irql_ < DISPATCH_LEVEL && newirql == DISPATCH_LEVEL) { \
520
 
                local_bh_disable();                                     \
521
 
                preempt_disable();                                      \
522
 
        }                                                               \
523
 
        kspin_lock(lock);                                               \
524
 
        _cur_irql_;                                                     \
525
 
})
 
809
/* When kernel would've disabled preempt (e.g., in interrupt
 
810
 * handlers), we need to fake preempt so driver thinks it is running
 
811
 * at right IRQL */
 
812
 
 
813
/* raise IRQL to given (higher) IRQL if necessary before locking */
 
814
static inline KIRQL nt_spin_lock_irql(NT_SPIN_LOCK *lock, KIRQL newirql)
 
815
{
 
816
        KIRQL oldirql = raise_irql(newirql);
 
817
        nt_spin_lock(lock);
 
818
        return oldirql;
 
819
}
526
820
 
527
821
/* lower IRQL to given (lower) IRQL if necessary after unlocking */
528
 
#define kspin_unlock_irql(lock, oldirql)                                \
529
 
do {                                                                    \
530
 
        KIRQL _cur_irql_ = current_irql();                              \
531
 
        KSPIN_LOCK _val_ = *(lock);                                     \
532
 
        if (_val_ > KSPIN_LOCK_LOCKED)                                  \
533
 
                ERROR("illegal spinlock: %p(%lu)", lock, _val_);        \
534
 
        kspin_unlock(lock);                                             \
535
 
        if (oldirql < DISPATCH_LEVEL && _cur_irql_ == DISPATCH_LEVEL) { \
536
 
                preempt_enable();                                       \
537
 
                local_bh_enable();                                      \
538
 
        }                                                               \
539
 
} while (0)
 
822
static inline void nt_spin_unlock_irql(NT_SPIN_LOCK *lock, KIRQL oldirql)
 
823
{
 
824
        nt_spin_unlock(lock);
 
825
        lower_irql(oldirql);
 
826
}
540
827
 
541
 
#define kspin_lock_irqsave(lock, flags)                                 \
 
828
#define nt_spin_lock_irqsave(lock, flags)                               \
542
829
do {                                                                    \
543
 
        KSPIN_LOCK _val_ = *(lock);                                     \
544
 
        if (_val_ > KSPIN_LOCK_LOCKED)                                  \
545
 
                ERROR("illegal spinlock: %p(%lu)", lock, _val_);        \
546
830
        local_irq_save(flags);                                          \
547
831
        preempt_disable();                                              \
548
 
        kspin_lock(lock);                                               \
 
832
        nt_spin_lock(lock);                                             \
549
833
} while (0)
550
834
 
551
 
#define kspin_unlock_irqrestore(lock, flags)                            \
 
835
#define nt_spin_unlock_irqrestore(lock, flags)                          \
552
836
do {                                                                    \
553
 
        KSPIN_LOCK _val_ = *(lock);                                     \
554
 
        if (_val_ > KSPIN_LOCK_LOCKED)                                  \
555
 
                ERROR("illegal spinlock: %p(%lu)", lock, _val_);        \
556
 
        kspin_unlock(lock);                                             \
 
837
        nt_spin_unlock(lock);                                           \
 
838
        preempt_enable_no_resched();                                    \
557
839
        local_irq_restore(flags);                                       \
558
 
        preempt_enable();                                               \
559
 
} while (0)
-
-static inline void wrapper_set_timer_dpc(struct wrapper_timer *wrapper_timer,
-					 struct kdpc *kdpc)
-{
-	wrapper_timer->kdpc = kdpc;
-}
-
-static inline void init_dpc(struct kdpc *kdpc, void *func, void *ctx)
-{
-	kdpc->func = func;
-	kdpc->ctx  = ctx;
-}
-
-static inline ULONG SPAN_PAGES(ULONG_PTR ptr, SIZE_T length)
-{
-	ULONG n;
-
-	n = (((ULONG_PTR)ptr & (PAGE_SIZE - 1)) +
-	     length + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
-
-	return n;
-}
-
-/* DEBUG macros */
-
-#define DBGTRACE(fmt, ...) do { } while (0)
-#define DBGTRACE1(fmt, ...) do { } while (0)
-#define DBGTRACE2(fmt, ...) do { } while (0)
-#define DBGTRACE3(fmt, ...) do { } while (0)
-#define DBGTRACE4(fmt, ...) do { } while (0)
-#define DBGTRACE5(fmt, ...) do { } while (0)
-
-/* for a block of code */
-#define DBG_BLOCK() while (0)
-
-extern int debug;
-
-#if defined DEBUG
-#undef DBGTRACE
-#define DBGTRACE(level, fmt, ...) do {					\
-		if (level <= debug)					\
-			printk(KERN_INFO "%s (%s:%d): " fmt "\n",	\
-			       DRIVER_NAME, __FUNCTION__,		\
-			       __LINE__ , ## __VA_ARGS__);		\
-	} while (0)
-#undef DBG_BLOCK
-#define DBG_BLOCK()
-#endif
-
-#if defined DEBUG && DEBUG >= 1
-#undef DBGTRACE1
-#define DBGTRACE1(fmt, ...) DBGTRACE(1, fmt , ## __VA_ARGS__)
-#endif
-
-#if defined DEBUG && DEBUG >= 2
-#undef DBGTRACE2
-#define DBGTRACE2(fmt, ...) DBGTRACE(2, fmt , ## __VA_ARGS__)
-#endif
-
-#if defined DEBUG && DEBUG >= 3
-#undef DBGTRACE3
-#define DBGTRACE3(fmt, ...) DBGTRACE(3, fmt , ## __VA_ARGS__)
-#endif
-
-#if defined DEBUG && DEBUG >= 4
-#undef DBGTRACE4
-#define DBGTRACE4(fmt, ...) DBGTRACE(4, fmt , ## __VA_ARGS__)
-#endif
-
-#if defined DEBUG && DEBUG >= 5
-#undef DBGTRACE5
-#define DBGTRACE5(fmt, ...) DBGTRACE(5, fmt , ## __VA_ARGS__)
-#endif
-
-#define TRACEENTER(fmt, ...) DBGTRACE("Enter " fmt , ## __VA_ARGS__)
-#define TRACEENTER1(fmt, ...) DBGTRACE1("Enter " fmt , ## __VA_ARGS__)
-#define TRACEENTER2(fmt, ...) DBGTRACE2("Enter " fmt , ## __VA_ARGS__)
-#define TRACEENTER3(fmt, ...) DBGTRACE3("Enter " fmt , ## __VA_ARGS__)
-#define TRACEENTER4(fmt, ...) DBGTRACE4("Enter " fmt , ## __VA_ARGS__)
-#define TRACEENTER5(fmt, ...) DBGTRACE5("Enter " fmt , ## __VA_ARGS__)
-
-#define TRACEEXIT(stmt) do { DBGTRACE("Exit"); stmt; } while (0)
-#define TRACEEXIT1(stmt) do { DBGTRACE1("Exit"); stmt; } while (0)
-#define TRACEEXIT2(stmt) do { DBGTRACE2("Exit"); stmt; } while (0)
-#define TRACEEXIT3(stmt) do { DBGTRACE3("Exit"); stmt; } while (0)
-#define TRACEEXIT4(stmt) do { DBGTRACE4("Exit"); stmt; } while (0)
-#define TRACEEXIT5(stmt) do { DBGTRACE5("Exit"); stmt; } while (0)
-
-#ifdef USB_DEBUG
-#define USBTRACE(fmt, ...) DBGTRACE1(fmt, ## __VA_ARGS__)
-#define USBTRACEENTER(fmt, ...) TRACEENTER1(fmt, ## __VA_ARGS__)
-#define USBTRACEEXIT(stmt) TRACEEXIT1(stmt)
-#else
-#define USBTRACE(fmt, ...)
-#define USBTRACEENTER(fmt, ...)
-#define USBTRACEEXIT(stmt) stmt
-#endif
-
-#if defined DEBUG
-#define ASSERT(expr) do {						\
-		if (!(expr)) {						\
-			ERROR("Assertion failed! %s", (#expr));		\
-		}							\
-	} while (0)
-#else
-#define ASSERT(expr)
 
+
+static inline ULONG SPAN_PAGES(void *ptr, SIZE_T length)
+{
+	return PAGE_ALIGN(((unsigned long)ptr & (PAGE_SIZE - 1)) + length)
+		>> PAGE_SHIFT;
+}
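The computation counts how many pages the byte range [ptr, ptr + length) touches: the offset within the first page is added to the length, and the sum is rounded up to whole pages. A quick sanity check (editor's sketch, assuming 4 KB pages and the kernel's BUG_ON macro):

static inline void span_pages_sanity_check(void)
{
	/* a 0x20-byte buffer at page offset 0xff0 straddles a boundary:
	 * PAGE_ALIGN(0xff0 + 0x20) >> PAGE_SHIFT == 0x2000 >> 12 == 2 */
	BUG_ON(SPAN_PAGES((void *)0xff0, 0x20) != 2);
	/* a page-aligned, page-sized buffer spans exactly one page */
	BUG_ON(SPAN_PAGES((void *)0x1000, 0x1000) != 1);
}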
 
+
+#ifdef CONFIG_X86_64
+
+/* TODO: can these be implemented without using spinlock? */
+
+static inline struct nt_slist *PushEntrySList(nt_slist_header *head,
+					      struct nt_slist *entry,
+					      NT_SPIN_LOCK *lock)
+{
+	KIRQL irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
+	entry->next = head->next;
+	head->next = entry;
+	head->depth++;
+	nt_spin_unlock_irql(lock, irql);
+	TRACE4("%p, %p, %p", head, entry, entry->next);
+	return entry->next;
+}
+
+static inline struct nt_slist *PopEntrySList(nt_slist_header *head,
+					     NT_SPIN_LOCK *lock)
+{
+	struct nt_slist *entry;
+	KIRQL irql = nt_spin_lock_irql(lock, DISPATCH_LEVEL);
+	entry = head->next;
+	if (entry) {
+		head->next = entry->next;
+		head->depth--;
+	}
+	nt_spin_unlock_irql(lock, irql);
+	TRACE4("%p, %p", head, entry);
+	return entry;
+}
 
+
+#else
+
+#define u64_low_32(x) ((u32)x)
+#define u64_high_32(x) ((u32)(x >> 32))
+
+static inline u64 cmpxchg8b(volatile u64 *ptr, u64 old, u64 new)
+{
+	u64 prev;
+
+	__asm__ __volatile__(
+		"\n"
+		LOCK_PREFIX "cmpxchg8b %0\n"
+		: "+m" (*ptr), "=A" (prev)
+		: "A" (old), "b" (u64_low_32(new)), "c" (u64_high_32(new)));
+	return prev;
+}
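cmpxchg8b() wraps the x86 lock cmpxchg8b instruction: it atomically replaces the 8-byte value at ptr with new only if it still equals old, and returns the value actually found there. The usual compare-and-swap retry loop looks like this (editor's sketch; counter64_inc is hypothetical):

static inline void counter64_inc(volatile u64 *counter)
{
	u64 old;

	/* retry until no other CPU modified the counter in between */
	do {
		old = *counter;
	} while (cmpxchg8b(counter, old, old + 1) != old);
}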
 
+
+/* slist routines below update slist atomically - no need for
+ * spinlocks */
+
+static inline struct nt_slist *PushEntrySList(nt_slist_header *head,
+					      struct nt_slist *entry,
+					      NT_SPIN_LOCK *lock)
+{
+	nt_slist_header old, new;
+	do {
+		old.align = head->align;
+		entry->next = old.next;
+		new.next = entry;
+		new.depth = old.depth + 1;
+	} while (cmpxchg8b(&head->align, old.align, new.align) != old.align);
+	TRACE4("%p, %p, %p", head, entry, old.next);
+	return old.next;
+}
+
+static inline struct nt_slist *PopEntrySList(nt_slist_header *head,
+					     NT_SPIN_LOCK *lock)
+{
+	struct nt_slist *entry;
+	nt_slist_header old, new;
+	do {
+		old.align = head->align;
+		entry = old.next;
+		if (!entry)
+			break;
+		new.next = entry->next;
+		new.depth = old.depth - 1;
+	} while (cmpxchg8b(&head->align, old.align, new.align) != old.align);
+	TRACE4("%p, %p", head, entry);
+	return entry;
+}
 
+
+#endif
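Both branches present the same API, so callers can treat the pair as an atomic LIFO; the lock argument is only consulted by the spinlock-based (CONFIG_X86_64) variant. A hypothetical free list built on these helpers (editor's sketch):

/* hypothetical: entries embed a struct nt_slist as their first member */
static nt_slist_header free_list;
static NT_SPIN_LOCK free_list_lock;	/* assumed initialized elsewhere */

static void put_free(struct nt_slist *entry)
{
	PushEntrySList(&free_list, entry, &free_list_lock);
}

static struct nt_slist *get_free(void)
{
	/* returns NULL when the list is empty */
	return PopEntrySList(&free_list, &free_list_lock);
}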
 
+
+#define sleep_hz(n)					\
+do {							\
+	set_current_state(TASK_INTERRUPTIBLE);		\
+	schedule_timeout(n);				\
+} while (0)
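The argument is a timeout in jiffies, so callers scale by HZ; for example, sleeping roughly 100 ms between hardware polls (editor's sketch):

static void wait_a_bit(void)
{
	/* TASK_INTERRUPTIBLE, so a signal can end the sleep early */
	sleep_hz(HZ / 10);	/* about 100 ms */
}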
 
+
+int ntoskernel_init(void);
+void ntoskernel_exit(void);
+int ntoskernel_init_device(struct wrap_device *wd);
+void ntoskernel_exit_device(struct wrap_device *wd);
+void *allocate_object(ULONG size, enum common_object_type type,
+		      struct unicode_string *name);
+void free_object(void *object);
+
+int usb_init(void);
+void usb_exit(void);
+int usb_init_device(struct wrap_device *wd);
+void usb_exit_device(struct wrap_device *wd);
+void usb_cancel_pending_urbs(void);
+
+int crt_init(void);
+void crt_exit(void);
+int rtl_init(void);
+void rtl_exit(void);
+int wrap_procfs_init(void);
+void wrap_procfs_remove(void);
+
+int link_pe_images(struct pe_image *pe_image, unsigned short n);
+
+int stricmp(const char *s1, const char *s2);
+void dump_bytes(const char *name, const u8 *from, int len);
+struct mdl *allocate_init_mdl(void *virt, ULONG length);
+void free_mdl(struct mdl *mdl);
+struct driver_object *find_bus_driver(const char *name);
+void free_custom_extensions(struct driver_extension *drv_obj_ext);
+struct nt_thread *get_current_nt_thread(void);
+u64 ticks_1601(void);
+int schedule_ntos_work_item(NTOS_WORK_FUNC func, void *arg1, void *arg2);
+void wrap_init_timer(struct nt_timer *nt_timer, enum timer_type type,
+		     struct ndis_mp_block *nmb);
+BOOLEAN wrap_set_timer(struct nt_timer *nt_timer, unsigned long expires_hz,
+		       unsigned long repeat_hz, struct kdpc *kdpc);
+
+LONG InterlockedDecrement(LONG volatile *val) wfastcall;
+LONG InterlockedIncrement(LONG volatile *val) wfastcall;
+struct nt_list *ExInterlockedInsertHeadList
+	(struct nt_list *head, struct nt_list *entry,
+	 NT_SPIN_LOCK *lock) wfastcall;
+struct nt_list *ExInterlockedInsertTailList
+	(struct nt_list *head, struct nt_list *entry,
+	 NT_SPIN_LOCK *lock) wfastcall;
+struct nt_list *ExInterlockedRemoveHeadList
+	(struct nt_list *head, NT_SPIN_LOCK *lock) wfastcall;
+NTSTATUS IofCallDriver(struct device_object *dev_obj, struct irp *irp) wfastcall;
+KIRQL KfRaiseIrql(KIRQL newirql) wfastcall;
+void KfLowerIrql(KIRQL oldirql) wfastcall;
+KIRQL KfAcquireSpinLock(NT_SPIN_LOCK *lock) wfastcall;
+void KfReleaseSpinLock(NT_SPIN_LOCK *lock, KIRQL oldirql) wfastcall;
+void IofCompleteRequest(struct irp *irp, CHAR prio_boost) wfastcall;
+void KefReleaseSpinLockFromDpcLevel(NT_SPIN_LOCK *lock) wfastcall;
+
+LONG ObfReferenceObject(void *object) wfastcall;
+void ObfDereferenceObject(void *object) wfastcall;
+
+#define ObReferenceObject(object) ObfReferenceObject(object)
+#define ObDereferenceObject(object) ObfDereferenceObject(object)
+
+void WRITE_PORT_UCHAR(ULONG_PTR port, UCHAR value) wstdcall;
+UCHAR READ_PORT_UCHAR(ULONG_PTR port) wstdcall;
+
+#undef ExAllocatePoolWithTag
+void *ExAllocatePoolWithTag(enum pool_type pool_type, SIZE_T size,
+			    ULONG tag) wstdcall;
+#if defined(ALLOC_DEBUG) && ALLOC_DEBUG > 1
+#define ExAllocatePoolWithTag(pool_type, size, tag)			\
+	wrap_ExAllocatePoolWithTag(pool_type, size, tag, __FILE__, __LINE__)
+#endif
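ExAllocatePoolWithTag() is the Windows pool allocator that wrapped drivers call; with ALLOC_DEBUG > 1 it is diverted to a wrapper that records the call site. A minimal sketch of the Windows-style calling pattern (editor's illustration; assumes the usual NonPagedPool enumerator from this tree's pool_type, and the tag value is arbitrary):

static void pool_alloc_example(void)
{
	/* 256 bytes of non-paged pool, tagged with an arbitrary marker */
	void *buf = ExAllocatePoolWithTag(NonPagedPool, 256, 0x72777274);

	if (buf)
		ExFreePool(buf);
}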
 
+
+void ExFreePool(void *p) wstdcall;
+ULONG MmSizeOfMdl(void *base, ULONG length) wstdcall;
+void __iomem *MmMapIoSpace(PHYSICAL_ADDRESS phys_addr, SIZE_T size,
+			   enum memory_caching_type cache) wstdcall;
+void MmUnmapIoSpace(void __iomem *addr, SIZE_T size) wstdcall;
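MmMapIoSpace()/MmUnmapIoSpace() are the Windows counterparts of ioremap()/iounmap(). A sketch of mapping a register window uncached and reading one register (editor's illustration; assumes the MmNonCached enumerator from this tree's memory_caching_type and the kernel's readb()):

static u8 read_first_reg(PHYSICAL_ADDRESS phys)
{
	u8 val = 0;
	void __iomem *regs = MmMapIoSpace(phys, PAGE_SIZE, MmNonCached);

	if (regs) {
		val = readb(regs);
		MmUnmapIoSpace(regs, PAGE_SIZE);
	}
	return val;
}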
 
+void MmProbeAndLockPages(struct mdl *mdl, KPROCESSOR_MODE access_mode,
+			 enum lock_operation operation) wstdcall;
+void MmUnlockPages(struct mdl *mdl) wstdcall;
+void KeInitializeEvent(struct nt_event *nt_event,
+		       enum event_type type, BOOLEAN state) wstdcall;
+LONG KeSetEvent(struct nt_event *nt_event, KPRIORITY incr,
+		BOOLEAN wait) wstdcall;
+LONG KeResetEvent(struct nt_event *nt_event) wstdcall;
+void KeClearEvent(struct nt_event *nt_event) wstdcall;
+void KeInitializeDpc(struct kdpc *kdpc, void *func, void *ctx) wstdcall;
+BOOLEAN queue_kdpc(struct kdpc *kdpc);
+BOOLEAN dequeue_kdpc(struct kdpc *kdpc);
+
+void KeFlushQueuedDpcs(void) wstdcall;
+NTSTATUS IoConnectInterrupt(struct kinterrupt **kinterrupt,
+			    PKSERVICE_ROUTINE service_routine,
+			    void *service_context, NT_SPIN_LOCK *lock,
+			    ULONG vector, KIRQL irql, KIRQL synch_irql,
+			    enum kinterrupt_mode interrupt_mode,
+			    BOOLEAN shareable, KAFFINITY processor_enable_mask,
+			    BOOLEAN floating_save) wstdcall;
+void IoDisconnectInterrupt(struct kinterrupt *interrupt) wstdcall;
+BOOLEAN KeSynchronizeExecution(struct kinterrupt *interrupt,
+			       PKSYNCHRONIZE_ROUTINE synch_routine,
+			       void *ctx) wstdcall;
+
+NTSTATUS KeWaitForSingleObject(void *object, KWAIT_REASON reason,
+			       KPROCESSOR_MODE waitmode, BOOLEAN alertable,
+			       LARGE_INTEGER *timeout) wstdcall;
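The event functions follow the usual Windows pattern: initialize once, signal from one context, block in another. A sketch (editor's illustration; assumes the NotificationEvent, Executive and KernelMode enumerators defined with the Windows types in this tree):

static struct nt_event done_event;

static void init_and_wait(void)
{
	KeInitializeEvent(&done_event, NotificationEvent, FALSE);
	/* ... kick off work that eventually calls signal_done() ... */
	KeWaitForSingleObject(&done_event, Executive, KernelMode,
			      FALSE, NULL /* NULL timeout: wait forever */);
}

static void signal_done(void)
{
	KeSetEvent(&done_event, 0, FALSE);
}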
 
+struct mdl *IoAllocateMdl(void *virt, ULONG length, BOOLEAN second_buf,
+			  BOOLEAN charge_quota, struct irp *irp) wstdcall;
+void MmBuildMdlForNonPagedPool(struct mdl *mdl) wstdcall;
+void IoFreeMdl(struct mdl *mdl) wstdcall;
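For driver-allocated (non-paged) memory the canonical MDL sequence is allocate, then mark resident; MmProbeAndLockPages() is only needed for pageable buffers. A sketch (editor's illustration; mdl_for_buffer is hypothetical):

static struct mdl *mdl_for_buffer(void *buf, ULONG len)
{
	/* second_buf=FALSE, charge_quota=FALSE, no associated IRP */
	struct mdl *mdl = IoAllocateMdl(buf, len, FALSE, FALSE, NULL);

	if (mdl)
		MmBuildMdlForNonPagedPool(mdl);	/* buffer is already resident */
	return mdl;	/* release with IoFreeMdl() when done */
}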
 
+NTSTATUS IoCreateDevice(struct driver_object *driver, ULONG dev_ext_length,
+			struct unicode_string *dev_name, DEVICE_TYPE dev_type,
+			ULONG dev_chars, BOOLEAN exclusive,
+			struct device_object **dev_obj) wstdcall;
+NTSTATUS IoCreateSymbolicLink(struct unicode_string *link,
+			      struct unicode_string *dev_name) wstdcall;
+void IoDeleteDevice(struct device_object *dev) wstdcall;
+void IoDetachDevice(struct device_object *topdev) wstdcall;
+struct device_object *IoGetAttachedDevice(struct device_object *dev) wstdcall;
+struct device_object *IoGetAttachedDeviceReference
+	(struct device_object *dev) wstdcall;
+NTSTATUS IoAllocateDriverObjectExtension
+	(struct driver_object *drv_obj, void *client_id, ULONG extlen,
+	 void **ext) wstdcall;
+void *IoGetDriverObjectExtension(struct driver_object *drv,
+				 void *client_id) wstdcall;
+struct device_object *IoAttachDeviceToDeviceStack
+	(struct device_object *src, struct device_object *dst) wstdcall;
+void KeInitializeEvent(struct nt_event *nt_event, enum event_type type,
+		       BOOLEAN state) wstdcall;
+struct irp *IoAllocateIrp(char stack_count, BOOLEAN charge_quota) wstdcall;
+void IoFreeIrp(struct irp *irp) wstdcall;
+BOOLEAN IoCancelIrp(struct irp *irp) wstdcall;
+struct irp *IoBuildSynchronousFsdRequest
+	(ULONG major_func, struct device_object *dev_obj, void *buf,
+	 ULONG length, LARGE_INTEGER *offset, struct nt_event *event,
+	 struct io_status_block *status) wstdcall;
+struct irp *IoBuildAsynchronousFsdRequest
+	(ULONG major_func, struct device_object *dev_obj, void *buf,
+	 ULONG length, LARGE_INTEGER *offset,
+	 struct io_status_block *status) wstdcall;
+NTSTATUS PoCallDriver(struct device_object *dev_obj, struct irp *irp) wstdcall;
+
+NTSTATUS IoPassIrpDown(struct device_object *dev_obj, struct irp *irp) wstdcall;
+WIN_FUNC_DECL(IoPassIrpDown,2);
+NTSTATUS IoSyncForwardIrp(struct device_object *dev_obj,
+			  struct irp *irp) wstdcall;
+NTSTATUS IoAsyncForwardIrp(struct device_object *dev_obj,
+			   struct irp *irp) wstdcall;
+NTSTATUS IoInvalidDeviceRequest(struct device_object *dev_obj,
+				struct irp *irp) wstdcall;
+
+KIRQL KeGetCurrentIrql(void) wstdcall;
+void KeInitializeSpinLock(NT_SPIN_LOCK *lock) wstdcall;
+void KeAcquireSpinLock(NT_SPIN_LOCK *lock, KIRQL *irql) wstdcall;
+void KeReleaseSpinLock(NT_SPIN_LOCK *lock, KIRQL oldirql) wstdcall;
+KIRQL KeAcquireSpinLockRaiseToDpc(NT_SPIN_LOCK *lock) wstdcall;
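These are the Windows-facing spinlock entry points; they implement the same raise-lock/unlock-restore protocol as the nt_spin_lock_irqsave() macros near the top of this section. A sketch of the usual driver-side pattern (editor's illustration; dev_lock is hypothetical):

static NT_SPIN_LOCK dev_lock;	/* KeInitializeSpinLock()d once at setup */

static void dev_critical_section(void)
{
	KIRQL irql;

	KeAcquireSpinLock(&dev_lock, &irql);	/* raises to DISPATCH_LEVEL */
	/* ... touch device state ... */
	KeReleaseSpinLock(&dev_lock, irql);
}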
 
+
+void IoAcquireCancelSpinLock(KIRQL *irql) wstdcall;
+void IoReleaseCancelSpinLock(KIRQL irql) wstdcall;
+
+void RtlCopyMemory(void *dst, const void *src, SIZE_T length) wstdcall;
+NTSTATUS RtlUnicodeStringToAnsiString
+	(struct ansi_string *dst, const struct unicode_string *src,
+	 BOOLEAN dup) wstdcall;
+NTSTATUS RtlAnsiStringToUnicodeString
+	(struct unicode_string *dst, const struct ansi_string *src,
+	 BOOLEAN dup) wstdcall;
+void RtlInitAnsiString(struct ansi_string *dst, const char *src) wstdcall;
+void RtlInitString(struct ansi_string *dst, const char *src) wstdcall;
+void RtlInitUnicodeString(struct unicode_string *dest,
+			  const wchar_t *src) wstdcall;
+void RtlFreeUnicodeString(struct unicode_string *string) wstdcall;
+void RtlFreeAnsiString(struct ansi_string *string) wstdcall;
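The Rtl conversions allocate the destination buffer when the final dup/alloc argument is TRUE, so each successful conversion must be paired with the matching free. A sketch (editor's illustration; name_to_unicode is hypothetical):

static NTSTATUS name_to_unicode(const char *name, struct unicode_string *uc)
{
	struct ansi_string ansi;

	RtlInitAnsiString(&ansi, name);	/* points at name, no copy made */
	/* TRUE: allocate uc's buffer; on success the caller frees it
	 * later with RtlFreeUnicodeString(uc) */
	return RtlAnsiStringToUnicodeString(uc, &ansi, TRUE);
}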
 
+LONG RtlCompareUnicodeString(const struct unicode_string *s1,
+			     const struct unicode_string *s2,
+			     BOOLEAN case_insensitive) wstdcall;
+void RtlCopyUnicodeString(struct unicode_string *dst,
+			  struct unicode_string *src) wstdcall;
+NTSTATUS RtlUpcaseUnicodeString(struct unicode_string *dst,
+				struct unicode_string *src,
+				BOOLEAN alloc) wstdcall;
+void KeInitializeTimer(struct nt_timer *nt_timer) wstdcall;
+void KeInitializeTimerEx(struct nt_timer *nt_timer,
+			 enum timer_type type) wstdcall;
+BOOLEAN KeSetTimerEx(struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
+		     LONG period_ms, struct kdpc *kdpc) wstdcall;
+BOOLEAN KeSetTimer(struct nt_timer *nt_timer, LARGE_INTEGER duetime_ticks,
+		   struct kdpc *kdpc) wstdcall;
+BOOLEAN KeCancelTimer(struct nt_timer *nt_timer) wstdcall;
+void KeInitializeDpc(struct kdpc *kdpc, void *func, void *ctx) wstdcall;
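Timers deliver expiry through an optional DPC, as on Windows: the due time is in 100 ns ticks (negative meaning relative to now), the period in milliseconds. A sketch of a periodic poll timer (editor's illustration; assumes LARGE_INTEGER is the usual 64-bit integer typedef and that SynchronizationTimer is defined with the other Windows enums in this tree):

static struct nt_timer poll_timer;
static struct kdpc poll_dpc;

static void poll_dpc_fn(struct kdpc *kdpc, void *ctx, void *arg1, void *arg2)
{
	/* runs at DISPATCH_LEVEL each time the timer fires */
}

static void start_polling(void)
{
	LARGE_INTEGER duetime = -100 * 10000;	/* first expiry: 100 ms */

	/* func parameter is untyped here, hence the cast */
	KeInitializeDpc(&poll_dpc, (void *)poll_dpc_fn, NULL);
	KeInitializeTimerEx(&poll_timer, SynchronizationTimer);
	KeSetTimerEx(&poll_timer, duetime, 100 /* ms period */, &poll_dpc);
	/* ... later: KeCancelTimer(&poll_timer); */
}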
 
+struct nt_thread *KeGetCurrentThread(void) wstdcall;
+NTSTATUS ObReferenceObjectByHandle(void *handle, ACCESS_MASK desired_access,
+				   void *obj_type, KPROCESSOR_MODE access_mode,
+				   void **object, void *handle_info) wstdcall;
+
+void adjust_user_shared_data_addr(char *driver, unsigned long length);
+
+extern spinlock_t ntoskernel_lock;
+extern spinlock_t irp_cancel_lock;
+extern struct nt_list object_list;
+#ifdef CONFIG_X86_64
+extern struct kuser_shared_data kuser_shared_data;
+#endif
+
+#define IoCompleteRequest(irp, prio) IofCompleteRequest(irp, prio)
+#define IoCallDriver(dev, irp) IofCallDriver(dev, irp)
+
+#if defined(IO_DEBUG)
+#define DUMP_IRP(_irp)							\
+do {									\
+	struct io_stack_location *_irp_sl;				\
+	_irp_sl = IoGetCurrentIrpStackLocation(_irp);			\
+	IOTRACE("irp: %p, stack size: %d, cl: %d, sl: %p, dev_obj: %p, " \
+		"mj_fn: %d, minor_fn: %d, nt_urb: %p, event: %p",	\
+		_irp, _irp->stack_count, (_irp)->current_location,	\
+		_irp_sl, _irp_sl->dev_obj, _irp_sl->major_fn,		\
+		_irp_sl->minor_fn, IRP_URB(_irp),			\
+		(_irp)->user_event);					\
+} while (0)
+#else
+#define DUMP_IRP(_irp) do { } while (0)
 #endif
 
 #endif // _NTOSKERNEL_H_