/*
 * Copyright (C) 2006 Giridhar Pemmasani
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include "ntoskernel.h"
20
struct slack_alloc_info {
25
static struct nt_list allocs;
26
static struct nt_list slack_allocs;
27
static NT_SPIN_LOCK alloc_lock;
29
#if defined(ALLOC_DEBUG)
43
static atomic_t alloc_sizes[ALLOC_TYPE_MAX];
46
int wrapmem_init(void)
48
InitializeListHead(&allocs);
49
InitializeListHead(&slack_allocs);
50
nt_spin_lock_init(&alloc_lock);
54
void wrapmem_exit(void)
59
/* free all pointers on the slack list */
60
nt_spin_lock(&alloc_lock);
61
while ((ent = RemoveHeadList(&slack_allocs))) {
62
struct slack_alloc_info *info;
63
info = container_of(ent, struct slack_alloc_info, list);
65
atomic_sub(info->size, &alloc_sizes[ALLOC_TYPE_SLACK]);
69
nt_spin_unlock(&alloc_lock);
72
for (type = 0; type < ALLOC_TYPE_MAX; type++) {
73
int n = atomic_read(&alloc_sizes[type]);
75
WARNING("%d bytes of memory in %d leaking", n, type);
79
nt_spin_lock(&alloc_lock);
80
while ((ent = RemoveHeadList(&allocs))) {
81
struct alloc_info *info;
82
info = container_of(ent, struct alloc_info, list);
83
atomic_sub(info->size, &alloc_sizes[ALLOC_TYPE_SLACK]);
84
WARNING("memory in %d of size %lu allocated at %s(%d) "
88
"leaking; freeing it now", info->type, info->size,
89
info->file, info->line
94
if (info->type == ALLOC_TYPE_ATOMIC ||
95
info->type == ALLOC_TYPE_NON_ATOMIC)
97
else if (info->type == ALLOC_TYPE_VMALLOC)
100
WARNING("invalid type: %d; not freed", info->type);
103
nt_spin_unlock(&alloc_lock);
109
void wrapmem_info(void)
112
enum alloc_type type;
113
for (type = 0; type < ALLOC_TYPE_MAX; type++)
114
INFO("total size of allocations in %d: %d",
115
type, atomic_read(&alloc_sizes[type]));
120
/* allocate memory and add it to list of allocated pointers; if a
121
* driver doesn't free this memory for any reason (buggy driver or we
122
* allocate space behind driver's back since we need more space than
123
* corresponding Windows structure provides etc.), this gets freed
124
* automatically when module is unloaded
126
void *slack_kmalloc(size_t size)
128
struct slack_alloc_info *info;
129
unsigned int flags, n;
132
TRACEENTER4("size = %lu", (unsigned long)size);
134
if (current_irql() < DISPATCH_LEVEL)
138
n = size + sizeof(*info);
139
info = kmalloc(n, flags);
144
nt_spin_lock(&alloc_lock);
145
InsertTailList(&slack_allocs, &info->list);
146
nt_spin_unlock(&alloc_lock);
148
atomic_add(size, &alloc_sizes[ALLOC_TYPE_SLACK]);
150
DBGTRACE4("%p, %p", info, ptr);
151
TRACEEXIT4(return ptr);
154
/* free pointer and remove from list of allocated pointers */
155
void slack_kfree(void *ptr)
157
struct slack_alloc_info *info;
159
TRACEENTER4("%p", ptr);
160
info = ptr - sizeof(*info);
161
nt_spin_lock(&alloc_lock);
162
RemoveEntryList(&info->list);
163
nt_spin_unlock(&alloc_lock);
165
atomic_sub(info->size, &alloc_sizes[ALLOC_TYPE_SLACK]);
171
#if defined(ALLOC_DEBUG)
172
void *wrap_kmalloc(size_t size, unsigned flags, const char *file, int line)
174
struct alloc_info *info;
176
n = size + sizeof(*info);
177
info = kmalloc(n, flags);
180
if (flags & GFP_ATOMIC)
181
info->type = ALLOC_TYPE_ATOMIC;
183
info->type = ALLOC_TYPE_NON_ATOMIC;
185
atomic_add(size, &alloc_sizes[info->type]);
192
nt_spin_lock(&alloc_lock);
193
InsertTailList(&allocs, &info->list);
194
nt_spin_unlock(&alloc_lock);
199
void wrap_kfree(void *ptr)
201
struct alloc_info *info;
202
info = ptr - sizeof(*info);
203
atomic_sub(info->size, &alloc_sizes[info->type]);
205
nt_spin_lock(&alloc_lock);
206
RemoveEntryList(&info->list);
207
nt_spin_unlock(&alloc_lock);
208
if (!(info->type == ALLOC_TYPE_ATOMIC ||
209
info->type == ALLOC_TYPE_NON_ATOMIC))
210
WARNING("invliad type: %d", info->type);
215
void *wrap_vmalloc(unsigned long size, const char *file, int line)
217
struct alloc_info *info;
219
n = size + sizeof(*info);
223
info->type = ALLOC_TYPE_VMALLOC;
225
atomic_add(size, &alloc_sizes[info->type]);
232
nt_spin_lock(&alloc_lock);
233
InsertTailList(&allocs, &info->list);
234
nt_spin_unlock(&alloc_lock);
239
void *wrap__vmalloc(unsigned long size, unsigned int gfp_mask, pgprot_t prot,
240
const char *file, int line)
242
struct alloc_info *info;
244
n = size + sizeof(*info);
245
info = __vmalloc(n, gfp_mask, prot);
248
info->type = ALLOC_TYPE_VMALLOC;
250
atomic_add(size, &alloc_sizes[info->type]);
257
nt_spin_lock(&alloc_lock);
258
InsertTailList(&allocs, &info->list);
259
nt_spin_unlock(&alloc_lock);
264
void wrap_vfree(void *ptr)
266
struct alloc_info *info;
267
info = ptr - sizeof(*info);
268
atomic_sub(info->size, &alloc_sizes[info->type]);
270
nt_spin_lock(&alloc_lock);
271
RemoveEntryList(&info->list);
272
nt_spin_unlock(&alloc_lock);
273
if (info->type != ALLOC_TYPE_VMALLOC)
274
WARNING("invliad type: %d", info->type);
280
void *wrap_ExAllocatePoolWithTag(enum pool_type pool_type, SIZE_T size,
281
ULONG tag, const char *file, int line)
284
no_warn_unused struct alloc_info *info;
286
TRACEENTER4("pool_type: %d, size: %lu, tag: %u", pool_type,
289
if (size <= KMALLOC_THRESHOLD) {
290
if (current_irql() < DISPATCH_LEVEL)
291
addr = wrap_kmalloc(size, GFP_KERNEL, file, line);
293
addr = wrap_kmalloc(size, GFP_ATOMIC, file, line);
295
if (current_irql() < DISPATCH_LEVEL)
296
addr = wrap_vmalloc(size, file, line);
298
addr = wrap__vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM,
299
PAGE_KERNEL, file, line);
302
info = addr - sizeof(*info);
305
TRACEEXIT4(return addr);
309
int alloc_size(enum alloc_type type)
311
if (type >= 0 && type < ALLOC_TYPE_MAX)
312
return atomic_read(&alloc_sizes[type]);
317
#endif // ALLOC_DEBUG