/*
 * Copyright (C) 2006 Giridhar Pemmasani
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include "ntoskernel.h"

/* header prepended to every "slack" allocation so it can be found and
 * freed automatically when the module unloads */
struct slack_alloc_info {
	struct nt_list list;	/* linkage on slack_allocs */
	size_t size;		/* size requested by the caller */
};

/* all debug-tracked allocations (ALLOC_DEBUG builds only) */
static struct nt_list allocs;
/* allocations made behind the driver's back; freed at module unload */
static struct nt_list slack_allocs;
/* protects both lists above */
static spinlock_t alloc_lock;

static struct nt_list vmem_list;
#if defined(ALLOC_DEBUG)
/* bookkeeping header placed immediately before every tracked allocation;
 * NOTE(review): declaration was lost in this copy and is reconstructed
 * from the field accesses below — confirm against ntoskernel.h */
struct alloc_info {
	struct nt_list list;	/* linkage on allocs */
	enum alloc_type type;	/* which allocator produced it */
	size_t size;		/* size requested by the caller */
	const char *file;	/* allocation site, for leak reports */
	int line;
	ULONG tag;		/* pool tag (ExAllocatePoolWithTag) */
};

/* running total of outstanding bytes per allocation type */
static atomic_t alloc_sizes[ALLOC_TYPE_MAX];

/* log the total outstanding allocation size for each type */
void wrapmem_info(void)
{
	enum alloc_type type;

	for (type = 0; type < ALLOC_TYPE_MAX; type++)
		INFO("total size of allocations in %d: %d",
		     type, atomic_read(&alloc_sizes[type]));
}
#endif
/* allocate memory and add it to list of allocated pointers; if a
62
* driver doesn't free this memory for any reason (buggy driver or we
63
* allocate space behind driver's back since we need more space than
64
* corresponding Windows structure provides etc.), this gets freed
65
* automatically when module is unloaded
67
void *slack_kmalloc(size_t size)
69
struct slack_alloc_info *info;
72
ENTER4("size = %lu", (unsigned long)size);
74
if (irql_gfp() & GFP_ATOMIC)
78
info = kmalloc(size + sizeof(*info), flags);
82
spin_lock_bh(&alloc_lock);
83
InsertTailList(&slack_allocs, &info->list);
84
spin_unlock_bh(&alloc_lock);
86
atomic_add(size, &alloc_sizes[ALLOC_TYPE_SLACK]);
88
TRACE4("%p, %p", info, info + 1);
89
EXIT4(return info + 1);
92
/* free pointer and remove from list of allocated pointers */
93
void slack_kfree(void *ptr)
95
struct slack_alloc_info *info;
98
info = ptr - sizeof(*info);
99
spin_lock_bh(&alloc_lock);
100
RemoveEntryList(&info->list);
101
spin_unlock_bh(&alloc_lock);
103
atomic_sub(info->size, &alloc_sizes[ALLOC_TYPE_SLACK]);
109
#if defined(ALLOC_DEBUG)
/* kmalloc replacement that records type, size and allocation site so
 * leaks can be reported and reclaimed at module unload */
void *wrap_kmalloc(size_t size, gfp_t flags, const char *file, int line)
{
	struct alloc_info *info;

	info = kmalloc(size + sizeof(*info), flags);
	if (!info)
		return NULL;
	if (flags & GFP_ATOMIC)
		info->type = ALLOC_TYPE_KMALLOC_ATOMIC;
	else
		info->type = ALLOC_TYPE_KMALLOC_NON_ATOMIC;
	info->size = size;
	info->file = file;
	info->line = line;
	info->tag = 0;
	atomic_add(size, &alloc_sizes[info->type]);
	spin_lock_bh(&alloc_lock);
	InsertTailList(&allocs, &info->list);
	spin_unlock_bh(&alloc_lock);
	TRACE4("%p", info + 1);
	return info + 1;
}
/* zeroing variant of wrap_kmalloc */
void *wrap_kzalloc(size_t size, gfp_t flags, const char *file, int line)
{
	void *ptr = wrap_kmalloc(size, flags, file, line);

	/* fix: only memset on success — the original cleared through a
	 * possibly-NULL pointer when the allocation failed */
	if (ptr)
		memset(ptr, 0, size);
	return ptr;
}
void wrap_kfree(void *ptr)
145
struct alloc_info *info;
150
info = ptr - sizeof(*info);
151
atomic_sub(info->size, &alloc_sizes[info->type]);
153
spin_lock_bh(&alloc_lock);
154
RemoveEntryList(&info->list);
155
spin_unlock_bh(&alloc_lock);
156
if (!(info->type == ALLOC_TYPE_KMALLOC_ATOMIC ||
157
info->type == ALLOC_TYPE_KMALLOC_NON_ATOMIC))
158
WARNING("invliad type: %d", info->type);
163
void *wrap_vmalloc(unsigned long size, const char *file, int line)
165
struct alloc_info *info;
167
info = vmalloc(size + sizeof(*info));
170
info->type = ALLOC_TYPE_VMALLOC_NON_ATOMIC;
172
atomic_add(size, &alloc_sizes[info->type]);
177
spin_lock_bh(&alloc_lock);
178
InsertTailList(&allocs, &info->list);
179
spin_unlock_bh(&alloc_lock);
184
/* __vmalloc replacement; classifies the allocation by gfp_mask */
void *wrap__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
		    const char *file, int line)
{
	struct alloc_info *info;

	info = __vmalloc(size + sizeof(*info), gfp_mask, prot);
	if (!info)
		return NULL;
	if (gfp_mask & GFP_ATOMIC)
		info->type = ALLOC_TYPE_VMALLOC_ATOMIC;
	else
		info->type = ALLOC_TYPE_VMALLOC_NON_ATOMIC;
	info->size = size;
	info->file = file;
	info->line = line;
	info->tag = 0;
	atomic_add(size, &alloc_sizes[info->type]);
	spin_lock_bh(&alloc_lock);
	InsertTailList(&allocs, &info->list);
	spin_unlock_bh(&alloc_lock);
	return info + 1;
}
void wrap_vfree(void *ptr)
211
struct alloc_info *info;
213
info = ptr - sizeof(*info);
214
atomic_sub(info->size, &alloc_sizes[info->type]);
216
spin_lock_bh(&alloc_lock);
217
RemoveEntryList(&info->list);
218
spin_unlock_bh(&alloc_lock);
219
if (!(info->type == ALLOC_TYPE_VMALLOC_ATOMIC ||
220
info->type == ALLOC_TYPE_VMALLOC_NON_ATOMIC))
221
WARNING("invliad type: %d", info->type);
226
/* page allocator replacement that records size and allocation site;
 * note: the recorded size includes the header, matching what
 * wrap_free_pages passes to get_order() */
void *wrap_alloc_pages(gfp_t flags, unsigned int size,
		       const char *file, int line)
{
	struct alloc_info *info;

	size += sizeof(*info);
	info = (struct alloc_info *)__get_free_pages(flags, get_order(size));
	if (!info)
		return NULL;
	info->type = ALLOC_TYPE_PAGES;
	info->size = size;
	info->file = file;
	info->line = line;
	info->tag = 0;
	atomic_add(size, &alloc_sizes[info->type]);
	spin_lock_bh(&alloc_lock);
	InsertTailList(&allocs, &info->list);
	spin_unlock_bh(&alloc_lock);
	return info + 1;
}
void wrap_free_pages(unsigned long ptr, int order)
251
struct alloc_info *info;
253
info = (void *)ptr - sizeof(*info);
254
atomic_sub(info->size, &alloc_sizes[info->type]);
256
spin_lock_bh(&alloc_lock);
257
RemoveEntryList(&info->list);
258
spin_unlock_bh(&alloc_lock);
259
if (info->type != ALLOC_TYPE_PAGES)
260
WARNING("invliad type: %d", info->type);
262
free_pages((unsigned long)info, get_order(info->size));
266
#undef ExAllocatePoolWithTag
267
void *wrap_ExAllocatePoolWithTag(enum pool_type pool_type, SIZE_T size,
268
ULONG tag, const char *file, int line)
271
struct alloc_info *info;
273
ENTER4("pool_type: %d, size: %lu, tag: %u", pool_type, size, tag);
274
addr = ExAllocatePoolWithTag(pool_type, size, tag);
277
info = addr - sizeof(*info);
285
int alloc_size(enum alloc_type type)
287
if (type >= 0 && type < ALLOC_TYPE_MAX)
288
return atomic_read(&alloc_sizes[type]);
293
#endif // ALLOC_DEBUG
295
int wrapmem_init(void)
297
InitializeListHead(&allocs);
298
InitializeListHead(&slack_allocs);
299
InitializeListHead(&vmem_list);
300
spin_lock_init(&alloc_lock);
304
void wrapmem_exit(void)
306
enum alloc_type type;
309
/* free all pointers on the slack list */
311
struct slack_alloc_info *info;
312
spin_lock_bh(&alloc_lock);
313
ent = RemoveHeadList(&slack_allocs);
314
spin_unlock_bh(&alloc_lock);
317
info = container_of(ent, struct slack_alloc_info, list);
319
atomic_sub(info->size, &alloc_sizes[ALLOC_TYPE_SLACK]);
325
for (type = 0; type < ALLOC_TYPE_MAX; type++) {
326
int n = atomic_read(&alloc_sizes[type]);
328
WARNING("%d bytes of memory in %d leaking", n, type);
333
struct alloc_info *info;
335
spin_lock_bh(&alloc_lock);
336
ent = RemoveHeadList(&allocs);
337
spin_unlock_bh(&alloc_lock);
340
info = container_of(ent, struct alloc_info, list);
341
atomic_sub(info->size, &alloc_sizes[ALLOC_TYPE_SLACK]);
342
WARNING("%p in %d of size %zu allocated at %s(%d) "
343
"with tag 0x%08X leaking; freeing it now",
344
info + 1, info->type, info->size, info->file,
345
info->line, info->tag);
346
if (info->type == ALLOC_TYPE_KMALLOC_ATOMIC ||
347
info->type == ALLOC_TYPE_KMALLOC_NON_ATOMIC)
349
else if (info->type == ALLOC_TYPE_VMALLOC_ATOMIC ||
350
info->type == ALLOC_TYPE_VMALLOC_NON_ATOMIC)
352
else if (info->type == ALLOC_TYPE_PAGES)
353
free_pages((unsigned long)info, get_order(info->size));
355
WARNING("invalid type: %d; not freed", info->type);