/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist;
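
/*
 * Clear the ptes covering [address, address + size) under one pmd and
 * release each mapped page back to the page allocator; reserved pages
 * and pages that fail VALID_PAGE() are left alone.
 */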
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page = ptep_get_and_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page)) {
			struct page *ptpage = pte_page(page);
			if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
				__free_page(ptpage);
			continue;
		}
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);
}
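
/*
 * Walk the pmd entries under one pgd entry and hand each covered
 * sub-range to free_area_pte().
 */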
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		free_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
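
/*
 * Unmap a kernel virtual range: walk the kernel page tables one pgd
 * entry at a time, then flush the TLB so no stale translations remain.
 */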
void vmfree_area_pages(unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	flush_cache_all();
	do {
		free_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
}
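
/*
 * Fill in the ptes under one pmd. With pages == NULL a fresh page is
 * taken from alloc_page() for every slot (init_mm.page_table_lock is
 * dropped around the allocation, which may sleep); otherwise the
 * caller-supplied pages are mapped, with an extra reference taken so
 * vfree() can release them later.
 */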
static inline int alloc_area_pte (pte_t * pte, unsigned long address,
			unsigned long size, int gfp_mask,
			pgprot_t prot, struct page ***pages)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page * page;

		if (!pages) {
			spin_unlock(&init_mm.page_table_lock);
			page = alloc_page(gfp_mask);
			spin_lock(&init_mm.page_table_lock);
		} else {
			page = (**pages);
			(*pages)++;

			/* Add a reference to the page so we can free later */
			if (page)
				atomic_inc(&page->count);
		}
		if (!pte_none(*pte))
			printk(KERN_ERR "alloc_area_pte: page already exists\n");
		if (!page)
			return -ENOMEM;
		set_pte(pte, mk_pte(page, prot));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return 0;
}
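
/*
 * Allocate pte tables as needed and populate them for the range
 * covered by one pgd entry.
 */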
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address,
			unsigned long size, int gfp_mask,
			pgprot_t prot, struct page ***pages)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		if (alloc_area_pte(pte, address, end - address,
					gfp_mask, prot, pages))
			return -ENOMEM;
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
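
/*
 * Build kernel page tables for [address, address + size) under
 * init_mm.page_table_lock; on failure the caller is expected to
 * unwind with vfree().
 */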
static inline int __vmalloc_area_pages (unsigned long address,
					unsigned long size,
					int gfp_mask,
					pgprot_t prot,
					struct page ***pages)
{
	pgd_t * dir;
	unsigned long end = address + size;
	int ret;

	dir = pgd_offset_k(address);
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(&init_mm, dir, address);
		ret = -ENOMEM;
		if (!pmd)
			break;
		if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot, pages))
			break;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
		ret = 0;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_all();
	return ret;
}
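
/* Convenience wrapper: back the range with freshly allocated pages. */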
int vmalloc_area_pages(unsigned long address, unsigned long size,
		       int gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_pages(address, size, gfp_mask, prot, NULL);
}
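
/*
 * Find a free stretch of vmalloc address space and link a descriptor
 * for it into the sorted vmlist. An extra page is included so every
 * area is followed by an unmapped guard page.
 */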
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
	unsigned long addr, next;
	struct vm_struct **p, *tmp, *area;

	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;

	size += PAGE_SIZE;
	if (!size) {
		kfree(area);
		return NULL;
	}

	addr = VMALLOC_START;
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long) tmp->addr)
			break;
		next = tmp->size + (unsigned long) tmp->addr;
		if (next > addr)
			addr = next;
		if (addr > VMALLOC_END-size)
			goto out;
	}
	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);
	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	return NULL;
}
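
/*
 * Release an area obtained from __vmalloc() or vmap(): unlink it from
 * vmlist, tear down its page tables and free the descriptor.
 */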
void vfree(void * addr)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
#ifdef CONFIG_XENO_PRIV
			/* ioremap()ed frames are not ours to free; just unmap them. */
			if (tmp->flags & VM_IOREMAP)
				zap_page_range(&init_mm, VMALLOC_VMADDR(tmp->addr), tmp->size);
			else
#endif
				vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
			write_unlock(&vmlist_lock);
			kfree(tmp);
			return;
		}
	}
	write_unlock(&vmlist_lock);
	printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}
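
/*
 * Allocate virtually contiguous, page-aligned kernel memory with the
 * given allocation mask and page protection.
 */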
void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
{
	void * addr;
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;
	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;
	addr = area->addr;
	if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask,
				 prot, NULL)) {
		vfree(addr);
		return NULL;
	}
	return addr;
}
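
/*
 * Map a caller-supplied array of pages into a virtually contiguous
 * kernel address range.
 */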
void * vmap(struct page **pages, int count,
	    unsigned long flags, pgprot_t prot)
{
	void * addr;
	struct vm_struct *area;
	unsigned long size = count << PAGE_SHIFT;

	if (!size || size > (max_mapnr << PAGE_SHIFT))
		return NULL;
	area = get_vm_area(size, flags);
	if (!area)
		return NULL;
	addr = area->addr;
	if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, 0,
				 prot, &pages)) {
		vfree(addr);
		return NULL;
	}
	return addr;
}
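
/*
 * Copy up to count bytes from the vmalloc region into buf, reading
 * '\0' for any holes between areas; returns the number of bytes
 * copied.
 */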
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
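
/*
 * Copy up to count bytes from buf into the vmalloc region, silently
 * skipping holes between areas; returns the number of bytes consumed.
 */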
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}