~ubuntu-branches/ubuntu/utopic/xen/utopic

Viewing changes to xenolinux-2.4.25-sparse/mm/vmalloc.c

  • Committer: Bazaar Package Importer
  • Author(s): Bastian Blank
  • Date: 2010-05-06 15:47:38 UTC
  • mto: (1.3.1) (15.1.1 sid) (4.1.1 experimental)
  • mto: This revision was merged to the branch mainline in revision 3.
  • Revision ID: james.westby@ubuntu.com-20100506154738-agoz0rlafrh1fnq7
Tags: upstream-4.0.0
Import upstream version 4.0.0

/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist;

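/*
 * Tear down the PTEs covering [address, address + size) within a single
 * pmd entry, releasing any valid, non-reserved pages mapped there.
 */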
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                pmd_ERROR(*pmd);
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                pte_t page;
                page = ptep_get_and_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page)) {
                        struct page *ptpage = pte_page(page);
                        if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
                                __free_page(ptpage);
                        continue;
                }
                printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
        } while (address < end);
}

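/*
 * Walk the pmd entries under one pgd entry, handing each populated
 * sub-range to free_area_pte().
 */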
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                pgd_ERROR(*dir);
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                free_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
}

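/*
 * Unmap a vmalloc'ed kernel range and free its backing pages, flushing
 * caches before the teardown and the TLB afterwards.
 */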
void vmfree_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        do {
                free_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        } while (address && (address < end));
        flush_tlb_all();
}

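/*
 * Populate the PTEs covering [address, address + size) within one pmd
 * entry.  With pages == NULL a fresh page is allocated per slot
 * (temporarily dropping init_mm.page_table_lock around alloc_page());
 * otherwise pages are taken from the caller's array and each one gains
 * an extra reference so vfree() can release it later.
 */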
static inline int alloc_area_pte (pte_t * pte, unsigned long address,
                        unsigned long size, int gfp_mask,
                        pgprot_t prot, struct page ***pages)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        do {
                struct page * page;

                if (!pages) {
                        spin_unlock(&init_mm.page_table_lock);
                        page = alloc_page(gfp_mask);
                        spin_lock(&init_mm.page_table_lock);
                } else {
                        page = (**pages);
                        (*pages)++;

                        /* Add a reference to the page so we can free later */
                        if (page)
                                atomic_inc(&page->count);

                }
                if (!pte_none(*pte))
                        printk(KERN_ERR "alloc_area_pte: page already exists\n");
                if (!page)
                        return -ENOMEM;
                set_pte(pte, mk_pte(page, prot));
                address += PAGE_SIZE;
                pte++;
        } while (address < end);
        return 0;
}

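/*
 * Allocate pte tables as needed under one pgd entry and fill them via
 * alloc_area_pte().
 */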
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address,
                        unsigned long size, int gfp_mask,
                        pgprot_t prot, struct page ***pages)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        do {
                pte_t * pte = pte_alloc(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (alloc_area_pte(pte, address, end - address,
                                        gfp_mask, prot, pages))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address < end);
        return 0;
}

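/*
 * Build kernel page-table entries for the whole range under
 * init_mm.page_table_lock, either allocating fresh pages (pages == NULL)
 * or mapping the caller-supplied page array.
 */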
static inline int __vmalloc_area_pages (unsigned long address,
                                        unsigned long size,
                                        int gfp_mask,
                                        pgprot_t prot,
                                        struct page ***pages)
{
        pgd_t * dir;
        unsigned long end = address + size;
        int ret;

        dir = pgd_offset_k(address);
        spin_lock(&init_mm.page_table_lock);
        do {
                pmd_t *pmd;

                pmd = pmd_alloc(&init_mm, dir, address);
                ret = -ENOMEM;
                if (!pmd)
                        break;

                ret = -ENOMEM;
                if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot, pages))
                        break;

                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;

                ret = 0;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_cache_all();
        return ret;
}

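/* Convenience wrapper: back an already reserved range with freshly allocated pages. */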
int vmalloc_area_pages(unsigned long address, unsigned long size,
                       int gfp_mask, pgprot_t prot)
{
        return __vmalloc_area_pages(address, size, gfp_mask, prot, NULL);
}

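/*
 * Reserve a virtual range in [VMALLOC_START, VMALLOC_END) and link it
 * into vmlist.  One extra guard page is added to the requested size so
 * that overruns hit an unmapped page rather than the next area.
 */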
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
        unsigned long addr, next;
        struct vm_struct **p, *tmp, *area;

        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;

        size += PAGE_SIZE;
        if (!size) {
                kfree (area);
                return NULL;
        }

        addr = VMALLOC_START;
        write_lock(&vmlist_lock);
        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                if ((size + addr) < addr)
                        goto out;
                if (size + addr <= (unsigned long) tmp->addr)
                        break;
                next = tmp->size + (unsigned long) tmp->addr;
                if (next > addr)
                        addr = next;
                if (addr > VMALLOC_END-size)
                        goto out;
        }
        area->flags = flags;
        area->addr = (void *)addr;
        area->size = size;
        area->next = *p;
        *p = area;
        write_unlock(&vmlist_lock);
        return area;

out:
        write_unlock(&vmlist_lock);
        kfree(area);
        return NULL;
}

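/*
 * Unlink an area from vmlist and release its mappings.  XenoLinux
 * change: with CONFIG_XENO_PRIV, VM_IOREMAP areas are torn down with
 * zap_page_range() instead of vmfree_area_pages(), presumably because
 * their PTEs map foreign machine frames that this kernel must not
 * free; all other areas take the stock 2.4 path.
 */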
void vfree(void * addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((PAGE_SIZE-1) & (unsigned long) addr) {
                printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
                return;
        }
        write_lock(&vmlist_lock);
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
#ifdef CONFIG_XENO_PRIV
                        if (tmp->flags & VM_IOREMAP)
                                zap_page_range(&init_mm, VMALLOC_VMADDR(tmp->addr), tmp->size);
                        else
#endif
                        vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
                        write_unlock(&vmlist_lock);
                        kfree(tmp);
                        return;
                }
        }
        write_unlock(&vmlist_lock);
        printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}

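/*
 * Allocate a page-aligned, virtually contiguous kernel buffer using the
 * given allocation mask and protection bits.
 */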
void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
{
        void * addr;
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;
        area = get_vm_area(size, VM_ALLOC);
        if (!area)
                return NULL;
        addr = area->addr;
        if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask,
                                 prot, NULL)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}

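/*
 * Map an existing array of pages into a contiguous kernel virtual
 * range; alloc_area_pte() takes an extra reference on each page so
 * vfree() can drop it again.
 */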
void * vmap(struct page **pages, int count,
            unsigned long flags, pgprot_t prot)
{
        void * addr;
        struct vm_struct *area;
        unsigned long size = count << PAGE_SHIFT;

        if (!size || size > (max_mapnr << PAGE_SHIFT))
                return NULL;
        area = get_vm_area(size, flags);
        if (!area) {
                return NULL;
        }
        addr = area->addr;
        if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, 0,
                                 prot, &pages)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}

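/*
 * Copy up to count bytes starting at a vmalloc address into buf,
 * zero-filling any gaps that fall between mapped areas.
 */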
long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        *buf = '\0';
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *buf = *addr;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}

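/*
 * Counterpart of vread(): copy up to count bytes from buf into
 * vmalloc'ed memory, skipping gaps between mapped areas.
 */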
long vwrite(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        read_lock(&vmlist_lock);
        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        *addr = *buf;
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        read_unlock(&vmlist_lock);
        return buf - buf_start;
}
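
For context, callers normally reach this code through the vmalloc()/vfree() pair; in the 2.4 headers vmalloc() is a thin wrapper that ends up in __vmalloc() above. A minimal, hypothetical 2.4-style module using the interface might look like the sketch below (the demo_* names are illustrative and not part of this file):

/* Hypothetical example module -- not part of vmalloc.c. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static char *demo_buf;                 /* illustrative name */

static int __init demo_init(void)
{
        /* Reserve a virtually contiguous 64 KB buffer; the backing pages
         * are allocated one at a time and need not be physically
         * contiguous. */
        demo_buf = vmalloc(64 * 1024);
        if (!demo_buf)
                return -ENOMEM;
        memset(demo_buf, 0, 64 * 1024);
        return 0;
}

static void __exit demo_exit(void)
{
        vfree(demo_buf);        /* unmaps the range and frees the pages */
}

module_init(demo_init);
module_exit(demo_exit);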