~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

« back to all changes in this revision

Viewing changes to drivers/staging/zram/xvmalloc.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

Show diffs side-by-side

added added

removed removed

Lines of Context:
10
10
 * Released under the terms of GNU General Public License Version 2.0
11
11
 */
12
12
 
 
13
#ifdef CONFIG_ZRAM_DEBUG
 
14
#define DEBUG
 
15
#endif
 
16
 
 
17
#include <linux/module.h>
 
18
#include <linux/kernel.h>
13
19
#include <linux/bitops.h>
14
20
#include <linux/errno.h>
15
21
#include <linux/highmem.h>
46
52
}
47
53
 
48
54
/*
49
 
 * Given <page, offset> pair, provide a derefrencable pointer.
 
55
 * Given <page, offset> pair, provide a dereferencable pointer.
50
56
 * This is called from xv_malloc/xv_free path, so it
51
57
 * needs to be fast.
52
58
 */
200
206
                nextblock->link.prev_page = page;
201
207
                nextblock->link.prev_offset = offset;
202
208
                put_ptr_atomic(nextblock, KM_USER1);
 
209
                /* If there was a next page then the free bits are set. */
 
210
                return;
203
211
        }
204
212
 
205
213
        __set_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
207
215
}
208
216
 
209
217
/*
210
 
 * Remove block from head of freelist. Index 'slindex' identifies the freelist.
211
 
 */
212
 
/*
 * Detach the block sitting at the head of the freelist selected by
 * 'slindex'.  The caller has already looked the block up, so it is
 * passed in directly.  Updates the second-level (and, if the bucket
 * becomes empty, the first-level) bitmap accordingly.
 */
static void remove_block_head(struct xv_pool *pool,
			struct block_header *block, u32 slindex)
{
	u32 flindex = slindex / BITS_PER_LONG;
	struct block_header *newhead;

	/* Advance the freelist head past 'block'. */
	pool->freelist[slindex].page = block->link.next_page;
	pool->freelist[slindex].offset = block->link.next_offset;
	block->link.prev_page = NULL;
	block->link.prev_offset = 0;

	if (pool->freelist[slindex].page) {
		/*
		 * DEBUG ONLY: the new head's prev pointer is never read,
		 * so resetting it is not strictly required — do it anyway
		 * for sanity.
		 */
		newhead = get_ptr_atomic(pool->freelist[slindex].page,
				pool->freelist[slindex].offset, KM_USER1);
		newhead->link.prev_page = NULL;
		newhead->link.prev_offset = 0;
		put_ptr_atomic(newhead, KM_USER1);
	} else {
		/* Bucket is now empty: clear its bit, and the first-level
		 * bit too if the whole group went empty. */
		__clear_bit(slindex % BITS_PER_LONG, &pool->slbitmap[flindex]);
		if (!pool->slbitmap[flindex])
			__clear_bit(flindex, &pool->flbitmap);
	}
}
240
 
 
241
 
/*
242
218
 * Remove block from freelist. Index 'slindex' identifies the freelist.
243
219
 */
244
220
static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
245
221
                        struct block_header *block, u32 slindex)
246
222
{
247
 
        u32 flindex;
 
223
        u32 flindex = slindex / BITS_PER_LONG;
248
224
        struct block_header *tmpblock;
249
225
 
250
 
        if (pool->freelist[slindex].page == page
251
 
           && pool->freelist[slindex].offset == offset) {
252
 
                remove_block_head(pool, block, slindex);
253
 
                return;
254
 
        }
255
 
 
256
 
        flindex = slindex / BITS_PER_LONG;
257
 
 
258
226
        if (block->link.prev_page) {
259
227
                tmpblock = get_ptr_atomic(block->link.prev_page,
260
228
                                block->link.prev_offset, KM_USER1);
270
238
                tmpblock->link.prev_offset = block->link.prev_offset;
271
239
                put_ptr_atomic(tmpblock, KM_USER1);
272
240
        }
 
241
 
 
242
        /* Is this block at the head of the freelist? */
 
243
        if (pool->freelist[slindex].page == page
 
244
           && pool->freelist[slindex].offset == offset) {
 
245
 
 
246
                pool->freelist[slindex].page = block->link.next_page;
 
247
                pool->freelist[slindex].offset = block->link.next_offset;
 
248
 
 
249
                if (pool->freelist[slindex].page) {
 
250
                        struct block_header *tmpblock;
 
251
                        tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
 
252
                                        pool->freelist[slindex].offset,
 
253
                                        KM_USER1);
 
254
                        tmpblock->link.prev_page = NULL;
 
255
                        tmpblock->link.prev_offset = 0;
 
256
                        put_ptr_atomic(tmpblock, KM_USER1);
 
257
                } else {
 
258
                        /* This freelist bucket is empty */
 
259
                        __clear_bit(slindex % BITS_PER_LONG,
 
260
                                    &pool->slbitmap[flindex]);
 
261
                        if (!pool->slbitmap[flindex])
 
262
                                __clear_bit(flindex, &pool->flbitmap);
 
263
                }
 
264
        }
 
265
 
 
266
        block->link.prev_page = NULL;
 
267
        block->link.prev_offset = 0;
 
268
        block->link.next_page = NULL;
 
269
        block->link.next_offset = 0;
273
270
}
274
271
 
275
272
/*
320
317
 
321
318
        return pool;
322
319
}
 
320
EXPORT_SYMBOL_GPL(xv_create_pool);
323
321
 
324
322
/*
 * xv_destroy_pool - Free a pool created with xv_create_pool().
 * @pool: pool to destroy; the caller must have freed all objects in it.
 *
 * Only the pool bookkeeping structure itself is released here.
 */
void xv_destroy_pool(struct xv_pool *pool)
{
	kfree(pool);
}
EXPORT_SYMBOL_GPL(xv_destroy_pool);
328
327
 
329
328
/**
330
329
 * xv_malloc - Allocate block of given size from pool.
378
377
 
379
378
        block = get_ptr_atomic(*page, *offset, KM_USER0);
380
379
 
381
 
        remove_block_head(pool, block, index);
 
380
        remove_block(pool, *page, *offset, block, index);
382
381
 
383
382
        /* Split the block if required */
384
383
        tmpoffset = *offset + size + XV_ALIGN;
413
412
 
414
413
        return 0;
415
414
}
 
415
EXPORT_SYMBOL_GPL(xv_malloc);
416
416
 
417
417
/*
418
418
 * Free block identified with <page, offset>
489
489
        put_ptr_atomic(page_start, KM_USER0);
490
490
        spin_unlock(&pool->lock);
491
491
}
 
492
EXPORT_SYMBOL_GPL(xv_free);
492
493
 
493
494
u32 xv_get_object_size(void *obj)
494
495
{
497
498
        blk = (struct block_header *)((char *)(obj) - XV_ALIGN);
498
499
        return blk->size;
499
500
}
 
501
EXPORT_SYMBOL_GPL(xv_get_object_size);
500
502
 
501
503
/*
502
504
 * Returns total memory used by allocator (userdata + metadata)
505
507
{
506
508
        return pool->total_pages << PAGE_SHIFT;
507
509
}
 
510
EXPORT_SYMBOL_GPL(xv_get_total_size_bytes);