/*
 * RAM-backed block device driver.
 *
 * Copyright (C) 2007 Nick Piggin
 * Copyright (C) 2007 Novell Inc.
 *
 * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright
 * of their respective owners.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
#include <linux/buffer_head.h> /* invalidate_bh_lrus() */
#include <linux/slab.h>

#include <asm/uaccess.h>

#define SECTOR_SHIFT		9
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1 << PAGE_SECTORS_SHIFT)
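
/*
 * Worked example (illustrative): with 4K pages, PAGE_SECTORS_SHIFT is 3 and
 * PAGE_SECTORS is 8, so sector 19 lives in the page at radix-tree index
 * 19 >> 3 == 2, at byte offset (19 & 7) << 9 == 1536 within that page.
 */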

/*
 * Each block ramdisk device has a radix_tree brd_pages of pages that stores
 * the pages containing the block device's contents. A brd page's ->index is
 * its offset in PAGE_SIZE units. This is similar to, but in no way connected
 * with, the kernel's pagecache or buffer cache (which sit above our block
 * device).
 */
struct brd_device {
	int		brd_number;

	struct request_queue	*brd_queue;
	struct gendisk		*brd_disk;
	struct list_head	brd_list;

	/*
	 * Backing store of pages and lock to protect it. This is the contents
	 * of the block device.
	 */
	spinlock_t		brd_lock;
	struct radix_tree_root	brd_pages;
};

/*
 * Look up and return a brd's page for a given sector.
 */
static DEFINE_MUTEX(brd_mutex);
static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;

	/*
	 * The page lifetime is protected by the fact that we have opened the
	 * device node -- brd pages will never be deleted under us, so we
	 * don't need any further locking or refcounting.
	 *
	 * This is strictly true for the radix-tree nodes as well (ie. we
	 * don't actually need the rcu_read_lock()), however that is not a
	 * documented feature of the radix-tree API so it is better to be
	 * safe here (we don't have total exclusion from radix tree updates
	 * here, only deletes).
	 */
	rcu_read_lock();
	idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */
	page = radix_tree_lookup(&brd->brd_pages, idx);
	rcu_read_unlock();

	BUG_ON(page && page->index != idx);

	return page;
}
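
/*
 * A NULL return from brd_lookup_page() just means no page has been allocated
 * for that sector yet; readers treat such holes as zero-filled (see
 * copy_from_brd() below).
 */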

/*
 * Look up and return a brd's page for a given sector.
 * If one does not exist, allocate an empty page, and insert that. Then
 * overwrite it.
 */
static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
{
	pgoff_t idx;
	struct page *page;
	gfp_t gfp_flags;

	page = brd_lookup_page(brd, sector);
	if (page)
		return page;

	/*
	 * Must use NOIO because we don't want to recurse back into the
	 * block or filesystem layers from page reclaim.
	 *
	 * Cannot support XIP and highmem, because our ->direct_access
	 * routine for XIP must return memory that is always addressable.
	 * If XIP was reworked to use pfns and kmap throughout, this
	 * restriction might be able to be lifted.
	 */
	gfp_flags = GFP_NOIO | __GFP_ZERO;
#ifndef CONFIG_BLK_DEV_XIP
	gfp_flags |= __GFP_HIGHMEM;
#endif
	page = alloc_page(gfp_flags);
	if (!page)
		return NULL;

	if (radix_tree_preload(GFP_NOIO)) {
		__free_page(page);
		return NULL;
	}

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
		__free_page(page);
		page = radix_tree_lookup(&brd->brd_pages, idx);
		BUG_ON(!page);
		BUG_ON(page->index != idx);
	} else
		page->index = idx;
	spin_unlock(&brd->brd_lock);

	radix_tree_preload_end();

	return page;
}
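
/*
 * Note the insertion race above: if two tasks populate the same index
 * concurrently, radix_tree_insert() fails for the loser, which frees its
 * freshly allocated page and adopts the winner's page from the tree.
 */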

static void brd_free_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;
	pgoff_t idx;

	spin_lock(&brd->brd_lock);
	idx = sector >> PAGE_SECTORS_SHIFT;
	page = radix_tree_delete(&brd->brd_pages, idx);
	spin_unlock(&brd->brd_lock);
	if (page)
		__free_page(page);
}

static void brd_zero_page(struct brd_device *brd, sector_t sector)
{
	struct page *page;

	page = brd_lookup_page(brd, sector);
	if (page)
		clear_highpage(page);
}

/*
 * Free all backing store pages and radix tree. This must only be called when
 * there are no other users of the device.
 */
#define FREE_BATCH 16
static void brd_free_pages(struct brd_device *brd)
{
	unsigned long pos = 0;
	struct page *pages[FREE_BATCH];
	int nr_pages;

	do {
		int i;

		nr_pages = radix_tree_gang_lookup(&brd->brd_pages,
				(void **)pages, pos, FREE_BATCH);

		for (i = 0; i < nr_pages; i++) {
			void *ret;

			BUG_ON(pages[i]->index < pos);
			pos = pages[i]->index;
			ret = radix_tree_delete(&brd->brd_pages, pos);
			BUG_ON(!ret || ret != pages[i]);
			__free_page(pages[i]);
		}

		pos++;

		/*
		 * This assumes radix_tree_gang_lookup always returns as
		 * many pages as possible. If the radix-tree code changes,
		 * so will this have to.
		 */
	} while (nr_pages == FREE_BATCH);
}
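
/*
 * FREE_BATCH bounds the on-stack pages[] array; each iteration resumes the
 * gang lookup at pos, one past the highest index freed by the previous batch.
 */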

/*
 * copy_to_brd_setup must be called before copy_to_brd. It may sleep.
 */
static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
{
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	if (!brd_insert_page(brd, sector))
		return -ENOMEM;
	if (copy < n) {
		sector += copy >> SECTOR_SHIFT;
		if (!brd_insert_page(brd, sector))
			return -ENOMEM;
	}
	return 0;
}
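
/*
 * A single bvec never exceeds PAGE_SIZE bytes, so a copy touches at most two
 * brd pages; one conditional second brd_insert_page() therefore suffices.
 */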

static void discard_from_brd(struct brd_device *brd,
			sector_t sector, size_t n)
{
	while (n >= PAGE_SIZE) {
		/*
		 * Don't want to actually discard pages here because
		 * re-allocating the pages can result in writeback
		 * deadlocks under heavy load.
		 */
		if (0)
			brd_free_page(brd, sector);
		else
			brd_zero_page(brd, sector);
		sector += PAGE_SIZE >> SECTOR_SHIFT;
		n -= PAGE_SIZE;
	}
}

/*
 * Copy n bytes from src to the brd starting at sector. Does not sleep.
 */
static void copy_to_brd(struct brd_device *brd, const void *src,
			sector_t sector, size_t n)
{
	struct page *page;
	void *dst;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	BUG_ON(!page);

	dst = kmap_atomic(page, KM_USER1);
	memcpy(dst + offset, src, copy);
	kunmap_atomic(dst, KM_USER1);

	if (copy < n) {
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		BUG_ON(!page);

		dst = kmap_atomic(page, KM_USER1);
		memcpy(dst, src, copy);
		kunmap_atomic(dst, KM_USER1);
	}
}
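
/*
 * KM_USER1 is used in the two copy helpers because the caller, brd_do_bvec(),
 * already holds a KM_USER0 atomic mapping of the bio's page.
 */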

/*
 * Copy n bytes to dst from the brd starting at sector. Does not sleep.
 */
static void copy_from_brd(void *dst, struct brd_device *brd,
			sector_t sector, size_t n)
{
	struct page *page;
	void *src;
	unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT;
	size_t copy;

	copy = min_t(size_t, n, PAGE_SIZE - offset);
	page = brd_lookup_page(brd, sector);
	if (page) {
		src = kmap_atomic(page, KM_USER1);
		memcpy(dst, src + offset, copy);
		kunmap_atomic(src, KM_USER1);
	} else
		memset(dst, 0, copy);

	if (copy < n) {
		dst += copy;
		sector += copy >> SECTOR_SHIFT;
		copy = n - copy;
		page = brd_lookup_page(brd, sector);
		if (page) {
			src = kmap_atomic(page, KM_USER1);
			memcpy(dst, src, copy);
			kunmap_atomic(src, KM_USER1);
		} else
			memset(dst, 0, copy);
	}
}

/*
 * Process a single bvec of a bio.
 */
static int brd_do_bvec(struct brd_device *brd, struct page *page,
			unsigned int len, unsigned int off, int rw,
			sector_t sector)
{
	void *mem;
	int err = 0;

	if (rw != READ) {
		err = copy_to_brd_setup(brd, sector, len);
		if (err)
			goto out;
	}

	mem = kmap_atomic(page, KM_USER0);
	if (rw == READ) {
		copy_from_brd(mem + off, brd, sector, len);
		flush_dcache_page(page);
	} else {
		flush_dcache_page(page);
		copy_to_brd(brd, mem + off, sector, len);
	}
	kunmap_atomic(mem, KM_USER0);

out:
	return err;
}
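
/*
 * The flush_dcache_page() ordering above matters on aliasing caches: flush
 * after filling the bio page on reads, and before reading it on writes.
 */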

static void brd_make_request(struct request_queue *q, struct bio *bio)
{
	struct block_device *bdev = bio->bi_bdev;
	struct brd_device *brd = bdev->bd_disk->private_data;
	int rw;
	struct bio_vec *bvec;
	sector_t sector;
	int i;
	int err = -EIO;

	sector = bio->bi_sector;
	if (sector + (bio->bi_size >> SECTOR_SHIFT) >
						get_capacity(bdev->bd_disk))
		goto out;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		err = 0;
		discard_from_brd(brd, sector, bio->bi_size);
		goto out;
	}

	rw = bio_rw(bio);
	if (rw == READA)
		rw = READ;

	bio_for_each_segment(bvec, bio, i) {
		unsigned int len = bvec->bv_len;
		err = brd_do_bvec(brd, bvec->bv_page, len,
					bvec->bv_offset, rw, sector);
		if (err)
			break;
		sector += len >> SECTOR_SHIFT;
	}

out:
	bio_endio(bio, err);
}
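
/*
 * Every path above funnels through the single bio_endio() call: -EIO for a
 * request beyond the device capacity, 0 for a discard, or the first per-bvec
 * error (if any) for a regular read or write.
 */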

#ifdef CONFIG_BLK_DEV_XIP
static int brd_direct_access(struct block_device *bdev, sector_t sector,
			void **kaddr, unsigned long *pfn)
{
	struct brd_device *brd = bdev->bd_disk->private_data;
	struct page *page;

	if (!brd)
		return -ENODEV;
	if (sector & (PAGE_SECTORS-1))
		return -EINVAL;
	if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk))
		return -ERANGE;
	page = brd_insert_page(brd, sector);
	if (!page)
		return -ENOMEM;
	*kaddr = page_address(page);
	*pfn = page_to_pfn(page);

	return 0;
}
#endif

static int brd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	int error;
	struct brd_device *brd = bdev->bd_disk->private_data;

	if (cmd != BLKFLSBUF)
		return -ENOTTY;

	/*
	 * ram device BLKFLSBUF has special semantics, we want to actually
	 * release and destroy the ramdisk data.
	 */
	mutex_lock(&brd_mutex);
	mutex_lock(&bdev->bd_mutex);
	error = -EBUSY;
	if (bdev->bd_openers <= 1) {
		/*
		 * Invalidate the cache first, so it isn't written
		 * back to the device.
		 *
		 * Another thread might instantiate more buffercache here,
		 * but there is not much we can do to close that race.
		 */
		invalidate_bh_lrus();
		truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
		brd_free_pages(brd);
		error = 0;
	}
	mutex_unlock(&bdev->bd_mutex);
	mutex_unlock(&brd_mutex);

	return error;
}

static const struct block_device_operations brd_fops = {
	.owner =		THIS_MODULE,
	.ioctl =		brd_ioctl,
#ifdef CONFIG_BLK_DEV_XIP
	.direct_access =	brd_direct_access,
#endif
};

/*
 * And now the module code and kernel interface.
 */
static int rd_nr;
int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
static int max_part;
static int part_shift;
module_param(rd_nr, int, S_IRUGO);
MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
module_param(rd_size, int, S_IRUGO);
MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
module_param(max_part, int, S_IRUGO);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
MODULE_ALIAS("rd");
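
/*
 * Typical usage (illustrative, not part of the interface proper):
 *	modprobe brd rd_nr=4 rd_size=16384
 * creates /dev/ram0../dev/ram3 of 16384 KiB each; set_capacity() in
 * brd_alloc() converts rd_size from KiB to 512-byte sectors ("rd_size * 2").
 */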

#ifndef MODULE
/* Legacy boot options - nonmodular */
static int __init ramdisk_size(char *str)
{
	rd_size = simple_strtol(str, NULL, 0);
	return 1;
}
__setup("ramdisk_size=", ramdisk_size);
#endif

/*
 * The device scheme is derived from loop.c. Keep them in sync where possible
 * (should share code eventually).
 */
static LIST_HEAD(brd_devices);
static DEFINE_MUTEX(brd_devices_mutex);

static struct brd_device *brd_alloc(int i)
{
	struct brd_device *brd;
	struct gendisk *disk;

	brd = kzalloc(sizeof(*brd), GFP_KERNEL);
	if (!brd)
		goto out;
	brd->brd_number		= i;
	spin_lock_init(&brd->brd_lock);
	INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);

	brd->brd_queue = blk_alloc_queue(GFP_KERNEL);
	if (!brd->brd_queue)
		goto out_free_dev;
	blk_queue_make_request(brd->brd_queue, brd_make_request);
	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);

	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
	brd->brd_queue->limits.discard_zeroes_data = 1;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);

	disk = brd->brd_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;
	disk->major		= RAMDISK_MAJOR;
	disk->first_minor	= i << part_shift;
	disk->fops		= &brd_fops;
	disk->private_data	= brd;
	disk->queue		= brd->brd_queue;
	disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
	sprintf(disk->disk_name, "ram%d", i);
	set_capacity(disk, rd_size * 2);

	return brd;

out_free_queue:
	blk_cleanup_queue(brd->brd_queue);
out_free_dev:
	kfree(brd);
out:
	return NULL;
}
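
/*
 * Note that brd_alloc() only builds the data structures; the disk becomes
 * visible via add_disk() in brd_init()/brd_init_one(), so a failure here
 * never exposes a half-initialized device.
 */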

static void brd_free(struct brd_device *brd)
{
	put_disk(brd->brd_disk);
	blk_cleanup_queue(brd->brd_queue);
	brd_free_pages(brd);
	kfree(brd);
}

static struct brd_device *brd_init_one(int i)
{
	struct brd_device *brd;

	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
	}

	brd = brd_alloc(i);
	if (brd) {
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
out:
	return brd;
}

static void brd_del_one(struct brd_device *brd)
{
	list_del(&brd->brd_list);
	del_gendisk(brd->brd_disk);
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) >> part_shift);
	kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM);
	mutex_unlock(&brd_devices_mutex);

	*part = 0;
	return kobj;
}
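
/*
 * brd_probe() backs the blk_register_region() call in brd_init(): opening an
 * as-yet-unused /dev/ramN minor instantiates the device on demand through
 * brd_init_one().
 */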

static int __init brd_init(void)
{
	int i, nr;
	unsigned long range;
	struct brd_device *brd, *next;

	/*
	 * brd module now has a feature to instantiate underlying device
	 * structure on-demand, provided that its device node is accessed.
	 * However, this will not work well with user space tools that don't
	 * know about such "feature". In order to not break any existing
	 * tool, we do the following:
	 *
	 * (1) if rd_nr is specified, create that many upfront, and this
	 *     also becomes a hard limit.
	 * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT
	 *     (default 16) rd devices at module load; users can further
	 *     extend the set of brd devices by creating dev nodes themselves
	 *     and having the kernel automatically instantiate the actual
	 *     devices on demand.
	 */

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that user can decide correct minor number
		 * if [s]he wants to create more devices.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (rd_nr > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (rd_nr) {
		nr = rd_nr;
		range = rd_nr << part_shift;
	} else {
		nr = CONFIG_BLK_DEV_RAM_COUNT;
		range = 1UL << MINORBITS;
	}

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
		return -EIO;

	for (i = 0; i < nr; i++) {
		brd = brd_alloc(i);
		if (!brd)
			goto out_free;
		list_add_tail(&brd->brd_list, &brd_devices);
	}

	/* point of no return */

	list_for_each_entry(brd, &brd_devices, brd_list)
		add_disk(brd->brd_disk);

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), range,
				  THIS_MODULE, brd_probe, NULL, NULL);

	printk(KERN_INFO "brd: module loaded\n");
	return 0;

out_free:
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	return -ENOMEM;
}

static void __exit brd_exit(void)
{
	unsigned long range;
	struct brd_device *brd, *next;

	range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS;

	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), range);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
}

module_init(brd_init);
module_exit(brd_exit);