/* drivers/android/pmem.c
 *
 * Copyright (C) 2007 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
16
#include <linux/miscdevice.h>
17
#include <linux/platform_device.h>
19
#include <linux/file.h>
21
#include <linux/list.h>
22
#include <linux/debugfs.h>
23
#include <linux/android_pmem.h>
24
#include <linux/mempolicy.h>
25
#include <linux/sched.h>
27
#include <asm/uaccess.h>
28
#include <asm/cacheflush.h>
#define PMEM_MAX_DEVICES 10
#define PMEM_MAX_ORDER 128
#define PMEM_MIN_ALLOC PAGE_SIZE
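
/*
 * The allocator below manages the pmem region in power-of-two multiples of
 * PMEM_MIN_ALLOC: a block of order n spans (1 << n) * PMEM_MIN_ALLOC bytes
 * (see PMEM_LEN below), e.g. with a 4K PAGE_SIZE an order-3 block is 32K.
 * The numbers here are illustrative only.
 */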
/* indicates that a reference to this file has been taken via get_pmem_file,
 * the file should not be released until put_pmem_file is called */
#define PMEM_FLAGS_BUSY 0x1
/* indicates that this is a suballocation of a larger master range */
#define PMEM_FLAGS_CONNECTED (0x1 << 1)
/* indicates this is a master and not a sub allocation and that it is mmaped */
#define PMEM_FLAGS_MASTERMAP (0x1 << 2)
/* submap and unsubmap flags indicate:
 * 00: subregion has never been mmaped
 * 10: subregion has been mmaped, reference to the mm was taken
 * 11: subregion has been released, reference to the mm still held
 * 01: subregion has been released, reference to the mm has been released
 */
#define PMEM_FLAGS_SUBMAP (0x1 << 3)
#define PMEM_FLAGS_UNSUBMAP (0x1 << 4)
struct pmem_data {
	/* in alloc mode: an index into the bitmap
	 * in no_alloc mode: the size of the allocation */
	int index;
	/* see flags above for descriptions */
	unsigned int flags;
	/* protects this data field, if the mm_mmap sem will be held at the
	 * same time as this sem, the mm sem must be taken first (as this is
	 * the order for vma_open and vma_close ops */
	struct rw_semaphore sem;
	/* info about the mmapping process */
	struct vm_area_struct *vma;
	/* task struct of the mapping process */
	struct task_struct *task;
	/* process id of the mapping process */
	pid_t pid;
	/* file descriptor of the master */
	int master_fd;
	/* file struct of the master */
	struct file *master_file;
	/* a list of currently available regions if this is a suballocation */
	struct list_head region_list;
	/* a linked list of data so we can access them for debugging */
	struct list_head list;
	/* reference count taken via get_pmem_file/put_pmem_file */
	int ref;
};
struct pmem_bits {
	unsigned allocated:1;		/* 1 if allocated, 0 if free */
	unsigned order:7;		/* size of the region in pmem space */
};

struct pmem_region_node {
	struct pmem_region region;
	struct list_head list;
};
#define PMEM_DEBUG_MSGS 0
#if PMEM_DEBUG_MSGS
#define DLOG(fmt,args...) \
	do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \
		    ##args); } while (0)
#else
#define DLOG(x...) do {} while (0)
#endif
struct pmem_info {
	struct miscdevice dev;
	/* physical start address of the remapped pmem space */
	unsigned long base;
	/* virtual start address of the remapped pmem space */
	unsigned char __iomem *vbase;
	/* total size of the pmem space */
	unsigned long size;
	/* number of entries in the pmem space */
	unsigned long num_entries;
	/* pfn of the garbage page in memory */
	unsigned long garbage_pfn;
	/* index of the garbage page in the pmem space */
	int garbage_index;
	/* the bitmap for the region indicating which entries are allocated
	 * and which are free */
	struct pmem_bits *bitmap;
	/* indicates the region should not be managed with an allocator */
	unsigned no_allocator;
	/* indicates maps of this region should be cached, if a mix of
	 * cached and uncached is desired, set this and open the device with
	 * O_SYNC to get an uncached region */
	unsigned cached;
	unsigned buffered;
	/* in no_allocator mode the first mapper gets the whole space and sets
	 * this flag */
	unsigned allocated;
	/* for debugging, creates a list of pmem file structs, the
	 * data_list_sem should be taken before pmem_data->sem if both are
	 * needed */
	struct semaphore data_list_sem;
	struct list_head data_list;
	/* pmem_sem protects the bitmap array
	 * a write lock should be held when modifying entries in bitmap
	 * a read lock should be held when reading data from bits or
	 * dereferencing a pointer into bitmap
	 *
	 * pmem_data->sem protects the pmem data of a particular file
	 * Many of the functions that require the pmem_data->sem have a non-
	 * locking version for when the caller is already holding that sem.
	 *
	 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
	 * down(pmem_data->sem) => down(bitmap_sem)
	 */
	struct rw_semaphore bitmap_sem;

	long (*ioctl)(struct file *, unsigned int, unsigned long);
	int (*release)(struct inode *, struct file *);
};
static struct pmem_info pmem[PMEM_MAX_DEVICES];

#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated)
#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
	PMEM_LEN(id, index))
#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(index) + pmem[id].vbase)
#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
	PMEM_LEN(id, index))
#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
	(!(data->flags & PMEM_FLAGS_UNSUBMAP)))
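
/*
 * Worked example of the bitmap arithmetic above (illustrative values,
 * assuming PMEM_MIN_ALLOC is the usual 4K): an entry at index 4 with
 * order 2 covers indices 4-7, i.e. PMEM_LEN == (1 << 2) * 4K == 16K,
 * starting PMEM_OFFSET(4) == 16K into the region.  Its buddy is
 * PMEM_BUDDY_INDEX == 4 ^ (1 << 2) == 0, and the following entry is
 * PMEM_NEXT_INDEX == 4 + (1 << 2) == 8.
 */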
static int pmem_release(struct inode *, struct file *);
static int pmem_mmap(struct file *, struct vm_area_struct *);
static int pmem_open(struct inode *, struct file *);
static long pmem_ioctl(struct file *, unsigned int, unsigned long);

struct file_operations pmem_fops = {
	.release = pmem_release,
	.mmap = pmem_mmap,
	.open = pmem_open,
	.unlocked_ioctl = pmem_ioctl,
};
static int get_id(struct file *file)
	return MINOR(file->f_dentry->d_inode->i_rdev);

static int is_pmem_file(struct file *file)
	if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))
	if (unlikely(id >= PMEM_MAX_DEVICES))
	if (unlikely(file->f_dentry->d_inode->i_rdev !=
		     MKDEV(MISC_MAJOR, pmem[id].dev.minor)))

static int has_allocation(struct file *file)
	struct pmem_data *data;
	/* check is_pmem_file first if not accessed via pmem_file_ops */
	if (unlikely(!file->private_data))
	data = (struct pmem_data *)file->private_data;
	if (unlikely(data->index < 0))
static int is_master_owner(struct file *file)
	struct file *master_file;
	struct pmem_data *data;
	int put_needed, ret = 0;

	if (!is_pmem_file(file) || !has_allocation(file))
	data = (struct pmem_data *)file->private_data;
	if (PMEM_FLAGS_MASTERMAP & data->flags)
	master_file = fget_light(data->master_fd, &put_needed);
	if (master_file && data->master_file == master_file)
	fput_light(master_file, put_needed);
static int pmem_free(int id, int index)
	/* caller should hold the write lock on pmem_sem! */
	int buddy, curr = index;
	DLOG("index %d\n", index);

	if (pmem[id].no_allocator) {
		pmem[id].allocated = 0;

	/* clean up the bitmap, merging any buddies */
	pmem[id].bitmap[curr].allocated = 0;
	/* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
	 * if the buddy is also free, merge them
	 * repeat until the buddy is not free or the end of the bitmap is reached
	 */
		buddy = PMEM_BUDDY_INDEX(id, curr);
		if (PMEM_IS_FREE(id, buddy) &&
		    PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
			PMEM_ORDER(id, buddy)++;
			PMEM_ORDER(id, curr)++;
			curr = min(buddy, curr);
	} while (curr < pmem[id].num_entries);
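
/*
 * Illustrative merge: freeing index 4 (order 2) when its buddy, index 0,
 * is already free at order 2 bumps both entries to order 3, so index 0 now
 * describes one free 8-entry block; the loop then retries with the order-3
 * buddy at index 8, and so on until a busy buddy or the end of the bitmap
 * stops it.
 */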
static void pmem_revoke(struct file *file, struct pmem_data *data);

static int pmem_release(struct inode *inode, struct file *file)
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	int id = get_id(file), ret = 0;

	down(&pmem[id].data_list_sem);
	/* if this file is a master, revoke all the memory in the connected
	 * files */
	if (PMEM_FLAGS_MASTERMAP & data->flags) {
		struct pmem_data *sub_data;
		list_for_each(elt, &pmem[id].data_list) {
			sub_data = list_entry(elt, struct pmem_data, list);
			down_read(&sub_data->sem);
			if (PMEM_IS_SUBMAP(sub_data) &&
			    file == sub_data->master_file) {
				up_read(&sub_data->sem);
				pmem_revoke(file, sub_data);
				up_read(&sub_data->sem);
	list_del(&data->list);
	up(&pmem[id].data_list_sem);

	down_write(&data->sem);

	/* if it's not a connected file and it has an allocation, free it */
	if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
		down_write(&pmem[id].bitmap_sem);
		ret = pmem_free(id, data->index);
		up_write(&pmem[id].bitmap_sem);

	/* if this file is a submap (mapped, connected file), downref the
	 * task struct */
	if (PMEM_FLAGS_SUBMAP & data->flags)
		put_task_struct(data->task);

	file->private_data = NULL;

	list_for_each_safe(elt, elt2, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
	BUG_ON(!list_empty(&data->region_list));

	up_write(&data->sem);

	if (pmem[id].release)
		ret = pmem[id].release(inode, file);
static int pmem_open(struct inode *inode, struct file *file)
	struct pmem_data *data;
	int id = get_id(file);

	DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file));
	/* setup file->private_data to indicate it's unmapped */
	/* you can only open a pmem device one time */
	if (file->private_data != NULL)
	data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL);
		printk("pmem: unable to allocate memory for pmem metadata.\n");
	data->master_file = NULL;
	INIT_LIST_HEAD(&data->region_list);
	init_rwsem(&data->sem);

	file->private_data = data;
	INIT_LIST_HEAD(&data->list);

	down(&pmem[id].data_list_sem);
	list_add(&data->list, &pmem[id].data_list);
	up(&pmem[id].data_list_sem);
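
/*
 * pmem_order() turns a byte length into a buddy order: the length is rounded
 * up to a whole number of PMEM_MIN_ALLOC units and the order is the smallest
 * n such that (1 << n) units cover it.  For example (assuming 4K units), a
 * 20K request needs 5 units and therefore gets order 3 (8 units, 32K).
 */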
static unsigned long pmem_order(unsigned long len)
	len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC;
	for (i = 0; i < sizeof(len)*8; i++)
static int pmem_allocate(int id, unsigned long len)
	/* caller should hold the write lock on pmem_sem! */
	/* return the corresponding pdata[] entry */
	int end = pmem[id].num_entries;
	unsigned long order = pmem_order(len);

	if (pmem[id].no_allocator) {
		DLOG("no allocator");
		if ((len > pmem[id].size) || pmem[id].allocated)
		pmem[id].allocated = 1;

	if (order > PMEM_MAX_ORDER)
	DLOG("order %lx\n", order);

	/* look through the bitmap:
	 * if you find a free slot of the correct order use it
	 * otherwise, use the best fit (smallest with size > order) slot
	 */
		if (PMEM_IS_FREE(id, curr)) {
			if (PMEM_ORDER(id, curr) == (unsigned char)order) {
				/* set the not free bit and clear others */
			if (PMEM_ORDER(id, curr) > (unsigned char)order &&
			    PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
		curr = PMEM_NEXT_INDEX(id, curr);

	/* if best_fit < 0, there are no suitable slots,
	 * return an error */
		printk("pmem: no space left to allocate!\n");

	/* now partition the best fit:
	 * split the slot into 2 buddies of order - 1
	 * repeat until the slot is of the correct order
	 */
	while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
		PMEM_ORDER(id, best_fit) -= 1;
		buddy = PMEM_BUDDY_INDEX(id, best_fit);
		PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
	pmem[id].bitmap[best_fit].allocated = 1;
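
/*
 * Illustrative split: to satisfy an order-1 request out of a free order-3
 * best_fit at index 0, the loop lowers index 0 to order 2 (creating a free
 * buddy at index 4), then to order 1 (creating a free buddy at index 2),
 * and finally marks index 0, now order 1, as allocated.
 */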
static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot)
	int id = get_id(file);
#ifdef pgprot_noncached
	if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
		return pgprot_noncached(vma_prot);
#endif
#ifdef pgprot_ext_buffered
	else if (pmem[id].buffered)
		return pgprot_ext_buffered(vma_prot);
#endif

static unsigned long pmem_start_addr(int id, struct pmem_data *data)
	if (pmem[id].no_allocator)
		return PMEM_START_ADDR(id, 0);
	return PMEM_START_ADDR(id, data->index);

static void *pmem_start_vaddr(int id, struct pmem_data *data)
	return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase;

static unsigned long pmem_len(int id, struct pmem_data *data)
	if (pmem[id].no_allocator)
	return PMEM_LEN(id, data->index);
static int pmem_map_garbage(int id, struct vm_area_struct *vma,
			    struct pmem_data *data, unsigned long offset,
			    unsigned long len)
	int i, garbage_pages = len >> PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
	for (i = 0; i < garbage_pages; i++) {
		if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
				  pmem[id].garbage_pfn))

static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma,
				struct pmem_data *data, unsigned long offset,
				unsigned long len)
	DLOG("unmap offset %lx len %lx\n", offset, len);
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));

	garbage_pages = len >> PAGE_SHIFT;
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	pmem_map_garbage(id, vma, data, offset, len);
static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
			      struct pmem_data *data, unsigned long offset,
			      unsigned long len)
	DLOG("map offset %lx len %lx\n", offset, len);
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));

	if (io_remap_pfn_range(vma, vma->vm_start + offset,
			       (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
			       len, vma->vm_page_prot)) {

static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
				struct pmem_data *data, unsigned long offset,
				unsigned long len)
	/* hold the mm sem for the vma you are modifying when you call this */
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	return pmem_map_pfn_range(id, vma, data, offset, len);
static void pmem_vma_open(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;
	int id = get_id(file);
	/* this should never be called as we don't support copying pmem
	 * ranges via fork */
	BUG_ON(!has_allocation(file));
	down_write(&data->sem);
	/* remap the garbage pages, forkers don't get access to the data */
	pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_end - vma->vm_start);
	up_write(&data->sem);

static void pmem_vma_close(struct vm_area_struct *vma)
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;

	DLOG("current %u ppid %u file %p count %d\n", current->pid,
	     current->parent->pid, file, file_count(file));
	if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
		printk(KERN_WARNING "pmem: something is very wrong, you are "
		       "closing a vm backing an allocation that doesn't "
	down_write(&data->sem);
	if (data->vma == vma) {
		if ((data->flags & PMEM_FLAGS_CONNECTED) &&
		    (data->flags & PMEM_FLAGS_SUBMAP))
			data->flags |= PMEM_FLAGS_UNSUBMAP;
	/* the kernel is going to free this vma now anyway */
	up_write(&data->sem);

static struct vm_operations_struct vm_ops = {
	.open = pmem_vma_open,
	.close = pmem_vma_close,
};
static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
	struct pmem_data *data;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	int ret = 0, id = get_id(file);

	if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
		printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
		       " and a multiple of the page size.\n");

	data = (struct pmem_data *)file->private_data;
	down_write(&data->sem);
	/* check this file isn't already mmaped, for submaps check this file
	 * has never been mmaped */
	if ((data->flags & PMEM_FLAGS_MASTERMAP) ||
	    (data->flags & PMEM_FLAGS_SUBMAP) ||
	    (data->flags & PMEM_FLAGS_UNSUBMAP)) {
		printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
		       "this file is already mmaped. %x\n", data->flags);

	/* if file->private_data == unalloced, alloc */
	if (data && data->index == -1) {
		down_write(&pmem[id].bitmap_sem);
		index = pmem_allocate(id, vma->vm_end - vma->vm_start);
		up_write(&pmem[id].bitmap_sem);

	/* either no space was available or an error occurred */
	if (!has_allocation(file)) {
		printk("pmem: could not find allocation for map.\n");

	if (pmem_len(id, data) < vma_size) {
		printk(KERN_WARNING "pmem: mmap size [%lu] does not match "
		       "size of backing region [%lu].\n", vma_size,

	vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot);

	if (data->flags & PMEM_FLAGS_CONNECTED) {
		struct pmem_region_node *region_node;
		struct list_head *elt;
		if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
			printk("pmem: mmap failed in kernel!\n");

		list_for_each(elt, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
			DLOG("remapping file: %p %lx %lx\n", file,
			     region_node->region.offset,
			     region_node->region.len);
			if (pmem_remap_pfn_range(id, vma, data,
						 region_node->region.offset,
						 region_node->region.len)) {
		data->flags |= PMEM_FLAGS_SUBMAP;
		get_task_struct(current->group_leader);
		data->task = current->group_leader;
		data->pid = current->pid;
		DLOG("submapped file %p vma %p pid %u\n", file, vma,
	if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
		printk(KERN_INFO "pmem: mmap failed in kernel!\n");
	data->flags |= PMEM_FLAGS_MASTERMAP;
	data->pid = current->pid;

	vma->vm_ops = &vm_ops;

	up_write(&data->sem);
/* the following are the api for accessing pmem regions by other drivers
 * from inside the kernel */
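
/*
 * Typical in-kernel usage sketch (illustrative only; error handling and the
 * surrounding driver code are omitted, and fd stands for a pmem file
 * descriptor passed in from userspace):
 *
 *	unsigned long start, vstart, len;
 *	struct file *filp;
 *
 *	if (!get_pmem_file(fd, &start, &vstart, &len, &filp)) {
 *		flush_pmem_file(filp, 0, len);
 *		... program hardware with the physical range start..start+len ...
 *		put_pmem_file(filp);
 *	}
 */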
int get_pmem_user_addr(struct file *file, unsigned long *start,
		       unsigned long *len)
	struct pmem_data *data;
	if (!is_pmem_file(file) || !has_allocation(file)) {
		printk(KERN_INFO "pmem: requested pmem data from invalid"
	data = (struct pmem_data *)file->private_data;
	down_read(&data->sem);
	*start = data->vma->vm_start;
	*len = data->vma->vm_end - data->vma->vm_start;

int get_pmem_addr(struct file *file, unsigned long *start,
		  unsigned long *vstart, unsigned long *len)
	struct pmem_data *data;

	if (!is_pmem_file(file) || !has_allocation(file)) {
	data = (struct pmem_data *)file->private_data;
	if (data->index == -1) {
		printk(KERN_INFO "pmem: requested pmem data from file with no "
	down_read(&data->sem);
	*start = pmem_start_addr(id, data);
	*len = pmem_len(id, data);
	*vstart = (unsigned long)pmem_start_vaddr(id, data);
	down_write(&data->sem);
	up_write(&data->sem);
int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
		  unsigned long *len, struct file **filp)
	if (unlikely(file == NULL)) {
		printk(KERN_INFO "pmem: requested data from file descriptor "
		       "that doesn't exist.\n");

	if (get_pmem_addr(file, start, vstart, len))

void put_pmem_file(struct file *file)
	struct pmem_data *data;

	if (!is_pmem_file(file))
	data = (struct pmem_data *)file->private_data;

	down_write(&data->sem);
	if (data->ref == 0) {
		printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
		       pmem[id].dev.name, data->pid);
	up_write(&data->sem);
void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
	struct pmem_data *data;
	struct pmem_region_node *region_node;
	struct list_head *elt;
	void *flush_start, *flush_end;

	if (!is_pmem_file(file) || !has_allocation(file)) {
	data = (struct pmem_data *)file->private_data;
	if (!pmem[id].cached)

	down_read(&data->sem);
	vaddr = pmem_start_vaddr(id, data);
	/* if this isn't a submapped file, flush the whole thing */
	if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
		dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
	/* otherwise, flush the region of the file we are drawing */
	list_for_each(elt, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		if ((offset >= region_node->region.offset) &&
		    ((offset + len) <= (region_node->region.offset +
			region_node->region.len))) {
			flush_start = vaddr + region_node->region.offset;
			flush_end = flush_start + region_node->region.len;
			dmac_flush_range(flush_start, flush_end);
static int pmem_connect(unsigned long connect, struct file *file)
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	struct pmem_data *src_data;
	struct file *src_file;
	int ret = 0, put_needed;

	down_write(&data->sem);
	/* retrieve the src file and check it is a pmem file with an alloc */
	src_file = fget_light(connect, &put_needed);
	DLOG("connect %p to %p\n", file, src_file);
		printk("pmem: src file not found!\n");

	if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
		printk(KERN_INFO "pmem: src file is not a pmem file or has no "
	src_data = (struct pmem_data *)src_file->private_data;

	if (has_allocation(file) && (data->index != src_data->index)) {
		printk("pmem: file is already mapped but doesn't match this"
	data->index = src_data->index;
	data->flags |= PMEM_FLAGS_CONNECTED;
	data->master_fd = connect;
	data->master_file = src_file;

	fput_light(src_file, put_needed);

	up_write(&data->sem);
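
/*
 * Rough lifecycle of a connected (client) fd, as implemented above and in
 * pmem_mmap()/pmem_remap(): the client opens the device, connects it to a
 * master pmem fd (pmem_connect() records master_fd/master_file and sets
 * PMEM_FLAGS_CONNECTED), mmaps it (which maps the garbage page and sets
 * PMEM_FLAGS_SUBMAP), and the master's owner then grants access to
 * sub-ranges via pmem_remap().  When the master file is released, all of
 * its connected submaps are torn down again through pmem_revoke().
 */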
static void pmem_unlock_data_and_mm(struct pmem_data *data,
				    struct mm_struct *mm)
	up_write(&data->sem);
		up_write(&mm->mmap_sem);

static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data,
				 struct mm_struct **locked_mm)
	struct mm_struct *mm = NULL;

	down_read(&data->sem);
	if (PMEM_IS_SUBMAP(data)) {
		mm = get_task_mm(data->task);
			printk("pmem: can't remap, task is gone!\n");
	down_write(&mm->mmap_sem);

	down_write(&data->sem);
	/* check that the file didn't get mmaped before we could take the
	 * data sem, this should be safe b/c you can only submap each file
	 * once */
	if (PMEM_IS_SUBMAP(data) && !mm) {
		pmem_unlock_data_and_mm(data, mm);
		up_write(&data->sem);
	/* now check that vma.mm is still there, it could have been
	 * deleted by vma_close before we could get the data->sem */
	if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) {
		/* might as well release this */
		if (data->flags & PMEM_FLAGS_SUBMAP) {
			put_task_struct(data->task);
		/* lower the submap flag to show the mm is gone */
		data->flags &= ~(PMEM_FLAGS_SUBMAP);

	pmem_unlock_data_and_mm(data, mm);
int pmem_remap(struct pmem_region *region, struct file *file,
	       unsigned int operation)
	struct pmem_region_node *region_node;
	struct mm_struct *mm = NULL;
	struct list_head *elt, *elt2;
	int id = get_id(file);
	struct pmem_data *data = (struct pmem_data *)file->private_data;

	/* pmem region must be aligned on a page boundary */
	if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) ||
		     !PMEM_IS_PAGE_ALIGNED(region->len))) {
		printk("pmem: request for unaligned pmem suballocation "
		       "%lx %lx\n", region->offset, region->len);

	/* if userspace requests a region of len 0, there's nothing to do */
	if (region->len == 0)

	/* lock the mm and data */
	ret = pmem_lock_data_and_mm(file, data, &mm);

	/* only the owner of the master file can remap the client fds */
	if (!is_master_owner(file)) {
		printk("pmem: remap requested from non-master process\n");

	/* check that the requested range is within the src allocation */
	if (unlikely((region->offset > pmem_len(id, data)) ||
		     (region->len > pmem_len(id, data)) ||
		     (region->offset + region->len > pmem_len(id, data)))) {
		printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n");

	if (operation == PMEM_MAP) {
		region_node = kmalloc(sizeof(struct pmem_region_node),
				      GFP_KERNEL);
			printk(KERN_INFO "No space to allocate metadata!");
		region_node->region = *region;
		list_add(&region_node->list, &data->region_list);
	} else if (operation == PMEM_UNMAP) {
		list_for_each_safe(elt, elt2, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
			if (region->len == 0 ||
			    (region_node->region.offset == region->offset &&
			     region_node->region.len == region->len)) {
			printk("pmem: Unmap region does not map any mapped "

	if (data->vma && PMEM_IS_SUBMAP(data)) {
		if (operation == PMEM_MAP)
			ret = pmem_remap_pfn_range(id, data->vma, data,
						   region->offset, region->len);
		else if (operation == PMEM_UNMAP)
			ret = pmem_unmap_pfn_range(id, data->vma, data,
						   region->offset, region->len);

	pmem_unlock_data_and_mm(data, mm);
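
/*
 * Note that pmem_remap() does two separable things: it keeps the region_list
 * bookkeeping up to date (so later mmaps and flushes know which sub-ranges
 * have been granted), and, only if the client has already submapped its vma,
 * it immediately updates the page tables via pmem_remap_pfn_range()/
 * pmem_unmap_pfn_range() above.
 */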
static void pmem_revoke(struct file *file, struct pmem_data *data)
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	struct mm_struct *mm = NULL;
	int id = get_id(file);

	data->master_file = NULL;
	ret = pmem_lock_data_and_mm(file, data, &mm);
	/* if lock_data_and_mm fails either the task that mapped the fd, or
	 * the vma that mapped it have already gone away, nothing more
	 * needs to be done */
	/* unmap everything */
	/* delete the regions and region list; nothing is mapped any more */
	list_for_each_safe(elt, elt2, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node,
		pmem_unmap_pfn_range(id, data->vma, data,
				     region_node->region.offset,
				     region_node->region.len);
	/* delete the master file */
	pmem_unlock_data_and_mm(data, mm);

static void pmem_get_size(struct pmem_region *region, struct file *file)
	struct pmem_data *data = (struct pmem_data *)file->private_data;
	int id = get_id(file);

	if (!has_allocation(file)) {
	region->offset = pmem_start_addr(id, data);
	region->len = pmem_len(id, data);
	DLOG("offset %lx len %lx\n", region->offset, region->len);
static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	struct pmem_data *data;
	int id = get_id(file);

		struct pmem_region region;
		if (!has_allocation(file)) {
		data = (struct pmem_data *)file->private_data;
		region.offset = pmem_start_addr(id, data);
		region.len = pmem_len(id, data);
		printk(KERN_INFO "pmem: request for physical address of pmem region "
		       "from process %d.\n", current->pid);
		if (copy_to_user((void __user *)arg, &region,
				 sizeof(struct pmem_region)))

		struct pmem_region region;
		if (copy_from_user(&region, (void __user *)arg,
				   sizeof(struct pmem_region)))
		data = (struct pmem_data *)file->private_data;
		return pmem_remap(&region, file, PMEM_MAP);

		struct pmem_region region;
		if (copy_from_user(&region, (void __user *)arg,
				   sizeof(struct pmem_region)))
		data = (struct pmem_data *)file->private_data;
		return pmem_remap(&region, file, PMEM_UNMAP);

		struct pmem_region region;
		pmem_get_size(&region, file);
		if (copy_to_user((void __user *)arg, &region,
				 sizeof(struct pmem_region)))

	case PMEM_GET_TOTAL_SIZE:
		struct pmem_region region;
		DLOG("get total size\n");
		region.len = pmem[id].size;
		if (copy_to_user((void __user *)arg, &region,
				 sizeof(struct pmem_region)))

		if (has_allocation(file))
		data = (struct pmem_data *)file->private_data;
		data->index = pmem_allocate(id, arg);

		return pmem_connect(arg, file);

		return pmem[id].ioctl(file, cmd, arg);
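
/*
 * Userspace sketch of the interface handled above (illustrative only: the
 * PMEM_* ioctl numbers and struct pmem_region come from the shared
 * <linux/android_pmem.h> header, the device node name depends on the
 * platform data, and error handling is omitted):
 *
 *	struct pmem_region region;
 *	int fd = open("/dev/pmem", O_RDWR);
 *
 *	// mmap at offset 0 allocates and maps a 1MB sub-region
 *	void *buf = mmap(NULL, 0x100000, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	// query the total size of the backing pmem region
 *	ioctl(fd, PMEM_GET_TOTAL_SIZE, &region);
 */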
static ssize_t debug_open(struct inode *inode, struct file *file)
	file->private_data = inode->i_private;

static ssize_t debug_read(struct file *file, char __user *buf, size_t count,
			  loff_t *ppos)
	struct list_head *elt, *elt2;
	struct pmem_data *data;
	struct pmem_region_node *region_node;
	int id = (int)file->private_data;
	const int debug_bufmax = 4096;
	static char buffer[4096];

	DLOG("debug open\n");
	n = scnprintf(buffer, debug_bufmax,
		      "pid #: mapped regions (offset, len) (offset,len)...\n");

	down(&pmem[id].data_list_sem);
	list_for_each(elt, &pmem[id].data_list) {
		data = list_entry(elt, struct pmem_data, list);
		down_read(&data->sem);
		n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",
		list_for_each(elt2, &data->region_list) {
			region_node = list_entry(elt2, struct pmem_region_node,
			n += scnprintf(buffer + n, debug_bufmax - n,
				       region_node->region.offset,
				       region_node->region.len);
		n += scnprintf(buffer + n, debug_bufmax - n, "\n");
		up_read(&data->sem);
	up(&pmem[id].data_list_sem);

	return simple_read_from_buffer(buf, count, ppos, buffer, n);

static struct file_operations debug_fops = {
	.read = debug_read,
	.open = debug_open,
};

static struct miscdevice pmem_dev = {
int pmem_setup(struct android_pmem_platform_data *pdata,
	       long (*ioctl)(struct file *, unsigned int, unsigned long),
	       int (*release)(struct inode *, struct file *))
	pmem[id].no_allocator = pdata->no_allocator;
	pmem[id].cached = pdata->cached;
	pmem[id].buffered = pdata->buffered;
	pmem[id].base = pdata->start;
	pmem[id].size = pdata->size;
	pmem[id].ioctl = ioctl;
	pmem[id].release = release;
	init_rwsem(&pmem[id].bitmap_sem);
	init_MUTEX(&pmem[id].data_list_sem);
	INIT_LIST_HEAD(&pmem[id].data_list);
	pmem[id].dev.name = pdata->name;
	pmem[id].dev.minor = id;
	pmem[id].dev.fops = &pmem_fops;
	printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached);

	err = misc_register(&pmem[id].dev);
		printk(KERN_ALERT "Unable to register pmem driver!\n");
		goto err_cant_register_device;

	pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC;

	pmem[id].bitmap = kmalloc(pmem[id].num_entries *
				  sizeof(struct pmem_bits), GFP_KERNEL);
	if (!pmem[id].bitmap)
		goto err_no_mem_for_metadata;

	memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) *
	       pmem[id].num_entries);

	for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) {
		if ((pmem[id].num_entries) & 1 << i) {
			PMEM_ORDER(id, index) = i;
			index = PMEM_NEXT_INDEX(id, index);
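
/*
 * The loop above carves the region into the largest possible power-of-two
 * blocks, one per set bit of num_entries, from the top bit down.  For
 * example (illustrative), a region of 0x1400 entries becomes one order-12
 * block at index 0 and one order-10 block at index 0x1000; everything
 * starts out free after the memset above.
 */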
	if (pmem[id].cached)
		pmem[id].vbase = ioremap_cached(pmem[id].base,
						pmem[id].size);
#ifdef ioremap_ext_buffered
	else if (pmem[id].buffered)
		pmem[id].vbase = ioremap_ext_buffered(pmem[id].base,
						      pmem[id].size);
#endif
		pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size);

	if (pmem[id].vbase == 0)
		goto error_cant_remap;

	pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL));
	if (pmem[id].no_allocator)
		pmem[id].allocated = 0;

	debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id,
			    &debug_fops);

	kfree(pmem[id].bitmap);
err_no_mem_for_metadata:
	misc_deregister(&pmem[id].dev);
err_cant_register_device:
static int pmem_probe(struct platform_device *pdev)
	struct android_pmem_platform_data *pdata;

	if (!pdev || !pdev->dev.platform_data) {
		printk(KERN_ALERT "Unable to probe pmem!\n");
	pdata = pdev->dev.platform_data;
	return pmem_setup(pdata, NULL, NULL);

static int pmem_remove(struct platform_device *pdev)
	__free_page(pfn_to_page(pmem[id].garbage_pfn));
	misc_deregister(&pmem[id].dev);

static struct platform_driver pmem_driver = {
	.probe = pmem_probe,
	.remove = pmem_remove,
	.driver = { .name = "android_pmem" }
};

static int __init pmem_init(void)
	return platform_driver_register(&pmem_driver);

static void __exit pmem_exit(void)
	platform_driver_unregister(&pmem_driver);

module_init(pmem_init);
module_exit(pmem_exit);