/*
 * OMAP DMM (Dynamic memory mapping) to IOMMU module
 *
 * Copyright (C) 2010 Texas Instruments. All rights reserved.
 *
 * Authors: Ramesh Gupta <grgupta@ti.com>
 *          Hari Kanigeri <h-kanigeri2@ti.com>
 *
 * dma_map API usage in this code is inspired by Ohad Ben-Cohen's
 * implementation in the dspbridge code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/platform_device.h>
#include <linux/pagemap.h>
#include <linux/kernel.h>
#include <linux/genalloc.h>
#include <linux/eventfd.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mach/map.h>
#include <linux/dma-mapping.h>

#include <plat/iommu.h>
#include <plat/dmm_user.h>

#include "iopgtable.h"

#ifndef CONFIG_DMM_DMA_API
/* Hack: direct cache-maintenance fallback for MM buffers */
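
/*
 * @op selects the cache maintenance operation applied to the range (see
 * the switch below); proc_begin_dma() passes 3 and proc_end_dma() passes 1
 * when this fallback path is built in.
 */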
int temp_user_dma_op(unsigned long start, unsigned long end, int op)
{
	struct mm_struct *mm = current->active_mm;
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(unsigned long, unsigned long);

	switch (op) {
	case 1:		/* invalidate */
		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
		break;
	case 2:		/* clean */
		inner_op = dmac_clean_range;
		outer_op = outer_clean_range;
		break;
	case 3:		/* clean and invalidate */
		inner_op = dmac_flush_range;
		outer_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	down_read(&mm->mmap_sem);
	do {
		struct vm_area_struct *vma = find_vma(mm, start);

		if (!vma || start < vma->vm_start ||
		    vma->vm_flags & (VM_IO | VM_PFNMAP)) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		do {
			unsigned long e = (start | ~PAGE_MASK) + 1;
			unsigned long phys;
			struct page *page;

			if (e > end)
				e = end;

			page = follow_page(vma, start, FOLL_GET);
			if (IS_ERR(page)) {
				up_read(&mm->mmap_sem);
				return PTR_ERR(page);
			}
			if (page) {
				/*
				 * This flushes the userspace address - which
				 * is not what this API was intended to do.
				 * Things may go astray as a result.
				 */
				inner_op((void *)start, (void *)e);

				/*
				 * Now handle the L2 cache.
				 */
				phys = page_to_phys(page) +
						(start & ~PAGE_MASK);
				outer_op(phys, phys + e - start);

				put_page(page);
			}
			start = e;
		} while (start < end && start < vma->vm_end);
	} while (start < end);

	up_read(&mm->mmap_sem);
	return 0;
}
#endif
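
/*
 * get_pool_handle() - return the gen_pool backing the DMM pool with the
 * given id, or NULL if no such pool has been created.
 */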
static inline struct gen_pool *get_pool_handle(struct iovmm_device *iovmm_obj,
						int pool_id)
{
	struct iovmm_pool *pool;

	list_for_each_entry(pool, &iovmm_obj->mmap_pool, list) {
		if (pool->pool_id == pool_id)
			return pool->genpool;
	}

	return NULL;
}

/*
 * This function walks through the page tables to convert a userland
 * virtual address to a physical address.
 */
static u32 __user_va2_pa(struct mm_struct *mm, u32 address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *ptep, pte;

	pgd = pgd_offset(mm, address);
	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
		pmd = pmd_offset(pgd, address);
		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
			ptep = pte_offset_map(pmd, address);
			if (ptep) {
				pte = *ptep;
				if (pte_present(pte))
					return pte & PAGE_MASK;
			}
		}
	}

	return 0;
}

/* remember mapping information */
static struct dmm_map_object *add_mapping_info(struct iodmm_struct *obj,
		struct gen_pool *gen_pool, u32 va, u32 da, u32 size)
{
	struct dmm_map_object *map_obj;

	u32 num_usr_pgs = size / PAGE_SIZE;

	pr_debug("%s: adding map info: va 0x%x virt 0x%x size 0x%x\n",
						__func__, va, da, size);
	map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL);
	if (!map_obj) {
		pr_err("%s: kzalloc failed\n", __func__);
		return NULL;
	}
	INIT_LIST_HEAD(&map_obj->link);

	map_obj->pages = kcalloc(num_usr_pgs, sizeof(struct page *),
							GFP_KERNEL);
	if (!map_obj->pages) {
		pr_err("%s: kcalloc failed\n", __func__);
		kfree(map_obj);
		return NULL;
	}

	map_obj->va = va;
	map_obj->da = da;
	map_obj->size = size;
	map_obj->num_usr_pgs = num_usr_pgs;
	map_obj->gen_pool = gen_pool;
	list_add(&map_obj->link, &obj->map_list);

	return map_obj;
}
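
/*
 * The match helpers below return 0 when the candidate map_obj matches the
 * search key; their callers test the result with '!'.
 */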
static int match_exact_map_obj(struct dmm_map_object *map_obj,
					u32 da, u32 size)
{
	if (map_obj->da == da && map_obj->size != size)
		pr_err("%s: addr match (0x%x), sizes don't (0x%x != 0x%x)\n",
				__func__, da, map_obj->size, size);

	if (map_obj->da == da && map_obj->size == size)
		return 0;

	return 1;
}

static void remove_mapping_information(struct iodmm_struct *obj,
						u32 da, u32 size)
{
	struct dmm_map_object *map_obj;

	pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
								da, size);
	list_for_each_entry(map_obj, &obj->map_list, link) {
		pr_debug("%s: candidate: va 0x%x virt 0x%x size 0x%x\n",
				__func__, map_obj->va, map_obj->da,
				map_obj->size);

		if (!match_exact_map_obj(map_obj, da, size)) {
			pr_debug("%s: match, deleting map info\n", __func__);
			if (map_obj->gen_pool != NULL)
				gen_pool_free(map_obj->gen_pool, da, size);
			list_del(&map_obj->link);
			kfree(map_obj->dma_info.sg);
			kfree(map_obj->pages);
			kfree(map_obj);
			goto out;
		}
		pr_debug("%s: candidate didn't match\n", __func__);
	}

	pr_err("%s: failed to find given map info\n", __func__);
out:
	return;
}

static int match_containing_map_obj(struct dmm_map_object *map_obj,
				u32 va, u32 da, bool check_va, u32 size)
{
	u32 map_obj_end;

	if (check_va) {
		map_obj_end = map_obj->va + map_obj->size;
		if ((va >= map_obj->va) && (va + size <= map_obj_end))
			return 0;
	} else {
		if (da == map_obj->da)
			return 0;
	}

	return 1;
}

/*
 * Find the mapping object based on either MPU virtual address or
 * Device virtual address. Which option to select to search for the mapping
 * is specified with the check_va flag. check_va is set to TRUE if the search
 * is based on the MPU virtual address and FALSE if the search is based on
 * the Device virtual address.
 */
static struct dmm_map_object *find_containing_mapping(
						struct iodmm_struct *obj,
						u32 va, u32 da, bool check_va,
						u32 size)
{
	struct dmm_map_object *map_obj, *temp_map;

	pr_debug("%s: looking for va 0x%x size 0x%x\n", __func__,
								va, size);
	list_for_each_entry_safe(map_obj, temp_map, &obj->map_list, link) {
		pr_debug("%s: candidate: va 0x%x virt 0x%x size 0x%x\n",
				__func__, map_obj->va, map_obj->da,
				map_obj->size);
		if (!match_containing_map_obj(map_obj, va, da, check_va,
								size)) {
			pr_debug("%s: match!\n", __func__);
			goto out;
		}
	}

	pr_debug("%s: no match!\n", __func__);
	map_obj = NULL;
out:
	return map_obj;
}

static inline struct page *get_mapping_page(struct dmm_map_object *map_obj,
								int pg_i)
{
	pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__,
					pg_i, map_obj->num_usr_pgs);
	if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) {
		pr_err("%s: requested pg_i %d is out of mapped range\n",
							__func__, pg_i);
		return NULL;
	}

	return map_obj->pages[pg_i];
}
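
/*
 * The helpers below (built only with CONFIG_DMM_DMA_API) maintain a
 * scatterlist over the pinned user pages of a mapping so that buffer
 * ownership can be passed between the CPU and the device with
 * dma_map_sg()/dma_unmap_sg().
 */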
#ifdef CONFIG_DMM_DMA_API
static int find_first_page_in_cache(struct dmm_map_object *map_obj,
						unsigned long va)
{
	u32 mapped_base_page = map_obj->va >> PAGE_SHIFT;
	u32 requested_base_page = va >> PAGE_SHIFT;
	int pg_index = requested_base_page - mapped_base_page;

	if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) {
		pr_err("%s: failed (got %d)\n", __func__, pg_index);
		return -1;
	}

	pr_debug("%s: first page is %d\n", __func__, pg_index);

	return pg_index;
}

/* Cache operation against kernel address instead of users */
static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
								ssize_t len)
{
	struct page *page;
	unsigned long offset;
	ssize_t rest;
	int ret = 0, i = 0;
	int pg_i;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;
	struct scatterlist *sg;

	sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		pr_err("%s: kcalloc failed\n", __func__);
		return -ENOMEM;
	}

	sg_init_table(sg, num_pages);

	/*
	 * Clean up a previous sg allocation; this may happen if the
	 * application didn't signal the end of the previous DMA.
	 */
	kfree(map_obj->dma_info.sg);

	map_obj->dma_info.sg = sg;
	map_obj->dma_info.num_pages = num_pages;

	pg_i = find_first_page_in_cache(map_obj, start);

	while (len) {
		page = get_mapping_page(map_obj, pg_i);
		if (!page) {
			pr_err("%s: no page for %08lx, pg_i is %x\n", __func__,
								start, pg_i);
			ret = -EINVAL;
			goto out;
		} else if (IS_ERR(page)) {
			pr_err("%s: err page for %08lx(%lu)\n", __func__, start,
							PTR_ERR(page));
			ret = PTR_ERR(page);
			goto out;
		}

		offset = start & ~PAGE_MASK;
		rest = min_t(ssize_t, PAGE_SIZE - offset, len);

		sg_set_page(&sg[i], page, rest, offset);

		len -= rest;
		start += rest;
		pg_i++, i++;
	}

	if (i != map_obj->dma_info.num_pages) {
		pr_err("%s: bad number of sg iterations\n", __func__);
		ret = -EFAULT;
	}

out:
	return ret;
}
static int memory_regain_ownership(struct device *dev,
		struct dmm_map_object *map_obj, unsigned long start,
		ssize_t len, enum dma_data_direction dir)
{
	int pg_i;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;
	struct device_dma_map_info *dma_info = &map_obj->dma_info;

	if (!dma_info->sg)
		return -EINVAL;

	if (num_pages > dma_info->num_pages) {
		pr_err("%s: dma info params invalid\n", __func__);
		return -EINVAL;
	}

	pg_i = find_first_page_in_cache(map_obj, start);
	if (pg_i < 0)
		return -EFAULT;

	dma_unmap_sg(dev, (dma_info->sg), num_pages, dir);

	pr_debug("%s: dma_map_sg unmapped\n", __func__);

	return 0;
}

/* Cache operation against kernel address instead of users */
static int memory_give_ownership(struct device *dev,
		struct dmm_map_object *map_obj, unsigned long start,
		ssize_t len, enum dma_data_direction dir)
{
	int pg_i, sg_num;
	struct device_dma_map_info *dma_info = &map_obj->dma_info;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;

	pg_i = find_first_page_in_cache(map_obj, start);
	if (pg_i < 0)
		return -EFAULT;

	sg_num = dma_map_sg(dev, (dma_info->sg), num_pages, dir);
	if (sg_num < 1) {
		pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
		return -EFAULT;
	}

	pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);

	return 0;
}
#endif
int proc_begin_dma(struct iodmm_struct *obj, const void __user *args)
{
	int status = 0;
	struct dmm_dma_info dma_info;
#ifdef CONFIG_DMM_DMA_API
	struct dmm_map_object *map_obj;
	struct device *dev;

	if (copy_from_user(&dma_info, (void __user *)args,
						sizeof(struct dmm_dma_info)))
		return -EFAULT;
	dev = obj->iovmm->iommu->dev;

	mutex_lock(&obj->iovmm->dmm_map_lock);
	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
					(u32)dma_info.pva,
					dma_info.ul_size, dma_info.dir);
	/* find the requested memory area in the cached mapping information */
	map_obj = find_containing_mapping(obj, (u32)dma_info.pva, 0, true,
							dma_info.ul_size);
	if (!map_obj) {
		pr_err("%s: find_containing_mapping failed\n", __func__);
		status = -EFAULT;
		goto err_out;
	}

	if (memory_give_ownership(dev, map_obj, (u32)dma_info.pva,
				dma_info.ul_size, dma_info.dir)) {
		pr_err("%s: invalid address parameters %x %x\n",
			       __func__, (u32)dma_info.pva, dma_info.ul_size);
		status = -EFAULT;
	}

err_out:
	mutex_unlock(&obj->iovmm->dmm_map_lock);
#else
	if (copy_from_user(&dma_info, (void __user *)args,
						sizeof(struct dmm_dma_info)))
		return -EFAULT;

	/* op 3: clean and invalidate the buffer before the DMA starts */
	status = temp_user_dma_op((u32)dma_info.pva,
			(u32)dma_info.pva + dma_info.ul_size, 3);
#endif
	return status;
}
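
/*
 * proc_end_dma - signal that device DMA on a mapped user buffer has finished
 * @obj:	target dmm object
 * @args:	user pointer to a struct dmm_dma_info (address, size, direction)
 *
 * Returns buffer ownership to the CPU, either through the DMA API or the
 * temp_user_dma_op() fallback.
 */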
int proc_end_dma(struct iodmm_struct *obj, const void __user *args)
{
	int status = 0;
	struct dmm_dma_info dma_info;
#ifdef CONFIG_DMM_DMA_API
	struct device *dev;
	struct dmm_map_object *map_obj;

	if (copy_from_user(&dma_info, (void __user *)args,
						sizeof(struct dmm_dma_info)))
		return -EFAULT;
	dev = obj->iovmm->iommu->dev;

	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
					(u32)dma_info.pva,
					dma_info.ul_size, dma_info.dir);
	mutex_lock(&obj->iovmm->dmm_map_lock);

	/* find the requested memory area in the cached mapping information */
	map_obj = find_containing_mapping(obj, (u32)dma_info.pva, 0, true,
							dma_info.ul_size);
	if (!map_obj) {
		pr_err("%s: find_containing_mapping failed\n", __func__);
		status = -EFAULT;
		goto err_out;
	}

	if (memory_regain_ownership(dev, map_obj, (u32)dma_info.pva,
				dma_info.ul_size, dma_info.dir)) {
		pr_err("%s: invalid address parameters %p %x\n",
			       __func__, dma_info.pva, dma_info.ul_size);
		status = -EFAULT;
	}

err_out:
	mutex_unlock(&obj->iovmm->dmm_map_lock);
#else
	if (copy_from_user(&dma_info, (void __user *)args,
						sizeof(struct dmm_dma_info)))
		return -EFAULT;

	/* op 1: invalidate the buffer now that the DMA has completed */
	status = temp_user_dma_op((u32)dma_info.pva,
			(u32)dma_info.pva + dma_info.ul_size, 1);
#endif
	return status;
}

/*
 * user_to_device_unmap() - unmaps Device virtual buffer.
 * @mmu:	Pointer to iommu handle.
 * @da:	device virtual address of the buffer
 * @size:	buffer size to unmap
 *
 * This function removes a user space buffer's mapping from the device
 * virtual address space.
 */
static int user_to_device_unmap(struct iommu *mmu, u32 da, unsigned size)
{
	unsigned total = size;
	unsigned start = da;

	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(mmu, start);
		if (bytes == 0)
			break;

		dev_dbg(mmu->dev, "%s: unmap 0x%x 0x%x\n",
				__func__, start, bytes);
		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}

	return 0;
}
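
/*
 * __user_un_map() - tear down a single mapping: clear the IOMMU page-table
 * entries, release the pinned pages and drop the bookkeeping entry (and its
 * gen_pool region) recorded by add_mapping_info().
 */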
static int __user_un_map(struct iodmm_struct *obj, u32 map_addr)
{
	int status = 0;
	u32 va_align;
	u32 size_align;
	struct dmm_map_object *map_obj;
	struct page *pg;
	int i;

	va_align = round_down(map_addr, PAGE_SIZE);

	mutex_lock(&obj->iovmm->dmm_map_lock);
	/*
	 * Update DMM structures. Get the size to unmap.
	 * This function returns an error if the VA is not mapped.
	 */
	/* find the requested memory area in the cached mapping information */
	map_obj = find_containing_mapping(obj, 0, map_addr, false, 0);
	if (!map_obj) {
		status = -EFAULT;
		goto err;
	}
	size_align = map_obj->size;
	/* Remove mapping from the page tables. */
	status = user_to_device_unmap(obj->iovmm->iommu, va_align,
								size_align);

	/* release the pinned user pages */
	i = size_align / PAGE_SIZE;
	while (i--) {
		pg = map_obj->pages[i];
		if (pg && pfn_valid(page_to_pfn(pg))) {
			if (page_count(pg) < 1)
				pr_info("%s UNMAP FAILURE !!!\n", __func__);
			else
				page_cache_release(pg);
		}
	}

	/*
	 * A successful unmap should be followed by removal of map_obj
	 * from dmm_map_list, so that mapped memory resource tracking
	 * remains accurate.
	 */
	remove_mapping_information(obj, map_obj->da, map_obj->size);
err:
	mutex_unlock(&obj->iovmm->dmm_map_lock);
	return status;
}

/*
 * user_un_map - Removes User's mapped address
 * @obj:	target dmm object
 * @args:	Mapped address that needs to be unmapped
 *
 * removes user's dmm buffer mapping
 */
int user_un_map(struct iodmm_struct *obj, const void __user *args)
{
	int status = 0;
	u32 map_addr;

	if (copy_from_user(&map_addr, (void __user *)args, sizeof(u32)))
		return -EFAULT;

	status = __user_un_map(obj, map_addr);
	if (status)
		pr_err("%s: Unmap of buffer 0x%x failed\n", __func__,
								map_addr);

	return status;
}

/*
 * user_to_device_map() - maps user to dsp virtual address
 * @mmu:	Pointer to iommu handle.
 * @uva:	Virtual user space address.
 * @da:	device virtual address
 * @size:	Buffer size to map.
 * @usr_pgs:	struct page array pointer where the user pages will be stored
 *
 * This function maps a user space buffer into DSP virtual address.
 */
static int user_to_device_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
						struct page **usr_pgs)
{
	int res = 0;
	int w = 0;
	int pg_i;
	int pg_num;
	u32 pa;
	unsigned int pages;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct iotlb_entry tlb_entry;
	struct page *mapped_page;

	if (!size || !usr_pgs)
		return -EINVAL;

	pages = size / PAGE_SIZE;

	vma = find_vma(mm, uva);
	if (!vma)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
		w = 1;

	for (pg_i = 0; pg_i < pages; pg_i++) {
		pg_num = get_user_pages(current, mm, uva, 1,
						w, 1, &mapped_page, NULL);
		if (pg_num > 0) {
			if (page_count(mapped_page) < 1) {
				pr_err("Bad page count after doing "
					"get_user_pages on user buffer\n");
				res = -EFAULT;
				break;
			}
			pa = page_to_phys(mapped_page);
			iotlb_init_entry(&tlb_entry, da, pa,
						MMU_CAM_PGSZ_4K |
						MMU_RAM_ENDIAN_LITTLE |
						MMU_RAM_ELSZ_32);
			iopgtable_store_entry(mmu, &tlb_entry);

			usr_pgs[pg_i] = mapped_page;
			da += PAGE_SIZE;
			uva += PAGE_SIZE;
		} else {
			pr_err("get_user_pages FAILED, "
				"MPU addr = 0x%x, "
				"vma->vm_flags = 0x%lx, "
				"get_user_pages Err Value = %d, "
				"Buffer size = 0x%x\n", uva,
				vma->vm_flags, pg_num, size);
			res = -EFAULT;
			break;
		}
	}

	return res;
}

/*
 * phys_to_device_map() - maps a physical address range
 * to a device virtual address
 * @obj:	target dmm object
 * @pool_id:	DMM pool to allocate the device address from; -1 means no
 *		pool is used and *@mapped_addr already holds the device address
 * @mapped_addr:	device virtual address of the mapping (in/out)
 * @pa:	physical address of the buffer
 * @bytes:	buffer size to map
 * @flags:	DMM mapping flags
 *
 * This function maps a physical buffer into a device virtual address.
 */
static int phys_to_device_map(struct iodmm_struct *obj,
				int pool_id, u32 *mapped_addr,
				u32 pa, size_t bytes, u32 flags)
{
	struct iotlb_entry e;
	struct dmm_map_object *dmm_obj;
	int err = 0;
	int i;
	u32 all_bits;
	u32 da;
	u32 pg_size[] = {SZ_16M, SZ_1M, SZ_64K, SZ_4K};
	int size_flag[] = {MMU_CAM_PGSZ_16M, MMU_CAM_PGSZ_1M,
				MMU_CAM_PGSZ_64K, MMU_CAM_PGSZ_4K};
	struct gen_pool *gen_pool;

	if (pool_id == -1) {
		/* no pool: the caller supplied the device address */
		gen_pool = NULL;
		da = round_down(*mapped_addr, PAGE_SIZE);
	} else {
		/*
		 * search through the list of available pools to get the
		 * pool corresponding to the given pool id
		 */
		gen_pool = get_pool_handle(obj->iovmm, pool_id);
		if (!gen_pool)
			return -ENODEV;
		da = gen_pool_alloc(gen_pool, bytes);
		*mapped_addr = (da | (pa & (PAGE_SIZE - 1)));
	}

	dmm_obj = add_mapping_info(obj, gen_pool, pa, *mapped_addr, bytes);
	if (dmm_obj == NULL) {
		err = -ENOMEM;
		goto err_free;
	}

	/*
	 * To find the max. page size with which both PA & VA are
	 * aligned, and program the entries with the largest possible pages.
	 */
	all_bits = pa | da;
	while (bytes) {
		for (i = 0; i < 4; i++) {
			if ((bytes >= pg_size[i]) && ((all_bits &
						(pg_size[i] - 1)) == 0)) {
				iotlb_init_entry(&e, da, pa,
						size_flag[i] |
						MMU_RAM_ENDIAN_LITTLE |
						MMU_RAM_ELSZ_32);
				iopgtable_store_entry(obj->iovmm->iommu, &e);
				bytes -= pg_size[i];
				da += pg_size[i];
				pa += pg_size[i];
				all_bits = pa | da;
				break;
			}
		}
	}

	return 0;
err_free:
	if (gen_pool)
		gen_pool_free(gen_pool, da, bytes);
	return err;
}

/*
 * dmm_user - Maps user buffer to Device address
 * @obj:	target dmm object
 * @args:	DMM map information
 *
 * Maps given user buffer to Device address
 */
int dmm_user(struct iodmm_struct *obj, void __user *args)
{
	struct gen_pool *gen_pool;
	struct dmm_map_object *dmm_obj;
	struct iovmm_device *iovmm_obj = obj->iovmm;
	u32 addr_align, da_align, size_align, tmp_addr;
	u32 num_of_pages, i, io_addr;
	int err = 0;
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct page *pg;
	struct dmm_map_info map_info;
	struct iotlb_entry e;

	if (copy_from_user(&map_info, (void __user *)args,
						sizeof(struct dmm_map_info)))
		return -EFAULT;

	/*
	 * Important Note: va is mapped from user application process
	 * to current process - it must lie completely within the current
	 * virtual memory address space in order to be of use to us here!
	 */
	down_read(&mm->mmap_sem);

	/* Calculate the page-aligned PA, VA and size */
	addr_align = round_down((u32) map_info.mpu_addr, PAGE_SIZE);
	size_align = round_up(map_info.size + map_info.mpu_addr - addr_align,
								PAGE_SIZE);

	mutex_lock(&iovmm_obj->dmm_map_lock);

	/*
	 * User passed a physical address to map. No DMM pool is
	 * specified if pool_id is -1, so the da is interpreted
	 * as the Device Address.
	 */
	if (map_info.flags == DMM_DA_PHYS) {
		err = phys_to_device_map(obj, map_info.pool_id, map_info.da,
					addr_align, size_align, map_info.flags);
		goto exit;
	}

	vma = find_vma(mm, map_info.mpu_addr);
	if (vma)
		dev_dbg(iovmm_obj->iommu->dev,
			"VMA for UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n",
			map_info.mpu_addr,
			map_info.size, vma->vm_start, vma->vm_end,
			vma->vm_flags);

	/*
	 * It is observed that under some circumstances, the user buffer is
	 * spread across several VMAs. So loop through and check if the entire
	 * user buffer is covered.
	 */
	while ((vma) && (map_info.mpu_addr + map_info.size > vma->vm_end)) {
		/* jump to the next VMA region */
		vma = find_vma(mm, vma->vm_end + 1);
		dev_dbg(iovmm_obj->iommu->dev,
			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n",
			map_info.mpu_addr,
			map_info.size, vma->vm_start, vma->vm_end,
			vma->vm_flags);
	}
	if (!vma) {
		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
			__func__, map_info.mpu_addr, map_info.size);
		err = -EINVAL;
		goto exit;
	}

	/*
	 * If the user provided an anonymous address, then don't allocate it
	 * from the pool.
	 */
	if (map_info.flags == DMM_DA_ANON) {
		gen_pool = NULL;
		da_align = round_down((u32)map_info.da, PAGE_SIZE);
	} else {
		/*
		 * search through the list of available pools to get the
		 * pool corresponding to the given pool id
		 */
		gen_pool = get_pool_handle(iovmm_obj, map_info.pool_id);
		if (!gen_pool) {
			err = -ENODEV;
			goto exit;
		}
		da_align = gen_pool_alloc(gen_pool, size_align);
	}

	/* Mapped address = MSB of VA | LSB of PA */
	tmp_addr = (da_align | ((u32)map_info.mpu_addr & (PAGE_SIZE - 1)));
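	/*
	 * Illustration (assuming 4 KiB pages): with da_align = 0x60000000
	 * and mpu_addr = 0x40001234, the low 12 bits of the user address
	 * (0x234) are preserved, so tmp_addr = 0x60000234 and the device
	 * sees the same offset within the page as the MPU does.
	 */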
	dmm_obj = add_mapping_info(obj, gen_pool, map_info.mpu_addr, tmp_addr,
								size_align);
	if (!dmm_obj) {
		err = -ENOMEM;
		goto exit;
	}

	*map_info.da = tmp_addr;

	/* Mapping the IO buffers */
	if (vma->vm_flags & VM_IO) {
		num_of_pages = size_align/PAGE_SIZE;
		for (i = 0; i < num_of_pages; i++) {
			io_addr = __user_va2_pa(current->mm, addr_align);
			pg = phys_to_page(io_addr);

			iotlb_init_entry(&e, da_align, io_addr,
						MMU_CAM_PGSZ_4K |
						MMU_RAM_ENDIAN_LITTLE |
						MMU_RAM_ELSZ_32);
			iopgtable_store_entry(obj->iovmm->iommu, &e);
			da_align += PAGE_SIZE;
			addr_align += PAGE_SIZE;
			dmm_obj->pages[i] = pg;
		}
		goto update_user;
	}

	/* Mapping the Userspace buffer */
	err = user_to_device_map(iovmm_obj->iommu, addr_align,
					da_align, size_align, dmm_obj->pages);
	if (err) {
		/* clean the entries that were mapped */
		__user_un_map(obj, tmp_addr);
		goto exit;
	}

#ifdef CONFIG_DMM_DMA_API
	/*
	 * Build the SG list that would be required for dma map and
	 * unmap.
	 */
	err = build_dma_sg(dmm_obj, map_info.mpu_addr, map_info.size);
	if (!err) {
		/*
		 * calling dma_map_sg(cache flush) is essential for
		 * dma_unmap_sg to work since the sg->dma_address required
		 * for dma_unmap_sg is built during dma_map_sg call.
		 */
		err = memory_give_ownership(iovmm_obj->iommu->dev, dmm_obj,
			map_info.mpu_addr, map_info.size, DMA_BIDIRECTIONAL);
	}
#endif

update_user:
	if (!err) {
		if (copy_to_user((void __user *)args, &map_info,
						sizeof(struct dmm_map_info)))
			err = -EFAULT;
	}
exit:
	mutex_unlock(&iovmm_obj->dmm_map_lock);
	up_read(&mm->mmap_sem);

	return err;
}

/*
 * user_remove_resources - Removes User's dmm resources
 * @obj:	target dmm object
 *
 * removes user's dmm resources
 */
void user_remove_resources(struct iodmm_struct *obj)
{
	int status = 0;
	struct dmm_map_object *temp_map, *map_obj;

	/* Free DMM mapped memory resources */
	list_for_each_entry_safe(map_obj, temp_map, &obj->map_list, link) {
		status = __user_un_map(obj, map_obj->da);
		if (status)
			pr_err("%s: proc_un_map failed! status = 0x%x\n",
							__func__, status);
	}
}

/*
 * omap_create_dmm_pool - Create DMM pool
 * @obj:	target dmm object
 * @args:	pool information
 */
int omap_create_dmm_pool(struct iodmm_struct *obj, const void __user *args)
{
	struct iovmm_pool *pool;
	struct iovmm_device *iovmm = obj->iovmm;
	struct iovmm_pool_info pool_info;

	if (copy_from_user(&pool_info, args, sizeof(struct iovmm_pool_info)))
		return -EFAULT;

	pool = kzalloc(sizeof(struct iovmm_pool), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;

	pool->pool_id = pool_info.pool_id;
	pool->da_begin = pool_info.da_begin;
	pool->da_end = pool_info.da_begin + pool_info.size;

	/* allocation granularity of 2^12 = 4 KiB (one MMU page) */
	pool->genpool = gen_pool_create(12, -1);
	if (!pool->genpool) {
		pr_err("%s: gen_pool_create returned null\n", __func__);
		kfree(pool);
		return -ENOMEM;
	}
	gen_pool_add(pool->genpool, pool->da_begin,
					pool_info.size, -1);

	INIT_LIST_HEAD(&pool->list);
	list_add_tail(&pool->list, &iovmm->mmap_pool);

	return 0;
}

/*
 * omap_delete_dmm_pools - Delete all DMM pools
 * @obj:	target dmm object
 */
int omap_delete_dmm_pools(struct iodmm_struct *obj)
{
	struct iovmm_pool *pool;
	struct iovmm_device *iovmm_obj = obj->iovmm;
	struct list_head *_pool, *_next_pool;

	list_for_each_safe(_pool, _next_pool, &iovmm_obj->mmap_pool) {
		pool = list_entry(_pool, struct iovmm_pool, list);
		gen_pool_destroy(pool->genpool);
		list_del(&pool->list);
		kfree(pool);
	}

	return 0;
}

/*
 * register_mmufault - Register for MMU fault notification
 * @obj:	target dmm object
 * @args:	Eventfd information
 *
 * Registering for MMU fault event notification
 */
int register_mmufault(struct iodmm_struct *obj, const void __user *args)
{
	int fd;
	struct iommu_event_ntfy *fd_reg;

	if (copy_from_user(&fd, args, sizeof(int)))
		return -EFAULT;

	fd_reg = kzalloc(sizeof(struct iommu_event_ntfy), GFP_KERNEL);
	if (!fd_reg)
		return -ENOMEM;

	fd_reg->fd = fd;
	fd_reg->evt_ctx = eventfd_ctx_fdget(fd);
	INIT_LIST_HEAD(&fd_reg->list);
	spin_lock_irq(&obj->iovmm->iommu->event_lock);
	list_add_tail(&fd_reg->list, &obj->iovmm->iommu->event_list);
	spin_unlock_irq(&obj->iovmm->iommu->event_lock);

	return 0;
}

/*
 * unregister_mmufault - Unregister for MMU fault notification
 * @obj:	target dmm object
 * @args:	Eventfd information
 *
 * Unregister from MMU fault event notification
 */
int unregister_mmufault(struct iodmm_struct *obj, const void __user *args)
{
	int fd;
	struct iommu_event_ntfy *fd_reg, *temp_reg;

	if (copy_from_user(&fd, (void __user *)args, sizeof(int)))
		return -EFAULT;

	/* remove the mmu fault event notification */
	spin_lock_irq(&obj->iovmm->iommu->event_lock);
	list_for_each_entry_safe(fd_reg, temp_reg,
			&obj->iovmm->iommu->event_list, list) {
		if (fd_reg->fd == fd) {
			list_del(&fd_reg->list);
			kfree(fd_reg);
		}
	}
	spin_unlock_irq(&obj->iovmm->iommu->event_lock);

	return 0;
}

/*
 * program_tlb_entry - Program the IOMMU TLB entry
 * @obj:	target dmm object
 * @args:	TLB entry information
 *
 * This function loads the TLB entry that the user specifies.
 * This function should be used only during remote Processor
 * boot time.
 */
int program_tlb_entry(struct iodmm_struct *obj, const void __user *args)
{
	struct iotlb_entry e;
	int ret;

	if (copy_from_user(&e, args, sizeof(struct iotlb_entry)))
		return -EFAULT;

	ret = load_iotlb_entry(obj->iovmm->iommu, &e);

	return ret;
}

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Userspace DMM to IOMMU");
MODULE_AUTHOR("Hari Kanigeri");
MODULE_AUTHOR("Ramesh Gupta");