/*
 *  PS3 address space management.
 *
 *  Copyright (C) 2006 Sony Computer Entertainment Inc.
 *  Copyright 2006 Sony Corp.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/cell-regs.h>
#include <asm/firmware.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/lv1call.h>

#include "platform.h"

#if defined(DEBUG)
#define DBG udbg_printf
#else
#define DBG pr_devel
#endif

enum {
#if defined(CONFIG_PS3_DYNAMIC_DMA)
	USE_DYNAMIC_DMA = 1,
#else
	USE_DYNAMIC_DMA = 0,
#endif
};

enum {
	PAGE_SHIFT_4K = 12U,
	PAGE_SHIFT_64K = 16U,
	PAGE_SHIFT_16M = 24U,
};

static unsigned long make_page_sizes(unsigned long a, unsigned long b)
{
	return (a << 56) | (b << 48);
}

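/*
 * For illustration (values from the enums above): the call in
 * ps3_mm_vas_create() below, make_page_sizes(PAGE_SHIFT_16M,
 * PAGE_SHIFT_64K), packs 24 into bits 56-63 and 16 into bits 48-55,
 * giving 0x1810000000000000UL.
 */
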
enum {
	ALLOCATE_MEMORY_TRY_ALT_UNIT = 0X04,
	ALLOCATE_MEMORY_ADDR_ZERO = 0X08,
};

/* valid htab sizes are {18,19,20} = 256K, 512K, 1M */

enum {
	HTAB_SIZE_MAX = 20U, /* HV limit of 1MB */
	HTAB_SIZE_MIN = 18U, /* CPU limit of 256KB */
};

/*============================================================================*/
/* virtual address space routines */
/*============================================================================*/

/**
 * struct mem_region - memory region structure
 * @base: base address
 * @size: size in bytes
 * @offset: difference between base and rm.size
 */

struct mem_region {
	u64 base;
	u64 size;
	unsigned long offset;
};

/**
 * struct map - address space state variables holder
 * @total: total memory available as reported by HV
 * @vas_id: HV virtual address space id
 * @htab_size: htab size in bytes
 *
 * The HV virtual address space (vas) allows for hotplug memory regions.
 * Memory regions can be created and destroyed in the vas at runtime.
 * @rm: real mode (bootmem) region
 * @r1: hotplug memory region(s)
 *
 * ps3 addresses
 * virt_addr: a cpu 'translated' effective address
 * phys_addr: an address in what Linux thinks is the physical address space
 * lpar_addr: an address in the HV virtual address space
 * bus_addr: an io controller 'translated' address on a device bus
 */

struct map {
	u64 total;
	u64 vas_id;
	u64 htab_size;
	struct mem_region rm;
	struct mem_region r1;
};

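/*
 * A sketch of the two views (assuming, as this file does below, that the
 * real mode region starts at lpar address zero):
 *
 *   phys_addr: [0 .. rm.size) [rm.size .. total)               (contiguous)
 *   lpar_addr: [0 .. rm.size) ... hole ... [r1.base .. r1.base + r1.size)
 *
 * r1.offset (r1.base - rm.size) is what ps3_mm_phys_to_lpar() adds to a
 * phys_addr in the second interval to obtain its lpar_addr.
 */
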
#define debug_dump_map(x) _debug_dump_map(x, __func__, __LINE__)
static void __maybe_unused _debug_dump_map(const struct map *m,
	const char *func, int line)
{
	DBG("%s:%d: map.total = %llxh\n", func, line, m->total);
	DBG("%s:%d: map.rm.size = %llxh\n", func, line, m->rm.size);
	DBG("%s:%d: map.vas_id = %llu\n", func, line, m->vas_id);
	DBG("%s:%d: map.htab_size = %llxh\n", func, line, m->htab_size);
	DBG("%s:%d: map.r1.base = %llxh\n", func, line, m->r1.base);
	DBG("%s:%d: map.r1.offset = %lxh\n", func, line, m->r1.offset);
	DBG("%s:%d: map.r1.size = %llxh\n", func, line, m->r1.size);
}

static struct map map;

/**
 * ps3_mm_phys_to_lpar - translate a linux physical address to lpar address
 * @phys_addr: linux physical address
 */

unsigned long ps3_mm_phys_to_lpar(unsigned long phys_addr)
{
	BUG_ON(is_kernel_addr(phys_addr));
	return (phys_addr < map.rm.size || phys_addr >= map.total)
		? phys_addr : phys_addr + map.r1.offset;
}

EXPORT_SYMBOL(ps3_mm_phys_to_lpar);

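/*
 * Worked example with made-up sizes (not actual PS3 values): if
 * map.rm.size = 128MB and map.r1.offset = 128MB, then phys_addr
 * 0x4000000 (64MB) is below rm.size and is returned unchanged, while
 * phys_addr 0x9000000 (144MB) translates to lpar 0x11000000 (272MB).
 */
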
/**
 * ps3_mm_vas_create - create the virtual address space
 */

void __init ps3_mm_vas_create(unsigned long* htab_size)
{
	int result;
	u64 start_address, size, access_right, max_page_size, flags;

	result = lv1_query_logical_partition_address_region_info(0,
		&start_address, &size, &access_right, &max_page_size,
		&flags);

	if (result) {
		DBG("%s:%d: lv1_query_logical_partition_address_region_info "
			"failed: %s\n", __func__, __LINE__,
			ps3_result(result));
		goto fail;
	}

	if (max_page_size < PAGE_SHIFT_16M) {
		DBG("%s:%d: bad max_page_size %llxh\n", __func__, __LINE__,
			max_page_size);
		goto fail;
	}

	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE > HTAB_SIZE_MAX);
	BUILD_BUG_ON(CONFIG_PS3_HTAB_SIZE < HTAB_SIZE_MIN);

	result = lv1_construct_virtual_address_space(CONFIG_PS3_HTAB_SIZE,
			2, make_page_sizes(PAGE_SHIFT_16M, PAGE_SHIFT_64K),
			&map.vas_id, &map.htab_size);

	if (result) {
		DBG("%s:%d: lv1_construct_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	result = lv1_select_virtual_address_space(map.vas_id);

	if (result) {
		DBG("%s:%d: lv1_select_virtual_address_space failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto fail;
	}

	*htab_size = map.htab_size;

	debug_dump_map(&map);

	return;

fail:
	panic("ps3_mm_vas_create failed");
}

/**
 * ps3_mm_vas_destroy - destroy the virtual address space
 */

void ps3_mm_vas_destroy(void)
{
	int result;

	DBG("%s:%d: map.vas_id = %llu\n", __func__, __LINE__, map.vas_id);

	if (map.vas_id) {
		result = lv1_select_virtual_address_space(0);
		BUG_ON(result);
		result = lv1_destruct_virtual_address_space(map.vas_id);
		BUG_ON(result);
		map.vas_id = 0;
	}
}

/*============================================================================*/
/* memory hotplug routines */
/*============================================================================*/

/**
 * ps3_mm_region_create - create a memory region in the vas
 * @r: pointer to a struct mem_region to accept initialized values
 * @size: requested region size
 *
 * This implementation creates the region with the vas large page size.
 * @size is rounded down to a multiple of the vas large page size.
 */

static int ps3_mm_region_create(struct mem_region *r, unsigned long size)
{
	int result;
	u64 muid;

	r->size = _ALIGN_DOWN(size, 1 << PAGE_SHIFT_16M);

	DBG("%s:%d requested %lxh\n", __func__, __LINE__, size);
	DBG("%s:%d actual %llxh\n", __func__, __LINE__, r->size);
	DBG("%s:%d difference %llxh (%lluMB)\n", __func__, __LINE__,
		size - r->size, (size - r->size) / 1024 / 1024);

	if (r->size == 0) {
		DBG("%s:%d: size == 0\n", __func__, __LINE__);
		result = -1;
		goto zero_region;
	}

	result = lv1_allocate_memory(r->size, PAGE_SHIFT_16M, 0,
		ALLOCATE_MEMORY_TRY_ALT_UNIT, &r->base, &muid);

	if (result || r->base < map.rm.size) {
		DBG("%s:%d: lv1_allocate_memory failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		goto zero_region;
	}

	r->offset = r->base - map.rm.size;
	return result;

zero_region:
	r->size = r->base = r->offset = 0;
	return result;
}

/**
 * ps3_mm_region_destroy - destroy a memory region
 * @r: pointer to struct mem_region
 */

static void ps3_mm_region_destroy(struct mem_region *r)
{
	int result;

	DBG("%s:%d: r->base = %llxh\n", __func__, __LINE__, r->base);
	if (r->base) {
		result = lv1_release_memory(r->base);
		BUG_ON(result);
		r->size = r->base = r->offset = 0;
		map.total = map.rm.size;
	}
}

/**
 * ps3_mm_add_memory - hot add memory
 */

static int __init ps3_mm_add_memory(void)
{
	int result;
	unsigned long start_addr;
	unsigned long start_pfn;
	unsigned long nr_pages;

	if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
		return -ENODEV;

	BUG_ON(!mem_init_done);

	start_addr = map.rm.size;
	start_pfn = start_addr >> PAGE_SHIFT;
	nr_pages = (map.r1.size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	DBG("%s:%d: start_addr %lxh, start_pfn %lxh, nr_pages %lxh\n",
		__func__, __LINE__, start_addr, start_pfn, nr_pages);

	result = add_memory(0, start_addr, map.r1.size);

	if (result) {
		pr_err("%s:%d: add_memory failed: (%d)\n",
			__func__, __LINE__, result);
		return result;
	}

	memblock_add(start_addr, map.r1.size);

	result = online_pages(start_pfn, nr_pages);

	if (result) {
		pr_err("%s:%d: online_pages failed: (%d)\n",
			__func__, __LINE__, result);
		return result;
	}

	return result;
}

device_initcall(ps3_mm_add_memory);

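/*
 * Note the mixed units above: add_memory() and memblock_add() take a
 * start address and size in bytes, while online_pages() takes a start
 * pfn and a page count, hence the PAGE_SHIFT conversions.
 */
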
/*============================================================================*/
/* dma routines */
/*============================================================================*/

/**
 * dma_sb_lpar_to_bus - Translate an lpar address to ioc mapped bus address.
 * @r: pointer to dma region structure
 * @lpar_addr: HV lpar address
 */

static unsigned long dma_sb_lpar_to_bus(struct ps3_dma_region *r,
	unsigned long lpar_addr)
{
	if (lpar_addr >= map.rm.size)
		lpar_addr -= map.r1.offset;
	BUG_ON(lpar_addr < r->offset);
	BUG_ON(lpar_addr >= r->offset + r->len);
	return r->bus_addr + lpar_addr - r->offset;
}

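/*
 * In other words: undo the r1 remapping done by ps3_mm_phys_to_lpar()
 * to get back to the contiguous "linux physical" view, then rebase from
 * the dma region's offset onto its ioc bus address.
 */
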
#define dma_dump_region(_a) _dma_dump_region(_a, __func__, __LINE__)
static void __maybe_unused _dma_dump_region(const struct ps3_dma_region *r,
	const char *func, int line)
{
	DBG("%s:%d: dev %llu:%llu\n", func, line, r->dev->bus_id,
		r->dev->dev_id);
	DBG("%s:%d: page_size %u\n", func, line, r->page_size);
	DBG("%s:%d: bus_addr %lxh\n", func, line, r->bus_addr);
	DBG("%s:%d: len %lxh\n", func, line, r->len);
	DBG("%s:%d: offset %lxh\n", func, line, r->offset);
}

/**
 * dma_chunk - A chunk of dma pages mapped by the io controller.
 * @region: The dma region that owns this chunk.
 * @lpar_addr: Starting lpar address of the area to map.
 * @bus_addr: Starting ioc bus address of the area to map.
 * @len: Length in bytes of the area to map.
 * @link: A struct list_head used with struct ps3_dma_region.chunk_list, the
 * list of all chunks owned by the region.
 *
 * This implementation uses a very simple dma page manager
 * based on the dma_chunk structure.  This scheme assumes
 * that all drivers use very well behaved dma ops.
 */

struct dma_chunk {
	struct ps3_dma_region *region;
	unsigned long lpar_addr;
	unsigned long bus_addr;
	unsigned long len;
	struct list_head link;
	unsigned int usage_count;
};

#define dma_dump_chunk(_a) _dma_dump_chunk(_a, __func__, __LINE__)
static void _dma_dump_chunk(const struct dma_chunk *c, const char *func,
	int line)
{
	DBG("%s:%d: r.dev %llu:%llu\n", func, line,
		c->region->dev->bus_id, c->region->dev->dev_id);
	DBG("%s:%d: r.bus_addr %lxh\n", func, line, c->region->bus_addr);
	DBG("%s:%d: r.page_size %u\n", func, line, c->region->page_size);
	DBG("%s:%d: r.len %lxh\n", func, line, c->region->len);
	DBG("%s:%d: r.offset %lxh\n", func, line, c->region->offset);
	DBG("%s:%d: c.lpar_addr %lxh\n", func, line, c->lpar_addr);
	DBG("%s:%d: c.bus_addr %lxh\n", func, line, c->bus_addr);
	DBG("%s:%d: c.len %lxh\n", func, line, c->len);
}

static struct dma_chunk *dma_find_chunk(struct ps3_dma_region *r,
	unsigned long bus_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_bus = _ALIGN_DOWN(bus_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len+bus_addr-aligned_bus,
					      1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (aligned_bus >= c->bus_addr &&
		    aligned_bus + aligned_len <= c->bus_addr + c->len)
			return c;

		/* below */
		if (aligned_bus + aligned_len <= c->bus_addr)
			continue;

		/* above */
		if (aligned_bus >= c->bus_addr + c->len)
			continue;

		/* we don't handle the multi-chunk case for now */
		dma_dump_chunk(c);
		BUG();
	}
	return NULL;
}

static struct dma_chunk *dma_find_chunk_lpar(struct ps3_dma_region *r,
	unsigned long lpar_addr, unsigned long len)
{
	struct dma_chunk *c;
	unsigned long aligned_lpar = _ALIGN_DOWN(lpar_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + lpar_addr - aligned_lpar,
					      1 << r->page_size);

	list_for_each_entry(c, &r->chunk_list.head, link) {
		/* intersection */
		if (c->lpar_addr <= aligned_lpar &&
		    aligned_lpar < c->lpar_addr + c->len) {
			if (aligned_lpar + aligned_len <= c->lpar_addr + c->len)
				return c;
			else {
				dma_dump_chunk(c);
				BUG();
			}
		}
		/* below */
		if (aligned_lpar + aligned_len <= c->lpar_addr)
			continue;

		/* above */
		if (c->lpar_addr + c->len <= aligned_lpar)
			continue;
	}
	return NULL;
}

static int dma_sb_free_chunk(struct dma_chunk *c)
{
	int result = 0;

	if (c->bus_addr) {
		result = lv1_unmap_device_dma_region(c->region->dev->bus_id,
			c->region->dev->dev_id, c->bus_addr, c->len);
		BUG_ON(result);
	}
	kfree(c);
	return result;
}

static int dma_ioc0_free_chunk(struct dma_chunk *c)
{
	int result = 0;
	int iopage;
	unsigned long offset;
	struct ps3_dma_region *r = c->region;

	DBG("%s:start\n", __func__);
	for (iopage = 0; iopage < (c->len >> r->page_size); iopage++) {
		offset = (1 << r->page_size) * iopage;
		/* put INVALID entry */
		result = lv1_put_iopte(0,
				       c->bus_addr + offset,
				       c->lpar_addr + offset,
				       r->ioid,
				       0);
		DBG("%s: bus=%#lx, lpar=%#lx, ioid=%d\n", __func__,
		    c->bus_addr + offset,
		    c->lpar_addr + offset,
		    r->ioid);

		if (result) {
			DBG("%s:%d: lv1_put_iopte failed: %s\n", __func__,
			    __LINE__, ps3_result(result));
		}
	}
	kfree(c);
	DBG("%s:end\n", __func__);
	return result;
}

/**
 * dma_sb_map_pages - Maps dma pages into the io controller bus address space.
 * @r: Pointer to a struct ps3_dma_region.
 * @phys_addr: Starting physical address of the area to map.
 * @len: Length in bytes of the area to map.
 * @c_out: A pointer to receive an allocated struct dma_chunk for this area.
 *
 * This is the lowest level dma mapping routine, and is the one that will
 * make the HV call to add the pages into the io controller address space.
 */

static int dma_sb_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
	unsigned long len, struct dma_chunk **c_out, u64 iopte_flag)
{
	int result;
	struct dma_chunk *c;

	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	c->bus_addr = dma_sb_lpar_to_bus(r, c->lpar_addr);
	c->len = len;

	BUG_ON(iopte_flag != 0xf800000000000000UL);
	result = lv1_map_device_dma_region(c->region->dev->bus_id,
					   c->region->dev->dev_id, c->lpar_addr,
					   c->bus_addr, c->len, iopte_flag);
	if (result) {
		DBG("%s:%d: lv1_map_device_dma_region failed: %s\n",
		    __func__, __LINE__, ps3_result(result));
		goto fail_map;
	}

	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	return 0;

fail_map:
	kfree(c);
fail_alloc:
	*c_out = NULL;
	DBG(" <- %s:%d\n", __func__, __LINE__);
	return result;
}

static int dma_ioc0_map_pages(struct ps3_dma_region *r, unsigned long phys_addr,
			      unsigned long len, struct dma_chunk **c_out,
			      u64 iopte_flag)
{
	int result;
	struct dma_chunk *c, *last;
	int iopage, pages;
	unsigned long offset;

	DBG(KERN_ERR "%s: phy=%#lx, lpar%#lx, len=%#lx\n", __func__,
	    phys_addr, ps3_mm_phys_to_lpar(phys_addr), len);
	c = kzalloc(sizeof(struct dma_chunk), GFP_ATOMIC);

	if (!c) {
		result = -ENOMEM;
		goto fail_alloc;
	}

	c->region = r;
	c->len = len;
	c->lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
	/* allocate IO address */
	if (list_empty(&r->chunk_list.head)) {
		/* first one */
		c->bus_addr = r->bus_addr;
	} else {
		/* derive from last bus addr*/
		last = list_entry(r->chunk_list.head.next,
				  struct dma_chunk, link);
		c->bus_addr = last->bus_addr + last->len;
		DBG("%s: last bus=%#lx, len=%#lx\n", __func__,
		    last->bus_addr, last->len);
	}

	/* FIXME: check whether length exceeds region size */

	/* build ioptes for the area */
	pages = len >> r->page_size;
	DBG("%s: pgsize=%#x len=%#lx pages=%#x iopteflag=%#llx\n", __func__,
	    r->page_size, r->len, pages, iopte_flag);
	for (iopage = 0; iopage < pages; iopage++) {
		offset = (1 << r->page_size) * iopage;
		result = lv1_put_iopte(0,
				       c->bus_addr + offset,
				       c->lpar_addr + offset,
				       r->ioid,
				       iopte_flag);
		if (result) {
			pr_warning("%s:%d: lv1_put_iopte failed: %s\n",
				   __func__, __LINE__, ps3_result(result));
			goto fail_map;
		}
		DBG("%s: pg=%d bus=%#lx, lpar=%#lx, ioid=%#x\n", __func__,
		    iopage, c->bus_addr + offset, c->lpar_addr + offset,
		    r->ioid);
	}

	/* be sure that last allocated one is inserted at head */
	list_add(&c->link, &r->chunk_list.head);

	*c_out = c;
	DBG("%s: end\n", __func__);
	return 0;

fail_map:
	for (iopage--; 0 <= iopage; iopage--) {
		offset = (1 << r->page_size) * iopage;
		/* put INVALID entry */
		lv1_put_iopte(0,
			      c->bus_addr + offset,
			      c->lpar_addr + offset,
			      r->ioid,
			      0);
	}
	kfree(c);
fail_alloc:
	*c_out = NULL;
	return result;
}

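/*
 * Bus addresses are handed out bump-allocator style above: the most
 * recently mapped chunk sits at the head of chunk_list, and a new chunk
 * starts where that one ends.  This relies on the "very well behaved
 * dma ops" assumption documented at struct dma_chunk.
 */
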
/**
 * dma_sb_region_create - Create a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region create routine, and is the one that
 * will make the HV call to create the region.
 */

static int dma_sb_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	DBG(" -> %s:%d:\n", __func__, __LINE__);

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	DBG("%s:%u: len = 0x%lx, page_size = %u, offset = 0x%lx\n", __func__,
	    __LINE__, r->len, r->page_size, r->offset);

	BUG_ON(!r->len);
	BUG_ON(!r->page_size);
	BUG_ON(!r->region_ops);

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		roundup_pow_of_two(r->len), r->page_size, r->region_type,
		&bus_addr);
	r->bus_addr = bus_addr;

	if (result) {
		DBG("%s:%d: lv1_allocate_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}

	return result;
}

static int dma_ioc0_region_create(struct ps3_dma_region *r)
{
	int result;
	u64 bus_addr;

	INIT_LIST_HEAD(&r->chunk_list.head);
	spin_lock_init(&r->chunk_list.lock);

	result = lv1_allocate_io_segment(0,
					 r->len,
					 r->page_size,
					 &bus_addr);
	r->bus_addr = bus_addr;
	if (result) {
		DBG("%s:%d: lv1_allocate_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));
		r->len = r->bus_addr = 0;
	}
	DBG("%s: len=%#lx, pg=%d, bus=%#lx\n", __func__,
	    r->len, r->page_size, r->bus_addr);
	return result;
}

/**
 * dma_sb_region_free - Free a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This is the lowest level dma region free routine, and is the one that
 * will make the HV call to free the region.
 */

static int dma_sb_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c;
	struct dma_chunk *tmp;

	BUG_ON(!r);

	if (!r->dev->bus_id) {
		pr_info("%s:%d: %llu:%llu no dma\n", __func__, __LINE__,
			r->dev->bus_id, r->dev->dev_id);
		return 0;
	}

	list_for_each_entry_safe(c, tmp, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	result = lv1_free_device_dma_region(r->dev->bus_id, r->dev->dev_id,
		r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_free_device_dma_region failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;

	return result;
}

static int dma_ioc0_region_free(struct ps3_dma_region *r)
{
	int result;
	struct dma_chunk *c, *n;

	DBG("%s: start\n", __func__);
	list_for_each_entry_safe(c, n, &r->chunk_list.head, link) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	result = lv1_release_io_segment(0, r->bus_addr);

	if (result)
		DBG("%s:%d: lv1_release_io_segment failed: %s\n",
			__func__, __LINE__, ps3_result(result));

	r->bus_addr = 0;
	DBG("%s: end\n", __func__);

	return result;
}

/**
 * dma_sb_map_area - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This is the common dma mapping routine.
 */

static int dma_sb_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
					      1 << r->page_size);
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));

	if (!USE_DYNAMIC_DMA) {
		unsigned long lpar_addr = ps3_mm_phys_to_lpar(phys_addr);
		DBG(" -> %s:%d\n", __func__, __LINE__);
		DBG("%s:%d virt_addr %lxh\n", __func__, __LINE__,
			virt_addr);
		DBG("%s:%d phys_addr %lxh\n", __func__, __LINE__,
			phys_addr);
		DBG("%s:%d lpar_addr %lxh\n", __func__, __LINE__,
			lpar_addr);
		DBG("%s:%d len %lxh\n", __func__, __LINE__, len);
		DBG("%s:%d bus_addr %llxh (%lxh)\n", __func__, __LINE__,
			*bus_addr, len);
	}

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, *bus_addr, len);

	if (c) {
		DBG("%s:%d: reusing mapped chunk", __func__, __LINE__);
		dma_dump_chunk(c);
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_sb_map_pages(r, aligned_phys, aligned_len, &c, iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_sb_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}

	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

static int dma_ioc0_map_area(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	int result;
	unsigned long flags;
	struct dma_chunk *c;
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	unsigned long aligned_phys = _ALIGN_DOWN(phys_addr, 1 << r->page_size);
	unsigned long aligned_len = _ALIGN_UP(len + phys_addr - aligned_phys,
					      1 << r->page_size);

	DBG(KERN_ERR "%s: vaddr=%#lx, len=%#lx\n", __func__,
	    virt_addr, len);
	DBG(KERN_ERR "%s: ph=%#lx a_ph=%#lx a_l=%#lx\n", __func__,
	    phys_addr, aligned_phys, aligned_len);

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk_lpar(r, ps3_mm_phys_to_lpar(phys_addr), len);

	if (c) {
		*bus_addr = c->bus_addr + phys_addr - aligned_phys;
		c->usage_count++;
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return 0;
	}

	result = dma_ioc0_map_pages(r, aligned_phys, aligned_len, &c,
				    iopte_flag);

	if (result) {
		*bus_addr = 0;
		DBG("%s:%d: dma_ioc0_map_pages failed (%d)\n",
			__func__, __LINE__, result);
		spin_unlock_irqrestore(&r->chunk_list.lock, flags);
		return result;
	}
	*bus_addr = c->bus_addr + phys_addr - aligned_phys;
	DBG("%s: va=%#lx pa=%#lx a_pa=%#lx bus=%#llx\n", __func__,
	    virt_addr, phys_addr, aligned_phys, *bus_addr);
	c->usage_count = 1;

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return result;
}

/**
 * dma_sb_unmap_area - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This is the common dma unmap routine.
 */

static int dma_sb_unmap_area(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_sb_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	return 0;
}

static int dma_ioc0_unmap_area(struct ps3_dma_region *r,
	dma_addr_t bus_addr, unsigned long len)
{
	unsigned long flags;
	struct dma_chunk *c;

	DBG("%s: start a=%#llx l=%#lx\n", __func__, bus_addr, len);
	spin_lock_irqsave(&r->chunk_list.lock, flags);
	c = dma_find_chunk(r, bus_addr, len);

	if (!c) {
		unsigned long aligned_bus = _ALIGN_DOWN(bus_addr,
			1 << r->page_size);
		unsigned long aligned_len = _ALIGN_UP(len + bus_addr
			- aligned_bus, 1 << r->page_size);
		DBG("%s:%d: not found: bus_addr %llxh\n",
			__func__, __LINE__, bus_addr);
		DBG("%s:%d: not found: len %lxh\n",
			__func__, __LINE__, len);
		DBG("%s:%d: not found: aligned_bus %lxh\n",
			__func__, __LINE__, aligned_bus);
		DBG("%s:%d: not found: aligned_len %lxh\n",
			__func__, __LINE__, aligned_len);
		BUG();
	}

	c->usage_count--;

	if (!c->usage_count) {
		list_del(&c->link);
		dma_ioc0_free_chunk(c);
	}

	spin_unlock_irqrestore(&r->chunk_list.lock, flags);
	DBG("%s: end\n", __func__);
	return 0;
}

/**
 * dma_sb_region_create_linear - Setup a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine creates an HV dma region for the device and maps all available
 * ram into the io controller bus address space.
 */

static int dma_sb_region_create_linear(struct ps3_dma_region *r)
{
	int result;
	unsigned long virt_addr, len;
	dma_addr_t tmp;

	if (r->len > 16*1024*1024) {	/* FIXME: need proper fix */
		/* force 16M dma pages for linear mapping */
		if (r->page_size != PS3_DMA_16M) {
			pr_info("%s:%d: forcing 16M pages for linear map\n",
				__func__, __LINE__);
			r->page_size = PS3_DMA_16M;
			r->len = _ALIGN_UP(r->len, 1 << r->page_size);
		}
	}

	result = dma_sb_region_create(r);
	BUG_ON(result);

	if (r->offset < map.rm.size) {
		/* Map (part of) 1st RAM chunk */
		virt_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Map (part of) 2nd RAM chunk */
		virt_addr = map.rm.size;
		len = r->len;
		if (r->offset >= map.rm.size)
			virt_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		result = dma_sb_map_area(r, virt_addr, len, &tmp,
			CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_SO_RW |
			CBE_IOPTE_M);
		BUG_ON(result);
	}

	return result;
}

/**
 * dma_sb_region_free_linear - Free a linear dma mapping for a device.
 * @r: Pointer to a struct ps3_dma_region.
 *
 * This routine will unmap all mapped areas and free the HV dma region.
 */

static int dma_sb_region_free_linear(struct ps3_dma_region *r)
{
	int result;
	dma_addr_t bus_addr;
	unsigned long len, lpar_addr;

	if (r->offset < map.rm.size) {
		/* Unmap (part of) 1st RAM chunk */
		lpar_addr = map.rm.base + r->offset;
		len = map.rm.size - r->offset;
		if (len > r->len)
			len = r->len;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	if (r->offset + r->len > map.rm.size) {
		/* Unmap (part of) 2nd RAM chunk */
		lpar_addr = map.r1.base;
		len = r->len;
		if (r->offset >= map.rm.size)
			lpar_addr += r->offset - map.rm.size;
		else
			len -= map.rm.size - r->offset;
		bus_addr = dma_sb_lpar_to_bus(r, lpar_addr);
		result = dma_sb_unmap_area(r, bus_addr, len);
		BUG_ON(result);
	}

	result = dma_sb_region_free(r);
	BUG_ON(result);

	return result;
}

/**
 * dma_sb_map_area_linear - Map an area of memory into a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @virt_addr: Starting virtual address of the area to map.
 * @len: Length in bytes of the area to map.
 * @bus_addr: A pointer to return the starting ioc bus address of the area to
 * map.
 *
 * This routine just returns the corresponding bus address.  Actual mapping
 * occurs in dma_sb_region_create_linear().
 */

static int dma_sb_map_area_linear(struct ps3_dma_region *r,
	unsigned long virt_addr, unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	unsigned long phys_addr = is_kernel_addr(virt_addr) ? __pa(virt_addr)
		: virt_addr;
	*bus_addr = dma_sb_lpar_to_bus(r, ps3_mm_phys_to_lpar(phys_addr));
	return 0;
}

/**
 * dma_sb_unmap_area_linear - Unmap an area of memory from a device dma region.
 * @r: Pointer to a struct ps3_dma_region.
 * @bus_addr: The starting ioc bus address of the area to unmap.
 * @len: Length in bytes of the area to unmap.
 *
 * This routine does nothing.  Unmapping occurs in dma_sb_region_free_linear().
 */

static int dma_sb_unmap_area_linear(struct ps3_dma_region *r,
	dma_addr_t bus_addr, unsigned long len)
{
	return 0;
}

static const struct ps3_dma_region_ops ps3_dma_sb_region_ops = {
	.create = dma_sb_region_create,
	.free = dma_sb_region_free,
	.map = dma_sb_map_area,
	.unmap = dma_sb_unmap_area
};

static const struct ps3_dma_region_ops ps3_dma_sb_region_linear_ops = {
	.create = dma_sb_region_create_linear,
	.free = dma_sb_region_free_linear,
	.map = dma_sb_map_area_linear,
	.unmap = dma_sb_unmap_area_linear
};

static const struct ps3_dma_region_ops ps3_dma_ioc0_region_ops = {
	.create = dma_ioc0_region_create,
	.free = dma_ioc0_region_free,
	.map = dma_ioc0_map_area,
	.unmap = dma_ioc0_unmap_area
};

int ps3_dma_region_init(struct ps3_system_bus_device *dev,
	struct ps3_dma_region *r, enum ps3_dma_page_size page_size,
	enum ps3_dma_region_type region_type, void *addr, unsigned long len)
{
	unsigned long lpar_addr;

	lpar_addr = addr ? ps3_mm_phys_to_lpar(__pa(addr)) : 0;

	r->dev = dev;
	r->page_size = page_size;
	r->region_type = region_type;
	r->offset = lpar_addr;
	if (r->offset >= map.rm.size)
		r->offset -= map.r1.offset;
	r->len = len ? len : _ALIGN_UP(map.total, 1 << r->page_size);

	switch (dev->dev_type) {
	case PS3_DEVICE_TYPE_SB:
		r->region_ops = (USE_DYNAMIC_DMA)
			? &ps3_dma_sb_region_ops
			: &ps3_dma_sb_region_linear_ops;
		break;

	case PS3_DEVICE_TYPE_IOC0:
		r->region_ops = &ps3_dma_ioc0_region_ops;
		break;

	default:
		BUG();
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(ps3_dma_region_init);

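/*
 * A minimal usage sketch (hypothetical driver code, not taken from this
 * file): a system bus driver would typically do something like
 *
 *	struct ps3_dma_region r;
 *
 *	ps3_dma_region_init(dev, &r, PS3_DMA_64K, PS3_DMA_OTHER, NULL, 0);
 *	ps3_dma_region_create(&r);
 *	...
 *	ps3_dma_region_free(&r);
 *
 * with the ops table selected above dispatching to the sb or ioc0
 * implementation as appropriate for the device type.
 */
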
int ps3_dma_region_create(struct ps3_dma_region *r)
{
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->create);
	return r->region_ops->create(r);
}
EXPORT_SYMBOL(ps3_dma_region_create);

int ps3_dma_region_free(struct ps3_dma_region *r)
{
	BUG_ON(!r->region_ops);
	BUG_ON(!r->region_ops->free);
	return r->region_ops->free(r);
}
EXPORT_SYMBOL(ps3_dma_region_free);

int ps3_dma_map(struct ps3_dma_region *r, unsigned long virt_addr,
	unsigned long len, dma_addr_t *bus_addr,
	u64 iopte_flag)
{
	return r->region_ops->map(r, virt_addr, len, bus_addr, iopte_flag);
}
EXPORT_SYMBOL(ps3_dma_map);

int ps3_dma_unmap(struct ps3_dma_region *r, dma_addr_t bus_addr,
	unsigned long len)
{
	return r->region_ops->unmap(r, bus_addr, len);
}
EXPORT_SYMBOL(ps3_dma_unmap);

/*============================================================================*/
/* system startup routines */
/*============================================================================*/

/**
 * ps3_mm_init - initialize the address space state variables
 */

void __init ps3_mm_init(void)
{
	int result;

	DBG(" -> %s:%d\n", __func__, __LINE__);

	result = ps3_repository_read_mm_info(&map.rm.base, &map.rm.size,
		&map.total);

	if (result)
		panic("ps3_repository_read_mm_info() failed");

	map.rm.offset = map.rm.base;
	map.vas_id = map.htab_size = 0;

	/* this implementation assumes map.rm.base is zero */

	BUG_ON(map.rm.base);
	BUG_ON(!map.rm.size);

	/* arrange to do this in ps3_mm_add_memory */
	ps3_mm_region_create(&map.r1, map.total - map.rm.size);

	/* correct map.total for the real total amount of memory we use */
	map.total = map.rm.size + map.r1.size;

	DBG(" <- %s:%d\n", __func__, __LINE__);
}

/**
 * ps3_mm_shutdown - final cleanup of address space
 */

void ps3_mm_shutdown(void)
{
	ps3_mm_region_destroy(&map.r1);
}