/*
 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
 *
 * API definitions to setup MMU TLB and PTE
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 *
 * This package is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */
#include "MMURegAcM.h"
#include <linux/types.h>
#include <linux/err.h>

/* Register field masks and limits for the OMAP MMU module */
#define MMU_BASE_VAL_MASK 0xFC00
#define MMU_PAGE_MAX 3
#define MMU_ELEMENTSIZE_MAX 3
#define MMU_ADDR_MASK 0xFFFFF000
#define MMU_TTB_MASK 0xFFFFC000
#define MMU_SECTION_ADDR_MASK 0xFFF00000
#define MMU_SSECTION_ADDR_MASK 0xFF000000
#define MMU_PAGE_TABLE_MASK 0xFFFFFC00
#define MMU_LARGE_PAGE_MASK 0xFFFF0000
#define MMU_SMALL_PAGE_MASK 0xFFFFF000

/* Value written to LD_TLB to load the current CAM/RAM entry into the TLB */
#define MMU_LOAD_TLB 0x00000001
/* Offset of the global-flush register from the MMU base address */
#define MMU_GFLUSH 0x60
/*
 * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size(SLSS)
 */
enum hw_mmu_page_size_t {
	HW_MMU_SECTION,
	HW_MMU_LARGE_PAGE,
	HW_MMU_SMALL_PAGE,
	HW_MMU_SUPERSECTION
};
51
* FUNCTION : mmu_flush_entry
55
* Identifier : base_address
57
* Description : Base Address of instance of MMU module
62
* Description : 0 -- No errors occurred
63
* RET_BAD_NULL_PARAM -- A Pointer
64
* Paramater was set to NULL
66
* PURPOSE: : Flush the TLB entry pointed by the
67
* lock counter register
68
* even if this entry is set protected
70
* METHOD: : Check the Input parameter and Flush a
71
* single entry in the TLB.
73
static hw_status mmu_flush_entry(const void __iomem *base_address);
76
* FUNCTION : mmu_set_cam_entry
80
* Identifier : base_address
82
* Description : Base Address of instance of MMU module
84
* Identifier : page_sz
86
* Description : It indicates the page size
88
* Identifier : preserved_bit
90
* Description : It indicates the TLB entry is preserved entry
93
* Identifier : valid_bit
95
* Description : It indicates the TLB entry is valid entry or not
98
* Identifier : virtual_addr_tag
100
* Description : virtual Address
105
* Description : 0 -- No errors occurred
106
* RET_BAD_NULL_PARAM -- A Pointer Paramater
108
* RET_PARAM_OUT_OF_RANGE -- Input Parameter out
111
* PURPOSE: : Set MMU_CAM reg
113
* METHOD: : Check the Input parameters and set the CAM entry.
115
static hw_status mmu_set_cam_entry(const void __iomem *base_address,
117
const u32 preserved_bit,
119
const u32 virtual_addr_tag);
122
* FUNCTION : mmu_set_ram_entry
126
* Identifier : base_address
128
* Description : Base Address of instance of MMU module
130
* Identifier : physical_addr
132
* Description : Physical Address to which the corresponding
133
* virtual Address shouldpoint
135
* Identifier : endianism
136
* Type : hw_endianism_t
137
* Description : endianism for the given page
139
* Identifier : element_size
140
* Type : hw_element_size_t
141
* Description : The element size ( 8,16, 32 or 64 bit)
143
* Identifier : mixed_size
144
* Type : hw_mmu_mixed_size_t
145
* Description : Element Size to follow CPU or TLB
150
* Description : 0 -- No errors occurred
151
* RET_BAD_NULL_PARAM -- A Pointer Paramater
153
* RET_PARAM_OUT_OF_RANGE -- Input Parameter
156
* PURPOSE: : Set MMU_CAM reg
158
* METHOD: : Check the Input parameters and set the RAM entry.
160
static hw_status mmu_set_ram_entry(const void __iomem *base_address,
161
const u32 physical_addr,
162
enum hw_endianism_t endianism,
163
enum hw_element_size_t element_size,
164
enum hw_mmu_mixed_size_t mixed_size);
168
hw_status hw_mmu_enable(const void __iomem *base_address)
170
hw_status status = 0;
172
MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
177
hw_status hw_mmu_disable(const void __iomem *base_address)
179
hw_status status = 0;
181
MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
186
hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
187
u32 num_locked_entries)
189
hw_status status = 0;
191
MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
196
hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
197
u32 victim_entry_num)
199
hw_status status = 0;
201
MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
206
hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
208
hw_status status = 0;
210
MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
215
hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
217
hw_status status = 0;
220
irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
222
MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
227
hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
229
hw_status status = 0;
232
irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
234
MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
239
hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
241
hw_status status = 0;
243
*irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
248
hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
250
hw_status status = 0;
252
/* read values from register */
253
*addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
258
hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
260
hw_status status = 0;
263
load_ttb = ttb_phys_addr & ~0x7FUL;
264
/* write values to register */
265
MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
270
hw_status hw_mmu_twl_enable(const void __iomem *base_address)
272
hw_status status = 0;
274
MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
279
hw_status hw_mmu_twl_disable(const void __iomem *base_address)
281
hw_status status = 0;
283
MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
288
hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
291
hw_status status = 0;
292
u32 virtual_addr_tag;
293
enum hw_mmu_page_size_t pg_size_bits;
296
case HW_PAGE_SIZE4KB:
297
pg_size_bits = HW_MMU_SMALL_PAGE;
300
case HW_PAGE_SIZE64KB:
301
pg_size_bits = HW_MMU_LARGE_PAGE;
304
case HW_PAGE_SIZE1MB:
305
pg_size_bits = HW_MMU_SECTION;
308
case HW_PAGE_SIZE16MB:
309
pg_size_bits = HW_MMU_SUPERSECTION;
316
/* Generate the 20-bit tag from virtual address */
317
virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
319
mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
321
mmu_flush_entry(base_address);
326
hw_status hw_mmu_tlb_add(const void __iomem *base_address,
331
struct hw_mmu_map_attrs_t *map_attrs,
332
s8 preserved_bit, s8 valid_bit)
334
hw_status status = 0;
336
u32 virtual_addr_tag;
337
enum hw_mmu_page_size_t mmu_pg_size;
339
/*Check the input Parameters */
341
case HW_PAGE_SIZE4KB:
342
mmu_pg_size = HW_MMU_SMALL_PAGE;
345
case HW_PAGE_SIZE64KB:
346
mmu_pg_size = HW_MMU_LARGE_PAGE;
349
case HW_PAGE_SIZE1MB:
350
mmu_pg_size = HW_MMU_SECTION;
353
case HW_PAGE_SIZE16MB:
354
mmu_pg_size = HW_MMU_SUPERSECTION;
361
lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
363
/* Generate the 20-bit tag from virtual address */
364
virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
366
/* Write the fields in the CAM Entry Register */
367
mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
370
/* Write the different fields of the RAM Entry Register */
371
/* endianism of the page,Element Size of the page (8, 16, 32, 64 bit) */
372
mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
373
map_attrs->element_size, map_attrs->mixed_size);
375
/* Update the MMU Lock Register */
376
/* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
377
MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
379
/* Enable loading of an entry in TLB by writing 1
380
into LD_TLB_REG register */
381
MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
383
MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
388
hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
391
u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
393
hw_status status = 0;
394
u32 pte_addr, pte_val;
398
case HW_PAGE_SIZE4KB:
399
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
401
MMU_SMALL_PAGE_MASK);
403
((physical_addr & MMU_SMALL_PAGE_MASK) |
404
(map_attrs->endianism << 9) | (map_attrs->
406
(map_attrs->mixed_size << 11) | 2);
409
case HW_PAGE_SIZE64KB:
411
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
413
MMU_LARGE_PAGE_MASK);
415
((physical_addr & MMU_LARGE_PAGE_MASK) |
416
(map_attrs->endianism << 9) | (map_attrs->
418
(map_attrs->mixed_size << 11) | 1);
421
case HW_PAGE_SIZE1MB:
422
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
424
MMU_SECTION_ADDR_MASK);
426
((((physical_addr & MMU_SECTION_ADDR_MASK) |
427
(map_attrs->endianism << 15) | (map_attrs->
428
element_size << 10) |
429
(map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
432
case HW_PAGE_SIZE16MB:
434
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
436
MMU_SSECTION_ADDR_MASK);
438
(((physical_addr & MMU_SSECTION_ADDR_MASK) |
439
(map_attrs->endianism << 15) | (map_attrs->
440
element_size << 10) |
441
(map_attrs->mixed_size << 17)
445
case HW_MMU_COARSE_PAGE_SIZE:
446
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
448
MMU_SECTION_ADDR_MASK);
449
pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
456
while (--num_entries >= 0)
457
((u32 *) pte_addr)[num_entries] = pte_val;
462
hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
464
hw_status status = 0;
469
case HW_PAGE_SIZE4KB:
470
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
472
MMU_SMALL_PAGE_MASK);
475
case HW_PAGE_SIZE64KB:
477
pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
479
MMU_LARGE_PAGE_MASK);
482
case HW_PAGE_SIZE1MB:
483
case HW_MMU_COARSE_PAGE_SIZE:
484
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
486
MMU_SECTION_ADDR_MASK);
489
case HW_PAGE_SIZE16MB:
491
pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
493
MMU_SSECTION_ADDR_MASK);
500
while (--num_entries >= 0)
501
((u32 *) pte_addr)[num_entries] = 0;
506
/* mmu_flush_entry */
507
static hw_status mmu_flush_entry(const void __iomem *base_address)
509
hw_status status = 0;
510
u32 flush_entry_data = 0x1;
512
/* write values to register */
513
MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
518
/* mmu_set_cam_entry */
519
static hw_status mmu_set_cam_entry(const void __iomem *base_address,
521
const u32 preserved_bit,
523
const u32 virtual_addr_tag)
525
hw_status status = 0;
528
mmu_cam_reg = (virtual_addr_tag << 12);
529
mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
530
(preserved_bit << 3);
532
/* write values to register */
533
MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
538
/* mmu_set_ram_entry */
539
static hw_status mmu_set_ram_entry(const void __iomem *base_address,
540
const u32 physical_addr,
541
enum hw_endianism_t endianism,
542
enum hw_element_size_t element_size,
543
enum hw_mmu_mixed_size_t mixed_size)
545
hw_status status = 0;
548
mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
549
mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
552
/* write values to register */
553
MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
559
void hw_mmu_tlb_flush_all(const void __iomem *base)
561
__raw_writel(1, base + MMU_GFLUSH);