 * Please note that the implementation of these, and the required
 * effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 * flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 *	Currently only needed for cache-v6.S and cache-v7.S, see
 *	__flush_icache_all for the generic implementation.
 *
 * flush_kern_all()
 *
 *	Unconditionally clean and invalidate the entire cache.
 *
 * flush_user_all()
 *
 *	Clean and invalidate all user space cache entries
 *	before a change of page tables.
 *
 * flush_user_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address space before a change of page tables.
 *	- start - virtual start address
 *	- end   - virtual end address
 *	- flags - vma->vm_flags field
 *
 * coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *	- start - virtual start address
 *	- end   - virtual end address
 *
 * flush_kern_dcache_area(kaddr, size)
 *
 *	Ensure that the data held in the page kaddr is written back.
 *	- kaddr - page address
 *	- size  - region size
 *
 * DMA Cache Coherency
 * ===================
 *
 * dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *	- start - virtual start address
 *	- end   - virtual end address
 *
 * dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *	- start - virtual start address
 *	- end   - virtual end address
 *
 * dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 */
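/*
 * Illustrative sketch (not part of the original header): the intended
 * pairing of these primitives around a DMA transfer.  Drivers normally
 * reach them through the dma-mapping API (dma_map_single() and friends)
 * rather than calling them directly:
 *
 *	dmac_clean_range(buf, buf + len);  // CPU wrote buf; device will read it
 *	... start DMA; device reads buf from RAM ...
 *
 *	dmac_inv_range(buf, buf + len);    // device wrote buf; discard stale lines
 *	... CPU now reads up-to-date data ...
 */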
struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);
	void (*dma_flush_range)(const void *, const void *);
};
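/*
 * Illustrative sketch (not part of the original header): under
 * MULTI_CACHE, each supported cache type provides one instance of this
 * structure (e.g. assembled in arch/arm/mm/cache-v7.S), and the matching
 * instance is copied into 'cpu_cache' when the processor is probed at
 * boot, roughly:
 *
 *	cpu_cache = *list->cache;	// from the matching proc_info_list
 *	cpu_cache.flush_kern_all();	// dispatches to, e.g., v7_flush_kern_cache_all
 */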
struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};
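/*
 * Illustrative sketch (hypothetical handler names): an outer (L2) cache
 * driver such as the L2x0/PL310 driver fills this in at init time; the
 * handlers operate on physical addresses:
 *
 *	outer_cache.inv_range   = l2x0_inv_range;
 *	outer_cache.clean_range = l2x0_clean_range;
 *	outer_cache.flush_range = l2x0_flush_range;
 */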
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;
#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area
/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range
#else

#define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
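/*
 * For example (illustrative): on a kernel built for a single cache type
 * where _CACHE expands to 'v7', __cpuc_flush_kern_all resolves at
 * compile time to v7_flush_kern_cache_all from arch/arm/mm/cache-v7.S,
 * with no run-time indirection through cpu_cache.
 */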
extern void __cpuc_flush_icache_all(void);
extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);
extern void __cpuc_flush_dcache_area(void *, size_t);
/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_map_area			__glue(_CACHE,_dma_map_area)
#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)
extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
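/*
 * Illustrative sketch (not part of the original header): dmac_map_area()
 * and dmac_unmap_area() take a DMA direction, which lets the per-CPU
 * implementation choose between cleaning and invalidating:
 *
 *	dmac_map_area(buf, len, DMA_FROM_DEVICE);	// before the transfer
 *	... device DMAs into buf ...
 *	dmac_unmap_area(buf, len, DMA_FROM_DEVICE);	// before the CPU reads buf
 */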
#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}
#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
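/*
 * Illustrative sketch (hypothetical helper): with an outer cache the
 * inner cache must be maintained first, and the outer_* operations take
 * physical addresses:
 *
 *	void example_clean_for_dma(void *buf, size_t len)
 *	{
 *		dmac_clean_range(buf, buf + len);		// inner, virtual
 *		outer_clean_range(__pa(buf), __pa(buf) + len);	// outer, physical
 *	}
 */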
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
extern void copy_to_user_page(struct vm_area_struct *, struct page *,
	unsigned long, void *, const void *, unsigned long);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
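/*
 * Illustrative sketch (hypothetical caller): ptrace pokes go through
 * access_process_vm(), which uses copy_to_user_page() so that, e.g., a
 * breakpoint written into another task's text page becomes visible to
 * the I-cache:
 *
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 */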
/*
 * Convert calls to our calling convention.
 */

/* Invalidate I-cache */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate I-cache inner shareable */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));
/*
 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
 * will fall through to use __flush_icache_all_generic.
 */
#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) ||	\
	defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

static inline void __flush_icache_all(void)
{
	__flush_icache_preferred();
}
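/*
 * For example (illustrative): an SMP ARMv7-only build resolves
 * __flush_icache_preferred to __flush_icache_all_v7_smp(), i.e. a single
 * inner-shareable ICIALLUIS operation, while a kernel that must support
 * both ARMv6 and ARMv7 defers the choice to run time via
 * __cpuc_flush_icache_all.
 */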
#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}
static inline void
vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#define flush_ptrace_access(vma,page,ua,ka,len,write) \
		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)
/*
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
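/*
 * Illustrative sketch (hypothetical caller): code that writes to a page
 * cache page through the kernel mapping calls flush_dcache_page() so the
 * data is visible through any user mappings:
 *
 *	memcpy(page_address(page), data, len);
 *	flush_dcache_page(page);
 */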
static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
		__cpuc_flush_dcache_area(addr, (size_t)size);
}
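/*
 * Illustrative sketch (hypothetical caller): a filesystem writing
 * through a vmap() alias flushes that alias before I/O is issued on the
 * underlying pages:
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	memset(va, 0, nr * PAGE_SIZE);
 *	flush_kernel_vmap_range(va, nr * PAGE_SIZE);
 *	vunmap(va);
 */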
#define ARCH_HAS_FLUSH_ANON_PAGE
#define flush_icache_page(vma,page) do { } while (0)
static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
	unsigned offset, size_t size)
{
	const void *start = (void __force *)virt + offset;

	dmac_inv_range(start, start + size);
}
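/*
 * Illustrative sketch (hypothetical names): a platform polling a DMA
 * descriptor ring through an ioremap()ed window could discard stale
 * lines for just one descriptor before reading it:
 *
 *	flush_ioremap_region(ring_phys, ring_virt,
 *			     i * sizeof(struct ring_desc),
 *			     sizeof(struct ring_desc));
 */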
/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT