/* Clean (write back) a single object from D-cache to the point of unification. */
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
/* Invalidate a single object in D-cache so subsequent reads see external updates. */
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
/*
 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
 * To do so we must:
 *
 * - Clear the SCTLR.C bit to prevent further cache allocations
 * - Flush the desired level of cache
 * - Clear the ACTLR "SMP" bit to disable local coherency
 *
 * ... and so without any intervening memory access in between those steps,
 * not even to the stack.
 *
 * WARNING -- After this has been called:
 *
 * - No ldrex/strex (and similar) instructions must be used.
 * - The CPU is obviously no longer coherent with the other CPUs.
 * - This is unlikely to work as expected if Linux is running non-secure.
 *
 * Note:
 *
 * - This is known to apply to several ARMv7 processor implementations,
 *   however some exceptions may exist.  Caveat emptor.
 *
 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 *   fp is preserved to the stack explicitly prior disabling the cache
 *   since adding it to the clobber list is incompatible with having
 *   CONFIG_FRAME_POINTER=y.  ip is saved as well if ever r12-clobbering
 *   trampolines are inserted by the linker and to keep sp 64-bit aligned.
 */
#define v7_exit_coherency_flush(level) \
	asm volatile( \
	/* Preserve fp/ip on the stack BEFORE caches go off (see comment above). */ \
	"stmfd	sp!, {fp, ip} \n\t" \
	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
	/* isb: ensure the SCTLR.C write takes effect before the flush runs. */ \
	"isb	\n\t" \
	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
	/* isb+dsb: synchronize the ACTLR write and drain outstanding accesses */ \
	/* before touching memory (the stack) again. */ \
	"isb	\n\t" \
	"dsb	\n\t" \
	"ldmfd	sp!, {fp, ip}" \
	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
	      "r9","r10","lr","memory" )