/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/abs_addr.h>
#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif
#define HPTE_LOCK_BIT 3

DEFINE_RAW_SPINLOCK(native_tlbie_lock);
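/*
 * Note: HPTE_LOCK_BIT is a software-use bit in the first doubleword of a
 * hash PTE; taking it with a locked bitop serializes updates to that one
 * entry.  native_tlbie_lock, by contrast, serializes the tlbie instruction
 * itself, for hardware that cannot tolerate two processors issuing tlbie
 * concurrently (see the comment in native_hpte_clear() below).
 */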
static inline void __tlbie(unsigned long va, int psize, int ssize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		va |= ssize << 8;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}
static inline void __tlbiel(unsigned long va, int psize, int ssize)
{
	unsigned int penc;

	/* clear top 16 bits, non SLS segment */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		va &= ~0xffful;
		va |= ssize << 8;
		/* tlbiel is hand-encoded (.long 0x7c000224): %0 expands to
		 * the number of the register holding va (the RB field) and
		 * the << 21 term is the L (large page) bit, 0 here. */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		penc = mmu_psize_defs[psize].penc;
		va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}
static inline void tlbie(unsigned long va, int psize, int ssize, int local)
{
	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(va, psize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(va, psize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}
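/*
 * Note: tlbiel only invalidates the translation on the executing CPU, so
 * it needs just a trailing ptesync.  The broadcast tlbie must be followed
 * by eieio; tlbsync; ptesync to guarantee completion on all processors,
 * and is globally serialized unless the CPU advertises
 * MMU_FTR_LOCKLESS_TLBIE.
 */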
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while(test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}
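/*
 * This is the classic test-and-test-and-set pattern: the atomic
 * test_and_set_bit_lock() is only retried after a plain read has seen the
 * bit clear, so a contended lock spins on a cache-local read instead of
 * bouncing the cacheline with atomic operations.
 */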
static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = &hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}
static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, va=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, va, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (! (hptep->v & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (! (hptep->v & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(va, psize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = hpte_r;
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = hpte_v;

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}
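/*
 * The return value packs the slot number within the group into the low
 * three bits (HPTES_PER_GROUP is 8) and, in bit 3, whether the entry sits
 * in the secondary hash group; callers record this index so the entry can
 * be located again cheaply.  -1 means the group was full and the caller
 * must evict an entry (see native_hpte_remove() below) before retrying.
 */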
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = hptep->v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}
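/*
 * Starting the eviction scan at a pseudo-random slot (the low bits of the
 * timebase read by mftb()) spreads evictions across the group, so repeated
 * insert pressure does not keep stealing the same entry.  Bolted entries
 * are never evicted.
 */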
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long va, int psize, int ssize,
				 int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0;

	want_v = hpte_encode_v(va, psize, ssize);

	DBG_LOW("    update(va=%016lx, avpnv=%016lx, hash=%016lx, newpp=%x)",
		va, want_v & HPTE_V_AVPN, slot, newpp);

	native_lock_hpte(hptep);

	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		DBG_LOW(" -> hit\n");
		/* Update the HPTE */
		hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
			(newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
	}
	native_unlock_hpte(hptep);

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, ssize, local);

	return ret;
}
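/*
 * Note: the tlbie is issued even on a miss.  The entry may already have
 * been replaced by the time we get here, but stale translations with the
 * old permissions could still sit in the TLB, so invalidating
 * unconditionally is the safe choice.
 */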
static long native_hpte_find(unsigned long va, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(va, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_v(va, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = hptep->v;

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}
/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vsid, va;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	va = hpt_va(ea, vsid, ssize);

	slot = native_hpte_find(va, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N));

	/* Ensure it is out of the tlb too. */
	tlbie(va, psize, ssize, 0);
}
static void native_hpte_invalidate(unsigned long slot, unsigned long va,
				   int psize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(va=%016lx, hash: %x)\n", va, slot);

	want_v = hpte_encode_v(va, psize, ssize);
	native_lock_hpte(hptep);
	hpte_v = hptep->v;

	/* Even if we miss, we need to invalidate the TLB */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(va, psize, ssize, local);

	local_irq_restore(flags);
}
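/*
 * Clearing hptep->v both invalidates the entry and, because HPTE_LOCK_BIT
 * lives in that same doubleword, releases the per-entry lock in a single
 * store; only the miss path needs an explicit native_unlock_hpte().
 */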
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *ssize, unsigned long *va)
{
	unsigned long hpte_r = hpte->r;
	unsigned long hpte_v = hpte->v;
	unsigned long avpn;
	int i, size, shift, penc;

	if (!(hpte_v & HPTE_V_LARGE))
		size = MMU_PAGE_4K;
	else {
		for (i = 0; i < LP_BITS; i++) {
			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
				break;
		}

		penc = LP_MASK(i+1) >> LP_SHIFT;
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* 4K pages are not represented by LP */
			if (size == MMU_PAGE_4K)
				continue;

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			if (penc == mmu_psize_defs[size].penc)
				break;
		}
	}

	/* This works for all page sizes, and for 256M and 1T segments */
	shift = mmu_psize_defs[size].shift;
	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm) << 23;

	if (shift < 23) {
		unsigned long vpi, vsid, pteg;

		pteg = slot / HPTES_PER_GROUP;
		if (hpte_v & HPTE_V_SECONDARY)
			pteg = ~pteg;
		switch (hpte_v >> HPTE_V_SSIZE_SHIFT) {
		case MMU_SEGSIZE_256M:
			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
			break;
		case MMU_SEGSIZE_1T:
			vsid = avpn >> 40;
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			break;
		default:
			avpn = vpi = size = 0;
		}
		avpn |= (vpi << mmu_psize_defs[size].shift);
	}

	*va = avpn;
	*psize = size;
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
}
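/*
 * Worked example (illustrative): a 16M page typically has penc 0, so all
 * LP bits in hpte_r are clear.  The scan above then never breaks, i
 * reaches LP_BITS, LP_MASK(LP_BITS + 1) evaluates to 0, and penc decodes
 * to 0, which matches mmu_psize_defs[MMU_PAGE_16M].penc.  For page sizes
 * below 8M (shift < 23), the low bits of the VA are not held in the AVPN
 * at all and must be recomputed by inverting the hash, which is why the
 * slot's PTEG index feeds back into the decode.
 */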
/*
 * clear all mappings on kexec.  All cpus are in real mode (or they will
 * be when they isi), and we are the only one left.  We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * TODO: add batching support when enabled.  remember, no dynamic memory here,
 * although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long slot, slots, flags;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v, va;
	unsigned long pteg_count;
	int psize, ssize;

	pteg_count = htab_hash_mask + 1;

	local_irq_save(flags);

	/* we take the tlbie lock and hold it.  Some hardware will
	 * deadlock if we try to tlbie from two processors at once.
	 */
	raw_spin_lock(&native_tlbie_lock);

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running,  right?  and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = hptep->v;

		/*
		 * Call __tlbie() here rather than tlbie() since we
		 * already hold the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &ssize, &va);
			hptep->v = 0;
			__tlbie(va, psize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
	raw_spin_unlock(&native_tlbie_lock);
	local_irq_restore(flags);
}
/*
 * Batched hash table flush, we batch the tlbie's to avoid taking/releasing
 * the lock all the time
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long va, hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		va = batch->vaddr[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, va, index, shift) {
			hash = hpt_hash(va, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_v(va, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = hptep->v;
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				/* Invalidate the hpte. NOTE: this also unlocks it */
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbiel(va, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			va = batch->vaddr[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize, va, index,
						    shift) {
				__tlbie(va, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}
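/*
 * Note: compared with calling tlbie() once per page, the batch pays for a
 * single lock acquisition, one ptesync and (in the broadcast case) one
 * eieio; tlbsync; ptesync sequence for the whole set of invalidations,
 * which is the point of this path.
 */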
#ifdef CONFIG_PPC_PSERIES
/* Disable TLB batching on nighthawk */
static inline int tlb_batching_enabled(void)
{
	struct device_node *root = of_find_node_by_path("/");
	int rc = 1;

	if (root) {
		const char *model = of_get_property(root, "model", NULL);
		if (model && !strcmp(model, "IBM,9076-N81"))
			rc = 0;
		of_node_put(root);
	}

	return rc;
}
#else
static inline int tlb_batching_enabled(void)
{
	return 1;
}
#endif
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	if (tlb_batching_enabled())
		ppc_md.flush_hash_range = native_flush_hash_range;
}
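/*
 * Note: when batching is disabled, ppc_md.flush_hash_range is left NULL
 * and the generic hash-MMU flush code falls back to invalidating entries
 * one at a time through ppc_md.hpte_invalidate.
 */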