2
** Proll (PROM replacement)
3
** system.c: shared miscellanea.
4
** Copyright 1999 Pete Zaitcev
5
** This code is licensed under GNU General Public License.
11
#define NULL ((void*)0)
17
#include <timer.h> /* Local copy of 2.2 style include */
18
#include <general.h> /* __P() */
19
#include <net.h> /* init_net() */
20
#include <romlib.h> /* we are a provider for part of this. */
21
#include <netpriv.h> /* myipaddr */
23
#include <system.h> /* our own prototypes */
28
/* Copy of the machine IDPROM contents (host ID, Ethernet address, machine
 * type); size IDPROM_SIZE comes from a project header not in this extract. */
char idprom[IDPROM_SIZE];
32
/* NOTE(review): lossy extract — bare-number lines are the original file's
 * line numbers and the gaps between them mark missing source lines.
 * Code is kept verbatim. */
* Create an I/O mapping to pa[size].
33
* Returns va of the mapping or 0 if unsuccessful.
36
map_io(unsigned pa, int size)
43
/* Byte offset of pa within its page. */
off = pa & (PAGE_SIZE-1);
44
/* Number of whole pages needed to cover [pa, pa+size). */
npages = (off + size + (PAGE_SIZE-1)) / PAGE_SIZE;
47
/* Reserve page-aligned virtual space from the I/O arena (cio). */
va = mem_alloc(&cio, npages*PAGE_SIZE, PAGE_SIZE);
48
if (va == 0) return va;
50
mva = (unsigned int) va;
51
/* printk("map_io: va 0x%x pa 0x%x off 0x%x npages %d\n", va, pa, off, npages); */ /* P3 */
52
/* Map each page as I/O (type=1) into the live tables (pmem).
 * NOTE(review): the loop-body lines that advance mva/pa are missing
 * from this extract. */
while (npages-- != 0) {
53
map_page(pmem.pl1, mva, pa, 1, pmem.pbas);
58
/* Return the mapped va adjusted by the intra-page offset. */
return (void *)((unsigned int)va + off);
62
/* NOTE(review): lossy extract — bare-number lines are original line
 * numbers; gaps mark missing source lines. Code kept verbatim. */
* Tablewalk routine used for testing.
66
proc_tablewalk(int ctx, unsigned int va)
70
/* Read the SRMMU Context Table Pointer register.
 * NOTE(review): the asm output-operand line is missing from this extract;
 * presumably it loads into pa1 — confirm against the full source. */
__asm__ __volatile__ ("lda [%1] %2, %0" :
72
"r" (AC_M_CTPR), "i" (ASI_M_MMUREGS));
73
/* printk(" ctpr %x ctx %x\n", pa1, ctx); */ /* P3 */
75
/* Fetch the context-table entry for ctx (4 bytes per entry) via bypass load. */
pa1 = ld_bypass(pa1 + (ctx << 2));
76
/* Entry-type field of 0 marks an invalid entry. */
if ((pa1 & 0x03) == 0) goto invalid;
77
/* PTD holds the table's physical address shifted right by 4. */
return mem_tablewalk((pa1 & 0xFFFFFFF0) << 4, va);
80
printk(" invalid %x\n", pa1);
85
/* NOTE(review): lossy extract — bare-number lines are original line
 * numbers; gaps mark missing source lines. Code kept verbatim. */
* Walk the tables in memory, starting at physical address pa.
88
mem_tablewalk(unsigned int pa, unsigned int va)
92
printk("pa %x va %x", pa, va);
93
/* Level-1 lookup: index is va[31:24], 4 bytes per entry. */
pa1 = ld_bypass(pa + (((va&0xFF000000)>>24) << 2));
94
/* Entry-type 0 = invalid. */
if ((pa1 & 0x03) == 0) goto invalid;
95
printk(" l1 %x", pa1);
96
/* Turn the PTD into the next table's physical address (ptp << 4, aligned). */
pa1 <<= 4; pa1 &= 0xFFFFFF00;
97
/* Level-2 lookup: index is va[23:18]. */
pa1 = ld_bypass(pa1 + (((va&0x00FC0000)>>18) << 2));
98
if ((pa1 & 0x03) == 0) goto invalid;
99
printk(" l2 %x", pa1);
100
pa1 <<= 4; pa1 &= 0xFFFFFF00;
101
/* Level-3 lookup: index is va[17:12]. */
pa1 = ld_bypass(pa1 + (((va&0x0003F000)>>12) << 2));
102
if ((pa1 & 0x03) == 0) goto invalid;
103
printk(" l3 %x", pa1);
104
/* Remaining va[11:0] is the offset within the 4K page. */
printk(" off %x\n", va&0x00000FFF);
107
printk(" invalid %x\n", pa1);
112
/* NOTE(review): lossy extract — bare-number lines are original line
 * numbers; gaps mark missing source lines. Code kept verbatim.
 * The comment below says "returns pointer to context table" but the
 * visible signature is void — confirm against the full source. */
* Make CPU page tables.
113
* Returns pointer to context table.
114
* Here we ignore memory allocation errors which "should not happen"
115
* because we cannot print anything anyways if memory initialization fails.
117
void makepages(struct phym *t, unsigned int highbase)
119
unsigned int *ctp, *l1, pte;
123
/* Zeroed context table and level-1 table, each self-aligned as SRMMU requires. */
ctp = mem_zalloc(&cmem, NCTX_SWIFT*sizeof(int), NCTX_SWIFT*sizeof(int));
124
l1 = mem_zalloc(&cmem, 256*sizeof(int), 256*sizeof(int));
126
/* PTD pointing at l1; v2p translation is (va - PROLBASE + highbase). */
pte = SRMMU_ET_PTD | (((unsigned int)l1 - PROLBASE + highbase) >> 4);
127
/* Every context shares the same level-1 table.
 * NOTE(review): loop body line missing from this extract. */
for (i = 0; i < NCTX_SWIFT; i++) {
132
/* Map the PROM text region. NOTE(review): initialization of pa
 * and the loop bodies advancing pa are missing from this extract. */
for (va = PROLBASE; va < PROLDATA; va += PAGE_SIZE) {
133
map_page(l1, va, pa, 0, highbase);
136
/* Map the PROM data region. */
pa = highbase + PROLDATA - PROLBASE;
137
for (va = PROLDATA; va < PROLBASE + PROLSIZE; va += PAGE_SIZE) {
138
map_page(l1, va, pa, 0, highbase);
142
/* We need to start from LOADBASE, but kernel wants PAGE_SIZE. */
144
/* Identity-style mapping of low memory for the kernel load area. */
for (va = 0; va < LOWMEMSZ; va += PAGE_SIZE) {
145
map_page(l1, va, pa, 0, highbase);
155
/* NOTE(review): lossy extract — bare-number lines are original line
 * numbers; gaps mark missing source lines. Code kept verbatim. */
* Create a memory mapping from va to epa in page table pgd.
156
* highbase is used for v2p translation.
159
map_page(unsigned int *pgd, unsigned int va,
160
unsigned int epa, int type, unsigned int highbase)
166
/* Level-1 (pgd) entry for va. */
pte = pgd[((va)>>SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD-1)];
167
if ((pte & SRMMU_ET_MASK) == SRMMU_ET_INVALID) {
168
/* Allocate a zeroed, self-aligned level-2 (pmd) table on demand. */
p = mem_zalloc(&cmem, SRMMU_PTRS_PER_PMD*sizeof(int),
169
SRMMU_PTRS_PER_PMD*sizeof(int));
170
if (p == 0) goto drop;
172
/* NOTE(review): the line building the PTD (pte = SRMMU_ET_PTD | ...)
 * is missing; only its continuation is visible here. */
(((unsigned int)p - PROLBASE + highbase) >> 4);
173
pgd[((va)>>SRMMU_PGDIR_SHIFT) & (SRMMU_PTRS_PER_PGD-1)] = pte;
177
/* Physical address of the level-2 entry for va. */
pa = ((pte & 0xFFFFFFF0) << 4);
178
pa += (((va)>>SRMMU_PMD_SHIFT & (SRMMU_PTRS_PER_PMD-1)) << 2);
180
if ((pte & SRMMU_ET_MASK) == SRMMU_ET_INVALID) {
181
/* Allocate a zeroed, self-aligned level-3 (pte) table on demand. */
p = mem_zalloc(&cmem, SRMMU_PTRS_PER_PTE*sizeof(int),
182
SRMMU_PTRS_PER_PTE*sizeof(int));
183
if (p == 0) goto drop;
185
/* NOTE(review): PTD-building line missing here as well. */
(((unsigned int)p - PROLBASE + highbase) >> 4);
189
/* Physical address of the level-3 entry for va. */
pa = ((pte & 0xFFFFFFF0) << 4);
190
pa += (((va)>>PAGE_SHIFT & (SRMMU_PTRS_PER_PTE-1)) << 2);
192
/* Build the leaf PTE: physical page number is epa >> 4. */
pte = SRMMU_ET_PTE | ((epa & PAGE_MASK) >> 4);
193
if (type) { /* I/O */
195
/* SRMMU cannot make Supervisor-only, but not executable */
197
} else { /* memory */
198
pte |= SRMMU_REF|SRMMU_CACHE;
199
pte |= SRMMU_PRIV; /* Supervisor only access */
209
/* NOTE(review): lossy extract — bare-number lines are original line
 * numbers; gaps mark missing source lines. Code kept verbatim. */
* Switch page tables.
212
init_mmu_swift(unsigned int ctp_phy)
219
/* Invalidate cache tags: 0x2000 bytes of tag space, one tag per 0x10. */
for (addr = 0; addr < 0x2000; addr += 0x10) {
220
/* Clear a data-cache tag. */
__asm__ __volatile__ ("sta %%g0, [%0] %1\n\t" : :
221
"r" (addr), "i" (ASI_M_DATAC_TAG));
222
/* Clear an instruction-cache tag (tag stride differs, hence addr<<1). */
__asm__ __volatile__ ("sta %%g0, [%0] %1\n\t" : :
223
"r" (addr<<1), "i" (ASI_M_TXTC_TAG));
230
/* printk("done flushing, switching to %x\n", ctp_phy); */
231
/* Point the MMU at the new context table (write CTPR). */
__asm__ __volatile__ ("sta %0, [%1] %2\n\t" : :
232
"r" (ctp_phy), "r" (AC_M_CTPR), "i" (ASI_M_MMUREGS));
235
* Flush old page table references
237
/* Flush stale TLB entries via the flush/probe ASI. */
__asm__ __volatile__ ("sta %%g0, [%0] %1\n\t" : :
238
"r" (0x400), "i" (ASI_M_FLUSH_PROBE) : "memory");
242
* add_timer, del_timer
243
* This should go into sched.c, but we have it split for different archs.
245
/* Doubly-linked list of pending timers (head/tail anchors). */
struct timer_list_head {
246
struct timer_list *head, *tail;
249
static struct timer_list_head timers; /* Anonymous heap of timers */
251
/* Queue a timer on the global list.
 * NOTE(review): lossy extract — the actual append lines (original
 * lines 256-268) are missing; only the double-add check and the
 * tail test are visible. Code kept verbatim. */
void add_timer(struct timer_list *timer) {
252
struct timer_list *p;
253
/* A linked timer has non-NULL prev/next — adding it again is a bug. */
if (timer->prev != NULL || timer->next != NULL) {
254
printk("bug: kernel timer added twice at 0x%x.\n",
255
__builtin_return_address(0));
258
/* Append at the tail if the list is non-empty. */
if ((p = timers.tail) != NULL) {
269
/* Unlink a timer from the global list.
 * Returns whether the timer was actually linked (visible as ret below).
 * NOTE(review): lossy extract — lines clearing timer->prev/next and the
 * return statement (original lines 278-283) are missing. Code kept verbatim. */
int del_timer(struct timer_list *timer) {
270
struct timer_list *p;
273
/* Fix up list anchors if this timer was at either end. */
if (timers.head == timer) timers.head = timer->next;
274
if (timers.tail == timer) timers.tail = timer->prev;
275
/* Splice neighbors together around the removed timer. */
if ((p = timer->prev) != NULL) p->next = timer->next;
276
if ((p = timer->next) != NULL) p->prev = timer->prev;
277
/* Remember whether it was linked before the (missing) pointer reset. */
ret = timer->next != 0 || timer->prev != 0;
284
/* NOTE(review): fragment of the timer-dispatch routine — the enclosing
 * function's signature (original lines 278-287) is missing from this
 * extract. Appears to scan the timer list, remove expired entries and
 * invoke their callbacks — confirm against the full source. */
struct timer_list *p;
288
if (p->expires < jiffies) {
289
del_timer(p); /* XXX make nonstatic member */
290
(*p->function)(p->data);
299
* Allocate memory. This is reusable.
301
/* Initialize a bump allocator arena over [begin, limit).
 * NOTE(review): body (original lines 302-307) missing from this extract. */
void mem_init(struct mem *t, char *begin, char *limit)
308
/* Tear down / reset an arena.
 * NOTE(review): body (original lines 309-312) missing from this extract. */
void mem_fini(struct mem *t)
313
/* Bump-allocate size bytes aligned to align (mask trick implies align
 * must be a power of two). Returns 0 when the arena is exhausted.
 * NOTE(review): lines advancing t->curp and returning p are missing
 * from this extract. */
void *mem_alloc(struct mem *t, int size, int align)
317
/* Round the current pointer up to the requested alignment. */
p = (char *)((((unsigned int)t->curp) + (align-1)) & ~(align-1));
318
if (p >= t->uplim || p + size > t->uplim) return 0;
323
/* Like mem_alloc, but zero-fills the returned block.
 * NOTE(review): the return statement is missing from this extract. */
void *mem_zalloc(struct mem *t, int size, int align)
327
if ((p = mem_alloc(t, size, align)) != 0) bzero(p, size);
334
/*
 * Zero len bytes starting at s.
 *
 * The original body used *((char *)s)++, a cast-as-lvalue GCC extension
 * that is a constraint violation in ISO C and was removed from GCC long
 * ago; use a local pointer instead. Behavior is identical.
 */
void bzero(void *s, int len) {
	char *cp = s;

	while (len--) *cp++ = 0;
}
338
void bcopy(const void *f, void *t, int len) {
339
while (len--) *((char *)t)++ = *((char *)f)++;
342
/* Comparison is 7-bit */
343
/* Compare len bytes; visible logic returns the (signed char) difference
 * of the first mismatching pair.
 * NOTE(review): lossy extract — loop structure and return statements
 * (original lines 344-348, 351-357) are missing; the casts also use the
 * removed GCC cast-as-lvalue extension, kept verbatim here. */
int bcmp(const void *s1, const void *s2, int len)
349
ch = *((char *)s1)++;
350
if ((i = ch - *((char *)s2)++) != 0)
358
/* Length of a NUL-terminated string.
 * NOTE(review): lossy extract — the declaration of p and the return
 * (presumably p - s, original lines 359/361-363) are missing. */
int strlen(const char *s) {
360
for (p = s; *p != 0; p++) { }
364
/* Output sink installed elsewhere; printk copies it into prfa.write below. */
extern void *printk_fn;
366
/* Kernel-style formatted print: forwards fmt/varargs to prf() with the
 * installed printk_fn as the write callback.
 * NOTE(review): lossy extract — struct prf_fp setup, va_start/va_end and
 * the prf() call (original lines 367-388) are missing. Code kept verbatim. */
void printk(char *fmt, ...)
370
void (*write)(void *, char *, int);
372
extern void prf(struct prf_fp *, char *fmt, va_list adx);
377
prfa.write = printk_fn;
389
* Get the highest bit number from the mask.
391
/* NOTE(review): lossy extract — body (original lines 392-398, 400-405)
 * is missing; only the probe test is visible. Presumably scans a
 * single-bit mask m1 down from bit `size` — confirm against full source. */
int highc(int mask, int size)
399
if (m1 & mask) break;
406
/* Bypass load with a 32-bit byte swap (endianness conversion).
 * NOTE(review): lossy extract — the ld_bypass(ptr) call and return
 * (original lines 407-408, 410-412) are missing. */
unsigned int ld_bp_swap(unsigned int ptr) {
409
n = (n>>24 & 0xFF) | (n>>8 & 0xFF00) | ((n&0xFF00) << 8) | (n<<24);
413
/* Bypass store with a 32-bit byte swap (endianness conversion).
 * NOTE(review): lossy extract — the st_bypass(ptr, n) call (original
 * lines after 414) is missing. */
void st_bp_swap(unsigned int ptr, unsigned int n) {
414
n = (n>>24 & 0xFF) | (n>>8 & 0xFF00) | ((n&0xFF00) << 8) | (n<<24);