~ubuntu-branches/ubuntu/precise/linux-ti-omap4/precise

« back to all changes in this revision

Viewing changes to arch/powerpc/kernel/setup_64.c

  • Committer: Bazaar Package Importer
  • Author(s): Paolo Pisati
  • Date: 2011-06-29 15:23:51 UTC
  • mfrom: (26.1.1 natty-proposed)
  • Revision ID: james.westby@ubuntu.com-20110629152351-xs96tm303d95rpbk
Tags: 3.0.0-1200.2
* Rebased against 3.0.0-6.7
* BSP from TI based on 3.0.0

Show diffs side-by-side

added added

removed removed

Lines of Context:
62
62
#include <asm/udbg.h>
63
63
#include <asm/kexec.h>
64
64
#include <asm/mmu_context.h>
 
65
#include <asm/code-patching.h>
65
66
 
66
67
#include "setup.h"
67
68
 
72
73
#endif
73
74
 
74
75
int boot_cpuid = 0;
 
76
int __initdata boot_cpu_count;
75
77
u64 ppc64_pft_size;
76
78
 
77
79
/* Pick defaults since we might want to patch instructions
233
235
void smp_release_cpus(void)
234
236
{
235
237
        unsigned long *ptr;
 
238
        int i;
236
239
 
237
240
        DBG(" -> smp_release_cpus()\n");
238
241
 
245
248
        ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
246
249
                        - PHYSICAL_START);
247
250
        *ptr = __pa(generic_secondary_smp_init);
248
 
        mb();
 
251
 
 
252
        /* And wait a bit for them to catch up */
 
253
        for (i = 0; i < 100000; i++) {
 
254
                mb();
 
255
                HMT_low();
 
256
                if (boot_cpu_count == 0)
 
257
                        break;
 
258
                udelay(1);
 
259
        }
 
260
        DBG("boot_cpu_count = %d\n", boot_cpu_count);
249
261
 
250
262
        DBG(" <- smp_release_cpus()\n");
251
263
}
423
435
        DBG(" <- setup_system()\n");
424
436
}
425
437
 
426
 
static u64 slb0_limit(void)
 
438
/* This returns the limit below which memory accesses to the linear
 
439
 * mapping are guaranteed not to cause a TLB or SLB miss. This is
 
440
 * used to allocate interrupt or emergency stacks for which our
 
441
 * exception entry path doesn't deal with being interrupted.
 
442
 */
 
443
static u64 safe_stack_limit(void)
427
444
{
428
 
        if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
 
445
#ifdef CONFIG_PPC_BOOK3E
 
446
        /* Freescale BookE bolts the entire linear mapping */
 
447
        if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
 
448
                return linear_map_top;
 
449
        /* Other BookE, we assume the first GB is bolted */
 
450
        return 1ul << 30;
 
451
#else
 
452
        /* BookS, the first segment is bolted */
 
453
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
429
454
                return 1UL << SID_SHIFT_1T;
430
 
        }
431
455
        return 1UL << SID_SHIFT;
 
456
#endif
432
457
}
433
458
 
434
459
static void __init irqstack_early_init(void)
435
460
{
436
 
        u64 limit = slb0_limit();
 
461
        u64 limit = safe_stack_limit();
437
462
        unsigned int i;
438
463
 
439
464
        /*
453
478
#ifdef CONFIG_PPC_BOOK3E
454
479
static void __init exc_lvl_early_init(void)
455
480
{
 
481
        extern unsigned int interrupt_base_book3e;
 
482
        extern unsigned int exc_debug_debug_book3e;
 
483
 
456
484
        unsigned int i;
457
485
 
458
486
        for_each_possible_cpu(i) {
463
491
                mcheckirq_ctx[i] = (struct thread_info *)
464
492
                        __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
465
493
        }
 
494
 
 
495
        if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
 
496
                patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1,
 
497
                             (unsigned long)&exc_debug_debug_book3e, 0);
466
498
}
467
499
#else
468
500
#define exc_lvl_early_init()
486
518
         * bringup, we need to get at them in real mode. This means they
487
519
         * must also be within the RMO region.
488
520
         */
489
 
        limit = min(slb0_limit(), ppc64_rma_size);
 
521
        limit = min(safe_stack_limit(), ppc64_rma_size);
490
522
 
491
523
        for_each_possible_cpu(i) {
492
524
                unsigned long sp;