~ubuntu-branches/ubuntu/trusty/linux-linaro-omap/trusty


Viewing changes to arch/powerpc/platforms/iseries/exception.S

  • Committer: Package Import Robot
  • Author(s): John Rigby
  • Date: 2011-09-26 10:44:23 UTC
  • Revision ID: package-import@ubuntu.com-20110926104423-57i0gl3v99b3lkfg
  • Tags: 3.0.0-1007.9

[ John Rigby ]

* Enable crypto modules and remove crypto-modules from
  exclude-module files (LP: #826021)
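As a rough sketch of the kind of change that entry describes (the exact symbols and file names for this upload live in the package's debian configuration and are not shown here), enabling crypto modules means switching the relevant options to be built as loadable modules, for example:

    # hypothetical config fragment: illustrative options only
    CONFIG_CRYPTO_AES=m
    CONFIG_CRYPTO_SHA256=m
    CONFIG_CRYPTO_CBC=m

with the matching crypto-modules entries dropped from the packaging's exclude-module files so that the newly built modules are actually shipped.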

--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -31,6 +31,7 @@
 #include <asm/thread_info.h>
 #include <asm/ptrace.h>
 #include <asm/cputable.h>
+#include <asm/mmu.h>
 
 #include "exception.h"
 
@@ -60,29 +61,31 @@
 /* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */
 /* In the UP case we'll yield() later, and we will not access the paca anyway */
 #ifdef CONFIG_SMP
-1:
+iSeries_secondary_wait_paca:
         HMT_LOW
         LOAD_REG_ADDR(r23, __secondary_hold_spinloop)
         ld      r23,0(r23)
-        sync
-        LOAD_REG_ADDR(r3,current_set)
-        sldi    r28,r24,3               /* get current_set[cpu#] */
-        ldx     r3,r3,r28
-        addi    r1,r3,THREAD_SIZE
-        subi    r1,r1,STACK_FRAME_OVERHEAD
-
-        cmpwi   0,r23,0                 /* Keep poking the Hypervisor until */
-        bne     2f                      /* we're released */
-        /* Let the Hypervisor know we are alive */
+
+        cmpdi   0,r23,0
+        bne     2f                      /* go on when the master is ready */
+
+        /* Keep poking the Hypervisor until we're released */
         /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
         lis     r3,0x8002
         rldicr  r3,r3,32,15             /* r0 = (r3 << 32) & 0xffff000000000000 */
         li      r0,-1                   /* r0=-1 indicates a Hypervisor call */
         sc                              /* Invoke the hypervisor via a system call */
-        b       1b
-#endif
+        b       iSeries_secondary_wait_paca
 
 2:
+        HMT_MEDIUM
+        sync
+
+        LOAD_REG_ADDR(r3, nr_cpu_ids)   /* get number of pacas allocated */
+        lwz     r3,0(r3)                /* nr_cpus= or NR_CPUS can limit */
+        cmpld   0,r24,r3                /* is our cpu number allocated? */
+        bge     iSeries_secondary_yield /* no, yield forever */
+
         /* Load our paca now that it's been allocated */
         LOAD_REG_ADDR(r13, paca)
         ld      r13,0(r13)
@@ -93,38 +96,47 @@
         ori     r23,r23,MSR_RI
         mtmsrd  r23                     /* RI on */
 
-        HMT_LOW
-#ifdef CONFIG_SMP
+iSeries_secondary_smp_loop:
         lbz     r23,PACAPROCSTART(r13)  /* Test if this processor
                                          * should start */
-        sync
-        LOAD_REG_ADDR(r3,current_set)
-        sldi    r28,r24,3               /* get current_set[cpu#] */
-        ldx     r3,r3,r28
-        addi    r1,r3,THREAD_SIZE
-        subi    r1,r1,STACK_FRAME_OVERHEAD
-
         cmpwi   0,r23,0
-        beq     iSeries_secondary_smp_loop      /* Loop until told to go */
+        bne     3f                      /* go on when we are told */
+
+        HMT_LOW
+        /* Let the Hypervisor know we are alive */
+        /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
+        lis     r3,0x8002
+        rldicr  r3,r3,32,15             /* r0 = (r3 << 32) & 0xffff000000000000 */
+        li      r0,-1                   /* r0=-1 indicates a Hypervisor call */
+        sc                              /* Invoke the hypervisor via a system call */
+        mfspr   r13,SPRN_SPRG_PACA      /* Put r13 back ???? */
+        b       iSeries_secondary_smp_loop /* wait for signal to start */
+
+3:
+        HMT_MEDIUM
+        sync
+        LOAD_REG_ADDR(r3,current_set)
+        sldi    r28,r24,3               /* get current_set[cpu#] */
+        ldx     r3,r3,r28
+        addi    r1,r3,THREAD_SIZE
+        subi    r1,r1,STACK_FRAME_OVERHEAD
+
         b       __secondary_start               /* Loop until told to go */
-iSeries_secondary_smp_loop:
-        /* Let the Hypervisor know we are alive */
-        /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
-        lis     r3,0x8002
-        rldicr  r3,r3,32,15             /* r0 = (r3 << 32) & 0xffff000000000000 */
-#else /* CONFIG_SMP */
+#endif /* CONFIG_SMP */
+
+iSeries_secondary_yield:
         /* Yield the processor.  This is required for non-SMP kernels
                 which are running on multi-threaded machines. */
+        HMT_LOW
         lis     r3,0x8000
         rldicr  r3,r3,32,15             /* r3 = (r3 << 32) & 0xffff000000000000 */
         addi    r3,r3,18                /* r3 = 0x8000000000000012 which is "yield" */
         li      r4,0                    /* "yield timed" */
         li      r5,-1                   /* "yield forever" */
-#endif /* CONFIG_SMP */
         li      r0,-1                   /* r0=-1 indicates a Hypervisor call */
         sc                              /* Invoke the hypervisor via a system call */
         mfspr   r13,SPRN_SPRG_PACA      /* Put r13 back ???? */
-        b       2b                      /* If SMP not configured, secondaries
+        b       iSeries_secondary_yield /* If SMP not configured, secondaries
                                          * loop forever */
 
 /***  ISeries-LPAR interrupt handlers ***/
@@ -157,7 +169,7 @@
 FTR_SECTION_ELSE
         EXCEPTION_PROLOG_1(PACA_EXGEN)
         EXCEPTION_PROLOG_ISERIES_1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
         b       data_access_common
 
 .do_stab_bolted_iSeries: