Statistics
| Branch: | Revision:

root / prex-0.9.0 / bsp / hal / ppc / arch / locore.S @ 03e9c04a

History | View | Annotate | Download (10.5 KB)

1
/*-
2
 * Copyright (c) 2009, Kohsuke Ohtani
3
 * All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
7
 * are met:
8
 * 1. Redistributions of source code must retain the above copyright
9
 *    notice, this list of conditions and the following disclaimer.
10
 * 2. Redistributions in binary form must reproduce the above copyright
11
 *    notice, this list of conditions and the following disclaimer in the
12
 *    documentation and/or other materials provided with the distribution.
13
 * 3. Neither the name of the author nor the names of any co-contributors
14
 *    may be used to endorse or promote products derived from this software
15
 *    without specific prior written permission.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
 * SUCH DAMAGE.
28
 */
29

    
30
/*
31
 * locore.S - low level platform support
32
 */
33

    
34
/*
35
 * Memo: SPRG usage
36
 *  SPRG0 - kernel stack pointer
37
 *  SPRG1 - saved stack pointer
38
 *  SPRG2 - interrupt nest counter
39
 *  SPRG3 - scratch pad
40
 */
41

    
42
#include <conf/config.h>
43
#include <machine/asm.h>
44
#include <machine/syspage.h>
45
#include <machine/memory.h>
46
#include <sys/errno.h>
47
#include <context.h>
48
#include <trap.h>
49
#include <cpu.h>
50

    
51
	.section ".text","ax"
52

    
53
/*
 * ABS_JUMP(target) - unconditional jump to an absolute 32-bit address.
 *
 * The exception vectors sit at fixed low offsets, so a relative branch
 * cannot be relied on to reach the handlers; load the full address and
 * branch through CTR instead.  Clobbers r12 and CTR.
 *
 * Fix: the low halfword is now added with addi so that it pairs
 * correctly with the @ha (high-adjusted) upper halfword.  The previous
 * "lis @ha + ori @l" pair yielded target+0x10000 whenever bit 15 of
 * the target's low halfword was set, because @ha pre-compensates for
 * addi's sign extension, which ori does not perform.
 */
#define ABS_JUMP(target) \
	lis	r12, (target)@ha ; \
	addi	r12, r12, (target)@l ; \
	mtctr	r12 ;\
	bctr
58

    
59
/*
 * SAVE_ALL - build a full trap frame on the current stack.
 *
 * On entry r1 is the stack to save into and SPRG1 holds the
 * interrupted context's original r1 (set up by the exception entry
 * stubs).  Allocates CTXREGS bytes and stores every GPR plus
 * SRR0/SRR1, LR, CTR, XER and CR.  r10 is used as scratch for the
 * SPR moves; its original value is stored before the SPR dance.
 *
 * Fix: LR was read and stored to REG_LR twice (once after SRR1 and
 * again after XER); the redundant second copy has been dropped.
 */
#define SAVE_ALL \
	subi	r1, r1, CTXREGS; \
	stw	r0, REG_R0(r1); \
	stw	r2, REG_R2(r1); \
	stw	r3, REG_R3(r1); \
	stw	r4, REG_R4(r1); \
	stw	r5, REG_R5(r1); \
	stw	r6, REG_R6(r1); \
	stw	r7, REG_R7(r1); \
	stw	r8, REG_R8(r1); \
	stw	r9, REG_R9(r1); \
	stw	r10, REG_R10(r1); \
	stw	r11, REG_R11(r1); \
	stw	r12, REG_R12(r1); \
	stw	r13, REG_R13(r1); \
	stw	r14, REG_R14(r1); \
	stw	r15, REG_R15(r1); \
	stw	r16, REG_R16(r1); \
	stw	r17, REG_R17(r1); \
	stw	r18, REG_R18(r1); \
	stw	r19, REG_R19(r1); \
	stw	r20, REG_R20(r1); \
	stw	r21, REG_R21(r1); \
	stw	r22, REG_R22(r1); \
	stw	r23, REG_R23(r1); \
	stw	r24, REG_R24(r1); \
	stw	r25, REG_R25(r1); \
	stw	r26, REG_R26(r1); \
	stw	r27, REG_R27(r1); \
	stw	r28, REG_R28(r1); \
	stw	r29, REG_R29(r1); \
	stw	r30, REG_R30(r1); \
	stw	r31, REG_R31(r1); \
	mfsprg1	r10; 			/* Get saved sp */ \
	stw	r10, REG_R1(r1); \
	mfspr	r10, SPR_SRR0; \
	stw	r10, REG_SRR0(r1); \
	mfspr	r10, SPR_SRR1; \
	stw	r10, REG_SRR1(r1); \
	mfspr	r10, SPR_LR; \
	stw	r10, REG_LR(r1); \
	mfspr	r10, SPR_CTR; \
	stw	r10, REG_CTR(r1); \
	mfspr	r10, SPR_XER; \
	stw	r10, REG_XER(r1); \
	mfcr	r10; \
	stw	r10, REG_CR(r1);
111

    
112
/*
 * RESTORE_ALL - unwind a trap frame built by SAVE_ALL.
 *
 * Reloads SRR0/SRR1, LR, XER, CTR and CR from the frame, parks the
 * saved r1 in SPRG1 (callers reload r1 from SPRG1 immediately before
 * rfi), then restores every GPR.  r10 is scratch for the SPR moves
 * and is finally reloaded with its own saved value.
 *
 * Fix: LR was reloaded from REG_LR and written with mtspr twice;
 * the redundant second copy has been dropped.
 */
#define RESTORE_ALL \
	lwz	r10, REG_SRR0(r1); \
	mtspr	SPR_SRR0, r10; \
	lwz	r10, REG_SRR1(r1); \
	mtspr	SPR_SRR1, r10; \
	lwz	r10, REG_LR(r1); \
	mtspr	SPR_LR, r10; \
	lwz	r10, REG_XER(r1); \
	mtspr	SPR_XER, r10; \
	lwz	r10, REG_CTR(r1); \
	mtspr	SPR_CTR, r10; \
	lwz	r10, REG_CR(r1); \
	mtcr	r10; \
	lwz	r10, REG_R1(r1); \
	mtsprg1	r10; 			/* Restore saved sp */ \
	lwz	r0, REG_R0(r1); \
	lwz	r2, REG_R2(r1); \
	lwz	r3, REG_R3(r1); \
	lwz	r4, REG_R4(r1); \
	lwz	r5, REG_R5(r1); \
	lwz	r6, REG_R6(r1); \
	lwz	r7, REG_R7(r1); \
	lwz	r8, REG_R8(r1); \
	lwz	r9, REG_R9(r1); \
	lwz	r10, REG_R10(r1); \
	lwz	r11, REG_R11(r1); \
	lwz	r12, REG_R12(r1); \
	lwz	r13, REG_R13(r1); \
	lwz	r14, REG_R14(r1); \
	lwz	r15, REG_R15(r1); \
	lwz	r16, REG_R16(r1); \
	lwz	r17, REG_R17(r1); \
	lwz	r18, REG_R18(r1); \
	lwz	r19, REG_R19(r1); \
	lwz	r20, REG_R20(r1); \
	lwz	r21, REG_R21(r1); \
	lwz	r22, REG_R22(r1); \
	lwz	r23, REG_R23(r1); \
	lwz	r24, REG_R24(r1); \
	lwz	r25, REG_R25(r1); \
	lwz	r26, REG_R26(r1); \
	lwz	r27, REG_R27(r1); \
	lwz	r28, REG_R28(r1); \
	lwz	r29, REG_R29(r1); \
	lwz	r30, REG_R30(r1); \
	lwz	r31, REG_R31(r1);
163

    
164
/*
 * Macro to build an exception entry.
 * We assume interrupts are disabled.
 *
 * The entry is placed at "offset" bytes from exception_vector via
 * .skip.  It parks the interrupted r1 in SPRG1 and r10 in SPRG3,
 * then inspects SRR1 (the interrupted MSR) to pick a stack: the
 * kernel stack from SPRG0 if the exception came from user mode,
 * otherwise the current r1.  After CR/r10 are restored, SAVE_ALL
 * builds the trap frame, the trap id is loaded into r10, and
 * control transfers to the common handler "xfer".
 */
#define EXCEPTION_ENTRY(offset, name, id, xfer) \
	.skip offset - (. - exception_vector); \
exception_##name: \
	mtsprg1 r1;		/* sprg1: current sp */ \
	mtsprg3	r10; 		/* sprg3: saved r10 */ \
	mfcr	r10; 		/* r10:	saved cr */ \
	mfsrr1	r1; 		/* Get msr */ \
	mtcr	r1; 		/* CR now mirrors SRR1 bits */ \
	bt	17, 1f;		/* CR bit 17 = SRR1[PR]: exception from user mode? */ \
	mfsprg1	r1; 		/* Kernel mode => restore original sp */ \
	b	2f; \
1:	mfsprg0	r1; 		/* User mode => Load kernel stack */ \
2:	mtcr	r10; 		/* Restore cr */ \
	mfsprg3 r10;		/* Restore r10 */ \
	SAVE_ALL ; \
	li	r10, id;	/* pass trap id to the handler in r10 */ \
	ABS_JUMP(xfer);
185

    
186
/* Exception entry routed to interrupt_common (external/decrementer interrupts). */
#define INTR_ENTRY(offset, name, id) \
		EXCEPTION_ENTRY(offset, name, id, interrupt_common)
188

    
189
/* Exception entry routed to trap_common (synchronous faults/traps). */
#define TRAP_ENTRY(offset, name, id) \
		EXCEPTION_ENTRY(offset, name, id, trap_common)
191

    
192
/*
 * Macro to build a system call entry.
 *
 * Like EXCEPTION_ENTRY but unconditionally switches to the kernel
 * stack from SPRG0 before building the trap frame, then jumps to
 * syscall_entry with the trap id in r10.
 *
 * NOTE(review): since there is no user/kernel-mode check here, this
 * assumes the 'sc' exception is only ever taken from user mode; an
 * 'sc' issued while already on the kernel stack would reset r1 to
 * the kernel stack base -- confirm the kernel never issues 'sc'.
 */
#define SYSC_ENTRY(offset, name, id) \
	.skip offset - (. - exception_vector); \
exception_##name: \
	mtsprg1 r1;		/* sprg1: current sp */ \
	mfsprg0	r1; 		/* Load kernel stack */ \
	SAVE_ALL ; \
	li	r10, id; \
	ABS_JUMP(syscall_entry);
203

    
204
/*
 * Exception vectors
 *
 * Entries are laid out at the architectural offsets (0x100 system
 * reset, 0x200 machine check, ...) relative to exception_vector;
 * the .skip in each entry macro enforces the spacing.
 *
 * Fix: removed a stray line-continuation backslash after the first
 * .skip directive, which spliced it and ENTRY(system_reset) onto a
 * single source line.
 */
.globl	exception_vector
exception_vector:
	.long	0

	.skip 0x100 - (. - exception_vector)
ENTRY(system_reset)
	ABS_JUMP(kernel_start)

TRAP_ENTRY(0x200, machine_check  ,TRAP_MACHINE_CHECK)
TRAP_ENTRY(0x300, dsi            ,TRAP_DSI)
TRAP_ENTRY(0x400, isi            ,TRAP_ISI)
INTR_ENTRY(0x500, external_intr  ,TRAP_EXT_INTERRUPT)
TRAP_ENTRY(0x600, alignment      ,TRAP_ALIGNMENT)
TRAP_ENTRY(0x700, program        ,TRAP_PROGRAM)
TRAP_ENTRY(0x800, fp_unavailable ,TRAP_FP_UNAVAILABLE)
INTR_ENTRY(0x900, decrementer    ,TRAP_DECREMENTER)
SYSC_ENTRY(0xc00, syscall        ,TRAP_SYSTEM_CALL)
TRAP_ENTRY(0xd00, trace          ,TRAP_TRACE)
TRAP_ENTRY(0xe00, fp_assist      ,TRAP_FP_ASSIST)

.globl	exception_vector_end
exception_vector_end:
229

    
230

    
231
/*
 * Kernel start point
 *
 * Establishes the boot MSR, resets the interrupt nest counter and
 * timebase, sets up the boot stack, clears the kernel BSS and
 * branches to main().  Does not return.
 */
ENTRY(kernel_start)
	/*
	 * Setup CPU registers.
	 */
	li	r3, MSR_IP		/* Establish default MSR value */
	mtmsr	r3

	li	r3, 0			/* Init interrupt nest count */
	mtspr	SPR_SPRG2, r3

	li	r3, 0			/* Reset timebase */
	mttbl	r3			/* clear TBL first so TBU cannot see a carry */
	mttbu	r3
	mttbl	r3

	/*
	 * Init boot stack
	 */
	lis	r1, BOOTSTKTOP@ha
	addi	r1, r1, BOOTSTKTOP@l
	subi	r1, r1, 16		/* leave a small safety pad below the top */

	mtspr	SPR_SPRG0, r1		/* Keep kernel stack */

	/*
	 * Clear kernel BSS: zero every word in [__bss, __end).
	 *
	 * Fix: the previous "stwu r0, 4(r3)" loop pre-incremented, so
	 * it skipped the first word at __bss and could store one word
	 * past __end.  This compare-first loop covers the range exactly
	 * (and does nothing if the BSS is empty).
	 */
	lis	r3, __bss@ha
	addi	r3, r3, __bss@l
	lis	r4, __end@ha
	addi	r4, r4, __end@l
	li	r0, 0
1:
	cmplw	cr0, r3, r4		/* done when r3 >= __end */
	bge	2f
	stw	r0, 0(r3)
	addi	r3, r3, 4
	b	1b
2:
	/*
	 * Call kernel main routine
	 */
	b	main
	/* NOTREACHED */
276

    
277

    
278
/*
 * Common entry for interrupts.
 *
 * On entry SAVE_ALL has built a trap frame on r1 and r10 holds the
 * trap id (loaded by EXCEPTION_ENTRY; the old "r3 - trap id" header
 * did not match the code, which stores r10).  SPRG1 holds the
 * interrupted context's r1 and SPRG2 the IRQ nesting level.
 */
ENTRY(interrupt_common)
	stw	r10, CTX_TRAPNO(r1)	/* record trap id in the frame */

	mfsprg2	r28			/* r28: current IRQ nesting level */
	addi	r3, r28, 1		/* Increment IRQ nesting level */
	mtsprg2	r3

	mfmsr	r29			/* r29: current msr value */
	isync
	mr	r30, r1			/* r30: trap frame */
	subi	r1, r1, STKFRAME_LEN	/* Adjust stack frame for C routine */

	cmpwi	cr0, r28, 0		/* Outermost interrupt? */
	bne	1f
	bl	sched_lock		/* If outermost, lock scheduler */
1:
	mr	r3, r30			/* arg0 = trap frame */
	bl	interrupt_handler	/* Call main interrupt handler */

	mtsprg2	r28			/* Restore IRQ nesting level */
	cmpwi	cr0, r28, 0		/* Outermost interrupt? */
	bne	interrupt_ret
	bl	sched_unlock		/* Try to preempt */

	mtcr	r29			/* Exception from user mode? */
	bf	17, interrupt_ret	/* Exit if it's from kernel mode */

	/*
	 * NOTE(review): the andi. below actually CLEARS MSR[EE] -- and,
	 * because andi. zero-extends its 16-bit immediate, it also
	 * clears the upper halfword of the MSR -- before calling
	 * exception_deliver; mtmsr r27 afterwards restores the saved
	 * MSR.  The original "Enable IRQ"/"Disable IRQ" comments were
	 * the opposite of what the instructions do; confirm whether EE
	 * was meant to be set (ori) here instead.
	 */
	mfmsr	r27			/* r27: saved MSR */
	andi.	r4, r27, ~MSR_EE@l	/* clears EE (and MSR high bits) */
	mtmsr	r4
	bl	exception_deliver	/* Check exception */
	mtmsr	r27			/* restore saved MSR */
interrupt_ret:
	mr	r1, r30			/* Restore stack */
	RESTORE_ALL
	mfsprg1	r1			/* restore original sp */
	rfi
319

    
320
/*
 * System call entry
 *
 * On entry SAVE_ALL has built a trap frame on r1; r0 holds the
 * syscall number and r10 the trap id (TRAP_SYSTEM_CALL).
 *
 * Fixes: the return-value store used a bare numeral base register
 * "REG_R3(30)" -- now written r30 for consistency with the rest of
 * the file (and compatibility with -mregnames assembly); stray
 * leading spaces before two stw instructions removed.
 */
	.global syscall_ret
ENTRY(syscall_entry)
	stw	r10, CTX_TRAPNO(r1)	/* record trap id in the frame */

	mfmsr	r29			/* r29: current msr value */
	mr	r30, r1			/* r30: trap frame */
	subi	r1, r1, STKFRAME_LEN	/* Adjust stack frame for C routine */

	/*
	 * NOTE(review): andi. CLEARS MSR[EE] (and, since andi.
	 * zero-extends its immediate, the upper halfword of the MSR
	 * too) -- i.e. this disables interrupts, despite the original
	 * "Enable IRQ" comment.  Confirm whether ori/EE-set was the
	 * intent here.
	 */
	mfmsr	r27
	andi.	r27, r27, ~MSR_EE@l
	mtmsr	r27

	mr	r26, r0			/* r26: saved syscall number */
	mr	r7, r0			/* presumably 5th C arg = syscall number -- verify against syscall_handler */
	bl	syscall_handler		/* System call dispatcher */

	cmpwi	cr0, r26, 0		/* exception_return? */
	beq	1f			/* Skip storing error if so */
	stw	r3, REG_R3(r30)		/* Set return value in the trap frame */
1:
	bl	exception_deliver	/* Check exception */
	mr	r1, r30			/* Restore stack */
syscall_ret:
	/*
	 * NOTE(review): ori SETS MSR[EE] here (enables interrupts)
	 * immediately before RESTORE_ALL/rfi, despite the original
	 * "Disable IRQ" comment.  Confirm intent.
	 */
	mfmsr	r27
	ori	r27, r27, MSR_EE@l
	mtmsr	r27
	RESTORE_ALL
	mfsprg1	r1			/* Restore original sp */
	rfi
352

    
353
/*
 * Common entry for exceptions.
 *
 * On entry SAVE_ALL has built a trap frame on r1 and r10 holds the
 * trap id (loaded by EXCEPTION_ENTRY; the old "r3 - trap id" header
 * did not match the code, which stores r10).
 */
ENTRY(trap_common)
	stw	r10, CTX_TRAPNO(r1)	/* record trap id in the frame */

	mfsprg2	r3			/* increment nest counter */
	addi	r3, r3, 1
	mtsprg2	r3

	mr	r3, r1			/* arg0 = trap frame */
	subi	r1, r1, STKFRAME_LEN	/* room for the C routine's frame */
	bl	trap_handler
	addi	r1, r1, STKFRAME_LEN

	mfsprg2	r3			/* decrement nest counter */
	addi	r3, r3, -1
	mtsprg2	r3

	RESTORE_ALL
	mfsprg1	r1			/* restore original sp */
	rfi
376

    
377
/*
 * Switch register context.
 * r3 = previous kern_regs, r4 = next kern_regs
 * Interrupts must be disabled by caller.
 *
 * syntax - void cpu_switch(kern_regs *prev, kern_regs *next)
 *
 * Note: GCC uses r0-r12 as scratch registers, so only the
 * callee-saved GPRs (r13-r31), r2, r1, LR, CR and the kernel stack
 * pointer (SPRG0) are saved/restored.  r5 is scratch here.
 *
 * NOTE(review): the 0x00-0x5c offsets must match the kern_regs
 * layout (presumably declared in context.h) -- confirm before
 * changing either side.
 */
ENTRY(cpu_switch)
	isync
	/* Save previous context into *prev (r3). */
	stw	r13, 0x00(r3)
	stw	r14, 0x04(r3)
	stw	r15, 0x08(r3)
	stw	r16, 0x0c(r3)
	stw	r17, 0x10(r3)
	stw	r18, 0x14(r3)
	stw	r19, 0x18(r3)
	stw	r20, 0x1c(r3)
	stw	r21, 0x20(r3)
	stw	r22, 0x24(r3)
	stw	r23, 0x28(r3)
	stw	r24, 0x2c(r3)
	stw	r25, 0x30(r3)
	stw	r26, 0x34(r3)
	stw	r27, 0x38(r3)
	stw	r28, 0x3c(r3)
	stw	r29, 0x40(r3)
	stw	r30, 0x44(r3)
	stw	r31, 0x48(r3)
	stw	r2,  0x4c(r3)
	stw	r1,  0x50(r3)		/* stack pointer */
	mflr	r5
	stw	r5,  0x54(r3)		/* return address (LR) */
	mfcr	r5
	stw	r5,  0x58(r3)		/* condition register */
	mfsprg0	r5			/* Save kernel stack */
	stw	r5,  0x5c(r3)

	/* Load next context from *next (r4). */
	lwz	r13, 0x00(r4)
	lwz	r14, 0x04(r4)
	lwz	r15, 0x08(r4)
	lwz	r16, 0x0c(r4)
	lwz	r17, 0x10(r4)
	lwz	r18, 0x14(r4)
	lwz	r19, 0x18(r4)
	lwz	r20, 0x1c(r4)
	lwz	r21, 0x20(r4)
	lwz	r22, 0x24(r4)
	lwz	r23, 0x28(r4)
	lwz	r24, 0x2c(r4)
	lwz	r25, 0x30(r4)
	lwz	r26, 0x34(r4)
	lwz	r27, 0x38(r4)
	lwz	r28, 0x3c(r4)
	lwz	r29, 0x40(r4)
	lwz	r30, 0x44(r4)
	lwz	r31, 0x48(r4)
	lwz	r2,  0x4c(r4)
	lwz	r1,  0x50(r4)		/* stack pointer */
	lwz	r5,  0x54(r4)
	mtlr	r5			/* return address (LR) */
	lwz	r5,  0x58(r4)
	mtcr	r5			/* condition register */
	lwz	r5,  0x5c(r4)
	mtsprg0	r5			/* Restore kernel stack */
	isync
	blr				/* return on the next context's LR */
445

    
446
/*
 * void sploff(void);
 *
 * Disable external interrupts by clearing MSR[EE].
 * Clobbers: r3, r4.
 *
 * Fix: the previous "andi. r4, r3, ~MSR_EE@l" zero-extended its
 * 16-bit immediate and therefore cleared the entire upper halfword
 * of the MSR as well as EE.  rlwinm with a wrapped mask clears only
 * bit 16 (EE, i.e. 0x8000) and preserves every other MSR bit.
 */
ENTRY(sploff)
	mfmsr	r3
	rlwinm	r4, r3, 0, 17, 15	/* clear MSR[EE] (bit 16) only */
	mtmsr	r4
	blr
454

    
455
/*
 * void splon(void);
 *
 * Enable external interrupts by setting MSR[EE].
 * (ori zero-extends its immediate, so only the EE bit is affected.)
 * Clobbers: r3, r4.
 */
ENTRY(splon)
	mfmsr	r3
	ori	r4, r3, MSR_EE@l	/* set MSR[EE] */
	mtmsr	r4
	blr
463