Statistics
| Branch: | Revision:

scoutos / prex-0.9.0 / bsp / hal / x86 / arch / locore.S @ 03e9c04a

History | View | Annotate | Download (11.9 KB)

1
/*-
2
 * Copyright (c) 2005-2008, Kohsuke Ohtani
3
 * All rights reserved.
4
 *
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
7
 * are met:
8
 * 1. Redistributions of source code must retain the above copyright
9
 *    notice, this list of conditions and the following disclaimer.
10
 * 2. Redistributions in binary form must reproduce the above copyright
11
 *    notice, this list of conditions and the following disclaimer in the
12
 *    documentation and/or other materials provided with the distribution.
13
 * 3. Neither the name of the author nor the names of any co-contributors
14
 *    may be used to endorse or promote products derived from this software
15
 *    without specific prior written permission.
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27
 * SUCH DAMAGE.
28
 */
29

    
30
/*
31
 * locore.S - low level platform support
32
 */
33

    
34
#include <conf/config.h>
35
#include <machine/asm.h>
36
#include <machine/syspage.h>
37
#include <machine/memory.h>
38
#include <sys/errno.h>
39
#include <cpu.h>
40

    
41
/*
 * Macro to save/restore registers
 *
 * This macro builds the trap frame by pushing registers.
 * If you change the push order of these macro, you must change the
 * trap frame structure in arch.h. In addition, the system call stub
 * will depend on this register format.
 *
 * Layout after SAVE_ALL (lowest address first, i.e. last push first):
 *   ebx, ecx, edx, esi, eax, edi, ebp, ds, es
 * followed by the trap number, error code and the CPU-pushed
 * eip/cs/eflags(/esp/ss) of the interrupted context.
 * The leading cld guarantees the direction flag is clear for any
 * string instructions executed by the kernel handlers.
 */
#define SAVE_ALL \
	cld; \
	pushl	%es; \
	pushl	%ds; \
	pushl	%ebp; \
	pushl	%edi; \
	pushl	%eax; \
	pushl	%esi; \
	pushl	%edx; \
	pushl	%ecx; \
	pushl	%ebx;
60

    
61
/*
 * Pop the registers pushed by SAVE_ALL, in exactly the reverse
 * order. The trap number and error code remain on the stack and
 * must be discarded by the caller (addl $8, %esp) before iret.
 */
#define RESTORE_ALL \
	popl	%ebx; \
	popl	%ecx; \
	popl	%edx; \
	popl	%esi; \
	popl	%eax; \
	popl	%edi; \
	popl	%ebp; \
	popl	%ds; \
	popl	%es;
71

    
72
/*
 * Load the kernel data segment selector into ds/es so that kernel
 * handlers see kernel data, regardless of what the interrupted
 * (possibly user) context had loaded. Clobbers %edx.
 */
#define SETUP_SEG \
	movl	$(KERNEL_DS), %edx; \
	movl	%edx, %ds; \
	movl	%edx, %es;
76

    
77
	.section ".text"
78

    
79
/*
 * Kernel start point
 *
 * The kernel assumes that the following state is already set by
 * the kernel loader.
 * - CPU is in protected mode
 * - Segment registers are set as 32-bit flat segment
 * - A20 line is enabled for 32-bit addressing
 * - Paging is turned off
 * - The boot information is loaded to address 1000h-1fffh
 */
/*
 * Note: The linker will generate an address for kernel_start as 0x80010000.
 * But, the loader will load the kernel to 0x10000 (physical address).
 * So, the linear address pointer can not be used until paging is enabled.
 */
ENTRY(kernel_start)
	cli				/* Disable interrupt */
	cld				/* String operations count upward */

	/*
	 * Setup CPU registers.
	 */
	movl	%cr0, %eax		/* Enable kernel write protection */
	orl	$(CR0_WP), %eax
	movl	%eax, %cr0

	pushfl				/* Clear nested task, iopl = 0 */
	popl	%eax
	andl	$~(EFL_IOPL|EFL_NT), %eax
	pushl	%eax
	popfl

#ifdef CONFIG_MMU
	/*
	 * Initialize page table.
	 * The physical address 0-4M is mapped on virtual address 2G.
	 */
	movl	$(BOOT_PGD_PHYS), %edi	/* Setup kernel page directory */
	xorl	%eax, %eax		/* Invalidate all address */
	movl	$0x1000, %ecx		/* Zero the whole 4K page directory */
	rep
	stosb
	movl	$(BOOT_PGD_PHYS+0x800), %edi	/* PDE slot 512 = vaddr 2G (512*4 bytes) */
	movl	$(BOOT_PTE0_PHYS+0x07), (%edi)	/* Point it at PTE0: present|write|user */
	movl	$1024, %ecx		/* Fill boot page table entry */
	movl	$(BOOT_PTE0_PHYS), %edi
	movl	$0007, %eax		/* First PTE: paddr 0, present|write|user */
fill_pte0:
	stosl
	add	$0x1000, %eax		/* Process next page */
	loop	fill_pte0

	/*
	 * Enable paging.
	 * The physical address 0-4M is temporarily mapped to virtual
	 * address 0-4M. This is needed to enable paging.
	 */
	movl	$(BOOT_PGD_PHYS), %edi	/* Map address 0-4M */
	movl	$(BOOT_PTE0_PHYS+0x07), (%edi)
	movl	$(BOOT_PGD_PHYS), %eax	/* Set page directory pointer */
	movl	%eax, %cr3
	movl	%cr0, %eax		/* Enable paging bit */
	orl	$(CR0_PG), %eax
	movl	%eax, %cr0
	jmp	pipeline_flush		/* Flush processor pipeline */
pipeline_flush:
	movl	$cs_reset, %eax		/* Jump via linker-assigned (virtual) address */
	jmp	*%eax
cs_reset:
	/* From here on we run at virtual addresses. */
	movl	$(BOOT_PGD), %edi	/* Unmap 0-4M */
	movl	$0, (%edi)
	movl	%cr3, %eax		/* Reload cr3 to flush the TLB */
	movl	%eax, %cr3

#endif /* CONFIG_MMU */

	/*
	 * Clear kernel BSS
	 */
	xorl	%eax, %eax
	movl	$__bss, %edi
	movl	$__end, %ecx
	subl	%edi, %ecx		/* ecx = BSS size in bytes */
	rep
	stosb

	/*
	 * Setup boot stack
	 * (eax is still zero here, so the stack is zero-filled.)
	 */
	movl	$(BOOTSTK), %edi
	movl	$(BOOTSTKSZ), %ecx
	rep
	stosb
	movl	$(BOOTSTKTOP), %esp
	movl	%esp, %ebp

	movl	$15, curspl		/* Start at highest interrupt priority level */

	/*
	 * Call kernel main routine
	 */
	call	main
	/* NOTREACHED */
	cli
	hlt
185

    
186
/*
 * Common entry for all interrupts
 * Setup interrupt stack for outermost interrupt.
 * The code should be written to prevent the stack overflow
 * by continuous interrupt as much as it can.
 *
 * On entry, the per-IRQ stub has already pushed a dummy error
 * code and the IRQ number (see INTR_ENTRY below).
 */
ENTRY(interrupt_common)
	SAVE_ALL
	SETUP_SEG
	incl	irq_nesting		/* Increment nesting level */
	cmpl	$1, irq_nesting		/* Outermost interrupt ? */
	jne	nested_irq
	mov	%esp, %ebp		/* Save current stack */
	movl	$(INTSTKTOP), %esp	/* Switch stack */
	call	sched_lock		/* Lock scheduler */
	pushl	%ebp			/* Push trap frame */
	call	interrupt_handler	/* Process interrupt */
	movl	%ebp, %esp		/* Restore original stack */
	decl	irq_nesting
	call	sched_unlock		/* Try to preempt */
	testl	$3, 0x30(%esp)		/* Return to kernel mode ? (RPL bits of saved cs at 0x30) */
	jz	interrupt_ret		/* Skip exception if kernel mode */
	sti
	call	exception_deliver	/* Check exception */
interrupt_ret:
	cli
	RESTORE_ALL
	addl	$8, %esp		/* Discard error code and IRQ number */
	iret
nested_irq:
	/* Already on the interrupt stack; no stack switch or sched lock. */
	push	%esp			/* Push trap frame */
	call	interrupt_handler	/* Process interrupt */
	addl	$4, %esp
	decl	irq_nesting
	jmp	interrupt_ret
221

    
222
/*
 * Macro to build interrupt entry
 *
 * Pushes a dummy error code (for a uniform trap frame) and the
 * IRQ number, then joins the common interrupt path.
 */
#define INTR_ENTRY(irq) \
ENTRY(intr_##irq) \
	pushl	$0; \
	pushl	$(irq); \
	jmp	interrupt_common

/* One entry stub per hardware IRQ line (two cascaded 8259 PICs = 16). */
INTR_ENTRY(0)
INTR_ENTRY(1)
INTR_ENTRY(2)
INTR_ENTRY(3)
INTR_ENTRY(4)
INTR_ENTRY(5)
INTR_ENTRY(6)
INTR_ENTRY(7)
INTR_ENTRY(8)
INTR_ENTRY(9)
INTR_ENTRY(10)
INTR_ENTRY(11)
INTR_ENTRY(12)
INTR_ENTRY(13)
INTR_ENTRY(14)
INTR_ENTRY(15)
247

    
248
/*
 * Common entry for all traps
 * New thread will start from trap_ret.
 *
 * The per-trap stub has already pushed an error code (real or
 * dummy) and the trap number; trap_handler receives a pointer
 * to the full trap frame.
 */
ENTRY(trap_common)
	SAVE_ALL
	SETUP_SEG
	pushl	%esp			/* Argument: pointer to trap frame */
	call	trap_handler
	addl	$4, %esp		/* Pop trap frame pointer */
trap_ret:
	RESTORE_ALL
	addl	$8, %esp		/* Discard error code and trap number */
	iret
262

    
263
/*
 * Default trap entry
 *
 * Used for vectors with no dedicated stub; pushes a dummy error
 * code and the INVALID_INT trap number.
 */
ENTRY(trap_default)
	pushl	$0
	pushl	$(INVALID_INT)
	jmp	trap_common
270

    
271
/*
 * Macro to build trap entry
 * Some trap will push the error code into stack.
 *
 * TRAP_ENTRY is for exceptions where the CPU pushes no error code
 * (a dummy 0 is pushed to keep the trap frame uniform);
 * TRAP_ERR_ENTRY is for exceptions where the CPU has already
 * pushed an error code.
 */
#define TRAP_ENTRY(id) \
ENTRY(trap_##id) \
	pushl	$0; \
	pushl	$(id); \
	jmp	trap_common;

#define TRAP_ERR_ENTRY(id) \
ENTRY(trap_##id) \
	pushl	$(id); \
	jmp	trap_common;

TRAP_ENTRY    ( 0)		/* Divide error */
TRAP_ENTRY    ( 1)		/* Debug trap */
TRAP_ENTRY    ( 2)		/* NMI */
TRAP_ENTRY    ( 3)		/* Breakpoint */
TRAP_ENTRY    ( 4)		/* Overflow */
TRAP_ENTRY    ( 5)		/* Bounds check */
TRAP_ENTRY    ( 6)		/* Invalid opcode */
TRAP_ENTRY    ( 7)		/* Device not available */
TRAP_ERR_ENTRY( 8)		/* Double fault */
TRAP_ERR_ENTRY( 9)		/* Coprocessor overrun */
TRAP_ERR_ENTRY(10)		/* Invalid TSS */
TRAP_ERR_ENTRY(11)		/* Segment not present */
TRAP_ERR_ENTRY(12)		/* Stack bounds */
TRAP_ERR_ENTRY(13)		/* General Protection */
TRAP_ERR_ENTRY(14)		/* Page fault */
TRAP_ENTRY    (15)		/* (reserved) */
TRAP_ENTRY    (16)		/* Coprocessor error */
TRAP_ERR_ENTRY(17)		/* Alignment check */
TRAP_ERR_ENTRY(18)		/* Cache flush denied */
305

    
306

    
307
/*
 * System call entry
 *
 * Builds a trap frame identical to the trap path so the kernel
 * can treat system calls uniformly. The return value from
 * syscall_handler is patched into the saved eax slot so the
 * caller receives it on iret. syscall_ret is global so new user
 * threads can be started through this return path.
 */
	.global syscall_ret
ENTRY(syscall_entry)
	pushl	$0			/* Dummy for error code */
	pushl	$(SYSCALL_INT)		/* Trap number */
	SAVE_ALL
	SETUP_SEG
	call	syscall_handler
	/* 0x10(%esp) = saved user eax (5th register pushed by SAVE_ALL). */
	cmpl	$0, 0x10(%esp)		/* Skip setting eax if exception_return */
	je	1f			/* (saved eax == 0 apparently means exception_return — confirm in syscall table) */
	movl	%eax, 0x10(%esp)	/* Set return value to eax */
1:
	call	exception_deliver	/* Check exception */
syscall_ret:
	RESTORE_ALL
	addl	$8, %esp		/* Discard err/trap no */
	iret
326

    
327
/*
 * Switch register context.
 * Interrupts must be disabled by caller.
 *
 * syntax - void cpu_switch(kern_regs *prev, kern_regs *next)
 *
 * Note: GCC assumes ebx,ebp,edi,esi registers are not changed in each routine.
 * Only the callee-saved registers plus eip/esp are saved; the
 * caller-saved registers are dead across a function call by the ABI.
 * kern_regs layout used here: eip(0), ebx(4), edi(8), esi(12),
 * ebp(16), esp(20).
 */
ENTRY(cpu_switch)
	movl	4(%esp), %ecx		/* Point ecx to previous registers */
	movl	(%esp), %eax		/* Get return address */
	movl	%eax, 0(%ecx)		/* Save it as eip */
	movl	%ebx, 4(%ecx)		/* Save ebx */
	movl	%edi, 8(%ecx)		/* Save edi */
	movl	%esi, 12(%ecx)		/* Save esi */
	movl	%ebp, 16(%ecx)		/* Save ebp */
	movl	%esp, 20(%ecx)		/* Save esp */
	movl	8(%esp), %ecx		/* Point ecx to next registers */
	movl	4(%ecx), %ebx		/* Restore ebx */
	movl	8(%ecx), %edi		/* Restore edi */
	movl	12(%ecx), %esi		/* Restore esi */
	movl	16(%ecx), %ebp		/* Restore ebp */
	movl	20(%ecx), %esp		/* Restore esp */
	movl	0(%ecx), %eax		/* Get eip */
	movl	%eax, (%esp)		/* Restore it as return address */
	ret				/* Resume the next thread */
353

    
354
/*
 * Copy data from user to kernel space.
 * Returns 0 on success, or EFAULT on page fault.
 *
 *  syntax - int copyin(const void *uaddr, void *kaddr, size_t len)
 *
 * EFAULT is pushed on the stack as the default return value; if a
 * page fault occurs at known_fault1, the fault handler resumes at
 * copy_fault, which pops that value into eax.
 */
	.global known_fault1
ENTRY(copyin)
	pushl	%esi
	pushl	%edi
	pushl	$(EFAULT)		/* Set EFAULT as default return */

	movl	16(%esp), %esi		/* esi = uaddr (args shifted by 3 pushes) */
	movl	20(%esp), %edi		/* edi = kaddr */
	movl	24(%esp), %ecx		/* ecx = len */

	movl	%esi, %edx		/* Check if valid user address */
	addl	%ecx, %edx		/* edx = uaddr + len */
	jc	copy_fault		/* Address arithmetic wrapped around */
	cmpl	$(USERLIMIT), %edx	/* User area? */
	jae	copy_fault
	cld
known_fault1:				/* May be fault here */
	rep
	movsb

	popl	%eax			/* Discard the default EFAULT */
	xorl	%eax, %eax		/* Set no error */
	popl	%edi
	popl	%esi
	ret
385

    
386
/*
 * Copy data to user from kernel space.
 * Returns 0 on success, or EFAULT on page fault.
 *
 *  syntax - int copyout(const void *kaddr, void *uaddr, size_t len)
 *
 * Mirror of copyin: the destination (user) range is validated
 * against USERLIMIT, and a fault at known_fault2 is recovered
 * through copy_fault.
 */
	.global known_fault2
ENTRY(copyout)
	pushl	%esi
	pushl	%edi
	pushl	$(EFAULT)		/* Set EFAULT as default return */

	movl	16(%esp), %esi		/* esi = kaddr (args shifted by 3 pushes) */
	movl	20(%esp), %edi		/* edi = uaddr */
	movl	24(%esp), %ecx		/* ecx = len */

	movl	%edi, %edx		/* Check if valid user address */
	addl	%ecx, %edx		/* edx = uaddr + len */
	jc	copy_fault		/* Address arithmetic wrapped around */
	cmpl	$(USERLIMIT), %edx	/* User area? */
	jae	copy_fault
	cld
known_fault2:				/* May be fault here */
	rep
	movsb

	popl	%eax			/* Discard the default EFAULT */
	xorl	%eax, %eax		/* Set no error */
	popl	%edi
	popl	%esi
	ret
417

    
418
/**
 * copyinstr - Copy string from user space.
 * Returns 0 on success, or EFAULT on page fault, or ENAMETOOLONG.
 *
 *  syntax - int copyinstr(const char *uaddr, void *kaddr, size_t len);
 *
 * Note: The returned length value does NOT include the NULL terminator.
 * (NOTE(review): this routine only returns a status code, not a
 * length — the note above seems to describe the 'len' limit
 * semantics; confirm against callers. The NUL itself IS copied.)
 */
	.global known_fault3
ENTRY(copyinstr)
	pushl	%esi
	pushl	%edi
	pushl	$(EFAULT)		/* Set EFAULT as default return */

	movl	16(%esp), %esi		/* esi = uaddr (args shifted by 3 pushes) */
	movl	20(%esp), %edi		/* edi = kaddr */
	movl	24(%esp), %ecx		/* ecx = len */

	movl	%esi, %edx		/* Check if valid user address */
	addl	%ecx, %edx		/* edx = uaddr + len */
	jc	copy_fault		/* Address arithmetic wrapped around */
	cmpl	$(USERLIMIT), %edx	/* User area? */
	jae	copy_fault
	cld
	jmp	2f			/* First byte does not consume the budget */
1:
	decl	%ecx			/* One byte of budget used */
	jz	copyin_toolong		/* Buffer exhausted before NUL */
2:
known_fault3:				/* May be fault here */
	lodsb				/* al = *uaddr++ */
	stosb				/* *kaddr++ = al */
	testb	%al, %al		/* Stop after copying the NUL */
	jnz	1b

	popl	%eax			/* Discard the default EFAULT */
	xorl	%eax, %eax		/* Set no error */
	popl	%edi
	popl	%esi
	ret

copyin_toolong:
	popl	%eax			/* Discard the default EFAULT */
	movl	$(ENAMETOOLONG), %eax
	popl	%edi
	popl	%esi
	ret
465

    
466
/*
 * Fault entry for user access
 *
 * The page-fault handler resumes here when a fault hits one of
 * the known_fault* labels; the error value (EFAULT) pushed by
 * copyin/copyout/copyinstr is still on the stack.
 */
ENTRY(copy_fault)
	popl	%eax			/* Get return value from stack */
	popl	%edi
	popl	%esi
	ret
474

    
475
/*
 * Reset cpu
 * Use triple fault
 *
 * Loads an empty IDT and raises an interrupt: the resulting
 * fault cannot be delivered, escalating to a triple fault which
 * resets the processor.
 */
ENTRY(cpu_reset)
	cli
	movl	$null_idt, %eax		/* Reset by triple fault */
	lidt	(%eax)
	int	$3			/* Any interrupt now triple-faults */
	hlt				/* Should not be reached */

	.align 4
null_idt:
	.word	0			/* IDT limit = 0 (empty table) */
	.long	0			/* IDT base */
490

    
491
/*
 * Initialize cache.
 *
 * Clears the cache-disable bit (CR0_CD) so the processor cache
 * is enabled.
 */
ENTRY(cache_init)
	movl	%cr0, %eax		/* Clear cache disable bit */
	andl	$~(CR0_CD), %eax
	movl	%eax, %cr0
	ret
499

    
500
/*
 * void splx(int s);
 *
 * Restore the interrupt priority level to 's'. Interrupts are
 * re-enabled only when the restored level is 0.
 */
ENTRY(splx)
	cli				/* Block interrupts while curspl changes */
	movl	4(%esp), %eax		/* eax = s */
	movl	%eax, curspl
	cmpl	$0, %eax
	ja	1f			/* s > 0: stay blocked */
	sti
1:
	ret
512

    
513
/*
 * int splhigh(void);
 *
 * Raise to the highest interrupt priority level (15, interrupts
 * disabled) and return the previous level for a later splx().
 */
ENTRY(splhigh)
	cli
	movl	curspl, %eax		/* Return previous level */
	movl	$15, curspl
	ret
521

    
522
/*
 * int spl0(void);
 *
 * Drop to priority level 0 (interrupts enabled) and return the
 * previous level for a later splx().
 */
ENTRY(spl0)
	movl	curspl, %eax		/* Return previous level */
	movl	$0, curspl
	sti
	ret
530

    
531
/*
 * Disable interrupts
 * (Unconditional; does not touch curspl.)
 */
ENTRY(sploff)
	cli
	ret
537

    
538
/*
 * Enable interrupts
 * (Unconditional; does not touch curspl.)
 */
ENTRY(splon)
	sti
	ret
544

    
545
/*
546
 * Interrupt nest counter.
547
 *
548
 * This counter is incremented in the entry of interrupt handler
549
 * to switch the interrupt stack. Since all interrupt handlers
550
 * share same one interrupt stack, each handler must pay attention
551
 * to the stack overflow.
552
 */
553
	.section ".bss"
554
irq_nesting:
555
	.long	0
556

    
557
/*
558
 * Current spl
559
 */
560
curspl:
561
	.long	0