/*-
 * Copyright (c) 2008-2009, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * mmu.c - memory management unit support routines
 */

/*
 * This module provides virtual/physical address translation for
 * the ARM MMU. The kernel performs only page-level translation
 * and protection; it does not use ARM protection domains.
 */

#include <machine/syspage.h>
#include <kernel.h>
#include <page.h>
#include <mmu.h>
#include <cpu.h>
#include <cpufunc.h>

#define L1TBL_MASK        (L1TBL_SIZE - 1)
#define PGD_ALIGN(n)        ((((paddr_t)(n)) + L1TBL_MASK) & ~L1TBL_MASK)

/*
 * Boot page directory.
 * This works as a template for all page directories in the system.
 */
static pgd_t boot_pgd = (pgd_t)BOOT_PGD;

/*
 * Allocate pgd
 *
 * The page directory for ARM must be aligned on a 16K-byte
 * boundary, so we allocate 32K bytes first and use the
 * 16K-aligned area within it.
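 *
 * For illustration (hypothetical addresses, 16K L1TBL_SIZE assumed):
 * if page_alloc() returns 0x80005000, PGD_ALIGN() yields 0x80008000;
 * the 0x3000-byte gap below it and the 0x1000 bytes above 0x8000c000
 * are returned to the page allocator, leaving a single 16K-aligned
 * L1 table.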
 */
static paddr_t
alloc_pgd(void)
{
        paddr_t pg, pgd;
        size_t gap;

        /* Allocate 32K first. */
        if ((pg = page_alloc(L1TBL_SIZE * 2)) == 0)
                return 0;

        /* Find 16K aligned pointer */
        pgd = PGD_ALIGN(pg);

        /* Release un-needed area */
        gap = (size_t)(pgd - pg);
        if (gap != 0)
                page_free(pg, gap);
        page_free((paddr_t)(pgd + L1TBL_SIZE), (size_t)(L1TBL_SIZE - gap));

        return pgd;
}

/*
 * Map physical memory range into virtual address
 *
 * Returns 0 on success, or ENOMEM on failure.
 *
 * Map type can be one of the following types:
 *   PG_UNMAP  - Remove mapping
 *   PG_READ   - Read only mapping
 *   PG_WRITE  - Read/write allowed
 *   PG_SYSTEM - Kernel page
 *   PG_IOMEM  - I/O memory
 *
 * Sets up the appropriate page tables for the mapping. If there is
 * no page table for the specified address, a new page table is
 * allocated.
 *
 * This routine does not return an error even if the specified
 * address has already been mapped to another physical address;
 * in that case it simply overrides the existing mapping.
 *
 * To unmap a page, PG_UNMAP is specified as the type. However, the
 * page tables are not released even if they no longer contain any
 * valid page entry; all page tables are released by mmu_terminate()
 * when the task is terminated.
 */
int
mmu_map(pgd_t pgd, paddr_t pa, vaddr_t va, size_t size, int type)
{
        uint32_t pte_flag = 0;
        pte_t pte;
        paddr_t pg;        /* page */

        pa = round_page(pa);
        va = round_page(va);
        size = trunc_page(size);

        /*
         * Set page flag
         */
        switch (type) {
        case PG_UNMAP:
                pte_flag = 0;
                break;
        case PG_READ:
                pte_flag = (uint32_t)(PTE_PRESENT | PTE_WBUF | PTE_CACHE |
                                      PTE_USER_RO);
                break;
        case PG_WRITE:
                pte_flag = (uint32_t)(PTE_PRESENT | PTE_WBUF | PTE_CACHE |
                                      PTE_USER_RW);
                break;
        case PG_SYSTEM:
                pte_flag = (uint32_t)(PTE_PRESENT | PTE_WBUF | PTE_CACHE |
                                      PTE_SYSTEM);
                break;
        case PG_IOMEM:
                pte_flag = (uint32_t)(PTE_PRESENT | PTE_SYSTEM);
                break;
        default:
                panic("mmu_map");
        }
        /*
         * Map all pages
         */
        flush_tlb();

        while (size > 0) {
                if (pte_present(pgd, va)) {
                        /* Page table already exists for the address */
                        pte = vtopte(pgd, va);
                } else {
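                        /*
                         * No page table covers this address yet.  An unmap
                         * request (pte_flag == 0) must never reach here;
                         * otherwise allocate a fresh L2 table, install it in
                         * the page directory, and clear all of its entries.
                         */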
                        ASSERT(pte_flag != 0);
                        if ((pg = page_alloc(L2TBL_SIZE)) == 0) {
                                DPRINTF(("Error: MMU mapping failed\n"));
                                return ENOMEM;
                        }
                        pgd[PAGE_DIR(va)] = (uint32_t)pg | PDE_PRESENT;
                        pte = (pte_t)ptokv(pg);
                        memset(pte, 0, L2TBL_SIZE);
                }
                /* Set new entry into page table */
                pte[PAGE_TABLE(va)] = (uint32_t)pa | pte_flag;

                /* Process next page */
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
        flush_tlb();
        return 0;
}

/*
 * Create a new page map.
 *
 * Returns a page directory on success, or NO_PGD on failure.  This
 * routine is called when a new task is created. Every page map must
 * contain the same kernel page tables, so the kernel page tables are
 * copied into the newly created map.
 */
pgd_t
mmu_newmap(void)
{
        paddr_t pg;
        pgd_t pgd;
        int i;

        if ((pg = alloc_pgd()) == 0)
                return NO_PGD;
        pgd = (pgd_t)ptokv(pg);
        memset(pgd, 0, L1TBL_SIZE);

        /* Copy kernel page tables */
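        /*
         * Each L1 entry is 4 bytes, so copying L1TBL_SIZE - i * 4 bytes
         * starting at index PAGE_DIR(KERNBASE) duplicates the kernel
         * portion of the directory into the new map.
         */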
        i = PAGE_DIR(KERNBASE);
        memcpy(&pgd[i], &boot_pgd[i], (size_t)(L1TBL_SIZE - i * 4));

        /* Map vector page (address 0) */
        mmu_map(pgd, 0, 0, PAGE_SIZE, PG_SYSTEM);
        return pgd;
}

/*
 * Terminate all page mappings.
 */
void
mmu_terminate(pgd_t pgd)
{
        int i;
        pte_t pte;

        flush_tlb();

        /* Release all user page tables */
        for (i = 0; i < PAGE_DIR(KERNBASE); i++) {
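                /*
                 * A non-zero user PDE holds the physical address of an
                 * L2 table plus flag bits; mask off the flags and free
                 * the table.
                 */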
                pte = (pte_t)pgd[i];
                if (pte != 0)
                        page_free(((paddr_t)pte & PTE_ADDRESS),
                                  L2TBL_SIZE);
        }
        /* Release page directory */
        page_free(kvtop(pgd), L1TBL_SIZE);
}

/*
 * Switch to a new page directory.
 *
 * This is called when the context is switched.
 * The whole TLB/cache must be flushed after loading
 * the TTB (translation table base) register.
 */
void
mmu_switch(pgd_t pgd)
{
        paddr_t phys = kvtop(pgd);

        if (phys != get_ttb())
                switch_ttb(phys);
}

/*
 * Returns the physical address for the specified virtual address.
 * This routine checks whether the virtual area actually exists;
 * it returns 0 if at least one page is not mapped.
 */
paddr_t
mmu_extract(pgd_t pgd, vaddr_t virt, size_t size)
{
        pte_t pte;
        vaddr_t start, end, pg;
        paddr_t pa;

        start = trunc_page(virt);
        end = trunc_page(virt + size - 1);

        /* Check all pages exist */
        for (pg = start; pg <= end; pg += PAGE_SIZE) {
                if (!pte_present(pgd, pg))
                        return 0;
                pte = vtopte(pgd, pg);
                if (!page_present(pte, pg))
                        return 0;
        }

        /* Get physical address */
        pte = vtopte(pgd, start);
        pa = (paddr_t)ptetopg(pte, start);
        return pa + (paddr_t)(virt - start);
}

/*
 * Map I/O memory for the diagnostic device at a very early stage.
 */
void
mmu_premap(paddr_t phys, vaddr_t virt)
{
        pte_t pte = (pte_t)BOOT_PTE1;

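        /*
         * Build the mapping in the page table at BOOT_PTE1: clear it,
         * install it in the boot page directory, and add a single
         * kernel-only entry for the device page.
         */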
        memset(pte, 0, L2TBL_SIZE);
        boot_pgd[PAGE_DIR(virt)] = (uint32_t)kvtop(pte) | PDE_PRESENT;
        pte[PAGE_TABLE(virt)] = (uint32_t)phys | PTE_PRESENT | PTE_SYSTEM;
        flush_tlb();
}

/*
 * Initialize the MMU.
 *
 * Paging is already enabled by locore.S, which has also mapped
 * physical addresses 0-4M into kernel space. Here, all physical
 * memory is mapped into the kernel virtual address space as a
 * straight 1:1 mapping. User mode access is not allowed for
 * these kernel pages.
 * page_init() must be called before calling this routine.
 */
void
mmu_init(struct mmumap *mmumap_table)
{
        struct mmumap *map;
        int map_type = 0;

        for (map = mmumap_table; map->type != 0; map++) {
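                /*
                 * RAM, ROM and DMA regions become cacheable kernel
                 * mappings; I/O regions are mapped uncached via PG_IOMEM.
                 */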
                switch (map->type) {
                case VMT_RAM:
                case VMT_ROM:
                case VMT_DMA:
                        map_type = PG_SYSTEM;
                        break;
                case VMT_IO:
                        map_type = PG_IOMEM;
                        break;
                }

                if (mmu_map(boot_pgd, map->phys, map->virt,
                            map->size, map_type))
                        panic("mmu_init");
        }
        /*
         * Map vector page.
         */
        if (mmu_map(boot_pgd, 0, CONFIG_ARM_VECTORS, PAGE_SIZE, PG_SYSTEM))
                panic("mmu_init");
}