/*-
 * Copyright (c) 2005-2009, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * thread.c - thread management routines.
 */

#include <kernel.h>
#include <kmem.h>
#include <task.h>
#include <thread.h>
#include <ipc.h>
#include <sched.h>
#include <sync.h>
#include <hal.h>

/* forward declarations */
static thread_t thread_allocate(task_t);
static void     thread_deallocate(thread_t);

static struct thread    idle_thread;    /* idle thread */
static thread_t         zombie;         /* zombie thread */
static struct list      thread_list;    /* list of all threads */

/* global variable */
thread_t curthread = &idle_thread;      /* current thread */

/*
 * Create a new thread.
 *
 * The new thread will start from the return address of
 * thread_create() in user mode.  Since a new thread shares
 * the user mode stack of the caller thread, the caller is
 * responsible for allocating and setting up a new stack for
 * it.  The new thread is initially placed in the suspended
 * state, so thread_resume() must be called to start it.
 */
int
thread_create(task_t task, thread_t *tp)
{
        thread_t t;
        vaddr_t sp;

        sched_lock();

        if (!task_valid(task)) {
                sched_unlock();
                return ESRCH;
        }
        if (!task_access(task)) {
                sched_unlock();
                return EPERM;
        }
        if (task->nthreads >= MAXTHREADS) {
                sched_unlock();
                return EAGAIN;
        }
        /*
         * We check the pointer for the return value here.
         * This simplifies the error recovery in the
         * subsequent code.
         */
        if ((curtask->flags & TF_SYSTEM) == 0) {
                t = NULL;
                if (copyout(&t, tp, sizeof(t))) {
                        sched_unlock();
                        return EFAULT;
                }
        }
        /*
         * Make a thread entry for the new thread.
         */
        if ((t = thread_allocate(task)) == NULL) {
                DPRINTF(("Out of memory\n"));
                sched_unlock();
                return ENOMEM;
        }
        memcpy(t->kstack, curthread->kstack, KSTACKSZ);
        sp = (vaddr_t)t->kstack + KSTACKSZ;
        context_set(&t->ctx, CTX_KSTACK, (register_t)sp);
        context_set(&t->ctx, CTX_KENTRY, (register_t)&syscall_ret);
        sched_start(t, curthread->basepri, SCHED_RR);
        t->suscnt = task->suscnt + 1;

        /*
         * No page fault here: the pointer was already
         * verified above.
         */
        if (curtask->flags & TF_SYSTEM)
                *tp = t;
        else
                copyout(&t, tp, sizeof(t));

        sched_unlock();
        return 0;
}

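/*
 * Typical use (an illustrative sketch only; it assumes the caller
 * runs in user mode and allocates its own stack, and task_self(),
 * my_entry and my_stack_top are hypothetical names):
 *
 *      thread_t t;
 *
 *      if (thread_create(task_self(), &t) != 0)
 *              return;
 *      thread_load(t, my_entry, my_stack_top);
 *      thread_resume(t);
 */
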
/*
 * Permanently stop execution of the specified thread.
 * If the given thread is the current thread, this routine
 * never returns.
 */
int
thread_terminate(thread_t t)
{

        sched_lock();
        if (!thread_valid(t)) {
                sched_unlock();
                return ESRCH;
        }
        if (!task_access(t->task)) {
                sched_unlock();
                return EPERM;
        }
        thread_destroy(t);
        sched_unlock();
        return 0;
}

/*
 * thread_destroy - the internal version of thread_terminate.
 */
void
thread_destroy(thread_t th)
{

        msg_cancel(th);
        mutex_cancel(th);
        timer_cancel(th);
        sched_stop(th);
        thread_deallocate(th);
}

/*
 * Load entry/stack address of the user mode context.
 *
 * If the entry or stack address is NULL, we keep the
 * old value for it.
 */
int
thread_load(thread_t t, void (*entry)(void), void *stack)
{
        int s;

        if (entry != NULL && !user_area(entry))
                return EINVAL;
        if (stack != NULL && !user_area(stack))
                return EINVAL;

        sched_lock();

        if (!thread_valid(t)) {
                sched_unlock();
                return ESRCH;
        }
        if (!task_access(t->task)) {
                sched_unlock();
                return EPERM;
        }
        s = splhigh();
        if (entry != NULL)
                context_set(&t->ctx, CTX_UENTRY, (register_t)entry);
        if (stack != NULL)
                context_set(&t->ctx, CTX_USTACK, (register_t)stack);
        splx(s);

        sched_unlock();
        return 0;
}

/*
 * Return the current thread.
 */
thread_t
thread_self(void)
{

        return curthread;
}

/*
 * Return true if the specified thread is valid.
 */
int
thread_valid(thread_t t)
{
        list_t head, n;
        thread_t tmp;

        head = &thread_list;
        for (n = list_first(head); n != head; n = list_next(n)) {
                tmp = list_entry(n, struct thread, link);
                if (tmp == t)
                        return 1;
        }
        return 0;
}

/*
 * Release the processor from the current thread so that
 * another thread can run.
 */
void
thread_yield(void)
{

        sched_yield();
}

/*
 * Suspend a thread.
 *
 * A thread can be suspended any number of times, and it does
 * not start to run again until it has been resumed the same
 * number of times.
 */
int
thread_suspend(thread_t t)
{

        sched_lock();
        if (!thread_valid(t)) {
                sched_unlock();
                return ESRCH;
        }
        if (!task_access(t->task)) {
                sched_unlock();
                return EPERM;
        }
        if (++t->suscnt == 1)
                sched_suspend(t);

        sched_unlock();
        return 0;
}

/*
 * Resume a thread.
 *
 * A thread does not begin to run unless both the thread
 * suspend count and the task suspend count are zero.
 */
int
thread_resume(thread_t t)
{

        ASSERT(t != curthread);

        sched_lock();
        if (!thread_valid(t)) {
                sched_unlock();
                return ESRCH;
        }
        if (!task_access(t->task)) {
                sched_unlock();
                return EPERM;
        }
        if (t->suscnt == 0) {
                sched_unlock();
                return EINVAL;
        }
        t->suscnt--;
        if (t->suscnt == 0 && t->task->suscnt == 0)
                sched_resume(t);

        sched_unlock();
        return 0;
}

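/*
 * The suspend count nests (an illustrative sketch; t is assumed to
 * be a valid, running thread whose task is not itself suspended):
 *
 *      thread_suspend(t);      suscnt 0 -> 1, thread stops
 *      thread_suspend(t);      suscnt 1 -> 2
 *      thread_resume(t);       suscnt 2 -> 1, still suspended
 *      thread_resume(t);       suscnt 1 -> 0, thread runs again
 */
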
/*
 * thread_schedparam - get/set scheduling parameters.
 */
int
thread_schedparam(thread_t t, int op, int *param)
{
        int pri, policy;
        int error = 0;

        sched_lock();
        if (!thread_valid(t)) {
                sched_unlock();
                return ESRCH;
        }
        if (t->task->flags & TF_SYSTEM) {
                sched_unlock();
                return EINVAL;
        }
        /*
         * A thread can change the scheduling parameters of
         * threads in the same task or in a child task.
         */
        if (!(t->task == curtask || t->task->parent == curtask) &&
            !task_capable(CAP_NICE)) {
                sched_unlock();
                return EPERM;
        }

        switch (op) {
        case SOP_GETPRI:
                pri = sched_getpri(t);
                if (copyout(&pri, param, sizeof(pri)))
                        error = EINVAL;
                break;

        case SOP_SETPRI:
                if (copyin(param, &pri, sizeof(pri))) {
                        error = EINVAL;
                        break;
                }
                /*
                 * Validate the priority range.
                 */
                if (pri < 0)
                        pri = 0;
                else if (pri >= PRI_IDLE)
                        pri = PRI_IDLE - 1;

                /*
                 * If the caller has the CAP_NICE capability, it can
                 * change the thread priority to any level.
                 * Otherwise, the caller can not raise the priority
                 * to the realtime level or above.
                 */
                if (pri <= PRI_REALTIME && !task_capable(CAP_NICE)) {
                        error = EPERM;
                        break;
                }
                /*
                 * If the current priority has been inherited through
                 * a mutex, we can not lower the effective priority
                 * now.  In this case, only the base priority is
                 * changed, and the current priority will be adjusted
                 * to the correct value later.
                 */
                if (t->priority != t->basepri && pri > t->priority)
                        pri = t->priority;

                mutex_setpri(t, pri);
                sched_setpri(t, pri, pri);
                break;

        case SOP_GETPOLICY:
                policy = sched_getpolicy(t);
                if (copyout(&policy, param, sizeof(policy)))
                        error = EINVAL;
                break;

        case SOP_SETPOLICY:
                if (copyin(param, &policy, sizeof(policy))) {
                        error = EINVAL;
                        break;
                }
                error = sched_setpolicy(t, policy);
                break;

        default:
                error = EINVAL;
                break;
        }
        sched_unlock();
        return error;
}

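/*
 * Example (an illustrative sketch from a user task, where param
 * points into the caller's own address space; the value 100 is an
 * arbitrary priority):
 *
 *      int pri = 100;
 *
 *      thread_schedparam(t, SOP_SETPRI, &pri);
 *      thread_schedparam(t, SOP_GETPRI, &pri);
 */
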
/*
 * Idle thread.
 *
 * Put the system into low power mode until we get an
 * interrupt.  Then, we yield the current thread so that the
 * thread woken by the ISR can run.  This routine is called
 * only once after kernel initialization is completed.
 */
void
thread_idle(void)
{

        for (;;) {
                machine_idle();
                sched_yield();
        }
}

/*
 * Allocate a thread.
 */
static thread_t
thread_allocate(task_t task)
{
        struct thread *t;
        void *stack;

        if ((t = kmem_alloc(sizeof(*t))) == NULL)
                return NULL;

        if ((stack = kmem_alloc(KSTACKSZ)) == NULL) {
                kmem_free(t);
                return NULL;
        }
        memset(t, 0, sizeof(*t));

        t->kstack = stack;
        t->task = task;
        list_init(&t->mutexes);
        list_insert(&thread_list, &t->link);
        list_insert(&task->threads, &t->task_link);
        task->nthreads++;

        return t;
}

/*
 * Deallocate a thread.
 *
 * We can not release the context of the "current" thread
 * because our thread switching always requires the current
 * context. So, the resource deallocation is deferred until
 * another thread calls thread_deallocate() later.
 */
static void
thread_deallocate(thread_t t)
{

        list_remove(&t->task_link);
        list_remove(&t->link);
        t->excbits = 0;
        t->task->nthreads--;

        if (zombie != NULL) {
                /*
                 * Deallocate the zombie thread which was
                 * killed in a previous request.
                 */
                ASSERT(zombie != curthread);
                kmem_free(zombie->kstack);
                zombie->kstack = NULL;
                kmem_free(zombie);
                zombie = NULL;
        }
        if (t == curthread) {
                /*
                 * Enter the zombie state and wait for
                 * somebody else to kill us.
                 */
                zombie = t;
                return;
        }

        kmem_free(t->kstack);
        t->kstack = NULL;
        kmem_free(t);
}

/*
 * Return thread information.
 */
int
thread_info(struct threadinfo *info)
{
        u_long target = info->cookie;
        u_long i = 0;
        thread_t t;
        list_t n;

        sched_lock();
        n = list_last(&thread_list);
        do {
                if (i++ == target) {
                        t = list_entry(n, struct thread, link);
                        info->cookie = i;
                        info->id = t;
                        info->state = t->state;
                        info->policy = t->policy;
                        info->priority = t->priority;
                        info->basepri = t->basepri;
                        info->time = t->time;
                        info->suscnt = t->suscnt;
                        info->task = t->task;
                        info->active = (t == curthread) ? 1 : 0;
                        strlcpy(info->taskname, t->task->name, MAXTASKNAME);
                        strlcpy(info->slpevt, t->slpevt ?
                                t->slpevt->name : "-", MAXEVTNAME);
                        sched_unlock();
                        return 0;
                }
                n = list_prev(n);
        } while (n != &thread_list);
        sched_unlock();
        return ESRCH;
}

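/*
 * The cookie acts as an iteration index: start with cookie = 0 and
 * call thread_info() repeatedly, feeding the updated cookie back in,
 * until ESRCH marks the end of the list (an illustrative sketch):
 *
 *      struct threadinfo info;
 *
 *      info.cookie = 0;
 *      while (thread_info(&info) == 0) {
 *              ... use info.id, info.priority, info.taskname ...
 *      }
 */
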
/*
 * Create a thread running in the kernel address space.
 *
 * Since interrupts are disabled during a thread switch, they
 * are still disabled at the entry of the kernel thread.  So,
 * the kernel thread must enable interrupts immediately when
 * it gets control.
 * This routine assumes the scheduler is already locked.
 */
thread_t
kthread_create(void (*entry)(void *), void *arg, int pri)
{
        thread_t t;
        vaddr_t sp;

        ASSERT(curthread->locks > 0);

        /*
         * If there is not enough memory for the new thread,
         * the caller should just drop to panic().
         */
        if ((t = thread_allocate(&kernel_task)) == NULL)
                return NULL;

        memset(t->kstack, 0, KSTACKSZ);
        sp = (vaddr_t)t->kstack + KSTACKSZ;
        context_set(&t->ctx, CTX_KSTACK, (register_t)sp);
        context_set(&t->ctx, CTX_KENTRY, (register_t)entry);
        context_set(&t->ctx, CTX_KARG, (register_t)arg);
        sched_start(t, pri, SCHED_FIFO);
        t->suscnt = 1;
        sched_resume(t);

        return t;
}

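/*
 * Example (an illustrative sketch; my_kthread is a hypothetical
 * entry routine and 100 an arbitrary priority).  The scheduler must
 * be locked around the call, and the entry routine should enable
 * interrupts as soon as it gets control:
 *
 *      static void
 *      my_kthread(void *arg)
 *      {
 *              ... enable interrupts, then loop doing work ...
 *      }
 *
 *      sched_lock();
 *      if (kthread_create(my_kthread, NULL, 100) == NULL)
 *              panic("can not create kernel thread");
 *      sched_unlock();
 */
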
/*
 * Terminate a kernel thread.
 */
void
kthread_terminate(thread_t t)
{
        ASSERT(t != NULL);
        ASSERT(t->task->flags & TF_SYSTEM);

        sched_lock();

        mutex_cancel(t);
        timer_cancel(t);
        sched_stop(t);
        thread_deallocate(t);

        sched_unlock();
}

/*
 * The first thread in the system is created here by hand.
 * This thread will become an idle thread when thread_idle()
 * is called later in main().
 */
void
thread_init(void)
{
        void *stack;
        vaddr_t sp;

        list_init(&thread_list);

        if ((stack = kmem_alloc(KSTACKSZ)) == NULL)
                panic("thread_init");

        memset(stack, 0, KSTACKSZ);
        sp = (vaddr_t)stack + KSTACKSZ;
        context_set(&idle_thread.ctx, CTX_KSTACK, (register_t)sp);
        sched_start(&idle_thread, PRI_IDLE, SCHED_FIFO);
        idle_thread.kstack = stack;
        idle_thread.task = &kernel_task;
        idle_thread.state = TS_RUN;
        idle_thread.locks = 1;
        list_init(&idle_thread.mutexes);

        list_insert(&thread_list, &idle_thread.link);
        list_insert(&kernel_task.threads, &idle_thread.task_link);
        kernel_task.nthreads = 1;
}