/*-
 * Copyright (c) 2005-2009, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * sched.c - scheduler
 */

/**
 * General Design:
 *
 * The Prex scheduler is based on the algorithm known as a
 * priority-based multi-level queue. Each thread has its own
 * priority, assigned between 0 and 255. As in BSD UNIX, a lower
 * number means a higher priority. The scheduler maintains 256
 * run queues, one mapped to each priority level. The lowest
 * priority (=255) is used only for the idle thread.
 *
 * All threads have two different types of priorities:
 *
 *  Base priority:
 *      A static priority used for priority computation.
 *      A user mode program can change this value via system call.
 *
 *  Current priority:
 *      The actual scheduling priority. The kernel may adjust
 *      this priority dynamically when needed.
 *
 * Each thread has one of the following states:
 *
 *  - TS_RUN    Running or ready to run
 *  - TS_SLEEP  Sleeping on some event
 *  - TS_SUSP   Suspend count is not 0
 *  - TS_EXIT   Terminated
 *
 * A thread is always preemptible, even in kernel mode.
 * There are four reasons to switch threads:
 *
 *  (1) Block
 *       The thread is blocked for sleep or suspend.
 *
 *  (2) Preemption
 *       A higher priority thread becomes runnable.
 *
 *  (3) Quantum expiration
 *       The thread consumes its time quantum.
 *
 *  (4) Yield
 *       The thread releases the CPU by itself.
 *
 * There are three types of scheduling policies:
 *
 *  - SCHED_FIFO   First-in, first-out
 *  - SCHED_RR     Round robin (SCHED_FIFO + timeslice)
 *  - SCHED_OTHER  Not supported yet
 */
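
/*
 * Illustration (added for clarity; not from the original text):
 * because a smaller number is a better priority, a test such as
 *
 *	if (t->priority < maxpri)
 *		curthread->resched = 1;
 *
 * reads as "t is more urgent than the best runnable thread, so
 * request a reschedule"; see runq_enqueue() below.
 */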

#include <kernel.h>
#include <event.h>
#include <thread.h>
#include <timer.h>
#include <vm.h>
#include <task.h>
#include <sched.h>
#include <hal.h>

static struct queue	runq[NPRI];	/* run queues */
static struct queue	wakeq;		/* queue for waking threads */
static struct queue	dpcq;		/* DPC queue */
static struct event	dpc_event;	/* event for DPC */
static int		maxpri;		/* highest priority in runq */

/*
 * Search for highest-priority runnable thread.
 */
static int
runq_getbest(void)
{
	int pri;

	for (pri = 0; pri < MINPRI; pri++)
		if (!queue_empty(&runq[pri]))
			break;
	return pri;
}

/*
 * Put a thread at the tail of the run queue.
 * The rescheduling flag is set if the priority is better
 * than that of the currently running thread.
 */
static void
runq_enqueue(thread_t t)
{

	enqueue(&runq[t->priority], &t->sched_link);
	if (t->priority < maxpri) {
		maxpri = t->priority;
		curthread->resched = 1;
	}
}

/*
 * Insert a thread at the head of the run queue.
 * We assume this routine is called during a thread switch.
 */
static void
runq_insert(thread_t t)
{

	queue_insert(&runq[t->priority], &t->sched_link);
	if (t->priority < maxpri)
		maxpri = t->priority;
}

/*
 * Pick up and remove the highest-priority thread
 * from the run queue.
 */
static thread_t
runq_dequeue(void)
{
	queue_t q;
	thread_t t;

	q = dequeue(&runq[maxpri]);
	t = queue_entry(q, struct thread, sched_link);
	if (queue_empty(&runq[maxpri]))
		maxpri = runq_getbest();

	return t;
}

/*
 * Remove the specified thread from the run queue.
 */
static void
runq_remove(thread_t t)
{

	queue_remove(&t->sched_link);
	maxpri = runq_getbest();
}

/*
 * Wake up all threads in the wake queue.
 */
static void
wakeq_flush(void)
{
	queue_t q;
	thread_t t;

	while (!queue_empty(&wakeq)) {
		/*
		 * Set a thread runnable.
		 */
		q = dequeue(&wakeq);
		t = queue_entry(q, struct thread, sched_link);
		t->slpevt = NULL;
		t->state &= ~TS_SLEEP;
		if (t != curthread && t->state == TS_RUN)
			runq_enqueue(t);
	}
}

/*
 * Set the thread running:
 * Put a thread on the wake queue. This thread will be moved
 * to the run queue later in wakeq_flush().
 */
static void
sched_setrun(thread_t t)
{

	enqueue(&wakeq, &t->sched_link);
	timer_stop(&t->timeout);
}
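
/*
 * Note (added for clarity; an inference from the code, not from
 * the original comments): routing wakeups through wakeq means
 * callers such as sched_wakeup() never touch the run queues
 * directly, which keeps them safe to call from interrupt
 * handlers. The pending wakeups are drained by wakeq_flush()
 * before each thread switch and in sched_unlock().
 */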

/*
 * sched_swtch - this is the scheduler proper:
 *
 * If the scheduling reason is preemption, the current thread
 * remains at the head of the run queue, so it still has the
 * right to run first among threads of the same priority. For
 * any other scheduling reason, the current thread is inserted
 * at the tail of the run queue.
 */
static void
sched_swtch(void)
{
	thread_t prev, next;

	/*
	 * Put the current thread on the run queue.
	 */
	prev = curthread;
	if (prev->state == TS_RUN) {
		if (prev->priority > maxpri)
			runq_insert(prev);	/* preemption */
		else
			runq_enqueue(prev);
	}
	prev->resched = 0;

	/*
	 * Select the thread to run on the CPU next.
	 * If it is the same as the previous one, return.
	 */
	next = runq_dequeue();
	if (next == prev)
		return;
	curthread = next;

	/*
	 * Switch to the new thread.
	 * You are expected to understand this..
	 */
	if (prev->task != next->task)
		vm_switch(next->task->map);
	context_switch(&prev->ctx, &next->ctx);
}

/*
 * sleep_timeout - the sleep timer has expired:
 *
 * Wake up the thread which is sleeping in sched_tsleep().
 */
static void
sleep_timeout(void *arg)
{
	thread_t t = (thread_t)arg;

	sched_unsleep(t, SLP_TIMEOUT);
}

/*
 * sched_tsleep - sleep the current thread until a wakeup
 * is performed on the specified event.
 * This routine returns the sleep result.
 */
int
sched_tsleep(struct event *evt, u_long msec)
{
	int s;

	ASSERT(evt != NULL);

	sched_lock();
	s = splhigh();

	/*
	 * Put the current thread on the sleep queue.
	 */
	curthread->slpevt = evt;
	curthread->state |= TS_SLEEP;
	enqueue(&evt->sleepq, &curthread->sched_link);

	/*
	 * Program a timer to wake us up at the timeout.
	 */
	if (msec != 0) {
		timer_callout(&curthread->timeout, msec,
			      &sleep_timeout, curthread);
	}

	wakeq_flush();
	sched_swtch();		/* Sleep here. Zzzz.. */

	splx(s);
	sched_unlock();
	return curthread->slpret;
}
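
/*
 * Illustrative usage (a sketch, not from the original file): a
 * caller typically sleeps on an event with a timeout and checks
 * the sleep result. "io_event" here is a hypothetical event
 * assumed to have been set up with event_init().
 *
 *	if (sched_tsleep(&io_event, 100) == SLP_TIMEOUT)
 *		... nothing arrived within 100 msec ...
 */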

/*
 * sched_wakeup - wake up all threads sleeping on the event.
 *
 * A thread can be in the sleep and suspend states simultaneously,
 * so it may remain suspended even after it is woken up.
 */
void
sched_wakeup(struct event *evt)
{
	queue_t q;
	thread_t t;
	int s;

	ASSERT(evt != NULL);

	sched_lock();
	s = splhigh();
	while (!queue_empty(&evt->sleepq)) {
		q = dequeue(&evt->sleepq);
		t = queue_entry(q, struct thread, sched_link);
		t->slpret = 0;
		sched_setrun(t);
	}
	splx(s);
	sched_unlock();
}

/*
 * sched_wakeone - wake up one thread sleeping on the event.
 *
 * The highest priority thread is woken among the sleeping
 * threads. This routine returns the woken thread, or NULL
 * if no threads are sleeping.
 */
thread_t
sched_wakeone(struct event *evt)
{
	queue_t head, q;
	thread_t top = NULL, t;
	int s;

	sched_lock();
	s = splhigh();
	head = &evt->sleepq;
	if (!queue_empty(head)) {
		/*
		 * Select the highest priority thread in
		 * the sleep queue, and wake it up.
		 */
		q = queue_first(head);
		top = queue_entry(q, struct thread, sched_link);
		while (!queue_end(head, q)) {
			t = queue_entry(q, struct thread, sched_link);
			if (t->priority < top->priority)
				top = t;
			q = queue_next(q);
		}
		queue_remove(&top->sched_link);
		top->slpret = 0;
		sched_setrun(top);
	}
	splx(s);
	sched_unlock();
	return top;	/* NULL if nobody was sleeping */
}

/*
 * sched_unsleep - cancel sleep.
 *
 * sched_unsleep() removes the specified thread from its
 * sleep queue. The specified sleep result will be passed
 * to the sleeping thread as a return value of sched_tsleep().
 */
void
sched_unsleep(thread_t t, int result)
{
	int s;

	sched_lock();
	if (t->state & TS_SLEEP) {
		s = splhigh();
		queue_remove(&t->sched_link);
		t->slpret = result;
		sched_setrun(t);
		splx(s);
	}
	sched_unlock();
}

/*
 * Yield the current processor to another thread.
 *
 * Note that the current thread may run again immediately
 * if no other thread exists in the same priority queue.
 */
void
sched_yield(void)
{

	sched_lock();

	if (!queue_empty(&runq[curthread->priority]))
		curthread->resched = 1;

	sched_unlock();		/* A thread switch may occur here. */
}

/*
 * Suspend the specified thread.
 * Called with the scheduler locked.
 */
void
sched_suspend(thread_t t)
{

	if (t->state == TS_RUN) {
		if (t == curthread)
			curthread->resched = 1;
		else
			runq_remove(t);
	}
	t->state |= TS_SUSP;
}

/*
 * Resume the specified thread.
 * Called with the scheduler locked.
 */
void
sched_resume(thread_t t)
{

	if (t->state & TS_SUSP) {
		t->state &= ~TS_SUSP;
		if (t->state == TS_RUN)
			runq_enqueue(t);
	}
}

/*
 * sched_tick() is called from timer_clock() once every tick.
 * Check for quantum expiration, and set the rescheduling flag.
 * We don't need any locking here.
 */
void
sched_tick(void)
{

	if (curthread->state != TS_EXIT) {
		/*
		 * Bill time to the current thread.
		 */
		curthread->time++;

		if (curthread->policy == SCHED_RR) {
			if (--curthread->timeleft <= 0) {
				/*
				 * The quantum is up.
				 * Give the thread another.
				 */
				curthread->timeleft += QUANTUM;
				curthread->resched = 1;
			}
		}
	}
}

/*
 * Set up the thread structure to start scheduling.
 */
void
sched_start(thread_t t, int pri, int policy)
{

	t->state = TS_RUN | TS_SUSP;
	t->policy = policy;
	t->priority = pri;
	t->basepri = pri;
	if (t->policy == SCHED_RR)
		t->timeleft = QUANTUM;
}
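
/*
 * Note (added for clarity, inferred from the code above): a new
 * thread starts in TS_RUN | TS_SUSP, so it does not actually run
 * until the suspend bit is cleared by sched_resume().
 */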

/*
 * Stop scheduling the specified thread.
 */
void
sched_stop(thread_t t)
{

	if (t == curthread) {
		/*
		 * If the specified thread is the current thread,
		 * the scheduling lock count is forcibly set to 1
		 * to guarantee a thread switch in the next
		 * sched_unlock().
		 */
		curthread->locks = 1;
		curthread->resched = 1;
	} else {
		if (t->state == TS_RUN)
			runq_remove(t);
		else if (t->state & TS_SLEEP)
			queue_remove(&t->sched_link);
	}
	timer_stop(&t->timeout);
	t->state = TS_EXIT;
}

/*
 * sched_lock - lock the scheduler.
 *
 * Thread switching is disabled while the scheduler is locked.
 * Since the scheduling lock can be nested any number of times,
 * the caller is responsible for releasing the same number of
 * locks.
 */
void
sched_lock(void)
{

	curthread->locks++;
}

/*
 * sched_unlock - unlock the scheduler.
 *
 * When nobody holds the scheduler lock anymore, this checks the
 * rescheduling flag and kicks the scheduler if required.
 * This routine is always called at the end of each interrupt
 * handler.
 */
void
sched_unlock(void)
{
	int s;

	ASSERT(curthread->locks > 0);

	s = splhigh();
	if (curthread->locks == 1) {
		wakeq_flush();
		while (curthread->resched) {
			/*
			 * Kick the scheduler.
			 */
			sched_swtch();

			/*
			 * Now run any pending interrupts which fired
			 * during the thread switch, so that we catch
			 * rescheduling requests from those ISRs.
			 * Otherwise, the reschedule may be deferred
			 * until the _next_ sched_unlock() call.
			 */
			splx(s);
			s = splhigh();
			wakeq_flush();
		}
	}
	curthread->locks--;
	splx(s);
}
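
/*
 * Illustrative usage (a sketch, not from the original file):
 * lock/unlock pairs nest, and a deferred thread switch happens
 * only when the outermost sched_unlock() drops the last lock.
 *
 *	sched_lock();
 *	... update scheduler state; no thread switch can occur ...
 *	sched_unlock();		// may switch threads right here
 */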

/*
 * Return the priority of the specified thread.
 */
int
sched_getpri(thread_t t)
{

	return t->priority;
}

/*
 * sched_setpri - set the priority of a thread.
 *
 * Arrange to reschedule if the resulting priority is
 * better than that of the current thread.
 * Called with the scheduler locked.
 */
void
sched_setpri(thread_t t, int basepri, int pri)
{

	t->basepri = basepri;

	if (t == curthread) {
		/*
		 * If we change the current thread's priority,
		 * rescheduling may happen.
		 */
		t->priority = pri;
		maxpri = runq_getbest();
		if (pri != maxpri)
			curthread->resched = 1;
	} else {
		if (t->state == TS_RUN) {
			/*
			 * Update the thread priority and adjust
			 * its run queue position for the new
			 * priority. The rescheduling flag may be
			 * set.
			 */
			runq_remove(t);
			t->priority = pri;
			runq_enqueue(t);
		} else
			t->priority = pri;
	}
}

/*
 * Get the scheduling policy.
 */
int
sched_getpolicy(thread_t t)
{

	return t->policy;
}

/*
 * Set the scheduling policy.
 */
int
sched_setpolicy(thread_t t, int policy)
{
	int error = 0;

	switch (policy) {
	case SCHED_RR:
	case SCHED_FIFO:
		t->timeleft = QUANTUM;
		t->policy = policy;
		break;
	default:
		error = EINVAL;
		break;
	}
	return error;
}

/*
 * Schedule a DPC callback.
 *
 * A DPC (Deferred Procedure Call) is used to call a specific
 * function at some later time, at DPC priority.
 * This routine can be called from an ISR.
 */
void
sched_dpc(struct dpc *dpc, void (*fn)(void *), void *arg)
{
	int s;

	ASSERT(dpc != NULL);
	ASSERT(fn != NULL);

	sched_lock();

	s = splhigh();
	dpc->func = fn;
	dpc->arg = arg;
	if (dpc->state != DPC_PENDING)
		enqueue(&dpcq, &dpc->link);
	dpc->state = DPC_PENDING;
	splx(s);

	sched_wakeup(&dpc_event);
	sched_unlock();
}
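
/*
 * Illustrative usage (a sketch, not from the original file): an
 * interrupt handler can defer its heavy work to thread context.
 * "xxx_dpc", "xxx_isr" and "xxx_worker" are hypothetical driver
 * names.
 *
 *	static struct dpc xxx_dpc;
 *
 *	static void
 *	xxx_isr(void *arg)
 *	{
 *		... acknowledge the hardware quickly ...
 *		sched_dpc(&xxx_dpc, &xxx_worker, arg);
 *	}
 */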

/*
 * DPC thread.
 *
 * This is a kernel thread that processes pending callback
 * requests in the DPC queue. Each DPC routine is called under
 * the following conditions:
 *  - Interrupts are enabled.
 *  - The scheduler is unlocked.
 *  - The scheduling priority is PRI_DPC.
 */
static void
dpc_thread(void *dummy)
{
	queue_t q;
	struct dpc *dpc;

	splhigh();

	for (;;) {
		/*
		 * Wait until the next DPC request.
		 */
		sched_sleep(&dpc_event);

		while (!queue_empty(&dpcq)) {
			q = dequeue(&dpcq);
			dpc = queue_entry(q, struct dpc, link);
			dpc->state = DPC_FREE;

			/*
			 * Call the DPC routine.
			 */
			spl0();
			(*dpc->func)(dpc->arg);
			splhigh();
		}
	}
	/* NOTREACHED */
}

/*
 * Initialize the global scheduler state.
 */
void
sched_init(void)
{
	thread_t t;
	int i;

	for (i = 0; i < NPRI; i++)
		queue_init(&runq[i]);

	queue_init(&wakeq);
	queue_init(&dpcq);
	event_init(&dpc_event, "dpc");
	maxpri = PRI_IDLE;
	curthread->resched = 1;

	t = kthread_create(dpc_thread, NULL, PRI_DPC);
	if (t == NULL)
		panic("sched_init");

	DPRINTF(("Time slice is %d msec\n", CONFIG_TIME_SLICE));
}