/* ast.c */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
 * Copyright (c) 1993,1994 The University of Utah and
 * the Computer Systems Laboratory (CSL).
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
 * THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science
 * Carnegie Mellon University
 * Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *
 * This file contains routines to check whether an ast is needed.
 *
 * ast_check() - check whether ast is needed for interrupt or context
 * switch. Usually called by clock interrupt handler.
 *
 */
#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/debug.h>
#include "cpu_number.h"
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/processor.h>
#include <device/net_io.h>
#include <machine/machspl.h> /* for splsched */

#if MACH_FIXPRI
#include <mach/policy.h>
#endif /* MACH_FIXPRI */

/*
 * Per-CPU mask of pending AST reasons (AST_BLOCK, AST_NETWORK, ...).
 * Consumed and cleared by ast_taken(); set elsewhere via ast_on().
 * volatile: presumably because it is updated asynchronously, e.g.
 * from interrupt level — confirm against machine-dependent ast_on().
 */
volatile ast_t need_ast[NCPUS];
  52. void
  53. ast_init(void)
  54. {
  55. #ifndef MACHINE_AST
  56. int i;
  57. for (i=0; i<NCPUS; i++)
  58. need_ast[i] = 0;
  59. #endif /* MACHINE_AST */
  60. }
/*
 * ast_taken:  service all ASTs pending for the current cpu.
 *
 * Entered with interrupts disabled (on the way back from a trap or
 * interrupt).  Atomically consumes this cpu's need_ast mask before
 * re-enabling interrupts, then acts on each recorded reason.
 *
 * May not return to the caller: thread_halt_self() and thread_block()
 * are invoked with thread_exception_return as the continuation.
 */
void
ast_taken(void)
{
	thread_t self = current_thread();
	ast_t reasons;

	/*
	 * Interrupts are still disabled.
	 * We must clear need_ast and then enable interrupts.
	 */
	reasons = need_ast[cpu_number()];
	need_ast[cpu_number()] = AST_ZILCH;
	(void) spl0();

	/*
	 * These actions must not block.
	 */
	if (reasons & AST_NETWORK)
		net_ast();

	/*
	 * Make darn sure that we don't call thread_halt_self
	 * or thread_block from the idle thread.
	 */
	if (self != current_processor()->idle_thread) {
#ifndef MIGRATING_THREADS
		/* Loop: halt may be re-requested while we are halting. */
		while (thread_should_halt(self))
			thread_halt_self(thread_exception_return);
#endif

		/*
		 * One of the previous actions might well have
		 * woken a high-priority thread, so we use
		 * csw_needed in addition to AST_BLOCK.
		 */
		if ((reasons & AST_BLOCK) ||
		    csw_needed(self, current_processor())) {
			counter(c_ast_taken_block++);
			thread_block(thread_exception_return);
		}
	}
}
/*
 * ast_check:  decide whether the current cpu needs an AST (usually a
 * context switch) and, if so, request one with ast_on().
 *
 * Usually called from the clock interrupt handler.  Runs at splsched;
 * restores the previous spl before returning.
 */
void
ast_check(void)
{
	int mycpu = cpu_number();
	processor_t myprocessor;
	thread_t thread = current_thread();
	run_queue_t rq;
	spl_t s = splsched();

	/*
	 * Check processor state for ast conditions.
	 */
	myprocessor = cpu_to_processor(mycpu);
	switch(myprocessor->state) {
	case PROCESSOR_OFF_LINE:
	case PROCESSOR_IDLE:
	case PROCESSOR_DISPATCHING:
		/*
		 * No ast.
		 */
		break;

#if NCPUS > 1
	case PROCESSOR_ASSIGN:
	case PROCESSOR_SHUTDOWN:
		/*
		 * Need ast to force action thread onto processor.
		 *
		 * XXX Should check if action thread is already there.
		 */
		ast_on(mycpu, AST_BLOCK);
		break;
#endif /* NCPUS > 1 */

	case PROCESSOR_RUNNING:
		/*
		 * Propagate thread ast to processor. If we already
		 * need an ast, don't look for more reasons.
		 */
		ast_propagate(thread, mycpu);
		if (ast_needed(mycpu))
			break;

		/*
		 * Context switch check. The csw_needed macro isn't
		 * used here because the rq->low hint may be wrong,
		 * and fixing it here avoids an extra ast.
		 * First check the easy cases.
		 */
		if (thread->state & TH_SUSP || myprocessor->runq.count > 0) {
			ast_on(mycpu, AST_BLOCK);
			break;
		}

		/*
		 * Update lazy evaluated runq->low if only timesharing.
		 */
#if MACH_FIXPRI
		if (myprocessor->processor_set->policies & POLICY_FIXEDPRI) {
			if (csw_needed(thread,myprocessor)) {
				ast_on(mycpu, AST_BLOCK);
				break;
			}
			else {
				/*
				 * For fixed priority threads, set first_quantum
				 * so entire new quantum is used.
				 */
				if (thread->policy == POLICY_FIXEDPRI)
					myprocessor->first_quantum = TRUE;
			}
		}
		else {
#endif /* MACH_FIXPRI */
			rq = &(myprocessor->processor_set->runq);
			if (!(myprocessor->first_quantum) && (rq->count > 0)) {
				queue_t q;
				/*
				 * This is not the first quantum, and there may
				 * be something in the processor_set runq.
				 * Check whether low hint is accurate.
				 */
				/* Unlocked peek at the hint; rechecked under the lock below. */
				q = rq->runq + *(volatile int *)&rq->low;
				if (queue_empty(q)) {
					int i;

					/*
					 * Need to recheck and possibly update hint.
					 */
					simple_lock(&rq->lock);
					q = rq->runq + rq->low;
					if (rq->count > 0) {
						/* Advance low to the first non-empty queue. */
						for (i = rq->low; i < NRQS; i++) {
							if(!(queue_empty(q)))
								break;
							q++;
						}
						rq->low = i;
					}
					simple_unlock(&rq->lock);
				}

				/* A runnable thread at priority <= ours should preempt us. */
				if (rq->low <= thread->sched_pri) {
					ast_on(mycpu, AST_BLOCK);
					break;
				}
			}
#if MACH_FIXPRI
		}
#endif /* MACH_FIXPRI */
		break;

	default:
		panic("ast_check: Bad processor state (cpu %d processor %p) state: %d",
			mycpu, myprocessor, myprocessor->state);
	}

	(void) splx(s);
}