thread.c

  1. /*
  2. * Mach Operating System
  3. * Copyright (c) 1994-1987 Carnegie Mellon University
  4. * All Rights Reserved.
  5. *
  6. * Permission to use, copy, modify and distribute this software and its
  7. * documentation is hereby granted, provided that both the copyright
  8. * notice and this permission notice appear in all copies of the
  9. * software, derivative works or modified versions, and any portions
  10. * thereof, and that both notices appear in supporting documentation.
  11. *
  12. * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  13. * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  14. * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  15. *
  16. * Carnegie Mellon requests users of this software to return to
  17. *
  18. * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  19. * School of Computer Science
  20. * Carnegie Mellon University
  21. * Pittsburgh PA 15213-3890
  22. *
  23. * any improvements or extensions that they make and grant Carnegie Mellon
  24. * the rights to redistribute these changes.
  25. */
  26. /*
  27. * File: kern/thread.c
  28. * Author: Avadis Tevanian, Jr., Michael Wayne Young, David Golub
  29. * Date: 1986
  30. *
  31. * Thread management primitives implementation.
  32. */
  33. #include <glue/gnulinux.h>
  34. #include <mach/message.h>
  35. #include <mach/boolean.h>
  36. #include <mach/kern_return.h>
  37. #include <mach/mach_param.h>
  38. #include <mach/task_special_ports.h>
  39. #include <mach/thread_special_ports.h>
  40. #include <vm/vm_kern.h>
  41. #include <kern/debug.h>
  42. #include <kern/kalloc.h>
  43. #include <kern/task.h>
  44. #include <kern/thread.h>
  45. #include <kern/ipc_kobject.h>
  46. #include <kern/ipc_tt.h>
  47. #include <ipc/ipc_space.h>
  48. #include <ipc/ipc_table.h>
  49. #include <ipc/ipc_port.h>
  50. #include <ipc/ipc_right.h>
  51. #include <ipc/ipc_entry.h>
  52. #include <ipc/ipc_object.h>
  53. #include <linux/printk.h>
  54. #if 0
  55. #include <kern/printf.h>
  56. #include <mach/std_types.h>
  57. #include <mach/policy.h>
  58. #include <mach/thread_info.h>
  59. #include <mach/thread_special_ports.h>
  60. #include <mach/thread_status.h>
  61. #include <mach/time_value.h>
  62. #include <machine/vm_param.h>
  63. #include <kern/ast.h>
  64. #include <kern/counters.h>
  65. #include <kern/debug.h>
  66. #include <kern/eventcount.h>
  67. #include <kern/ipc_mig.h>
  68. #include <kern/ipc_tt.h>
  69. #include <kern/processor.h>
  70. #include <kern/queue.h>
  71. #include <kern/sched.h>
  72. #include <kern/sched_prim.h>
  73. #include <kern/syscall_subr.h>
  74. #include <kern/thread.h>
  75. #include <kern/thread_swap.h>
  76. #include <kern/host.h>
  77. #include <kern/kalloc.h>
  78. #include <kern/slab.h>
  79. #include <kern/mach_clock.h>
  80. #include <vm/vm_kern.h>
  81. #include <vm/vm_user.h>
  82. #include <ipc/ipc_kmsg.h>
  83. #include <ipc/ipc_port.h>
  84. #include <ipc/mach_msg.h>
  85. #include <ipc/mach_port.h>
  86. #include <machine/machspl.h> /* for splsched */
  87. #include <machine/pcb.h>
  88. #include <machine/thread.h> /* for MACHINE_STACK */
  89. thread_t active_threads[NCPUS];
  90. vm_offset_t active_stacks[NCPUS];
  91. #endif
  92. struct gnu_kmem_cache thread_cache;
  93. struct thread thread_template;
  94. #if 0
  95. struct kmem_cache thread_stack_cache;
  96. queue_head_t reaper_queue;
  97. decl_simple_lock_data(, reaper_lock)
  98. /* private */
  99. #if MACH_DEBUG
  100. #define STACK_MARKER 0xdeadbeefU
  101. boolean_t stack_check_usage = FALSE;
  102. decl_simple_lock_data(, stack_usage_lock)
  103. vm_size_t stack_max_usage = 0;
  104. #endif /* MACH_DEBUG */
  105. /*
  106. * Machine-dependent code must define:
  107. * pcb_init
  108. * pcb_terminate
  109. * pcb_collect
  110. *
  111. * The thread->pcb field is reserved for machine-dependent code.
  112. */
  113. #ifdef MACHINE_STACK
  114. /*
  115. * Machine-dependent code must define:
  116. * stack_alloc_try
  117. * stack_alloc
  118. * stack_free
  119. * stack_handoff
  120. * stack_collect
  121. * and if MACH_DEBUG:
  122. * stack_statistics
  123. */
  124. #else /* MACHINE_STACK */
  125. /*
  126. * We allocate stacks from generic kernel VM.
  127. * Machine-dependent code must define:
  128. * stack_attach
  129. * stack_detach
  130. * stack_handoff
  131. *
  132. * The stack_free_list can only be accessed at splsched,
  133. * because stack_alloc_try/thread_invoke operate at splsched.
  134. */
  135. decl_simple_lock_data(, stack_lock_data)/* splsched only */
  136. #define stack_lock() simple_lock(&stack_lock_data)
  137. #define stack_unlock() simple_unlock(&stack_lock_data)
  138. vm_offset_t stack_free_list; /* splsched only */
  139. unsigned int stack_free_count = 0; /* splsched only */
  140. unsigned int stack_free_limit = 1; /* patchable */
  141. /*
  142. * The next field is at the base of the stack,
  143. * so the low end is left unsullied.
  144. */
  145. #define stack_next(stack) (*((vm_offset_t *)((stack) + KERNEL_STACK_SIZE) - 1))
  146. /*
  147. * stack_alloc_try:
  148. *
  149. * Non-blocking attempt to allocate a kernel stack.
  150. * Called at splsched with the thread locked.
  151. */
  152. boolean_t stack_alloc_try(
  153. thread_t thread,
  154. void (*resume)(thread_t))
  155. {
  156. vm_offset_t stack;
  157. stack_lock();
  158. stack = stack_free_list;
  159. if (stack != 0) {
  160. stack_free_list = stack_next(stack);
  161. stack_free_count--;
  162. } else {
  163. stack = thread->stack_privilege;
  164. }
  165. stack_unlock();
  166. if (stack != 0) {
  167. stack_attach(thread, stack, resume);
  168. counter(c_stack_alloc_hits++);
  169. return TRUE;
  170. } else {
  171. counter(c_stack_alloc_misses++);
  172. return FALSE;
  173. }
  174. }
  175. /*
  176. * stack_alloc:
  177. *
  178. * Allocate a kernel stack for a thread.
  179. * May block.
  180. */
  181. kern_return_t stack_alloc(
  182. thread_t thread,
  183. void (*resume)(thread_t))
  184. {
  185. vm_offset_t stack;
  186. spl_t s;
  187. /*
  188. * We first try the free list. It is probably empty,
  189. * or stack_alloc_try would have succeeded, but possibly
  190. * a stack was freed before the swapin thread got to us.
  191. */
  192. s = splsched();
  193. stack_lock();
  194. stack = stack_free_list;
  195. if (stack != 0) {
  196. stack_free_list = stack_next(stack);
  197. stack_free_count--;
  198. }
  199. stack_unlock();
  200. (void) splx(s);
  201. if (stack == 0) {
  202. stack = kmem_cache_alloc(&thread_stack_cache);
  203. assert(stack != 0);
  204. #if MACH_DEBUG
  205. stack_init(stack);
  206. #endif /* MACH_DEBUG */
  207. }
  208. stack_attach(thread, stack, resume);
  209. return KERN_SUCCESS;
  210. }
  211. /*
  212. * stack_free:
  213. *
  214. * Free a thread's kernel stack.
  215. * Called at splsched with the thread locked.
  216. */
  217. void stack_free(
  218. thread_t thread)
  219. {
  220. vm_offset_t stack;
  221. stack = stack_detach(thread);
  222. if (stack != thread->stack_privilege) {
  223. stack_lock();
  224. stack_next(stack) = stack_free_list;
  225. stack_free_list = stack;
  226. stack_free_count += 1;
  227. #if MACH_COUNTERS
  228. if (stack_free_count > c_stack_alloc_max)
  229. c_stack_alloc_max = stack_free_count;
  230. #endif /* MACH_COUNTERS */
  231. stack_unlock();
  232. }
  233. }
  234. /*
  235. * stack_collect:
  236. *
  237. * Free excess kernel stacks.
  238. * May block.
  239. */
  240. void stack_collect(void)
  241. {
  242. vm_offset_t stack;
  243. spl_t s;
  244. s = splsched();
  245. stack_lock();
  246. while (stack_free_count > stack_free_limit) {
  247. stack = stack_free_list;
  248. stack_free_list = stack_next(stack);
  249. stack_free_count--;
  250. stack_unlock();
  251. (void) splx(s);
  252. #if MACH_DEBUG
  253. stack_finalize(stack);
  254. #endif /* MACH_DEBUG */
  255. kmem_cache_free(&thread_stack_cache, stack);
  256. s = splsched();
  257. stack_lock();
  258. }
  259. stack_unlock();
  260. (void) splx(s);
  261. }
  262. #endif /* MACHINE_STACK */
  263. /*
  264. * stack_privilege:
  265. *
  266. * stack_alloc_try on this thread must always succeed.
  267. */
  268. void stack_privilege(
  269. thread_t thread)
  270. {
  271. /*
  272. * This implementation only works for the current thread.
  273. */
  274. if (thread != current_thread())
  275. panic("stack_privilege");
  276. if (thread->stack_privilege == 0)
  277. thread->stack_privilege = current_stack();
  278. }
  279. ////
  280. #endif
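/*
 * thread_init:
 *
 * Initialize the thread module: create the cache from which thread
 * structures are allocated and fill in the template used to
 * initialize newly created threads.
 */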
  281. void thread_init(void)
  282. {
  283. gnu_kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
  284. NULL, 0);
  285. #if 0
  286. /*
  287. * Kernel stacks should be naturally aligned,
  288. * so that it is easy to find the starting/ending
  289. * addresses of a stack given an address in the middle.
  290. */
  291. kmem_cache_init(&thread_stack_cache, "thread_stack",
  292. KERNEL_STACK_SIZE, KERNEL_STACK_SIZE,
  293. NULL, 0);
  294. #endif
  295. /*
  296. * Fill in a template thread for fast initialization.
  297. * [Fields that must be (or are typically) reset at
  298. * time of creation are so noted.]
  299. */
  300. /* thread_template.links (none) */
  301. thread_template.runq = RUN_QUEUE_NULL;
  302. thread_template.linux_task = 0;
  303. /* thread_template.task (later) */
  304. /* thread_template.thread_list (later) */
  305. /* thread_template.pset_threads (later) */
  306. /* thread_template.lock (later) */
  307. /* one ref for being alive; one for the guy who creates the thread */
  308. thread_template.ref_count = 2;
  309. thread_template.pcb = (pcb_t) 0; /* (reset) */
  310. thread_template.kernel_stack = (vm_offset_t) 0;
  311. thread_template.stack_privilege = (vm_offset_t) 0;
  312. thread_template.wait_event = 0;
  313. /* thread_template.suspend_count (later) */
  314. thread_template.wait_result = KERN_SUCCESS;
  315. thread_template.wake_active = FALSE;
  316. thread_template.state = TH_SUSP | TH_SWAPPED;
  317. //FIXME: thread_template.swap_func = thread_bootstrap_return;
  318. /* thread_template.priority (later) */
  319. thread_template.max_priority = BASEPRI_USER;
  320. /* thread_template.sched_pri (later - compute_priority) */
  321. #if MACH_FIXPRI
  322. thread_template.sched_data = 0;
  323. thread_template.policy = POLICY_TIMESHARE;
  324. #endif /* MACH_FIXPRI */
  325. thread_template.depress_priority = -1;
  326. thread_template.cpu_usage = 0;
  327. thread_template.sched_usage = 0;
  328. /* thread_template.sched_stamp (later) */
  329. thread_template.recover = (vm_offset_t) 0;
  330. thread_template.vm_privilege = 0;
  331. thread_template.user_stop_count = 1;
  332. /* thread_template.<IPC structures> (later) */
  333. #if 0
  334. timer_init(&(thread_template.user_timer));
  335. timer_init(&(thread_template.system_timer));
  336. #endif
  337. thread_template.user_timer_save.low = 0;
  338. thread_template.user_timer_save.high = 0;
  339. thread_template.system_timer_save.low = 0;
  340. thread_template.system_timer_save.high = 0;
  341. thread_template.cpu_delta = 0;
  342. thread_template.sched_delta = 0;
  343. thread_template.active = FALSE; /* reset */
  344. thread_template.ast = AST_ZILCH;
  345. /* thread_template.processor_set (later) */
  346. thread_template.bound_processor = PROCESSOR_NULL;
  347. #if MACH_HOST
  348. thread_template.may_assign = TRUE;
  349. thread_template.assign_active = FALSE;
  350. #endif /* MACH_HOST */
  351. #if NCPUS > 1
  352. /* thread_template.last_processor (later) */
  353. #endif /* NCPUS > 1 */
  354. /*
  355. * Initialize other data structures used in
  356. * this module.
  357. */
  358. //queue_init(&reaper_queue);
  359. //simple_lock_init(&reaper_lock);
  360. #ifndef MACHINE_STACK
  361. //simple_lock_init(&stack_lock_data);
  362. #endif /* MACHINE_STACK */
  363. #if MACH_DEBUG
  364. simple_lock_init(&stack_usage_lock);
  365. #endif /* MACH_DEBUG */
  366. /*
  367. * Initialize any machine-dependent
  368. * per-thread structures necessary.
  369. */
  370. //pcb_module_init();
  371. }
  372. kern_return_t thread_create(
  373. task_t parent_task,
  374. thread_t *child_thread) /* OUT */
  375. {
  376. thread_t new_thread;
  377. processor_set_t pset;
  378. if (parent_task == TASK_NULL)
  379. return KERN_INVALID_ARGUMENT;
  380. /*
  381. * Allocate a thread and initialize static fields
  382. */
  383. new_thread = (thread_t) gnu_kmem_cache_alloc(&thread_cache);
  384. if (new_thread == THREAD_NULL)
  385. return KERN_RESOURCE_SHORTAGE;
  386. *new_thread = thread_template;
  387. //record_time_stamp (&new_thread->creation_time);
  388. /*
  389. * Initialize runtime-dependent fields
  390. */
  391. new_thread->task = parent_task;
  392. simple_lock_init(&new_thread->lock);
  393. //new_thread->sched_stamp = sched_tick;
  394. //thread_timeout_setup(new_thread);
  395. /*
  396. * Create a pcb. The kernel stack is created later,
  397. * when the thread is swapped-in.
  398. */
  399. //pcb_init(parent_task, new_thread);
  400. ipc_thread_init(new_thread);
  401. #if 0
  402. /*
  403. * Find the processor set for the parent task.
  404. */
  405. task_lock(parent_task);
  406. pset = parent_task->processor_set;
  407. pset_reference(pset);
  408. task_unlock(parent_task);
  409. /*
  410. * Lock both the processor set and the task,
  411. * so that the thread can be added to both
  412. * simultaneously. Processor set must be
  413. * locked first.
  414. */
  415. Restart:
  416. pset_lock(pset);
  417. task_lock(parent_task);
  418. /*
  419. * If the task has changed processor sets,
  420. * catch up (involves lots of lock juggling).
  421. */
  422. {
  423. processor_set_t cur_pset;
  424. cur_pset = parent_task->processor_set;
  425. if (!cur_pset->active)
  426. cur_pset = &default_pset;
  427. if (cur_pset != pset) {
  428. pset_reference(cur_pset);
  429. task_unlock(parent_task);
  430. pset_unlock(pset);
  431. pset_deallocate(pset);
  432. pset = cur_pset;
  433. goto Restart;
  434. }
  435. }
  436. /*
  437. * Set the thread`s priority from the pset and task.
  438. */
  439. new_thread->priority = parent_task->priority;
  440. if (pset->max_priority > new_thread->max_priority)
  441. new_thread->max_priority = pset->max_priority;
  442. if (new_thread->max_priority > new_thread->priority)
  443. new_thread->priority = new_thread->max_priority;
  444. /*
  445. * Don't need to lock thread here because it can't
  446. * possibly execute and no one else knows about it.
  447. */
  448. compute_priority(new_thread, TRUE);
  449. /*
  450. * Thread is suspended if the task is. Add 1 to
  451. * suspend count since thread is created in suspended
  452. * state.
  453. */
  454. new_thread->suspend_count = parent_task->suspend_count + 1;
  455. /*
  456. * Add the thread to the processor set.
  457. * If the pset is empty, suspend the thread again.
  458. */
  459. pset_add_thread(pset, new_thread);
  460. if (pset->empty)
  461. new_thread->suspend_count++;
  462. #if HW_FOOTPRINT
  463. /*
  464. * Need to set last_processor, idle processor would be best, but
  465. * that requires extra locking nonsense. Go for tail of
  466. * processors queue to avoid master.
  467. */
  468. if (!pset->empty) {
  469. new_thread->last_processor =
  470. (processor_t)queue_first(&pset->processors);
  471. }
  472. else {
  473. /*
  474. * Thread created in empty processor set. Pick
  475. * master processor as an acceptable legal value.
  476. */
  477. new_thread->last_processor = master_processor;
  478. }
  479. #else /* HW_FOOTPRINT */
  480. /*
  481. * Don't need to initialize because the context switch
  482. * code will set it before it can be used.
  483. */
  484. #endif /* HW_FOOTPRINT */
  485. #if MACH_PCSAMPLE
  486. new_thread->pc_sample.seqno = 0;
  487. new_thread->pc_sample.sampletypes = 0;
  488. #endif /* MACH_PCSAMPLE */
  489. new_thread->pc_sample.buffer = 0;
  490. /*
  491. * Add the thread to the task`s list of threads.
  492. * The new thread holds another reference to the task.
  493. */
  494. parent_task->ref_count++;
  495. parent_task->thread_count++;
  496. queue_enter(&parent_task->thread_list, new_thread, thread_t,
  497. thread_list);
  498. /*
  499. * Finally, mark the thread active.
  500. */
  501. new_thread->active = TRUE;
  502. #if 0
  503. if (!parent_task->active) {
  504. task_unlock(parent_task);
  505. pset_unlock(pset);
  506. (void) thread_terminate(new_thread);
  507. /* release ref we would have given our caller */
  508. thread_deallocate(new_thread);
  509. return KERN_FAILURE;
  510. }
  511. #endif
  512. task_unlock(parent_task);
  513. pset_unlock(pset);
  514. #endif
  515. ipc_thread_enable(new_thread);
  516. *child_thread = new_thread;
  517. return KERN_SUCCESS;
  518. }
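/*
 * Illustrative sketch (not part of the original source, not compiled):
 * the usual kernel-internal sequence for bringing a thread created by
 * thread_create() to life.  kernel_thread() below follows the same
 * pattern; example_entry is a placeholder continuation.
 */
#if 0
static void example_spawn(task_t task)
{
	thread_t th;

	if (thread_create(task, &th) != KERN_SUCCESS)
		return;
	thread_deallocate(th);			/* drop the extra creation reference */
	thread_start(th, example_entry);	/* set the continuation to run */
	(void) thread_resume(th);		/* clear the initial user stop */
}
#endif
/*
 * gnumach_thread_create:
 *
 * Glue wrapper around thread_create() taking and returning untyped
 * pointers; presumably called from the Linux side of the glue layer.
 */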
  519. void* gnumach_thread_create(void* task)
  520. {
  521. thread_t thread = 0;
  522. (void) thread_create((task_t)task,&thread);
  523. return thread;
  524. }
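/*
 * gnumach_handle_fork:
 *
 * Glue hook for fork handling: create a new kernel task for the child
 * (via task_create_kernel) and a thread within it, and return the new
 * thread.  Any further Mach-level setup for the child is presumably
 * done by the caller on the Linux side.
 */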
  525. void* gnumach_handle_fork(void* parent)
  526. {
  527. thread_t parent_thread = (thread_t)parent;
  528. thread_t child_thread = 0;
  529. task_t parent_task = parent_thread->task;
  530. task_t child_task = 0;
  531. (void) task_create_kernel(parent_task, FALSE, &child_task);
  532. (void) thread_create((task_t)child_task,&child_thread);
  533. //printk(KERN_CRIT "mach_handle_fork %p %p %p %p\n",parent_task,parent_thread,child_task,child_thread);
  534. return child_thread;
  535. }
  536. #if 0
  537. unsigned int thread_deallocate_stack = 0;
  538. void thread_deallocate(
  539. thread_t thread)
  540. {
  541. spl_t s;
  542. task_t task;
  543. processor_set_t pset;
  544. time_value_t user_time, system_time;
  545. if (thread == THREAD_NULL)
  546. return;
  547. /*
  548. * First, check for new count > 0 (the common case).
  549. * Only the thread needs to be locked.
  550. */
  551. s = splsched();
  552. thread_lock(thread);
  553. if (--thread->ref_count > 0) {
  554. thread_unlock(thread);
  555. (void) splx(s);
  556. return;
  557. }
  558. /*
  559. * Count is zero. However, the task's and processor set's
  560. * thread lists have implicit references to
  561. * the thread, and may make new ones. Their locks also
  562. * dominate the thread lock. To check for this, we
  563. * temporarily restore the one thread reference, unlock
  564. * the thread, and then lock the other structures in
  565. * the proper order.
  566. */
  567. thread->ref_count = 1;
  568. thread_unlock(thread);
  569. (void) splx(s);
  570. pset = thread->processor_set;
  571. pset_lock(pset);
  572. #if MACH_HOST
  573. /*
  574. * The thread might have moved.
  575. */
  576. while (pset != thread->processor_set) {
  577. pset_unlock(pset);
  578. pset = thread->processor_set;
  579. pset_lock(pset);
  580. }
  581. #endif /* MACH_HOST */
  582. task = thread->task;
  583. task_lock(task);
  584. s = splsched();
  585. thread_lock(thread);
  586. if (--thread->ref_count > 0) {
  587. /*
  588. * Task or processor_set made extra reference.
  589. */
  590. thread_unlock(thread);
  591. (void) splx(s);
  592. task_unlock(task);
  593. pset_unlock(pset);
  594. return;
  595. }
  596. /*
  597. * Thread has no references - we can remove it.
  598. */
  599. /*
  600. * Remove pending timeouts.
  601. */
  602. reset_timeout_check(&thread->timer);
  603. reset_timeout_check(&thread->depress_timer);
  604. thread->depress_priority = -1;
  605. /*
  606. * Accumulate times for dead threads in task.
  607. */
  608. thread_read_times(thread, &user_time, &system_time);
  609. time_value_add(&task->total_user_time, &user_time);
  610. time_value_add(&task->total_system_time, &system_time);
  611. /*
  612. * Remove thread from task list and processor_set threads list.
  613. */
  614. task->thread_count--;
  615. queue_remove(&task->thread_list, thread, thread_t, thread_list);
  616. pset_remove_thread(pset, thread);
  617. thread_unlock(thread); /* no more references - safe */
  618. (void) splx(s);
  619. task_unlock(task);
  620. pset_unlock(pset);
  621. pset_deallocate(pset);
  622. /*
  623. * A couple of quick sanity checks
  624. */
  625. if (thread == current_thread()) {
  626. panic("thread deallocating itself");
  627. }
  628. if ((thread->state & ~(TH_RUN | TH_HALTED | TH_SWAPPED)) != TH_SUSP)
  629. panic("unstopped thread destroyed!");
  630. /*
  631. * Deallocate the task reference, since we know the thread
  632. * is not running.
  633. */
  634. task_deallocate(thread->task); /* may block */
  635. /*
  636. * Clean up any machine-dependent resources.
  637. */
  638. if ((thread->state & TH_SWAPPED) == 0) {
  639. splsched();
  640. stack_free(thread);
  641. (void) splx(s);
  642. thread_deallocate_stack++;
  643. }
  644. /*
  645. * Rattle the event count machinery (gag)
  646. */
  647. evc_notify_abort(thread);
  648. pcb_terminate(thread);
  649. kmem_cache_free(&thread_cache, (vm_offset_t) thread);
  650. }
  651. void thread_reference(
  652. thread_t thread)
  653. {
  654. spl_t s;
  655. if (thread == THREAD_NULL)
  656. return;
  657. s = splsched();
  658. thread_lock(thread);
  659. thread->ref_count++;
  660. thread_unlock(thread);
  661. (void) splx(s);
  662. }
  663. /*
  664. * thread_terminate:
  665. *
  666. * Permanently stop execution of the specified thread.
  667. *
  668. * A thread to be terminated must be allowed to clean up any state
  669. * that it has before it exits. The thread is broken out of any
  670. * wait condition that it is in, and signalled to exit. It then
  671. * cleans up its state and calls thread_halt_self on its way out of
  672. * the kernel. The caller waits for the thread to halt, terminates
  673. * its IPC state, and then deallocates it.
  674. *
  675. * If the caller is the current thread, it must still exit the kernel
  676. * to clean up any state (thread and port references, messages, etc).
  677. * When it exits the kernel, it then terminates its IPC state and
  678. * queues itself for the reaper thread, which will wait for the thread
  679. * to stop and then deallocate it. (A thread cannot deallocate itself,
  680. * since it needs a kernel stack to execute.)
  681. */
  682. kern_return_t thread_terminate(
  683. thread_t thread)
  684. {
  685. thread_t cur_thread = current_thread();
  686. task_t cur_task;
  687. spl_t s;
  688. if (thread == THREAD_NULL)
  689. return KERN_INVALID_ARGUMENT;
  690. /*
  691. * Break IPC control over the thread.
  692. */
  693. ipc_thread_disable(thread);
  694. if (thread == cur_thread) {
  695. /*
  696. * Current thread will queue itself for reaper when
  697. * exiting kernel.
  698. */
  699. s = splsched();
  700. thread_lock(thread);
  701. if (thread->active) {
  702. thread->active = FALSE;
  703. thread_ast_set(thread, AST_TERMINATE);
  704. }
  705. thread_unlock(thread);
  706. ast_on(cpu_number(), AST_TERMINATE);
  707. splx(s);
  708. return KERN_SUCCESS;
  709. }
  710. /*
  711. * Lock both threads and the current task
  712. * to check termination races and prevent deadlocks.
  713. */
  714. cur_task = current_task();
  715. task_lock(cur_task);
  716. s = splsched();
  717. if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
  718. thread_lock(thread);
  719. thread_lock(cur_thread);
  720. }
  721. else {
  722. thread_lock(cur_thread);
  723. thread_lock(thread);
  724. }
  725. /*
  726. * If the current thread is being terminated, help out.
  727. */
  728. if ((!cur_task->active) || (!cur_thread->active)) {
  729. thread_unlock(cur_thread);
  730. thread_unlock(thread);
  731. (void) splx(s);
  732. task_unlock(cur_task);
  733. thread_terminate(cur_thread);
  734. return KERN_FAILURE;
  735. }
  736. thread_unlock(cur_thread);
  737. task_unlock(cur_task);
  738. /*
  739. * Terminate victim thread.
  740. */
  741. if (!thread->active) {
  742. /*
  743. * Someone else got there first.
  744. */
  745. thread_unlock(thread);
  746. (void) splx(s);
  747. return KERN_FAILURE;
  748. }
  749. thread->active = FALSE;
  750. thread_unlock(thread);
  751. (void) splx(s);
  752. #if MACH_HOST
  753. /*
  754. * Reassign thread to default pset if needed.
  755. */
  756. thread_freeze(thread);
  757. if (thread->processor_set != &default_pset) {
  758. thread_doassign(thread, &default_pset, FALSE);
  759. }
  760. #endif /* MACH_HOST */
  761. /*
  762. * Halt the victim at the clean point.
  763. */
  764. (void) thread_halt(thread, TRUE);
  765. #if MACH_HOST
  766. thread_unfreeze(thread);
  767. #endif /* MACH_HOST */
  768. /*
  769. * Shut down the victim's IPC and deallocate its
  770. * reference to itself.
  771. */
  772. ipc_thread_terminate(thread);
  773. thread_deallocate(thread);
  774. return KERN_SUCCESS;
  775. }
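/*
 * thread_terminate_release:
 *
 * Terminate a thread and release the other resources named by the
 * arguments: the port name the caller used for the thread, an optional
 * reply port, and an optional region of the task's address space.
 */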
  776. kern_return_t thread_terminate_release(
  777. thread_t thread,
  778. task_t task,
  779. mach_port_t thread_name,
  780. mach_port_t reply_port,
  781. vm_offset_t address,
  782. vm_size_t size)
  783. {
  784. if (task == NULL)
  785. return KERN_INVALID_ARGUMENT;
  786. mach_port_deallocate(task->itk_space, thread_name);
  787. if (reply_port != MACH_PORT_NULL)
  788. mach_port_destroy(task->itk_space, reply_port);
  789. if ((address != 0) || (size != 0))
  790. vm_deallocate(task->map, address, size);
  791. return thread_terminate(thread);
  792. }
  793. /*
  794. * thread_force_terminate:
  795. *
  796. * Version of thread_terminate called by task_terminate. thread is
  797. * not the current thread. task_terminate is the dominant operation,
  798. * so we can force this thread to stop.
  799. */
  800. void
  801. thread_force_terminate(
  802. thread_t thread)
  803. {
  804. boolean_t deallocate_here;
  805. spl_t s;
  806. ipc_thread_disable(thread);
  807. #if MACH_HOST
  808. /*
  809. * Reassign thread to default pset if needed.
  810. */
  811. thread_freeze(thread);
  812. if (thread->processor_set != &default_pset)
  813. thread_doassign(thread, &default_pset, FALSE);
  814. #endif /* MACH_HOST */
  815. s = splsched();
  816. thread_lock(thread);
  817. deallocate_here = thread->active;
  818. thread->active = FALSE;
  819. thread_unlock(thread);
  820. (void) splx(s);
  821. (void) thread_halt(thread, TRUE);
  822. ipc_thread_terminate(thread);
  823. #if MACH_HOST
  824. thread_unfreeze(thread);
  825. #endif /* MACH_HOST */
  826. if (deallocate_here)
  827. thread_deallocate(thread);
  828. }
  829. /*
  830. * Halt a thread at a clean point, leaving it suspended.
  831. *
  832. * must_halt indicates whether thread must halt.
  833. *
  834. */
  835. kern_return_t thread_halt(
  836. thread_t thread,
  837. boolean_t must_halt)
  838. {
  839. thread_t cur_thread = current_thread();
  840. kern_return_t ret;
  841. spl_t s;
  842. if (thread == cur_thread)
  843. panic("thread_halt: trying to halt current thread.");
  844. /*
  845. * If must_halt is FALSE, then a check must be made for
  846. * a cycle of halt operations.
  847. */
  848. if (!must_halt) {
  849. /*
  850. * Grab both thread locks.
  851. */
  852. s = splsched();
  853. if ((vm_offset_t)thread < (vm_offset_t)cur_thread) {
  854. thread_lock(thread);
  855. thread_lock(cur_thread);
  856. }
  857. else {
  858. thread_lock(cur_thread);
  859. thread_lock(thread);
  860. }
  861. /*
  862. * If target thread is already halted, grab a hold
  863. * on it and return.
  864. */
  865. if (thread->state & TH_HALTED) {
  866. thread->suspend_count++;
  867. thread_unlock(cur_thread);
  868. thread_unlock(thread);
  869. (void) splx(s);
  870. return KERN_SUCCESS;
  871. }
  872. /*
  873. * If someone is trying to halt us, we have a potential
  874. * halt cycle. Break the cycle by interrupting anyone
  875. * who is trying to halt us, and causing this operation
  876. * to fail; retry logic will only retry operations
  877. * that cannot deadlock. (If must_halt is TRUE, this
  878. * operation can never cause a deadlock.)
  879. */
  880. if (cur_thread->ast & AST_HALT) {
  881. thread_wakeup_with_result(TH_EV_WAKE_ACTIVE(cur_thread),
  882. THREAD_INTERRUPTED);
  883. thread_unlock(thread);
  884. thread_unlock(cur_thread);
  885. (void) splx(s);
  886. return KERN_FAILURE;
  887. }
  888. thread_unlock(cur_thread);
  889. }
  890. else {
  891. /*
  892. * Lock thread and check whether it is already halted.
  893. */
  894. s = splsched();
  895. thread_lock(thread);
  896. if (thread->state & TH_HALTED) {
  897. thread->suspend_count++;
  898. thread_unlock(thread);
  899. (void) splx(s);
  900. return KERN_SUCCESS;
  901. }
  902. }
  903. /*
  904. * Suspend thread - inline version of thread_hold() because
  905. * thread is already locked.
  906. */
  907. thread->suspend_count++;
  908. thread->state |= TH_SUSP;
  909. /*
  910. * If someone else is halting it, wait for that to complete.
  911. * Fail if wait interrupted and must_halt is false.
  912. */
  913. while ((thread->ast & AST_HALT) && (!(thread->state & TH_HALTED))) {
  914. thread->wake_active = TRUE;
  915. thread_sleep(TH_EV_WAKE_ACTIVE(thread),
  916. simple_lock_addr(thread->lock), TRUE);
  917. if (thread->state & TH_HALTED) {
  918. (void) splx(s);
  919. return KERN_SUCCESS;
  920. }
  921. if ((current_thread()->wait_result != THREAD_AWAKENED)
  922. && !(must_halt)) {
  923. (void) splx(s);
  924. thread_release(thread);
  925. return KERN_FAILURE;
  926. }
  927. thread_lock(thread);
  928. }
  929. /*
  930. * Otherwise, have to do it ourselves.
  931. */
  932. thread_ast_set(thread, AST_HALT);
  933. while (TRUE) {
  934. /*
  935. * Wait for thread to stop.
  936. */
  937. thread_unlock(thread);
  938. (void) splx(s);
  939. ret = thread_dowait(thread, must_halt);
  940. /*
  941. * If the dowait failed, so do we. Drop AST_HALT, and
  942. * wake up anyone else who might be waiting for it.
  943. */
  944. if (ret != KERN_SUCCESS) {
  945. s = splsched();
  946. thread_lock(thread);
  947. thread_ast_clear(thread, AST_HALT);
  948. thread_wakeup_with_result(TH_EV_WAKE_ACTIVE(thread),
  949. THREAD_INTERRUPTED);
  950. thread_unlock(thread);
  951. (void) splx(s);
  952. thread_release(thread);
  953. return ret;
  954. }
  955. /*
  956. * Clear any interruptible wait.
  957. */
  958. clear_wait(thread, THREAD_INTERRUPTED, TRUE);
  959. /*
  960. * If the thread's at a clean point, we're done.
  961. * Don't need a lock because it really is stopped.
  962. */
  963. if (thread->state & TH_HALTED) {
  964. return KERN_SUCCESS;
  965. }
  966. /*
  967. * If the thread is at a nice continuation,
  968. * or a continuation with a cleanup routine,
  969. * call the cleanup routine.
  970. */
  971. if ((((thread->swap_func == mach_msg_continue) ||
  972. (thread->swap_func == mach_msg_receive_continue)) &&
  973. mach_msg_interrupt(thread)) ||
  974. (thread->swap_func == thread_exception_return) ||
  975. (thread->swap_func == thread_bootstrap_return)) {
  976. s = splsched();
  977. thread_lock(thread);
  978. thread->state |= TH_HALTED;
  979. thread_ast_clear(thread, AST_HALT);
  980. thread_unlock(thread);
  981. splx(s);
  982. return KERN_SUCCESS;
  983. }
  984. /*
  985. * Force the thread to stop at a clean
  986. * point, and arrange to wait for it.
  987. *
  988. * Set it running, so it can notice. Override
  989. * the suspend count. We know that the thread
  990. * is suspended and not waiting.
  991. *
  992. * Since the thread may hit an interruptible wait
  993. * before it reaches a clean point, we must force it
  994. * to wake us up when it does so. This involves some
  995. * trickery:
  996. * We mark the thread SUSPENDED so that thread_block
  997. * will suspend it and wake us up.
  998. * We mark the thread RUNNING so that it will run.
  999. * We mark the thread UN-INTERRUPTIBLE (!) so that
  1000. * some other thread trying to halt or suspend it won't
  1001. * take it off the run queue before it runs. Since
  1002. * dispatching a thread (the tail of thread_invoke) marks
  1003. * the thread interruptible, it will stop at the next
  1004. * context switch or interruptible wait.
  1005. */
  1006. s = splsched();
  1007. thread_lock(thread);
  1008. if ((thread->state & TH_SCHED_STATE) != TH_SUSP)
  1009. panic("thread_halt");
  1010. thread->state |= TH_RUN | TH_UNINT;
  1011. thread_setrun(thread, FALSE);
  1012. /*
  1013. * Continue loop and wait for thread to stop.
  1014. */
  1015. }
  1016. }
  1017. void __attribute__((noreturn)) walking_zombie(void)
  1018. {
  1019. panic("the zombie walks!");
  1020. }
  1021. /*
  1022. * Thread calls this routine on exit from the kernel when it
  1023. * notices a halt request.
  1024. */
  1025. void thread_halt_self(continuation_t continuation)
  1026. {
  1027. thread_t thread = current_thread();
  1028. spl_t s;
  1029. if (thread->ast & AST_TERMINATE) {
  1030. /*
  1031. * Thread is terminating itself. Shut
  1032. * down IPC, then queue it up for the
  1033. * reaper thread.
  1034. */
  1035. ipc_thread_terminate(thread);
  1036. thread_hold(thread);
  1037. s = splsched();
  1038. simple_lock(&reaper_lock);
  1039. enqueue_tail(&reaper_queue, &(thread->links));
  1040. simple_unlock(&reaper_lock);
  1041. thread_lock(thread);
  1042. thread->state |= TH_HALTED;
  1043. thread_unlock(thread);
  1044. (void) splx(s);
  1045. thread_wakeup((event_t)&reaper_queue);
  1046. counter(c_thread_halt_self_block++);
  1047. thread_block(walking_zombie);
  1048. /*NOTREACHED*/
  1049. } else {
  1050. /*
  1051. * Thread was asked to halt - show that it
  1052. * has done so.
  1053. */
  1054. s = splsched();
  1055. thread_lock(thread);
  1056. thread->state |= TH_HALTED;
  1057. thread_ast_clear(thread, AST_HALT);
  1058. thread_unlock(thread);
  1059. splx(s);
  1060. counter(c_thread_halt_self_block++);
  1061. thread_block(continuation);
  1062. /*
  1063. * thread_release resets TH_HALTED.
  1064. */
  1065. }
  1066. }
  1067. /*
  1068. * thread_hold:
  1069. *
  1070. * Suspend execution of the specified thread.
  1071. * This is a recursive-style suspension of the thread, a count of
  1072. * suspends is maintained.
  1073. */
  1074. void thread_hold(
  1075. thread_t thread)
  1076. {
  1077. spl_t s;
  1078. s = splsched();
  1079. thread_lock(thread);
  1080. thread->suspend_count++;
  1081. thread->state |= TH_SUSP;
  1082. thread_unlock(thread);
  1083. (void) splx(s);
  1084. }
  1085. /*
  1086. * thread_dowait:
  1087. *
  1088. * Wait for a thread to actually enter stopped state.
  1089. *
  1090. * must_halt argument indicates if this may fail on interruption.
  1091. * This is FALSE only if called from thread_abort via thread_halt.
  1092. */
  1093. kern_return_t
  1094. thread_dowait(
  1095. thread_t thread,
  1096. boolean_t must_halt)
  1097. {
  1098. boolean_t need_wakeup;
  1099. kern_return_t ret = KERN_SUCCESS;
  1100. spl_t s;
  1101. if (thread == current_thread())
  1102. panic("thread_dowait");
  1103. /*
  1104. * If a thread is not interruptible, it may not be suspended
  1105. * until it becomes interruptible. In this case, we wait for
  1106. * the thread to stop itself, and indicate that we are waiting
  1107. * for it to stop so that it can wake us up when it does stop.
  1108. *
  1109. * If the thread is interruptible, we may be able to suspend
  1110. * it immediately. There are several cases:
  1111. *
  1112. * 1) The thread is already stopped (trivial)
  1113. * 2) The thread is runnable (marked RUN and on a run queue).
  1114. * We pull it off the run queue and mark it stopped.
  1115. * 3) The thread is running. We wait for it to stop.
  1116. */
  1117. need_wakeup = FALSE;
  1118. s = splsched();
  1119. thread_lock(thread);
  1120. for (;;) {
  1121. switch (thread->state & TH_SCHED_STATE) {
  1122. case TH_SUSP:
  1123. case TH_WAIT | TH_SUSP:
  1124. /*
  1125. * Thread is already suspended, or sleeping in an
  1126. * interruptible wait. We win!
  1127. */
  1128. break;
  1129. case TH_RUN | TH_SUSP:
  1130. /*
  1131. * The thread is interruptible. If we can pull
  1132. * it off a runq, stop it here.
  1133. */
  1134. if (rem_runq(thread) != RUN_QUEUE_NULL) {
  1135. thread->state &= ~TH_RUN;
  1136. need_wakeup = thread->wake_active;
  1137. thread->wake_active = FALSE;
  1138. break;
  1139. }
  1140. #if NCPUS > 1
  1141. /*
  1142. * The thread must be running, so make its
  1143. * processor execute ast_check(). This
  1144. * should cause the thread to take an ast and
  1145. * context switch to suspend for us.
  1146. */
  1147. cause_ast_check(thread->last_processor);
  1148. #endif /* NCPUS > 1 */
  1149. /*
  1150. * Fall through to wait for thread to stop.
  1151. */
  1152. case TH_RUN | TH_SUSP | TH_UNINT:
  1153. case TH_RUN | TH_WAIT | TH_SUSP:
  1154. case TH_RUN | TH_WAIT | TH_SUSP | TH_UNINT:
  1155. case TH_WAIT | TH_SUSP | TH_UNINT:
  1156. /*
  1157. * Wait for the thread to stop, or sleep interruptibly
  1158. * (thread_block will stop it in the latter case).
  1159. * Check for failure if interrupted.
  1160. */
  1161. thread->wake_active = TRUE;
  1162. thread_sleep(TH_EV_WAKE_ACTIVE(thread),
  1163. simple_lock_addr(thread->lock), TRUE);
  1164. thread_lock(thread);
  1165. if ((current_thread()->wait_result != THREAD_AWAKENED) &&
  1166. !must_halt) {
  1167. ret = KERN_FAILURE;
  1168. break;
  1169. }
  1170. /*
  1171. * Repeat loop to check thread`s state.
  1172. */
  1173. continue;
  1174. }
  1175. /*
  1176. * Thread is stopped at this point.
  1177. */
  1178. break;
  1179. }
  1180. thread_unlock(thread);
  1181. (void) splx(s);
  1182. if (need_wakeup)
  1183. thread_wakeup(TH_EV_WAKE_ACTIVE(thread));
  1184. return ret;
  1185. }
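/*
 * thread_release:
 *
 * Release a thread_hold: decrement the suspend count, and when it
 * reaches zero clear the suspend/halted state and set the thread
 * running again if it was only suspended.
 */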
  1186. void thread_release(
  1187. thread_t thread)
  1188. {
  1189. spl_t s;
  1190. s = splsched();
  1191. thread_lock(thread);
  1192. if (--thread->suspend_count == 0) {
  1193. thread->state &= ~(TH_SUSP | TH_HALTED);
  1194. if ((thread->state & (TH_WAIT | TH_RUN)) == 0) {
  1195. /* was only suspended */
  1196. thread->state |= TH_RUN;
  1197. thread_setrun(thread, TRUE);
  1198. }
  1199. }
  1200. thread_unlock(thread);
  1201. (void) splx(s);
  1202. }
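/*
 * thread_suspend:
 *
 * User-level suspend.  The first user stop also takes an internal hold
 * on the thread; the caller then waits for the thread to actually stop,
 * unless it is suspending itself, in which case it blocks on its way
 * out of the kernel.
 */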
  1203. kern_return_t thread_suspend(
  1204. thread_t thread)
  1205. {
  1206. boolean_t hold;
  1207. spl_t spl;
  1208. if (thread == THREAD_NULL)
  1209. return KERN_INVALID_ARGUMENT;
  1210. hold = FALSE;
  1211. spl = splsched();
  1212. thread_lock(thread);
  1213. /* Wait for thread to get interruptible */
  1214. while (thread->state & TH_UNINT) {
  1215. assert_wait(TH_EV_STATE(thread), TRUE);
  1216. thread_unlock(thread);
  1217. thread_block(thread_no_continuation);
  1218. thread_lock(thread);
  1219. }
  1220. if (thread->user_stop_count++ == 0) {
  1221. hold = TRUE;
  1222. thread->suspend_count++;
  1223. thread->state |= TH_SUSP;
  1224. }
  1225. thread_unlock(thread);
  1226. (void) splx(spl);
  1227. /*
  1228. * Now wait for the thread if necessary.
  1229. */
  1230. if (hold) {
  1231. if (thread == current_thread()) {
  1232. /*
  1233. * We want to call thread_block on our way out,
  1234. * to stop running.
  1235. */
  1236. spl = splsched();
  1237. ast_on(cpu_number(), AST_BLOCK);
  1238. (void) splx(spl);
  1239. } else
  1240. (void) thread_dowait(thread, TRUE);
  1241. }
  1242. return KERN_SUCCESS;
  1243. }
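/*
 * thread_resume:
 *
 * Release one user-level suspension.  Fails if the thread has no
 * outstanding user stops.
 */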
  1244. kern_return_t thread_resume(
  1245. thread_t thread)
  1246. {
  1247. kern_return_t ret;
  1248. spl_t s;
  1249. if (thread == THREAD_NULL)
  1250. return KERN_INVALID_ARGUMENT;
  1251. ret = KERN_SUCCESS;
  1252. s = splsched();
  1253. thread_lock(thread);
  1254. if (thread->user_stop_count > 0) {
  1255. if (--thread->user_stop_count == 0) {
  1256. if (--thread->suspend_count == 0) {
  1257. thread->state &= ~(TH_SUSP | TH_HALTED);
  1258. if ((thread->state & (TH_WAIT | TH_RUN)) == 0) {
  1259. /* was only suspended */
  1260. thread->state |= TH_RUN;
  1261. thread_setrun(thread, TRUE);
  1262. }
  1263. }
  1264. }
  1265. }
  1266. else {
  1267. ret = KERN_FAILURE;
  1268. }
  1269. thread_unlock(thread);
  1270. (void) splx(s);
  1271. return ret;
  1272. }
  1273. /*
  1274. * Return thread's machine-dependent state.
  1275. */
  1276. kern_return_t thread_get_state(
  1277. thread_t thread,
  1278. int flavor,
  1279. thread_state_t old_state, /* pointer to OUT array */
  1280. natural_t *old_state_count) /*IN/OUT*/
  1281. {
  1282. kern_return_t ret;
  1283. #if defined(__i386__) || defined(__x86_64__)
  1284. if (flavor == i386_DEBUG_STATE && thread == current_thread())
  1285. /* This state can be obtained directly for the current thread. */
  1286. return thread_getstatus(thread, flavor, old_state, old_state_count);
  1287. #endif
  1288. if (thread == THREAD_NULL || thread == current_thread()) {
  1289. return KERN_INVALID_ARGUMENT;
  1290. }
  1291. thread_hold(thread);
  1292. (void) thread_dowait(thread, TRUE);
  1293. ret = thread_getstatus(thread, flavor, old_state, old_state_count);
  1294. thread_release(thread);
  1295. return ret;
  1296. }
  1297. /*
  1298. * Change thread's machine-dependent state.
  1299. */
  1300. kern_return_t thread_set_state(
  1301. thread_t thread,
  1302. int flavor,
  1303. thread_state_t new_state,
  1304. natural_t new_state_count)
  1305. {
  1306. kern_return_t ret;
  1307. #if defined(__i386__) || defined(__x86_64__)
  1308. if (flavor == i386_DEBUG_STATE && thread == current_thread())
  1309. /* This state can be set directly for the current thread. */
  1310. return thread_setstatus(thread, flavor, new_state, new_state_count);
  1311. #endif
  1312. if (thread == THREAD_NULL || thread == current_thread()) {
  1313. return KERN_INVALID_ARGUMENT;
  1314. }
  1315. thread_hold(thread);
  1316. (void) thread_dowait(thread, TRUE);
  1317. ret = thread_setstatus(thread, flavor, new_state, new_state_count);
  1318. thread_release(thread);
  1319. return ret;
  1320. }
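/*
 * thread_info:
 *
 * Return information about a thread; the THREAD_BASIC_INFO and
 * THREAD_SCHED_INFO flavors are supported.
 */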
  1321. kern_return_t thread_info(
  1322. thread_t thread,
  1323. int flavor,
  1324. thread_info_t thread_info_out, /* pointer to OUT array */
  1325. natural_t *thread_info_count) /*IN/OUT*/
  1326. {
  1327. int state, flags;
  1328. spl_t s;
  1329. if (thread == THREAD_NULL)
  1330. return KERN_INVALID_ARGUMENT;
  1331. if (flavor == THREAD_BASIC_INFO) {
  1332. thread_basic_info_t basic_info;
  1333. /* Allow *thread_info_count to be one smaller than the
  1334. usual amount, because creation_time is a new member
  1335. that some callers might not know about. */
  1336. if (*thread_info_count < THREAD_BASIC_INFO_COUNT - 1) {
  1337. return KERN_INVALID_ARGUMENT;
  1338. }
  1339. basic_info = (thread_basic_info_t) thread_info_out;
  1340. s = splsched();
  1341. thread_lock(thread);
  1342. /*
  1343. * Update lazy-evaluated scheduler info because someone wants it.
  1344. */
  1345. if ((thread->state & TH_RUN) == 0 &&
  1346. thread->sched_stamp != sched_tick)
  1347. update_priority(thread);
  1348. /* fill in info */
  1349. thread_read_times(thread,
  1350. &basic_info->user_time,
  1351. &basic_info->system_time);
  1352. basic_info->base_priority = thread->priority;
  1353. basic_info->cur_priority = thread->sched_pri;
  1354. read_time_stamp(&thread->creation_time,
  1355. &basic_info->creation_time);
  1356. /*
  1357. * To calculate cpu_usage, first correct for timer rate,
  1358. * then for 5/8 ageing. The correction factor [3/5] is
  1359. * (1/(5/8) - 1).
  1360. */
  1361. basic_info->cpu_usage = thread->cpu_usage /
  1362. (TIMER_RATE/TH_USAGE_SCALE);
  1363. basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
  1364. #if SIMPLE_CLOCK
  1365. /*
  1366. * Clock drift compensation.
  1367. */
  1368. basic_info->cpu_usage =
  1369. (basic_info->cpu_usage * 1000000)/sched_usec;
  1370. #endif /* SIMPLE_CLOCK */
  1371. flags = 0;
  1372. if (thread->state & TH_SWAPPED)
  1373. flags |= TH_FLAGS_SWAPPED;
  1374. if (thread->state & TH_IDLE)
  1375. flags |= TH_FLAGS_IDLE;
  1376. if (thread->state & TH_HALTED)
  1377. state = TH_STATE_HALTED;
  1378. else
  1379. if (thread->state & TH_RUN)
  1380. state = TH_STATE_RUNNING;
  1381. else
  1382. if (thread->state & TH_UNINT)
  1383. state = TH_STATE_UNINTERRUPTIBLE;
  1384. else
  1385. if (thread->state & TH_SUSP)
  1386. state = TH_STATE_STOPPED;
  1387. else
  1388. if (thread->state & TH_WAIT)
  1389. state = TH_STATE_WAITING;
  1390. else
  1391. state = 0; /* ? */
  1392. basic_info->run_state = state;
  1393. basic_info->flags = flags;
  1394. basic_info->suspend_count = thread->user_stop_count;
  1395. if (state == TH_STATE_RUNNING)
  1396. basic_info->sleep_time = 0;
  1397. else
  1398. basic_info->sleep_time = sched_tick - thread->sched_stamp;
  1399. thread_unlock(thread);
  1400. splx(s);
  1401. if (*thread_info_count > THREAD_BASIC_INFO_COUNT)
  1402. *thread_info_count = THREAD_BASIC_INFO_COUNT;
  1403. return KERN_SUCCESS;
  1404. }
  1405. else if (flavor == THREAD_SCHED_INFO) {
  1406. thread_sched_info_t sched_info;
  1407. if (*thread_info_count < THREAD_SCHED_INFO_COUNT) {
  1408. return KERN_INVALID_ARGUMENT;
  1409. }
  1410. sched_info = (thread_sched_info_t) thread_info_out;
  1411. s = splsched();
  1412. thread_lock(thread);
  1413. #if MACH_FIXPRI
  1414. sched_info->policy = thread->policy;
  1415. if (thread->policy == POLICY_FIXEDPRI) {
  1416. sched_info->data = (thread->sched_data * tick)/1000;
  1417. }
  1418. else {
  1419. sched_info->data = 0;
  1420. }
  1421. #else /* MACH_FIXPRI */
  1422. sched_info->policy = POLICY_TIMESHARE;
  1423. sched_info->data = 0;
  1424. #endif /* MACH_FIXPRI */
  1425. sched_info->base_priority = thread->priority;
  1426. sched_info->max_priority = thread->max_priority;
  1427. sched_info->cur_priority = thread->sched_pri;
  1428. sched_info->depressed = (thread->depress_priority >= 0);
  1429. sched_info->depress_priority = thread->depress_priority;
  1430. thread_unlock(thread);
  1431. splx(s);
  1432. *thread_info_count = THREAD_SCHED_INFO_COUNT;
  1433. return KERN_SUCCESS;
  1434. }
  1435. return KERN_INVALID_ARGUMENT;
  1436. }
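/*
 * thread_abort:
 *
 * Abort a thread's waits and message operations: break it out of any
 * event wait, force it to a clean point, abort any exception RPC in
 * progress, then let it run again and cancel any priority depression.
 */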
  1437. kern_return_t thread_abort(
  1438. thread_t thread)
  1439. {
  1440. if (thread == THREAD_NULL || thread == current_thread()) {
  1441. return KERN_INVALID_ARGUMENT;
  1442. }
  1443. /*
  1444. *
  1445. * clear it of an event wait
  1446. */
  1447. evc_notify_abort(thread);
  1448. /*
  1449. * Try to force the thread to a clean point
  1450. * If the halt operation fails return KERN_ABORTED.
  1451. * ipc code will convert this to an ipc interrupted error code.
  1452. */
  1453. if (thread_halt(thread, FALSE) != KERN_SUCCESS)
  1454. return KERN_ABORTED;
  1455. /*
  1456. * If the thread was in an exception, abort that too.
  1457. */
  1458. mach_msg_abort_rpc(thread);
  1459. /*
  1460. * Then set it going again.
  1461. */
  1462. thread_release(thread);
  1463. /*
  1464. * Also abort any depression.
  1465. */
  1466. if (thread->depress_priority != -1)
  1467. thread_depress_abort(thread);
  1468. return KERN_SUCCESS;
  1469. }
/*
 * thread_start:
 *
 * Start a thread at the specified routine.
 * The thread must be in a swapped state.
 */
void
thread_start(
    thread_t        thread,
    continuation_t  start)
{
    thread->swap_func = start;
}
/*
 * kernel_thread:
 *
 * Start up a kernel thread in the specified task.
 */
thread_t kernel_thread(
    task_t          task,
    continuation_t  start,
    void            *arg)
{
    kern_return_t kr;
    thread_t thread;

    kr = thread_create(task, &thread);
    if (kr != KERN_SUCCESS)
        return THREAD_NULL;

    /* release "extra" ref that thread_create gave us */
    thread_deallocate(thread);

    thread_start(thread, start);
    thread->ith_other = arg;

    /*
     * We ensure that the kernel thread starts with a stack.
     * The swapin mechanism might not be operational yet.
     */
    thread_doswapin(thread);
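    /* Kernel threads run at the system base priority. */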
    thread->max_priority = BASEPRI_SYSTEM;
    thread->priority = BASEPRI_SYSTEM;
    thread->sched_pri = BASEPRI_SYSTEM;

    (void) thread_resume(thread);
    return thread;
}
/*
 * reaper_thread:
 *
 * This kernel thread runs forever looking for threads to destroy
 * (when they request that they be destroyed, of course).
 */
void __attribute__((noreturn)) reaper_thread_continue(void)
{
    for (;;) {
        thread_t thread;
        spl_t s;

        s = splsched();
        simple_lock(&reaper_lock);
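        /*
         * Pull threads off the reaper queue one at a time, dropping
         * the lock and spl around the operations that may block.
         */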
        while ((thread = (thread_t) dequeue_head(&reaper_queue))
               != THREAD_NULL) {
            simple_unlock(&reaper_lock);
            (void) splx(s);

            (void) thread_dowait(thread, TRUE);  /* may block */
            thread_deallocate(thread);           /* may block */

            s = splsched();
            simple_lock(&reaper_lock);
        }

        assert_wait((event_t) &reaper_queue, FALSE);
        simple_unlock(&reaper_lock);
        (void) splx(s);
        counter(c_reaper_thread_block++);
        thread_block(reaper_thread_continue);
    }
}

void reaper_thread(void)
{
    reaper_thread_continue();
    /*NOTREACHED*/
}
#if MACH_HOST
/*
 * thread_assign:
 *
 * Change processor set assignment.
 * Caller must hold an extra reference to the thread (if this is
 * called directly from the ipc interface, this is an operation
 * in progress reference).  Caller must hold no locks -- this may block.
 */
kern_return_t
thread_assign(
    thread_t        thread,
    processor_set_t new_pset)
{
    if (thread == THREAD_NULL || new_pset == PROCESSOR_SET_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    thread_freeze(thread);
    thread_doassign(thread, new_pset, TRUE);

    return KERN_SUCCESS;
}
/*
 * thread_freeze:
 *
 * Freeze thread's assignment.  Prelude to assigning thread.
 * Only one freeze may be held per thread.
 */
void
thread_freeze(
    thread_t    thread)
{
    spl_t s;

    /*
     * Freeze the assignment, deferring to a prior freeze.
     */
    s = splsched();
    thread_lock(thread);
    while (thread->may_assign == FALSE) {
        thread->assign_active = TRUE;
        thread_sleep((event_t) &thread->assign_active,
                     simple_lock_addr(thread->lock), FALSE);
        thread_lock(thread);
    }
    thread->may_assign = FALSE;
    thread_unlock(thread);
    (void) splx(s);
}
/*
 * thread_unfreeze: release freeze on thread's assignment.
 */
void
thread_unfreeze(
    thread_t    thread)
{
    spl_t s;

    s = splsched();
    thread_lock(thread);
    thread->may_assign = TRUE;
    if (thread->assign_active) {
        thread->assign_active = FALSE;
        thread_wakeup((event_t)&thread->assign_active);
    }
    thread_unlock(thread);
    splx(s);
}
/*
 * thread_doassign:
 *
 * Actually do thread assignment.  thread_freeze must have been
 * called on the thread.  The release_freeze argument indicates
 * whether to release the freeze on the thread.
 */
void
thread_doassign(
    thread_t        thread,
    processor_set_t new_pset,
    boolean_t       release_freeze)
{
    processor_set_t pset;
    boolean_t old_empty, new_empty;
    boolean_t recompute_pri = FALSE;
    spl_t s;

    /*
     * Check for silly no-op.
     */
    pset = thread->processor_set;
    if (pset == new_pset) {
        if (release_freeze)
            thread_unfreeze(thread);
        return;
    }

    /*
     * Suspend the thread and stop it if it's not the current thread.
     */
    thread_hold(thread);
    if (thread != current_thread())
        (void) thread_dowait(thread, TRUE);

    /*
     * Lock both psets now; use a consistent ordering to avoid deadlocks.
     */
Restart:
    if ((vm_offset_t)pset < (vm_offset_t)new_pset) {
        pset_lock(pset);
        pset_lock(new_pset);
    }
    else {
        pset_lock(new_pset);
        pset_lock(pset);
    }

    /*
     * Check if new_pset is ok to assign to.  If not, reassign
     * to default_pset.
     */
    if (!new_pset->active) {
        pset_unlock(pset);
        pset_unlock(new_pset);
        new_pset = &default_pset;
        goto Restart;
    }

    pset_reference(new_pset);

    /*
     * Grab the thread lock and move the thread.
     * Then drop the lock on the old pset and the thread's
     * reference to it.
     */
    s = splsched();
    thread_lock(thread);
    thread_change_psets(thread, pset, new_pset);
    old_empty = pset->empty;
    new_empty = new_pset->empty;
    pset_unlock(pset);

    /*
     * Reset policy and priorities if needed.
     */
#if MACH_FIXPRI
    if ((thread->policy & new_pset->policies) == 0) {
        thread->policy = POLICY_TIMESHARE;
        recompute_pri = TRUE;
    }
#endif /* MACH_FIXPRI */
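    /*
     * Clamp the thread's priorities to the new pset's limit.  Smaller
     * numerical values mean higher priority, so a value numerically
     * below the pset's max_priority exceeds the limit and must be
     * brought up to it.
     */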
    if (thread->max_priority < new_pset->max_priority) {
        thread->max_priority = new_pset->max_priority;
        if (thread->priority < thread->max_priority) {
            thread->priority = thread->max_priority;
            recompute_pri = TRUE;
        }
        else {
            if ((thread->depress_priority >= 0) &&
                (thread->depress_priority < thread->max_priority)) {
                thread->depress_priority = thread->max_priority;
            }
        }
    }

    pset_unlock(new_pset);

    if (recompute_pri)
        compute_priority(thread, TRUE);

    if (release_freeze) {
        thread->may_assign = TRUE;
        if (thread->assign_active) {
            thread->assign_active = FALSE;
            thread_wakeup((event_t)&thread->assign_active);
        }
    }

    thread_unlock(thread);
    splx(s);
    pset_deallocate(pset);

    /*
     * Figure out hold status of thread.  Threads assigned to empty
     * psets must be held.  Therefore:
     *   If the old pset was empty, release its hold.
     *   Release our hold from above unless the new pset is empty.
     */
    if (old_empty)
        thread_release(thread);
    if (!new_empty)
        thread_release(thread);

    /*
     * If current_thread is assigned, context switch to force
     * assignment to happen.  This also causes hold to take
     * effect if the new pset is empty.
     */
    if (thread == current_thread()) {
        s = splsched();
        ast_on(cpu_number(), AST_BLOCK);
        (void) splx(s);
    }
}
#else /* MACH_HOST */
kern_return_t
thread_assign(
    thread_t        thread,
    processor_set_t new_pset)
{
    return KERN_FAILURE;
}
#endif /* MACH_HOST */

/*
 * thread_assign_default:
 *
 * Special version of thread_assign for assigning threads to the
 * default processor set.
 */
kern_return_t
thread_assign_default(
    thread_t    thread)
{
    return thread_assign(thread, &default_pset);
}
/*
 * thread_get_assignment:
 *
 * Return current assignment for this thread.
 */
kern_return_t thread_get_assignment(
    thread_t        thread,
    processor_set_t *pset)
{
    if (thread == THREAD_NULL)
        return KERN_INVALID_ARGUMENT;

    *pset = thread->processor_set;
    pset_reference(*pset);
    return KERN_SUCCESS;
}
/*
 * thread_priority:
 *
 * Set priority (and possibly max priority) for thread.
 */
kern_return_t
thread_priority(
    thread_t    thread,
    int         priority,
    boolean_t   set_max)
{
    spl_t s;
    kern_return_t ret = KERN_SUCCESS;

    if ((thread == THREAD_NULL) || invalid_pri(priority))
        return KERN_INVALID_ARGUMENT;

    s = splsched();
    thread_lock(thread);

    /*
     * Check for violation of max priority
     */
    if (priority < thread->max_priority) {
        ret = KERN_FAILURE;
    }
    else {
        /*
         * Set priorities.  If a depression is in progress,
         * change the priority to restore.
         */
        if (thread->depress_priority >= 0) {
            thread->depress_priority = priority;
        }
        else {
            thread->priority = priority;
            compute_priority(thread, TRUE);
        }

        if (set_max)
            thread->max_priority = priority;
    }

    thread_unlock(thread);
    (void) splx(s);

    return ret;
}
/*
 * thread_set_own_priority:
 *
 * Internal use only; sets the priority of the calling thread.
 * Will adjust max_priority if necessary.
 */
void
thread_set_own_priority(
    int priority)
{
    spl_t s;
    thread_t thread = current_thread();

    s = splsched();
    thread_lock(thread);

    if (priority < thread->max_priority)
        thread->max_priority = priority;
    thread->priority = priority;
    compute_priority(thread, TRUE);

    thread_unlock(thread);
    (void) splx(s);
}
/*
 * thread_max_priority:
 *
 * Reset the max priority for a thread.
 */
kern_return_t
thread_max_priority(
    thread_t        thread,
    processor_set_t pset,
    int             max_priority)
{
    spl_t s;
    kern_return_t ret = KERN_SUCCESS;

    if ((thread == THREAD_NULL) || (pset == PROCESSOR_SET_NULL) ||
        invalid_pri(max_priority))
        return KERN_INVALID_ARGUMENT;

    s = splsched();
    thread_lock(thread);

#if MACH_HOST
    /*
     * Check for wrong processor set.
     */
    if (pset != thread->processor_set) {
        ret = KERN_FAILURE;
    }
    else {
#endif /* MACH_HOST */
        thread->max_priority = max_priority;

        /*
         * Reset priority if it violates new max priority
         */
        if (max_priority > thread->priority) {
            thread->priority = max_priority;
            compute_priority(thread, TRUE);
        }
        else {
            if (thread->depress_priority >= 0 &&
                max_priority > thread->depress_priority)
                thread->depress_priority = max_priority;
        }
#if MACH_HOST
    }
#endif /* MACH_HOST */

    thread_unlock(thread);
    (void) splx(s);

    return ret;
}
/*
 * thread_policy:
 *
 * Set scheduling policy for thread.
 */
kern_return_t
thread_policy(
    thread_t    thread,
    int         policy,
    int         data)
{
#if MACH_FIXPRI
    kern_return_t ret = KERN_SUCCESS;
    int temp;
    spl_t s;
#endif /* MACH_FIXPRI */

    if ((thread == THREAD_NULL) || invalid_policy(policy))
        return KERN_INVALID_ARGUMENT;

#if MACH_FIXPRI
    s = splsched();
    thread_lock(thread);

    /*
     * Check if changing policy.
     */
    if (policy == thread->policy) {
        /*
         * Only the data is changing.  The data is meaningless for
         * timesharing; for fixed priority it is the quantum, and the
         * new value takes effect only when the current quantum runs out.
         */
        if (policy == POLICY_FIXEDPRI) {
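            /* Round the quantum up to a whole number of clock ticks. */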
            temp = data * 1000;
            if (temp % tick)
                temp += tick;
            thread->sched_data = temp / tick;
        }
    }
    else {
        /*
         * Changing policy.  Check if new policy is allowed.
         */
        if ((thread->processor_set->policies & policy) == 0) {
            ret = KERN_FAILURE;
        }
        else {
            /*
             * Changing policy.  Save data and calculate new
             * priority.
             */
            thread->policy = policy;
            if (policy == POLICY_FIXEDPRI) {
                temp = data * 1000;
                if (temp % tick)
                    temp += tick;
                thread->sched_data = temp / tick;
            }
            compute_priority(thread, TRUE);
        }
    }

    thread_unlock(thread);
    (void) splx(s);

    return ret;
#else /* MACH_FIXPRI */
    if (policy == POLICY_TIMESHARE)
        return KERN_SUCCESS;
    else
        return KERN_FAILURE;
#endif /* MACH_FIXPRI */
}
/*
 * thread_wire:
 *
 * Specify that the target thread must always be able
 * to run and to allocate memory.
 */
kern_return_t
thread_wire(
    host_t      host,
    thread_t    thread,
    boolean_t   wired)
{
    spl_t s;

    if (host == HOST_NULL)
        return KERN_INVALID_ARGUMENT;

    if (thread == THREAD_NULL)
        return KERN_INVALID_ARGUMENT;

    /*
     * This implementation only works for the current thread.
     * See stack_privilege.
     */
    if (thread != current_thread())
        return KERN_INVALID_ARGUMENT;

    s = splsched();
    thread_lock(thread);
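    /*
     * Wiring marks the thread VM-privileged and pins its kernel
     * stack; unwiring clears both privileges.
     */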
    if (wired) {
        thread->vm_privilege = 1;
        stack_privilege(thread);
    }
    else {
        thread->vm_privilege = 0;
        /*XXX stack_unprivilege(thread); */
        thread->stack_privilege = 0;
    }

    thread_unlock(thread);
    splx(s);
    return KERN_SUCCESS;
}
/*
 * thread_collect_scan:
 *
 * Attempt to free resources owned by threads.
 * pcb_collect doesn't do anything yet.
 */
void thread_collect_scan(void)
{
    thread_t thread, prev_thread;
    processor_set_t pset, prev_pset;

    prev_thread = THREAD_NULL;
    prev_pset = PROCESSOR_SET_NULL;

    simple_lock(&all_psets_lock);
    queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
        pset_lock(pset);
        queue_iterate(&pset->threads, thread, thread_t, pset_threads) {
            spl_t s = splsched();
            thread_lock(thread);

            /*
             * Only collect threads which are
             * not runnable and are swapped.
             */
            if ((thread->state & (TH_RUN|TH_SWAPPED))
                == TH_SWAPPED) {
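                /*
                 * Take references so the thread and pset stay valid
                 * while the locks are dropped; the deallocations of
                 * the previous thread and pset (which may block) are
                 * deferred until the locks have been released.
                 */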
                thread->ref_count++;
                thread_unlock(thread);
                (void) splx(s);
                pset->ref_count++;
                pset_unlock(pset);
                simple_unlock(&all_psets_lock);

                pcb_collect(thread);

                if (prev_thread != THREAD_NULL)
                    thread_deallocate(prev_thread);
                prev_thread = thread;

                if (prev_pset != PROCESSOR_SET_NULL)
                    pset_deallocate(prev_pset);
                prev_pset = pset;

                simple_lock(&all_psets_lock);
                pset_lock(pset);
            } else {
                thread_unlock(thread);
                (void) splx(s);
            }
        }
        pset_unlock(pset);
    }
    simple_unlock(&all_psets_lock);

    if (prev_thread != THREAD_NULL)
        thread_deallocate(prev_thread);
    if (prev_pset != PROCESSOR_SET_NULL)
        pset_deallocate(prev_pset);
}
boolean_t thread_collect_allowed = TRUE;
unsigned thread_collect_last_tick = 0;
unsigned thread_collect_max_rate = 0;   /* in ticks */

/*
 * consider_thread_collect:
 *
 * Called by the pageout daemon when the system needs more free pages.
 */
void consider_thread_collect(void)
{
    /*
     * By default, don't attempt thread collection more frequently
     * than once a second.
     */
    if (thread_collect_max_rate == 0)
        thread_collect_max_rate = hz;

    if (thread_collect_allowed &&
        (sched_tick >
         (thread_collect_last_tick + thread_collect_max_rate))) {
        thread_collect_last_tick = sched_tick;
        thread_collect_scan();
    }
}
#if MACH_DEBUG
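/*
 * stack_usage:
 *
 * Estimate how much of a kernel stack has been used by scanning
 * from the low end for the first word that no longer holds
 * STACK_MARKER.
 */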
vm_size_t stack_usage(
    vm_offset_t stack)
{
    unsigned i;

    for (i = 0; i < KERNEL_STACK_SIZE/sizeof(unsigned int); i++)
        if (((unsigned int *)stack)[i] != STACK_MARKER)
            break;

    return KERNEL_STACK_SIZE - i * sizeof(unsigned int);
}
/*
 * Machine-dependent code should call stack_init
 * before doing its own initialization of the stack.
 */
void stack_init(
    vm_offset_t stack)
{
    if (stack_check_usage) {
        unsigned i;

        for (i = 0; i < KERNEL_STACK_SIZE/sizeof(unsigned int); i++)
            ((unsigned int *)stack)[i] = STACK_MARKER;
    }
}

/*
 * Machine-dependent code should call stack_finalize
 * before releasing the stack memory.
 */
void stack_finalize(
    vm_offset_t stack)
{
    if (stack_check_usage) {
        vm_size_t used = stack_usage(stack);

        simple_lock(&stack_usage_lock);
        if (used > stack_max_usage)
            stack_max_usage = used;
        simple_unlock(&stack_usage_lock);
    }
}
#ifndef MACHINE_STACK
/*
 * stack_statistics:
 *
 * Return statistics on cached kernel stacks.
 * *maxusagep must be initialized by the caller.
 */
void stack_statistics(
    natural_t   *totalp,
    vm_size_t   *maxusagep)
{
    spl_t s;

    s = splsched();
    stack_lock();
    if (stack_check_usage) {
        vm_offset_t stack;

        /*
         * This is pretty expensive to do at splsched,
         * but it only happens when someone makes
         * a debugging call, so it should be OK.
         */
        for (stack = stack_free_list; stack != 0;
             stack = stack_next(stack)) {
            vm_size_t usage = stack_usage(stack);

            if (usage > *maxusagep)
                *maxusagep = usage;
        }
    }

    *totalp = stack_free_count;
    stack_unlock();
    (void) splx(s);
}
#endif /* MACHINE_STACK */
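/*
 * host_stack_usage:
 *
 * Return kernel stack statistics for the host: the count of cached
 * (free) stacks and the maximum recorded stack usage.
 */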
kern_return_t host_stack_usage(
    host_t          host,
    vm_size_t       *reservedp,
    unsigned int    *totalp,
    vm_size_t       *spacep,
    vm_size_t       *residentp,
    vm_size_t       *maxusagep,
    vm_offset_t     *maxstackp)
{
    natural_t total;
    vm_size_t maxusage;

    if (host == HOST_NULL)
        return KERN_INVALID_HOST;

    simple_lock(&stack_usage_lock);
    maxusage = stack_max_usage;
    simple_unlock(&stack_usage_lock);

    stack_statistics(&total, &maxusage);

    *reservedp = 0;
    *totalp = total;
    *spacep = *residentp = total * round_page(KERNEL_STACK_SIZE);
    *maxusagep = maxusage;
    *maxstackp = 0;
    return KERN_SUCCESS;
}
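/*
 * processor_set_stack_usage:
 *
 * Return kernel stack statistics for the threads currently
 * assigned to pset.
 */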
kern_return_t processor_set_stack_usage(
    processor_set_t pset,
    unsigned int    *totalp,
    vm_size_t       *spacep,
    vm_size_t       *residentp,
    vm_size_t       *maxusagep,
    vm_offset_t     *maxstackp)
{
    unsigned int total;
    vm_size_t maxusage;
    vm_offset_t maxstack;

    thread_t *threads;
    thread_t tmp_thread;

    unsigned int actual;        /* this many things */
    unsigned int i;

    vm_size_t size, size_needed;
    vm_offset_t addr;

    if (pset == PROCESSOR_SET_NULL)
        return KERN_INVALID_ARGUMENT;

    size = 0; addr = 0;
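    /*
     * Size a temporary thread array under the pset lock.  If the
     * thread count grows while the lock is dropped to allocate,
     * retry with the larger size.
     */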
    for (;;) {
        pset_lock(pset);
        if (!pset->active) {
            pset_unlock(pset);
            return KERN_INVALID_ARGUMENT;
        }

        actual = pset->thread_count;

        /* do we have the memory we need? */
        size_needed = actual * sizeof(thread_t);
        if (size_needed <= size)
            break;

        /* unlock the pset and allocate more memory */
        pset_unlock(pset);

        if (size != 0)
            kfree(addr, size);

        assert(size_needed > 0);
        size = size_needed;

        addr = kalloc(size);
        if (addr == 0)
            return KERN_RESOURCE_SHORTAGE;
    }

    /* OK, have memory and the processor_set is locked & active */
    threads = (thread_t *) addr;
    for (i = 0, tmp_thread = (thread_t) queue_first(&pset->threads);
         i < actual;
         i++,
         tmp_thread = (thread_t) queue_next(&tmp_thread->pset_threads)) {
        thread_reference(tmp_thread);
        threads[i] = tmp_thread;
    }
    assert(queue_end(&pset->threads, (queue_entry_t) tmp_thread));

    /* can unlock processor set now that we have the thread refs */
    pset_unlock(pset);

    /* calculate maxusage and free thread references */
    total = 0;
    maxusage = 0;
    maxstack = 0;
    for (i = 0; i < actual; i++) {
        thread_t thread = threads[i];
        vm_offset_t stack = 0;

        /*
         * thread->kernel_stack is only accurate if the
         * thread isn't swapped and is not executing.
         *
         * Of course, we don't have the appropriate locks
         * for these shenanigans.
         */
        if ((thread->state & TH_SWAPPED) == 0) {
            int cpu;

            stack = thread->kernel_stack;

            for (cpu = 0; cpu < NCPUS; cpu++)
                if (active_threads[cpu] == thread) {
                    stack = active_stacks[cpu];
                    break;
                }
        }

        if (stack != 0) {
            total++;

            if (stack_check_usage) {
                vm_size_t usage = stack_usage(stack);

                if (usage > maxusage) {
                    maxusage = usage;
                    maxstack = (vm_offset_t) thread;
                }
            }
        }

        thread_deallocate(thread);
    }

    if (size != 0)
        kfree(addr, size);

    *totalp = total;
    *residentp = *spacep = total * round_page(KERNEL_STACK_SIZE);
    *maxusagep = maxusage;
    *maxstackp = maxstack;
    return KERN_SUCCESS;
}
/*
 * Useful in the debugger:
 */
void
thread_stats(void)
{
    thread_t thread;
    int total = 0, rpcreply = 0;

    queue_iterate(&default_pset.threads, thread, thread_t, pset_threads) {
        total++;
        if (thread->ith_rpc_reply != IP_NULL)
            rpcreply++;
    }

    printf("%d total threads.\n", total);
    printf("%d using rpc_reply.\n", rpcreply);
}
#endif /* MACH_DEBUG */
#endif