/*
 * Mach Operating System
 * Copyright (c) 1993-1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/task.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young, David Golub,
 *		David Black
 *
 *	Task management primitives implementation.
 */

#include <glue/gnulinux.h>
#include <mach/message.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/task_special_ports.h>
#include <mach/thread_special_ports.h>
#include <vm/vm_kern.h>
#include <kern/debug.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_object.h>

#if 0
#include <string.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>
#include <mach_debug/mach_debug_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_types.h>
#include <kern/debug.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/slab.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
#include <kern/printf.h>
#include <kern/sched_prim.h>		/* for thread_wakeup */
#include <kern/ipc_tt.h>
#include <kern/syscall_emulation.h>
#include <kern/task_notify.user.h>
#include <vm/vm_kern.h>			/* for kernel_map, ipc_kernel_map */
#include <machine/machspl.h>		/* for splsched */
#endif

task_t kernel_task = TASK_NULL;
struct gnu_kmem_cache task_cache;

/* Where to send notifications about newly created tasks. */
ipc_port_t new_task_notification = NULL;
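
/*
 *	task_init:
 *
 *	Initialize the task module: set up the cache used to
 *	allocate task structures, then create the kernel task
 *	as the first task.
 */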
void task_init(void)
{
        gnu_kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
                            NULL, 0);
        //eml_init();
        //machine_task_module_init ();
#if 1
        /*
         * Create the kernel task as the first task.
         * Task_create must assign to kernel_task as a side effect,
         * for other initialization. (:-()
         */
        (void) task_create_kernel(TASK_NULL, FALSE, &kernel_task);
        //FIXME:(void) task_set_name(kernel_task, "kernel_task");
        //vm_map_set_name(kernel_map, kernel_task->name);
#endif
}
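
/*
 *	task_create:
 *
 *	User-visible task creation.  Rejects a null parent task
 *	and otherwise defers to task_create_kernel.
 */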
kern_return_t task_create(
        task_t parent_task,
        boolean_t inherit_memory,
        task_t *child_task)             /* OUT */
{
        if (parent_task == TASK_NULL)
                return KERN_INVALID_TASK;

        return task_create_kernel(parent_task, inherit_memory,
                                  child_task);
}
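
/*
 *	task_create_kernel:
 *
 *	Create a task, possibly as a child of parent_task.  Also
 *	used to create the kernel task at initialization time.
 *	The new task is returned holding two references: one for
 *	being alive and one for the caller.
 */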
kern_return_t
task_create_kernel(
        task_t parent_task,
        boolean_t inherit_memory,
        task_t *child_task)             /* OUT */
{
        task_t new_task;
        processor_set_t pset;
#if FAST_TAS
        int i;
#endif

        new_task = (task_t) gnu_kmem_cache_alloc(&task_cache);
        if (new_task == TASK_NULL)
                return KERN_RESOURCE_SHORTAGE;

        /* one ref for just being alive; one for our caller */
        new_task->ref_count = 2;

#if 0
        if (child_task == &kernel_task) {
                new_task->map = kernel_map;
        } else if (inherit_memory) {
                new_task->map = vm_map_fork(parent_task->map);
        } else {
                pmap_t new_pmap = pmap_create((vm_size_t) 0);
                if (new_pmap == PMAP_NULL)
                        new_task->map = VM_MAP_NULL;
                else {
                        new_task->map = vm_map_create(new_pmap,
                                        round_page(VM_MIN_ADDRESS),
                                        trunc_page(VM_MAX_ADDRESS));
                        if (new_task->map == VM_MAP_NULL)
                                pmap_destroy(new_pmap);
                }
        }
        if (new_task->map == VM_MAP_NULL) {
                kmem_cache_free(&task_cache, (vm_address_t) new_task);
                return KERN_RESOURCE_SHORTAGE;
        }
        if (child_task != &kernel_task)
                vm_map_set_name(new_task->map, new_task->name);
#endif

        simple_lock_init(&new_task->lock);
        queue_init(&new_task->thread_list);
        new_task->suspend_count = 0;
        new_task->active = TRUE;
        new_task->user_stop_count = 0;
        new_task->thread_count = 0;
        new_task->faults = 0;
        new_task->zero_fills = 0;
        new_task->reactivations = 0;
        new_task->pageins = 0;
        new_task->cow_faults = 0;
        new_task->messages_sent = 0;
        new_task->messages_received = 0;

        //eml_task_reference(new_task, parent_task);

        ipc_task_init(new_task, parent_task);
        //machine_task_init (new_task);

        new_task->total_user_time.seconds = 0;
        new_task->total_user_time.microseconds = 0;
        new_task->total_system_time.seconds = 0;
        new_task->total_system_time.microseconds = 0;

        //record_time_stamp (&new_task->creation_time);

#if 0
        if (parent_task != TASK_NULL) {
                task_lock(parent_task);
                pset = parent_task->processor_set;
                if (!pset->active)
                        pset = &default_pset;
                pset_reference(pset);
                new_task->priority = parent_task->priority;
                task_unlock(parent_task);
        }
        else {
                pset = &default_pset;
                pset_reference(pset);
                new_task->priority = BASEPRI_USER;
        }
        pset_lock(pset);
        pset_add_task(pset, new_task);
        pset_unlock(pset);
#endif

        new_task->may_assign = TRUE;
        new_task->assign_active = FALSE;

#if MACH_PCSAMPLE
        new_task->pc_sample.buffer = 0;
        new_task->pc_sample.seqno = 0;
        new_task->pc_sample.sampletypes = 0;
#endif	/* MACH_PCSAMPLE */

#if FAST_TAS
        for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
                if (inherit_memory) {
                        new_task->fast_tas_base[i] = parent_task->fast_tas_base[i];
                        new_task->fast_tas_end[i] = parent_task->fast_tas_end[i];
                } else {
                        new_task->fast_tas_base[i] = (vm_offset_t)0;
                        new_task->fast_tas_end[i] = (vm_offset_t)0;
                }
        }
#endif	/* FAST_TAS */

#if 0
        if (parent_task == TASK_NULL)
                snprintf (new_task->name, sizeof new_task->name, "%p",
                          new_task);
        else
                snprintf (new_task->name, sizeof new_task->name, "(%.*s)",
                          sizeof new_task->name - 3, parent_task->name);

        if (new_task_notification != NULL) {
                task_reference (new_task);
                task_reference (parent_task);
                mach_notify_new_task (new_task_notification,
                                      convert_task_to_port (new_task),
                                      parent_task
                                      ? convert_task_to_port (parent_task)
                                      : IP_NULL);
        }
#endif

        ipc_task_enable(new_task);

        *child_task = new_task;
        return KERN_SUCCESS;
}

#if 0
/*
 *	task_deallocate:
 *
 *	Give up a reference to the specified task and destroy it if there
 *	are no other references left.  It is assumed that the current thread
 *	is never in this task.
 */
void task_deallocate(
        task_t task)
{
        int c;
        processor_set_t pset;

        if (task == TASK_NULL)
                return;

        task_lock(task);
        c = --(task->ref_count);
        task_unlock(task);
        if (c != 0)
                return;

        machine_task_terminate (task);
        eml_task_deallocate(task);

        pset = task->processor_set;
        pset_lock(pset);
        pset_remove_task(pset, task);
        pset_unlock(pset);
        pset_deallocate(pset);
        vm_map_deallocate(task->map);
        is_release(task->itk_space);
        kmem_cache_free(&task_cache, (vm_offset_t) task);
}
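
/*
 *	task_reference:
 *
 *	Take an additional reference on the specified task.
 */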
void task_reference(
        task_t task)
{
        if (task == TASK_NULL)
                return;

        task_lock(task);
        task->ref_count++;
        task_unlock(task);
}

/*
 *	task_terminate:
 *
 *	Terminate the specified task.  See comments on thread_terminate
 *	(kern/thread.c) about problems with terminating the "current task."
 */
kern_return_t task_terminate(
        task_t task)
{
        thread_t thread, cur_thread;
        queue_head_t *list;
        task_t cur_task;
        spl_t s;

        if (task == TASK_NULL)
                return KERN_INVALID_ARGUMENT;

        list = &task->thread_list;
        cur_task = current_task();
        cur_thread = current_thread();

        /*
         * Deactivate task so that it can't be terminated again,
         * and so lengthy operations in progress will abort.
         *
         * If the current thread is in this task, remove it from
         * the task's thread list to keep the thread-termination
         * loop simple.
         */
        if (task == cur_task) {
                task_lock(task);
                if (!task->active) {
                        /*
                         * Task is already being terminated.
                         */
                        task_unlock(task);
                        return KERN_FAILURE;
                }

                /*
                 * Make sure current thread is not being terminated.
                 */
                s = splsched();
                thread_lock(cur_thread);
                if (!cur_thread->active) {
                        thread_unlock(cur_thread);
                        (void) splx(s);
                        task_unlock(task);
                        thread_terminate(cur_thread);
                        return KERN_FAILURE;
                }
                task_hold_locked(task);
                task->active = FALSE;
                queue_remove(list, cur_thread, thread_t, thread_list);
                thread_unlock(cur_thread);
                (void) splx(s);
                task_unlock(task);

                /*
                 * Shut down this thread's ipc now because it must
                 * be left alone to terminate the task.
                 */
                ipc_thread_disable(cur_thread);
                ipc_thread_terminate(cur_thread);
        }
        else {
                /*
                 * Lock both current and victim task to check for
                 * potential deadlock.
                 */
                if ((vm_offset_t)task < (vm_offset_t)cur_task) {
                        task_lock(task);
                        task_lock(cur_task);
                }
                else {
                        task_lock(cur_task);
                        task_lock(task);
                }

                /*
                 * Check if current thread or task is being terminated.
                 */
                s = splsched();
                thread_lock(cur_thread);
                if ((!cur_task->active) || (!cur_thread->active)) {
                        /*
                         * Current task or thread is being terminated.
                         */
                        thread_unlock(cur_thread);
                        (void) splx(s);
                        task_unlock(task);
                        task_unlock(cur_task);
                        thread_terminate(cur_thread);
                        return KERN_FAILURE;
                }
                thread_unlock(cur_thread);
                (void) splx(s);
                task_unlock(cur_task);

                if (!task->active) {
                        /*
                         * Task is already being terminated.
                         */
                        task_unlock(task);
                        return KERN_FAILURE;
                }
                task_hold_locked(task);
                task->active = FALSE;
                task_unlock(task);
        }

        /*
         * Prevent further execution of the task.  ipc_task_disable
         * prevents further task operations via the task port.
         * If this is the current task, the current thread will
         * be left running.
         */
        (void) task_dowait(task, TRUE);		/* may block */
        ipc_task_disable(task);

        /*
         * Terminate each thread in the task.
         *
         * The task_port is closed down, so no more thread_create
         * operations can be done.  Thread_force_terminate closes the
         * thread port for each thread; when that is done, the
         * thread will eventually disappear.  Thus the loop will
         * terminate.  Call thread_force_terminate instead of
         * thread_terminate to avoid deadlock checks.  Need
         * to call thread_block() inside loop because some other
         * thread (e.g., the reaper) may have to run to get rid
         * of all references to the thread; it won't vanish from
         * the task's thread list until the last one is gone.
         */
        task_lock(task);
        while (!queue_empty(list)) {
                thread = (thread_t) queue_first(list);
                thread_reference(thread);
                task_unlock(task);
                thread_force_terminate(thread);
                thread_deallocate(thread);
                thread_block(thread_no_continuation);
                task_lock(task);
        }
        task_unlock(task);

        /*
         * Shut down IPC.
         */
        ipc_task_terminate(task);

        /*
         * Deallocate the task's reference to itself.
         */
        task_deallocate(task);

        /*
         * If the current thread is in this task, it has not yet
         * been terminated (since it was removed from the task's
         * thread-list).  Put it back in the thread list (for
         * completeness), and terminate it.  Since it holds the
         * last reference to the task, terminating it will deallocate
         * the task.
         */
        if (cur_thread->task == task) {
                task_lock(task);
                s = splsched();
                queue_enter(list, cur_thread, thread_t, thread_list);
                (void) splx(s);
                task_unlock(task);
                (void) thread_terminate(cur_thread);
        }
        return KERN_SUCCESS;
}

/*
 *	task_hold_locked:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task, a count of
 *	suspends is maintained.
 *
 *	CONDITIONS: the task is locked and active.
 */
void task_hold_locked(
        task_t task)
{
        queue_head_t *list;
        thread_t thread, cur_thread;

        assert(task->active);

        cur_thread = current_thread();
        task->suspend_count++;

        /*
         * Iterate through all the threads and hold them.
         * Do not hold the current thread if it is within the
         * task.
         */
        list = &task->thread_list;
        queue_iterate(list, thread, thread_t, thread_list) {
                if (thread != cur_thread)
                        thread_hold(thread);
        }
}

/*
 *	task_hold:
 *
 *	Suspend execution of the specified task.
 *	This is a recursive-style suspension of the task, a count of
 *	suspends is maintained.
 */
kern_return_t task_hold(
        task_t task)
{
        task_lock(task);
        if (!task->active) {
                task_unlock(task);
                return KERN_FAILURE;
        }

        task_hold_locked(task);
        task_unlock(task);
        return KERN_SUCCESS;
}

/*
 *	task_dowait:
 *
 *	Wait until the task has really been suspended (all of the threads
 *	are stopped).  Skip the current thread if it is within the task.
 *
 *	If task is deactivated while waiting, return a failure code unless
 *	must_wait is true.
 */
kern_return_t task_dowait(
        task_t task,
        boolean_t must_wait)
{
        queue_head_t *list;
        thread_t thread, cur_thread, prev_thread;
        kern_return_t ret = KERN_SUCCESS;

        /*
         * Iterate through all the threads.
         * While waiting for each thread, we gain a reference to it
         * to prevent it from going away on us.  This guarantees
         * that the "next" thread in the list will be a valid thread.
         *
         * We depend on the fact that if threads are created while
         * we are looping through the threads, they will be held
         * automatically.  We don't care about threads that get
         * deallocated along the way (the reference prevents it
         * from happening to the thread we are working with).
         *
         * If the current thread is in the affected task, it is skipped.
         *
         * If the task is deactivated before we're done, and we don't
         * have to wait for it (must_wait is FALSE), just bail out.
         */
        cur_thread = current_thread();

        list = &task->thread_list;
        prev_thread = THREAD_NULL;
        task_lock(task);
        queue_iterate(list, thread, thread_t, thread_list) {
                if (!(task->active) && !(must_wait)) {
                        ret = KERN_FAILURE;
                        break;
                }
                if (thread != cur_thread) {
                        thread_reference(thread);
                        task_unlock(task);
                        if (prev_thread != THREAD_NULL)
                                thread_deallocate(prev_thread);	/* may block */
                        (void) thread_dowait(thread, TRUE);	/* may block */
                        prev_thread = thread;
                        task_lock(task);
                }
        }
        task_unlock(task);
        if (prev_thread != THREAD_NULL)
                thread_deallocate(prev_thread);		/* may block */
        return ret;
}
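
/*
 *	task_release:
 *
 *	Release the task from a previous hold: decrement the
 *	suspend count and release every thread in the task.
 */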
kern_return_t task_release(
        task_t task)
{
        queue_head_t *list;
        thread_t thread, next;

        task_lock(task);
        if (!task->active) {
                task_unlock(task);
                return KERN_FAILURE;
        }

        task->suspend_count--;

        /*
         * Iterate through all the threads and release them.
         */
        list = &task->thread_list;
        thread = (thread_t) queue_first(list);
        while (!queue_end(list, (queue_entry_t) thread)) {
                next = (thread_t) queue_next(&thread->thread_list);
                thread_release(thread);
                thread = next;
        }
        task_unlock(task);
        return KERN_SUCCESS;
}
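
/*
 *	task_threads:
 *
 *	Return an array of send rights for the threads in the task,
 *	in kalloc'ed memory sized to fit the thread count.
 */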
kern_return_t task_threads(
        task_t task,
        thread_array_t *thread_list,
        natural_t *count)
{
        unsigned int actual;            /* this many threads */
        thread_t thread;
        thread_t *threads;
        unsigned i;
        vm_size_t size, size_needed;
        vm_offset_t addr;

        if (task == TASK_NULL)
                return KERN_INVALID_ARGUMENT;

        size = 0; addr = 0;

        for (;;) {
                task_lock(task);
                if (!task->active) {
                        task_unlock(task);
                        return KERN_FAILURE;
                }

                actual = task->thread_count;

                /* do we have the memory we need? */
                size_needed = actual * sizeof(mach_port_t);
                if (size_needed <= size)
                        break;

                /* unlock the task and allocate more memory */
                task_unlock(task);

                if (size != 0)
                        kfree(addr, size);

                assert(size_needed > 0);
                size = size_needed;

                addr = kalloc(size);
                if (addr == 0)
                        return KERN_RESOURCE_SHORTAGE;
        }

        /* OK, have memory and the task is locked & active */
        threads = (thread_t *) addr;

        for (i = 0, thread = (thread_t) queue_first(&task->thread_list);
             i < actual;
             i++, thread = (thread_t) queue_next(&thread->thread_list)) {
                /* take ref for convert_thread_to_port */
                thread_reference(thread);
                threads[i] = thread;
        }
        assert(queue_end(&task->thread_list, (queue_entry_t) thread));

        /* can unlock task now that we've got the thread refs */
        task_unlock(task);

        if (actual == 0) {
                /* no threads, so return null pointer and deallocate memory */
                *thread_list = 0;
                *count = 0;

                if (size != 0)
                        kfree(addr, size);
        } else {
                /* if we allocated too much, must copy */
                if (size_needed < size) {
                        vm_offset_t newaddr;

                        newaddr = kalloc(size_needed);
                        if (newaddr == 0) {
                                for (i = 0; i < actual; i++)
                                        thread_deallocate(threads[i]);
                                kfree(addr, size);
                                return KERN_RESOURCE_SHORTAGE;
                        }

                        memcpy((void *) newaddr, (void *) addr, size_needed);
                        kfree(addr, size);
                        threads = (thread_t *) newaddr;
                }

                *thread_list = (mach_port_t *) threads;
                *count = actual;

                /* do the conversion that Mig should handle */
                for (i = 0; i < actual; i++)
                        ((ipc_port_t *) threads)[i] =
                                convert_thread_to_port(threads[i]);
        }

        return KERN_SUCCESS;
}
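
/*
 *	task_suspend:
 *
 *	Increment the user stop count and, on the first stop, hold
 *	all of the task's threads and wait for them to stop.  Each
 *	successful task_suspend is balanced by a task_resume.
 */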
kern_return_t task_suspend(
        task_t task)
{
        boolean_t hold;

        if (task == TASK_NULL)
                return KERN_INVALID_ARGUMENT;

        hold = FALSE;
        task_lock(task);
        if ((task->user_stop_count)++ == 0)
                hold = TRUE;
        task_unlock(task);

        /*
         * If the stop count was positive, the task is
         * already stopped and we can exit.
         */
        if (!hold) {
                return KERN_SUCCESS;
        }

        /*
         * Hold all of the threads in the task, and wait for
         * them to stop.  If the current thread is within
         * this task, hold it separately so that all of the
         * other threads can stop first.
         */
        if (task_hold(task) != KERN_SUCCESS)
                return KERN_FAILURE;

        if (task_dowait(task, FALSE) != KERN_SUCCESS)
                return KERN_FAILURE;

        if (current_task() == task) {
                spl_t s;

                thread_hold(current_thread());

                /*
                 * We want to call thread_block on our way out,
                 * to stop running.
                 */
                s = splsched();
                ast_on(cpu_number(), AST_BLOCK);
                (void) splx(s);
        }
        return KERN_SUCCESS;
}
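
/*
 *	task_resume:
 *
 *	Decrement the user stop count; when it drops to zero,
 *	release the task's threads.  Fails if the task was not
 *	suspended.
 */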
kern_return_t task_resume(
        task_t task)
{
        boolean_t release;

        if (task == TASK_NULL)
                return KERN_INVALID_ARGUMENT;

        release = FALSE;
        task_lock(task);
        if (task->user_stop_count > 0) {
                if (--(task->user_stop_count) == 0)
                        release = TRUE;
        }
        else {
                task_unlock(task);
                return KERN_FAILURE;
        }
        task_unlock(task);

        /*
         * Release the task if necessary.
         */
        if (release)
                return task_release(task);
        return KERN_SUCCESS;
}
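
/*
 *	task_info:
 *
 *	Return information about the task: basic counters, event
 *	counts, or aggregated thread times, selected by flavor.
 */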
kern_return_t task_info(
        task_t task,
        int flavor,
        task_info_t task_info_out,      /* pointer to OUT array */
        natural_t *task_info_count)     /* IN/OUT */
{
        vm_map_t map;

        if (task == TASK_NULL)
                return KERN_INVALID_ARGUMENT;

        switch (flavor) {
        case TASK_BASIC_INFO:
        {
                task_basic_info_t basic_info;

                /* Allow *task_info_count to be two words smaller than
                   the usual amount, because creation_time is a new member
                   that some callers might not know about. */
                if (*task_info_count < TASK_BASIC_INFO_COUNT - 2) {
                        return KERN_INVALID_ARGUMENT;
                }

                basic_info = (task_basic_info_t) task_info_out;

                map = (task == kernel_task) ? kernel_map : task->map;
                basic_info->virtual_size = map->size;
                basic_info->resident_size = pmap_resident_count(map->pmap)
                                            * PAGE_SIZE;

                task_lock(task);
                basic_info->base_priority = task->priority;
                basic_info->suspend_count = task->user_stop_count;
                basic_info->user_time.seconds
                        = task->total_user_time.seconds;
                basic_info->user_time.microseconds
                        = task->total_user_time.microseconds;
                basic_info->system_time.seconds
                        = task->total_system_time.seconds;
                basic_info->system_time.microseconds
                        = task->total_system_time.microseconds;
                read_time_stamp(&task->creation_time,
                                &basic_info->creation_time);
                task_unlock(task);

                if (*task_info_count > TASK_BASIC_INFO_COUNT)
                        *task_info_count = TASK_BASIC_INFO_COUNT;
                break;
        }

        case TASK_EVENTS_INFO:
        {
                task_events_info_t event_info;

                if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
                        return KERN_INVALID_ARGUMENT;
                }

                event_info = (task_events_info_t) task_info_out;

                task_lock(task);
                event_info->faults = task->faults;
                event_info->zero_fills = task->zero_fills;
                event_info->reactivations = task->reactivations;
                event_info->pageins = task->pageins;
                event_info->cow_faults = task->cow_faults;
                event_info->messages_sent = task->messages_sent;
                event_info->messages_received = task->messages_received;
                task_unlock(task);

                *task_info_count = TASK_EVENTS_INFO_COUNT;
                break;
        }

        case TASK_THREAD_TIMES_INFO:
        {
                task_thread_times_info_t times_info;
                thread_t thread;

                if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
                        return KERN_INVALID_ARGUMENT;
                }

                times_info = (task_thread_times_info_t) task_info_out;
                times_info->user_time.seconds = 0;
                times_info->user_time.microseconds = 0;
                times_info->system_time.seconds = 0;
                times_info->system_time.microseconds = 0;

                task_lock(task);
                queue_iterate(&task->thread_list, thread,
                              thread_t, thread_list)
                {
                        time_value_t user_time, system_time;
                        spl_t s;

                        s = splsched();
                        thread_lock(thread);

                        thread_read_times(thread, &user_time, &system_time);

                        thread_unlock(thread);
                        splx(s);

                        time_value_add(&times_info->user_time, &user_time);
                        time_value_add(&times_info->system_time, &system_time);
                }
                task_unlock(task);

                *task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
                break;
        }

        default:
                return KERN_INVALID_ARGUMENT;
        }

        return KERN_SUCCESS;
}

#if MACH_HOST
/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task
 */
kern_return_t
task_assign(
        task_t task,
        processor_set_t new_pset,
        boolean_t assign_threads)
{
        kern_return_t ret = KERN_SUCCESS;
        thread_t thread, prev_thread;
        queue_head_t *list;
        processor_set_t pset;

        if (task == TASK_NULL || new_pset == PROCESSOR_SET_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        /*
         * Freeze task`s assignment.  Prelude to assigning
         * task.  Only one freeze may be held per task.
         */
        task_lock(task);
        while (task->may_assign == FALSE) {
                task->assign_active = TRUE;
                assert_wait((event_t)&task->assign_active, TRUE);
                task_unlock(task);
                thread_block(thread_no_continuation);
                task_lock(task);
        }

        /*
         * Avoid work if task already in this processor set.
         */
        if (task->processor_set == new_pset) {
                /*
                 * No need for task->assign_active wakeup:
                 * task->may_assign is still TRUE.
                 */
                task_unlock(task);
                return KERN_SUCCESS;
        }

        task->may_assign = FALSE;
        task_unlock(task);

        /*
         * Safe to get the task`s pset: it cannot change while
         * task is frozen.
         */
        pset = task->processor_set;

        /*
         * Lock both psets now.  Use ordering to avoid deadlock.
         */
    Restart:
        if ((vm_offset_t) pset < (vm_offset_t) new_pset) {
                pset_lock(pset);
                pset_lock(new_pset);
        }
        else {
                pset_lock(new_pset);
                pset_lock(pset);
        }

        /*
         * Check if new_pset is ok to assign to.  If not,
         * reassign to default_pset.
         */
        if (!new_pset->active) {
                pset_unlock(pset);
                pset_unlock(new_pset);
                new_pset = &default_pset;
                goto Restart;
        }

        pset_reference(new_pset);

        /*
         * Now grab the task lock and move the task.
         */
        task_lock(task);
        pset_remove_task(pset, task);
        pset_add_task(new_pset, task);

        pset_unlock(pset);
        pset_unlock(new_pset);

        if (assign_threads == FALSE) {
                /*
                 * We leave existing threads at their
                 * old assignments.  Unfreeze task`s
                 * assignment.
                 */
                task->may_assign = TRUE;
                if (task->assign_active) {
                        task->assign_active = FALSE;
                        thread_wakeup((event_t) &task->assign_active);
                }
                task_unlock(task);
                pset_deallocate(pset);
                return KERN_SUCCESS;
        }

        /*
         * If current thread is in task, freeze its assignment.
         */
        if (current_thread()->task == task) {
                task_unlock(task);
                thread_freeze(current_thread());
                task_lock(task);
        }

        /*
         * Iterate down the thread list reassigning all the threads.
         * New threads pick up task's new processor set automatically.
         * Do current thread last because new pset may be empty.
         */
        list = &task->thread_list;
        prev_thread = THREAD_NULL;
        queue_iterate(list, thread, thread_t, thread_list) {
                if (!(task->active)) {
                        ret = KERN_FAILURE;
                        break;
                }
                if (thread != current_thread()) {
                        thread_reference(thread);
                        task_unlock(task);
                        if (prev_thread != THREAD_NULL)
                                thread_deallocate(prev_thread);	/* may block */
                        thread_assign(thread, new_pset);	/* may block */
                        prev_thread = thread;
                        task_lock(task);
                }
        }

        /*
         * Done, wakeup anyone waiting for us.
         */
        task->may_assign = TRUE;
        if (task->assign_active) {
                task->assign_active = FALSE;
                thread_wakeup((event_t)&task->assign_active);
        }
        task_unlock(task);
        if (prev_thread != THREAD_NULL)
                thread_deallocate(prev_thread);		/* may block */

        /*
         * Finish assignment of current thread.
         */
        if (current_thread()->task == task)
                thread_doassign(current_thread(), new_pset, TRUE);

        pset_deallocate(pset);

        return ret;
}
#else	/* MACH_HOST */
/*
 *	task_assign:
 *
 *	Change the assigned processor set for the task
 */
kern_return_t
task_assign(
        task_t task,
        processor_set_t new_pset,
        boolean_t assign_threads)
{
        return KERN_FAILURE;
}
#endif	/* MACH_HOST */

/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
        task_t task,
        boolean_t assign_threads)
{
        return task_assign(task, &default_pset, assign_threads);
}

/*
 *	task_get_assignment:
 *
 *	Return name of processor set that task is assigned to.
 */
kern_return_t task_get_assignment(
        task_t task,
        processor_set_t *pset)
{
        if (task == TASK_NULL)
                return KERN_INVALID_ARGUMENT;
        if (!task->active)
                return KERN_FAILURE;

        *pset = task->processor_set;
        pset_reference(*pset);
        return KERN_SUCCESS;
}

/*
 *	task_priority:
 *
 *	Set priority of task; used only for newly created threads.
 *	Optionally change priorities of threads.
 */
kern_return_t
task_priority(
        task_t task,
        int priority,
        boolean_t change_threads)
{
        kern_return_t ret = KERN_SUCCESS;

        if (task == TASK_NULL || invalid_pri(priority))
                return KERN_INVALID_ARGUMENT;

        task_lock(task);
        task->priority = priority;

        if (change_threads) {
                thread_t thread;
                queue_head_t *list;

                list = &task->thread_list;
                queue_iterate(list, thread, thread_t, thread_list) {
                        if (thread_priority(thread, priority, FALSE)
                            != KERN_SUCCESS)
                                ret = KERN_FAILURE;
                }
        }

        task_unlock(task);
        return ret;
}

/*
 *	task_set_name:
 *
 *	Set the name of task TASK to NAME.  This is a debugging aid.
 *	NAME will be used in error messages printed by the kernel.
 */
kern_return_t
task_set_name(
        task_t task,
        kernel_debug_name_t name)
{
        strncpy(task->name, name, sizeof task->name - 1);
        task->name[sizeof task->name - 1] = '\0';
        return KERN_SUCCESS;
}

/*
 *	task_collect_scan:
 *
 *	Attempt to free resources owned by tasks.
 */
void task_collect_scan(void)
{
        task_t task, prev_task;
        processor_set_t pset, prev_pset;

        prev_task = TASK_NULL;
        prev_pset = PROCESSOR_SET_NULL;

        simple_lock(&all_psets_lock);
        queue_iterate(&all_psets, pset, processor_set_t, all_psets) {
                pset_lock(pset);
                queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
                        task_reference(task);
                        pset_reference(pset);
                        pset_unlock(pset);
                        simple_unlock(&all_psets_lock);

                        machine_task_collect (task);
                        pmap_collect(task->map->pmap);

                        if (prev_task != TASK_NULL)
                                task_deallocate(prev_task);
                        prev_task = task;

                        if (prev_pset != PROCESSOR_SET_NULL)
                                pset_deallocate(prev_pset);
                        prev_pset = pset;

                        simple_lock(&all_psets_lock);
                        pset_lock(pset);
                }
                pset_unlock(pset);
        }
        simple_unlock(&all_psets_lock);

        if (prev_task != TASK_NULL)
                task_deallocate(prev_task);
        if (prev_pset != PROCESSOR_SET_NULL)
                pset_deallocate(prev_pset);
}

boolean_t task_collect_allowed = TRUE;
unsigned task_collect_last_tick = 0;
unsigned task_collect_max_rate = 0;	/* in ticks */

/*
 *	consider_task_collect:
 *
 *	Called by the pageout daemon when the system needs more free pages.
 */
void consider_task_collect(void)
{
        /*
         * By default, don't attempt task collection more frequently
         * than once a second.
         */
        if (task_collect_max_rate == 0)
                task_collect_max_rate = hz;

        if (task_collect_allowed &&
            (sched_tick > (task_collect_last_tick + task_collect_max_rate))) {
                task_collect_last_tick = sched_tick;
                task_collect_scan();
        }
}
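
/*
 *	task_ras_control:
 *
 *	Install, remove, or purge restartable atomic sequences (RAS)
 *	for the task.  Only meaningful when FAST_TAS is configured;
 *	otherwise KERN_FAILURE is returned.
 */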
kern_return_t
task_ras_control(
        task_t task,
        vm_offset_t pc,
        vm_offset_t endpc,
        int flavor)
{
        kern_return_t ret = KERN_FAILURE;

#if FAST_TAS
        int i;

        ret = KERN_SUCCESS;
        task_lock(task);
        switch (flavor) {
        case TASK_RAS_CONTROL_PURGE_ALL:	/* remove all RAS */
                for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
                        task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
                }
                break;
        case TASK_RAS_CONTROL_PURGE_ONE:	/* remove this RAS, collapse remaining */
                for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
                        if ((task->fast_tas_base[i] == pc)
                            && (task->fast_tas_end[i] == endpc)) {
                                while (i < TASK_FAST_TAS_NRAS-1) {
                                        task->fast_tas_base[i] = task->fast_tas_base[i+1];
                                        task->fast_tas_end[i] = task->fast_tas_end[i+1];
                                        i++;
                                }
                                task->fast_tas_base[TASK_FAST_TAS_NRAS-1] = 0;
                                task->fast_tas_end[TASK_FAST_TAS_NRAS-1] = 0;
                                break;
                        }
                }
                if (i == TASK_FAST_TAS_NRAS) {
                        ret = KERN_INVALID_ADDRESS;
                }
                break;
        case TASK_RAS_CONTROL_PURGE_ALL_AND_INSTALL_ONE:
                /* remove all RAS and install this RAS */
                for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
                        task->fast_tas_base[i] = task->fast_tas_end[i] = 0;
                }
                /* FALL THROUGH */
        case TASK_RAS_CONTROL_INSTALL_ONE:	/* install this RAS */
                for (i = 0; i < TASK_FAST_TAS_NRAS; i++) {
                        if ((task->fast_tas_base[i] == pc)
                            && (task->fast_tas_end[i] == endpc)) {
                                /* already installed */
                                break;
                        }
                        if ((task->fast_tas_base[i] == 0) && (task->fast_tas_end[i] == 0)) {
                                task->fast_tas_base[i] = pc;
                                task->fast_tas_end[i] = endpc;
                                break;
                        }
                }
                if (i == TASK_FAST_TAS_NRAS) {
                        ret = KERN_RESOURCE_SHORTAGE;
                }
                break;
        default:
                ret = KERN_INVALID_VALUE;
                break;
        }
        task_unlock(task);
#endif	/* FAST_TAS */
        return ret;
}

/*
 *	register_new_task_notification:
 *
 *	Register a port to which notifications about newly created
 *	tasks are sent.
 */
kern_return_t
register_new_task_notification(
        const host_t host,
        ipc_port_t notification)
{
        if (host == HOST_NULL)
                return KERN_INVALID_HOST;

        if (new_task_notification != NULL)
                return KERN_NO_ACCESS;

        new_task_notification = notification;
        return KERN_SUCCESS;
}
#endif