processor.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030
  1. /*
  2. * Mach Operating System
  3. * Copyright (c) 1988-1993 Carnegie Mellon University
  4. * All Rights Reserved.
  5. *
  6. * Permission to use, copy, modify and distribute this software and its
  7. * documentation is hereby granted, provided that both the copyright
  8. * notice and this permission notice appear in all copies of the
  9. * software, derivative works or modified versions, and any portions
  10. * thereof, and that both notices appear in supporting documentation.
  11. *
  12. * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  13. * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  14. * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  15. *
  16. * Carnegie Mellon requests users of this software to return to
  17. *
  18. * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  19. * School of Computer Science
  20. * Carnegie Mellon University
  21. * Pittsburgh PA 15213-3890
  22. *
  23. * any improvements or extensions that they make and grant Carnegie Mellon
  24. * the rights to redistribute these changes.
  25. */
  26. /*
  27. * processor.c: processor and processor_set manipulation routines.
  28. */
  29. #include <string.h>
  30. #include <mach/boolean.h>
  31. #include <mach/policy.h>
  32. #include <mach/processor_info.h>
  33. #include <mach/vm_param.h>
  34. #include <kern/cpu_number.h>
  35. #include <kern/debug.h>
  36. #include <kern/kalloc.h>
  37. #include <kern/lock.h>
  38. #include <kern/host.h>
  39. #include <kern/ipc_tt.h>
  40. #include <kern/processor.h>
  41. #include <kern/sched.h>
  42. #include <kern/task.h>
  43. #include <kern/thread.h>
  44. #include <kern/ipc_host.h>
  45. #include <ipc/ipc_port.h>
  46. #if MACH_HOST
  47. #include <kern/slab.h>
  48. struct kmem_cache pset_cache;
  49. #endif /* MACH_HOST */
  50. /*
  51. * Exported variables.
  52. */
  53. struct processor_set default_pset;
  54. struct processor processor_array[NCPUS];
  55. queue_head_t all_psets;
  56. int all_psets_count;
  57. decl_simple_lock_data(, all_psets_lock);
  58. processor_t master_processor;
  59. processor_t processor_ptr[NCPUS];
  60. /*
  61. * Bootstrap the processor/pset system so the scheduler can run.
  62. */
  63. void pset_sys_bootstrap(void)
  64. {
  65. int i;
  66. pset_init(&default_pset);
  67. default_pset.empty = FALSE;
  68. for (i = 0; i < NCPUS; i++) {
  69. /*
  70. * Initialize processor data structures.
  71. * Note that cpu_to_processor(i) is processor_ptr[i].
  72. */
  73. processor_ptr[i] = &processor_array[i];
  74. processor_init(processor_ptr[i], i);
  75. }
  76. master_processor = cpu_to_processor(master_cpu);
  77. queue_init(&all_psets);
  78. simple_lock_init(&all_psets_lock);
  79. queue_enter(&all_psets, &default_pset, processor_set_t, all_psets);
  80. all_psets_count = 1;
  81. default_pset.active = TRUE;
  82. default_pset.empty = FALSE;
  83. /*
  84. * Note: the default_pset has a max_priority of BASEPRI_USER.
  85. * Internal kernel threads override this in kernel_thread.
  86. */
  87. }
  88. #if MACH_HOST
  89. /*
  90. * Rest of pset system initializations.
  91. */
  92. void pset_sys_init(void)
  93. {
  94. int i;
  95. processor_t processor;
  96. /*
  97. * Allocate the cache for processor sets.
  98. */
  99. kmem_cache_init(&pset_cache, "processor_set",
  100. sizeof(struct processor_set), 0, NULL, 0);
  101. /*
  102. * Give each processor a control port.
  103. * The master processor already has one.
  104. */
  105. for (i = 0; i < NCPUS; i++) {
  106. processor = cpu_to_processor(i);
  107. if (processor != master_processor &&
  108. machine_slot[i].is_cpu)
  109. {
  110. ipc_processor_init(processor);
  111. }
  112. }
  113. }
  114. #endif /* MACH_HOST */
  115. /*
  116. * Initialize the given processor_set structure.
  117. */
  118. void pset_init(
  119. processor_set_t pset)
  120. {
  121. int i;
  122. simple_lock_init(&pset->runq.lock);
  123. pset->runq.low = 0;
  124. pset->runq.count = 0;
  125. for (i = 0; i < NRQS; i++) {
  126. queue_init(&(pset->runq.runq[i]));
  127. }
  128. queue_init(&pset->idle_queue);
  129. pset->idle_count = 0;
  130. simple_lock_init(&pset->idle_lock);
  131. queue_init(&pset->processors);
  132. pset->processor_count = 0;
  133. pset->empty = TRUE;
  134. queue_init(&pset->tasks);
  135. pset->task_count = 0;
  136. queue_init(&pset->threads);
  137. pset->thread_count = 0;
  138. pset->ref_count = 1;
  139. simple_lock_init(&pset->ref_lock);
  140. queue_init(&pset->all_psets);
  141. pset->active = FALSE;
  142. simple_lock_init(&pset->lock);
  143. pset->pset_self = IP_NULL;
  144. pset->pset_name_self = IP_NULL;
  145. pset->max_priority = BASEPRI_USER;
  146. #if MACH_FIXPRI
  147. pset->policies = POLICY_TIMESHARE;
  148. #endif /* MACH_FIXPRI */
  149. pset->set_quantum = min_quantum;
  150. #if NCPUS > 1
  151. pset->quantum_adj_index = 0;
  152. simple_lock_init(&pset->quantum_adj_lock);
  153. for (i = 0; i <= NCPUS; i++) {
  154. pset->machine_quantum[i] = min_quantum;
  155. }
  156. #endif /* NCPUS > 1 */
  157. pset->mach_factor = 0;
  158. pset->load_average = 0;
  159. pset->sched_load = SCHED_SCALE; /* i.e. 1 */
  160. }
  161. /*
  162. * Initialize the given processor structure for the processor in
  163. * the slot specified by slot_num.
  164. */
  165. void processor_init(
  166. processor_t pr,
  167. int slot_num)
  168. {
  169. int i;
  170. simple_lock_init(&pr->runq.lock);
  171. pr->runq.low = 0;
  172. pr->runq.count = 0;
  173. for (i = 0; i < NRQS; i++) {
  174. queue_init(&(pr->runq.runq[i]));
  175. }
  176. queue_init(&pr->processor_queue);
  177. pr->state = PROCESSOR_OFF_LINE;
  178. pr->next_thread = THREAD_NULL;
  179. pr->idle_thread = THREAD_NULL;
  180. pr->quantum = 0;
  181. pr->first_quantum = FALSE;
  182. pr->last_quantum = 0;
  183. pr->processor_set = PROCESSOR_SET_NULL;
  184. pr->processor_set_next = PROCESSOR_SET_NULL;
  185. queue_init(&pr->processors);
  186. simple_lock_init(&pr->lock);
  187. pr->processor_self = IP_NULL;
  188. pr->slot_num = slot_num;
  189. }
  190. /*
  191. * pset_remove_processor() removes a processor from a processor_set.
  192. * It can only be called on the current processor. Caller must
  193. * hold lock on current processor and processor set.
  194. */
  195. void pset_remove_processor(
  196. processor_set_t pset,
  197. processor_t processor)
  198. {
  199. if (pset != processor->processor_set)
  200. panic("pset_remove_processor: wrong pset");
  201. queue_remove(&pset->processors, processor, processor_t, processors);
  202. processor->processor_set = PROCESSOR_SET_NULL;
  203. pset->processor_count--;
  204. quantum_set(pset);
  205. }
  206. /*
  207. * pset_add_processor() adds a processor to a processor_set.
  208. * It can only be called on the current processor. Caller must
  209. * hold lock on curent processor and on pset. No reference counting on
  210. * processors. Processor reference to pset is implicit.
  211. */
  212. void pset_add_processor(
  213. processor_set_t pset,
  214. processor_t processor)
  215. {
  216. queue_enter(&pset->processors, processor, processor_t, processors);
  217. processor->processor_set = pset;
  218. pset->processor_count++;
  219. quantum_set(pset);
  220. }
  221. /*
  222. * pset_remove_task() removes a task from a processor_set.
  223. * Caller must hold locks on pset and task. Pset reference count
  224. * is not decremented; caller must explicitly pset_deallocate.
  225. */
  226. void pset_remove_task(
  227. processor_set_t pset,
  228. task_t task)
  229. {
  230. if (pset != task->processor_set)
  231. return;
  232. queue_remove(&pset->tasks, task, task_t, pset_tasks);
  233. task->processor_set = PROCESSOR_SET_NULL;
  234. pset->task_count--;
  235. }
  236. /*
  237. * pset_add_task() adds a task to a processor_set.
  238. * Caller must hold locks on pset and task. Pset references to
  239. * tasks are implicit.
  240. */
  241. void pset_add_task(
  242. processor_set_t pset,
  243. task_t task)
  244. {
  245. queue_enter(&pset->tasks, task, task_t, pset_tasks);
  246. task->processor_set = pset;
  247. pset->task_count++;
  248. }
  249. /*
  250. * pset_remove_thread() removes a thread from a processor_set.
  251. * Caller must hold locks on pset and thread. Pset reference count
  252. * is not decremented; caller must explicitly pset_deallocate.
  253. */
  254. void pset_remove_thread(
  255. processor_set_t pset,
  256. thread_t thread)
  257. {
  258. queue_remove(&pset->threads, thread, thread_t, pset_threads);
  259. thread->processor_set = PROCESSOR_SET_NULL;
  260. pset->thread_count--;
  261. }
  262. /*
  263. * pset_add_thread() adds a thread to a processor_set.
  264. * Caller must hold locks on pset and thread. Pset references to
  265. * threads are implicit.
  266. */
  267. void pset_add_thread(
  268. processor_set_t pset,
  269. thread_t thread)
  270. {
  271. queue_enter(&pset->threads, thread, thread_t, pset_threads);
  272. thread->processor_set = pset;
  273. pset->thread_count++;
  274. }
  275. /*
  276. * thread_change_psets() changes the pset of a thread. Caller must
  277. * hold locks on both psets and thread. The old pset must be
  278. * explicitly pset_deallocat()'ed by caller.
  279. */
  280. void thread_change_psets(
  281. thread_t thread,
  282. processor_set_t old_pset,
  283. processor_set_t new_pset)
  284. {
  285. queue_remove(&old_pset->threads, thread, thread_t, pset_threads);
  286. old_pset->thread_count--;
  287. queue_enter(&new_pset->threads, thread, thread_t, pset_threads);
  288. thread->processor_set = new_pset;
  289. new_pset->thread_count++;
  290. }
/*
 *	pset_deallocate:
 *
 *	Remove one reference to the processor set.  Destroy processor_set
 *	if this was the last reference.
 */
void pset_deallocate(
	processor_set_t	pset)
{
	if (pset == PROCESSOR_SET_NULL)
		return;

	pset_ref_lock(pset);
	if (--pset->ref_count > 0) {
		/* Other references remain; nothing more to do. */
		pset_ref_unlock(pset);
		return;
	}
#if	!MACH_HOST
	/* Without MACH_HOST, the only pset is default_pset, which must
	   never lose its last reference. */
	panic("pset_deallocate: default_pset destroyed");
#endif	/* !MACH_HOST */

#if	MACH_HOST
	/*
	 * Reference count is zero, however the all_psets list
	 * holds an implicit reference and may make new ones.
	 * Its lock also dominates the pset lock.  To check for this,
	 * temporarily restore one reference, and then lock the
	 * other structures in the right order.
	 */
	pset->ref_count = 1;
	pset_ref_unlock(pset);

	simple_lock(&all_psets_lock);
	pset_ref_lock(pset);
	if (--pset->ref_count > 0) {
		/*
		 * Made an extra reference (someone re-referenced the
		 * pset while we held neither lock); abandon the
		 * destruction.
		 */
		pset_ref_unlock(pset);
		simple_unlock(&all_psets_lock);
		return;
	}

	/*
	 * Ok to destroy pset.  Make a few paranoia checks.
	 */
	if ((pset == &default_pset) || (pset->thread_count > 0) ||
	    (pset->task_count > 0) || pset->processor_count > 0) {
		panic("pset_deallocate: destroy default or active pset");
	}
	/*
	 * Remove from all_psets queue.
	 */
	queue_remove(&all_psets, pset, processor_set_t, all_psets);
	all_psets_count--;

	pset_ref_unlock(pset);
	simple_unlock(&all_psets_lock);

	/*
	 * That's it, free data structure.
	 */
	kmem_cache_free(&pset_cache, (vm_offset_t)pset);
#endif	/* MACH_HOST */
}
  350. /*
  351. * pset_reference:
  352. *
  353. * Add one reference to the processor set.
  354. */
  355. void pset_reference(
  356. processor_set_t pset)
  357. {
  358. pset_ref_lock(pset);
  359. pset->ref_count++;
  360. pset_ref_unlock(pset);
  361. }
  362. kern_return_t
  363. processor_info(
  364. processor_t processor,
  365. int flavor,
  366. host_t *host,
  367. processor_info_t info,
  368. natural_t *count)
  369. {
  370. int slot_num, state;
  371. processor_basic_info_t basic_info;
  372. if (processor == PROCESSOR_NULL)
  373. return KERN_INVALID_ARGUMENT;
  374. if (flavor != PROCESSOR_BASIC_INFO ||
  375. *count < PROCESSOR_BASIC_INFO_COUNT)
  376. return KERN_FAILURE;
  377. basic_info = (processor_basic_info_t) info;
  378. slot_num = processor->slot_num;
  379. basic_info->cpu_type = machine_slot[slot_num].cpu_type;
  380. basic_info->cpu_subtype = machine_slot[slot_num].cpu_subtype;
  381. state = processor->state;
  382. if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
  383. basic_info->running = FALSE;
  384. else
  385. basic_info->running = TRUE;
  386. basic_info->slot_num = slot_num;
  387. if (processor == master_processor)
  388. basic_info->is_master = TRUE;
  389. else
  390. basic_info->is_master = FALSE;
  391. *count = PROCESSOR_BASIC_INFO_COUNT;
  392. *host = &realhost;
  393. return KERN_SUCCESS;
  394. }
  395. kern_return_t processor_start(
  396. processor_t processor)
  397. {
  398. if (processor == PROCESSOR_NULL)
  399. return KERN_INVALID_ARGUMENT;
  400. #if NCPUS > 1
  401. return cpu_start(processor->slot_num);
  402. #else /* NCPUS > 1 */
  403. return KERN_FAILURE;
  404. #endif /* NCPUS > 1 */
  405. }
  406. kern_return_t processor_exit(
  407. processor_t processor)
  408. {
  409. if (processor == PROCESSOR_NULL)
  410. return KERN_INVALID_ARGUMENT;
  411. #if NCPUS > 1
  412. return processor_shutdown(processor);
  413. #else /* NCPUS > 1 */
  414. return KERN_FAILURE;
  415. #endif /* NCPUS > 1 */
  416. }
  417. kern_return_t
  418. processor_control(
  419. processor_t processor,
  420. processor_info_t info,
  421. natural_t count)
  422. {
  423. if (processor == PROCESSOR_NULL)
  424. return KERN_INVALID_ARGUMENT;
  425. #if NCPUS > 1
  426. return cpu_control(processor->slot_num, (int *)info, count);
  427. #else /* NCPUS > 1 */
  428. return KERN_FAILURE;
  429. #endif /* NCPUS > 1 */
  430. }
  431. /*
  432. * Precalculate the appropriate system quanta based on load. The
  433. * index into machine_quantum is the number of threads on the
  434. * processor set queue. It is limited to the number of processors in
  435. * the set.
  436. */
  437. void quantum_set(
  438. processor_set_t pset)
  439. {
  440. #if NCPUS > 1
  441. int i, ncpus;
  442. ncpus = pset->processor_count;
  443. for ( i=1 ; i <= ncpus ; i++) {
  444. pset->machine_quantum[i] =
  445. ((min_quantum * ncpus) + (i/2)) / i ;
  446. }
  447. pset->machine_quantum[0] = 2 * pset->machine_quantum[1];
  448. i = ((pset->runq.count > pset->processor_count) ?
  449. pset->processor_count : pset->runq.count);
  450. pset->set_quantum = pset->machine_quantum[i];
  451. #else /* NCPUS > 1 */
  452. default_pset.set_quantum = min_quantum;
  453. #endif /* NCPUS > 1 */
  454. }
  455. #if MACH_HOST
/*
 *	processor_set_create:
 *
 *	Create and return a new processor set.
 *
 *	Two references are taken up front to back the new_set and
 *	new_name out arguments (pset_init leaves ref_count at 1 for the
 *	set's own reference).  IPC access is enabled only after the set
 *	has been placed on the all_psets list.
 */
kern_return_t
processor_set_create(
	host_t		host,
	processor_set_t	*new_set,
	processor_set_t	*new_name)
{
	processor_set_t	pset;

	if (host == HOST_NULL)
		return KERN_INVALID_ARGUMENT;

	/* NOTE(review): the allocation result is used without a NULL
	   check -- confirm kmem_cache_alloc cannot fail here. */
	pset = (processor_set_t) kmem_cache_alloc(&pset_cache);
	pset_init(pset);
	pset_reference(pset);	/* for new_set out argument */
	pset_reference(pset);	/* for new_name out argument */
	ipc_pset_init(pset);
	pset->active = TRUE;

	simple_lock(&all_psets_lock);
	queue_enter(&all_psets, pset, processor_set_t, all_psets);
	all_psets_count++;
	simple_unlock(&all_psets_lock);

	ipc_pset_enable(pset);

	*new_set = pset;
	*new_name = pset;
	return KERN_SUCCESS;
}
/*
 *	processor_set_destroy:
 *
 *	destroy a processor set.  Any tasks, threads or processors
 *	currently assigned to it are reassigned to the default pset.
 */
kern_return_t processor_set_destroy(
	processor_set_t	pset)
{
	queue_entry_t	elem;
	queue_head_t	*list;

	if (pset == PROCESSOR_SET_NULL || pset == &default_pset)
		return KERN_INVALID_ARGUMENT;

	/*
	 * Handle multiple termination race.  First one through sets
	 * active to FALSE and disables ipc access.
	 */
	pset_lock(pset);
	if (!(pset->active)) {
		pset_unlock(pset);
		return KERN_FAILURE;
	}

	pset->active = FALSE;
	ipc_pset_disable(pset);

	/*
	 * Now reassign everything in this set to the default set.
	 * The pset lock is dropped around each reassignment call;
	 * the extra task/thread reference keeps the element alive
	 * across that unlocked window.
	 */
	if (pset->task_count > 0) {
		list = &pset->tasks;
		while (!queue_empty(list)) {
			elem = queue_first(list);
			task_reference((task_t) elem);
			pset_unlock(pset);
			task_assign((task_t) elem, &default_pset, FALSE);
			task_deallocate((task_t) elem);
			pset_lock(pset);
		}
	}

	if (pset->thread_count > 0) {
		list = &pset->threads;
		while (!queue_empty(list)) {
			elem = queue_first(list);
			thread_reference((thread_t) elem);
			pset_unlock(pset);
			thread_assign((thread_t) elem, &default_pset);
			thread_deallocate((thread_t) elem);
			pset_lock(pset);
		}
	}

	if (pset->processor_count > 0) {
		list = &pset->processors;
		while(!queue_empty(list)) {
			elem = queue_first(list);
			pset_unlock(pset);
			processor_assign((processor_t) elem, &default_pset, TRUE);
			pset_lock(pset);
		}
	}
	pset_unlock(pset);

	/*
	 * Destroy ipc state.
	 */
	ipc_pset_terminate(pset);

	/*
	 * Deallocate pset's reference to itself.
	 */
	pset_deallocate(pset);
	return KERN_SUCCESS;
}
  554. #else /* MACH_HOST */
  555. kern_return_t
  556. processor_set_create(
  557. host_t host,
  558. processor_set_t *new_set,
  559. processor_set_t *new_name)
  560. {
  561. return KERN_FAILURE;
  562. }
  563. kern_return_t processor_set_destroy(
  564. processor_set_t pset)
  565. {
  566. return KERN_FAILURE;
  567. }
  568. #endif /* MACH_HOST */
  569. kern_return_t
  570. processor_get_assignment(
  571. processor_t processor,
  572. processor_set_t *pset)
  573. {
  574. int state;
  575. if (processor == PROCESSOR_NULL)
  576. return KERN_INVALID_ARGUMENT;
  577. state = processor->state;
  578. if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
  579. return KERN_FAILURE;
  580. *pset = processor->processor_set;
  581. pset_reference(*pset);
  582. return KERN_SUCCESS;
  583. }
  584. kern_return_t
  585. processor_set_info(
  586. processor_set_t pset,
  587. int flavor,
  588. host_t *host,
  589. processor_set_info_t info,
  590. natural_t *count)
  591. {
  592. if (pset == PROCESSOR_SET_NULL)
  593. return KERN_INVALID_ARGUMENT;
  594. if (flavor == PROCESSOR_SET_BASIC_INFO) {
  595. processor_set_basic_info_t basic_info;
  596. if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
  597. return KERN_FAILURE;
  598. basic_info = (processor_set_basic_info_t) info;
  599. pset_lock(pset);
  600. basic_info->processor_count = pset->processor_count;
  601. basic_info->task_count = pset->task_count;
  602. basic_info->thread_count = pset->thread_count;
  603. basic_info->mach_factor = pset->mach_factor;
  604. basic_info->load_average = pset->load_average;
  605. pset_unlock(pset);
  606. *count = PROCESSOR_SET_BASIC_INFO_COUNT;
  607. *host = &realhost;
  608. return KERN_SUCCESS;
  609. }
  610. else if (flavor == PROCESSOR_SET_SCHED_INFO) {
  611. processor_set_sched_info_t sched_info;
  612. if (*count < PROCESSOR_SET_SCHED_INFO_COUNT)
  613. return KERN_FAILURE;
  614. sched_info = (processor_set_sched_info_t) info;
  615. pset_lock(pset);
  616. #if MACH_FIXPRI
  617. sched_info->policies = pset->policies;
  618. #else /* MACH_FIXPRI */
  619. sched_info->policies = POLICY_TIMESHARE;
  620. #endif /* MACH_FIXPRI */
  621. sched_info->max_priority = pset->max_priority;
  622. pset_unlock(pset);
  623. *count = PROCESSOR_SET_SCHED_INFO_COUNT;
  624. *host = &realhost;
  625. return KERN_SUCCESS;
  626. }
  627. *host = HOST_NULL;
  628. return KERN_INVALID_ARGUMENT;
  629. }
  630. /*
  631. * processor_set_max_priority:
  632. *
  633. * Specify max priority permitted on processor set. This affects
  634. * newly created and assigned threads. Optionally change existing
  635. * ones.
  636. */
  637. kern_return_t
  638. processor_set_max_priority(
  639. processor_set_t pset,
  640. int max_priority,
  641. boolean_t change_threads)
  642. {
  643. if (pset == PROCESSOR_SET_NULL || invalid_pri(max_priority))
  644. return KERN_INVALID_ARGUMENT;
  645. pset_lock(pset);
  646. pset->max_priority = max_priority;
  647. if (change_threads) {
  648. queue_head_t *list;
  649. thread_t thread;
  650. list = &pset->threads;
  651. queue_iterate(list, thread, thread_t, pset_threads) {
  652. if (thread->max_priority < max_priority)
  653. thread_max_priority(thread, pset, max_priority);
  654. }
  655. }
  656. pset_unlock(pset);
  657. return KERN_SUCCESS;
  658. }
  659. /*
  660. * processor_set_policy_enable:
  661. *
  662. * Allow indicated policy on processor set.
  663. */
  664. kern_return_t
  665. processor_set_policy_enable(
  666. processor_set_t pset,
  667. int policy)
  668. {
  669. if ((pset == PROCESSOR_SET_NULL) || invalid_policy(policy))
  670. return KERN_INVALID_ARGUMENT;
  671. #if MACH_FIXPRI
  672. pset_lock(pset);
  673. pset->policies |= policy;
  674. pset_unlock(pset);
  675. return KERN_SUCCESS;
  676. #else /* MACH_FIXPRI */
  677. if (policy == POLICY_TIMESHARE)
  678. return KERN_SUCCESS;
  679. else
  680. return KERN_FAILURE;
  681. #endif /* MACH_FIXPRI */
  682. }
  683. /*
  684. * processor_set_policy_disable:
  685. *
  686. * Forbid indicated policy on processor set. Time sharing cannot
  687. * be forbidden.
  688. */
  689. kern_return_t
  690. processor_set_policy_disable(
  691. processor_set_t pset,
  692. int policy,
  693. boolean_t change_threads)
  694. {
  695. if ((pset == PROCESSOR_SET_NULL) || policy == POLICY_TIMESHARE ||
  696. invalid_policy(policy))
  697. return KERN_INVALID_ARGUMENT;
  698. #if MACH_FIXPRI
  699. pset_lock(pset);
  700. /*
  701. * Check if policy enabled. Disable if so, then handle
  702. * change_threads.
  703. */
  704. if (pset->policies & policy) {
  705. pset->policies &= ~policy;
  706. if (change_threads) {
  707. queue_head_t *list;
  708. thread_t thread;
  709. list = &pset->threads;
  710. queue_iterate(list, thread, thread_t, pset_threads) {
  711. if (thread->policy == policy)
  712. thread_policy(thread, POLICY_TIMESHARE, 0);
  713. }
  714. }
  715. }
  716. pset_unlock(pset);
  717. #endif /* MACH_FIXPRI */
  718. return KERN_SUCCESS;
  719. }
  720. #define THING_TASK 0
  721. #define THING_THREAD 1
/*
 *	processor_set_things:
 *
 *	Common internals for processor_set_{threads,tasks}.
 *
 *	On success, *thing_list points to a kalloc'ed array of ports
 *	(converted from task/thread references) and *count holds its
 *	length; type selects THING_TASK or THING_THREAD.
 */
kern_return_t
processor_set_things(
	processor_set_t	pset,
	mach_port_t	**thing_list,
	natural_t	*count,
	int		type)
{
	unsigned int actual;	/* this many things */
	unsigned i;
	vm_size_t size, size_needed;
	vm_offset_t addr;

	if (pset == PROCESSOR_SET_NULL)
		return KERN_INVALID_ARGUMENT;

	size = 0; addr = 0;

	/*
	 * The count can change while the pset is unlocked for
	 * allocation, so loop until the buffer in hand is big enough
	 * for the count observed under the lock.
	 */
	for (;;) {
		pset_lock(pset);
		if (!pset->active) {
			pset_unlock(pset);
			return KERN_FAILURE;
		}

		if (type == THING_TASK)
			actual = pset->task_count;
		else
			actual = pset->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size)
			break;

		/* unlock the pset and allocate more memory */
		pset_unlock(pset);

		if (size != 0)
			kfree(addr, size);

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0)
			return KERN_RESOURCE_SHORTAGE;
	}

	/* OK, have memory and the processor_set is locked & active */
	switch (type) {
	    case THING_TASK: {
		task_t *tasks = (task_t *) addr;
		task_t task;

		for (i = 0, task = (task_t) queue_first(&pset->tasks);
		     i < actual;
		     i++, task = (task_t) queue_next(&task->pset_tasks)) {
			/* take ref for convert_task_to_port */
			task_reference(task);
			tasks[i] = task;
		}
		assert(queue_end(&pset->tasks, (queue_entry_t) task));
		break;
	    }

	    case THING_THREAD: {
		thread_t *threads = (thread_t *) addr;
		thread_t thread;

		for (i = 0, thread = (thread_t) queue_first(&pset->threads);
		     i < actual;
		     i++,
		     thread = (thread_t) queue_next(&thread->pset_threads)) {
			/* take ref for convert_thread_to_port */
			thread_reference(thread);
			threads[i] = thread;
		}
		assert(queue_end(&pset->threads, (queue_entry_t) thread));
		break;
	    }
	}

	/* can unlock processor set now that we have the task/thread refs */
	pset_unlock(pset);

	if (actual == 0) {
		/* no things, so return null pointer and deallocate memory */
		*thing_list = 0;
		*count = 0;

		if (size != 0)
			kfree(addr, size);
	} else {
		/* if we allocated too much, must copy */
		if (size_needed < size) {
			vm_offset_t newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* drop the references taken above
				   before failing */
				switch (type) {
				    case THING_TASK: {
					task_t *tasks = (task_t *) addr;

					for (i = 0; i < actual; i++)
						task_deallocate(tasks[i]);
					break;
				    }

				    case THING_THREAD: {
					thread_t *threads = (thread_t *) addr;

					for (i = 0; i < actual; i++)
						thread_deallocate(threads[i]);
					break;
				    }
				}
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			memcpy((void *) newaddr, (void *) addr, size_needed);
			kfree(addr, size);
			addr = newaddr;
		}

		*thing_list = (mach_port_t *) addr;
		*count = actual;

		/* do the conversion that Mig should handle */
		switch (type) {
		    case THING_TASK: {
			task_t *tasks = (task_t *) addr;

			for (i = 0; i < actual; i++)
				((mach_port_t *) tasks)[i] =
					(mach_port_t)convert_task_to_port(tasks[i]);
			break;
		    }

		    case THING_THREAD: {
			thread_t *threads = (thread_t *) addr;

			for (i = 0; i < actual; i++)
				((mach_port_t *) threads)[i] =
					(mach_port_t)convert_thread_to_port(threads[i]);
			break;
		    }
		}
	}

	return KERN_SUCCESS;
}
  852. /*
  853. * processor_set_tasks:
  854. *
  855. * List all tasks in the processor set.
  856. */
  857. kern_return_t
  858. processor_set_tasks(
  859. processor_set_t pset,
  860. task_array_t *task_list,
  861. natural_t *count)
  862. {
  863. return processor_set_things(pset, task_list, count, THING_TASK);
  864. }
  865. /*
  866. * processor_set_threads:
  867. *
  868. * List all threads in the processor set.
  869. */
  870. kern_return_t
  871. processor_set_threads(
  872. processor_set_t pset,
  873. thread_array_t *thread_list,
  874. natural_t *count)
  875. {
  876. return processor_set_things(pset, thread_list, count, THING_THREAD);
  877. }