  1. /*
  2. * Mach Operating System
  3. * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
  4. * Copyright (c) 1993,1994 The University of Utah and
  5. * the Computer Systems Laboratory (CSL).
  6. * All rights reserved.
  7. *
  8. * Permission to use, copy, modify and distribute this software and its
  9. * documentation is hereby granted, provided that both the copyright
  10. * notice and this permission notice appear in all copies of the
  11. * software, derivative works or modified versions, and any portions
  12. * thereof, and that both notices appear in supporting documentation.
  13. *
  14. * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
  15. * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
  16. * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
  17. * THIS SOFTWARE.
  18. *
  19. * Carnegie Mellon requests users of this software to return to
  20. *
  21. * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  22. * School of Computer Science
  23. * Carnegie Mellon University
  24. * Pittsburgh PA 15213-3890
  25. *
  26. * any improvements or extensions that they make and grant Carnegie Mellon
  27. * the rights to redistribute these changes.
  28. */
  29. /*
  30. * File: ipc/ipc_entry.c
  31. * Author: Rich Draves
  32. * Date: 1989
  33. *
  34. * Primitive functions to manipulate translation entries.
  35. */
  36. #pragma GCC diagnostic error "-Wundef"
  37. #include <glue/gnulinux.h>
  38. #include <kern/printf.h>
  39. #include <string.h>
  40. #include <mach/kern_return.h>
  41. #include <mach/port.h>
  42. #include <kern/assert.h>
  43. #include <kern/sched_prim.h>
  44. #include <kern/slab.h>
  45. #include <ipc/port.h>
  46. #include <ipc/ipc_types.h>
  47. #include <ipc/ipc_entry.h>
  48. #include <ipc/ipc_space.h>
  49. #include <ipc/ipc_splay.h>
  50. #include <ipc/ipc_hash.h>
  51. #include <ipc/ipc_table.h>
  52. #include <ipc/ipc_object.h>
/* Slab cache backing ipc_tree_entry allocations (ite_alloc/ite_free). */
struct gnu_kmem_cache ipc_tree_entry_cache;
  54. /*
  55. * Routine: ipc_entry_tree_collision
  56. * Purpose:
  57. * Checks if "name" collides with an allocated name
  58. * in the space's tree. That is, returns TRUE
  59. * if the splay tree contains a name with the same
  60. * index as "name".
  61. * Conditions:
  62. * The space is locked (read or write) and active.
  63. */
  64. boolean_t
  65. ipc_entry_tree_collision(
  66. ipc_space_t space,
  67. mach_port_t name)
  68. {
  69. mach_port_index_t index;
  70. mach_port_t lower, upper;
  71. assert(space->is_active);
  72. /*
  73. * Check if we collide with the next smaller name
  74. * or the next larger name.
  75. */
  76. ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);
  77. index = MACH_PORT_INDEX(name);
  78. return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) ||
  79. ((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
  80. }
  81. /*
  82. * Routine: ipc_entry_lookup
  83. * Purpose:
  84. * Searches for an entry, given its name.
  85. * Conditions:
  86. * The space must be read or write locked throughout.
  87. * The space must be active.
  88. */
  89. ipc_entry_t
  90. ipc_entry_lookup(space, name)
  91. ipc_space_t space;
  92. mach_port_t name;
  93. {
  94. mach_port_index_t index;
  95. ipc_entry_t entry;
  96. assert(space->is_active);
  97. index = MACH_PORT_INDEX(name);
  98. if (index < space->is_table_size) {
  99. entry = &space->is_table[index];
  100. if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
  101. if (entry->ie_bits & IE_BITS_COLLISION) {
  102. assert(space->is_tree_total > 0);
  103. goto tree_lookup;
  104. } else
  105. entry = IE_NULL;
  106. else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
  107. entry = IE_NULL;
  108. } else if (space->is_tree_total == 0)
  109. entry = IE_NULL;
  110. else
  111. tree_lookup:
  112. entry = (ipc_entry_t)
  113. ipc_splay_tree_lookup(&space->is_tree, name);
  114. assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
  115. return entry;
  116. }
  117. /*
  118. * Routine: ipc_entry_get
  119. * Purpose:
  120. * Tries to allocate an entry out of the space.
  121. * Conditions:
  122. * The space is write-locked and active throughout.
  123. * An object may be locked. Will not allocate memory.
  124. * Returns:
  125. * KERN_SUCCESS A free entry was found.
  126. * KERN_NO_SPACE No entry allocated.
  127. */
  128. kern_return_t
  129. ipc_entry_get(space, namep, entryp)
  130. ipc_space_t space;
  131. mach_port_t *namep;
  132. ipc_entry_t *entryp;
  133. {
  134. ipc_entry_t table;
  135. mach_port_index_t first_free;
  136. mach_port_t new_name;
  137. ipc_entry_t free_entry;
  138. assert(space->is_active);
  139. table = space->is_table;
  140. first_free = table->ie_next;
  141. if (first_free == 0)
  142. return KERN_NO_SPACE;
  143. free_entry = &table[first_free];
  144. table->ie_next = free_entry->ie_next;
  145. /*
  146. * Initialize the new entry. We need only
  147. * increment the generation number and clear ie_request.
  148. */
  149. {
  150. mach_port_gen_t gen;
  151. assert((free_entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
  152. gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
  153. free_entry->ie_bits = gen;
  154. free_entry->ie_request = 0;
  155. new_name = MACH_PORT_MAKE(first_free, gen);
  156. }
  157. /*
  158. * The new name can't be MACH_PORT_NULL because index
  159. * is non-zero. It can't be MACH_PORT_DEAD because
  160. * the table isn't allowed to grow big enough.
  161. * (See comment in ipc/ipc_table.h.)
  162. */
  163. assert(MACH_PORT_VALID(new_name));
  164. assert(free_entry->ie_object == IO_NULL);
  165. *namep = new_name;
  166. *entryp = free_entry;
  167. return KERN_SUCCESS;
  168. }
  169. /*
  170. * Routine: ipc_entry_alloc
  171. * Purpose:
  172. * Allocate an entry out of the space.
  173. * Conditions:
  174. * The space is not locked before, but it is write-locked after
  175. * if the call is successful. May allocate memory.
  176. * Returns:
  177. * KERN_SUCCESS An entry was allocated.
  178. * KERN_INVALID_TASK The space is dead.
  179. * KERN_NO_SPACE No room for an entry in the space.
  180. * KERN_RESOURCE_SHORTAGE Couldn't allocate memory for an entry.
  181. */
  182. kern_return_t
  183. ipc_entry_alloc(
  184. ipc_space_t space,
  185. mach_port_t *namep,
  186. ipc_entry_t *entryp)
  187. {
  188. kern_return_t kr;
  189. is_write_lock(space);
  190. for (;;) {
  191. if (!space->is_active) {
  192. is_write_unlock(space);
  193. return KERN_INVALID_TASK;
  194. }
  195. kr = ipc_entry_get(space, namep, entryp);
  196. if (kr == KERN_SUCCESS)
  197. return kr;
  198. kr = ipc_entry_grow_table(space);
  199. if (kr != KERN_SUCCESS)
  200. return kr; /* space is unlocked */
  201. }
  202. }
/*
 *	Routine:	ipc_entry_alloc_name
 *	Purpose:
 *		Allocates/finds an entry with a specific name.
 *		If an existing entry is returned, its type will be nonzero.
 *	Conditions:
 *		The space is not locked before, but it is write-locked after
 *		if the call is successful.  May allocate memory.
 *	Returns:
 *		KERN_SUCCESS		Found existing entry with same name.
 *		KERN_SUCCESS		Allocated a new entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_entry_alloc_name(
	ipc_space_t	space,
	mach_port_t	name,
	ipc_entry_t	*entryp)
{
	mach_port_index_t index = MACH_PORT_INDEX(name);
	mach_port_gen_t gen = MACH_PORT_GEN(name);
	/*
	 *	Tree node allocated (while unlocked) on a previous pass
	 *	through the retry loop; carried so it is either consumed
	 *	by the insertion or freed on every exit path.
	 */
	ipc_tree_entry_t tree_entry = ITE_NULL;

	assert(MACH_PORT_VALID(name));

	is_write_lock(space);

	for (;;) {
		ipc_entry_t entry;
		ipc_tree_entry_t tentry;
		ipc_table_size_t its;

		if (!space->is_active) {
			is_write_unlock(space);
			if (tree_entry) ite_free(tree_entry);
			return KERN_INVALID_TASK;
		}

		/*
		 *	If we are under the table cutoff,
		 *	there are three cases:
		 *	1) The entry is inuse, for the same name
		 *	2) The entry is inuse, for a different name
		 *	3) The entry is free
		 */

		if ((0 < index) && (index < space->is_table_size)) {
			ipc_entry_t table = space->is_table;

			entry = &table[index];

			if (IE_BITS_TYPE(entry->ie_bits)) {
				if (IE_BITS_GEN(entry->ie_bits) == gen) {
					/* case 1: same name already allocated */
					*entryp = entry;
					if (tree_entry) ite_free(tree_entry);
					return KERN_SUCCESS;
				}
				/* case 2: fall through to the tree path below */
			} else {
				mach_port_index_t free_index, next_index;

				/*
				 *	Rip the entry out of the free list
				 *	(singly linked through ie_next,
				 *	rooted at table[0]; linear walk).
				 */

				for (free_index = 0;
				     (next_index = table[free_index].ie_next)
				      != index;
				     free_index = next_index)
					continue;

				table[free_index].ie_next =
					table[next_index].ie_next;

				entry->ie_bits = gen;
				assert(entry->ie_object == IO_NULL);
				entry->ie_request = 0;

				*entryp = entry;
				if (tree_entry) ite_free(tree_entry);
				return KERN_SUCCESS;
			}
		}

		/*
		 *	Before trying to allocate any memory,
		 *	check if the entry already exists in the tree.
		 *	This avoids spurious resource errors.
		 *	The splay tree makes a subsequent lookup/insert
		 *	of the same name cheap, so this costs little.
		 */

		if ((space->is_tree_total > 0) &&
		    ((tentry = ipc_splay_tree_lookup(&space->is_tree, name))
		     != ITE_NULL)) {
			assert(tentry->ite_space == space);
			assert(IE_BITS_TYPE(tentry->ite_bits));

			*entryp = &tentry->ite_entry;
			if (tree_entry) ite_free(tree_entry);
			return KERN_SUCCESS;
		}

		its = space->is_table_next;

		/*
		 *	Check if the table should be grown.
		 *
		 *	Note that if space->is_table_size == its->its_size,
		 *	then we won't ever try to grow the table.
		 *
		 *	Note that we are optimistically assuming that name
		 *	doesn't collide with any existing names.  (So if
		 *	it were entered into the tree, is_tree_small would
		 *	be incremented.)  This is OK, because even in that
		 *	case, we don't lose memory by growing the table.
		 */

		if ((space->is_table_size <= index) &&
		    (index < its->its_size) &&
		    (((its->its_size - space->is_table_size) *
		      sizeof(struct ipc_entry)) <
		     ((space->is_tree_small + 1) *
		      sizeof(struct ipc_tree_entry)))) {
			kern_return_t kr;

			/*
			 *	Can save space by growing the table.
			 *	Because the space will be unlocked,
			 *	we must restart.
			 */

			kr = ipc_entry_grow_table(space);
			assert(kr != KERN_NO_SPACE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				if (tree_entry) ite_free(tree_entry);
				return kr;
			}

			continue;
		}

		/*
		 *	If a splay-tree entry was allocated previously,
		 *	go ahead and insert it into the tree.
		 */

		if (tree_entry != ITE_NULL) {
			space->is_tree_total++;

			/*
			 *	Keep the bookkeeping straight: either flag
			 *	the table slot as colliding, or count the
			 *	entry toward is_tree_small if it falls in
			 *	the next table's range without colliding.
			 */
			if (index < space->is_table_size)
				space->is_table[index].ie_bits |=
					IE_BITS_COLLISION;
			else if ((index < its->its_size) &&
				 !ipc_entry_tree_collision(space, name))
				space->is_tree_small++;

			ipc_splay_tree_insert(&space->is_tree,
					      name, tree_entry);

			tree_entry->ite_bits = 0;
			tree_entry->ite_object = IO_NULL;
			tree_entry->ite_request = 0;
			tree_entry->ite_space = space;
			*entryp = &tree_entry->ite_entry;
			return KERN_SUCCESS;
		}

		/*
		 *	Allocate a tree entry and try again.
		 *	The lock must be dropped while allocating,
		 *	so all the checks above are redone on retry.
		 */

		is_write_unlock(space);
		tree_entry = ite_alloc();
		if (tree_entry == ITE_NULL)
			return KERN_RESOURCE_SHORTAGE;
		is_write_lock(space);
	}
}
/*
 *	Routine:	ipc_entry_dealloc
 *	Purpose:
 *		Deallocates an entry from a space.
 *	Conditions:
 *		The space must be write-locked throughout.
 *		The space must be active.
 */

void
ipc_entry_dealloc(
	ipc_space_t	space,
	mach_port_t	name,
	ipc_entry_t	entry)
{
	ipc_entry_t table;
	ipc_entry_num_t size;
	mach_port_index_t index;

	assert(space->is_active);
	/* caller must have already released the object and request */
	assert(entry->ie_object == IO_NULL);
	assert(entry->ie_request == 0);

	index = MACH_PORT_INDEX(name);
	table = space->is_table;
	size = space->is_table_size;

	if ((index < size) && (entry == &table[index])) {
		/* the entry lives in the table proper */
		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));

		if (entry->ie_bits & IE_BITS_COLLISION) {
			struct ipc_splay_tree small, collisions;
			ipc_tree_entry_t tentry;
			mach_port_t tname;
			boolean_t pick;
			ipc_entry_bits_t bits;
			ipc_object_t obj;

			/* must move an entry from tree to table */

			/*
			 *	Split the tree so that "collisions" holds
			 *	exactly the tree entries with this index,
			 *	and "small" holds everything above them.
			 */
			ipc_splay_tree_split(&space->is_tree,
					     MACH_PORT_MAKE(index+1, 0),
					     &collisions);
			ipc_splay_tree_split(&collisions,
					     MACH_PORT_MAKE(index, 0),
					     &small);

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			assert(pick);
			assert(MACH_PORT_INDEX(tname) == index);

			/* promote the picked tree entry into the table slot */
			bits = tentry->ite_bits;
			entry->ie_bits = bits | MACH_PORT_GEN(tname);
			entry->ie_object = obj = tentry->ite_object;
			entry->ie_request = tentry->ite_request;
			assert(tentry->ite_space == space);

			if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND) {
				/* keep the reverse hash consistent with the move */
				ipc_hash_global_delete(space, obj,
						       tname, tentry);
				ipc_hash_local_insert(space, obj,
						      index, entry);
			}

			ipc_splay_tree_delete(&collisions, tname, tentry);

			assert(space->is_tree_total > 0);
			space->is_tree_total--;

			/* check if collision bit should still be on */

			pick = ipc_splay_tree_pick(&collisions,
						   &tname, &tentry);
			if (pick) {
				entry->ie_bits |= IE_BITS_COLLISION;
				ipc_splay_tree_join(&space->is_tree,
						    &collisions);
			}

			ipc_splay_tree_join(&space->is_tree, &small);
		} else {
			/* no collisions: just push the slot on the free list */
			entry->ie_bits &= IE_BITS_GEN_MASK;
			entry->ie_next = table->ie_next;
			table->ie_next = index;
		}
	} else {
		/* the entry is a splay-tree node */
		ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;

		assert(tentry->ite_space == space);

		ipc_splay_tree_delete(&space->is_tree, name, tentry);

		assert(space->is_tree_total > 0);
		space->is_tree_total--;

		if (index < size) {
			ipc_entry_t ientry = &table[index];

			assert(ientry->ie_bits & IE_BITS_COLLISION);

			/* clear the collision bit if this was the last clash */
			if (!ipc_entry_tree_collision(space, name))
				ientry->ie_bits &= ~IE_BITS_COLLISION;
		} else if ((index < space->is_table_next->its_size) &&
			   !ipc_entry_tree_collision(space, name)) {
			/* it counted toward is_tree_small; uncount it */
			assert(space->is_tree_small > 0);
			space->is_tree_small--;
		}
	}
}
  443. /*
  444. * Routine: ipc_entry_grow_table
  445. * Purpose:
  446. * Grows the table in a space.
  447. * Conditions:
  448. * The space must be write-locked and active before.
  449. * If successful, it is also returned locked.
  450. * Allocates memory.
  451. * Returns:
  452. * KERN_SUCCESS Grew the table.
  453. * KERN_SUCCESS Somebody else grew the table.
  454. * KERN_SUCCESS The space died.
  455. * KERN_NO_SPACE Table has maximum size already.
  456. * KERN_RESOURCE_SHORTAGE Couldn't allocate a new table.
  457. */
  458. kern_return_t
  459. ipc_entry_grow_table(space)
  460. ipc_space_t space;
  461. {
  462. ipc_entry_num_t osize, size, nsize;
  463. do {
  464. ipc_entry_t otable, table;
  465. ipc_table_size_t oits, its, nits;
  466. mach_port_index_t i, free_index;
  467. assert(space->is_active);
  468. if (space->is_growing) {
  469. /*
  470. * Somebody else is growing the table.
  471. * We just wait for them to finish.
  472. */
  473. assert_wait((event_t) space, FALSE);
  474. is_write_unlock(space);
  475. thread_block((void (*)()) 0);
  476. is_write_lock(space);
  477. return KERN_SUCCESS;
  478. }
  479. otable = space->is_table;
  480. its = space->is_table_next; //TODO make growable
  481. size = its->its_size;
  482. oits = its - 1;
  483. osize = oits->its_size;
  484. nits = its + 1;
  485. nsize = nits->its_size;
  486. if (osize == size) {
  487. is_write_unlock(space);
  488. printf_once("no more room for ipc_entry_grow_table in space %p\n", space);
  489. return KERN_NO_SPACE;
  490. }
  491. assert((osize < size) && (size <= nsize));
  492. /*
  493. * OK, we'll attempt to grow the table.
  494. * The realloc requires that the old table
  495. * remain in existence.
  496. */
  497. space->is_growing = TRUE;
  498. is_write_unlock(space);
  499. #ifdef BADAPPLE
  500. if (it_entries_reallocable(oits))
  501. table = it_entries_realloc(oits, otable, its);
  502. else
  503. #endif
  504. table = it_entries_alloc(its);
  505. is_write_lock(space);
  506. space->is_growing = FALSE;
  507. /*
  508. * We need to do a wakeup on the space,
  509. * to rouse waiting threads. We defer
  510. * this until the space is unlocked,
  511. * because we don't want them to spin.
  512. */
  513. if (table == IE_NULL) {
  514. is_write_unlock(space);
  515. thread_wakeup((event_t) space);
  516. return KERN_RESOURCE_SHORTAGE;
  517. }
  518. if (!space->is_active) {
  519. /*
  520. * The space died while it was unlocked.
  521. */
  522. is_write_unlock(space);
  523. thread_wakeup((event_t) space);
  524. it_entries_free(its, table);
  525. is_write_lock(space);
  526. return KERN_SUCCESS;
  527. }
  528. assert(space->is_table == otable);
  529. assert(space->is_table_next == its);
  530. assert(space->is_table_size == osize);
  531. space->is_table = table;
  532. space->is_table_size = size;
  533. space->is_table_next = nits;
  534. /*
  535. * If we did a realloc, it remapped the data.
  536. * Otherwise we copy by hand first. Then we have
  537. * to clear the index fields in the old part and
  538. * zero the new part.
  539. */
  540. if (!it_entries_reallocable(oits))
  541. memcpy(table, otable,
  542. osize * sizeof(struct ipc_entry));
  543. for (i = 0; i < osize; i++)
  544. table[i].ie_index = 0;
  545. (void) memset((void *) (table + osize), 0,
  546. (size - osize) * sizeof(struct ipc_entry));
  547. /*
  548. * Put old entries into the reverse hash table.
  549. */
  550. for (i = 0; i < osize; i++) {
  551. ipc_entry_t entry = &table[i];
  552. if (IE_BITS_TYPE(entry->ie_bits) ==
  553. MACH_PORT_TYPE_SEND)
  554. ipc_hash_local_insert(space, entry->ie_object,
  555. i, entry);
  556. }
  557. /*
  558. * If there are entries in the splay tree,
  559. * then we have work to do:
  560. * 1) transfer entries to the table
  561. * 2) update is_tree_small
  562. */
  563. if (space->is_tree_total > 0) {
  564. mach_port_index_t index;
  565. boolean_t delete;
  566. struct ipc_splay_tree ignore;
  567. struct ipc_splay_tree move;
  568. struct ipc_splay_tree small;
  569. ipc_entry_num_t nosmall;
  570. ipc_tree_entry_t tentry;
  571. /*
  572. * The splay tree divides into four regions,
  573. * based on the index of the entries:
  574. * 1) 0 <= index < osize
  575. * 2) osize <= index < size
  576. * 3) size <= index < nsize
  577. * 4) nsize <= index
  578. *
  579. * Entries in the first part are ignored.
  580. * Entries in the second part, that don't
  581. * collide, are moved into the table.
  582. * Entries in the third part, that don't
  583. * collide, are counted for is_tree_small.
  584. * Entries in the fourth part are ignored.
  585. */
  586. ipc_splay_tree_split(&space->is_tree,
  587. MACH_PORT_MAKE(nsize, 0),
  588. &small);
  589. ipc_splay_tree_split(&small,
  590. MACH_PORT_MAKE(size, 0),
  591. &move);
  592. ipc_splay_tree_split(&move,
  593. MACH_PORT_MAKE(osize, 0),
  594. &ignore);
  595. /* move entries into the table */
  596. for (tentry = ipc_splay_traverse_start(&move);
  597. tentry != ITE_NULL;
  598. tentry = ipc_splay_traverse_next(&move, delete)) {
  599. mach_port_t name;
  600. mach_port_gen_t gen;
  601. mach_port_type_t type;
  602. ipc_entry_bits_t bits;
  603. ipc_object_t obj;
  604. ipc_entry_t entry;
  605. name = tentry->ite_name;
  606. gen = MACH_PORT_GEN(name);
  607. index = MACH_PORT_INDEX(name);
  608. assert(tentry->ite_space == space);
  609. assert((osize <= index) && (index < size));
  610. entry = &table[index];
  611. /* collision with previously moved entry? */
  612. bits = entry->ie_bits;
  613. if (bits != 0) {
  614. assert(IE_BITS_TYPE(bits));
  615. assert(IE_BITS_GEN(bits) != gen);
  616. entry->ie_bits =
  617. bits | IE_BITS_COLLISION;
  618. delete = FALSE;
  619. continue;
  620. }
  621. bits = tentry->ite_bits;
  622. type = IE_BITS_TYPE(bits);
  623. assert(type != MACH_PORT_TYPE_NONE);
  624. entry->ie_bits = bits | gen;
  625. entry->ie_object = obj = tentry->ite_object;
  626. entry->ie_request = tentry->ite_request;
  627. if (type == MACH_PORT_TYPE_SEND) {
  628. ipc_hash_global_delete(space, obj,
  629. name, tentry);
  630. ipc_hash_local_insert(space, obj,
  631. index, entry);
  632. }
  633. space->is_tree_total--;
  634. delete = TRUE;
  635. }
  636. ipc_splay_traverse_finish(&move);
  637. /* count entries for is_tree_small */
  638. nosmall = 0; index = 0;
  639. for (tentry = ipc_splay_traverse_start(&small);
  640. tentry != ITE_NULL;
  641. tentry = ipc_splay_traverse_next(&small, FALSE)) {
  642. mach_port_index_t nindex;
  643. nindex = MACH_PORT_INDEX(tentry->ite_name);
  644. if (nindex != index) {
  645. nosmall++;
  646. index = nindex;
  647. }
  648. }
  649. ipc_splay_traverse_finish(&small);
  650. assert(nosmall <= (nsize - size));
  651. assert(nosmall <= space->is_tree_total);
  652. space->is_tree_small = nosmall;
  653. /* put the splay tree back together */
  654. ipc_splay_tree_join(&space->is_tree, &small);
  655. ipc_splay_tree_join(&space->is_tree, &move);
  656. ipc_splay_tree_join(&space->is_tree, &ignore);
  657. }
  658. /*
  659. * Add entries in the new part which still aren't used
  660. * to the free list. Add them in reverse order,
  661. * and set the generation number to -1, so that
  662. * early allocations produce "natural" names.
  663. */
  664. free_index = table[0].ie_next;
  665. for (i = size-1; i >= osize; --i) {
  666. ipc_entry_t entry = &table[i];
  667. if (entry->ie_bits == 0) {
  668. entry->ie_bits = IE_BITS_GEN_MASK;
  669. entry->ie_next = free_index;
  670. free_index = i;
  671. }
  672. }
  673. table[0].ie_next = free_index;
  674. /*
  675. * Now we need to free the old table.
  676. * If the space dies or grows while unlocked,
  677. * then we can quit here.
  678. */
  679. is_write_unlock(space);
  680. thread_wakeup((event_t) space);
  681. it_entries_free(oits, otable);
  682. is_write_lock(space);
  683. if (!space->is_active || (space->is_table_next != nits))
  684. return KERN_SUCCESS;
  685. /*
  686. * We might have moved enough entries from
  687. * the splay tree into the table that
  688. * the table can be profitably grown again.
  689. *
  690. * Note that if size == nsize, then
  691. * space->is_tree_small == 0.
  692. */
  693. } while ((space->is_tree_small > 0) &&
  694. (((nsize - size) * sizeof(struct ipc_entry)) <
  695. (space->is_tree_small * sizeof(struct ipc_tree_entry))));
  696. return KERN_SUCCESS;
  697. }
  698. #if MACH_KDB
  699. #include <ddb/db_output.h>
  700. #include <kern/task.h>
  701. #define printf kdbprintf
  702. ipc_entry_t db_ipc_object_by_name(
  703. task_t task,
  704. mach_port_t name);
  705. ipc_entry_t
  706. db_ipc_object_by_name(
  707. task_t task,
  708. mach_port_t name)
  709. {
  710. ipc_space_t space = task->itk_space;
  711. ipc_entry_t entry;
  712. entry = ipc_entry_lookup(space, name);
  713. if(entry != IE_NULL) {
  714. iprintf("(task 0x%x, name 0x%x) ==> object 0x%x",
  715. entry->ie_object);
  716. return (ipc_entry_t) entry->ie_object;
  717. }
  718. return entry;
  719. }
  720. #endif /* MACH_KDB */