/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
 * Copyright (c) 1993,1994 The University of Utah and
 * the Computer Systems Laboratory (CSL).
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
 * THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	vm/vm_kern.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Kernel memory management.
 */
#include <string.h>

#include <mach/kern_return.h>
#include <machine/locore.h>
#include <machine/vm_param.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <kern/slab.h>
#include <kern/thread.h>
#include <kern/printf.h>
#include <vm/pmap.h>
#include <vm/vm_fault.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

/*
 *	Variables exported by this module.
 */

static struct vm_map	kernel_map_store;
vm_map_t		kernel_map = &kernel_map_store;
vm_map_t		kernel_pageable_map;

/*
 *	projected_buffer_allocate
 *
 *	Allocate a wired-down buffer shared between kernel and user task.
 *	Fresh, zero-filled memory is allocated.
 *	If persistence is false, this buffer can only be deallocated from
 *	user task using projected_buffer_deallocate, and deallocation
 *	from user task also deallocates the buffer from the kernel map.
 *	projected_buffer_collect is called from vm_map_deallocate to
 *	automatically deallocate projected buffers on task_deallocate.
 *	Sharing with more than one user task is achieved by using
 *	projected_buffer_map for the second and subsequent tasks.
 *	The user is precluded from manipulating the VM entry of this buffer
 *	(i.e. changing protection, inheritance or machine attributes).
 */

kern_return_t
projected_buffer_allocate(
	vm_map_t	map,
	vm_size_t	size,
	int		persistence,
	vm_offset_t	*kernel_p,
	vm_offset_t	*user_p,
	vm_prot_t	protection,
	vm_inherit_t	inheritance)  /* Currently only VM_INHERIT_NONE supported */
{
	vm_object_t	object;
	vm_map_entry_t	u_entry, k_entry;
	vm_offset_t	addr;
	vm_size_t	r_size;
	kern_return_t	kr;

	if (map == VM_MAP_NULL || map == kernel_map)
		return(KERN_INVALID_ARGUMENT);

	/*
	 *	Allocate a new object.
	 */

	size = round_page(size);
	object = vm_object_allocate(size);

	vm_map_lock(kernel_map);
	kr = vm_map_find_entry(kernel_map, &addr, size, (vm_offset_t) 0,
			       VM_OBJECT_NULL, &k_entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(kernel_map);
		vm_object_deallocate(object);
		return kr;
	}

	k_entry->object.vm_object = object;
	if (!persistence)
		k_entry->projected_on = (vm_map_entry_t) -1;
		/* Mark entry so as to automatically deallocate it when
		   last corresponding user entry is deallocated */
	vm_map_unlock(kernel_map);
	*kernel_p = addr;

	vm_map_lock(map);
	kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
			       VM_OBJECT_NULL, &u_entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);
		vm_map_lock(kernel_map);
		vm_map_entry_delete(kernel_map, k_entry);
		vm_map_unlock(kernel_map);
		vm_object_deallocate(object);
		return kr;
	}

	u_entry->object.vm_object = object;
	vm_object_reference(object);
	u_entry->projected_on = k_entry;
	/* Creates coupling with kernel mapping of the buffer, and
	   also guarantees that user cannot directly manipulate
	   buffer VM entry */
	u_entry->protection = protection;
	u_entry->max_protection = protection;
	u_entry->inheritance = inheritance;
	vm_map_unlock(map);
	*user_p = addr;

	/*
	 *	Allocate wired-down memory in the object,
	 *	and enter it in the kernel pmap.
	 */
	kmem_alloc_pages(object, 0,
			 *kernel_p, *kernel_p + size,
			 VM_PROT_READ | VM_PROT_WRITE);
	memset((void *) *kernel_p, 0, size);	/* Zero fill */

	/* Set up physical mappings for user pmap */

	pmap_pageable(map->pmap, *user_p, *user_p + size, FALSE);
	for (r_size = 0; r_size < size; r_size += PAGE_SIZE) {
		addr = pmap_extract(kernel_pmap, *kernel_p + r_size);
		pmap_enter(map->pmap, *user_p + r_size, addr,
			   protection, TRUE);
	}

	return(KERN_SUCCESS);
}
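
/*
 * Example (illustrative sketch only): a typical caller allocates a
 * non-persistent projected buffer in a user task's map and later
 * releases it through that same map.  Here task_map is a hypothetical
 * vm_map_t for some user task; everything else is defined above.
 *
 *	vm_offset_t kaddr, uaddr;
 *	kern_return_t kr;
 *
 *	kr = projected_buffer_allocate(task_map, 4 * PAGE_SIZE, FALSE,
 *				       &kaddr, &uaddr,
 *				       VM_PROT_READ | VM_PROT_WRITE,
 *				       VM_INHERIT_NONE);
 *	if (kr == KERN_SUCCESS) {
 *		... kernel writes via kaddr, user task reads via uaddr ...
 *		projected_buffer_deallocate(task_map, uaddr,
 *					    uaddr + 4 * PAGE_SIZE);
 *	}
 */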

/*
 *	projected_buffer_map
 *
 *	Map an area of kernel memory onto a task's address space.
 *	No new memory is allocated; the area must previously exist in the
 *	kernel memory map.
 */

kern_return_t
projected_buffer_map(
	vm_map_t	map,
	vm_offset_t	kernel_addr,
	vm_size_t	size,
	vm_offset_t	*user_p,
	vm_prot_t	protection,
	vm_inherit_t	inheritance)  /* Currently only VM_INHERIT_NONE supported */
{
	vm_map_entry_t	u_entry, k_entry;
	vm_offset_t	physical_addr, user_addr;
	vm_size_t	r_size;
	kern_return_t	kr;

	/*
	 *	Find entry in kernel map
	 */

	size = round_page(size);
	if (map == VM_MAP_NULL || map == kernel_map ||
	    !vm_map_lookup_entry(kernel_map, kernel_addr, &k_entry) ||
	    kernel_addr + size > k_entry->vme_end)
		return(KERN_INVALID_ARGUMENT);

	/*
	 *	Create entry in user task
	 */

	vm_map_lock(map);
	kr = vm_map_find_entry(map, &user_addr, size, (vm_offset_t) 0,
			       VM_OBJECT_NULL, &u_entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);
		return kr;
	}

	u_entry->object.vm_object = k_entry->object.vm_object;
	vm_object_reference(k_entry->object.vm_object);
	u_entry->offset = kernel_addr - k_entry->vme_start + k_entry->offset;
	u_entry->projected_on = k_entry;
	/* Creates coupling with kernel mapping of the buffer, and
	   also guarantees that user cannot directly manipulate
	   buffer VM entry */
	u_entry->protection = protection;
	u_entry->max_protection = protection;
	u_entry->inheritance = inheritance;
	u_entry->wired_count = k_entry->wired_count;
	vm_map_unlock(map);
	*user_p = user_addr;

	/* Set up physical mappings for user pmap */

	pmap_pageable(map->pmap, user_addr, user_addr + size,
		      !k_entry->wired_count);
	for (r_size = 0; r_size < size; r_size += PAGE_SIZE) {
		physical_addr = pmap_extract(kernel_pmap, kernel_addr + r_size);
		pmap_enter(map->pmap, user_addr + r_size, physical_addr,
			   protection, k_entry->wired_count);
	}

	return(KERN_SUCCESS);
}

/*
 *	projected_buffer_deallocate
 *
 *	Unmap projected buffer from task's address space.
 *	May also unmap buffer from kernel map, if buffer is not
 *	persistent and only the kernel reference remains.
 */

kern_return_t
projected_buffer_deallocate(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_entry_t	entry, k_entry;

	if (map == VM_MAP_NULL || map == kernel_map)
		return KERN_INVALID_ARGUMENT;

	vm_map_lock(map);
	if (!vm_map_lookup_entry(map, start, &entry) ||
	    end > entry->vme_end ||
	    /* Check corresponding kernel entry */
	    (k_entry = entry->projected_on) == 0) {
		vm_map_unlock(map);
		return(KERN_INVALID_ARGUMENT);
	}

	/* Prepare for deallocation */
	if (entry->vme_start < start)
		_vm_map_clip_start(&map->hdr, entry, start);
	if (entry->vme_end > end)
		_vm_map_clip_end(&map->hdr, entry, end);
	if (map->first_free == entry)		/* Adjust first_free hint */
		map->first_free = entry->vme_prev;
	entry->projected_on = 0;		/* Needed to allow deletion */
	entry->wired_count = 0;			/* Avoid unwire fault */
	vm_map_entry_delete(map, entry);
	vm_map_unlock(map);

	/* Check if the buffer is not persistent and only the
	   kernel mapping remains, and if so delete it */
	vm_map_lock(kernel_map);
	if (k_entry->projected_on == (vm_map_entry_t) -1 &&
	    k_entry->object.vm_object->ref_count == 1) {
		if (kernel_map->first_free == k_entry)
			kernel_map->first_free = k_entry->vme_prev;
		k_entry->projected_on = 0;	/* Allow unwire fault */
		vm_map_entry_delete(kernel_map, k_entry);
	}
	vm_map_unlock(kernel_map);

	return(KERN_SUCCESS);
}

/*
 *	projected_buffer_collect
 *
 *	Unmap all projected buffers from task's address space.
 */

kern_return_t
projected_buffer_collect(vm_map_t map)
{
	vm_map_entry_t	entry, next;

	if (map == VM_MAP_NULL || map == kernel_map)
		return(KERN_INVALID_ARGUMENT);

	for (entry = vm_map_first_entry(map);
	     entry != vm_map_to_entry(map);
	     entry = next) {
		next = entry->vme_next;
		if (entry->projected_on != 0)
			projected_buffer_deallocate(map, entry->vme_start, entry->vme_end);
	}

	return(KERN_SUCCESS);
}

/*
 *	projected_buffer_in_range
 *
 *	Verifies whether a projected buffer exists in the address range
 *	given.
 */

boolean_t
projected_buffer_in_range(
	vm_map_t	map,
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_entry_t	entry;

	if (map == VM_MAP_NULL || map == kernel_map)
		return(FALSE);

	/* Find first entry */
	if (!vm_map_lookup_entry(map, start, &entry))
		entry = entry->vme_next;

	while (entry != vm_map_to_entry(map) && entry->projected_on == 0 &&
	       entry->vme_start <= end) {
		entry = entry->vme_next;
	}
	return(entry != vm_map_to_entry(map) && entry->vme_start <= end);
}

/*
 *	kmem_alloc:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 */

kern_return_t
kmem_alloc(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_object_t	object;
	vm_map_entry_t	entry;
	vm_offset_t	addr;
	unsigned int	attempts;
	kern_return_t	kr;

	/*
	 *	Allocate a new object.  We must do this before locking
	 *	the map, lest we risk deadlock with the default pager:
	 *		device_read_alloc uses kmem_alloc,
	 *		which tries to allocate an object,
	 *		which uses kmem_alloc_wired to get memory,
	 *		which blocks for pages.
	 *		then the default pager needs to read a block
	 *		to process a memory_object_data_write,
	 *		and device_read_alloc calls kmem_alloc
	 *		and deadlocks on the map lock.
	 */

	size = round_page(size);
	object = vm_object_allocate(size);

	attempts = 0;

retry:
	vm_map_lock(map);
	kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
			       VM_OBJECT_NULL, &entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);

		if (attempts == 0) {
			attempts++;
			slab_collect();
			goto retry;
		}

		printf_once("no more room for kmem_alloc in %p (%s)\n",
			    map, map->name);
		vm_object_deallocate(object);
		return kr;
	}

	entry->object.vm_object = object;
	entry->offset = 0;

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.
	 */
	vm_map_unlock(map);

	/*
	 *	Allocate wired-down memory in the new object,
	 *	for this entry, and enter it in the kernel pmap.
	 */
	kmem_alloc_pages(object, 0,
			 addr, addr + size,
			 VM_PROT_DEFAULT);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = addr;
	return KERN_SUCCESS;
}

/*
 *	kmem_alloc_wired:
 *
 *	Allocate wired-down memory in the kernel's address map
 *	or a submap.  The memory is not zero-filled.
 *
 *	The memory is allocated in the kernel_object.
 *	It may not be copied with vm_map_copy.
 */

kern_return_t
kmem_alloc_wired(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_map_entry_t	entry;
	vm_offset_t	offset;
	vm_offset_t	addr;
	unsigned int	attempts;
	kern_return_t	kr;

	/*
	 *	Use the kernel object for wired-down kernel pages.
	 *	Assume that no region of the kernel object is
	 *	referenced more than once.  We want vm_map_find_entry
	 *	to extend an existing entry if possible.
	 */

	size = round_page(size);
	attempts = 0;

retry:
	vm_map_lock(map);
	kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
			       kernel_object, &entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);

		if (attempts == 0) {
			attempts++;
			slab_collect();
			goto retry;
		}

		printf_once("no more room for kmem_alloc_wired in %p (%s)\n",
			    map, map->name);
		return kr;
	}

	/*
	 *	Since we didn't know where the new region would
	 *	start, we couldn't supply the correct offset into
	 *	the kernel object.  We only initialize the entry
	 *	if we aren't extending an existing entry.
	 */

	offset = addr - VM_MIN_KERNEL_ADDRESS;

	if (entry->object.vm_object == VM_OBJECT_NULL) {
		vm_object_reference(kernel_object);

		entry->object.vm_object = kernel_object;
		entry->offset = offset;
	}

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.
	 */
	vm_map_unlock(map);

	/*
	 *	Allocate wired-down memory in the kernel_object,
	 *	for this entry, and enter it in the kernel pmap.
	 */
	kmem_alloc_pages(kernel_object, offset,
			 addr, addr + size,
			 VM_PROT_DEFAULT);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = addr;
	return KERN_SUCCESS;
}

/*
 *	kmem_alloc_aligned:
 *
 *	Like kmem_alloc_wired, except that the memory is aligned.
 *	The size should be a power-of-2.
 */

kern_return_t
kmem_alloc_aligned(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_map_entry_t	entry;
	vm_offset_t	offset;
	vm_offset_t	addr;
	unsigned int	attempts;
	kern_return_t	kr;

	if ((size & (size - 1)) != 0)
		panic("kmem_alloc_aligned");

	/*
	 *	Use the kernel object for wired-down kernel pages.
	 *	Assume that no region of the kernel object is
	 *	referenced more than once.  We want vm_map_find_entry
	 *	to extend an existing entry if possible.
	 */

	size = round_page(size);
	attempts = 0;

retry:
	vm_map_lock(map);
	kr = vm_map_find_entry(map, &addr, size, size - 1,
			       kernel_object, &entry);
	if (kr != KERN_SUCCESS) {
		vm_map_unlock(map);

		if (attempts == 0) {
			attempts++;
			slab_collect();
			goto retry;
		}

		printf_once("no more room for kmem_alloc_aligned in %p (%s)\n",
			    map, map->name);
		return kr;
	}

	/*
	 *	Since we didn't know where the new region would
	 *	start, we couldn't supply the correct offset into
	 *	the kernel object.  We only initialize the entry
	 *	if we aren't extending an existing entry.
	 */

	offset = addr - VM_MIN_KERNEL_ADDRESS;

	if (entry->object.vm_object == VM_OBJECT_NULL) {
		vm_object_reference(kernel_object);

		entry->object.vm_object = kernel_object;
		entry->offset = offset;
	}

	/*
	 *	Since we have not given out this address yet,
	 *	it is safe to unlock the map.
	 */
	vm_map_unlock(map);

	/*
	 *	Allocate wired-down memory in the kernel_object,
	 *	for this entry, and enter it in the kernel pmap.
	 */
	kmem_alloc_pages(kernel_object, offset,
			 addr, addr + size,
			 VM_PROT_DEFAULT);

	/*
	 *	Return the memory, not zeroed.
	 */
	*addrp = addr;
	return KERN_SUCCESS;
}
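
/*
 * Example (illustrative sketch only): since the alignment mask handed
 * to vm_map_find_entry is size - 1, callers must pass a power-of-two,
 * page-multiple size.
 *
 *	vm_offset_t table;
 *
 *	if (kmem_alloc_aligned(kernel_map, &table, 8 * PAGE_SIZE)
 *	    == KERN_SUCCESS)
 *		... table is aligned on an 8 * PAGE_SIZE boundary ...
 *
 * (This assumes PAGE_SIZE is itself a power of two; a non-power-of-two
 * size hits the panic check above.)
 */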

/*
 *	kmem_alloc_pageable:
 *
 *	Allocate pageable memory in the kernel's address map.
 */

kern_return_t
kmem_alloc_pageable(
	vm_map_t	map,
	vm_offset_t	*addrp,
	vm_size_t	size)
{
	vm_offset_t	addr;
	kern_return_t	kr;

	addr = vm_map_min(map);
	kr = vm_map_enter(map, &addr, round_page(size),
			  (vm_offset_t) 0, TRUE,
			  VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS) {
		printf_once("no more room for kmem_alloc_pageable in %p (%s)\n",
			    map, map->name);
		return kr;
	}

	*addrp = addr;
	return KERN_SUCCESS;
}
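
/*
 * Example (illustrative sketch only, assuming kernel_pageable_map has
 * been set up as a pageable submap of kernel_map): pageable kernel
 * allocations are normally made in such a submap rather than directly
 * in kernel_map.
 *
 *	vm_offset_t buf;
 *
 *	if (kmem_alloc_pageable(kernel_pageable_map, &buf, 16 * PAGE_SIZE)
 *	    == KERN_SUCCESS) {
 *		... pages are faulted in on demand and may be paged out ...
 *		kmem_free(kernel_pageable_map, buf, 16 * PAGE_SIZE);
 *	}
 */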

/*
 *	kmem_free:
 *
 *	Release a region of kernel virtual memory allocated
 *	with kmem_alloc, kmem_alloc_wired, or kmem_alloc_pageable,
 *	and return the physical pages associated with that region.
 */

void
kmem_free(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	kern_return_t	kr;

	kr = vm_map_remove(map, trunc_page(addr), round_page(addr + size));
	if (kr != KERN_SUCCESS)
		panic("kmem_free");
}
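
/*
 * Example (illustrative sketch only): the usual allocate/free pairing
 * for wired kernel memory, using only the routines defined above.
 *
 *	vm_offset_t buf;
 *	kern_return_t kr;
 *
 *	kr = kmem_alloc_wired(kernel_map, &buf, 3 * PAGE_SIZE);
 *	if (kr == KERN_SUCCESS) {
 *		memset((void *) buf, 0, 3 * PAGE_SIZE);  (memory is not zero-filled)
 *		... use buf ...
 *		kmem_free(kernel_map, buf, 3 * PAGE_SIZE);
 *	}
 */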

/*
 *	Allocate new wired pages in an object.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_alloc_pages(
	vm_object_t	object,
	vm_offset_t	offset,
	vm_offset_t	start,
	vm_offset_t	end,
	vm_prot_t	protection)
{
	/*
	 *	Mark the pmap region as not pageable.
	 */
	pmap_pageable(kernel_pmap, start, end, FALSE);

	while (start < end) {
		vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Allocate a page
		 */
		while ((mem = vm_page_alloc(object, offset))
			    == VM_PAGE_NULL) {
			vm_object_unlock(object);
			VM_PAGE_WAIT((void (*)()) 0);
			vm_object_lock(object);
		}

		/*
		 *	Wire it down
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 *	Enter it in the kernel pmap
		 */
		PMAP_ENTER(kernel_pmap, start, mem,
			   protection, TRUE);

		vm_object_lock(object);
		PAGE_WAKEUP_DONE(mem);
		vm_object_unlock(object);

		start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	Remap wired pages in an object into a new region.
 *	The object is assumed to be mapped into the kernel map or
 *	a submap.
 */
void
kmem_remap_pages(
	vm_object_t	object,
	vm_offset_t	offset,
	vm_offset_t	start,
	vm_offset_t	end,
	vm_prot_t	protection)
{
	/*
	 *	Mark the pmap region as not pageable.
	 */
	pmap_pageable(kernel_pmap, start, end, FALSE);

	while (start < end) {
		vm_page_t	mem;

		vm_object_lock(object);

		/*
		 *	Find a page
		 */
		if ((mem = vm_page_lookup(object, offset)) == VM_PAGE_NULL)
			panic("kmem_remap_pages");

		/*
		 *	Wire it down (again)
		 */
		vm_page_lock_queues();
		vm_page_wire(mem);
		vm_page_unlock_queues();
		vm_object_unlock(object);

		/*
		 *	Enter it in the kernel pmap.  The page isn't busy,
		 *	but this shouldn't be a problem because it is wired.
		 */
		PMAP_ENTER(kernel_pmap, start, mem,
			   protection, TRUE);

		start += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
}

/*
 *	kmem_submap:
 *
 *	Initializes a map to manage a subrange
 *	of the kernel virtual address space.
 *
 *	Arguments are as follows:
 *
 *	map		Map to initialize
 *	parent		Map to take range from
 *	size		Size of range to find
 *	min, max	Returned endpoints of map
 */
void
kmem_submap(
	vm_map_t	map,
	vm_map_t	parent,
	vm_offset_t	*min,
	vm_offset_t	*max,
	vm_size_t	size)
{
	vm_offset_t	addr;
	kern_return_t	kr;

	size = round_page(size);

	/*
	 *	Need reference on submap object because it is internal
	 *	to the vm_system.  vm_object_enter will never be called
	 *	on it (usual source of reference for vm_map_enter).
	 */
	vm_object_reference(vm_submap_object);

	addr = vm_map_min(parent);
	kr = vm_map_enter(parent, &addr, size,
			  (vm_offset_t) 0, TRUE,
			  vm_submap_object, (vm_offset_t) 0, FALSE,
			  VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		panic("kmem_submap");

	pmap_reference(vm_map_pmap(parent));
	vm_map_setup(map, vm_map_pmap(parent), addr, addr + size);
	kr = vm_map_submap(parent, addr, addr + size, map);
	if (kr != KERN_SUCCESS)
		panic("kmem_submap");

	*min = addr;
	*max = addr + size;
}
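
/*
 * Example (illustrative sketch only): carving a submap out of
 * kernel_map at startup, assuming a statically allocated struct vm_map
 * for it.  The name device_map_store is hypothetical.
 *
 *	static struct vm_map device_map_store;
 *	vm_offset_t min, max;
 *
 *	kmem_submap(&device_map_store, kernel_map, &min, &max,
 *		    128 * 1024 * 1024);
 *
 * Afterwards the range [min, max) of kernel_map is managed by the
 * submap, and the kmem_alloc* routines above can be pointed at
 * &device_map_store instead of kernel_map.
 */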

/*
 *	kmem_init:
 *
 *	Initialize the kernel's virtual memory map, taking
 *	into account all memory allocated up to this time.
 */
void kmem_init(
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end);

	/*
	 *	Reserve virtual memory allocated up to this time.
	 */
	if (start != VM_MIN_KERNEL_ADDRESS) {
		kern_return_t rc;
		vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;

		rc = vm_map_enter(kernel_map,
				  &addr, start - VM_MIN_KERNEL_ADDRESS,
				  (vm_offset_t) 0, TRUE,
				  VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
				  VM_PROT_DEFAULT, VM_PROT_ALL,
				  VM_INHERIT_DEFAULT);
		if (rc)
			panic("vm_map_enter failed (%d)\n", rc);
	}
}

/*
 *	New and improved IO wiring support.
 */

/*
 *	kmem_io_map_copyout:
 *
 *	Establish temporary mapping in designated map for the memory
 *	passed in.  Memory format must be a page_list vm_map_copy.
 *	Mapping is READ-ONLY.
 */

kern_return_t
kmem_io_map_copyout(
	vm_map_t	map,
	vm_offset_t	*addr,		/* actual addr of data */
	vm_offset_t	*alloc_addr,	/* page aligned addr */
	vm_size_t	*alloc_size,	/* size allocated */
	vm_map_copy_t	copy,
	vm_size_t	min_size)	/* Do at least this much */
{
	vm_offset_t	myaddr, offset;
	vm_size_t	mysize, copy_size;
	kern_return_t	ret;
	vm_page_t	*page_list;
	vm_map_copy_t	new_copy;
	int		i;

	assert(copy->type == VM_MAP_COPY_PAGE_LIST);
	assert(min_size != 0);

	/*
	 *	Figure out the size in vm pages.
	 */
	min_size += copy->offset - trunc_page(copy->offset);
	min_size = round_page(min_size);
	mysize = round_page(copy->offset + copy->size) -
		trunc_page(copy->offset);

	/*
	 *	If total size is larger than one page list and
	 *	we don't have to do more than one page list, then
	 *	only do one page list.
	 *
	 * XXX	Could be much smarter about this ... like trimming length
	 * XXX	if we need more than one page list but not all of them.
	 */

	copy_size = ptoa(copy->cpy_npages);
	if (mysize > copy_size && copy_size > min_size)
		mysize = copy_size;

	/*
	 *	Allocate some address space in the map (must be kernel
	 *	space).
	 */
	myaddr = vm_map_min(map);
	ret = vm_map_enter(map, &myaddr, mysize,
			   (vm_offset_t) 0, TRUE,
			   VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
			   VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

	if (ret != KERN_SUCCESS)
		return(ret);

	/*
	 *	Tell the pmap module that this will be wired, and
	 *	enter the mappings.
	 */
	pmap_pageable(vm_map_pmap(map), myaddr, myaddr + mysize, TRUE);

	*addr = myaddr + (copy->offset - trunc_page(copy->offset));
	*alloc_addr = myaddr;
	*alloc_size = mysize;

	offset = myaddr;
	page_list = &copy->cpy_page_list[0];

	while (TRUE) {
		for (i = 0; i < copy->cpy_npages; i++, offset += PAGE_SIZE) {
			PMAP_ENTER(vm_map_pmap(map), offset, *page_list,
				   VM_PROT_READ, TRUE);
			page_list++;
		}

		if (offset == (myaddr + mysize))
			break;

		/*
		 *	Onward to the next page_list.  The extend_cont
		 *	leaves the current page list's pages alone;
		 *	they'll be cleaned up at discard.  Reset this
		 *	copy's continuation to discard the next one.
		 */
		vm_map_copy_invoke_extend_cont(copy, &new_copy, &ret);

		if (ret != KERN_SUCCESS) {
			kmem_io_map_deallocate(map, myaddr, mysize);
			return(ret);
		}
		copy->cpy_cont = vm_map_copy_discard_cont;
		copy->cpy_cont_args = (char *) new_copy;
		copy = new_copy;
		page_list = &copy->cpy_page_list[0];
	}

	return(ret);
}

/*
 *	kmem_io_map_deallocate:
 *
 *	Get rid of the mapping established by kmem_io_map_copyout.
 *	Assumes that addr and size have been rounded to page boundaries.
 *	(e.g., the alloc_addr and alloc_size returned by kmem_io_map_copyout)
 */

void
kmem_io_map_deallocate(
	vm_map_t	map,
	vm_offset_t	addr,
	vm_size_t	size)
{
	/*
	 *	Remove the mappings.  The pmap_remove is needed.
	 */

	pmap_remove(vm_map_pmap(map), addr, addr + size);
	vm_map_remove(map, addr, addr + size);
}

/*
 *	Routine:	copyinmap
 *	Purpose:
 *		Like copyin, except that fromaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
int copyinmap(
	vm_map_t	map,
	char		*fromaddr,
	char		*toaddr,
	int		length)
{
	if (vm_map_pmap(map) == kernel_pmap) {
		/* assume a correct copy */
		memcpy(toaddr, fromaddr, length);
		return 0;
	}

	if (current_map() == map)
		return copyin(fromaddr, toaddr, length);

	return 1;
}

/*
 *	Routine:	copyoutmap
 *	Purpose:
 *		Like copyout, except that toaddr is an address
 *		in the specified VM map.  This implementation
 *		is incomplete; it handles the current user map
 *		and the kernel map/submaps.
 */
int copyoutmap(
	vm_map_t	map,
	char		*fromaddr,
	char		*toaddr,
	int		length)
{
	if (vm_map_pmap(map) == kernel_pmap) {
		/* assume a correct copy */
		memcpy(toaddr, fromaddr, length);
		return 0;
	}

	if (current_map() == map)
		return copyout(fromaddr, toaddr, length);

	return 1;
}
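
/*
 * Example (illustrative sketch only): both routines follow the
 * copyin/copyout convention of returning 0 on success and non-zero on
 * failure (here, 1 when the map is neither the kernel map nor the
 * current user map), so callers test the result rather than a
 * kern_return_t.  The name user_ptr below is a hypothetical user-space
 * address valid in current_map().
 *
 *	char buf[64];
 *
 *	if (copyinmap(current_map(), user_ptr, buf, sizeof buf) != 0)
 *		return KERN_INVALID_ADDRESS;
 */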