vm_user.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482
  1. /*
  2. * Mach Operating System
  3. * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
  4. * All Rights Reserved.
  5. *
  6. * Permission to use, copy, modify and distribute this software and its
  7. * documentation is hereby granted, provided that both the copyright
  8. * notice and this permission notice appear in all copies of the
  9. * software, derivative works or modified versions, and any portions
  10. * thereof, and that both notices appear in supporting documentation.
  11. *
  12. * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  13. * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  14. * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  15. *
  16. * Carnegie Mellon requests users of this software to return to
  17. *
  18. * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  19. * School of Computer Science
  20. * Carnegie Mellon University
  21. * Pittsburgh PA 15213-3890
  22. *
  23. * any improvements or extensions that they make and grant Carnegie Mellon
  24. * the rights to redistribute these changes.
  25. */
  26. /*
  27. * File: vm/vm_user.c
  28. * Author: Avadis Tevanian, Jr., Michael Wayne Young
  29. *
  30. * User-exported virtual memory functions.
  31. */
  32. #include <mach/boolean.h>
  33. #include <mach/kern_return.h>
  34. #include <mach/mach_types.h> /* to get vm_address_t */
  35. #include <mach/memory_object.h>
  36. #include <mach/std_types.h> /* to get pointer_t */
  37. #include <mach/vm_attributes.h>
  38. #include <mach/vm_param.h>
  39. #include <mach/vm_statistics.h>
  40. #include <mach/vm_cache_statistics.h>
  41. #include <kern/host.h>
  42. #include <kern/task.h>
  43. #include <vm/vm_fault.h>
  44. #include <vm/vm_kern.h>
  45. #include <vm/vm_map.h>
  46. #include <vm/vm_object.h>
  47. #include <vm/memory_object_proxy.h>
  48. #include <vm/vm_page.h>
  49. vm_statistics_data_t vm_stat;
  50. /*
  51. * vm_allocate allocates "zero fill" memory in the specfied
  52. * map.
  53. */
  54. kern_return_t vm_allocate(
  55. vm_map_t map,
  56. vm_offset_t *addr,
  57. vm_size_t size,
  58. boolean_t anywhere)
  59. {
  60. kern_return_t result;
  61. if (map == VM_MAP_NULL)
  62. return(KERN_INVALID_ARGUMENT);
  63. if (size == 0) {
  64. *addr = 0;
  65. return(KERN_SUCCESS);
  66. }
  67. if (anywhere)
  68. *addr = vm_map_min(map);
  69. else
  70. *addr = trunc_page(*addr);
  71. size = round_page(size);
  72. result = vm_map_enter(
  73. map,
  74. addr,
  75. size,
  76. (vm_offset_t)0,
  77. anywhere,
  78. VM_OBJECT_NULL,
  79. (vm_offset_t)0,
  80. FALSE,
  81. VM_PROT_DEFAULT,
  82. VM_PROT_ALL,
  83. VM_INHERIT_DEFAULT);
  84. return(result);
  85. }
  86. /*
  87. * vm_deallocate deallocates the specified range of addresses in the
  88. * specified address map.
  89. */
  90. kern_return_t vm_deallocate(
  91. vm_map_t map,
  92. vm_offset_t start,
  93. vm_size_t size)
  94. {
  95. if (map == VM_MAP_NULL)
  96. return(KERN_INVALID_ARGUMENT);
  97. if (size == (vm_offset_t) 0)
  98. return(KERN_SUCCESS);
  99. return(vm_map_remove(map, trunc_page(start), round_page(start+size)));
  100. }
  101. /*
  102. * vm_inherit sets the inheritance of the specified range in the
  103. * specified map.
  104. */
  105. kern_return_t vm_inherit(
  106. vm_map_t map,
  107. vm_offset_t start,
  108. vm_size_t size,
  109. vm_inherit_t new_inheritance)
  110. {
  111. if (map == VM_MAP_NULL)
  112. return(KERN_INVALID_ARGUMENT);
  113. switch (new_inheritance) {
  114. case VM_INHERIT_NONE:
  115. case VM_INHERIT_COPY:
  116. case VM_INHERIT_SHARE:
  117. break;
  118. default:
  119. return(KERN_INVALID_ARGUMENT);
  120. }
  121. /*Check if range includes projected buffer;
  122. user is not allowed direct manipulation in that case*/
  123. if (projected_buffer_in_range(map, start, start+size))
  124. return(KERN_INVALID_ARGUMENT);
  125. return(vm_map_inherit(map,
  126. trunc_page(start),
  127. round_page(start+size),
  128. new_inheritance));
  129. }
  130. /*
  131. * vm_protect sets the protection of the specified range in the
  132. * specified map.
  133. */
  134. kern_return_t vm_protect(
  135. vm_map_t map,
  136. vm_offset_t start,
  137. vm_size_t size,
  138. boolean_t set_maximum,
  139. vm_prot_t new_protection)
  140. {
  141. if ((map == VM_MAP_NULL) ||
  142. (new_protection & ~(VM_PROT_ALL|VM_PROT_NOTIFY)))
  143. return(KERN_INVALID_ARGUMENT);
  144. /*Check if range includes projected buffer;
  145. user is not allowed direct manipulation in that case*/
  146. if (projected_buffer_in_range(map, start, start+size))
  147. return(KERN_INVALID_ARGUMENT);
  148. return(vm_map_protect(map,
  149. trunc_page(start),
  150. round_page(start+size),
  151. new_protection,
  152. set_maximum));
  153. }
  154. kern_return_t vm_statistics(
  155. vm_map_t map,
  156. vm_statistics_data_t *stat)
  157. {
  158. if (map == VM_MAP_NULL)
  159. return(KERN_INVALID_ARGUMENT);
  160. *stat = vm_stat;
  161. stat->pagesize = PAGE_SIZE;
  162. stat->free_count = vm_page_mem_free();
  163. stat->active_count = vm_page_active_count;
  164. stat->inactive_count = vm_page_inactive_count;
  165. stat->wire_count = vm_page_wire_count;
  166. return(KERN_SUCCESS);
  167. }
  168. kern_return_t vm_cache_statistics(
  169. vm_map_t map,
  170. vm_cache_statistics_data_t *stats)
  171. {
  172. if (map == VM_MAP_NULL)
  173. return KERN_INVALID_ARGUMENT;
  174. stats->cache_object_count = vm_object_external_count;
  175. stats->cache_count = vm_object_external_pages;
  176. /* XXX Not implemented yet */
  177. stats->active_tmp_count = 0;
  178. stats->inactive_tmp_count = 0;
  179. stats->active_perm_count = 0;
  180. stats->inactive_perm_count = 0;
  181. stats->dirty_count = 0;
  182. stats->laundry_count = 0;
  183. stats->writeback_count = 0;
  184. stats->slab_count = 0;
  185. stats->slab_reclaim_count = 0;
  186. return KERN_SUCCESS;
  187. }
  188. /*
  189. * Handle machine-specific attributes for a mapping, such
  190. * as cachability, migrability, etc.
  191. */
  192. kern_return_t vm_machine_attribute(
  193. vm_map_t map,
  194. vm_address_t address,
  195. vm_size_t size,
  196. vm_machine_attribute_t attribute,
  197. vm_machine_attribute_val_t* value) /* IN/OUT */
  198. {
  199. if (map == VM_MAP_NULL)
  200. return(KERN_INVALID_ARGUMENT);
  201. /*Check if range includes projected buffer;
  202. user is not allowed direct manipulation in that case*/
  203. if (projected_buffer_in_range(map, address, address+size))
  204. return(KERN_INVALID_ARGUMENT);
  205. return vm_map_machine_attribute(map, address, size, attribute, value);
  206. }
  207. kern_return_t vm_read(
  208. vm_map_t map,
  209. vm_address_t address,
  210. vm_size_t size,
  211. pointer_t *data,
  212. vm_size_t *data_size)
  213. {
  214. kern_return_t error;
  215. vm_map_copy_t ipc_address;
  216. if (map == VM_MAP_NULL)
  217. return(KERN_INVALID_ARGUMENT);
  218. if ((error = vm_map_copyin(map,
  219. address,
  220. size,
  221. FALSE, /* src_destroy */
  222. &ipc_address)) == KERN_SUCCESS) {
  223. *data = (pointer_t) ipc_address;
  224. *data_size = size;
  225. }
  226. return(error);
  227. }
  228. kern_return_t vm_write(
  229. vm_map_t map,
  230. vm_address_t address,
  231. pointer_t data,
  232. vm_size_t size)
  233. {
  234. if (map == VM_MAP_NULL)
  235. return KERN_INVALID_ARGUMENT;
  236. return vm_map_copy_overwrite(map, address, (vm_map_copy_t) data,
  237. FALSE /* interruptible XXX */);
  238. }
  239. kern_return_t vm_copy(
  240. vm_map_t map,
  241. vm_address_t source_address,
  242. vm_size_t size,
  243. vm_address_t dest_address)
  244. {
  245. vm_map_copy_t copy;
  246. kern_return_t kr;
  247. if (map == VM_MAP_NULL)
  248. return KERN_INVALID_ARGUMENT;
  249. kr = vm_map_copyin(map, source_address, size,
  250. FALSE, &copy);
  251. if (kr != KERN_SUCCESS)
  252. return kr;
  253. kr = vm_map_copy_overwrite(map, dest_address, copy,
  254. FALSE /* interruptible XXX */);
  255. if (kr != KERN_SUCCESS) {
  256. vm_map_copy_discard(copy);
  257. return kr;
  258. }
  259. return KERN_SUCCESS;
  260. }
/*
 *	Routine:	vm_map
 *
 *	Map a memory object (named by the port "memory_object") into
 *	the target task's address space at *address (or anywhere, per
 *	"anywhere"/"mask"), with the given protections and inheritance.
 *	If "copy" is TRUE, a copy of the object's contents is mapped
 *	rather than the object itself.  An invalid port maps anonymous
 *	zero-fill memory instead.
 */
kern_return_t vm_map(
	vm_map_t	target_map,
	vm_offset_t	*address,
	vm_size_t	size,
	vm_offset_t	mask,
	boolean_t	anywhere,
	ipc_port_t	memory_object,
	vm_offset_t	offset,
	boolean_t	copy,
	vm_prot_t	cur_protection,
	vm_prot_t	max_protection,
	vm_inherit_t	inheritance)
{
	vm_object_t	object;
	kern_return_t	result;

	if ((target_map == VM_MAP_NULL) ||
	    (cur_protection & ~VM_PROT_ALL) ||
	    (max_protection & ~VM_PROT_ALL))
		return(KERN_INVALID_ARGUMENT);

	switch (inheritance) {
	case VM_INHERIT_NONE:
	case VM_INHERIT_COPY:
	case VM_INHERIT_SHARE:
		break;
	default:
		return(KERN_INVALID_ARGUMENT);
	}

	if (size == 0)
		return KERN_INVALID_ARGUMENT;

	*address = trunc_page(*address);
	size = round_page(size);

	if (!IP_VALID(memory_object)) {
		/* No backing object: map anonymous zero-fill memory. */
		object = VM_OBJECT_NULL;
		offset = 0;
		copy = FALSE;
	} else if ((object = vm_object_enter(memory_object, size, FALSE))
		   == VM_OBJECT_NULL)
	{
		/*
		 *	The port did not name a memory object directly;
		 *	it may be a proxy that wraps a real memory object
		 *	with reduced access rights.
		 */
		ipc_port_t real_memobj;
		vm_prot_t prot;
		result = memory_object_proxy_lookup (memory_object, &real_memobj,
						     &prot);
		if (result != KERN_SUCCESS)
			return result;

		/* Reduce the allowed access to the memory object. */
		max_protection &= prot;
		cur_protection &= prot;

		if ((object = vm_object_enter(real_memobj, size, FALSE))
		    == VM_OBJECT_NULL)
			return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Perform the copy if requested
	 */

	if (copy) {
		vm_object_t	new_object;
		vm_offset_t	new_offset;

		/* "copy" is updated to reflect whether the mapping
		   below should still use copy semantics. */
		result = vm_object_copy_strategically(object, offset, size,
						      &new_object, &new_offset,
						      &copy);

		/*
		 *	Throw away the reference to the
		 *	original object, as it won't be mapped.
		 */

		vm_object_deallocate(object);

		if (result != KERN_SUCCESS)
			return (result);

		object = new_object;
		offset = new_offset;
	}

	/*
	 *	Enter the mapping.  On failure, drop the object
	 *	reference acquired above, since vm_map_enter did
	 *	not consume it.
	 */
	if ((result = vm_map_enter(target_map,
				   address, size, mask, anywhere,
				   object, offset,
				   copy,
				   cur_protection, max_protection, inheritance
				   )) != KERN_SUCCESS)
		vm_object_deallocate(object);

	return(result);
}
  343. /*
  344. * Specify that the range of the virtual address space
  345. * of the target task must not cause page faults for
  346. * the indicated accesses.
  347. *
  348. * [ To unwire the pages, specify VM_PROT_NONE. ]
  349. */
  350. kern_return_t vm_wire(port, map, start, size, access)
  351. const ipc_port_t port;
  352. vm_map_t map;
  353. vm_offset_t start;
  354. vm_size_t size;
  355. vm_prot_t access;
  356. {
  357. boolean_t priv;
  358. if (!IP_VALID(port))
  359. return KERN_INVALID_HOST;
  360. ip_lock(port);
  361. if (!ip_active(port) ||
  362. (ip_kotype(port) != IKOT_HOST_PRIV
  363. && ip_kotype(port) != IKOT_HOST))
  364. {
  365. ip_unlock(port);
  366. return KERN_INVALID_HOST;
  367. }
  368. priv = ip_kotype(port) == IKOT_HOST_PRIV;
  369. ip_unlock(port);
  370. if (map == VM_MAP_NULL)
  371. return KERN_INVALID_TASK;
  372. if (access & ~VM_PROT_ALL)
  373. return KERN_INVALID_ARGUMENT;
  374. /*Check if range includes projected buffer;
  375. user is not allowed direct manipulation in that case*/
  376. if (projected_buffer_in_range(map, start, start+size))
  377. return(KERN_INVALID_ARGUMENT);
  378. /* TODO: make it tunable */
  379. if (!priv && access != VM_PROT_NONE && map->size_wired + size > 65536)
  380. return KERN_NO_ACCESS;
  381. return vm_map_pageable(map, trunc_page(start), round_page(start+size),
  382. access, TRUE, TRUE);
  383. }
  384. kern_return_t vm_wire_all(const ipc_port_t port, vm_map_t map, vm_wire_t flags)
  385. {
  386. if (!IP_VALID(port))
  387. return KERN_INVALID_HOST;
  388. ip_lock(port);
  389. if (!ip_active(port)
  390. || (ip_kotype(port) != IKOT_HOST_PRIV)) {
  391. ip_unlock(port);
  392. return KERN_INVALID_HOST;
  393. }
  394. ip_unlock(port);
  395. if (map == VM_MAP_NULL) {
  396. return KERN_INVALID_TASK;
  397. }
  398. if (flags & ~VM_WIRE_ALL) {
  399. return KERN_INVALID_ARGUMENT;
  400. }
  401. /*Check if range includes projected buffer;
  402. user is not allowed direct manipulation in that case*/
  403. if (projected_buffer_in_range(map, map->min_offset, map->max_offset)) {
  404. return KERN_INVALID_ARGUMENT;
  405. }
  406. return vm_map_pageable_all(map, flags);
  407. }