vm_debug.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498
  1. /*
  2. * Mach Operating System
  3. * Copyright (c) 1991,1990 Carnegie Mellon University
  4. * All Rights Reserved.
  5. *
  6. * Permission to use, copy, modify and distribute this software and its
  7. * documentation is hereby granted, provided that both the copyright
  8. * notice and this permission notice appear in all copies of the
  9. * software, derivative works or modified versions, and any portions
  10. * thereof, and that both notices appear in supporting documentation.
  11. *
  12. * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
  13. * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
  14. * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
  15. *
  16. * Carnegie Mellon requests users of this software to return to
  17. *
  18. * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  19. * School of Computer Science
  20. * Carnegie Mellon University
  21. * Pittsburgh PA 15213-3890
  22. *
  23. * any improvements or extensions that they make and grant Carnegie Mellon
  24. * the rights to redistribute these changes.
  25. */
  26. /*
  27. * File: vm/vm_debug.c.
  28. * Author: Rich Draves
  29. * Date: March, 1990
  30. *
  31. * Exported kernel calls. See mach_debug/mach_debug.defs.
  32. */
  33. #include <string.h>
  34. #include <kern/debug.h>
  35. #include <kern/thread.h>
  36. #include <mach/kern_return.h>
  37. #include <mach/machine/vm_types.h>
  38. #include <mach/memory_object.h>
  39. #include <mach/vm_prot.h>
  40. #include <mach/vm_inherit.h>
  41. #include <mach/vm_param.h>
  42. #include <mach_debug/vm_info.h>
  43. #include <mach_debug/hash_info.h>
  44. #include <vm/vm_map.h>
  45. #include <vm/vm_kern.h>
  46. #include <vm/vm_object.h>
  47. #include <kern/task.h>
  48. #include <kern/host.h>
  49. #include <ipc/ipc_port.h>
  50. #if MACH_VM_DEBUG
  51. /*
  52. * Routine: vm_object_real_name
  53. * Purpose:
  54. * Convert a VM object to a name port.
  55. * Conditions:
  56. * Takes object and port locks.
  57. * Returns:
  58. * A naked send right for the object's name port,
  59. * or IP_NULL if the object or its name port is null.
  60. */
  61. ipc_port_t
  62. vm_object_real_name(vm_object_t object)
  63. {
  64. ipc_port_t port = IP_NULL;
  65. if (object != VM_OBJECT_NULL) {
  66. vm_object_lock(object);
  67. if (object->pager_name != IP_NULL)
  68. port = ipc_port_make_send(object->pager_name);
  69. vm_object_unlock(object);
  70. }
  71. return port;
  72. }
/*
 *	Routine:	mach_vm_region_info [kernel call]
 *	Purpose:
 *		Retrieve information about a VM region,
 *		including info about the object chain.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieve region/object info.
 *		KERN_INVALID_TASK	The map is null.
 *		KERN_NO_SPACE		There is no entry at/after the address.
 */
kern_return_t
mach_vm_region_info(
	vm_map_t		map,
	vm_offset_t		address,
	vm_region_info_t	*regionp,
	ipc_port_t		*portp)
{
	vm_map_t cmap;		/* current map in traversal */
	vm_map_t nmap;		/* next map to look at */
	vm_map_entry_t entry;	/* entry in current map */
	vm_object_t object;

	if (map == VM_MAP_NULL)
		return KERN_INVALID_TASK;

	/* find the entry containing (or following) the address */

	vm_map_lock_read(map);
	for (cmap = map;;) {
		/* cmap is read-locked */

		if (!vm_map_lookup_entry(cmap, address, &entry)) {
			/*
			 *	No entry covers `address`; the lookup left
			 *	`entry` at the preceding entry, so step to
			 *	the following one.
			 */
			entry = entry->vme_next;
			if (entry == vm_map_to_entry(cmap)) {
				/* ran off the end of this map */
				if (map == cmap) {
					vm_map_unlock_read(cmap);
					return KERN_NO_SPACE;
				}

				/* back out to top-level & skip this submap */

				address = vm_map_max(cmap);
				vm_map_unlock_read(cmap);
				vm_map_lock_read(map);
				cmap = map;
				continue;
			}
		}

		if (entry->is_sub_map) {
			/*
			 *	Move down to the sub map.  Lock the submap
			 *	before unlocking the parent so the submap
			 *	reference cannot go away under us.
			 */
			nmap = entry->object.sub_map;
			vm_map_lock_read(nmap);
			vm_map_unlock_read(cmap);
			cmap = nmap;
			continue;
		} else {
			break;
		}
		/*NOTREACHED*/
	}

	/* cmap is still read-locked; entry is a real (non-submap) entry */
	assert(entry->vme_start < entry->vme_end);

	regionp->vri_start = entry->vme_start;
	regionp->vri_end = entry->vme_end;

	/* attributes from the real entry */

	regionp->vri_protection = entry->protection;
	regionp->vri_max_protection = entry->max_protection;
	regionp->vri_inheritance = entry->inheritance;
	regionp->vri_wired_count = !!entry->wired_count; /* Doesn't stack */
	regionp->vri_user_wired_count = regionp->vri_wired_count; /* Obsolete */

	object = entry->object.vm_object;
	/* naked send right for the object's name port (or IP_NULL) */
	*portp = vm_object_real_name(object);
	regionp->vri_object = (vm_offset_t) object;
	regionp->vri_offset = entry->offset;
	regionp->vri_needs_copy = entry->needs_copy;
	regionp->vri_sharing = entry->is_shared;

	vm_map_unlock_read(cmap);
	return KERN_SUCCESS;
}
/*
 *	Routine:	mach_vm_object_info [kernel call]
 *	Purpose:
 *		Retrieve information about a VM object.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Retrieved object info.
 *		KERN_INVALID_ARGUMENT	The object is null.
 */
kern_return_t
mach_vm_object_info(
	vm_object_t	object,
	vm_object_info_t *infop,
	ipc_port_t	*shadowp,
	ipc_port_t	*copyp)
{
	vm_object_info_t info;
	vm_object_info_state_t state;
	ipc_port_t shadow, copy;

	if (object == VM_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Because of lock-ordering/deadlock considerations,
	 *	we can't use vm_object_real_name for the copy object.
	 */

    retry:
	vm_object_lock(object);
	copy = IP_NULL;
	if (object->copy != VM_OBJECT_NULL) {
		/*
		 *	Try-lock the copy object; on contention drop
		 *	the main lock, pause, and restart from scratch
		 *	rather than risk a deadlock.
		 */
		if (!vm_object_lock_try(object->copy)) {
			vm_object_unlock(object);
			simple_lock_pause();	/* wait a bit */
			goto retry;
		}

		if (object->copy->pager_name != IP_NULL)
			copy = ipc_port_make_send(object->copy->pager_name);
		vm_object_unlock(object->copy);
	}
	/* the shadow object is safe for vm_object_real_name */
	shadow = vm_object_real_name(object->shadow);

	/* snapshot the object's fields while it is locked */
	info.voi_object = (vm_offset_t) object;
	info.voi_pagesize = PAGE_SIZE;
	info.voi_size = object->size;
	info.voi_ref_count = object->ref_count;
	info.voi_resident_page_count = object->resident_page_count;
	info.voi_absent_count = object->absent_count;
	info.voi_copy = (vm_offset_t) object->copy;
	info.voi_shadow = (vm_offset_t) object->shadow;
	info.voi_shadow_offset = object->shadow_offset;
	info.voi_paging_offset = object->paging_offset;
	info.voi_copy_strategy = object->copy_strategy;
	info.voi_last_alloc = object->last_alloc;
	info.voi_paging_in_progress = object->paging_in_progress;

	/* collapse the object's boolean flags into one state bitmask */
	state = 0;
	if (object->pager_created)
		state |= VOI_STATE_PAGER_CREATED;
	if (object->pager_initialized)
		state |= VOI_STATE_PAGER_INITIALIZED;
	if (object->pager_ready)
		state |= VOI_STATE_PAGER_READY;
	if (object->can_persist)
		state |= VOI_STATE_CAN_PERSIST;
	if (object->internal)
		state |= VOI_STATE_INTERNAL;
	if (object->temporary)
		state |= VOI_STATE_TEMPORARY;
	if (object->alive)
		state |= VOI_STATE_ALIVE;
	if (object->lock_in_progress)
		state |= VOI_STATE_LOCK_IN_PROGRESS;
	if (object->lock_restart)
		state |= VOI_STATE_LOCK_RESTART;
	info.voi_state = state;
	vm_object_unlock(object);

	/* copy results out only after all locks are dropped */
	*infop = info;
	*shadowp = shadow;
	*copyp = copy;
	return KERN_SUCCESS;
}
/* page states under which the page carries no valid data */
#define VPI_STATE_NODATA	(VPI_STATE_BUSY|VPI_STATE_FICTITIOUS| \
				 VPI_STATE_PRIVATE|VPI_STATE_ABSENT)

/*
 *	Routine:	mach_vm_object_pages [kernel call]
 *	Purpose:
 *		Retrieve information about the pages in a VM object.
 *	Conditions:
 *		Nothing locked.  Obeys CountInOut protocol.
 *	Returns:
 *		KERN_SUCCESS		Retrieved object info.
 *		KERN_INVALID_ARGUMENT	The object is null.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
kern_return_t
mach_vm_object_pages(
	vm_object_t		object,
	vm_page_info_array_t	*pagesp,
	natural_t		*countp)
{
	vm_size_t size;
	vm_offset_t addr;
	vm_page_info_t *pages;
	unsigned int potential, actual, count;
	vm_page_t p;
	kern_return_t kr;

	if (object == VM_OBJECT_NULL)
		return KERN_INVALID_ARGUMENT;

	/* start with in-line memory */

	pages = *pagesp;
	potential = *countp;

	/*
	 *	Grow-retry loop: the resident page count can change while
	 *	the object is unlocked, so re-check after each allocation.
	 *	The loop exits with the object locked and a buffer big
	 *	enough for `actual` entries.
	 */
	for (size = 0;;) {
		vm_object_lock(object);
		actual = object->resident_page_count;
		if (actual <= potential)
			break;
		vm_object_unlock(object);

		/* free the previous too-small kernel buffer (if any) */
		if (pages != *pagesp)
			kmem_free(ipc_kernel_map, addr, size);

		size = round_page(actual * sizeof *pages);
		kr = kmem_alloc(ipc_kernel_map, &addr, size);
		if (kr != KERN_SUCCESS)
			return kr;

		pages = (vm_page_info_t *) addr;
		potential = size/sizeof *pages;
	}
	/* object is locked, we have enough wired memory */

	count = 0;
	queue_iterate(&object->memq, p, vm_page_t, listq) {
		vm_page_info_t *info = &pages[count++];
		vm_page_info_state_t state = 0;

		info->vpi_offset = p->offset;
		info->vpi_phys_addr = p->phys_addr;
		info->vpi_wire_count = p->wire_count;
		info->vpi_page_lock = p->page_lock;
		info->vpi_unlock_request = p->unlock_request;

		/* per-page flags, gathered under the object lock */
		if (p->busy)
			state |= VPI_STATE_BUSY;
		if (p->wanted)
			state |= VPI_STATE_WANTED;
		if (p->tabled)
			state |= VPI_STATE_TABLED;
		if (p->fictitious)
			state |= VPI_STATE_FICTITIOUS;
		if (p->private)
			state |= VPI_STATE_PRIVATE;
		if (p->absent)
			state |= VPI_STATE_ABSENT;
		if (p->error)
			state |= VPI_STATE_ERROR;
		if (p->dirty)
			state |= VPI_STATE_DIRTY;
		if (p->precious)
			state |= VPI_STATE_PRECIOUS;
		if (p->overwriting)
			state |= VPI_STATE_OVERWRITING;

		/*
		 *	Consult the pmap's hardware modify bit, but only
		 *	when the page has data and isn't already dirty;
		 *	record the result back into the page.
		 */
		if (((state & (VPI_STATE_NODATA|VPI_STATE_DIRTY)) == 0) &&
		    pmap_is_modified(p->phys_addr)) {
			state |= VPI_STATE_DIRTY;
			p->dirty = TRUE;
		}

		/* queue-related flags need the page-queues lock */
		vm_page_lock_queues();
		if (p->inactive)
			state |= VPI_STATE_INACTIVE;
		if (p->active)
			state |= VPI_STATE_ACTIVE;
		if (p->laundry)
			state |= VPI_STATE_LAUNDRY;
		if (p->free)
			state |= VPI_STATE_FREE;
		if (p->reference)
			state |= VPI_STATE_REFERENCE;

		/* likewise fold in the pmap's hardware reference bit */
		if (((state & (VPI_STATE_NODATA|VPI_STATE_REFERENCE)) == 0) &&
		    pmap_is_referenced(p->phys_addr)) {
			state |= VPI_STATE_REFERENCE;
			p->reference = TRUE;
		}
		vm_page_unlock_queues();

		info->vpi_state = state;
	}

	/* sanity: the resident count must match the pages we walked */
	if (object->resident_page_count != count)
		panic("mach_vm_object_pages");
	vm_object_unlock(object);

	if (pages == *pagesp) {
		/* data fit in-line; nothing to deallocate */

		*countp = actual;
	} else if (actual == 0) {
		/* nothing to report; release the kernel buffer */
		kmem_free(ipc_kernel_map, addr, size);

		*countp = 0;
	} else {
		vm_size_t size_used, rsize_used;
		vm_map_copy_t copy;

		/* kmem_alloc doesn't zero memory */

		size_used = actual * sizeof *pages;
		rsize_used = round_page(size_used);

		/* give back wholly-unused trailing pages... */
		if (rsize_used != size)
			kmem_free(ipc_kernel_map,
				  addr + rsize_used, size - rsize_used);

		/* ...and scrub the slack in the last used page */
		if (size_used != rsize_used)
			memset((void *) (addr + size_used), 0,
			       rsize_used - size_used);

		/* hand the buffer to the caller out-of-line; TRUE
		   consumes (deallocates) the kernel mapping */
		kr = vm_map_copyin(ipc_kernel_map, addr, rsize_used,
				   TRUE, &copy);
		assert(kr == KERN_SUCCESS);

		*pagesp = (vm_page_info_t *) copy;
		*countp = actual;
	}

	return KERN_SUCCESS;
}
  354. #endif /* MACH_VM_DEBUG */
  355. /*
  356. * Routine: host_virtual_physical_table_info
  357. * Purpose:
  358. * Return information about the VP table.
  359. * Conditions:
  360. * Nothing locked. Obeys CountInOut protocol.
  361. * Returns:
  362. * KERN_SUCCESS Returned information.
  363. * KERN_INVALID_HOST The host is null.
  364. * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
  365. */
  366. kern_return_t
  367. host_virtual_physical_table_info(host, infop, countp)
  368. const host_t host;
  369. hash_info_bucket_array_t *infop;
  370. natural_t *countp;
  371. {
  372. vm_offset_t addr;
  373. vm_size_t size = 0;/* '=0' to quiet gcc warnings */
  374. hash_info_bucket_t *info;
  375. unsigned int potential, actual;
  376. kern_return_t kr;
  377. if (host == HOST_NULL)
  378. return KERN_INVALID_HOST;
  379. /* start with in-line data */
  380. info = *infop;
  381. potential = *countp;
  382. for (;;) {
  383. actual = vm_page_info(info, potential);
  384. if (actual <= potential)
  385. break;
  386. /* allocate more memory */
  387. if (info != *infop)
  388. kmem_free(ipc_kernel_map, addr, size);
  389. size = round_page(actual * sizeof *info);
  390. kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
  391. if (kr != KERN_SUCCESS)
  392. return KERN_RESOURCE_SHORTAGE;
  393. info = (hash_info_bucket_t *) addr;
  394. potential = size/sizeof *info;
  395. }
  396. if (info == *infop) {
  397. /* data fit in-line; nothing to deallocate */
  398. *countp = actual;
  399. } else if (actual == 0) {
  400. kmem_free(ipc_kernel_map, addr, size);
  401. *countp = 0;
  402. } else {
  403. vm_map_copy_t copy;
  404. vm_size_t used;
  405. used = round_page(actual * sizeof *info);
  406. if (used != size)
  407. kmem_free(ipc_kernel_map, addr + used, size - used);
  408. kr = vm_map_copyin(ipc_kernel_map, addr, used,
  409. TRUE, &copy);
  410. assert(kr == KERN_SUCCESS);
  411. *infop = (hash_info_bucket_t *) copy;
  412. *countp = actual;
  413. }
  414. return KERN_SUCCESS;
  415. }