/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
 * Copyright (c) 1993,1994 The University of Utah and
 * the Computer Systems Laboratory (CSL).
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
 * THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:    vm/vm_resident.c
 * Author:  Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Resident memory management module.
 */

#include <kern/printf.h>
#include <string.h>

#include <mach/vm_prot.h>
#include <kern/counters.h>
#include <kern/debug.h>
#include <kern/list.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/vm_statistics.h>
#include <machine/vm_param.h>
#include <kern/xpr.h>
#include <kern/slab.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>

#if MACH_VM_DEBUG
#include <mach/kern_return.h>
#include <mach_debug/hash_info.h>
#include <vm/vm_user.h>
#endif

#if MACH_KDB
#include <ddb/db_output.h>
#include <vm/vm_print.h>
#endif /* MACH_KDB */
/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

/*
 * These variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of pmap_steal_memory
 * here also uses them internally.
 */
vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;

/*
 * The vm_page_lookup() routine, which provides for fast
 * (virtual memory object, offset) to page lookup, employs
 * the following hash table.  The vm_page_{insert,remove}
 * routines install and remove associations in the table.
 * [This table is often called the virtual-to-physical,
 * or VP, table.]
 */
typedef struct {
        decl_simple_lock_data(,lock)
        vm_page_t pages;
} vm_page_bucket_t;

vm_page_bucket_t *vm_page_buckets;              /* Array of buckets */
unsigned long    vm_page_bucket_count = 0;      /* How big is array? */
unsigned long    vm_page_hash_mask;             /* Mask for hash function */

static struct list vm_page_queue_fictitious;
decl_simple_lock_data(,vm_page_queue_free_lock)
int              vm_page_fictitious_count;
int              vm_object_external_count;
int              vm_object_external_pages;

/*
 * Occasionally, the virtual memory system uses
 * resident page structures that do not refer to
 * real pages, for example to leave a page with
 * important state information in the VP table.
 *
 * These page structures are allocated the way
 * most other kernel structures are.
 */
struct kmem_cache vm_page_cache;

/*
 * Fictitious pages don't have a physical address,
 * but we must initialize phys_addr to something.
 * For debugging, this should be a strange value
 * that the pmap module can recognize in assertions.
 */
phys_addr_t vm_page_fictitious_addr = (phys_addr_t) -1;

/*
 * Resident page structures are also chained on
 * queues that are used by the page replacement
 * system (pageout daemon).  These queues are
 * defined here, but are shared by the pageout
 * module.
 */
decl_simple_lock_data(,vm_page_queue_lock)
int              vm_page_active_count;
int              vm_page_inactive_count;
int              vm_page_wire_count;

/*
 * Several page replacement parameters are also
 * shared with this module, so that page allocation
 * (done here in vm_page_alloc) can trigger the
 * pageout daemon.
 */
int vm_page_laundry_count = 0;
int vm_page_external_laundry_count = 0;

/*
 * The VM system has a couple of heuristics for deciding
 * that pages are "uninteresting" and should be placed
 * on the inactive queue as likely candidates for replacement.
 * These variables let the heuristics be controlled at run-time
 * to make experimentation easier.
 */
boolean_t vm_page_deactivate_behind = TRUE;
boolean_t vm_page_deactivate_hint = TRUE;
/*
 * vm_page_bootstrap:
 *
 * Initializes the resident memory module.
 *
 * Allocates memory for the page cells, and
 * for the object/offset-to-page hash table headers.
 * Each page cell is initialized and placed on the free list.
 * Returns the range of available kernel virtual memory.
 */
void vm_page_bootstrap(
        vm_offset_t *startp,
        vm_offset_t *endp)
{
        unsigned long i;

        /*
         * Initialize the page queues.
         */

        simple_lock_init(&vm_page_queue_free_lock);
        simple_lock_init(&vm_page_queue_lock);

        list_init(&vm_page_queue_fictitious);

        /*
         * Allocate (and initialize) the virtual-to-physical
         * table hash buckets.
         *
         * The number of buckets should be a power of two to
         * get a good hash function.  The following computation
         * chooses the first power of two that is not less
         * than the number of physical pages in the system.
         */

        if (vm_page_bucket_count == 0) {
                unsigned long npages = vm_page_table_size();

                vm_page_bucket_count = 1;
                while (vm_page_bucket_count < npages)
                        vm_page_bucket_count <<= 1;
        }

        vm_page_hash_mask = vm_page_bucket_count - 1;

        if (vm_page_hash_mask & vm_page_bucket_count)
                printf("vm_page_bootstrap: WARNING -- strange page hash\n");

        vm_page_buckets = (vm_page_bucket_t *)
                pmap_steal_memory(vm_page_bucket_count *
                                  sizeof(vm_page_bucket_t));

        for (i = 0; i < vm_page_bucket_count; i++) {
                vm_page_bucket_t *bucket = &vm_page_buckets[i];

                bucket->pages = VM_PAGE_NULL;
                simple_lock_init(&bucket->lock);
        }

        vm_page_setup();

        virtual_space_start = round_page(virtual_space_start);
        virtual_space_end = trunc_page(virtual_space_end);

        *startp = virtual_space_start;
        *endp = virtual_space_end;
}
#ifndef MACHINE_PAGES
/*
 * We implement pmap_steal_memory with the help
 * of two simpler functions, pmap_virtual_space and vm_page_bootalloc.
 */

vm_offset_t pmap_steal_memory(
        vm_size_t size)
{
        vm_offset_t addr, vaddr;
        phys_addr_t paddr;

        size = round_page(size);

        /*
         * If this is the first call to pmap_steal_memory,
         * we have to initialize ourself.
         */

        if (virtual_space_start == virtual_space_end) {
                pmap_virtual_space(&virtual_space_start, &virtual_space_end);

                /*
                 * The initial values must be aligned properly, and
                 * we don't trust the pmap module to do it right.
                 */

                virtual_space_start = round_page(virtual_space_start);
                virtual_space_end = trunc_page(virtual_space_end);
        }

        /*
         * Allocate virtual memory for this request.
         */

        addr = virtual_space_start;
        virtual_space_start += size;

        /*
         * Allocate and map physical pages to back new virtual pages.
         */

        for (vaddr = round_page(addr);
             vaddr < addr + size;
             vaddr += PAGE_SIZE) {
                paddr = vm_page_bootalloc(PAGE_SIZE);

                /*
                 * XXX Logically, these mappings should be wired,
                 * but some pmap modules barf if they are.
                 */

                pmap_enter(kernel_pmap, vaddr, paddr,
                           VM_PROT_READ|VM_PROT_WRITE, FALSE);
        }

        return addr;
}
#endif /* MACHINE_PAGES */
/*
 * Routine:     vm_page_module_init
 * Purpose:
 *      Second initialization pass, to be done after
 *      the basic VM system is ready.
 */
void vm_page_module_init(void)
{
        kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
                        NULL, 0);
}
/*
 * vm_page_hash:
 *
 * Distributes the object/offset key pair among hash buckets.
 *
 * NOTE: To get a good hash function, the bucket count should
 *       be a power of two.
 */
#define vm_page_hash(object, offset) \
        (((unsigned int)(vm_offset_t)(object) + (unsigned int)atop(offset)) \
         & vm_page_hash_mask)
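
/*
 * Illustrative sketch, not part of the original source: because
 * vm_page_bucket_count is a power of two, masking with
 * vm_page_hash_mask reduces the key modulo the bucket count.
 * The helper name below is hypothetical.
 */
#if 0
static vm_page_bucket_t *
vm_page_hash_bucket_example(vm_object_t object, vm_offset_t offset)
{
        unsigned int key = (unsigned int)(vm_offset_t)object
                           + (unsigned int)atop(offset);

        /* E.g. with 1024 buckets, vm_page_hash_mask is 0x3ff. */
        assert((key & vm_page_hash_mask) == key % vm_page_bucket_count);

        return &vm_page_buckets[vm_page_hash(object, offset)];
}
#endif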
/*
 * vm_page_insert:      [ internal use only ]
 *
 * Inserts the given mem entry into the object/object-page
 * table and object list.
 *
 * The object and page must be locked.
 * The free page queue must not be locked.
 */
void vm_page_insert(
        vm_page_t   mem,
        vm_object_t object,
        vm_offset_t offset)
{
        vm_page_bucket_t *bucket;

        VM_PAGE_CHECK(mem);

        assert(!mem->active && !mem->inactive);
        assert(!mem->external);

        if (!object->internal) {
                mem->external = TRUE;
                vm_object_external_pages++;
        }

        if (mem->tabled)
                panic("vm_page_insert");

        /*
         * Record the object/offset pair in this page.
         */

        mem->object = object;
        mem->offset = offset;

        /*
         * Insert it into the object/offset hash table.
         */

        bucket = &vm_page_buckets[vm_page_hash(object, offset)];
        simple_lock(&bucket->lock);
        mem->next = bucket->pages;
        bucket->pages = mem;
        simple_unlock(&bucket->lock);

        /*
         * Now link into the object's list of backed pages.
         */

        queue_enter(&object->memq, mem, vm_page_t, listq);
        mem->tabled = TRUE;

        /*
         * Show that the object has one more resident page.
         */

        object->resident_page_count++;
        assert(object->resident_page_count != 0);

        /*
         * Detect sequential access and inactivate previous page.
         * We ignore busy pages.
         */

        if (vm_page_deactivate_behind &&
            (offset == object->last_alloc + PAGE_SIZE)) {
                vm_page_t last_mem;

                last_mem = vm_page_lookup(object, object->last_alloc);
                if ((last_mem != VM_PAGE_NULL) && !last_mem->busy)
                        vm_page_deactivate(last_mem);
        }
        object->last_alloc = offset;
}
/*
 * vm_page_replace:
 *
 * Exactly like vm_page_insert, except that we first
 * remove any existing page at the given offset in object,
 * and we don't do deactivate-behind.
 *
 * The object and page must be locked.
 * The free page queue must not be locked.
 */
void vm_page_replace(
        vm_page_t   mem,
        vm_object_t object,
        vm_offset_t offset)
{
        vm_page_bucket_t *bucket;

        VM_PAGE_CHECK(mem);

        assert(!mem->active && !mem->inactive);
        assert(!mem->external);

        if (!object->internal) {
                mem->external = TRUE;
                vm_object_external_pages++;
        }

        if (mem->tabled)
                panic("vm_page_replace");

        /*
         * Record the object/offset pair in this page.
         */

        mem->object = object;
        mem->offset = offset;

        /*
         * Insert it into the object/offset hash table,
         * replacing any page that might have been there.
         */

        bucket = &vm_page_buckets[vm_page_hash(object, offset)];
        simple_lock(&bucket->lock);
        if (bucket->pages) {
                vm_page_t *mp = &bucket->pages;
                vm_page_t m = *mp;

                do {
                        if (m->object == object && m->offset == offset) {
                                /*
                                 * Remove page from bucket and from object,
                                 * and return it to the free list.
                                 */
                                *mp = m->next;
                                queue_remove(&object->memq, m, vm_page_t,
                                             listq);
                                m->tabled = FALSE;
                                object->resident_page_count--;

                                VM_PAGE_QUEUES_REMOVE(m);

                                if (m->external) {
                                        m->external = FALSE;
                                        vm_object_external_pages--;
                                }

                                /*
                                 * Return page to the free list.
                                 * Note the page is not tabled now, so this
                                 * won't self-deadlock on the bucket lock.
                                 */

                                vm_page_free(m);
                                break;
                        }
                        mp = &m->next;
                } while ((m = *mp) != VM_PAGE_NULL);
                mem->next = bucket->pages;
        } else {
                mem->next = VM_PAGE_NULL;
        }
        bucket->pages = mem;
        simple_unlock(&bucket->lock);

        /*
         * Now link into the object's list of backed pages.
         */

        queue_enter(&object->memq, mem, vm_page_t, listq);
        mem->tabled = TRUE;

        /*
         * And show that the object has one more resident
         * page.
         */

        object->resident_page_count++;
        assert(object->resident_page_count != 0);
}
/*
 * vm_page_remove:      [ internal use only ]
 *
 * Removes the given mem entry from the object/offset-page
 * table, the object page list, and the page queues.
 *
 * The object and page must be locked.
 * The free page queue must not be locked.
 */
void vm_page_remove(
        vm_page_t mem)
{
        vm_page_bucket_t *bucket;
        vm_page_t        this;

        assert(mem->tabled);
        VM_PAGE_CHECK(mem);

        /*
         * Remove from the object/offset hash table.
         */

        bucket = &vm_page_buckets[vm_page_hash(mem->object, mem->offset)];
        simple_lock(&bucket->lock);
        if ((this = bucket->pages) == mem) {
                /* optimize for common case */

                bucket->pages = mem->next;
        } else {
                vm_page_t *prev;

                for (prev = &this->next;
                     (this = *prev) != mem;
                     prev = &this->next)
                        continue;
                *prev = this->next;
        }
        simple_unlock(&bucket->lock);

        /*
         * Now remove from the object's list of backed pages.
         */

        queue_remove(&mem->object->memq, mem, vm_page_t, listq);

        /*
         * And show that the object has one fewer resident
         * page.
         */

        mem->object->resident_page_count--;

        mem->tabled = FALSE;

        VM_PAGE_QUEUES_REMOVE(mem);

        if (mem->external) {
                mem->external = FALSE;
                vm_object_external_pages--;
        }
}
/*
 * vm_page_lookup:
 *
 * Returns the page associated with the object/offset
 * pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 * The object must be locked.  No side effects.
 */
vm_page_t vm_page_lookup(
        vm_object_t object,
        vm_offset_t offset)
{
        vm_page_t        mem;
        vm_page_bucket_t *bucket;

        /*
         * Search the hash table for this object/offset pair.
         */

        bucket = &vm_page_buckets[vm_page_hash(object, offset)];

        simple_lock(&bucket->lock);
        for (mem = bucket->pages; mem != VM_PAGE_NULL; mem = mem->next) {
                VM_PAGE_CHECK(mem);
                if ((mem->object == object) && (mem->offset == offset))
                        break;
        }
        simple_unlock(&bucket->lock);
        return mem;
}
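
/*
 * Illustrative sketch, not part of the original source: a typical
 * caller holds the object lock across the lookup and any use of the
 * resulting page, since the bucket lock only protects the hash chain
 * walk itself. The function name is hypothetical.
 */
#if 0
static boolean_t
vm_page_lookup_example(vm_object_t object, vm_offset_t offset)
{
        vm_page_t m;
        boolean_t usable;

        vm_object_lock(object);
        m = vm_page_lookup(object, offset);
        usable = (m != VM_PAGE_NULL) && !m->busy;
        /* ... the page may be used here, while the object stays locked ... */
        vm_object_unlock(object);

        return usable;
}
#endif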
/*
 * vm_page_rename:
 *
 * Move the given memory entry from its
 * current object to the specified target object/offset.
 *
 * The object must be locked.
 */
void vm_page_rename(
        vm_page_t   mem,
        vm_object_t new_object,
        vm_offset_t new_offset)
{
        /*
         * Changes to mem->object require the page lock because
         * the pageout daemon uses that lock to get the object.
         */

        vm_page_lock_queues();
        vm_page_remove(mem);
        vm_page_insert(mem, new_object, new_offset);
        vm_page_unlock_queues();
}
static void vm_page_init_template(vm_page_t m)
{
        m->object = VM_OBJECT_NULL;     /* reset later */
        m->offset = 0;                  /* reset later */
        m->wire_count = 0;

        m->inactive = FALSE;
        m->active = FALSE;
        m->laundry = FALSE;
        m->external_laundry = FALSE;
        m->free = FALSE;
        m->external = FALSE;

        m->busy = TRUE;
        m->wanted = FALSE;
        m->tabled = FALSE;
        m->fictitious = FALSE;
        m->private = FALSE;
        m->absent = FALSE;
        m->error = FALSE;
        m->dirty = FALSE;
        m->precious = FALSE;
        m->reference = FALSE;

        m->page_lock = VM_PROT_NONE;
        m->unlock_request = VM_PROT_NONE;
}

/*
 * vm_page_init:
 *
 * Initialize the fields in a new page.
 * This takes a structure with random values and initializes it
 * so that it can be given to vm_page_release or vm_page_insert.
 */
void vm_page_init(
        vm_page_t mem)
{
        vm_page_init_template(mem);
}
/*
 * vm_page_grab_fictitious:
 *
 * Remove a fictitious page from the free list.
 * Returns VM_PAGE_NULL if there are no free pages.
 */
vm_page_t vm_page_grab_fictitious(void)
{
        vm_page_t m;

        simple_lock(&vm_page_queue_free_lock);
        if (list_empty(&vm_page_queue_fictitious)) {
                m = VM_PAGE_NULL;
        } else {
                m = list_first_entry(&vm_page_queue_fictitious,
                                     struct vm_page, node);
                assert(m->fictitious);
                list_remove(&m->node);
                m->free = FALSE;
                vm_page_fictitious_count--;
        }
        simple_unlock(&vm_page_queue_free_lock);

        return m;
}
/*
 * vm_page_release_fictitious:
 *
 * Release a fictitious page to the free list.
 */
static void vm_page_release_fictitious(
        vm_page_t m)
{
        simple_lock(&vm_page_queue_free_lock);
        if (m->free)
                panic("vm_page_release_fictitious");
        m->free = TRUE;
        list_insert_head(&vm_page_queue_fictitious, &m->node);
        vm_page_fictitious_count++;
        simple_unlock(&vm_page_queue_free_lock);
}
/*
 * vm_page_more_fictitious:
 *
 * Add more fictitious pages to the free list.
 * Allowed to block.
 */
int vm_page_fictitious_quantum = 5;

void vm_page_more_fictitious(void)
{
        vm_page_t m;
        int i;

        for (i = 0; i < vm_page_fictitious_quantum; i++) {
                m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
                if (m == VM_PAGE_NULL)
                        panic("vm_page_more_fictitious");

                vm_page_init(m);
                m->phys_addr = vm_page_fictitious_addr;
                m->fictitious = TRUE;
                vm_page_release_fictitious(m);
        }
}
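
/*
 * Illustrative sketch, not part of the original source: callers that
 * cannot tolerate failure typically loop, replenishing the pool with
 * vm_page_more_fictitious() (which may block) whenever
 * vm_page_grab_fictitious() comes up empty. The function name is
 * hypothetical.
 */
#if 0
static vm_page_t
vm_page_grab_fictitious_retry_example(void)
{
        vm_page_t m;

        while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
                vm_page_more_fictitious();

        return m;
}
#endif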
/*
 * vm_page_convert:
 *
 * Attempt to convert a fictitious page into a real page.
 *
 * The object referenced by *MP must be locked.
 */
boolean_t vm_page_convert(struct vm_page **mp)
{
        struct vm_page *real_m, *fict_m;
        vm_object_t object;
        vm_offset_t offset;

        fict_m = *mp;

        assert(fict_m->fictitious);
        assert(fict_m->phys_addr == vm_page_fictitious_addr);
        assert(!fict_m->active);
        assert(!fict_m->inactive);

        real_m = vm_page_grab();
        if (real_m == VM_PAGE_NULL)
                return FALSE;

        object = fict_m->object;
        offset = fict_m->offset;
        vm_page_remove(fict_m);

        memcpy(&real_m->vm_page_header,
               &fict_m->vm_page_header,
               sizeof(*fict_m) - VM_PAGE_HEADER_SIZE);
        real_m->fictitious = FALSE;

        vm_page_insert(real_m, object, offset);

        assert(real_m->phys_addr != vm_page_fictitious_addr);
        assert(fict_m->fictitious);
        assert(fict_m->phys_addr == vm_page_fictitious_addr);

        vm_page_release_fictitious(fict_m);

        *mp = real_m;
        return TRUE;
}
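
/*
 * Illustrative sketch, not part of the original source: when
 * vm_page_convert() fails for lack of real memory, a caller holding
 * the locked object might free the fictitious placeholder and report
 * failure so the whole operation can be retried later. The function
 * name is hypothetical.
 */
#if 0
static vm_page_t
vm_page_convert_example(vm_page_t m)
{
        /* m is fictitious and tabled; its object is locked. */
        if (!vm_page_convert(&m)) {
                vm_page_lock_queues();
                vm_page_free(m);        /* give the placeholder back */
                vm_page_unlock_queues();
                return VM_PAGE_NULL;    /* caller must retry later */
        }

        return m;       /* now backed by a real physical page */
}
#endif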
/*
 * vm_page_grab:
 *
 * Remove a page from the free list.
 * Returns VM_PAGE_NULL if the free list is too small.
 */
vm_page_t vm_page_grab(void)
{
        vm_page_t mem;

        simple_lock(&vm_page_queue_free_lock);

        /*
         * XXX Mach has many modules that merely assume memory is
         * directly mapped in kernel space.  Instead of updating all
         * users, we assume those which need specific physical memory
         * properties will wire down their pages, either because
         * they can't be paged (not part of an object), or with
         * explicit VM calls.  The strategy is then to let memory
         * pressure balance the physical segments with pageable pages.
         */

        mem = vm_page_alloc_pa(0, VM_PAGE_SEL_DIRECTMAP, VM_PT_KERNEL);

        if (mem == NULL) {
                simple_unlock(&vm_page_queue_free_lock);
                return NULL;
        }

        mem->free = FALSE;
        simple_unlock(&vm_page_queue_free_lock);

        return mem;
}
phys_addr_t vm_page_grab_phys_addr(void)
{
        vm_page_t p = vm_page_grab();
        if (p == VM_PAGE_NULL)
                return -1;
        else
                return p->phys_addr;
}
/*
 * vm_page_release:
 *
 * Return a page to the free list.
 */
void vm_page_release(
        vm_page_t mem,
        boolean_t laundry,
        boolean_t external_laundry)
{
        simple_lock(&vm_page_queue_free_lock);
        if (mem->free)
                panic("vm_page_release");
        mem->free = TRUE;
        vm_page_free_pa(mem, 0);
        if (laundry) {
                vm_page_laundry_count--;

                if (vm_page_laundry_count == 0) {
                        vm_pageout_resume();
                }
        }
        if (external_laundry) {

                /*
                 * If vm_page_external_laundry_count is negative,
                 * the pageout daemon isn't expecting to be
                 * notified.
                 */

                if (vm_page_external_laundry_count > 0) {
                        vm_page_external_laundry_count--;

                        if (vm_page_external_laundry_count == 0) {
                                vm_pageout_resume();
                        }
                }
        }

        simple_unlock(&vm_page_queue_free_lock);
}
/*
 * vm_page_grab_contig:
 *
 * Remove a block of contiguous pages from the free list.
 * Returns VM_PAGE_NULL if the request fails.
 */
vm_page_t vm_page_grab_contig(
        vm_size_t    size,
        unsigned int selector)
{
        unsigned int i, order, nr_pages;
        vm_page_t mem;

        order = vm_page_order(size);
        nr_pages = 1 << order;

        simple_lock(&vm_page_queue_free_lock);

        /* TODO Allow caller to pass type */
        mem = vm_page_alloc_pa(order, selector, VM_PT_KERNEL);

        if (mem == NULL) {
                simple_unlock(&vm_page_queue_free_lock);
                return NULL;
        }

        for (i = 0; i < nr_pages; i++) {
                mem[i].free = FALSE;
        }

        simple_unlock(&vm_page_queue_free_lock);

        return mem;
}
/*
 * vm_page_free_contig:
 *
 * Return a block of contiguous pages to the free list.
 */
void vm_page_free_contig(vm_page_t mem, vm_size_t size)
{
        unsigned int i, order, nr_pages;

        order = vm_page_order(size);
        nr_pages = 1 << order;

        simple_lock(&vm_page_queue_free_lock);

        for (i = 0; i < nr_pages; i++) {
                if (mem[i].free)
                        panic("vm_page_free_contig");

                mem[i].free = TRUE;
        }

        vm_page_free_pa(mem, order);

        simple_unlock(&vm_page_queue_free_lock);
}
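
/*
 * Illustrative sketch, not part of the original source: a caller
 * needing physically contiguous memory, e.g. for DMA, pairs
 * vm_page_grab_contig() with vm_page_free_contig().  The size and
 * the VM_PAGE_SEL_DMA selector are example values; the function
 * name is hypothetical.
 */
#if 0
static void
vm_page_contig_example(void)
{
        vm_size_t size = 4 * PAGE_SIZE;
        vm_page_t pages;

        pages = vm_page_grab_contig(size, VM_PAGE_SEL_DMA);
        if (pages == VM_PAGE_NULL)
                return;         /* allocation failed */

        /* ... hand pages->phys_addr to the device ... */

        vm_page_free_contig(pages, size);
}
#endif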
/*
 * vm_page_alloc:
 *
 * Allocate and return a memory cell associated
 * with this VM object/offset pair.
 *
 * Object must be locked.
 */
vm_page_t vm_page_alloc(
        vm_object_t object,
        vm_offset_t offset)
{
        vm_page_t mem;

        mem = vm_page_grab();
        if (mem == VM_PAGE_NULL)
                return VM_PAGE_NULL;

        vm_page_lock_queues();
        vm_page_insert(mem, object, offset);
        vm_page_unlock_queues();

        return mem;
}
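
/*
 * Illustrative sketch, not part of the original source: the common
 * allocation pattern drops the object lock and blocks until the
 * pageout daemon frees memory, then retries.  vm_page_wait() is
 * assumed to behave as in the fault path; the function name is
 * hypothetical.
 */
#if 0
static vm_page_t
vm_page_alloc_retry_example(vm_object_t object, vm_offset_t offset)
{
        vm_page_t m;

        vm_object_lock(object);
        while ((m = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
                vm_object_unlock(object);
                vm_page_wait((void (*)()) 0);   /* block until memory frees up */
                vm_object_lock(object);
        }
        vm_object_unlock(object);

        return m;
}
#endif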
/*
 * vm_page_free:
 *
 * Returns the given page to the free list,
 * disassociating it from any VM object.
 *
 * Object and page queues must be locked prior to entry.
 */
void vm_page_free(
        vm_page_t mem)
{
        if (mem->free)
                panic("vm_page_free");

        if (mem->tabled) {
                vm_page_remove(mem);
        }

        assert(!mem->active && !mem->inactive);

        if (mem->wire_count != 0) {
                if (!mem->private && !mem->fictitious)
                        vm_page_wire_count--;
                mem->wire_count = 0;
        }

        PAGE_WAKEUP_DONE(mem);

        if (mem->absent)
                vm_object_absent_release(mem->object);

        /*
         * XXX The calls to vm_page_init here are
         * really overkill.
         */

        if (mem->private || mem->fictitious) {
                vm_page_init(mem);
                mem->phys_addr = vm_page_fictitious_addr;
                mem->fictitious = TRUE;
                vm_page_release_fictitious(mem);
        } else {
                boolean_t laundry = mem->laundry;
                boolean_t external_laundry = mem->external_laundry;

                vm_page_init(mem);
                vm_page_release(mem, laundry, external_laundry);
        }
}
/*
 * vm_page_zero_fill:
 *
 * Zero-fill the specified page.
 */
void vm_page_zero_fill(
        vm_page_t m)
{
        VM_PAGE_CHECK(m);

        pmap_zero_page(m->phys_addr);
}

/*
 * vm_page_copy:
 *
 * Copy one page to another.
 */
void vm_page_copy(
        vm_page_t src_m,
        vm_page_t dest_m)
{
        VM_PAGE_CHECK(src_m);
        VM_PAGE_CHECK(dest_m);

        pmap_copy_page(src_m->phys_addr, dest_m->phys_addr);
}
#if MACH_VM_DEBUG
/*
 * Routine:     vm_page_info
 * Purpose:
 *      Return information about the global VP table.
 *      Fills the buffer with as much information as possible
 *      and returns the desired size of the buffer.
 * Conditions:
 *      Nothing locked.  The caller should provide
 *      possibly-pageable memory.
 */
unsigned int
vm_page_info(
        hash_info_bucket_t *info,
        unsigned int       count)
{
        unsigned int i;

        if (vm_page_bucket_count < count)
                count = vm_page_bucket_count;

        for (i = 0; i < count; i++) {
                vm_page_bucket_t *bucket = &vm_page_buckets[i];
                unsigned int     bucket_count = 0;
                vm_page_t        m;

                simple_lock(&bucket->lock);
                for (m = bucket->pages; m != VM_PAGE_NULL; m = m->next)
                        bucket_count++;
                simple_unlock(&bucket->lock);

                /* don't touch pageable memory while holding locks */
                info[i].hib_count = bucket_count;
        }

        return vm_page_bucket_count;
}
#endif /* MACH_VM_DEBUG */
#if MACH_KDB
#define printf kdbprintf

/*
 * Routine:     vm_page_print           [exported]
 */
void vm_page_print(const vm_page_t p)
{
        iprintf("Page 0x%X: object 0x%X,", (vm_offset_t) p, (vm_offset_t) p->object);
        printf(" offset 0x%X,", p->offset);
        printf(" wire_count %d,", p->wire_count);
        printf(" %s",
               (p->active ? "active" : (p->inactive ? "inactive" : "loose")));
        printf("%s",
               (p->free ? " free" : ""));
        printf("%s",
               (p->laundry ? " laundry" : ""));
        printf("%s",
               (p->dirty ? " dirty" : " clean"));
        printf("%s",
               (p->busy ? " busy" : ""));
        printf("%s",
               (p->absent ? " absent" : ""));
        printf("%s",
               (p->error ? " error" : ""));
        printf("%s",
               (p->fictitious ? " fictitious" : ""));
        printf("%s",
               (p->private ? " private" : ""));
        printf("%s",
               (p->wanted ? " wanted" : ""));
        printf("%s, ",
               (p->tabled ? "" : " not_tabled"));
        printf("phys_addr = 0x%X, lock = 0x%X, unlock_request = 0x%X\n",
               p->phys_addr,
               (vm_offset_t) p->page_lock,
               (vm_offset_t) p->unlock_request);
}
#endif /* MACH_KDB */