vm_object.c

  1. /*
  2. * Mach Operating System
  3. * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
  4. * Copyright (c) 1993,1994 The University of Utah and
  5. * the Computer Systems Laboratory (CSL).
  6. * All rights reserved.
  7. *
  8. * Permission to use, copy, modify and distribute this software and its
  9. * documentation is hereby granted, provided that both the copyright
  10. * notice and this permission notice appear in all copies of the
  11. * software, derivative works or modified versions, and any portions
  12. * thereof, and that both notices appear in supporting documentation.
  13. *
  14. * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
  15. * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
  16. * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
  17. * THIS SOFTWARE.
  18. *
  19. * Carnegie Mellon requests users of this software to return to
  20. *
  21. * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  22. * School of Computer Science
  23. * Carnegie Mellon University
  24. * Pittsburgh PA 15213-3890
  25. *
  26. * any improvements or extensions that they make and grant Carnegie Mellon
  27. * the rights to redistribute these changes.
  28. */
  29. /*
  30. * File: vm/vm_object.c
  31. * Author: Avadis Tevanian, Jr., Michael Wayne Young
  32. *
  33. * Virtual memory object module.
  34. */
  35. #include <kern/printf.h>
  36. #include <string.h>
  37. #include <mach/memory_object.h>
  38. #include <vm/memory_object_default.user.h>
  39. #include <vm/memory_object_user.user.h>
  40. #include <machine/vm_param.h>
  41. #include <ipc/ipc_port.h>
  42. #include <ipc/ipc_space.h>
  43. #include <kern/assert.h>
  44. #include <kern/debug.h>
  45. #include <kern/lock.h>
  46. #include <kern/queue.h>
  47. #include <kern/xpr.h>
  48. #include <kern/slab.h>
  49. #include <vm/memory_object.h>
  50. #include <vm/vm_fault.h>
  51. #include <vm/vm_map.h>
  52. #include <vm/vm_object.h>
  53. #include <vm/vm_page.h>
  54. #include <vm/vm_pageout.h>
  55. #if MACH_KDB
  56. #include <ddb/db_output.h>
  57. #endif /* MACH_KDB */
  58. void memory_object_release(
  59. ipc_port_t pager,
  60. pager_request_t pager_request,
  61. ipc_port_t pager_name); /* forward */
  62. /*
  63. * Virtual memory objects maintain the actual data
  64. * associated with allocated virtual memory. A given
  65. * page of memory exists within exactly one object.
  66. *
  67. * An object is only deallocated when all "references"
  68. * are given up. Only one "reference" to a given
  69. * region of an object should be writeable.
  70. *
  71. * Associated with each object is a list of all resident
  72. * memory pages belonging to that object; this list is
  73. * maintained by the "vm_page" module, but locked by the object's
  74. * lock.
  75. *
  76. * Each object also records the memory object port
  77. * that is used by the kernel to request and write
  78. * back data (the memory object port, field "pager"),
  79. * and the ports provided to the memory manager, the server that
  80. * manages that data, to return data and control its
  81. * use (the memory object control port, field "pager_request")
  82. * and for naming (the memory object name port, field "pager_name").
  83. *
  84. * Virtual memory objects are allocated to provide
  85. * zero-filled memory (vm_allocate) or map a user-defined
  86. * memory object into a virtual address space (vm_map).
  87. *
  88. * Virtual memory objects that refer to a user-defined
  89. * memory object are called "permanent", because all changes
  90. * made in virtual memory are reflected back to the
  91. * memory manager, which may then store them permanently.
  92. * Other virtual memory objects are called "temporary",
  93. * meaning that changes need be written back only when
  94. * necessary to reclaim pages, and that storage associated
  95. * with the object can be discarded once it is no longer
  96. * mapped.
  97. *
  98. * A permanent memory object may be mapped into more
  99. * than one virtual address space. Moreover, two threads
  100. * may attempt to make the first mapping of a memory
  101. * object concurrently. Only one thread is allowed to
  102. * complete this mapping; all others wait until the
  103. * "pager_initialized" field is asserted, indicating
  104. * that the first thread has initialized all of the
  105. * necessary fields in the virtual memory object structure.
  106. *
  107. * The kernel relies on a *default memory manager* to
  108. * provide backing storage for the zero-filled virtual
  109. * memory objects. The memory object ports associated
  110. * with these temporary virtual memory objects are only
  111. * generated and passed to the default memory manager
  112. * when it becomes necessary. Virtual memory objects
  113. * that depend on the default memory manager are called
  114. * "internal". The "pager_created" field is provided to
  115. * indicate whether these ports have ever been allocated.
  116. *
  117. * The kernel may also create virtual memory objects to
  118. * hold changed pages after a copy-on-write operation.
  119. * In this case, the virtual memory object (and its
  120. * backing storage -- its memory object) only contain
  121. * those pages that have been changed. The "shadow"
  122. * field refers to the virtual memory object that contains
  123. * the remainder of the contents. The "shadow_offset"
  124. * field indicates where in the "shadow" these contents begin.
  125. * The "copy" field refers to a virtual memory object
  126. * to which changed pages must be copied before changing
  127. * this object, in order to implement another form
  128. * of copy-on-write optimization.
  129. *
  130. * The virtual memory object structure also records
  131. * the attributes associated with its memory object.
  132. * The "pager_ready", "can_persist" and "copy_strategy"
  133. * fields represent those attributes. The "cached_list"
  134. * field is used in the implementation of the persistence
  135. * attribute.
  136. *
  137. * ZZZ Continue this comment.
  138. */
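/*
 * Illustrative sketch (not part of the original file): how the "shadow"
 * and "shadow_offset" fields described above are conceptually traversed
 * when looking for a resident page.  The real, fully locked version of
 * this walk lives in vm_fault_page(); the function below is hypothetical
 * and omits locking and paging-in-progress accounting.
 */
#if 0	/* documentation only */
static vm_page_t
vm_object_shadow_walk_sketch(
	vm_object_t	object,
	vm_offset_t	offset)
{
	vm_page_t	m;

	while (object != VM_OBJECT_NULL) {
		/* Is the page resident in this object? */
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL)
			return m;
		/* Not resident: translate the offset and descend to the shadow. */
		offset += object->shadow_offset;
		object = object->shadow;
	}
	return VM_PAGE_NULL;	/* zero-fill, or ask the pager */
}
#endif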
  139. struct kmem_cache vm_object_cache; /* vm backing store cache */
  140. /*
  141. * All wired-down kernel memory belongs to a single virtual
  142. * memory object (kernel_object) to avoid wasting data structures.
  143. */
  144. static struct vm_object kernel_object_store;
  145. vm_object_t kernel_object = &kernel_object_store;
  146. /*
  147. * Virtual memory objects that are not referenced by
  148. * any address maps, but that are allowed to persist
  149. * (an attribute specified by the associated memory manager),
  150. * are kept in a queue (vm_object_cached_list).
  151. *
  152. * When an object from this queue is referenced again,
  153. * for example to make another address space mapping,
  154. * it must be removed from the queue. That is, the
  155. * queue contains *only* objects with zero references.
  156. *
  157. * The kernel may choose to terminate objects from this
  158. * queue in order to reclaim storage. The current policy
  159. * is to let memory pressure dynamically adjust the number
  160. * of unreferenced objects. The pageout daemon attempts to
  161. * collect objects after removing pages from them.
  162. *
  163. * A simple lock (accessed by routines
  164. * vm_object_cache_{lock,lock_try,unlock}) governs the
  165. * object cache. It must be held when objects are
  166. * added to or removed from the cache (in vm_object_terminate).
  167. * The routines that acquire a reference to a virtual
  168. * memory object based on one of the memory object ports
  169. * must also lock the cache.
  170. *
  171. * Ideally, the object cache should be more isolated
  172. * from the reference mechanism, so that the lock need
  173. * not be held to make simple references.
  174. */
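/*
 * Illustrative sketch (not part of the original file): the lock ordering
 * described above -- cache lock first, then the object lock -- when a
 * zero-reference object is pulled back out of the cached list.  This is
 * roughly what the port-based lookup routines do; the function name is
 * hypothetical.
 */
#if 0	/* documentation only */
static void
vm_object_cache_reactivate_sketch(
	vm_object_t	object)
{
	vm_object_cache_lock();		/* cache lock before object lock */
	vm_object_lock(object);

	assert(object->ref_count == 0);
	if (object->cached)
		vm_object_cache_remove(object);	/* leave the cached list */
	object->ref_count++;			/* object is referenced again */

	vm_object_unlock(object);
	vm_object_cache_unlock();
}
#endif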
  175. queue_head_t vm_object_cached_list;
  176. decl_simple_lock_data(,vm_object_cached_lock_data)
  177. #define vm_object_cache_lock() \
  178. simple_lock(&vm_object_cached_lock_data)
  179. #define vm_object_cache_lock_try() \
  180. simple_lock_try(&vm_object_cached_lock_data)
  181. #define vm_object_cache_unlock() \
  182. simple_unlock(&vm_object_cached_lock_data)
  183. /*
  184. * Number of physical pages referenced by cached objects.
  185. * This counter is protected by its own lock to work around
  186. * lock ordering issues.
  187. */
  188. int vm_object_cached_pages;
  189. decl_simple_lock_data(,vm_object_cached_pages_lock_data)
  190. /*
  191. * Virtual memory objects are initialized from
  192. * a template (see vm_object_allocate).
  193. *
  194. * When adding a new field to the virtual memory
  195. * object structure, be sure to add initialization
  196. * (see vm_object_init).
  197. */
  198. struct vm_object vm_object_template;
  199. /*
  200. * vm_object_allocate:
  201. *
  202. * Returns a new object with the given size.
  203. */
  204. static void _vm_object_setup(
  205. vm_object_t object,
  206. vm_size_t size)
  207. {
  208. *object = vm_object_template;
  209. queue_init(&object->memq);
  210. vm_object_lock_init(object);
  211. object->size = size;
  212. }
  213. vm_object_t _vm_object_allocate(
  214. vm_size_t size)
  215. {
  216. vm_object_t object;
  217. object = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
  218. if (!object)
  219. return 0;
  220. _vm_object_setup(object, size);
  221. return object;
  222. }
  223. vm_object_t vm_object_allocate(
  224. vm_size_t size)
  225. {
  226. vm_object_t object;
  227. ipc_port_t port;
  228. object = _vm_object_allocate(size);
  229. if (object == 0)
  230. panic("vm_object_allocate");
  231. port = ipc_port_alloc_kernel();
  232. if (port == IP_NULL)
  233. panic("vm_object_allocate");
  234. object->pager_name = port;
  235. ipc_kobject_set(port, (ipc_kobject_t) object, IKOT_PAGING_NAME);
  236. return object;
  237. }
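/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might install a freshly allocated, zero-filled object into an address
 * map.  It assumes the vm_map_enter() interface declared in vm/vm_map.h;
 * "map", "addr" and "size" are hypothetical.  On failure the reference
 * obtained from vm_object_allocate() must be dropped again.
 */
#if 0	/* documentation only */
static kern_return_t
vm_object_allocate_example(
	vm_map_t	map,
	vm_offset_t	*addr,
	vm_size_t	size)
{
	vm_object_t	object;
	kern_return_t	kr;

	object = vm_object_allocate(size);	/* one reference, name port set */

	kr = vm_map_enter(map, addr, size,
			  (vm_offset_t) 0,	/* mask */
			  TRUE,			/* anywhere */
			  object,
			  (vm_offset_t) 0,	/* offset within object */
			  FALSE,		/* needs_copy */
			  VM_PROT_DEFAULT, VM_PROT_ALL,
			  VM_INHERIT_DEFAULT);
	if (kr != KERN_SUCCESS)
		vm_object_deallocate(object);	/* give the reference back */
	return kr;
}
#endif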
  238. /*
  239. * vm_object_bootstrap:
  240. *
  241. * Initialize the VM objects module.
  242. */
  243. void vm_object_bootstrap(void)
  244. {
  245. kmem_cache_init(&vm_object_cache, "vm_object",
  246. sizeof(struct vm_object), 0, NULL, 0);
  247. queue_init(&vm_object_cached_list);
  248. simple_lock_init(&vm_object_cached_lock_data);
  249. /*
  250. * Fill in a template object, for quick initialization
  251. */
  252. vm_object_template.ref_count = 1;
  253. vm_object_template.size = 0;
  254. vm_object_template.resident_page_count = 0;
  255. vm_object_template.copy = VM_OBJECT_NULL;
  256. vm_object_template.shadow = VM_OBJECT_NULL;
  257. vm_object_template.shadow_offset = (vm_offset_t) 0;
  258. vm_object_template.pager = IP_NULL;
  259. vm_object_template.paging_offset = 0;
  260. vm_object_template.pager_request = PAGER_REQUEST_NULL;
  261. vm_object_template.pager_name = IP_NULL;
  262. vm_object_template.pager_created = FALSE;
  263. vm_object_template.pager_initialized = FALSE;
  264. vm_object_template.pager_ready = FALSE;
  265. vm_object_template.copy_strategy = MEMORY_OBJECT_COPY_NONE;
  266. /* ignored if temporary, will be reset before
  267. * permanent object becomes ready */
  268. vm_object_template.use_shared_copy = FALSE;
  269. vm_object_template.shadowed = FALSE;
  270. vm_object_template.absent_count = 0;
  271. vm_object_template.all_wanted = 0; /* all bits FALSE */
  272. vm_object_template.paging_in_progress = 0;
  273. vm_object_template.used_for_pageout = FALSE;
  274. vm_object_template.can_persist = FALSE;
  275. vm_object_template.cached = FALSE;
  276. vm_object_template.internal = TRUE;
  277. vm_object_template.temporary = TRUE;
  278. vm_object_template.alive = TRUE;
  279. vm_object_template.lock_in_progress = FALSE;
  280. vm_object_template.lock_restart = FALSE;
  281. vm_object_template.last_alloc = (vm_offset_t) 0;
  282. #if MACH_PAGEMAP
  283. vm_object_template.existence_info = VM_EXTERNAL_NULL;
  284. #endif /* MACH_PAGEMAP */
  285. /*
  286. * Initialize the "kernel object"
  287. */
  288. _vm_object_setup(kernel_object,
  289. VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
  290. /*
  291. * Initialize the "submap object". Make it as large as the
  292. * kernel object so that no limit is imposed on submap sizes.
  293. */
  294. _vm_object_setup(vm_submap_object,
  295. VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
  296. #if MACH_PAGEMAP
  297. vm_external_module_initialize();
  298. #endif /* MACH_PAGEMAP */
  299. }
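/*
 * Illustrative sketch (not part of the original file): where the two
 * initialization entry points are expected to be called from.  This
 * mirrors the usual split in vm/vm_init.c -- vm_object_bootstrap() runs
 * before the map and IPC systems are up, while vm_object_init() (below)
 * runs once kernel IPC ports can be allocated.  The caller names here
 * are assumptions.
 */
#if 0	/* documentation only */
void vm_mem_bootstrap_sketch(void)
{
	vm_object_bootstrap();	/* template, kernel/submap objects, cache */
	/* ... vm_map_init(), kmem_init(), pmap_init(), ... */
}

void vm_mem_init_sketch(void)
{
	vm_object_init();	/* give the kernel object its name port */
}
#endif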
  300. void vm_object_init(void)
  301. {
  302. /*
  303. * Finish initializing the kernel object.
  304. * The submap object doesn't need a name port.
  305. */
  306. kernel_object->pager_name = ipc_port_alloc_kernel();
  307. ipc_kobject_set(kernel_object->pager_name,
  308. (ipc_kobject_t) kernel_object,
  309. IKOT_PAGING_NAME);
  310. }
  311. /*
  312. * Object cache management functions.
  313. *
  314. * Both the cache and the object must be locked
  315. * before calling these functions.
  316. */
  317. static void vm_object_cache_add(
  318. vm_object_t object)
  319. {
  320. assert(!object->cached);
  321. queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
  322. object->cached = TRUE;
  323. }
  324. static void vm_object_cache_remove(
  325. vm_object_t object)
  326. {
  327. assert(object->cached);
  328. queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
  329. object->cached = FALSE;
  330. }
  331. void vm_object_collect(
  332. vm_object_t object)
  333. {
  334. vm_object_unlock(object);
  335. /*
  336. * The cache lock must be acquired in the proper order.
  337. */
  338. vm_object_cache_lock();
  339. vm_object_lock(object);
  340. /*
  341. * If the object was referenced while the lock was
  342. * dropped, cancel the termination.
  343. */
  344. if (!vm_object_collectable(object)) {
  345. vm_object_unlock(object);
  346. vm_object_cache_unlock();
  347. return;
  348. }
  349. vm_object_cache_remove(object);
  350. vm_object_terminate(object);
  351. }
  352. /*
  353. * vm_object_reference:
  354. *
  355. * Gets another reference to the given object.
  356. */
  357. void vm_object_reference(
  358. vm_object_t object)
  359. {
  360. if (object == VM_OBJECT_NULL)
  361. return;
  362. vm_object_lock(object);
  363. assert(object->ref_count > 0);
  364. object->ref_count++;
  365. vm_object_unlock(object);
  366. }
  367. /*
  368. * vm_object_deallocate:
  369. *
  370. * Release a reference to the specified object,
  371. * gained either through a vm_object_allocate
  372. * or a vm_object_reference call. When all references
  373. * are gone, storage associated with this object
  374. * may be relinquished.
  375. *
  376. * No object may be locked.
  377. */
  378. void vm_object_deallocate(
  379. vm_object_t object)
  380. {
  381. vm_object_t temp;
  382. while (object != VM_OBJECT_NULL) {
  383. /*
  384. * The cache holds a reference (uncounted) to
  385. * the object; we must lock it before removing
  386. * the object.
  387. */
  388. vm_object_cache_lock();
  389. /*
  390. * Lose the reference
  391. */
  392. vm_object_lock(object);
  393. if (--(object->ref_count) > 0) {
  394. /*
  395. * If there are still references, then
  396. * we are done.
  397. */
  398. vm_object_unlock(object);
  399. vm_object_cache_unlock();
  400. return;
  401. }
  402. /*
  403. * See whether this object can persist. If so, enter
  404. * it in the cache.
  405. */
  406. if (object->can_persist && (object->resident_page_count > 0)) {
  407. vm_object_cache_add(object);
  408. vm_object_cache_unlock();
  409. vm_object_unlock(object);
  410. return;
  411. }
  412. if (object->pager_created &&
  413. !object->pager_initialized) {
  414. /*
  415. * Have to wait for initialization.
  416. * Put reference back and retry
  417. * when it's initialized.
  418. */
  419. object->ref_count++;
  420. vm_object_assert_wait(object,
  421. VM_OBJECT_EVENT_INITIALIZED, FALSE);
  422. vm_object_unlock(object);
  423. vm_object_cache_unlock();
  424. thread_block((void (*)()) 0);
  425. continue;
  426. }
  427. /*
  428. * Take the reference to the shadow object
  429. * out of the object to be destroyed.
  430. */
  431. temp = object->shadow;
  432. /*
  433. * Destroy the object; the cache lock will
  434. * be released in the process.
  435. */
  436. vm_object_terminate(object);
  437. /*
  438. * Deallocate the reference to the shadow
  439. * by continuing the loop with that object
  440. * in place of the original.
  441. */
  442. object = temp;
  443. }
  444. }
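/*
 * Illustrative sketch (not part of the original file): the reference
 * discipline the two routines above imply.  Any component that stores a
 * vm_object_t for later use must take its own reference, and must drop
 * it with vm_object_deallocate() when done.  The structure and helper
 * names below are hypothetical.
 */
#if 0	/* documentation only */
struct consumer_sketch {
	vm_object_t	object;
};

static void
consumer_set_object_sketch(struct consumer_sketch *c, vm_object_t object)
{
	vm_object_reference(object);	/* keep the object alive for us */
	c->object = object;
}

static void
consumer_clear_object_sketch(struct consumer_sketch *c)
{
	vm_object_deallocate(c->object);	/* may terminate or cache it */
	c->object = VM_OBJECT_NULL;
}
#endif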
  445. /*
  446. * Routine: vm_object_terminate
  447. * Purpose:
  448. * Free all resources associated with a vm_object.
  449. * In/out conditions:
  450. * Upon entry, the object and the cache must be locked,
  451. * and the object must have no references.
  452. *
  453. * The shadow object reference is left alone.
  454. *
  455. * Upon exit, the cache will be unlocked, and the
  456. * object will cease to exist.
  457. */
  458. void vm_object_terminate(
  459. vm_object_t object)
  460. {
  461. vm_page_t p;
  462. vm_object_t shadow_object;
  463. /*
  464. * Make sure the object isn't already being terminated
  465. */
  466. assert(object->alive);
  467. object->alive = FALSE;
  468. /*
  469. * Make sure no one can look us up now.
  470. */
  471. vm_object_remove(object);
  472. vm_object_cache_unlock();
  473. /*
  474. * Detach the object from its shadow if we are the shadow's
  475. * copy.
  476. */
  477. if ((shadow_object = object->shadow) != VM_OBJECT_NULL) {
  478. vm_object_lock(shadow_object);
  479. assert((shadow_object->copy == object) ||
  480. (shadow_object->copy == VM_OBJECT_NULL));
  481. shadow_object->copy = VM_OBJECT_NULL;
  482. vm_object_unlock(shadow_object);
  483. }
  484. /*
  485. * The pageout daemon might be playing with our pages.
  486. * Now that the object is dead, it won't touch any more
  487. * pages, but some pages might already be on their way out.
  488. * Hence, we wait until the active paging activities have ceased.
  489. */
  490. vm_object_paging_wait(object, FALSE);
  491. /*
  492. * Clean or free the pages, as appropriate.
  493. * It is possible for us to find busy/absent pages,
  494. * if some faults on this object were aborted.
  495. */
  496. if ((object->temporary) || (object->pager == IP_NULL)) {
  497. while (!queue_empty(&object->memq)) {
  498. p = (vm_page_t) queue_first(&object->memq);
  499. VM_PAGE_CHECK(p);
  500. VM_PAGE_FREE(p);
  501. }
  502. } else while (!queue_empty(&object->memq)) {
  503. p = (vm_page_t) queue_first(&object->memq);
  504. VM_PAGE_CHECK(p);
  505. vm_page_lock_queues();
  506. VM_PAGE_QUEUES_REMOVE(p);
  507. vm_page_unlock_queues();
  508. if (p->absent || p->private) {
  509. /*
  510. * For private pages, VM_PAGE_FREE just
  511. * leaves the page structure around for
  512. * its owner to clean up. For absent
  513. * pages, the structure is returned to
  514. * the appropriate pool.
  515. */
  516. goto free_page;
  517. }
  518. if (!p->dirty)
  519. p->dirty = pmap_is_modified(p->phys_addr);
  520. if (p->dirty || p->precious) {
  521. p->busy = TRUE;
  522. vm_pageout_page(p, FALSE, TRUE); /* flush page */
  523. } else {
  524. free_page:
  525. VM_PAGE_FREE(p);
  526. }
  527. }
  528. assert(object->ref_count == 0);
  529. assert(object->paging_in_progress == 0);
  530. assert(!object->cached);
  531. if (!object->internal) {
  532. assert(object->resident_page_count == 0);
  533. vm_page_lock_queues();
  534. vm_object_external_count--;
  535. vm_page_unlock_queues();
  536. }
  537. /*
  538. * Throw away port rights... note that they may
  539. * already have been thrown away (by vm_object_destroy
  540. * or memory_object_destroy).
  541. *
  542. * Instead of destroying the control and name ports,
  543. * we send all rights off to the memory manager,
  544. * using memory_object_terminate.
  545. */
  546. vm_object_unlock(object);
  547. if (object->pager != IP_NULL) {
  548. /* consumes our rights for pager, pager_request, pager_name */
  549. memory_object_release(object->pager,
  550. object->pager_request,
  551. object->pager_name);
  552. } else if (object->pager_name != IP_NULL) {
  553. /* consumes our right for pager_name */
  554. ipc_port_dealloc_kernel(object->pager_name);
  555. }
  556. #if MACH_PAGEMAP
  557. vm_external_destroy(object->existence_info);
  558. #endif /* MACH_PAGEMAP */
  559. /*
  560. * Free the space for the object.
  561. */
  562. kmem_cache_free(&vm_object_cache, (vm_offset_t) object);
  563. }
  564. /*
  565. * Routine: vm_object_pager_wakeup
  566. * Purpose: Wake up anyone waiting for IKOT_PAGER_TERMINATING
  567. */
  568. void
  569. vm_object_pager_wakeup(
  570. ipc_port_t pager)
  571. {
  572. boolean_t someone_waiting;
  573. /*
  574. * If anyone was waiting for the memory_object_terminate
  575. * to be queued, wake them up now.
  576. */
  577. vm_object_cache_lock();
  578. assert(ip_kotype(pager) == IKOT_PAGER_TERMINATING);
  579. someone_waiting = (pager->ip_kobject != IKO_NULL);
  580. if (ip_active(pager))
  581. ipc_kobject_set(pager, IKO_NULL, IKOT_NONE);
  582. vm_object_cache_unlock();
  583. if (someone_waiting) {
  584. thread_wakeup((event_t) pager);
  585. }
  586. }
  587. /*
  588. * Routine: memory_object_release
  589. * Purpose: Terminate the pager and release port rights,
  590. * just like memory_object_terminate, except
  591. * that we wake up anyone blocked in vm_object_enter
  592. * waiting for the termination message to be queued
  593. * before calling memory_object_init.
  594. */
  595. void memory_object_release(
  596. ipc_port_t pager,
  597. pager_request_t pager_request,
  598. ipc_port_t pager_name)
  599. {
  600. /*
  601. * Keep a reference to pager port;
  602. * the terminate might otherwise release all references.
  603. */
  604. ip_reference(pager);
  605. /*
  606. * Terminate the pager.
  607. */
  608. (void) memory_object_terminate(pager, pager_request, pager_name);
  609. /*
  610. * Wakeup anyone waiting for this terminate
  611. */
  612. vm_object_pager_wakeup(pager);
  613. /*
  614. * Release reference to pager port.
  615. */
  616. ip_release(pager);
  617. }
  618. /*
  619. * Routine: vm_object_abort_activity [internal use only]
  620. * Purpose:
  621. * Abort paging requests pending on this object.
  622. * In/out conditions:
  623. * The object is locked on entry and exit.
  624. */
  625. void vm_object_abort_activity(
  626. vm_object_t object)
  627. {
  628. vm_page_t p;
  629. vm_page_t next;
  630. /*
  631. * Abort all activity that would be waiting
  632. * for a result on this memory object.
  633. *
  634. * We could also choose to destroy all pages
  635. * that we have in memory for this object, but
  636. * we don't.
  637. */
  638. p = (vm_page_t) queue_first(&object->memq);
  639. while (!queue_end(&object->memq, (queue_entry_t) p)) {
  640. next = (vm_page_t) queue_next(&p->listq);
  641. /*
  642. * If it's being paged in, destroy it.
  643. * If an unlock has been requested, start it again.
  644. */
  645. if (p->busy && p->absent) {
  646. VM_PAGE_FREE(p);
  647. }
  648. else {
  649. if (p->unlock_request != VM_PROT_NONE)
  650. p->unlock_request = VM_PROT_NONE;
  651. PAGE_WAKEUP(p);
  652. }
  653. p = next;
  654. }
  655. /*
  656. * Wake up threads waiting for the memory object to
  657. * become ready.
  658. */
  659. object->pager_ready = TRUE;
  660. vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
  661. }
  662. /*
  663. * Routine: memory_object_destroy [user interface]
  664. * Purpose:
  665. * Shut down a memory object, despite the
  666. * presence of address map (or other) references
  667. * to the vm_object.
  668. * Note:
  669. * This routine may be called either from the user interface,
  670. * or from port destruction handling (via vm_object_destroy).
  671. */
  672. kern_return_t memory_object_destroy(
  673. vm_object_t object,
  674. kern_return_t reason)
  675. {
  676. ipc_port_t old_object, old_name;
  677. pager_request_t old_control;
  678. if (object == VM_OBJECT_NULL)
  679. return KERN_SUCCESS;
  680. /*
  681. * Remove the port associations immediately.
  682. *
  683. * This will prevent the memory manager from further
  684. * meddling. [If it wanted to flush data or make
  685. * other changes, it should have done so before performing
  686. * the destroy call.]
  687. */
  688. vm_object_cache_lock();
  689. vm_object_lock(object);
  690. vm_object_remove(object);
  691. object->can_persist = FALSE;
  692. vm_object_cache_unlock();
  693. /*
  694. * Rip out the ports from the vm_object now... this
  695. * will prevent new memory_object calls from succeeding.
  696. */
  697. old_object = object->pager;
  698. object->pager = IP_NULL;
  699. old_control = object->pager_request;
  700. object->pager_request = PAGER_REQUEST_NULL;
  701. old_name = object->pager_name;
  702. object->pager_name = IP_NULL;
  703. /*
  704. * Wait for existing paging activity (that might
  705. * have the old ports) to subside.
  706. */
  707. vm_object_paging_wait(object, FALSE);
  708. vm_object_unlock(object);
  709. /*
  710. * Shut down the ports now.
  711. *
  712. * [Paging operations may be proceeding concurrently --
  713. * they'll get the null values established above.]
  714. */
  715. if (old_object != IP_NULL) {
  716. /* consumes our rights for object, control, name */
  717. memory_object_release(old_object, old_control,
  718. old_name);
  719. } else if (old_name != IP_NULL) {
  720. /* consumes our right for name */
  721. ipc_port_dealloc_kernel(old_name);
  722. }
  723. /*
  724. * Lose the reference that was donated for this routine
  725. */
  726. vm_object_deallocate(object);
  727. return KERN_SUCCESS;
  728. }
  729. /*
  730. * Routine: vm_object_pmap_protect
  731. *
  732. * Purpose:
  733. * Reduces the permission for all physical
  734. * pages in the specified object range.
  735. *
  736. * If removing write permission only, it is
  737. * sufficient to protect only the pages in
  738. * the top-level object; only those pages may
  739. * have write permission.
  740. *
  741. * If removing all access, we must follow the
  742. * shadow chain from the top-level object to
  743. * remove access to all pages in shadowed objects.
  744. *
  745. * The object must *not* be locked. The object must
  746. * be temporary/internal.
  747. *
  748. * If pmap is not NULL, this routine assumes that
  749. * the only mappings for the pages are in that
  750. * pmap.
  751. */
  752. boolean_t vm_object_pmap_protect_by_page = FALSE;
  753. void vm_object_pmap_protect(
  754. vm_object_t object,
  755. vm_offset_t offset,
  756. vm_size_t size,
  757. pmap_t pmap,
  758. vm_offset_t pmap_start,
  759. vm_prot_t prot)
  760. {
  761. if (object == VM_OBJECT_NULL)
  762. return;
  763. vm_object_lock(object);
  764. assert(object->temporary && object->internal);
  765. while (TRUE) {
  766. if (object->resident_page_count > atop(size) / 2 &&
  767. pmap != PMAP_NULL) {
  768. vm_object_unlock(object);
  769. pmap_protect(pmap, pmap_start, pmap_start + size, prot);
  770. return;
  771. }
  772. {
  773. vm_page_t p;
  774. vm_offset_t end;
  775. end = offset + size;
  776. queue_iterate(&object->memq, p, vm_page_t, listq) {
  777. if (!p->fictitious &&
  778. (offset <= p->offset) &&
  779. (p->offset < end)) {
  780. if ((pmap == PMAP_NULL) ||
  781. vm_object_pmap_protect_by_page) {
  782. pmap_page_protect(p->phys_addr,
  783. prot & ~p->page_lock);
  784. } else {
  785. vm_offset_t start =
  786. pmap_start +
  787. (p->offset - offset);
  788. pmap_protect(pmap,
  789. start,
  790. start + PAGE_SIZE,
  791. prot);
  792. }
  793. }
  794. }
  795. }
  796. if (prot == VM_PROT_NONE) {
  797. /*
  798. * Must follow shadow chain to remove access
  799. * to pages in shadowed objects.
  800. */
  801. vm_object_t next_object;
  802. next_object = object->shadow;
  803. if (next_object != VM_OBJECT_NULL) {
  804. offset += object->shadow_offset;
  805. vm_object_lock(next_object);
  806. vm_object_unlock(object);
  807. object = next_object;
  808. }
  809. else {
  810. /*
  811. * End of chain - we are done.
  812. */
  813. break;
  814. }
  815. }
  816. else {
  817. /*
  818. * Pages in shadowed objects may never have
  819. * write permission - we may stop here.
  820. */
  821. break;
  822. }
  823. }
  824. vm_object_unlock(object);
  825. }
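/*
 * Illustrative sketch (not part of the original file): the kind of call
 * the map layer makes when it marks a temporary object copy-on-write --
 * strip write permission from the already-resident pages, letting the
 * known pmap be used for the cheap case.  The "entry" fields named in
 * the comments are hypothetical stand-ins for a vm_map_entry.
 */
#if 0	/* documentation only */
static void
vm_object_cow_protect_sketch(
	pmap_t		pmap,
	vm_object_t	object,		/* entry->object.vm_object */
	vm_offset_t	offset,		/* entry->offset */
	vm_offset_t	start,		/* entry->vme_start */
	vm_offset_t	end,		/* entry->vme_end */
	vm_prot_t	protection)	/* entry->protection */
{
	vm_object_pmap_protect(object, offset, end - start,
			       pmap, start,
			       protection & ~VM_PROT_WRITE);
}
#endif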
  826. /*
  827. * vm_object_pmap_remove:
  828. *
  829. * Removes all physical pages in the specified
  830. * object range from all physical maps.
  831. *
  832. * The object must *not* be locked.
  833. */
  834. void vm_object_pmap_remove(
  835. vm_object_t object,
  836. vm_offset_t start,
  837. vm_offset_t end)
  838. {
  839. vm_page_t p;
  840. if (object == VM_OBJECT_NULL)
  841. return;
  842. vm_object_lock(object);
  843. queue_iterate(&object->memq, p, vm_page_t, listq) {
  844. if (!p->fictitious &&
  845. (start <= p->offset) &&
  846. (p->offset < end))
  847. pmap_page_protect(p->phys_addr, VM_PROT_NONE);
  848. }
  849. vm_object_unlock(object);
  850. }
  851. /*
  852. * Routine: vm_object_copy_slowly
  853. *
  854. * Description:
  855. * Copy the specified range of the source
  856. * virtual memory object without using
  857. * protection-based optimizations (such
  858. * as copy-on-write). The pages in the
  859. * region are actually copied.
  860. *
  861. * In/out conditions:
  862. * The caller must hold a reference and a lock
  863. * for the source virtual memory object. The source
  864. * object will be returned *unlocked*.
  865. *
  866. * Results:
  867. * If the copy is completed successfully, KERN_SUCCESS is
  868. * returned. If the caller asserted the interruptible
  869. * argument, and an interruption occurred while waiting
  870. * for a user-generated event, MACH_SEND_INTERRUPTED is
  871. * returned. Other values may be returned to indicate
  872. * hard errors during the copy operation.
  873. *
  874. * A new virtual memory object is returned in a
  875. * parameter (_result_object). The contents of this
  876. * new object, starting at a zero offset, are a copy
  877. * of the source memory region. In the event of
  878. * an error, this parameter will contain the value
  879. * VM_OBJECT_NULL.
  880. */
  881. kern_return_t vm_object_copy_slowly(
  882. vm_object_t src_object,
  883. vm_offset_t src_offset,
  884. vm_size_t size,
  885. boolean_t interruptible,
  886. vm_object_t *_result_object) /* OUT */
  887. {
  888. vm_object_t new_object;
  889. vm_offset_t new_offset;
  890. if (size == 0) {
  891. vm_object_unlock(src_object);
  892. *_result_object = VM_OBJECT_NULL;
  893. return KERN_INVALID_ARGUMENT;
  894. }
  895. /*
  896. * Prevent destruction of the source object while we copy.
  897. */
  898. assert(src_object->ref_count > 0);
  899. src_object->ref_count++;
  900. vm_object_unlock(src_object);
  901. /*
  902. * Create a new object to hold the copied pages.
  903. * A few notes:
  904. * We fill the new object starting at offset 0,
  905. * regardless of the input offset.
  906. * We don't bother to lock the new object within
  907. * this routine, since we have the only reference.
  908. */
  909. new_object = vm_object_allocate(size);
  910. new_offset = 0;
  911. assert(size == trunc_page(size)); /* Will the loop terminate? */
  912. for ( ;
  913. size != 0 ;
  914. src_offset += PAGE_SIZE, new_offset += PAGE_SIZE, size -= PAGE_SIZE
  915. ) {
  916. vm_page_t new_page;
  917. vm_fault_return_t result;
  918. while ((new_page = vm_page_alloc(new_object, new_offset))
  919. == VM_PAGE_NULL) {
  920. VM_PAGE_WAIT((void (*)()) 0);
  921. }
  922. do {
  923. vm_prot_t prot = VM_PROT_READ;
  924. vm_page_t _result_page;
  925. vm_page_t top_page;
  926. vm_page_t result_page;
  927. vm_object_lock(src_object);
  928. src_object->paging_in_progress++;
  929. result = vm_fault_page(src_object, src_offset,
  930. VM_PROT_READ, FALSE, interruptible,
  931. &prot, &_result_page, &top_page,
  932. FALSE, (void (*)()) 0);
  933. switch(result) {
  934. case VM_FAULT_SUCCESS:
  935. result_page = _result_page;
  936. /*
  937. * We don't need to hold the object
  938. * lock -- the busy page will be enough.
  939. * [We don't care about picking up any
  940. * new modifications.]
  941. *
  942. * Copy the page to the new object.
  943. *
  944. * POLICY DECISION:
  945. * If result_page is clean,
  946. * we could steal it instead
  947. * of copying.
  948. */
  949. vm_object_unlock(result_page->object);
  950. vm_page_copy(result_page, new_page);
  951. /*
  952. * Let go of both pages (make them
  953. * not busy, perform wakeup, activate).
  954. */
  955. new_page->busy = FALSE;
  956. new_page->dirty = TRUE;
  957. vm_object_lock(result_page->object);
  958. PAGE_WAKEUP_DONE(result_page);
  959. vm_page_lock_queues();
  960. if (!result_page->active &&
  961. !result_page->inactive)
  962. vm_page_activate(result_page);
  963. vm_page_activate(new_page);
  964. vm_page_unlock_queues();
  965. /*
  966. * Release paging references and
  967. * top-level placeholder page, if any.
  968. */
  969. vm_fault_cleanup(result_page->object,
  970. top_page);
  971. break;
  972. case VM_FAULT_RETRY:
  973. break;
  974. case VM_FAULT_MEMORY_SHORTAGE:
  975. VM_PAGE_WAIT((void (*)()) 0);
  976. break;
  977. case VM_FAULT_FICTITIOUS_SHORTAGE:
  978. vm_page_more_fictitious();
  979. break;
  980. case VM_FAULT_INTERRUPTED:
  981. vm_page_free(new_page);
  982. vm_object_deallocate(new_object);
  983. vm_object_deallocate(src_object);
  984. *_result_object = VM_OBJECT_NULL;
  985. return MACH_SEND_INTERRUPTED;
  986. case VM_FAULT_MEMORY_ERROR:
  987. /*
  988. * A policy choice:
  989. * (a) ignore pages that we can't
  990. * copy
  991. * (b) return the null object if
  992. * any page fails [chosen]
  993. */
  994. vm_page_free(new_page);
  995. vm_object_deallocate(new_object);
  996. vm_object_deallocate(src_object);
  997. *_result_object = VM_OBJECT_NULL;
  998. return KERN_MEMORY_ERROR;
  999. }
  1000. } while (result != VM_FAULT_SUCCESS);
  1001. }
  1002. /*
  1003. * Lose the extra reference, and return our object.
  1004. */
  1005. vm_object_deallocate(src_object);
  1006. *_result_object = new_object;
  1007. return KERN_SUCCESS;
  1008. }
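/*
 * Illustrative sketch (not part of the original file): satisfying the
 * in/out conditions documented above.  The caller holds a reference and
 * the lock on the source object; vm_object_copy_slowly() returns with
 * the source unlocked and the caller's reference still intact, so the
 * caller remains responsible for that reference afterwards.
 */
#if 0	/* documentation only */
static kern_return_t
copy_region_sketch(
	vm_object_t	src_object,	/* caller already holds a reference */
	vm_offset_t	src_offset,
	vm_size_t	size,
	vm_object_t	*copy)		/* OUT */
{
	kern_return_t	kr;

	vm_object_lock(src_object);
	kr = vm_object_copy_slowly(src_object, src_offset, size,
				   FALSE,	/* not interruptible */
				   copy);
	/* src_object is unlocked here, whatever the result. */
	return kr;
}
#endif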
  1009. /*
  1010. * Routine: vm_object_copy_temporary
  1011. *
  1012. * Purpose:
  1013. * Copy the specified range of the source virtual
  1014. * memory object, if it can be done without blocking.
  1015. *
  1016. * Results:
  1017. * If the copy is successful, the copy is returned in
  1018. * the arguments; otherwise, the arguments are not
  1019. * affected.
  1020. *
  1021. * In/out conditions:
  1022. * The object should be unlocked on entry and exit.
  1023. */
  1024. boolean_t vm_object_copy_temporary(
  1025. vm_object_t *_object, /* INOUT */
  1026. vm_offset_t *_offset, /* INOUT */
  1027. boolean_t *_src_needs_copy, /* OUT */
  1028. boolean_t *_dst_needs_copy) /* OUT */
  1029. {
  1030. vm_object_t object = *_object;
  1031. if (object == VM_OBJECT_NULL) {
  1032. *_src_needs_copy = FALSE;
  1033. *_dst_needs_copy = FALSE;
  1034. return TRUE;
  1035. }
  1036. /*
  1037. * If the object is temporary, we can perform
  1038. * a symmetric copy-on-write without asking.
  1039. */
  1040. vm_object_lock(object);
  1041. if (object->temporary) {
  1042. /*
  1043. * Shared objects use delayed copy
  1044. */
  1045. if (object->use_shared_copy) {
  1046. /*
  1047. * Asymmetric copy strategy. Destination
  1048. * must be copied (to allow copy object reuse).
  1049. * Source is unaffected.
  1050. */
  1051. vm_object_unlock(object);
  1052. object = vm_object_copy_delayed(object);
  1053. *_object = object;
  1054. *_src_needs_copy = FALSE;
  1055. *_dst_needs_copy = TRUE;
  1056. return TRUE;
  1057. }
  1058. /*
  1059. * Make another reference to the object.
  1060. *
  1061. * Leave object/offset unchanged.
  1062. */
  1063. assert(object->ref_count > 0);
  1064. object->ref_count++;
  1065. object->shadowed = TRUE;
  1066. vm_object_unlock(object);
  1067. /*
  1068. * Both source and destination must make
  1069. * shadows, and the source must be made
  1070. * read-only if not already.
  1071. */
  1072. *_src_needs_copy = TRUE;
  1073. *_dst_needs_copy = TRUE;
  1074. return TRUE;
  1075. }
  1076. if (object->pager_ready &&
  1077. (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY)) {
  1078. /* XXX Do something intelligent (see temporary code above) */
  1079. }
  1080. vm_object_unlock(object);
  1081. return FALSE;
  1082. }
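/*
 * Illustrative sketch (not part of the original file): what a caller is
 * expected to do with the two "needs copy" results.  The destination
 * flag is recorded on the new mapping; if the source also needs a copy,
 * its existing mapping must be marked copy-on-write (not shown) and its
 * resident pages write-protected.  The parameters below are hypothetical.
 */
#if 0	/* documentation only */
static void
use_copy_temporary_sketch(
	vm_object_t	*object,	/* INOUT: mapping's object */
	vm_offset_t	*offset,	/* INOUT: mapping's offset */
	vm_size_t	size,
	pmap_t		pmap,
	vm_offset_t	start,
	boolean_t	*entry_needs_copy)	/* hypothetical map-entry flag */
{
	boolean_t src_needs_copy, dst_needs_copy;

	if (vm_object_copy_temporary(object, offset,
				     &src_needs_copy, &dst_needs_copy)) {
		*entry_needs_copy = dst_needs_copy;
		if (src_needs_copy)
			/* make the existing mapping read-only;
			 * the source entry would also be marked needs_copy */
			vm_object_pmap_protect(*object, *offset, size,
					       pmap, start,
					       VM_PROT_READ);
	}
}
#endif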
  1083. /*
  1084. * Routine: vm_object_copy_call [internal]
  1085. *
  1086. * Description:
  1087. * Copy the specified (src_offset, size) portion
  1088. * of the source object (src_object), using the
  1089. * user-managed copy algorithm.
  1090. *
  1091. * In/out conditions:
  1092. * The source object must be locked on entry. It
  1093. * will be *unlocked* on exit.
  1094. *
  1095. * Results:
  1096. * If the copy is successful, KERN_SUCCESS is returned.
  1097. * This routine is interruptible; if a wait for
  1098. * a user-generated event is interrupted, MACH_SEND_INTERRUPTED
  1099. * is returned. Other return values indicate hard errors
  1100. * in creating the user-managed memory object for the copy.
  1101. *
  1102. * A new object that represents the copied virtual
  1103. * memory is returned in a parameter (*_result_object).
  1104. * If the return value indicates an error, this parameter
  1105. * is not valid.
  1106. */
  1107. kern_return_t vm_object_copy_call(
  1108. vm_object_t src_object,
  1109. vm_offset_t src_offset,
  1110. vm_size_t size,
  1111. vm_object_t *_result_object) /* OUT */
  1112. {
  1113. vm_offset_t src_end = src_offset + size;
  1114. ipc_port_t new_memory_object;
  1115. vm_object_t new_object;
  1116. vm_page_t p;
  1117. /*
  1118. * Create a memory object port to be associated
  1119. * with this new vm_object.
  1120. *
  1121. * Since the kernel has the only rights to this
  1122. * port, we need not hold the cache lock.
  1123. *
  1124. * Since we have the only object reference, we
  1125. * need not be worried about collapse operations.
  1126. *
  1127. */
  1128. new_memory_object = ipc_port_alloc_kernel();
  1129. if (new_memory_object == IP_NULL)
  1130. return KERN_RESOURCE_SHORTAGE;
  1131. /*
  1132. * Set the backing object for the new
  1133. * temporary object.
  1134. */
  1135. assert(src_object->ref_count > 0);
  1136. src_object->ref_count++;
  1137. vm_object_paging_begin(src_object);
  1138. vm_object_unlock(src_object);
  1139. /* we hold a naked receive right for new_memory_object */
  1140. (void) ipc_port_make_send(new_memory_object);
  1141. /* now we also hold a naked send right for new_memory_object */
  1142. /*
  1143. * Let the memory manager know that a copy operation
  1144. * is in progress. Note that we're using the old
  1145. * memory object's ports (for which we're holding
  1146. * a paging reference)... the memory manager cannot
  1147. * yet affect the new memory object.
  1148. */
  1149. (void) memory_object_copy(src_object->pager,
  1150. src_object->pager_request,
  1151. src_offset, size,
  1152. new_memory_object);
  1153. /* no longer hold the naked receive right for new_memory_object */
  1154. vm_object_lock(src_object);
  1155. vm_object_paging_end(src_object);
  1156. /*
  1157. * Remove write access from all of the pages of
  1158. * the old memory object that we can.
  1159. */
  1160. queue_iterate(&src_object->memq, p, vm_page_t, listq) {
  1161. if (!p->fictitious &&
  1162. (src_offset <= p->offset) &&
  1163. (p->offset < src_end) &&
  1164. !(p->page_lock & VM_PROT_WRITE)) {
  1165. p->page_lock |= VM_PROT_WRITE;
  1166. pmap_page_protect(p->phys_addr, VM_PROT_ALL & ~p->page_lock);
  1167. }
  1168. }
  1169. vm_object_unlock(src_object);
  1170. /*
  1171. * Initialize the rest of the paging stuff
  1172. */
  1173. new_object = vm_object_enter(new_memory_object, size, FALSE);
  1174. new_object->shadow = src_object;
  1175. new_object->shadow_offset = src_offset;
  1176. /*
  1177. * Drop the reference for new_memory_object taken above.
  1178. */
  1179. ipc_port_release_send(new_memory_object);
  1180. /* no longer hold the naked send right for new_memory_object */
  1181. *_result_object = new_object;
  1182. return KERN_SUCCESS;
  1183. }
  1184. /*
  1185. * Routine: vm_object_copy_delayed [internal]
  1186. *
  1187. * Description:
  1188. * Copy the specified virtual memory object, using
  1189. * the asymmetric copy-on-write algorithm.
  1190. *
  1191. * In/out conditions:
  1192. * The object must be unlocked on entry.
  1193. *
  1194. * This routine will not block waiting for user-generated
  1195. * events. It is not interruptible.
  1196. */
  1197. vm_object_t vm_object_copy_delayed(
  1198. vm_object_t src_object)
  1199. {
  1200. vm_object_t new_copy;
  1201. vm_object_t old_copy;
  1202. vm_page_t p;
  1203. /*
  1204. * The user-level memory manager wants to see
  1205. * all of the changes to this object, but it
  1206. * has promised not to make any changes on its own.
  1207. *
  1208. * Perform an asymmetric copy-on-write, as follows:
  1209. * Create a new object, called a "copy object"
  1210. * to hold pages modified by the new mapping
  1211. * (i.e., the copy, not the original mapping).
  1212. * Record the original object as the backing
  1213. * object for the copy object. If the
  1214. * original mapping does not change a page,
  1215. * it may be used read-only by the copy.
  1216. * Record the copy object in the original
  1217. * object. When the original mapping causes
  1218. * a page to be modified, it must be copied
  1219. * to a new page that is "pushed" to the
  1220. * copy object.
  1221. * Mark the new mapping (the copy object)
  1222. * copy-on-write. This makes the copy
  1223. * object itself read-only, allowing it
  1224. * to be reused if the original mapping
  1225. * makes no changes, and simplifying the
  1226. * synchronization required in the "push"
  1227. * operation described above.
  1228. *
  1229. * The copy-on-write is said to be asymmetric because
  1230. * the original object is *not* marked copy-on-write.
  1231. * A copied page is pushed to the copy object, regardless
  1232. * of which party attempted to modify the page.
  1233. *
  1234. * Repeated asymmetric copy operations may be done.
  1235. * If the original object has not been changed since
  1236. * the last copy, its copy object can be reused.
  1237. * Otherwise, a new copy object can be inserted
  1238. * between the original object and its previous
  1239. * copy object. Since any copy object is read-only,
  1240. * this cannot affect the contents of the previous copy
  1241. * object.
  1242. *
  1243. * Note that a copy object is higher in the object
  1244. * tree than the original object; therefore, use of
  1245. * the copy object recorded in the original object
  1246. * must be done carefully, to avoid deadlock.
  1247. */
  1248. /*
  1249. * Allocate a new copy object before locking, even
  1250. * though we may not need it later.
  1251. */
  1252. new_copy = vm_object_allocate(src_object->size);
  1253. vm_object_lock(src_object);
  1254. /*
  1255. * See whether we can reuse the result of a previous
  1256. * copy operation.
  1257. */
  1258. Retry:
  1259. old_copy = src_object->copy;
  1260. if (old_copy != VM_OBJECT_NULL) {
  1261. /*
  1262. * Try to get the locks (out of order)
  1263. */
  1264. if (!vm_object_lock_try(old_copy)) {
  1265. vm_object_unlock(src_object);
  1266. simple_lock_pause(); /* wait a bit */
  1267. vm_object_lock(src_object);
  1268. goto Retry;
  1269. }
  1270. /*
  1271. * Determine whether the old copy object has
  1272. * been modified.
  1273. */
  1274. if (old_copy->resident_page_count == 0 &&
  1275. !old_copy->pager_created) {
  1276. /*
  1277. * It has not been modified.
  1278. *
  1279. * Return another reference to
  1280. * the existing copy-object.
  1281. */
  1282. assert(old_copy->ref_count > 0);
  1283. old_copy->ref_count++;
  1284. vm_object_unlock(old_copy);
  1285. vm_object_unlock(src_object);
  1286. vm_object_deallocate(new_copy);
  1287. return old_copy;
  1288. }
  1289. /*
  1290. * The copy-object is always made large enough to
  1291. * completely shadow the original object, since
  1292. * it may have several users who want to shadow
  1293. * the original object at different points.
  1294. */
  1295. assert((old_copy->shadow == src_object) &&
  1296. (old_copy->shadow_offset == (vm_offset_t) 0));
  1297. /*
  1298. * Make the old copy-object shadow the new one.
  1299. * It will receive no more pages from the original
  1300. * object.
  1301. */
  1302. src_object->ref_count--; /* remove ref. from old_copy */
  1303. assert(src_object->ref_count > 0);
  1304. old_copy->shadow = new_copy;
  1305. assert(new_copy->ref_count > 0);
  1306. new_copy->ref_count++;
  1307. vm_object_unlock(old_copy); /* done with old_copy */
  1308. }
  1309. /*
  1310. * Point the new copy at the existing object.
  1311. */
  1312. new_copy->shadow = src_object;
  1313. new_copy->shadow_offset = 0;
  1314. new_copy->shadowed = TRUE; /* caller must set needs_copy */
  1315. assert(src_object->ref_count > 0);
  1316. src_object->ref_count++;
  1317. src_object->copy = new_copy;
  1318. /*
  1319. * Mark all pages of the existing object copy-on-write.
  1320. * This object may have a shadow chain below it, but
  1321. * those pages will already be marked copy-on-write.
  1322. */
  1323. queue_iterate(&src_object->memq, p, vm_page_t, listq) {
  1324. if (!p->fictitious)
  1325. pmap_page_protect(p->phys_addr,
  1326. (VM_PROT_ALL & ~VM_PROT_WRITE &
  1327. ~p->page_lock));
  1328. }
  1329. vm_object_unlock(src_object);
  1330. return new_copy;
  1331. }
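/*
 * Illustrative sketch (not part of the original file): the reuse
 * property described in the comment inside vm_object_copy_delayed().
 * If nothing has been pushed into the previous copy object (its
 * resident_page_count is still 0 and no pager has been created), the
 * next delayed copy simply hands out the same copy object again with an
 * extra reference.
 */
#if 0	/* documentation only */
static void
delayed_copy_reuse_sketch(vm_object_t src_object)
{
	vm_object_t first  = vm_object_copy_delayed(src_object);
	vm_object_t second = vm_object_copy_delayed(src_object);

	/*
	 * With no pages pushed into "first" between the two calls,
	 * both calls return the same copy object.
	 */
	assert(first == second);

	vm_object_deallocate(second);
	vm_object_deallocate(first);
}
#endif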
/*
 *	Routine:	vm_object_copy_strategically
 *
 *	Purpose:
 *		Perform a copy according to the source object's
 *		declared strategy.  This operation may block,
 *		and may be interrupted.
 */
kern_return_t vm_object_copy_strategically(
	vm_object_t	src_object,
	vm_offset_t	src_offset,
	vm_size_t	size,
	vm_object_t	*dst_object,	/* OUT */
	vm_offset_t	*dst_offset,	/* OUT */
	boolean_t	*dst_needs_copy) /* OUT */
{
	kern_return_t	result = KERN_SUCCESS;	/* to quiet gcc warnings */
	boolean_t	interruptible = TRUE;	/* XXX */

	assert(src_object != VM_OBJECT_NULL);

	vm_object_lock(src_object);

	/* XXX assert(!src_object->temporary);  JSB FIXME */

	/*
	 *	The copy strategy is only valid if the memory manager
	 *	is "ready".
	 */
	while (!src_object->pager_ready) {
		vm_object_wait(src_object,
			       VM_OBJECT_EVENT_PAGER_READY,
			       interruptible);
		if (interruptible &&
		    (current_thread()->wait_result != THREAD_AWAKENED)) {
			*dst_object = VM_OBJECT_NULL;
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
			return MACH_SEND_INTERRUPTED;
		}
		vm_object_lock(src_object);
	}

	/*
	 *	The object may be temporary (even though it is external).
	 *	If so, do a symmetric copy.
	 */
	if (src_object->temporary) {
		/*
		 *	XXX
		 *	This does not count as intelligent!
		 *	This buys us the object->temporary optimizations,
		 *	but we aren't using a symmetric copy,
		 *	which may confuse the vm code.  The correct thing
		 *	to do here is to figure out what to call to get
		 *	a temporary shadowing set up.
		 */
		src_object->copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	}

	/*
	 *	The object is permanent.  Use the appropriate copy strategy.
	 */
	switch (src_object->copy_strategy) {
	case MEMORY_OBJECT_COPY_NONE:
		if ((result = vm_object_copy_slowly(
					src_object,
					src_offset,
					size,
					interruptible,
					dst_object))
		     == KERN_SUCCESS) {
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
		}
		break;

	case MEMORY_OBJECT_COPY_CALL:
		if ((result = vm_object_copy_call(
					src_object,
					src_offset,
					size,
					dst_object))
		     == KERN_SUCCESS) {
			*dst_offset = 0;
			*dst_needs_copy = FALSE;
		}
		break;

	case MEMORY_OBJECT_COPY_DELAY:
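		/*
		 *	Delayed (copy-on-write) copy: the object returned
		 *	by vm_object_copy_delayed still aliases the source,
		 *	so the caller is told that it must mark its own
		 *	mapping needs_copy.
		 */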
		vm_object_unlock(src_object);
		*dst_object = vm_object_copy_delayed(src_object);
		*dst_offset = src_offset;
		*dst_needs_copy = TRUE;
		result = KERN_SUCCESS;
		break;
	}

	return result;
}
/*
 *	vm_object_shadow:
 *
 *	Create a new object which is backed by the
 *	specified existing object range.  The source
 *	object reference is deallocated.
 *
 *	The new object and offset into that object
 *	are returned in the source parameters.
 */
void vm_object_shadow(
	vm_object_t	*object,	/* IN/OUT */
	vm_offset_t	*offset,	/* IN/OUT */
	vm_size_t	length)
{
	vm_object_t	source;
	vm_object_t	result;

	source = *object;

	/*
	 *	Allocate a new object with the given length
	 */
	if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL)
		panic("vm_object_shadow: no object for shadowing");

	/*
	 *	The new object shadows the source object, adding
	 *	a reference to it.  Our caller changes his reference
	 *	to point to the new object, removing a reference to
	 *	the source object.  Net result: no change of reference
	 *	count.
	 */
	result->shadow = source;

	/*
	 *	Store the offset into the source object,
	 *	and fix up the offset into the new object.
	 */
	result->shadow_offset = *offset;

	/*
	 *	Return the new things
	 */
	*offset = 0;
	*object = result;
}
/*
 *	The relationship between vm_object structures and
 *	the memory_object ports requires careful synchronization.
 *
 *	All associations are created by vm_object_enter.  All three
 *	port fields are filled in, as follows:
 *		pager:	the memory_object port itself, supplied by
 *			the user requesting a mapping (or the kernel,
 *			when initializing internal objects); the
 *			kernel simulates holding send rights by keeping
 *			a port reference;
 *		pager_request:
 *		pager_name:
 *			the memory object control and name ports,
 *			created by the kernel; the kernel holds
 *			receive (and ownership) rights to these
 *			ports, but no other references.
 *	All of the ports are referenced by their global names.
 *
 *	When initialization is complete, the "initialized" field
 *	is asserted.  Other mappings using a particular memory object,
 *	and any references to the vm_object gained through the
 *	port association must wait for this initialization to occur.
 *
 *	In order to allow the memory manager to set attributes before
 *	requests (notably virtual copy operations, but also data or
 *	unlock requests) are made, a "ready" attribute is made available.
 *	Only the memory manager may affect the value of this attribute.
 *	Its value does not affect critical kernel functions, such as
 *	internal object initialization or destruction.  [Furthermore,
 *	memory objects created by the kernel are assumed to be ready
 *	immediately; the default memory manager need not explicitly
 *	set the "ready" attribute.]
 *
 *	[Both the "initialized" and "ready" attribute wait conditions
 *	use the "pager" field as the wait event.]
 *
 *	The port associations can be broken down by any of the
 *	following routines:
 *		vm_object_terminate:
 *			No references to the vm_object remain, and
 *			the object cannot (or will not) be cached.
 *			This is the normal case, and is done even
 *			though one of the other cases has already been
 *			done.
 *		vm_object_destroy:
 *			The memory_object port has been destroyed,
 *			meaning that the kernel cannot flush dirty
 *			pages or request new data or unlock existing
 *			data.
 *		memory_object_destroy:
 *			The memory manager has requested that the
 *			kernel relinquish rights to the memory object
 *			port.  [The memory manager may not want to
 *			destroy the port, but may wish to refuse or
 *			tear down existing memory mappings.]
 *	Each routine that breaks an association must break all of
 *	them at once.  At some later time, that routine must clear
 *	the vm_object port fields and release the port rights.
 *	[Furthermore, each routine must cope with the simultaneous
 *	or previous operations of the others.]
 *
 *	In addition to the lock on the object, the vm_object_cache_lock
 *	governs the port associations.  References gained through the
 *	port association require use of the cache lock.
 *
 *	Because the port fields may be cleared spontaneously, they
 *	cannot be used to determine whether a memory object has
 *	ever been associated with a particular vm_object.  [This
 *	knowledge is important to the shadow object mechanism.]
 *	For this reason, an additional "created" attribute is
 *	provided.
 *
 *	During various paging operations, the port values found in the
 *	vm_object must be valid.  To prevent these port rights from being
 *	released, and to prevent the port associations from changing
 *	(other than being removed, i.e., made null), routines may use
 *	the vm_object_paging_begin/end routines [actually, macros].
 *	The implementation uses the "paging_in_progress" and "wanted" fields.
 *	[Operations that alter the validity of the port values include the
 *	termination routines and vm_object_collapse.]
 */
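/*
 *	Routine:	vm_object_lookup
 *	Purpose:
 *		Convert a send right to an object's paging-request
 *		(control) port into a reference to the vm_object
 *		itself.
 */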
vm_object_t vm_object_lookup(
	ipc_port_t	port)
{
	vm_object_t	object = VM_OBJECT_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);
		if (ip_active(port) &&
		    (ip_kotype(port) == IKOT_PAGING_REQUEST)) {
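			/*
			 *	Gaining a reference through the port
			 *	association requires the cache lock:
			 *	an object whose reference count has
			 *	dropped to zero may still be sitting
			 *	in the object cache, and must be pulled
			 *	out before it is handed back to a user.
			 */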
			vm_object_cache_lock();
			object = (vm_object_t) port->ip_kobject;
			vm_object_lock(object);

			assert(object->alive);

			if (object->ref_count == 0)
				vm_object_cache_remove(object);
			object->ref_count++;

			vm_object_unlock(object);
			vm_object_cache_unlock();
		}
		ip_unlock(port);
	}

	return object;
}
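/*
 *	Routine:	vm_object_lookup_name
 *	Purpose:
 *		Like vm_object_lookup, but starts from the object's
 *		name port (IKOT_PAGING_NAME) rather than its
 *		paging-request port.
 */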
vm_object_t vm_object_lookup_name(
	ipc_port_t	port)
{
	vm_object_t	object = VM_OBJECT_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);
		if (ip_active(port) &&
		    (ip_kotype(port) == IKOT_PAGING_NAME)) {
			vm_object_cache_lock();
			object = (vm_object_t) port->ip_kobject;
			vm_object_lock(object);

			assert(object->alive);

			if (object->ref_count == 0)
				vm_object_cache_remove(object);
			object->ref_count++;

			vm_object_unlock(object);
			vm_object_cache_unlock();
		}
		ip_unlock(port);
	}

	return object;
}
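/*
 *	Routine:	vm_object_destroy
 *	Purpose:
 *		The memory_object port for this object has died;
 *		break the port associations, prevent the object
 *		from being cached, and restart any pending page
 *		requests against it.
 */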
void vm_object_destroy(
	ipc_port_t	pager)
{
	vm_object_t	object;
	pager_request_t	old_request;
	ipc_port_t	old_name;

	/*
	 *	Perform essentially the same operations as in vm_object_lookup,
	 *	except that this time we look up based on the memory_object
	 *	port, not the control port.
	 */
	vm_object_cache_lock();
	if (ip_kotype(pager) != IKOT_PAGER) {
		vm_object_cache_unlock();
		return;
	}

	object = (vm_object_t) pager->ip_kobject;
	vm_object_lock(object);
	if (object->ref_count == 0)
		vm_object_cache_remove(object);
	object->ref_count++;

	object->can_persist = FALSE;

	assert(object->pager == pager);

	/*
	 *	Remove the port associations.
	 *
	 *	Note that the memory_object itself is dead, so
	 *	we don't bother with it.
	 */
	object->pager = IP_NULL;
	vm_object_remove(object);

	old_request = object->pager_request;
	object->pager_request = PAGER_REQUEST_NULL;

	old_name = object->pager_name;
	object->pager_name = IP_NULL;

	vm_object_unlock(object);
	vm_object_cache_unlock();

	/*
	 *	Clean up the port references.  Note that there's no
	 *	point in trying the memory_object_terminate call
	 *	because the memory_object itself is dead.
	 */
	ipc_port_release_send(pager);
	if (old_request != IP_NULL)
		ipc_port_dealloc_kernel(old_request);
	if (old_name != IP_NULL)
		ipc_port_dealloc_kernel(old_name);

	/*
	 *	Restart pending page requests
	 */
	vm_object_abort_activity(object);

	/*
	 *	Lose the object reference.
	 */
	vm_object_deallocate(object);
}
/*
 *	Routine:	vm_object_enter
 *	Purpose:
 *		Find a VM object corresponding to the given
 *		pager; if no such object exists, create one,
 *		and initialize the pager.
 */
vm_object_t vm_object_enter(
	ipc_port_t	pager,
	vm_size_t	size,
	boolean_t	internal)
{
	vm_object_t		object;
	vm_object_t		new_object;
	boolean_t		must_init;
	ipc_kobject_type_t	po;

restart:
	if (!IP_VALID(pager))
		return vm_object_allocate(size);

	new_object = VM_OBJECT_NULL;
	must_init = FALSE;

	/*
	 *	Look for an object associated with this port.
	 */
	vm_object_cache_lock();
	for (;;) {
		po = ip_kotype(pager);

		/*
		 *	If a previous object is being terminated,
		 *	we must wait for the termination message
		 *	to be queued.
		 *
		 *	We set kobject to a non-null value to let the
		 *	terminator know that someone is waiting.
		 *	Among the possibilities is that the port
		 *	could die while we're waiting.  Must restart
		 *	instead of continuing the loop.
		 */
		if (po == IKOT_PAGER_TERMINATING) {
			pager->ip_kobject = (ipc_kobject_t) pager;
			assert_wait((event_t) pager, FALSE);
			vm_object_cache_unlock();
			thread_block((void (*)()) 0);
			goto restart;
		}

		/*
		 *	Bail if there is already a kobject associated
		 *	with the pager port.
		 */
		if (po != IKOT_NONE) {
			break;
		}

		/*
		 *	We must unlock to create a new object;
		 *	if we do so, we must try the lookup again.
		 */
		if (new_object == VM_OBJECT_NULL) {
			vm_object_cache_unlock();
			new_object = vm_object_allocate(size);
			vm_object_cache_lock();
		} else {
			/*
			 *	Lookup failed twice, and we have something
			 *	to insert; set the object.
			 */
			ipc_kobject_set(pager,
					(ipc_kobject_t) new_object,
					IKOT_PAGER);
			new_object = VM_OBJECT_NULL;
			must_init = TRUE;
		}
	}
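	/*
	 *	Internal objects always take the initialization path
	 *	below: the kernel created the pager port itself (see
	 *	vm_object_pager_create), so finding an existing
	 *	association above does not mean the pager has been
	 *	initialized yet.
	 */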
	if (internal)
		must_init = TRUE;

	/*
	 *	It's only good if it's a VM object!
	 */
	object = (po == IKOT_PAGER) ? (vm_object_t) pager->ip_kobject
				    : VM_OBJECT_NULL;

	if ((object != VM_OBJECT_NULL) && !must_init) {
		vm_object_lock(object);
		if (object->ref_count == 0)
			vm_object_cache_remove(object);
		object->ref_count++;
		vm_object_unlock(object);

		vm_stat.hits++;
	}

	assert((object == VM_OBJECT_NULL) || (object->ref_count > 0) ||
	       ((object->paging_in_progress != 0) && internal));

	vm_stat.lookups++;

	vm_object_cache_unlock();

	/*
	 *	If we raced to create a vm_object but lost, let's
	 *	throw away ours.
	 */
	if (new_object != VM_OBJECT_NULL)
		vm_object_deallocate(new_object);

	if (object == VM_OBJECT_NULL)
		return object;

	if (must_init) {
		/*
		 *	Copy the naked send right we were given.
		 */
		pager = ipc_port_copy_send(pager);
		if (!IP_VALID(pager))
			panic("vm_object_enter: port died");	/* XXX */

		object->pager_created = TRUE;
		object->pager = pager;

		/*
		 *	Allocate request port.
		 */
		object->pager_request = ipc_port_alloc_kernel();
		if (object->pager_request == IP_NULL)
			panic("vm_object_enter: pager request alloc");

		ipc_kobject_set(object->pager_request,
				(ipc_kobject_t) object,
				IKOT_PAGING_REQUEST);

		/*
		 *	Let the pager know we're using it.
		 */
		if (internal) {
			/* acquire a naked send right for the DMM */
			ipc_port_t DMM = memory_manager_default_reference();

			/* mark the object internal */
			object->internal = TRUE;
			assert(object->temporary);

			/* default-pager objects are ready immediately */
			object->pager_ready = TRUE;

			/* consumes the naked send right for DMM */
			(void) memory_object_create(DMM,
						    pager,
						    object->size,
						    object->pager_request,
						    object->pager_name,
						    PAGE_SIZE);
		} else {
			/* the object is external and not temporary */
			object->internal = FALSE;
			object->temporary = FALSE;

			assert(object->resident_page_count == 0);
			vm_object_external_count++;

			/* user pager objects are not ready until marked so */
			object->pager_ready = FALSE;

			(void) memory_object_init(pager,
						  object->pager_request,
						  object->pager_name,
						  PAGE_SIZE);
		}

		vm_object_lock(object);
		object->pager_initialized = TRUE;

		vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
	} else {
		vm_object_lock(object);
	}

	/*
	 *	[At this point, the object must be locked]
	 */

	/*
	 *	Wait for the work above to be done by the first
	 *	thread to map this object.
	 */
	while (!object->pager_initialized) {
		vm_object_wait(object,
			       VM_OBJECT_EVENT_INITIALIZED,
			       FALSE);
		vm_object_lock(object);
	}
	vm_object_unlock(object);

	return object;
}
/*
 *	Routine:	vm_object_pager_create
 *	Purpose:
 *		Create a memory object for an internal object.
 *	In/out conditions:
 *		The object is locked on entry and exit;
 *		it may be unlocked within this call.
 *	Limitations:
 *		Only one thread may be performing a
 *		vm_object_pager_create on an object at
 *		a time.  Presumably, only the pageout
 *		daemon will be using this routine.
 */
void vm_object_pager_create(
	vm_object_t	object)
{
	ipc_port_t	pager;

	if (object->pager_created) {
		/*
		 *	Someone else got to it first...
		 *	wait for them to finish initializing
		 */
		while (!object->pager_initialized) {
			vm_object_wait(object,
				       VM_OBJECT_EVENT_PAGER_READY,
				       FALSE);
			vm_object_lock(object);
		}
		return;
	}

	/*
	 *	Indicate that a memory object has been assigned
	 *	before dropping the lock, to prevent a race.
	 */
	object->pager_created = TRUE;

	/*
	 *	Prevent collapse or termination by
	 *	holding a paging reference
	 */
	vm_object_paging_begin(object);
	vm_object_unlock(object);
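	/*
	 *	With MACH_PAGEMAP configured, allocate an existence
	 *	map covering the pager-visible range of the object
	 *	(its size plus the paging offset).
	 */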
#if	MACH_PAGEMAP
	object->existence_info = vm_external_create(
					object->size +
					object->paging_offset);
	assert((object->size + object->paging_offset) >=
	       object->size);
#endif	/* MACH_PAGEMAP */

	/*
	 *	Create the pager, and associate with it
	 *	this object.
	 *
	 *	Note that we only make the port association
	 *	so that vm_object_enter can properly look up
	 *	the object to complete the initialization...
	 *	we do not expect any user to ever map this
	 *	object.
	 *
	 *	Since the kernel has the only rights to the
	 *	port, it's safe to install the association
	 *	without holding the cache lock.
	 */
	pager = ipc_port_alloc_kernel();
	if (pager == IP_NULL)
		panic("vm_object_pager_create: allocate pager port");

	(void) ipc_port_make_send(pager);
	ipc_kobject_set(pager, (ipc_kobject_t) object, IKOT_PAGER);

	/*
	 *	Initialize the rest of the paging stuff
	 */
	if (vm_object_enter(pager, object->size, TRUE) != object)
		panic("vm_object_pager_create: mismatch");

	/*
	 *	Drop the naked send right taken above.
	 */
	ipc_port_release_send(pager);

	/*
	 *	Release the paging reference
	 */
	vm_object_lock(object);
	vm_object_paging_end(object);
}
/*
 *	Routine:	vm_object_remove
 *	Purpose:
 *		Eliminate the pager/object association
 *		for this pager.
 *	Conditions:
 *		The object cache must be locked.
 */
void vm_object_remove(
	vm_object_t	object)
{
	ipc_port_t	port;

	if ((port = object->pager) != IP_NULL) {
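		/*
		 *	Mark the pager port as terminating rather than
		 *	clearing it outright, so that a concurrent
		 *	vm_object_enter on the same port knows to wait
		 *	and then restart its lookup (see the
		 *	IKOT_PAGER_TERMINATING handling there).
		 */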
		if (ip_kotype(port) == IKOT_PAGER)
			ipc_kobject_set(port, IKO_NULL,
					IKOT_PAGER_TERMINATING);
		else if (ip_kotype(port) != IKOT_NONE)
			panic("vm_object_remove: bad object port");
	}
	if ((port = object->pager_request) != IP_NULL) {
		if (ip_kotype(port) == IKOT_PAGING_REQUEST)
			ipc_kobject_set(port, IKO_NULL, IKOT_NONE);
		else if (ip_kotype(port) != IKOT_NONE)
			panic("vm_object_remove: bad request port");
	}
	if ((port = object->pager_name) != IP_NULL) {
		if (ip_kotype(port) == IKOT_PAGING_NAME)
			ipc_kobject_set(port, IKO_NULL, IKOT_NONE);
		else if (ip_kotype(port) != IKOT_NONE)
			panic("vm_object_remove: bad name port");
	}
}
/*
 *	Global variables for vm_object_collapse():
 *
 *		Counts for normal collapses and bypasses.
 *		Debugging variables, to watch or disable collapse.
 */
long	object_collapses = 0;
long	object_bypasses  = 0;

int		vm_object_collapse_debug = 0;
boolean_t	vm_object_collapse_allowed = TRUE;
boolean_t	vm_object_collapse_bypass_allowed = TRUE;
/*
 *	vm_object_collapse:
 *
 *	Collapse an object with the object backing it.
 *	Pages in the backing object are moved into the
 *	parent, and the backing object is deallocated.
 *
 *	Requires that the object be locked and the page
 *	queues be unlocked.  May unlock/relock the object,
 *	so the caller should hold a reference for the object.
 */
void vm_object_collapse(
	vm_object_t	object)
{
	vm_object_t	backing_object;
	vm_offset_t	backing_offset;
	vm_size_t	size;
	vm_offset_t	new_offset;
	vm_page_t	p, pp;
	ipc_port_t	old_name_port;

	if (!vm_object_collapse_allowed)
		return;

	while (TRUE) {
		/*
		 *	Verify that the conditions are right for collapse:
		 *
		 *	The object exists and no pages in it are currently
		 *	being paged out (or have ever been paged out).
		 *
		 *	This check is probably overkill -- if a memory
		 *	object has not been created, the fault handler
		 *	shouldn't release the object lock while paging
		 *	is in progress or absent pages exist.
		 */
		if (object == VM_OBJECT_NULL ||
		    object->pager_created ||
		    object->paging_in_progress != 0 ||
		    object->absent_count != 0)
			return;

		/*
		 *		There is a backing object, and
		 */
		if ((backing_object = object->shadow) == VM_OBJECT_NULL)
			return;

		vm_object_lock(backing_object);

		/*
		 *	...
		 *		The backing object is not read_only,
		 *		and no pages in the backing object are
		 *		currently being paged out.
		 *		The backing object is internal.
		 *
		 *	XXX It may be sufficient for the backing
		 *	XXX object to be temporary.
		 */
		if (!backing_object->internal ||
		    backing_object->paging_in_progress != 0) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	The backing object can't be a copy-object:
		 *	the shadow_offset for the copy-object must stay
		 *	as 0.  Furthermore (for the 'we have all the
		 *	pages' case), if we bypass backing_object and
		 *	just shadow the next object in the chain, old
		 *	pages from that object would then have to be copied
		 *	BOTH into the (former) backing_object and into the
		 *	parent object.
		 */
		if (backing_object->shadow != VM_OBJECT_NULL &&
		    backing_object->shadow->copy != VM_OBJECT_NULL) {
			vm_object_unlock(backing_object);
			return;
		}

		/*
		 *	We know that we can either collapse the backing
		 *	object (if the parent is the only reference to
		 *	it) or (perhaps) remove the parent's reference
		 *	to it.
		 */
		backing_offset = object->shadow_offset;
		size = object->size;

		/*
		 *	If there is exactly one reference to the backing
		 *	object, we can collapse it into the parent.
		 */
		if (backing_object->ref_count == 1) {
			if (!vm_object_cache_lock_try()) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	We can collapse the backing object.
			 *
			 *	Move all in-memory pages from backing_object
			 *	to the parent.  Pages that have been paged out
			 *	will be overwritten by any of the parent's
			 *	pages that shadow them.
			 */
			while (!queue_empty(&backing_object->memq)) {
				p = (vm_page_t)
					queue_first(&backing_object->memq);

				new_offset = (p->offset - backing_offset);

				assert(!p->busy || p->absent);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	dispose of it.
				 *
				 *	Otherwise, move it as planned.
				 */
				if (p->offset < backing_offset ||
				    new_offset >= size) {
					VM_PAGE_FREE(p);
				} else {
					pp = vm_page_lookup(object, new_offset);
					if (pp != VM_PAGE_NULL && !pp->absent) {
						/*
						 *	Parent object has a real
						 *	page.  Throw away the
						 *	backing object's page.
						 */
						VM_PAGE_FREE(p);
					} else {
						assert(pp == VM_PAGE_NULL ||
						       !"vm_object_collapse: bad case");

						/*
						 *	Parent now has no page.
						 *	Move the backing object's
						 *	page up.
						 */
						vm_page_rename(p, object, new_offset);
					}
				}
			}

			/*
			 *	Move the pager from backing_object to object.
			 *
			 *	XXX We're only using part of the paging space
			 *	for keeps now... we ought to discard the
			 *	unused portion.
			 */
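			/*
			 *	Collapse debugging: at level 1 the move is
			 *	reported only when the backing object has
			 *	pager state worth noting; higher levels
			 *	always report, and levels above 2 also
			 *	drop into the kernel debugger.
			 */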
			switch (vm_object_collapse_debug) {
			case 0:
				break;
			case 1:
				if ((backing_object->pager == IP_NULL) &&
				    (backing_object->pager_request ==
				     PAGER_REQUEST_NULL))
					break;
				/* Fall through to... */
			default:
				printf("vm_object_collapse: %p (pager %p, request %p) up to %p\n",
				       backing_object, backing_object->pager,
				       backing_object->pager_request, object);
				if (vm_object_collapse_debug > 2)
					SoftDebugger("vm_object_collapse");
			}

			object->pager = backing_object->pager;
			if (object->pager != IP_NULL)
				ipc_kobject_set(object->pager,
						(ipc_kobject_t) object,
						IKOT_PAGER);
			object->pager_initialized = backing_object->pager_initialized;
			object->pager_ready = backing_object->pager_ready;
			object->pager_created = backing_object->pager_created;
			object->pager_request = backing_object->pager_request;
			if (object->pager_request != IP_NULL)
				ipc_kobject_set(object->pager_request,
						(ipc_kobject_t) object,
						IKOT_PAGING_REQUEST);
			old_name_port = object->pager_name;
			if (old_name_port != IP_NULL)
				ipc_kobject_set(old_name_port,
						IKO_NULL, IKOT_NONE);
			object->pager_name = backing_object->pager_name;
			if (object->pager_name != IP_NULL)
				ipc_kobject_set(object->pager_name,
						(ipc_kobject_t) object,
						IKOT_PAGING_NAME);

			vm_object_cache_unlock();

			/*
			 *	If there is no pager, leave paging-offset alone.
			 */
			if (object->pager != IP_NULL)
				object->paging_offset =
					backing_object->paging_offset +
						backing_offset;

#if	MACH_PAGEMAP
			assert(object->existence_info == VM_EXTERNAL_NULL);
			object->existence_info = backing_object->existence_info;
#endif	/* MACH_PAGEMAP */

			/*
			 *	Object now shadows whatever backing_object did.
			 *	Note that the reference to backing_object->shadow
			 *	moves from within backing_object to within object.
			 */
			object->shadow = backing_object->shadow;
			object->shadow_offset += backing_object->shadow_offset;
			if (object->shadow != VM_OBJECT_NULL &&
			    object->shadow->copy != VM_OBJECT_NULL) {
				panic("vm_object_collapse: we collapsed a copy-object!");
			}

			/*
			 *	Discard backing_object.
			 *
			 *	Since the backing object has no pages, no
			 *	pager left, and no object references within it,
			 *	all that is necessary is to dispose of it.
			 */
			assert(
				(backing_object->ref_count == 1) &&
				(backing_object->resident_page_count == 0) &&
				(backing_object->paging_in_progress == 0)
			);

			assert(backing_object->alive);
			assert(!backing_object->cached);
			backing_object->alive = FALSE;
			vm_object_unlock(backing_object);

			vm_object_unlock(object);
			if (old_name_port != IP_NULL)
				ipc_port_dealloc_kernel(old_name_port);
			kmem_cache_free(&vm_object_cache,
					(vm_offset_t) backing_object);
			vm_object_lock(object);

			object_collapses++;
		} else {
			if (!vm_object_collapse_bypass_allowed) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	If all of the pages in the backing object are
			 *	shadowed by the parent object, the parent
			 *	object no longer has to shadow the backing
			 *	object; it can shadow the next one in the
			 *	chain.
			 *
			 *	The backing object must not be paged out - we'd
			 *	have to check all of the paged-out pages, as
			 *	well.
			 */
			if (backing_object->pager_created) {
				vm_object_unlock(backing_object);
				return;
			}

			/*
			 *	Should have a check for a 'small' number
			 *	of pages here.
			 */
			queue_iterate(&backing_object->memq, p,
				      vm_page_t, listq)
			{
				new_offset = (p->offset - backing_offset);

				/*
				 *	If the parent has a page here, or if
				 *	this page falls outside the parent,
				 *	keep going.
				 *
				 *	Otherwise, the backing_object must be
				 *	left in the chain.
				 */
				if (p->offset >= backing_offset &&
				    new_offset <= size &&
				    (pp = vm_page_lookup(object, new_offset))
				      == VM_PAGE_NULL) {
					/*
					 *	Page still needed.
					 *	Can't go any further.
					 */
					vm_object_unlock(backing_object);
					return;
				}
			}

			/*
			 *	Make the parent shadow the next object
			 *	in the chain.  Deallocating backing_object
			 *	will not remove it, since its reference
			 *	count is at least 2.
			 */
			vm_object_reference(object->shadow = backing_object->shadow);
			object->shadow_offset += backing_object->shadow_offset;

			/*
			 *	Backing object might have had a copy pointer
			 *	to us.  If it did, clear it.
			 */
			if (backing_object->copy == object)
				backing_object->copy = VM_OBJECT_NULL;

			/*
			 *	Drop the reference count on backing_object.
			 *	Since its ref_count was at least 2, it
			 *	will not vanish; so we don't need to call
			 *	vm_object_deallocate.
			 */
			backing_object->ref_count--;
			assert(backing_object->ref_count > 0);
			vm_object_unlock(backing_object);

			object_bypasses++;
		}

		/*
		 *	Try again with this object's new backing object.
		 */
	}
}
/*
 *	Routine:	vm_object_page_remove: [internal]
 *	Purpose:
 *		Removes all physical pages in the specified
 *		object range from the object's list of pages.
 *
 *	In/out conditions:
 *		The object must be locked.
 */
unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;

void vm_object_page_remove(
	vm_object_t	object,
	vm_offset_t	start,
	vm_offset_t	end)
{
	vm_page_t	p, next;

	/*
	 *	One and two page removals are most popular.
	 *	The factor of 16 here is somewhat arbitrary.
	 *	It balances vm_object_lookup vs iteration.
	 */
	if (atop(end - start) < object->resident_page_count/16) {
		vm_object_page_remove_lookup++;

		for (; start < end; start += PAGE_SIZE) {
			p = vm_page_lookup(object, start);
			if (p != VM_PAGE_NULL) {
				if (!p->fictitious)
					pmap_page_protect(p->phys_addr,
							  VM_PROT_NONE);
				VM_PAGE_FREE(p);
			}
		}
	} else {
		vm_object_page_remove_iterate++;

		p = (vm_page_t) queue_first(&object->memq);
		while (!queue_end(&object->memq, (queue_entry_t) p)) {
			next = (vm_page_t) queue_next(&p->listq);
			if ((start <= p->offset) && (p->offset < end)) {
				if (!p->fictitious)
					pmap_page_protect(p->phys_addr,
							  VM_PROT_NONE);
				VM_PAGE_FREE(p);
			}
			p = next;
		}
	}
}
/*
 *	Routine:	vm_object_coalesce
 *	Function:	Coalesces two objects backing up adjoining
 *			regions of memory into a single object.
 *
 *	returns TRUE if objects were combined.
 *
 *	NOTE:	Only works at the moment if the second object is NULL -
 *		if it's not, which object do we lock first?
 *
 *	Parameters:
 *		prev_object	First object to coalesce
 *		prev_offset	Offset into prev_object
 *		next_object	Second object to coalesce
 *		next_offset	Offset into next_object
 *
 *		prev_size	Size of reference to prev_object
 *		next_size	Size of reference to next_object
 *
 *	Conditions:
 *		The object must *not* be locked.
 */
boolean_t vm_object_coalesce(
	vm_object_t	prev_object,
	vm_object_t	next_object,
	vm_offset_t	prev_offset,
	vm_offset_t	next_offset,
	vm_size_t	prev_size,
	vm_size_t	next_size)
{
	vm_size_t	newsize;

	if (next_object != VM_OBJECT_NULL) {
		return FALSE;
	}

	if (prev_object == VM_OBJECT_NULL) {
		return TRUE;
	}

	vm_object_lock(prev_object);

	/*
	 *	Try to collapse the object first
	 */
	vm_object_collapse(prev_object);

	/*
	 *	Can't coalesce if pages not mapped to
	 *	prev_entry may be in use anyway:
	 *	. more than one reference
	 *	. paged out
	 *	. shadows another object
	 *	. has a copy elsewhere
	 *	. paging references (pages might be in page-list)
	 */
	if ((prev_object->ref_count > 1) ||
	    prev_object->pager_created ||
	    prev_object->used_for_pageout ||
	    (prev_object->shadow != VM_OBJECT_NULL) ||
	    (prev_object->copy != VM_OBJECT_NULL) ||
	    (prev_object->paging_in_progress != 0)) {
		vm_object_unlock(prev_object);
		return FALSE;
	}

	/*
	 *	Remove any pages that may still be in the object from
	 *	a previous deallocation.
	 */
	vm_object_page_remove(prev_object,
			      prev_offset + prev_size,
			      prev_offset + prev_size + next_size);

	/*
	 *	Extend the object if necessary.
	 */
	newsize = prev_offset + prev_size + next_size;
	if (newsize > prev_object->size)
		prev_object->size = newsize;

	vm_object_unlock(prev_object);
	return TRUE;
}
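/*
 *	Routine:	vm_object_request_object
 *	Purpose:
 *		Convert a paging-request port into a vm_object
 *		reference; a thin wrapper around vm_object_lookup.
 */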
vm_object_t vm_object_request_object(
	ipc_port_t	p)
{
	return vm_object_lookup(p);
}

/*
 *	Routine:	vm_object_name
 *	Purpose:
 *		Returns a naked send right to the "name" port associated
 *		with this object.
 */
ipc_port_t vm_object_name(
	vm_object_t	object)
{
	ipc_port_t	p;

	if (object == VM_OBJECT_NULL)
		return IP_NULL;

	vm_object_lock(object);

	while (object->shadow != VM_OBJECT_NULL) {
		vm_object_t	new_object = object->shadow;

		vm_object_lock(new_object);
		vm_object_unlock(object);
		object = new_object;
	}

	p = object->pager_name;
	if (p != IP_NULL)
		p = ipc_port_make_send(p);

	vm_object_unlock(object);

	return p;
}
/*
 *	Attach a set of physical pages to an object, so that they can
 *	be mapped by mapping the object.  Typically used to map IO memory.
 *
 *	The mapping function and its private data are used to obtain the
 *	physical addresses for each page to be mapped.
 */
void
vm_object_page_map(
	vm_object_t	object,
	vm_offset_t	offset,
	vm_size_t	size,
	vm_offset_t	(*map_fn)(void *, vm_offset_t),
	void		*map_fn_data)	/* private to map_fn */
{
	int		num_pages;
	int		i;
	vm_page_t	m;
	vm_page_t	old_page;
	vm_offset_t	addr;

	num_pages = atop(size);

	for (i = 0; i < num_pages; i++, offset += PAGE_SIZE) {

		addr = (*map_fn)(map_fn_data, offset);

		while ((m = vm_page_grab_fictitious()) == VM_PAGE_NULL)
			vm_page_more_fictitious();

		vm_object_lock(object);
		if ((old_page = vm_page_lookup(object, offset))
		    != VM_PAGE_NULL) {
			VM_PAGE_FREE(old_page);
		}
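		/*
		 *	Fictitious pages stand in for physical pages that
		 *	the VM system does not otherwise manage (e.g. device
		 *	memory); mark them private and wired so they are
		 *	never returned to the free page pool.
		 */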
		vm_page_init(m);
		m->phys_addr = addr;
		m->private = TRUE;		/* don't free page */
		m->wire_count = 1;
		vm_page_lock_queues();
		vm_page_insert(m, object, offset);
		vm_page_unlock_queues();

		PAGE_WAKEUP_DONE(m);
		vm_object_unlock(object);
	}
}
#if	MACH_KDB
#include <vm/vm_print.h>
#define	printf	kdbprintf

boolean_t	vm_object_print_pages = FALSE;

/*
 *	vm_object_print:	[ debug ]
 */
void vm_object_print(
	vm_object_t	object)
{
	vm_page_t	p;
	int		count;

	if (object == VM_OBJECT_NULL)
		return;

	iprintf("Object 0x%X: size=0x%X, %d references",
		(vm_offset_t) object, (vm_offset_t) object->size,
		object->ref_count);
	printf("\n");
	iprintf("%lu resident pages,", object->resident_page_count);
	printf(" %d absent pages,", object->absent_count);
	printf(" %d paging ops\n", object->paging_in_progress);
	indent += 1;
	iprintf("memory object=0x%X (offset=0x%X),",
		(vm_offset_t) object->pager,
		(vm_offset_t) object->paging_offset);
	printf("control=0x%X, name=0x%X\n",
		(vm_offset_t) object->pager_request,
		(vm_offset_t) object->pager_name);
	iprintf("%s%s",
		object->pager_ready ? " ready" : "",
		object->pager_created ? " created" : "");
	printf("%s,%s ",
		object->pager_initialized ? "" : "uninitialized",
		object->temporary ? "temporary" : "permanent");
	printf("%s%s,",
		object->internal ? "internal" : "external",
		object->can_persist ? " cacheable" : "");
	printf("copy_strategy=%d\n", (vm_offset_t) object->copy_strategy);
	iprintf("shadow=0x%X (offset=0x%X),",
		(vm_offset_t) object->shadow,
		(vm_offset_t) object->shadow_offset);
	printf("copy=0x%X\n", (vm_offset_t) object->copy);

	indent += 1;

	if (vm_object_print_pages) {
		count = 0;
		p = (vm_page_t) queue_first(&object->memq);
		while (!queue_end(&object->memq, (queue_entry_t) p)) {
			if (count == 0)
				iprintf("memory:=");
			else if (count == 4) {
				printf("\n");
				iprintf(" ...");
				count = 0;
			} else
				printf(",");
			count++;

			printf("(off=0x%X,page=0x%X)",
				p->offset, (vm_offset_t) p);
			p = (vm_page_t) queue_next(&p->listq);
		}
		if (count != 0)
			printf("\n");
	}
	indent -= 2;
}
#endif	/* MACH_KDB */