/*
 * Server-side file mapping management
 *
 * Copyright (C) 1999 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#ifdef HAVE_SYS_MMAN_H
# include <sys/mman.h>
#endif
#include <unistd.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "ddk/wdm.h"

#include "file.h"
#include "handle.h"
#include "thread.h"
#include "process.h"
#include "request.h"
#include "security.h"

/* list of memory ranges, used to store committed info */
struct ranges
{
    struct object  obj;    /* object header */
    unsigned int   count;  /* number of used ranges */
    unsigned int   max;    /* number of allocated ranges */
    struct range
    {
        file_pos_t start;
        file_pos_t end;
    } *ranges;
};

static void ranges_dump( struct object *obj, int verbose );
static void ranges_destroy( struct object *obj );

static const struct object_ops ranges_ops =
{
    sizeof(struct ranges),     /* size */
    &no_type,                  /* type */
    ranges_dump,               /* dump */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    default_map_access,        /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_get_full_name,          /* get_full_name */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_close_handle,           /* close_handle */
    ranges_destroy             /* destroy */
};

/* file backing the shared sections of a PE image mapping */
struct shared_map
{
    struct object obj;    /* object header */
    struct fd    *fd;     /* file descriptor of the mapped PE file */
    struct file  *file;   /* temp file holding the shared data */
    struct list   entry;  /* entry in global shared maps list */
};

static void shared_map_dump( struct object *obj, int verbose );
static void shared_map_destroy( struct object *obj );

static const struct object_ops shared_map_ops =
{
    sizeof(struct shared_map), /* size */
    &no_type,                  /* type */
    shared_map_dump,           /* dump */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    default_map_access,        /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_get_full_name,          /* get_full_name */
    no_lookup_name,            /* lookup_name */
    no_link_name,              /* link_name */
    NULL,                      /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_close_handle,           /* close_handle */
    shared_map_destroy         /* destroy */
};

static struct list shared_map_list = LIST_INIT( shared_map_list );

/* memory view mapped in client address space */
struct memory_view
{
    struct list        entry;      /* entry in per-process view list */
    struct fd         *fd;         /* fd for mapped file */
    struct ranges     *committed;  /* list of committed ranges in this mapping */
    struct shared_map *shared;     /* temp file for shared PE mapping */
    pe_image_info_t    image;      /* image info (for PE image mapping) */
    unsigned int       flags;      /* SEC_* flags */
    client_ptr_t       base;       /* view base address (in process addr space) */
    mem_size_t         size;       /* view size */
    file_pos_t         start;      /* start offset in mapping */
    data_size_t        namelen;
    WCHAR              name[1];    /* filename for .so dll image views */
};

static const WCHAR mapping_name[] = {'S','e','c','t','i','o','n'};

struct type_descr mapping_type =
{
    { mapping_name, sizeof(mapping_name) },   /* name */
    SECTION_ALL_ACCESS | SYNCHRONIZE,         /* valid_access */
    {                                         /* mapping */
        STANDARD_RIGHTS_READ | SECTION_QUERY | SECTION_MAP_READ,
        STANDARD_RIGHTS_WRITE | SECTION_MAP_WRITE,
        STANDARD_RIGHTS_EXECUTE | SECTION_MAP_EXECUTE,
        SECTION_ALL_ACCESS
    },
};

struct mapping
{
    struct object      obj;        /* object header */
    mem_size_t         size;       /* mapping size */
    unsigned int       flags;      /* SEC_* flags */
    struct fd         *fd;         /* fd for mapped file */
    pe_image_info_t    image;      /* image info (for PE image mapping) */
    struct ranges     *committed;  /* list of committed ranges in this mapping */
    struct shared_map *shared;     /* temp file for shared PE mapping */
};

static void mapping_dump( struct object *obj, int verbose );
static struct fd *mapping_get_fd( struct object *obj );
static void mapping_destroy( struct object *obj );
static enum server_fd_type mapping_get_fd_type( struct fd *fd );

static const struct object_ops mapping_ops =
{
    sizeof(struct mapping),    /* size */
    &mapping_type,             /* type */
    mapping_dump,              /* dump */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    mapping_get_fd,            /* get_fd */
    default_map_access,        /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    default_get_full_name,     /* get_full_name */
    no_lookup_name,            /* lookup_name */
    directory_link_name,       /* link_name */
    default_unlink_name,       /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_close_handle,           /* close_handle */
    mapping_destroy            /* destroy */
};

static const struct fd_ops mapping_fd_ops =
{
    default_fd_get_poll_events,  /* get_poll_events */
    default_poll_event,          /* poll_event */
    mapping_get_fd_type,         /* get_fd_type */
    no_fd_read,                  /* read */
    no_fd_write,                 /* write */
    no_fd_flush,                 /* flush */
    no_fd_get_file_info,         /* get_file_info */
    no_fd_get_volume_info,       /* get_volume_info */
    no_fd_ioctl,                 /* ioctl */
    no_fd_queue_async,           /* queue_async */
    default_fd_reselect_async    /* reselect_async */
};

static size_t page_mask;

#define ROUND_SIZE(size)  (((size) + page_mask) & ~page_mask)
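/* ROUND_SIZE rounds a size up to the next page boundary; for example, with
 * 4096-byte pages (page_mask == 0xfff) ROUND_SIZE(0x1001) is 0x2000 while
 * ROUND_SIZE(0x1000) stays 0x1000 */
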
static void ranges_dump( struct object *obj, int verbose )
{
    struct ranges *ranges = (struct ranges *)obj;
    fprintf( stderr, "Memory ranges count=%u\n", ranges->count );
}

static void ranges_destroy( struct object *obj )
{
    struct ranges *ranges = (struct ranges *)obj;
    free( ranges->ranges );
}

static void shared_map_dump( struct object *obj, int verbose )
{
    struct shared_map *shared = (struct shared_map *)obj;
    fprintf( stderr, "Shared mapping fd=%p file=%p\n", shared->fd, shared->file );
}

static void shared_map_destroy( struct object *obj )
{
    struct shared_map *shared = (struct shared_map *)obj;

    release_object( shared->fd );
    release_object( shared->file );
    list_remove( &shared->entry );
}

/* extend a file beyond the current end of file */
static int grow_file( int unix_fd, file_pos_t new_size )
{
    static const char zero;
    off_t size = new_size;

    if (sizeof(new_size) > sizeof(size) && size != new_size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    /* extend the file one byte beyond the requested size and then truncate it */
    /* this should work around ftruncate implementations that can't extend files */
    if (pwrite( unix_fd, &zero, 1, size ) != -1)
    {
        ftruncate( unix_fd, size );
        return 1;
    }
    file_set_error();
    return 0;
}

/* check if the current directory allows exec mappings */
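/* some directories (e.g. on a filesystem mounted noexec) refuse PROT_EXEC
 * mappings, so we probe by mapping a small temp file with PROT_READ | PROT_EXEC */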
static int check_current_dir_for_exec(void)
{
    int fd;
    char tmpfn[] = "anonmap.XXXXXX";
    void *ret = MAP_FAILED;

    fd = mkstemps( tmpfn, 0 );
    if (fd == -1) return 0;
    if (grow_file( fd, 1 ))
    {
        ret = mmap( NULL, get_page_size(), PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0 );
        if (ret != MAP_FAILED) munmap( ret, get_page_size() );
    }
    close( fd );
    unlink( tmpfn );
    return (ret != MAP_FAILED);
}

/* create a temp file for anonymous mappings */
static int create_temp_file( file_pos_t size )
{
    static int temp_dir_fd = -1;
    char tmpfn[] = "anonmap.XXXXXX";
    int fd;

    if (temp_dir_fd == -1)
    {
        temp_dir_fd = server_dir_fd;
        if (!check_current_dir_for_exec())
        {
            /* the server dir is noexec, try the config dir instead */
            fchdir( config_dir_fd );
            if (check_current_dir_for_exec())
                temp_dir_fd = config_dir_fd;
            else  /* neither works, fall back to server dir */
                fchdir( server_dir_fd );
        }
    }
    else if (temp_dir_fd != server_dir_fd) fchdir( temp_dir_fd );

    fd = mkstemps( tmpfn, 0 );
    if (fd != -1)
    {
        if (!grow_file( fd, size ))
        {
            close( fd );
            fd = -1;
        }
        unlink( tmpfn );
    }
    else file_set_error();

    if (temp_dir_fd != server_dir_fd) fchdir( server_dir_fd );
    return fd;
}

/* find a memory view from its base address */
struct memory_view *find_mapped_view( struct process *process, client_ptr_t base )
{
    struct memory_view *view;

    LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
        if (view->base == base) return view;

    set_error( STATUS_NOT_MAPPED_VIEW );
    return NULL;
}

/* find a memory view from any address inside it */
static struct memory_view *find_mapped_addr( struct process *process, client_ptr_t addr )
{
    struct memory_view *view;

    LIST_FOR_EACH_ENTRY( view, &process->views, struct memory_view, entry )
        if (addr >= view->base && addr < view->base + view->size) return view;

    set_error( STATUS_NOT_MAPPED_VIEW );
    return NULL;
}

/* get the main exe memory view */
struct memory_view *get_exe_view( struct process *process )
{
    return LIST_ENTRY( list_head( &process->views ), struct memory_view, entry );
}

/* add a view to the process list */
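/* the main exe view is kept at the head of the list (see get_exe_view) and
 * also sets the process image name; all other views are appended at the tail,
 * and dll image views generate a load-dll debug event once process
 * initialization is done */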
static void add_process_view( struct thread *thread, struct memory_view *view )
{
    struct process *process = thread->process;
    struct unicode_str name;

    if (view->flags & SEC_IMAGE)
    {
        if (is_process_init_done( process ))
            generate_debug_event( thread, DbgLoadDllStateChange, view );
        else if (!(view->image.image_charact & IMAGE_FILE_DLL))
        {
            /* main exe */
            list_add_head( &process->views, &view->entry );
            if (get_view_nt_name( view, &name ) && (process->image = memdup( name.str, name.len )))
                process->imagelen = name.len;
            return;
        }
    }
    list_add_tail( &process->views, &view->entry );
}

static void free_memory_view( struct memory_view *view )
{
    if (view->fd) release_object( view->fd );
    if (view->committed) release_object( view->committed );
    if (view->shared) release_object( view->shared );
    list_remove( &view->entry );
    free( view );
}

/* free all mapped views at process exit */
void free_mapped_views( struct process *process )
{
    struct list *ptr;

    while ((ptr = list_head( &process->views )))
        free_memory_view( LIST_ENTRY( ptr, struct memory_view, entry ));
}

/* find the shared PE mapping for a given mapping */
static struct shared_map *get_shared_file( struct fd *fd )
{
    struct shared_map *ptr;

    LIST_FOR_EACH_ENTRY( ptr, &shared_map_list, struct shared_map, entry )
        if (is_same_file_fd( ptr->fd, fd ))
            return (struct shared_map *)grab_object( ptr );
    return NULL;
}

/* return the size of the memory mapping and file range of a given section */
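/* the file range is rounded out to whole 512-byte sectors and capped at the
 * map size; for example a section with PointerToRawData 0x650 and
 * SizeOfRawData 0x300 yields file_start 0x600 and file_size 0x400 */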
static inline void get_section_sizes( const IMAGE_SECTION_HEADER *sec, size_t *map_size,
                                      off_t *file_start, size_t *file_size )
{
    static const unsigned int sector_align = 0x1ff;

    if (!sec->Misc.VirtualSize) *map_size = ROUND_SIZE( sec->SizeOfRawData );
    else *map_size = ROUND_SIZE( sec->Misc.VirtualSize );

    *file_start = sec->PointerToRawData & ~sector_align;
    *file_size = (sec->SizeOfRawData + (sec->PointerToRawData & sector_align) + sector_align) & ~sector_align;
    if (*file_size > *map_size) *file_size = *map_size;
}

/* add a range to the committed list */
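/* the committed list is kept sorted by start offset with non-overlapping
 * entries; offsets are stored relative to the start of the mapping, so the
 * view start is added before searching and merging */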
static void add_committed_range( struct memory_view *view, file_pos_t start, file_pos_t end )
{
    unsigned int i, j;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || (end & page_mask) ||
        start >= view->size || end >= view->size ||
        start >= end)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!committed) return;  /* everything committed already */

    start += view->start;
    end += view->start;

    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > end) break;
        if (ranges[i].end < start) continue;
        if (ranges[i].start > start) ranges[i].start = start;  /* extend downwards */
        if (ranges[i].end < end)  /* extend upwards and maybe merge with next */
        {
            for (j = i + 1; j < committed->count; j++)
            {
                if (ranges[j].start > end) break;
                if (ranges[j].end > end) end = ranges[j].end;
            }
            if (j > i + 1)
            {
                memmove( &ranges[i + 1], &ranges[j], (committed->count - j) * sizeof(*ranges) );
                committed->count -= j - (i + 1);
            }
            ranges[i].end = end;
        }
        return;
    }

    /* now add a new range */

    if (committed->count == committed->max)
    {
        unsigned int new_size = committed->max * 2;
        struct range *new_ptr = realloc( committed->ranges, new_size * sizeof(*new_ptr) );
        if (!new_ptr) return;
        committed->max = new_size;
        ranges = committed->ranges = new_ptr;
    }
    memmove( &ranges[i + 1], &ranges[i], (committed->count - i) * sizeof(*ranges) );
    ranges[i].start = start;
    ranges[i].end = end;
    committed->count++;
}

/* find the range containing start and return whether it's committed */
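/* returns 1 and the length of the committed run when start lies inside a
 * committed range, otherwise 0 and the length of the uncommitted gap up to
 * the next committed range (or the end of the view) */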
static int find_committed_range( struct memory_view *view, file_pos_t start, mem_size_t *size )
{
    unsigned int i;
    struct ranges *committed = view->committed;
    struct range *ranges;

    if ((start & page_mask) || start >= view->size)
    {
        set_error( STATUS_INVALID_PARAMETER );
        return 0;
    }
    if (!committed)  /* everything is committed */
    {
        *size = view->size - start;
        return 1;
    }
    for (i = 0, ranges = committed->ranges; i < committed->count; i++)
    {
        if (ranges[i].start > view->start + start)
        {
            *size = min( ranges[i].start, view->start + view->size ) - (view->start + start);
            return 0;
        }
        if (ranges[i].end > view->start + start)
        {
            *size = min( ranges[i].end, view->start + view->size ) - (view->start + start);
            return 1;
        }
    }
    *size = view->size - start;
    return 0;
}

/* allocate and fill the temp file for a shared PE image mapping */
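/* writable IMAGE_SCN_MEM_SHARED sections must share their backing store
 * across processes, so their raw data is copied into an anonymous temp file;
 * mappings of the same PE file reuse that file through shared_map_list
 * (see get_shared_file) */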
static int build_shared_mapping( struct mapping *mapping, int fd,
                                 IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    struct shared_map *shared;
    struct file *file;
    unsigned int i;
    mem_size_t total_size;
    size_t file_size, map_size, max_size;
    off_t shared_pos, read_pos, write_pos;
    char *buffer = NULL;
    int shared_fd;
    long toread;

    /* compute the total size of the shared mapping */

    total_size = max_size = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if ((sec[i].Characteristics & IMAGE_SCN_MEM_SHARED) &&
            (sec[i].Characteristics & IMAGE_SCN_MEM_WRITE))
        {
            get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
            if (file_size > max_size) max_size = file_size;
            total_size += map_size;
        }
    }
    if (!total_size) return 1;  /* nothing to do */

    if ((mapping->shared = get_shared_file( mapping->fd ))) return 1;

    /* create a temp file for the mapping */

    if ((shared_fd = create_temp_file( total_size )) == -1) return 0;
    if (!(file = create_file_for_fd( shared_fd, FILE_GENERIC_READ|FILE_GENERIC_WRITE, 0 ))) return 0;

    if (!(buffer = malloc( max_size ))) goto error;

    /* copy the shared sections data into the temp file */

    shared_pos = 0;
    for (i = 0; i < nb_sec; i++)
    {
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_SHARED)) continue;
        if (!(sec[i].Characteristics & IMAGE_SCN_MEM_WRITE)) continue;

        get_section_sizes( &sec[i], &map_size, &read_pos, &file_size );
        write_pos = shared_pos;
        shared_pos += map_size;
        if (!sec[i].PointerToRawData || !file_size) continue;

        toread = file_size;
        while (toread)
        {
            long res = pread( fd, buffer + file_size - toread, toread, read_pos );
            if (!res && toread < 0x200)  /* partial sector at EOF is not an error */
            {
                file_size -= toread;
                break;
            }
            if (res <= 0) goto error;
            toread -= res;
            read_pos += res;
        }
        if (pwrite( shared_fd, buffer, file_size, write_pos ) != file_size) goto error;
    }

    if (!(shared = alloc_object( &shared_map_ops ))) goto error;
    shared->fd = (struct fd *)grab_object( mapping->fd );
    shared->file = file;
    list_add_head( &shared_map_list, &shared->entry );
    mapping->shared = shared;
    free( buffer );
    return 1;

 error:
    release_object( file );
    free( buffer );
    return 0;
}

/* load the CLR header from its section */
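/* va/size come from the COM descriptor data directory; the section containing
 * that RVA is located and the IMAGE_COR20_HEADER is read from the matching
 * file offset; returns non-zero only when the header could be read and its
 * runtime version is at least COR_VERSION_MAJOR_V2 / COR_VERSION_MINOR */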
static int load_clr_header( IMAGE_COR20_HEADER *hdr, size_t va, size_t size, int unix_fd,
                            IMAGE_SECTION_HEADER *sec, unsigned int nb_sec )
{
    ssize_t ret;
    size_t map_size, file_size;
    off_t file_start;
    unsigned int i;

    if (!va || !size) return 0;

    for (i = 0; i < nb_sec; i++)
    {
        if (va < sec[i].VirtualAddress) continue;
        if (sec[i].Misc.VirtualSize && va - sec[i].VirtualAddress >= sec[i].Misc.VirtualSize) continue;
        get_section_sizes( &sec[i], &map_size, &file_start, &file_size );
        if (size >= map_size) continue;
        if (va - sec[i].VirtualAddress >= map_size - size) continue;
        file_size = min( file_size, map_size );
        size = min( size, sizeof(*hdr) );
        ret = pread( unix_fd, hdr, min( size, file_size ), file_start + va - sec[i].VirtualAddress );
        if (ret <= 0) break;
        if (ret < sizeof(*hdr)) memset( (char *)hdr + ret, 0, sizeof(*hdr) - ret );
        return (hdr->MajorRuntimeVersion > COR_VERSION_MAJOR_V2 ||
                (hdr->MajorRuntimeVersion == COR_VERSION_MAJOR_V2 &&
                 hdr->MinorRuntimeVersion >= COR_VERSION_MINOR));
    }
    return 0;
}

/* retrieve the mapping parameters for an executable (PE) image */
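/* the headers are parsed in several steps: read the DOS and NT headers, check
 * the machine type against the supported CPUs, copy the relevant optional
 * header fields into mapping->image, read the section headers, check the CLR
 * header for IL-only images, and finally build the temp file for shared
 * sections */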
static unsigned int get_image_params( struct mapping *mapping, file_pos_t file_size, int unix_fd )
{
    static const char builtin_signature[] = "Wine builtin DLL";
    static const char fakedll_signature[] = "Wine placeholder DLL";

    IMAGE_COR20_HEADER clr;
    IMAGE_SECTION_HEADER sec[96];
    struct
    {
        IMAGE_DOS_HEADER dos;
        char buffer[32];
    } mz;
    struct
    {
        DWORD Signature;
        IMAGE_FILE_HEADER FileHeader;
        union
        {
            IMAGE_OPTIONAL_HEADER32 hdr32;
            IMAGE_OPTIONAL_HEADER64 hdr64;
        } opt;
    } nt;
    off_t pos;
    int size, opt_size;
    size_t mz_size, clr_va, clr_size;
    unsigned int i, cpu_mask = get_supported_cpu_mask();

    /* load the headers */

    if (!file_size) return STATUS_INVALID_FILE_FOR_SECTION;
    size = pread( unix_fd, &mz, sizeof(mz), 0 );
    if (size < sizeof(mz.dos)) return STATUS_INVALID_IMAGE_NOT_MZ;
    if (mz.dos.e_magic != IMAGE_DOS_SIGNATURE) return STATUS_INVALID_IMAGE_NOT_MZ;
    mz_size = size;
    pos = mz.dos.e_lfanew;

    size = pread( unix_fd, &nt, sizeof(nt), pos );
    if (size < sizeof(nt.Signature) + sizeof(nt.FileHeader)) return STATUS_INVALID_IMAGE_PROTECT;
    /* zero out Optional header in the case it's not present or partial */
    opt_size = max( nt.FileHeader.SizeOfOptionalHeader, offsetof( IMAGE_OPTIONAL_HEADER32, CheckSum ));
    size = min( size, sizeof(nt.Signature) + sizeof(nt.FileHeader) + opt_size );
    if (size < sizeof(nt)) memset( (char *)&nt + size, 0, sizeof(nt) - size );
    if (nt.Signature != IMAGE_NT_SIGNATURE)
    {
        IMAGE_OS2_HEADER *os2 = (IMAGE_OS2_HEADER *)&nt;
        if (os2->ne_magic != IMAGE_OS2_SIGNATURE) return STATUS_INVALID_IMAGE_PROTECT;
        if (os2->ne_exetyp == 2) return STATUS_INVALID_IMAGE_WIN_16;
        if (os2->ne_exetyp == 5) return STATUS_INVALID_IMAGE_PROTECT;
        return STATUS_INVALID_IMAGE_NE_FORMAT;
    }

    switch (nt.opt.hdr32.Magic)
    {
    case IMAGE_NT_OPTIONAL_HDR32_MAGIC:
        switch (nt.FileHeader.Machine)
        {
        case IMAGE_FILE_MACHINE_I386:
            if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_ARMNT:
            if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_POWERPC:
            if (cpu_mask & CPU_FLAG(CPU_POWERPC)) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        default:
            return STATUS_INVALID_IMAGE_FORMAT;
        }
        clr_va = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr32.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base            = nt.opt.hdr32.ImageBase;
        mapping->image.entry_point     = nt.opt.hdr32.ImageBase + nt.opt.hdr32.AddressOfEntryPoint;
        mapping->image.map_size        = ROUND_SIZE( nt.opt.hdr32.SizeOfImage );
        mapping->image.stack_size      = nt.opt.hdr32.SizeOfStackReserve;
        mapping->image.stack_commit    = nt.opt.hdr32.SizeOfStackCommit;
        mapping->image.subsystem       = nt.opt.hdr32.Subsystem;
        mapping->image.subsystem_minor = nt.opt.hdr32.MinorSubsystemVersion;
        mapping->image.subsystem_major = nt.opt.hdr32.MajorSubsystemVersion;
        mapping->image.osversion_minor = nt.opt.hdr32.MinorOperatingSystemVersion;
        mapping->image.osversion_major = nt.opt.hdr32.MajorOperatingSystemVersion;
        mapping->image.dll_charact     = nt.opt.hdr32.DllCharacteristics;
        mapping->image.contains_code   = (nt.opt.hdr32.SizeOfCode ||
                                          nt.opt.hdr32.AddressOfEntryPoint ||
                                          nt.opt.hdr32.SectionAlignment & page_mask);
        mapping->image.header_size     = nt.opt.hdr32.SizeOfHeaders;
        mapping->image.checksum        = nt.opt.hdr32.CheckSum;
        mapping->image.image_flags     = 0;
        if (nt.opt.hdr32.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr32.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    case IMAGE_NT_OPTIONAL_HDR64_MAGIC:
        if (!(cpu_mask & CPU_64BIT_MASK)) return STATUS_INVALID_IMAGE_WIN_64;
        switch (nt.FileHeader.Machine)
        {
        case IMAGE_FILE_MACHINE_AMD64:
            if (cpu_mask & (CPU_FLAG(CPU_x86) | CPU_FLAG(CPU_x86_64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        case IMAGE_FILE_MACHINE_ARM64:
            if (cpu_mask & (CPU_FLAG(CPU_ARM) | CPU_FLAG(CPU_ARM64))) break;
            return STATUS_INVALID_IMAGE_FORMAT;
        default:
            return STATUS_INVALID_IMAGE_FORMAT;
        }
        clr_va = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].VirtualAddress;
        clr_size = nt.opt.hdr64.DataDirectory[IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR].Size;

        mapping->image.base            = nt.opt.hdr64.ImageBase;
        mapping->image.entry_point     = nt.opt.hdr64.ImageBase + nt.opt.hdr64.AddressOfEntryPoint;
        mapping->image.map_size        = ROUND_SIZE( nt.opt.hdr64.SizeOfImage );
        mapping->image.stack_size      = nt.opt.hdr64.SizeOfStackReserve;
        mapping->image.stack_commit    = nt.opt.hdr64.SizeOfStackCommit;
        mapping->image.subsystem       = nt.opt.hdr64.Subsystem;
        mapping->image.subsystem_minor = nt.opt.hdr64.MinorSubsystemVersion;
        mapping->image.subsystem_major = nt.opt.hdr64.MajorSubsystemVersion;
        mapping->image.osversion_minor = nt.opt.hdr64.MinorOperatingSystemVersion;
        mapping->image.osversion_major = nt.opt.hdr64.MajorOperatingSystemVersion;
        mapping->image.dll_charact     = nt.opt.hdr64.DllCharacteristics;
        mapping->image.contains_code   = (nt.opt.hdr64.SizeOfCode ||
                                          nt.opt.hdr64.AddressOfEntryPoint ||
                                          nt.opt.hdr64.SectionAlignment & page_mask);
        mapping->image.header_size     = nt.opt.hdr64.SizeOfHeaders;
        mapping->image.checksum        = nt.opt.hdr64.CheckSum;
        mapping->image.image_flags     = 0;
        if (nt.opt.hdr64.SectionAlignment & page_mask)
            mapping->image.image_flags |= IMAGE_FLAGS_ImageMappedFlat;
        if ((nt.opt.hdr64.DllCharacteristics & IMAGE_DLLCHARACTERISTICS_DYNAMIC_BASE) &&
            mapping->image.contains_code && !(clr_va && clr_size))
            mapping->image.image_flags |= IMAGE_FLAGS_ImageDynamicallyRelocated;
        break;

    default:
        return STATUS_INVALID_IMAGE_FORMAT;
    }

    mapping->image.image_charact = nt.FileHeader.Characteristics;
    mapping->image.machine       = nt.FileHeader.Machine;
    mapping->image.dbg_offset    = nt.FileHeader.PointerToSymbolTable;
    mapping->image.dbg_size      = nt.FileHeader.NumberOfSymbols;
    mapping->image.zerobits      = 0; /* FIXME */
    mapping->image.file_size     = file_size;
    mapping->image.loader_flags  = clr_va && clr_size;
    if (mz_size == sizeof(mz) && !memcmp( mz.buffer, builtin_signature, sizeof(builtin_signature) ))
        mapping->image.image_flags |= IMAGE_FLAGS_WineBuiltin;
    else if (mz_size == sizeof(mz) && !memcmp( mz.buffer, fakedll_signature, sizeof(fakedll_signature) ))
        mapping->image.image_flags |= IMAGE_FLAGS_WineFakeDll;

    /* load the section headers */

    pos += sizeof(nt.Signature) + sizeof(nt.FileHeader) + nt.FileHeader.SizeOfOptionalHeader;
    if (nt.FileHeader.NumberOfSections > ARRAY_SIZE( sec )) return STATUS_INVALID_IMAGE_FORMAT;
    size = sizeof(*sec) * nt.FileHeader.NumberOfSections;
    if (!mapping->size) mapping->size = mapping->image.map_size;
    else if (mapping->size > mapping->image.map_size) return STATUS_SECTION_TOO_BIG;
    if (pos + size > mapping->image.map_size) return STATUS_INVALID_FILE_FOR_SECTION;
    if (pos + size > mapping->image.header_size) mapping->image.header_size = pos + size;
    if (pread( unix_fd, sec, size, pos ) != size) return STATUS_INVALID_FILE_FOR_SECTION;

    for (i = 0; i < nt.FileHeader.NumberOfSections && !mapping->image.contains_code; i++)
        if (sec[i].Characteristics & IMAGE_SCN_MEM_EXECUTE) mapping->image.contains_code = 1;

    if (load_clr_header( &clr, clr_va, clr_size, unix_fd, sec, nt.FileHeader.NumberOfSections ) &&
        (clr.Flags & COMIMAGE_FLAGS_ILONLY))
    {
        mapping->image.image_flags |= IMAGE_FLAGS_ComPlusILOnly;
        if (nt.opt.hdr32.Magic == IMAGE_NT_OPTIONAL_HDR32_MAGIC)
        {
            if (!(clr.Flags & COMIMAGE_FLAGS_32BITREQUIRED))
                mapping->image.image_flags |= IMAGE_FLAGS_ComPlusNativeReady;
            if (clr.Flags & COMIMAGE_FLAGS_32BITPREFERRED)
                mapping->image.image_flags |= IMAGE_FLAGS_ComPlusPrefer32bit;
        }
    }

    if (!build_shared_mapping( mapping, unix_fd, sec, nt.FileHeader.NumberOfSections ))
        return STATUS_INVALID_FILE_FOR_SECTION;

    return STATUS_SUCCESS;
}

static struct ranges *create_ranges(void)
{
    struct ranges *ranges = alloc_object( &ranges_ops );

    if (!ranges) return NULL;
    ranges->count = 0;
    ranges->max   = 8;
    if (!(ranges->ranges = mem_alloc( ranges->max * sizeof(*ranges->ranges) )))
    {
        release_object( ranges );
        return NULL;
    }
    return ranges;
}

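/* validate the SEC_* flags passed to create_mapping and return the flags to
 * store in the mapping object: file-backed mappings are normalized to SEC_FILE
 * (plus SEC_IMAGE / SEC_NOCACHE / SEC_WRITECOMBINE where relevant), anonymous
 * mappings keep their SEC_COMMIT or SEC_RESERVE flags, and invalid
 * combinations fail with STATUS_INVALID_PARAMETER or
 * STATUS_INVALID_FILE_FOR_SECTION */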
static unsigned int get_mapping_flags( obj_handle_t handle, unsigned int flags )
{
    switch (flags & (SEC_IMAGE | SEC_RESERVE | SEC_COMMIT | SEC_FILE))
    {
    case SEC_IMAGE:
        if (flags & (SEC_WRITECOMBINE | SEC_LARGE_PAGES)) break;
        if (handle) return SEC_FILE | SEC_IMAGE;
        set_error( STATUS_INVALID_FILE_FOR_SECTION );
        return 0;
    case SEC_COMMIT:
        if (!handle) return flags;
        /* fall through */
    case SEC_RESERVE:
        if (flags & SEC_LARGE_PAGES) break;
        if (handle) return SEC_FILE | (flags & (SEC_NOCACHE | SEC_WRITECOMBINE));
        return flags;
    }
    set_error( STATUS_INVALID_PARAMETER );
    return 0;
}

static struct mapping *create_mapping( struct object *root, const struct unicode_str *name,
                                       unsigned int attr, mem_size_t size, unsigned int flags,
                                       obj_handle_t handle, unsigned int file_access,
                                       const struct security_descriptor *sd )
{
    struct mapping *mapping;
    struct file *file;
    struct fd *fd;
    int unix_fd;
    struct stat st;

    if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;

    if (!(mapping = create_named_object( root, &mapping_ops, name, attr, sd )))
        return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS)
        return mapping;  /* Nothing else to do */

    mapping->size      = size;
    mapping->fd        = NULL;
    mapping->shared    = NULL;
    mapping->committed = NULL;

    if (!(mapping->flags = get_mapping_flags( handle, flags ))) goto error;

    if (handle)
    {
        const unsigned int sharing = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
        unsigned int mapping_access = FILE_MAPPING_ACCESS;

        if (!(file = get_file_obj( current->process, handle, file_access ))) goto error;
        fd = get_obj_fd( (struct object *)file );

        /* file sharing rules for mappings are different, so we use magic access rights */
        if (flags & SEC_IMAGE) mapping_access |= FILE_MAPPING_IMAGE;
        else if (file_access & FILE_WRITE_DATA) mapping_access |= FILE_MAPPING_WRITE;

        if (!(mapping->fd = get_fd_object_for_mapping( fd, mapping_access, sharing )))
        {
            mapping->fd = dup_fd_object( fd, mapping_access, sharing, FILE_SYNCHRONOUS_IO_NONALERT );
            if (mapping->fd) set_fd_user( mapping->fd, &mapping_fd_ops, NULL );
        }
        release_object( file );
        release_object( fd );
        if (!mapping->fd) goto error;

        if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
        if (fstat( unix_fd, &st ) == -1)
        {
            file_set_error();
            goto error;
        }
        if (flags & SEC_IMAGE)
        {
            unsigned int err = get_image_params( mapping, st.st_size, unix_fd );
            if (!err) return mapping;
            set_error( err );
            goto error;
        }
        if (!mapping->size)
        {
            if (!(mapping->size = st.st_size))
            {
                set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
                goto error;
            }
        }
        else if (st.st_size < mapping->size)
        {
            if (!(file_access & FILE_WRITE_DATA))
            {
                set_error( STATUS_SECTION_TOO_BIG );
                goto error;
            }
            if (!grow_file( unix_fd, mapping->size )) goto error;
        }
    }
    else  /* Anonymous mapping (no associated file) */
    {
        if (!mapping->size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto error;
        }
        if ((flags & SEC_RESERVE) && !(mapping->committed = create_ranges())) goto error;
        mapping->size = (mapping->size + page_mask) & ~((mem_size_t)page_mask);
        if ((unix_fd = create_temp_file( mapping->size )) == -1) goto error;
        if (!(mapping->fd = create_anonymous_fd( &mapping_fd_ops, unix_fd, &mapping->obj,
                                                 FILE_SYNCHRONOUS_IO_NONALERT ))) goto error;
        allow_fd_caching( mapping->fd );
    }
    return mapping;

 error:
    release_object( mapping );
    return NULL;
}

/* create a read-only file mapping for the specified fd */
struct mapping *create_fd_mapping( struct object *root, const struct unicode_str *name,
                                   struct fd *fd, unsigned int attr, const struct security_descriptor *sd )
{
    struct mapping *mapping;
    int unix_fd;
    struct stat st;

    if (!(mapping = create_named_object( root, &mapping_ops, name, attr, sd ))) return NULL;
    if (get_error() == STATUS_OBJECT_NAME_EXISTS) return mapping;  /* Nothing else to do */

    mapping->shared    = NULL;
    mapping->committed = NULL;
    mapping->flags     = SEC_FILE;
    mapping->fd        = (struct fd *)grab_object( fd );
    set_fd_user( mapping->fd, &mapping_fd_ops, NULL );

    if ((unix_fd = get_unix_fd( mapping->fd )) == -1) goto error;
    if (fstat( unix_fd, &st ) == -1)
    {
        file_set_error();
        goto error;
    }
    if (!(mapping->size = st.st_size))
    {
        set_error( STATUS_MAPPED_FILE_SIZE_ZERO );
        goto error;
    }
    return mapping;

 error:
    release_object( mapping );
    return NULL;
}

static struct mapping *get_mapping_obj( struct process *process, obj_handle_t handle, unsigned int access )
{
    return (struct mapping *)get_handle_obj( process, handle, access, &mapping_ops );
}

/* open a new file for the file descriptor backing the view */
struct file *get_view_file( const struct memory_view *view, unsigned int access, unsigned int sharing )
{
    if (!view->fd) return NULL;
    return create_file_for_fd_obj( view->fd, access, sharing );
}

/* get the image info for a SEC_IMAGE mapped view */
const pe_image_info_t *get_view_image_info( const struct memory_view *view, client_ptr_t *base )
{
    if (!(view->flags & SEC_IMAGE)) return NULL;
    *base = view->base;
    return &view->image;
}

/* get the file name for a mapped view */
int get_view_nt_name( const struct memory_view *view, struct unicode_str *name )
{
    if (view->namelen)  /* .so builtin */
    {
        name->str = view->name;
        name->len = view->namelen;
        return 1;
    }
    if (!view->fd) return 0;
    get_nt_name( view->fd, name );
    return 1;
}

/* generate all startup events of a given process */
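/* events are sent in a fixed order: process creation for the main exe view,
 * the load event for the first dll image view (normally ntdll), thread
 * creation for every thread except the first, then load events for the
 * remaining image views in load order */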
void generate_startup_debug_events( struct process *process )
{
    struct memory_view *view;
    struct list *ptr = list_head( &process->views );
    struct thread *thread, *first_thread = get_process_first_thread( process );

    if (!ptr) return;
    view = LIST_ENTRY( ptr, struct memory_view, entry );
    generate_debug_event( first_thread, DbgCreateProcessStateChange, view );

    /* generate ntdll.dll load event */
    while (ptr && (ptr = list_next( &process->views, ptr )))
    {
        view = LIST_ENTRY( ptr, struct memory_view, entry );
        if (!(view->flags & SEC_IMAGE)) continue;
        generate_debug_event( first_thread, DbgLoadDllStateChange, view );
        break;
    }

    /* generate creation events */
    LIST_FOR_EACH_ENTRY( thread, &process->thread_list, struct thread, proc_entry )
    {
        if (thread != first_thread)
            generate_debug_event( thread, DbgCreateThreadStateChange, NULL );
    }

    /* generate dll events (in loading order) */
    while (ptr && (ptr = list_next( &process->views, ptr )))
    {
        view = LIST_ENTRY( ptr, struct memory_view, entry );
        if (!(view->flags & SEC_IMAGE)) continue;
        generate_debug_event( first_thread, DbgLoadDllStateChange, view );
    }
}

static void mapping_dump( struct object *obj, int verbose )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    fprintf( stderr, "Mapping size=%08x%08x flags=%08x fd=%p shared=%p\n",
             (unsigned int)(mapping->size >> 32), (unsigned int)mapping->size,
             mapping->flags, mapping->fd, mapping->shared );
}

static struct fd *mapping_get_fd( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    return (struct fd *)grab_object( mapping->fd );
}

static void mapping_destroy( struct object *obj )
{
    struct mapping *mapping = (struct mapping *)obj;
    assert( obj->ops == &mapping_ops );
    if (mapping->fd) release_object( mapping->fd );
    if (mapping->committed) release_object( mapping->committed );
    if (mapping->shared) release_object( mapping->shared );
}

static enum server_fd_type mapping_get_fd_type( struct fd *fd )
{
    return FD_TYPE_FILE;
}

int get_page_size(void)
{
    if (!page_mask) page_mask = sysconf( _SC_PAGESIZE ) - 1;
    return page_mask + 1;
}

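/* create the mapping backing the KSHARED_USER_DATA page and map it into the
 * server's own address space, so that fields such as SystemCall can be set
 * directly through the user_shared_data pointer */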
struct object *create_user_data_mapping( struct object *root, const struct unicode_str *name,
                                         unsigned int attr, const struct security_descriptor *sd )
{
    void *ptr;
    struct mapping *mapping;

    if (!(mapping = create_mapping( root, name, attr, sizeof(KSHARED_USER_DATA),
                                    SEC_COMMIT, 0, FILE_READ_DATA | FILE_WRITE_DATA, sd ))) return NULL;
    ptr = mmap( NULL, mapping->size, PROT_WRITE, MAP_SHARED, get_unix_fd( mapping->fd ), 0 );
    if (ptr != MAP_FAILED)
    {
        user_shared_data = ptr;
        user_shared_data->SystemCall = 1;
    }
    return &mapping->obj;
}

/* create a file mapping */
DECL_HANDLER(create_mapping)
{
    struct object *root;
    struct mapping *mapping;
    struct unicode_str name;
    const struct security_descriptor *sd;
    const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root );

    if (!objattr) return;

    if ((mapping = create_mapping( root, &name, objattr->attributes, req->size, req->flags,
                                   req->file_handle, req->file_access, sd )))
    {
        if (get_error() == STATUS_OBJECT_NAME_EXISTS)
            reply->handle = alloc_handle( current->process, &mapping->obj, req->access, objattr->attributes );
        else
            reply->handle = alloc_handle_no_access_check( current->process, &mapping->obj,
                                                          req->access, objattr->attributes );
        release_object( mapping );
    }

    if (root) release_object( root );
}

/* open a handle to a mapping */
DECL_HANDLER(open_mapping)
{
    struct unicode_str name = get_req_unicode_str();

    reply->handle = open_object( current->process, req->rootdir, req->access,
                                 &mapping_ops, &name, req->attributes );
}

/* get information about a mapping */
DECL_HANDLER(get_mapping_info)
{
    struct mapping *mapping;

    if (!(mapping = get_mapping_obj( current->process, req->handle, req->access ))) return;

    reply->size  = mapping->size;
    reply->flags = mapping->flags;

    if (mapping->flags & SEC_IMAGE)
    {
        struct unicode_str name = { NULL, 0 };
        data_size_t size;
        void *data;

        if (mapping->fd) get_nt_name( mapping->fd, &name );
        size = min( sizeof(pe_image_info_t) + name.len, get_reply_max_size() );
        if ((data = set_reply_data_size( size )))
        {
            memcpy( data, &mapping->image, min( sizeof(pe_image_info_t), size ));
            if (size > sizeof(pe_image_info_t))
                memcpy( (pe_image_info_t *)data + 1, name.str, size - sizeof(pe_image_info_t) );
        }
        reply->total = sizeof(pe_image_info_t) + name.len;
    }

    if (!(req->access & (SECTION_MAP_READ | SECTION_MAP_WRITE)))  /* query only */
    {
        release_object( mapping );
        return;
    }

    if (mapping->shared)
        reply->shared_file = alloc_handle( current->process, mapping->shared->file,
                                           GENERIC_READ|GENERIC_WRITE, 0 );
    release_object( mapping );
}

/* add a memory view in the current process */
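/* two cases are handled: with no mapping handle the request describes an
 * image view for a builtin .so dll, whose pe_image_info_t and file name are
 * passed in the request data; otherwise the view is backed by an existing
 * mapping object, and a SEC_IMAGE view that is not at its preferred base
 * gets STATUS_IMAGE_NOT_AT_BASE */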
DECL_HANDLER(map_view)
{
    struct mapping *mapping = NULL;
    struct memory_view *view;
    data_size_t namelen = 0;

    if (!req->size || (req->base & page_mask) || req->base + req->size < req->base)  /* overflow */
    {
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    /* make sure we don't already have an overlapping view */
    LIST_FOR_EACH_ENTRY( view, &current->process->views, struct memory_view, entry )
    {
        if (view->base + view->size <= req->base) continue;
        if (view->base >= req->base + req->size) continue;
        set_error( STATUS_INVALID_PARAMETER );
        return;
    }

    if (!req->mapping)  /* image mapping for a .so dll */
    {
        if (get_req_data_size() > sizeof(view->image)) namelen = get_req_data_size() - sizeof(view->image);
        if (!(view = mem_alloc( offsetof( struct memory_view, name[namelen] )))) return;
        memset( view, 0, sizeof(*view) );
        view->base    = req->base;
        view->size    = req->size;
        view->start   = req->start;
        view->flags   = SEC_IMAGE;
        view->namelen = namelen;
        memcpy( &view->image, get_req_data(), min( sizeof(view->image), get_req_data_size() ));
        memcpy( view->name, (pe_image_info_t *)get_req_data() + 1, namelen );
        add_process_view( current, view );
        return;
    }

    if (!(mapping = get_mapping_obj( current->process, req->mapping, req->access ))) return;

    if (mapping->flags & SEC_IMAGE)
    {
        if (req->start || req->size > mapping->image.map_size)
        {
            set_error( STATUS_INVALID_PARAMETER );
            goto done;
        }
    }
    else if (req->start >= mapping->size ||
             req->start + req->size < req->start ||
             req->start + req->size > ((mapping->size + page_mask) & ~(mem_size_t)page_mask))
    {
        set_error( STATUS_INVALID_PARAMETER );
        goto done;
    }

    if ((view = mem_alloc( offsetof( struct memory_view, name[namelen] ))))
    {
        view->base      = req->base;
        view->size      = req->size;
        view->start     = req->start;
        view->flags     = mapping->flags;
        view->namelen   = namelen;
        view->fd        = !is_fd_removable( mapping->fd ) ? (struct fd *)grab_object( mapping->fd ) : NULL;
        view->committed = mapping->committed ? (struct ranges *)grab_object( mapping->committed ) : NULL;
        view->shared    = mapping->shared ? (struct shared_map *)grab_object( mapping->shared ) : NULL;
        if (view->flags & SEC_IMAGE) view->image = mapping->image;
        add_process_view( current, view );
        if (view->flags & SEC_IMAGE && view->base != mapping->image.base)
            set_error( STATUS_IMAGE_NOT_AT_BASE );
    }

done:
    release_object( mapping );
}

/* unmap a memory view from the current process */
DECL_HANDLER(unmap_view)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (!view) return;
    if (view->flags & SEC_IMAGE) generate_debug_event( current, DbgUnloadDllStateChange, view );
    free_memory_view( view );
}

/* get a range of committed pages in a file mapping */
DECL_HANDLER(get_mapping_committed_range)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) reply->committed = find_committed_range( view, req->offset, &reply->size );
}

/* add a range to the committed pages in a file mapping */
DECL_HANDLER(add_mapping_committed_range)
{
    struct memory_view *view = find_mapped_view( current->process, req->base );

    if (view) add_committed_range( view, req->offset, req->offset + req->size );
}

/* check if two memory maps are for the same file */
DECL_HANDLER(is_same_mapping)
{
    struct memory_view *view1 = find_mapped_view( current->process, req->base1 );
    struct memory_view *view2 = find_mapped_view( current->process, req->base2 );

    if (!view1 || !view2) return;
    if (!view1->fd || !view2->fd ||
        !(view1->flags & SEC_IMAGE) || !(view2->flags & SEC_IMAGE) ||
        !is_same_file_fd( view1->fd, view2->fd ))
        set_error( STATUS_NOT_SAME_DEVICE );
}

/* get the filename of a mapping */
DECL_HANDLER(get_mapping_filename)
{
    struct process *process;
    struct memory_view *view;
    struct unicode_str name;

    if (!(process = get_process_from_handle( req->process, PROCESS_QUERY_INFORMATION ))) return;

    if ((view = find_mapped_addr( process, req->addr )) && get_view_nt_name( view, &name ))
    {
        reply->len = name.len;
        if (name.len > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if (!name.len) set_error( STATUS_FILE_INVALID );
        else set_reply_data( name.str, name.len );
    }
    else set_error( STATUS_INVALID_ADDRESS );

    release_object( process );
}