/* esync.c — wineserver eventfd-based synchronization objects */
  1. /*
  2. * eventfd-based synchronization objects
  3. *
  4. * Copyright (C) 2018 Zebediah Figura
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
  19. */
#include "config.h"
#include "wine/port.h"

#include <fcntl.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef HAVE_SYS_EVENTFD_H
# include <sys/eventfd.h>
#endif
#ifdef HAVE_SYS_MMAN_H
# include <sys/mman.h>
#endif
#ifdef HAVE_SYS_STAT_H
# include <sys/stat.h>
#endif
#include <unistd.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"

#include "handle.h"
#include "request.h"
#include "file.h"
#include "esync.h"
  44. int do_esync(void)
  45. {
  46. #ifdef HAVE_SYS_EVENTFD_H
  47. static int do_esync_cached = -1;
  48. if (do_esync_cached == -1)
  49. do_esync_cached = getenv("WINEESYNC") && atoi(getenv("WINEESYNC"));
  50. return do_esync_cached;
  51. #else
  52. return 0;
  53. #endif
  54. }
/* Name of the per-prefix POSIX shared memory section ("/wine-<inode>-esync");
 * sized for "/wine-" + up to 16 hex digits + "-esync" + NUL. */
static char shm_name[29];
/* File descriptor of the shared memory section created in esync_init(). */
static int shm_fd;
/* Current size of the shared memory section; grown one page at a time. */
static off_t shm_size;
/* Lazily mmap()ed views of the section, one pointer per page. */
static void **shm_addrs;
static int shm_addrs_size; /* length of the allocated shm_addrs array */
/* System page size, cached from sysconf() in esync_init(). */
static long pagesize;
  61. static void shm_cleanup(void)
  62. {
  63. close( shm_fd );
  64. if (shm_unlink( shm_name ) == -1)
  65. perror( "shm_unlink" );
  66. }
  67. void esync_init(void)
  68. {
  69. struct stat st;
  70. if (fstat( config_dir_fd, &st ) == -1)
  71. fatal_error( "cannot stat config dir\n" );
  72. if (st.st_ino != (unsigned long)st.st_ino)
  73. sprintf( shm_name, "/wine-%lx%08lx-esync", (unsigned long)((unsigned long long)st.st_ino >> 32), (unsigned long)st.st_ino );
  74. else
  75. sprintf( shm_name, "/wine-%lx-esync", (unsigned long)st.st_ino );
  76. shm_unlink( shm_name );
  77. shm_fd = shm_open( shm_name, O_RDWR | O_CREAT | O_EXCL, 0644 );
  78. if (shm_fd == -1)
  79. perror( "shm_open" );
  80. pagesize = sysconf( _SC_PAGESIZE );
  81. shm_addrs = calloc( 128, sizeof(shm_addrs[0]) );
  82. shm_addrs_size = 128;
  83. shm_size = pagesize;
  84. if (ftruncate( shm_fd, shm_size ) == -1)
  85. perror( "ftruncate" );
  86. atexit( shm_cleanup );
  87. }
/* All mutex esync objects, so esync_abandon_mutexes() can find them. */
static struct list mutex_list = LIST_INIT(mutex_list);

struct esync
{
    struct object obj;       /* object header */
    int fd;                  /* eventfd file descriptor */
    enum esync_type type;    /* semaphore, auto/manual event, or mutex */
    unsigned int shm_idx;    /* index into the shared memory section */
    struct list mutex_entry; /* entry in the mutex list (if applicable) */
};
static void esync_dump( struct object *obj, int verbose );
static int esync_get_esync_fd( struct object *obj, enum esync_type *type );
static unsigned int esync_map_access( struct object *obj, unsigned int access );
static void esync_destroy( struct object *obj );

/* Method table for esync objects; defaults are used wherever esync needs
 * no special behavior. */
const struct object_ops esync_ops =
{
    sizeof(struct esync),      /* size */
    esync_dump,                /* dump */
    no_get_type,               /* get_type */
    no_add_queue,              /* add_queue */
    NULL,                      /* remove_queue */
    NULL,                      /* signaled */
    esync_get_esync_fd,        /* get_esync_fd */
    NULL,                      /* satisfied */
    no_signal,                 /* signal */
    no_get_fd,                 /* get_fd */
    esync_map_access,          /* map_access */
    default_get_sd,            /* get_sd */
    default_set_sd,            /* set_sd */
    no_lookup_name,            /* lookup_name */
    directory_link_name,       /* link_name */
    default_unlink_name,       /* unlink_name */
    no_open_file,              /* open_file */
    no_kernel_obj_list,        /* get_kernel_obj_list */
    no_alloc_handle,           /* alloc_handle */
    no_close_handle,           /* close_handle */
    esync_destroy              /* destroy */
};
  125. static void esync_dump( struct object *obj, int verbose )
  126. {
  127. struct esync *esync = (struct esync *)obj;
  128. assert( obj->ops == &esync_ops );
  129. fprintf( stderr, "esync fd=%d\n", esync->fd );
  130. }
  131. static int esync_get_esync_fd( struct object *obj, enum esync_type *type )
  132. {
  133. struct esync *esync = (struct esync *)obj;
  134. *type = esync->type;
  135. return esync->fd;
  136. }
  137. static unsigned int esync_map_access( struct object *obj, unsigned int access )
  138. {
  139. /* Sync objects have the same flags. */
  140. if (access & GENERIC_READ) access |= STANDARD_RIGHTS_READ | EVENT_QUERY_STATE;
  141. if (access & GENERIC_WRITE) access |= STANDARD_RIGHTS_WRITE | EVENT_MODIFY_STATE;
  142. if (access & GENERIC_EXECUTE) access |= STANDARD_RIGHTS_EXECUTE | SYNCHRONIZE;
  143. if (access & GENERIC_ALL) access |= STANDARD_RIGHTS_ALL | EVENT_QUERY_STATE | EVENT_MODIFY_STATE;
  144. return access & ~(GENERIC_READ | GENERIC_WRITE | GENERIC_EXECUTE | GENERIC_ALL);
  145. }
  146. static void esync_destroy( struct object *obj )
  147. {
  148. struct esync *esync = (struct esync *)obj;
  149. if (esync->type == ESYNC_MUTEX)
  150. list_remove( &esync->mutex_entry );
  151. close( esync->fd );
  152. }
  153. static int type_matches( enum esync_type type1, enum esync_type type2 )
  154. {
  155. return (type1 == type2) ||
  156. ((type1 == ESYNC_AUTO_EVENT || type1 == ESYNC_MANUAL_EVENT) &&
  157. (type2 == ESYNC_AUTO_EVENT || type2 == ESYNC_MANUAL_EVENT));
  158. }
  159. static void *get_shm( unsigned int idx )
  160. {
  161. int entry = (idx * 8) / pagesize;
  162. int offset = (idx * 8) % pagesize;
  163. if (entry >= shm_addrs_size)
  164. {
  165. if (!(shm_addrs = realloc( shm_addrs, (entry + 1) * sizeof(shm_addrs[0]) )))
  166. fprintf( stderr, "esync: couldn't expand shm_addrs array to size %d\n", entry + 1 );
  167. memset( &shm_addrs[shm_addrs_size], 0, (entry + 1 - shm_addrs_size) * sizeof(shm_addrs[0]) );
  168. shm_addrs_size = entry + 1;
  169. }
  170. if (!shm_addrs[entry])
  171. {
  172. void *addr = mmap( NULL, pagesize, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, entry * pagesize );
  173. if (addr == (void *)-1)
  174. {
  175. fprintf( stderr, "esync: failed to map page %d (offset %#lx): ", entry, entry * pagesize );
  176. perror( "mmap" );
  177. }
  178. if (debug_level)
  179. fprintf( stderr, "esync: Mapping page %d at %p.\n", entry, addr );
  180. if (interlocked_cmpxchg_ptr( &shm_addrs[entry], addr, 0 ))
  181. munmap( addr, pagesize ); /* someone beat us to it */
  182. }
  183. return (void *)((unsigned long)shm_addrs[entry] + offset);
  184. }
/* Layouts of the 8-byte shared memory slots; each C_ASSERT pins the size so
 * the slot arithmetic in get_shm() stays valid.  The client side reads and
 * writes these slots through the same shared section. */

struct semaphore
{
    int max;   /* maximum count */
    int count; /* current count */
};
C_ASSERT(sizeof(struct semaphore) == 8);

struct mutex
{
    DWORD tid; /* owner thread id; 0 when free, ~0 when abandoned */
    int count; /* recursion count */
};
C_ASSERT(sizeof(struct mutex) == 8);

struct event
{
    int signaled; /* nonzero when the event is signaled */
    int locked;   /* spinlock used for manual-reset event updates */
};
C_ASSERT(sizeof(struct event) == 8);
/* Create a new named esync object, or open an existing one.
 *
 * For semaphores "initval"/"max" are the initial and maximum counts; for
 * events a nonzero "initval" creates the event signaled; for mutexes a
 * zero "initval" creates the mutex owned by the calling thread.  If an
 * object with the same name already exists, its type is validated against
 * "type" and the existing object is returned (with STATUS_OBJECT_NAME_EXISTS
 * still set).  Returns NULL and sets the error on failure. */
static struct esync *create_esync( struct object *root, const struct unicode_str *name,
                                   unsigned int attr, int initval, int max, enum esync_type type,
                                   const struct security_descriptor *sd )
{
#ifdef HAVE_SYS_EVENTFD_H
    struct esync *esync;

    if ((esync = create_named_object( root, &esync_ops, name, attr, sd )))
    {
        if (get_error() != STATUS_OBJECT_NAME_EXISTS)
        {
            int flags = EFD_CLOEXEC | EFD_NONBLOCK;

            /* EFD_SEMAPHORE makes each read decrement the count by one. */
            if (type == ESYNC_SEMAPHORE)
                flags |= EFD_SEMAPHORE;

            /* initialize it if it didn't already exist */
            esync->fd = eventfd( initval, flags );
            if (esync->fd == -1)
            {
                perror( "eventfd" );
                file_set_error();
                release_object( esync );
                return NULL;
            }
            esync->type = type;

            /* Use the fd as index, since that'll be unique across all
             * processes, but should hopefully end up also allowing reuse. */
            esync->shm_idx = esync->fd + 1; /* we keep index 0 reserved */
            while (esync->shm_idx * 8 >= shm_size)
            {
                /* Better expand the shm section. */
                shm_size += pagesize;
                if (ftruncate( shm_fd, shm_size ) == -1)
                {
                    fprintf( stderr, "esync: couldn't expand %s to size %ld: ",
                             shm_name, shm_size );
                    perror( "ftruncate" );
                }
            }

            /* Initialize the shared memory portion. We want to do this on the
             * server side to avoid a potential though unlikely race whereby
             * the same object is opened and used between the time it's created
             * and the time its shared memory portion is initialized. */
            switch (type)
            {
            case ESYNC_SEMAPHORE:
            {
                struct semaphore *semaphore = get_shm( esync->shm_idx );
                semaphore->max = max;
                semaphore->count = initval;
                break;
            }
            case ESYNC_AUTO_EVENT:
            case ESYNC_MANUAL_EVENT:
            {
                struct event *event = get_shm( esync->shm_idx );
                event->signaled = initval ? 1 : 0;
                event->locked = 0;
                break;
            }
            case ESYNC_MUTEX:
            {
                struct mutex *mutex = get_shm( esync->shm_idx );
                /* zero initval means "created owned by this thread" */
                mutex->tid = initval ? 0 : current->id;
                mutex->count = initval ? 0 : 1;
                /* track it so esync_abandon_mutexes() can find it */
                list_add_tail( &mutex_list, &esync->mutex_entry );
                break;
            }
            default:
                assert( 0 );
            }
        }
        else
        {
            /* validate the type of the existing object */
            if (!type_matches( type, esync->type ))
            {
                release_object( &esync->obj );
                set_error( STATUS_OBJECT_TYPE_MISMATCH );
                return NULL;
            }
        }
    }
    return esync;
#else
    /* FIXME: Provide a fallback implementation using pipe(). */
    set_error( STATUS_NOT_IMPLEMENTED );
    return NULL;
#endif
}
  291. /* Create a file descriptor for an existing handle.
  292. * Caller must close the handle when it's done; it's not linked to an esync
  293. * server object in any way. */
  294. int esync_create_fd( int initval, int flags )
  295. {
  296. #ifdef HAVE_SYS_EVENTFD_H
  297. int fd;
  298. fd = eventfd( initval, flags | EFD_CLOEXEC | EFD_NONBLOCK );
  299. if (fd == -1)
  300. perror( "eventfd" );
  301. return fd;
  302. #else
  303. return -1;
  304. #endif
  305. }
  306. /* Wake up a specific fd. */
  307. void esync_wake_fd( int fd )
  308. {
  309. static const uint64_t value = 1;
  310. if (write( fd, &value, sizeof(value) ) == -1)
  311. perror( "esync: write" );
  312. }
  313. /* Wake up a server-side esync object. */
  314. void esync_wake_up( struct object *obj )
  315. {
  316. enum esync_type dummy;
  317. int fd;
  318. if (obj->ops->get_esync_fd)
  319. {
  320. fd = obj->ops->get_esync_fd( obj, &dummy );
  321. esync_wake_fd( fd );
  322. }
  323. }
  324. void esync_clear( int fd )
  325. {
  326. uint64_t value;
  327. /* we don't care about the return value */
  328. read( fd, &value, sizeof(value) );
  329. }
/* Busy-wait hint for the spinlocks below: "rep;nop" encodes the x86 PAUSE
 * instruction; on other architectures this is just a compiler barrier. */
static inline void small_pause(void)
{
#ifdef __i386__
    __asm__ __volatile__( "rep;nop" : : : "memory" );
#else
    __asm__ __volatile__( "" : : : "memory" );
#endif
}
/* Server-side event support. */

/* Signal an event: set the shared signaled flag and, if it was previously
 * clear, post one count to the eventfd so waiters wake up.  Manual-reset
 * events additionally take the shared spinlock so the signaled/locked pair
 * is updated consistently with respect to clients. */
void esync_set_event( struct esync *esync )
{
    static const uint64_t value = 1;
    struct event *event = get_shm( esync->shm_idx );

    assert( esync->obj.ops == &esync_ops );
    assert( event != NULL );

    if (debug_level)
        fprintf( stderr, "esync_set_event() fd=%d\n", esync->fd );

    if (esync->type == ESYNC_MANUAL_EVENT)
    {
        /* Acquire the spinlock. */
        while (interlocked_cmpxchg( &event->locked, 1, 0 ))
            small_pause();
    }

    /* Only write to the fd if we weren't already signaled. */
    if (!interlocked_xchg( &event->signaled, 1 ))
    {
        if (write( esync->fd, &value, sizeof(value) ) == -1)
            perror( "esync: write" );
    }

    if (esync->type == ESYNC_MANUAL_EVENT)
    {
        /* Release the spinlock. */
        event->locked = 0;
    }
}
/* Reset an event: clear the shared signaled flag and, if it was set, drain
 * the eventfd.  Manual-reset events take the shared spinlock, as above. */
void esync_reset_event( struct esync *esync )
{
    /* Not const: the read() below stores the eventfd's counter into it. */
    static uint64_t value = 1;
    struct event *event = get_shm( esync->shm_idx );

    assert( esync->obj.ops == &esync_ops );
    assert( event != NULL );

    if (debug_level)
        fprintf( stderr, "esync_reset_event() fd=%d\n", esync->fd );

    if (esync->type == ESYNC_MANUAL_EVENT)
    {
        /* Acquire the spinlock. */
        while (interlocked_cmpxchg( &event->locked, 1, 0 ))
            small_pause();
    }

    /* Only bother signaling the fd if we weren't already signaled. */
    if (interlocked_xchg( &event->signaled, 0 ))
    {
        /* we don't care about the return value */
        read( esync->fd, &value, sizeof(value) );
    }

    if (esync->type == ESYNC_MANUAL_EVENT)
    {
        /* Release the spinlock. */
        event->locked = 0;
    }
}
  390. void esync_abandon_mutexes( struct thread *thread )
  391. {
  392. unsigned int index = 0;
  393. struct esync *esync;
  394. LIST_FOR_EACH_ENTRY( esync, &mutex_list, struct esync, mutex_entry )
  395. {
  396. struct mutex *mutex = get_shm( esync->shm_idx );
  397. if (mutex->tid == thread->id)
  398. {
  399. if (debug_level)
  400. fprintf( stderr, "esync_abandon_mutexes() fd=%d\n", esync->fd );
  401. mutex->tid = ~0;
  402. mutex->count = 0;
  403. esync_wake_fd( esync->fd );
  404. }
  405. }
  406. }
  407. DECL_HANDLER(create_esync)
  408. {
  409. struct esync *esync;
  410. struct unicode_str name;
  411. struct object *root;
  412. const struct security_descriptor *sd;
  413. const struct object_attributes *objattr = get_req_object_attributes( &sd, &name, &root );
  414. if (!do_esync())
  415. {
  416. set_error( STATUS_NOT_IMPLEMENTED );
  417. return;
  418. }
  419. if (!req->type)
  420. {
  421. set_error( STATUS_INVALID_PARAMETER_4 );
  422. return;
  423. }
  424. if (!objattr) return;
  425. if ((esync = create_esync( root, &name, objattr->attributes, req->initval,
  426. req->max, req->type, sd )))
  427. {
  428. if (get_error() == STATUS_OBJECT_NAME_EXISTS)
  429. reply->handle = alloc_handle( current->process, esync, req->access, objattr->attributes );
  430. else
  431. reply->handle = alloc_handle_no_access_check( current->process, esync,
  432. req->access, objattr->attributes );
  433. reply->type = esync->type;
  434. reply->shm_idx = esync->shm_idx;
  435. send_client_fd( current->process, esync->fd, reply->handle );
  436. release_object( esync );
  437. }
  438. if (root) release_object( root );
  439. }
  440. DECL_HANDLER(open_esync)
  441. {
  442. struct unicode_str name = get_req_unicode_str();
  443. reply->handle = open_object( current->process, req->rootdir, req->access,
  444. &esync_ops, &name, req->attributes );
  445. /* send over the fd */
  446. if (reply->handle)
  447. {
  448. struct esync *esync;
  449. if (!(esync = (struct esync *)get_handle_obj( current->process, reply->handle,
  450. 0, &esync_ops )))
  451. return;
  452. if (!type_matches( req->type, esync->type ))
  453. {
  454. set_error( STATUS_OBJECT_TYPE_MISMATCH );
  455. release_object( esync );
  456. return;
  457. }
  458. reply->type = esync->type;
  459. reply->shm_idx = esync->shm_idx;
  460. send_client_fd( current->process, esync->fd, reply->handle );
  461. release_object( esync );
  462. }
  463. }
  464. /* Retrieve a file descriptor for an esync object which will be signaled by the
  465. * server. The client should only read from (i.e. wait on) this object. */
  466. DECL_HANDLER(get_esync_fd)
  467. {
  468. struct object *obj;
  469. enum esync_type type;
  470. int fd;
  471. if (!(obj = get_handle_obj( current->process, req->handle, SYNCHRONIZE, NULL )))
  472. return;
  473. if (obj->ops->get_esync_fd)
  474. {
  475. fd = obj->ops->get_esync_fd( obj, &type );
  476. reply->type = type;
  477. if (obj->ops == &esync_ops)
  478. {
  479. struct esync *esync = (struct esync *)obj;
  480. reply->shm_idx = esync->shm_idx;
  481. }
  482. else
  483. reply->shm_idx = 0;
  484. send_client_fd( current->process, fd, req->handle );
  485. }
  486. else
  487. {
  488. if (debug_level)
  489. {
  490. fprintf( stderr, "%04x: esync: can't wait on object: ", current->id );
  491. obj->ops->dump( obj, 0 );
  492. }
  493. set_error( STATUS_NOT_IMPLEMENTED );
  494. }
  495. release_object( obj );
  496. }
/* Return the fd used for waiting on user APCs. */
DECL_HANDLER(get_esync_apc_fd)
{
    /* The per-thread APC eventfd is created elsewhere; just ship it over. */
    send_client_fd( current->process, current->esync_apc_fd, current->id );
}
  501. }