/*
 * Server-side device support
 *
 * Copyright (C) 2007 Alexandre Julliard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"
#include "wine/rbtree.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"
#include "winternl.h"
#include "ddk/wdm.h"

#include "device.h"
#include "object.h"
#include "file.h"
#include "handle.h"
#include "request.h"
#include "process.h"
#include "esync.h"

/* IRP object */
struct irp_call
{
    struct object       obj;           /* object header */
    struct list         dev_entry;     /* entry in device queue */
    struct list         mgr_entry;     /* entry in manager queue */
    struct device_file *file;          /* file containing this irp */
    struct thread      *thread;        /* thread that queued the irp */
    struct async       *async;         /* pending async op */
    irp_params_t        params;        /* irp parameters */
    struct iosb        *iosb;          /* I/O status block */
    int                 canceled;      /* the call was canceled */
    client_ptr_t        user_ptr;      /* client side pointer */
};

static void irp_call_dump( struct object *obj, int verbose );
static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry );
static void irp_call_destroy( struct object *obj );

static const struct object_ops irp_call_ops =
{
    sizeof(struct irp_call),          /* size */
    irp_call_dump,                    /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    irp_call_signaled,                /* signaled */
    NULL,                             /* get_esync_fd */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_alloc_handle,                  /* alloc_handle */
    no_close_handle,                  /* close_handle */
    irp_call_destroy                  /* destroy */
};
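
/* a kernel callback queued to one device manager */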
struct callback_entry
{
    struct list entry;
    krnl_cbdata_t data;
    struct thread *client_thread;
    struct unicode_str string_param;
    struct callback_event *event;
};

/* device manager (a list of devices managed by the same client process) */
struct device_manager
{
    struct object          obj;               /* object header */
    struct list            entry;             /* entry in global list of device managers */
    struct list            devices;           /* list of devices */
    struct list            requests;          /* list of pending irps across all devices */
    int                    callback_mask;     /* mask of which callbacks we accept */
    struct list            callbacks;         /* list of pending callbacks */
    struct irp_call       *current_call;      /* call currently executed on client side */
    struct thread         *current_cb_client; /* client thread of the callback being processed */
    struct callback_event *current_cb_event;  /* done event of the callback being processed */
    struct thread         *main_loop_thread;  /* thread running the device manager loop */
    struct wine_rb_tree    kernel_objects;    /* map of objects that have client side pointer associated */
    int                    esync_fd;          /* esync file descriptor */
};

static void device_manager_dump( struct object *obj, int verbose );
static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry );
static int device_manager_get_esync_fd( struct object *obj, enum esync_type *type );
static void device_manager_destroy( struct object *obj );

static const struct object_ops device_manager_ops =
{
    sizeof(struct device_manager),    /* size */
    device_manager_dump,              /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    device_manager_signaled,          /* signaled */
    device_manager_get_esync_fd,      /* get_esync_fd */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_alloc_handle,                  /* alloc_handle */
    no_close_handle,                  /* close_handle */
    device_manager_destroy            /* destroy */
};

/* device (a single device object) */
struct device
{
    struct object          obj;           /* object header */
    struct device_manager *manager;       /* manager for this device (or NULL if deleted) */
    char                  *unix_path;     /* path to unix device if any */
    struct list            kernel_object; /* list of kernel object pointers */
    struct list            entry;         /* entry in device manager list */
    struct list            files;         /* list of open files */
};

static void device_dump( struct object *obj, int verbose );
static struct object_type *device_get_type( struct object *obj );
static void device_destroy( struct object *obj );
static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options );
static struct list *device_get_kernel_obj_list( struct object *obj );

static const struct object_ops device_ops =
{
    sizeof(struct device),            /* size */
    device_dump,                      /* dump */
    device_get_type,                  /* get_type */
    no_add_queue,                     /* add_queue */
    NULL,                             /* remove_queue */
    NULL,                             /* signaled */
    NULL,                             /* get_esync_fd */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    directory_link_name,              /* link_name */
    default_unlink_name,              /* unlink_name */
    device_open_file,                 /* open_file */
    device_get_kernel_obj_list,       /* get_kernel_obj_list */
    no_alloc_handle,                  /* alloc_handle */
    no_close_handle,                  /* close_handle */
    device_destroy                    /* destroy */
};

/* device file (an open file handle to a device) */
struct device_file
{
    struct object  obj;           /* object header */
    struct device *device;        /* device for this file */
    struct fd     *fd;            /* file descriptor for irp */
    struct list    kernel_object; /* list of kernel object pointers */
    int            closed;        /* closed file flag */
    struct list    entry;         /* entry in device list */
    struct list    requests;      /* list of pending irp requests */
};

static void device_file_dump( struct object *obj, int verbose );
static struct fd *device_file_get_fd( struct object *obj );
static struct list *device_file_get_kernel_obj_list( struct object *obj );
static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void device_file_destroy( struct object *obj );
static enum server_fd_type device_file_get_fd_type( struct fd *fd );
static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos );
static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos );
static int device_file_flush( struct fd *fd, struct async *async );
static int device_file_ioctl( struct fd *fd, ioctl_code_t code, client_ptr_t in_buf, client_ptr_t out_buf,
                              struct async *async );
static void device_file_reselect_async( struct fd *fd, struct async_queue *queue );

static const struct object_ops device_file_ops =
{
    sizeof(struct device_file),       /* size */
    device_file_dump,                 /* dump */
    file_get_type,                    /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    default_fd_signaled,              /* signaled */
    NULL,                             /* get_esync_fd */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    device_file_get_fd,               /* get_fd */
    default_fd_map_access,            /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    device_file_get_kernel_obj_list,  /* get_kernel_obj_list */
    no_alloc_handle,                  /* alloc_handle */
    device_file_close_handle,         /* close_handle */
    device_file_destroy               /* destroy */
};

static const struct fd_ops device_file_fd_ops =
{
    default_fd_get_poll_events,       /* get_poll_events */
    default_poll_event,               /* poll_event */
    device_file_get_fd_type,          /* get_fd_type */
    device_file_read,                 /* read */
    device_file_write,                /* write */
    device_file_flush,                /* flush */
    default_fd_get_file_info,         /* get_file_info */
    no_fd_get_volume_info,            /* get_volume_info */
    device_file_ioctl,                /* ioctl */
    default_fd_queue_async,           /* queue_async */
    device_file_reselect_async        /* reselect_async */
};

/* callback event (an event signaled when the callback has been processed by every device manager) */
struct callback_event
{
    struct object obj;             /* object header */
    unsigned int remaining_count;  /* number of device managers that have not yet processed the callback */
};

static void callback_event_dump( struct object *obj, int verbose );
static int callback_event_signaled( struct object *obj, struct wait_queue_entry *entry );
static void callback_event_destroy( struct object *obj );

static const struct object_ops callback_event_ops =
{
    sizeof(struct callback_event),    /* size */
    callback_event_dump,              /* dump */
    no_get_type,                      /* get_type */
    add_queue,                        /* add_queue */
    remove_queue,                     /* remove_queue */
    callback_event_signaled,          /* signaled */
    NULL,                             /* get_esync_fd */
    no_satisfied,                     /* satisfied */
    no_signal,                        /* signal */
    no_get_fd,                        /* get_fd */
    no_map_access,                    /* map_access */
    default_get_sd,                   /* get_sd */
    default_set_sd,                   /* set_sd */
    no_lookup_name,                   /* lookup_name */
    no_link_name,                     /* link_name */
    NULL,                             /* unlink_name */
    no_open_file,                     /* open_file */
    no_kernel_obj_list,               /* get_kernel_obj_list */
    no_alloc_handle,                  /* alloc_handle */
    no_close_handle,                  /* close_handle */
    callback_event_destroy            /* destroy */
};
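
/* default get_kernel_obj_list() routine for objects that cannot have kernel object pointers */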
struct list *no_kernel_obj_list( struct object *obj )
{
    return NULL;
}
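
/* association between a server object and a client-side kernel object pointer */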
struct kernel_object
{
    struct device_manager *manager;
    client_ptr_t           user_ptr;
    struct object         *object;
    int                    owned;
    struct list            list_entry;
    struct wine_rb_entry   rb_entry;
};
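
/* compare routine for the rb-tree of kernel objects, keyed on the client-side pointer */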
static int compare_kernel_object( const void *k, const struct wine_rb_entry *entry )
{
    struct kernel_object *ptr = WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry );
    return memcmp( k, &ptr->user_ptr, sizeof(client_ptr_t) );
}
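
/* find the kernel object entry of an object for a given device manager */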
static struct kernel_object *kernel_object_from_obj( struct device_manager *manager, struct object *obj )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;
    LIST_FOR_EACH_ENTRY( kernel_object, list, struct kernel_object, list_entry )
    {
        if (kernel_object->manager != manager) continue;
        return kernel_object;
    }
    return NULL;
}
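
/* return the client-side kernel object pointer for an object, or 0 if none is set */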
client_ptr_t get_kernel_object_ptr( struct device_manager *manager, struct object *obj )
{
    struct kernel_object *kernel_object = kernel_object_from_obj( manager, obj );
    return kernel_object ? kernel_object->user_ptr : 0;
}
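
/* associate a client-side kernel object pointer with a server object */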
static struct kernel_object *set_kernel_object( struct device_manager *manager, struct object *obj, client_ptr_t user_ptr )
{
    struct kernel_object *kernel_object;
    struct list *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return NULL;

    if (!(kernel_object = malloc( sizeof(*kernel_object) ))) return NULL;
    kernel_object->manager  = manager;
    kernel_object->user_ptr = user_ptr;
    kernel_object->object   = obj;
    kernel_object->owned    = 0;

    if (wine_rb_put( &manager->kernel_objects, &user_ptr, &kernel_object->rb_entry ))
    {
        /* kernel_object pointer already set */
        free( kernel_object );
        return NULL;
    }

    list_add_head( list, &kernel_object->list_entry );
    return kernel_object;
}
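
/* retrieve the kernel object entry for a given client-side pointer */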
static struct kernel_object *kernel_object_from_ptr( struct device_manager *manager, client_ptr_t client_ptr )
{
    struct wine_rb_entry *entry = wine_rb_get( &manager->kernel_objects, &client_ptr );
    return entry ? WINE_RB_ENTRY_VALUE( entry, struct kernel_object, rb_entry ) : NULL;
}
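
/* make the kernel object entry hold a reference on the server object */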
static void grab_kernel_object( struct kernel_object *ptr )
{
    if (!ptr->owned)
    {
        grab_object( ptr->object );
        ptr->owned = 1;
    }
}

static void irp_call_dump( struct object *obj, int verbose )
{
    struct irp_call *irp = (struct irp_call *)obj;
    fprintf( stderr, "IRP call file=%p\n", irp->file );
}

static int irp_call_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct irp_call *irp = (struct irp_call *)obj;

    return !irp->file;  /* file is cleared once the irp has completed */
}

static void irp_call_destroy( struct object *obj )
{
    struct irp_call *irp = (struct irp_call *)obj;

    if (irp->async)
    {
        async_terminate( irp->async, STATUS_CANCELLED );
        release_object( irp->async );
    }
    if (irp->iosb) release_object( irp->iosb );
    if (irp->file) release_object( irp->file );
    if (irp->thread) release_object( irp->thread );
}
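
/* create an IRP call; file may be NULL for calls that are not tied to a device file */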
static struct irp_call *create_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp;

    if (file && !file->device->manager)  /* it has been deleted */
    {
        set_error( STATUS_FILE_DELETED );
        return NULL;
    }

    if ((irp = alloc_object( &irp_call_ops )))
    {
        irp->file     = file ? (struct device_file *)grab_object( file ) : NULL;
        irp->thread   = NULL;
        irp->async    = NULL;
        irp->params   = *params;
        irp->iosb     = NULL;
        irp->canceled = 0;
        irp->user_ptr = 0;

        if (async) irp->iosb = async_get_iosb( async );
        if (!irp->iosb && !(irp->iosb = create_iosb( NULL, 0, 0 )))
        {
            release_object( irp );
            irp = NULL;
        }
    }
    return irp;
}
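
/* store the result of an IRP call and wake up the waiters */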
static void set_irp_result( struct irp_call *irp, unsigned int status,
                            const void *out_data, data_size_t out_size, data_size_t result )
{
    struct device_file *file = irp->file;
    struct iosb *iosb = irp->iosb;

    if (!file) return;  /* already finished */

    /* FIXME: handle the STATUS_PENDING case */
    iosb->status = status;
    iosb->result = result;
    iosb->out_size = min( iosb->out_size, out_size );
    if (iosb->out_size && !(iosb->out_data = memdup( out_data, iosb->out_size )))
        iosb->out_size = 0;

    /* remove it from the device queue */
    list_remove( &irp->dev_entry );
    irp->file = NULL;
    if (irp->async)
    {
        if (result) status = STATUS_ALERTED;
        async_terminate( irp->async, status );
        release_object( irp->async );
        irp->async = NULL;
    }
    wake_up( &irp->obj, 0 );

    release_object( irp );  /* no longer on the device queue */
    release_object( file );
}

static void device_dump( struct object *obj, int verbose )
{
    fputs( "Device\n", stderr );
}

static struct object_type *device_get_type( struct object *obj )
{
    static const WCHAR name[] = {'D','e','v','i','c','e'};
    static const struct unicode_str str = { name, sizeof(name) };
    return get_object_type( &str );
}

static void device_destroy( struct object *obj )
{
    struct device *device = (struct device *)obj;

    assert( list_empty( &device->files ));

    free( device->unix_path );
    if (device->manager) list_remove( &device->entry );
}
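
/* queue an IRP on its file and device manager, waking up the manager if it was idle */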
static void add_irp_to_queue( struct device_manager *manager, struct irp_call *irp, struct thread *thread )
{
    grab_object( irp );  /* grab reference for queued irp */
    irp->thread = thread ? (struct thread *)grab_object( thread ) : NULL;
    if (irp->file) list_add_tail( &irp->file->requests, &irp->dev_entry );
    list_add_tail( &manager->requests, &irp->mgr_entry );
    if (list_head( &manager->requests ) == &irp->mgr_entry) wake_up( &manager->obj, 0 );  /* first one */
}

static struct object *device_open_file( struct object *obj, unsigned int access,
                                        unsigned int sharing, unsigned int options )
{
    struct device *device = (struct device *)obj;
    struct device_file *file;

    if (!(file = alloc_object( &device_file_ops ))) return NULL;

    file->device = (struct device *)grab_object( device );
    file->closed = 0;
    list_init( &file->kernel_object );
    list_init( &file->requests );
    list_add_tail( &device->files, &file->entry );
    if (device->unix_path)
    {
        mode_t mode = 0666;
        access = file->obj.ops->map_access( &file->obj, access );
        file->fd = open_fd( NULL, device->unix_path, O_NONBLOCK | O_LARGEFILE,
                            &mode, access, sharing, options );
        if (file->fd) set_fd_user( file->fd, &device_file_fd_ops, &file->obj );
    }
    else file->fd = alloc_pseudo_fd( &device_file_fd_ops, &file->obj, options );

    if (!file->fd)
    {
        release_object( file );
        return NULL;
    }

    allow_fd_caching( file->fd );

    if (device->manager)
    {
        struct irp_call *irp;
        irp_params_t params;

        memset( &params, 0, sizeof(params) );
        params.create.type    = IRP_CALL_CREATE;
        params.create.access  = access;
        params.create.sharing = sharing;
        params.create.options = options;
        params.create.device  = get_kernel_object_ptr( device->manager, &device->obj );

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( device->manager, irp, current );
            release_object( irp );
        }
    }
    return &file->obj;
}

static struct list *device_get_kernel_obj_list( struct object *obj )
{
    struct device *device = (struct device *)obj;
    return &device->kernel_object;
}

static void device_file_dump( struct object *obj, int verbose )
{
    struct device_file *file = (struct device_file *)obj;

    fprintf( stderr, "File on device %p\n", file->device );
}

static struct fd *device_file_get_fd( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;

    return (struct fd *)grab_object( file->fd );
}

static struct list *device_file_get_kernel_obj_list( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    return &file->kernel_object;
}

static int device_file_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct device_file *file = (struct device_file *)obj;

    if (!file->closed && file->device->manager && obj->handle_count == 1)  /* last handle */
    {
        struct irp_call *irp;
        irp_params_t params;

        file->closed = 1;
        memset( &params, 0, sizeof(params) );
        params.close.type = IRP_CALL_CLOSE;

        if ((irp = create_irp( file, &params, NULL )))
        {
            add_irp_to_queue( file->device->manager, irp, current );
            release_object( irp );
        }
    }
    return 1;
}

static void device_file_destroy( struct object *obj )
{
    struct device_file *file = (struct device_file *)obj;
    struct irp_call *irp, *next;

    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        list_remove( &irp->dev_entry );
        release_object( irp );  /* no longer on the device queue */
    }
    if (file->fd) release_object( file->fd );
    list_remove( &file->entry );
    release_object( file->device );
}
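
/* fill in the parameters sent to the client-side driver; returns 0 on failure */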
static int fill_irp_params( struct device_manager *manager, struct irp_call *irp, irp_params_t *params )
{
    switch (irp->params.type)
    {
    case IRP_CALL_NONE:
    case IRP_CALL_FREE:
    case IRP_CALL_CANCEL:
        break;
    case IRP_CALL_CREATE:
        irp->params.create.file = alloc_handle( current->process, irp->file,
                                                irp->params.create.access, 0 );
        if (!irp->params.create.file) return 0;
        break;
    case IRP_CALL_CLOSE:
        irp->params.close.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_READ:
        irp->params.read.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.read.out_size = irp->iosb->out_size;
        break;
    case IRP_CALL_WRITE:
        irp->params.write.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_FLUSH:
        irp->params.flush.file = get_kernel_object_ptr( manager, &irp->file->obj );
        break;
    case IRP_CALL_IOCTL:
        irp->params.ioctl.file     = get_kernel_object_ptr( manager, &irp->file->obj );
        irp->params.ioctl.out_size = irp->iosb->out_size;
        break;
    }

    *params = irp->params;
    return 1;
}
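
/* free the resources allocated by fill_irp_params() */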
static void free_irp_params( struct irp_call *irp )
{
    switch (irp->params.type)
    {
    case IRP_CALL_CREATE:
        close_handle( current->process, irp->params.create.file );
        break;
    default:
        break;
    }
}

/* queue an irp to the device */
static int queue_irp( struct device_file *file, const irp_params_t *params, struct async *async )
{
    struct irp_call *irp = create_irp( file, params, async );
    if (!irp) return 0;

    fd_queue_async( file->fd, async, ASYNC_TYPE_WAIT );
    irp->async = (struct async *)grab_object( async );
    add_irp_to_queue( file->device->manager, irp, current );
    release_object( irp );
    set_error( STATUS_PENDING );
    return 0;
}

static enum server_fd_type device_file_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DEVICE;
}

static int device_file_read( struct fd *fd, struct async *async, file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.read.type = IRP_CALL_READ;
    params.read.key  = 0;
    params.read.pos  = pos;
    return queue_irp( file, &params, async );
}

static int device_file_write( struct fd *fd, struct async *async, file_pos_t pos )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.write.type = IRP_CALL_WRITE;
    params.write.key  = 0;
    params.write.pos  = pos;
    return queue_irp( file, &params, async );
}

static int device_file_flush( struct fd *fd, struct async *async )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.flush.type = IRP_CALL_FLUSH;
    return queue_irp( file, &params, async );
}

static int device_file_ioctl( struct fd *fd, ioctl_code_t code, client_ptr_t in_buf, client_ptr_t out_buf,
                              struct async *async )
{
    struct device_file *file = get_fd_user( fd );
    irp_params_t params;

    memset( &params, 0, sizeof(params) );
    params.ioctl.type    = IRP_CALL_IOCTL;
    params.ioctl.code    = code;
    params.ioctl.in_buf  = in_buf;
    params.ioctl.out_buf = out_buf;
    return queue_irp( file, &params, async );
}
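
/* cancel an IRP call, queuing a cancel IRP to the driver if the client already has a pointer to it */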
static void cancel_irp_call( struct irp_call *irp )
{
    struct irp_call *cancel_irp;
    irp_params_t params;

    irp->canceled = 1;
    if (!irp->user_ptr || !irp->file || !irp->file->device->manager) return;

    memset( &params, 0, sizeof(params) );
    params.cancel.type = IRP_CALL_CANCEL;
    params.cancel.irp  = irp->user_ptr;

    if ((cancel_irp = create_irp( NULL, &params, NULL )))
    {
        add_irp_to_queue( irp->file->device->manager, cancel_irp, NULL );
        release_object( cancel_irp );
    }

    set_irp_result( irp, STATUS_CANCELLED, NULL, 0, 0 );
}

static void device_file_reselect_async( struct fd *fd, struct async_queue *queue )
{
    struct device_file *file = get_fd_user( fd );
    struct irp_call *irp;

    LIST_FOR_EACH_ENTRY( irp, &file->requests, struct irp_call, dev_entry )
        if (irp->iosb->status != STATUS_PENDING)
        {
            cancel_irp_call( irp );
            return;
        }
}
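
/* create a device managed by a client-side device manager */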
static struct device *create_device( struct object *root, const struct unicode_str *name,
                                     struct device_manager *manager )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
    {
        device->unix_path = NULL;
        device->manager = manager;
        grab_object( device );
        list_add_tail( &manager->devices, &device->entry );
        list_init( &device->kernel_object );
        list_init( &device->files );
    }
    return device;
}
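
/* create a device whose requests are sent directly to a Unix device */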
struct object *create_unix_device( struct object *root, const struct unicode_str *name,
                                   const char *unix_path )
{
    struct device *device;

    if ((device = create_named_object( root, &device_ops, name, 0, NULL )))
    {
        device->unix_path = strdup( unix_path );
        device->manager = NULL;  /* no manager, requests go straight to the Unix device */
        list_init( &device->kernel_object );
        list_init( &device->files );
    }
    return &device->obj;
}

/* terminate requests when the underlying device is deleted */
static void delete_file( struct device_file *file )
{
    struct irp_call *irp, *next;

    /* the pending requests may be the only thing holding a reference to the file */
    grab_object( file );

    /* terminate all pending requests */
    LIST_FOR_EACH_ENTRY_SAFE( irp, next, &file->requests, struct irp_call, dev_entry )
    {
        if (do_esync() && file->device->manager && list_empty( &file->device->manager->requests ))
            esync_clear( file->device->manager->esync_fd );

        list_remove( &irp->mgr_entry );
        set_irp_result( irp, STATUS_FILE_DELETED, NULL, 0, 0 );
    }
    release_object( file );
}
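
/* delete a device and terminate all its open files */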
static void delete_device( struct device *device )
{
    struct device_file *file, *next;

    if (!device->manager) return;  /* already deleted */

    LIST_FOR_EACH_ENTRY_SAFE( file, next, &device->files, struct device_file, entry )
        delete_file( file );

    unlink_named_object( &device->obj );
    list_remove( &device->entry );
    device->manager = NULL;
    release_object( device );
}

static void device_manager_dump( struct object *obj, int verbose )
{
    fprintf( stderr, "Device manager\n" );
}

static int device_manager_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct device_manager *manager = (struct device_manager *)obj;

    return !list_empty( &manager->requests ) || !list_empty( &manager->callbacks );
}

static int device_manager_get_esync_fd( struct object *obj, enum esync_type *type )
{
    struct device_manager *manager = (struct device_manager *)obj;
    *type = ESYNC_MANUAL_SERVER;
    return manager->esync_fd;
}

static void device_manager_destroy( struct object *obj )
{
    struct device_manager *manager = (struct device_manager *)obj;
    struct kernel_object *kernel_object;
    struct list *ptr;

    list_remove( &manager->entry );
    if (current) current->process->dev_mgr = NULL;

    if (manager->current_call)
    {
        release_object( manager->current_call );
        manager->current_call = NULL;
    }

    if (manager->current_cb_client)
    {
        release_object( manager->current_cb_client );
        manager->current_cb_client = NULL;
    }

    if (manager->main_loop_thread)
    {
        release_object( manager->main_loop_thread );
        manager->main_loop_thread = NULL;
    }

    while (manager->kernel_objects.root)
    {
        kernel_object = WINE_RB_ENTRY_VALUE( manager->kernel_objects.root, struct kernel_object, rb_entry );
        wine_rb_remove( &manager->kernel_objects, &kernel_object->rb_entry );
        list_remove( &kernel_object->list_entry );
        if (kernel_object->owned) release_object( kernel_object->object );
        free( kernel_object );
    }

    while ((ptr = list_head( &manager->devices )))
    {
        struct device *device = LIST_ENTRY( ptr, struct device, entry );
        delete_device( device );
    }

    while ((ptr = list_head( &manager->requests )))
    {
        struct irp_call *irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        list_remove( &irp->mgr_entry );
        assert( !irp->file && !irp->async );
        release_object( irp );
    }

    while ((ptr = list_head( &manager->callbacks )))
    {
        struct callback_entry *entry = LIST_ENTRY( ptr, struct callback_entry, entry );
        list_remove( &entry->entry );
        free( entry );
    }

    if (do_esync())
        close( manager->esync_fd );
}
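
/* global list of device managers */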
static struct list device_managers = LIST_INIT( device_managers );

static struct device_manager *create_device_manager(void)
{
    struct device_manager *manager;

    if ((manager = alloc_object( &device_manager_ops )))
    {
        manager->current_call      = NULL;
        manager->current_cb_client = NULL;
        manager->current_cb_event  = NULL;
        manager->main_loop_thread  = (struct thread *)grab_object( current );
        manager->callback_mask     = 0;
        list_init( &manager->devices );
        list_init( &manager->requests );
        list_init( &manager->callbacks );
        list_add_tail( &device_managers, &manager->entry );
        wine_rb_init( &manager->kernel_objects, compare_kernel_object );

        if (do_esync())
            manager->esync_fd = esync_create_fd( 0, 0 );
    }
    return manager;
}
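
/* free all kernel object pointers attached to an object, notifying their managers */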
void free_kernel_objects( struct object *obj )
{
    struct list *ptr, *list;

    if (!(list = obj->ops->get_kernel_obj_list( obj ))) return;

    while ((ptr = list_head( list )))
    {
        struct kernel_object *kernel_object = LIST_ENTRY( ptr, struct kernel_object, list_entry );
        struct irp_call *irp;
        irp_params_t params;

        assert( !kernel_object->owned );

        memset( &params, 0, sizeof(params) );
        params.free.type = IRP_CALL_FREE;
        params.free.obj  = kernel_object->user_ptr;

        if ((irp = create_irp( NULL, &params, NULL )))
        {
            add_irp_to_queue( kernel_object->manager, irp, NULL );
            release_object( irp );
        }

        list_remove( &kernel_object->list_entry );
        wine_rb_remove( &kernel_object->manager->kernel_objects, &kernel_object->rb_entry );
        free( kernel_object );
    }
}

static void callback_event_dump( struct object *obj, int verbose )
{
    struct callback_event *event = (struct callback_event *)obj;
    assert( obj->ops == &callback_event_ops );
    fprintf( stderr, "Callback event remaining=%u\n", event->remaining_count );
}

static int callback_event_signaled( struct object *obj, struct wait_queue_entry *entry )
{
    struct callback_event *event = (struct callback_event *)obj;
    assert( obj->ops == &callback_event_ops );
    return event->remaining_count == 0;
}

static void callback_event_destroy( struct object *obj )
{
    assert( obj->ops == &callback_event_ops );
}

/* create a device manager */
DECL_HANDLER(create_device_manager)
{
    struct device_manager *manager;

    if (current->process->dev_mgr)
    {
        set_error( STATUS_CONFLICTING_ADDRESSES );
        return;
    }

    if ((manager = create_device_manager()))
    {
        current->process->dev_mgr = manager;
        reply->handle = alloc_handle( current->process, manager, req->access, req->attributes );
        release_object( manager );
    }
}

/* create a device */
DECL_HANDLER(create_device)
{
    struct device *device;
    struct unicode_str name = get_req_unicode_str();
    struct device_manager *manager;
    struct object *root = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (req->rootdir && !(root = get_directory_obj( current->process, req->rootdir )))
    {
        release_object( manager );
        return;
    }

    if ((device = create_device( root, &name, manager )))
    {
        struct kernel_object *ptr = set_kernel_object( manager, &device->obj, req->user_ptr );
        if (ptr)
            grab_kernel_object( ptr );
        else
            set_error( STATUS_NO_MEMORY );
        release_object( device );
    }

    if (root) release_object( root );
    release_object( manager );
}

/* delete a device */
DECL_HANDLER(delete_device)
{
    struct device_manager *manager;
    struct kernel_object *ref;
    struct device *device;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->device )) && ref->object->ops == &device_ops)
    {
        device = (struct device *)grab_object( ref->object );
        delete_device( device );
        release_object( device );
    }
    else set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}

/* retrieve the next pending device irp request */
DECL_HANDLER(get_next_device_request)
{
    struct irp_call *irp;
    struct device_manager *manager;
    struct list *ptr;
    struct iosb *iosb;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (current->attached_process)
    {
        release_object( current->attached_process );
        current->attached_process = NULL;
    }

    if (manager->current_cb_event)
    {
        manager->current_cb_event->remaining_count--;
        if (!manager->current_cb_event->remaining_count)
            wake_up( &manager->current_cb_event->obj, 0 );
        release_object( manager->current_cb_event );
        manager->current_cb_event = NULL;
    }

    if (req->prev) close_handle( current->process, req->prev );  /* avoid an extra round-trip for close */

    /* process result of previous call */
    if (manager->current_call)
    {
        irp = manager->current_call;
        irp->user_ptr = req->user_ptr;

        if (req->status)
            set_irp_result( irp, req->status, NULL, 0, 0 );
        if (irp->canceled)
            /* if it was canceled during dispatch, we couldn't queue a cancel call without
             * the client pointer, so we need to do it now */
            cancel_irp_call( irp );
        else if (irp->async)
            set_async_pending( irp->async, irp->file && is_fd_overlapped( irp->file->fd ) );

        free_irp_params( irp );
        release_object( irp );
        manager->current_call = NULL;
    }
    clear_error();

    if ((ptr = list_head( &manager->requests )))
    {
        struct thread *thread;

        irp = LIST_ENTRY( ptr, struct irp_call, mgr_entry );
        thread = irp->thread ? irp->thread : current;
        reply->client_thread = get_kernel_object_ptr( manager, &thread->obj );
        reply->client_tid = get_thread_id( thread );

        iosb = irp->iosb;
        reply->in_size = iosb->in_size;
        if (iosb->in_size > get_reply_max_size()) set_error( STATUS_BUFFER_OVERFLOW );
        else if (!irp->file || (reply->next = alloc_handle( current->process, irp, 0, 0 )))
        {
            if (fill_irp_params( manager, irp, &reply->params ))
            {
                set_reply_data_ptr( iosb->in_data, iosb->in_size );
                iosb->in_data = NULL;
                iosb->in_size = 0;
                list_remove( &irp->mgr_entry );
                list_init( &irp->mgr_entry );
                /* we already own the object if it's only on the manager queue */
                if (irp->file) grab_object( irp );
                manager->current_call = irp;

                if (do_esync() && list_empty( &manager->requests ))
                    esync_clear( manager->esync_fd );
            }
            else close_handle( current->process, reply->next );
        }
    }
    else set_error( STATUS_PENDING );

    release_object( manager );
}

/* store results of an async irp */
DECL_HANDLER(set_irp_result)
{
    struct irp_call *irp;

    if ((irp = (struct irp_call *)get_handle_obj( current->process, req->handle, 0, &irp_call_ops )))
    {
        if (!irp->canceled)
            set_irp_result( irp, req->status, get_req_data(), get_req_data_size(), req->size );
        else if (irp->user_ptr)  /* cancel already queued */
            set_error( STATUS_MORE_PROCESSING_REQUIRED );
        else  /* we may still be dispatching the IRP; don't bother queuing a cancel if it's already complete */
            irp->canceled = 0;

        close_handle( current->process, req->handle );  /* avoid an extra round-trip for close */
        release_object( irp );
    }
}

/* get kernel pointer from server object */
DECL_HANDLER(get_kernel_object_ptr)
{
    struct device_manager *manager;
    struct object *object = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((object = get_handle_obj( current->process, req->handle, 0, NULL )))
    {
        reply->user_ptr = get_kernel_object_ptr( manager, object );
        release_object( object );
    }

    release_object( manager );
}

/* associate kernel pointer with server object */
DECL_HANDLER(set_kernel_object_ptr)
{
    struct device_manager *manager;
    struct object *object = NULL;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (!(object = get_handle_obj( current->process, req->handle, 0, NULL )))
    {
        release_object( manager );
        return;
    }

    if (!set_kernel_object( manager, object, req->user_ptr ))
        set_error( STATUS_INVALID_HANDLE );

    release_object( object );
    release_object( manager );
}

/* grab server object reference from kernel object pointer */
DECL_HANDLER(grab_kernel_object)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && !ref->owned)
        grab_kernel_object( ref );
    else
        set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}

/* release server object reference from kernel object pointer */
DECL_HANDLER(release_kernel_object)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )) && ref->owned)
    {
        ref->owned = 0;
        release_object( ref->object );
    }
    else set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}

/* get handle from kernel object pointer */
DECL_HANDLER(get_kernel_object_handle)
{
    struct device_manager *manager;
    struct kernel_object *ref;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((ref = kernel_object_from_ptr( manager, req->user_ptr )))
    {
        if (req->attributes & OBJ_KERNEL_HANDLE)
            reply->handle = alloc_handle( current->process, ref->object, req->access, 0 );
        else
        {
            struct thread *client_thread;
            struct process *user_process = NULL;

            if (current->attached_process)
                user_process = current->attached_process;
            else if ((client_thread = device_manager_client_thread( manager, current )))
            {
                user_process = client_thread->process;
                release_object( client_thread );
            }
            else
                set_error( STATUS_INVALID_PARAMETER );

            if (user_process)
                reply->handle = alloc_handle( user_process, ref->object, req->access, req->attributes );
        }
    }
    else
        set_error( STATUS_INVALID_HANDLE );

    release_object( manager );
}
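
/* set the mask of kernel callbacks that a device manager wants to receive */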
DECL_HANDLER(callback_subscribe)
{
    struct device_manager *manager;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    manager->callback_mask = req->callback_mask;
    release_object( manager );
}
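
/* rebuild the object pointer stored in the two 32-bit halves of a handle-event callback */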
struct object *get_handle_event_object( krnl_cbdata_t *cb )
{
    if (cb->cb_type == SERVER_CALLBACK_HANDLE_EVENT)
    {
        struct object *obj;
        if ((obj = (struct object *)((unsigned long int)cb->handle_event.object << 32 | cb->handle_event.padding)))
            return obj;
    }
    return NULL;
}
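
/* copy callback data, adding a reference on any embedded object */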
krnl_cbdata_t grab_cbdata( krnl_cbdata_t *cb )
{
    krnl_cbdata_t ret = *cb;
    struct object *obj;

    if ((obj = get_handle_event_object( cb ))) grab_object( obj );
    return ret;
}
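
/* release the object reference embedded in callback data, if any */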
void free_cbdata( krnl_cbdata_t *cb )
{
    struct object *obj;

    if ((obj = get_handle_event_object( cb ))) release_object( obj );
}
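
/* allocate a callback entry for one device manager, accounting for it on the done event */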
struct callback_entry *allocate_callback_entry( krnl_cbdata_t *cb, struct unicode_str *string_param,
                                                struct callback_event *event )
{
    struct callback_entry *entry;

    if (!(entry = malloc( sizeof(*entry) ))) return NULL;

    entry->data = grab_cbdata( cb );
    entry->client_thread = current ? (struct thread *)grab_object( current ) : NULL;
    if (string_param)
    {
        entry->string_param.str = memdup( string_param->str, string_param->len );
        entry->string_param.len = string_param->len;
    }
    else
    {
        entry->string_param.str = NULL;
        entry->string_param.len = 0;
    }
    entry->event = (struct callback_event *)grab_object( event );
    event->remaining_count++;
    return entry;
}
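
/* duplicate a callback entry, adding a reference on its embedded object data */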
struct callback_entry *copy_callback_entry( struct callback_entry *source )
{
    struct callback_entry *dest;

    if (!(dest = malloc( sizeof(*dest) ))) return NULL;

    memcpy( dest, source, sizeof(*dest) );
    grab_cbdata( &dest->data );  /* return value discarded; only the extra object reference matters */
    if (source->string_param.str)
        dest->string_param.str = memdup( source->string_param.str, source->string_param.len );
    return dest;
}
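
/* free a callback entry together with its data */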
void free_callback_entry( struct callback_entry *entry )
{
    free_cbdata( &entry->data );
    if (entry->string_param.str)
        free( entry->string_param.str );
    free( entry );
}
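
/* set while allocating a handle for callback data; queue_callback() is a no-op while this is nonzero */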
static int is_extending;
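
/* queue a callback to every subscribed device manager; returns a "done" event
 * through done_event when at least one callback is pending */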
void queue_callback( krnl_cbdata_t *cb, struct unicode_str *string_param, struct object **done_event )
{
    struct device_manager *manager;
    struct callback_event *event;

    if (is_extending || (current && current->process->is_kernel)) goto done;
    if (!(event = alloc_object( &callback_event_ops ))) goto done;
    event->remaining_count = 0;

    LIST_FOR_EACH_ENTRY( manager, &device_managers, struct device_manager, entry )
    {
        struct callback_entry *entry;

        if (!(manager->callback_mask & cb->cb_type)) continue;
        if (!(entry = allocate_callback_entry( cb, string_param, event ))) break;
        list_add_tail( &manager->callbacks, &entry->entry );
        if (list_head( &manager->callbacks ) == &entry->entry) wake_up( &manager->obj, 0 );
    }

    if (done_event && event->remaining_count != 0)
        *done_event = (struct object *)event;
    else
        release_object( event );

done:
    free_cbdata( cb );
}
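
/* retrieve the next pending kernel callback for a device manager */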
DECL_HANDLER(get_next_callback_event)
{
    struct device_manager *manager;
    struct list *ptr;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if (current->attached_process)
    {
        release_object( current->attached_process );
        current->attached_process = NULL;
    }

    if ((ptr = list_head( &manager->callbacks )))
    {
        struct callback_entry *cb = LIST_ENTRY( ptr, struct callback_entry, entry );

        reply->cb_data = cb->data;

        if (manager->current_cb_client)
        {
            release_object( manager->current_cb_client );
            manager->current_cb_client = NULL;
        }

        if (manager->current_cb_event)
        {
            manager->current_cb_event->remaining_count--;
            if (!manager->current_cb_event->remaining_count)
                wake_up( &manager->current_cb_event->obj, 0 );
            release_object( manager->current_cb_event );
            manager->current_cb_event = NULL;
        }

        if (cb->client_thread)
        {
            reply->client_thread = get_kernel_object_ptr( manager, &cb->client_thread->obj );
            reply->client_tid = cb->client_thread->id;
            manager->current_cb_client = cb->client_thread;  /* transfer the entry's reference */
        }
        else
            reply->client_tid = 0;

        if (reply->cb_data.cb_type == SERVER_CALLBACK_HANDLE_EVENT)
        {
            struct object *obj;

            if ((obj = get_handle_event_object( &reply->cb_data )))
            {
                is_extending = 1;
                reply->cb_data.handle_event.object = alloc_handle_no_access_check( current->process, obj,
                                                                                   MAXIMUM_ALLOWED, 0 );
                is_extending = 0;
                release_object( obj );
            }
            else
                reply->cb_data.handle_event.object = 0;
        }

        if (cb->string_param.str)
        {
            set_reply_data( cb->string_param.str, min( cb->string_param.len, get_reply_max_size() ) );
            free( cb->string_param.str );
        }

        manager->current_cb_event = cb->event;  /* transfer the entry's reference */
        list_remove( ptr );
        free( cb );
    }
    else set_error( STATUS_PENDING );

    release_object( manager );
}
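
/* return (with a reference) the client thread on whose behalf the manager thread is currently working */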
struct thread *device_manager_client_thread( struct device_manager *dev_mgr, struct thread *thread )
{
    if (thread != dev_mgr->main_loop_thread) return NULL;
    if (dev_mgr->current_call)
        return (struct thread *)grab_object( dev_mgr->current_call->thread );
    if (dev_mgr->current_cb_client)
        return (struct thread *)grab_object( dev_mgr->current_cb_client );
    return NULL;
}
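
/* attach the device manager thread to a client process context, or detach it */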
DECL_HANDLER(attach_process)
{
    struct device_manager *manager;
    struct process *process;
    struct kernel_object *ref;
    struct thread *client_thread;
    struct process *current_process;

    if (!(manager = (struct device_manager *)get_handle_obj( current->process, req->manager,
                                                             0, &device_manager_ops )))
        return;

    if ((client_thread = device_manager_client_thread( manager, current )))
    {
        current_process = client_thread->process;
        release_object( client_thread );
    }
    else
        current_process = current->process;

    if (!(ref = kernel_object_from_ptr( manager, req->process )))
    {
        set_error( STATUS_INVALID_PARAMETER );
        release_object( manager );
        return;
    }
    process = (struct process *)ref->object;
    release_object( manager );

    if (!is_process( &process->obj ))
    {
        set_error( STATUS_OBJECT_TYPE_MISMATCH );
        return;
    }
    grab_object( process );

    if (current->attached_process)
    {
        release_object( current->attached_process );
        current->attached_process = NULL;
    }

    if (req->detach && process == current_process)
        release_object( process );
    else
        current->attached_process = process;
}