change.c

/*
 * Server-side change notification management
 *
 * Copyright (C) 1998 Alexandre Julliard
 * Copyright (C) 2006 Mike McCormack
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
 */

#include "config.h"
#include "wine/port.h"

#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <limits.h>
#include <dirent.h>
#include <errno.h>
#ifdef HAVE_POLL_H
# include <poll.h>
#endif
#ifdef HAVE_SYS_INOTIFY_H
#include <sys/inotify.h>
#endif

#include "ntstatus.h"
#define WIN32_NO_STATUS
#include "windef.h"

#include "file.h"
#include "handle.h"
#include "thread.h"
#include "request.h"
#include "process.h"
#include "security.h"
#include "winternl.h"

/* dnotify support */

#ifdef linux
#ifndef F_NOTIFY
#define F_NOTIFY 1026
#define DN_ACCESS       0x00000001      /* File accessed */
#define DN_MODIFY       0x00000002      /* File modified */
#define DN_CREATE       0x00000004      /* File created */
#define DN_DELETE       0x00000008      /* File removed */
#define DN_RENAME       0x00000010      /* File renamed */
#define DN_ATTRIB       0x00000020      /* File changed attributes */
#define DN_MULTISHOT    0x80000000      /* Don't remove notifier */
#endif
#endif

/* inotify support */

struct inode;

static void free_inode( struct inode *inode );

static struct fd *inotify_fd;

struct change_record {
    struct list entry;
    unsigned int cookie;
    struct filesystem_event event;
};

struct dir
{
    struct object  obj;       /* object header */
    struct fd     *fd;        /* file descriptor to the directory */
    mode_t         mode;      /* file stat.st_mode */
    uid_t          uid;       /* file stat.st_uid */
    struct list    entry;     /* entry in global change notifications list */
    unsigned int   filter;    /* notification filter */
    volatile int   notified;  /* SIGIO counter */
    int            want_data; /* return change data */
    int            subtree;   /* do we want to watch subdirectories? */
    struct list    change_records;    /* data for the change */
    struct list    in_entry;  /* entry in the inode dirs list */
    struct inode  *inode;     /* inode of the associated directory */
    struct process *client_process;   /* client process that has a cache for this directory */
    int            client_entry;      /* entry in client process cache */
};

static struct fd *dir_get_fd( struct object *obj );
static struct security_descriptor *dir_get_sd( struct object *obj );
static int dir_set_sd( struct object *obj, const struct security_descriptor *sd,
                       unsigned int set_info );
static void dir_dump( struct object *obj, int verbose );
static int dir_close_handle( struct object *obj, struct process *process, obj_handle_t handle );
static void dir_destroy( struct object *obj );

static const struct object_ops dir_ops =
{
    sizeof(struct dir),       /* size */
    &file_type,               /* type */
    dir_dump,                 /* dump */
    add_queue,                /* add_queue */
    remove_queue,             /* remove_queue */
    default_fd_signaled,      /* signaled */
    no_satisfied,             /* satisfied */
    no_signal,                /* signal */
    dir_get_fd,               /* get_fd */
    default_map_access,       /* map_access */
    dir_get_sd,               /* get_sd */
    dir_set_sd,               /* set_sd */
    no_get_full_name,         /* get_full_name */
    no_lookup_name,           /* lookup_name */
    no_link_name,             /* link_name */
    NULL,                     /* unlink_name */
    no_open_file,             /* open_file */
    no_kernel_obj_list,       /* get_kernel_obj_list */
    dir_close_handle,         /* close_handle */
    dir_destroy               /* destroy */
};

static int dir_get_poll_events( struct fd *fd );
static enum server_fd_type dir_get_fd_type( struct fd *fd );

static const struct fd_ops dir_fd_ops =
{
    dir_get_poll_events,         /* get_poll_events */
    default_poll_event,          /* poll_event */
    dir_get_fd_type,             /* get_fd_type */
    no_fd_read,                  /* read */
    no_fd_write,                 /* write */
    no_fd_flush,                 /* flush */
    default_fd_get_file_info,    /* get_file_info */
    no_fd_get_volume_info,       /* get_volume_info */
    default_fd_ioctl,            /* ioctl */
    default_fd_queue_async,      /* queue_async */
    default_fd_reselect_async    /* reselect_async */
};

static struct list change_list = LIST_INIT(change_list);

/* per-process structure to keep track of cache entries on the client side */
struct dir_cache
{
    unsigned int  size;
    unsigned int  count;
    unsigned char state[1];
};

enum dir_cache_state
{
    DIR_CACHE_STATE_FREE,
    DIR_CACHE_STATE_INUSE,
    DIR_CACHE_STATE_RELEASED
};

/* return an array of cache entries that can be freed on the client side */
static int *get_free_dir_cache_entries( struct process *process, data_size_t *size )
{
    int *ret;
    struct dir_cache *cache = process->dir_cache;
    unsigned int i, j, count;

    if (!cache) return NULL;
    for (i = count = 0; i < cache->count && count < *size / sizeof(*ret); i++)
        if (cache->state[i] == DIR_CACHE_STATE_RELEASED) count++;
    if (!count) return NULL;

    if ((ret = malloc( count * sizeof(*ret) )))
    {
        for (i = j = 0; j < count; i++)
        {
            if (cache->state[i] != DIR_CACHE_STATE_RELEASED) continue;
            cache->state[i] = DIR_CACHE_STATE_FREE;
            ret[j++] = i;
        }
        *size = count * sizeof(*ret);
    }
    return ret;
}

/* allocate a new client-side directory cache entry */
static int alloc_dir_cache_entry( struct dir *dir, struct process *process )
{
    unsigned int i = 0;
    struct dir_cache *cache = process->dir_cache;

    if (cache)
        for (i = 0; i < cache->count; i++)
            if (cache->state[i] == DIR_CACHE_STATE_FREE) goto found;

    if (!cache || cache->count == cache->size)
    {
        unsigned int size = cache ? cache->size * 2 : 256;
        if (!(cache = realloc( cache, offsetof( struct dir_cache, state[size] ))))
        {
            set_error( STATUS_NO_MEMORY );
            return -1;
        }
        process->dir_cache = cache;
        cache->size = size;
    }
    cache->count = i + 1;
found:
    cache->state[i] = DIR_CACHE_STATE_INUSE;
    return i;
}

/* release a directory cache entry; it will be freed on the client side on the next cache request */
static void release_dir_cache_entry( struct dir *dir )
{
    struct dir_cache *cache;

    if (!dir->client_process) return;
    cache = dir->client_process->dir_cache;
    cache->state[dir->client_entry] = DIR_CACHE_STATE_RELEASED;
    release_object( dir->client_process );
    dir->client_process = NULL;
}
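
/* map the Win32 notification filter to dnotify flags and (re)arm F_NOTIFY on the directory fd */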
static void dnotify_adjust_changes( struct dir *dir )
{
#if defined(F_SETSIG) && defined(F_NOTIFY)
    int fd = get_unix_fd( dir->fd );
    unsigned int filter = dir->filter;
    unsigned int val;
    if ( 0 > fcntl( fd, F_SETSIG, SIGIO) )
        return;

    val = DN_MULTISHOT;
    if (filter & FILE_NOTIFY_CHANGE_FILE_NAME)
        val |= DN_RENAME | DN_DELETE | DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_DIR_NAME)
        val |= DN_RENAME | DN_DELETE | DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_ATTRIBUTES)
        val |= DN_ATTRIB;
    if (filter & FILE_NOTIFY_CHANGE_SIZE)
        val |= DN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_WRITE)
        val |= DN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_ACCESS)
        val |= DN_ACCESS;
    if (filter & FILE_NOTIFY_CHANGE_CREATION)
        val |= DN_CREATE;
    if (filter & FILE_NOTIFY_CHANGE_SECURITY)
        val |= DN_ATTRIB;
    fcntl( fd, F_NOTIFY, val );
#endif
}

/* insert change in the global list */
static inline void insert_change( struct dir *dir )
{
    sigset_t sigset;

    sigemptyset( &sigset );
    sigaddset( &sigset, SIGIO );
    sigprocmask( SIG_BLOCK, &sigset, NULL );
    list_add_head( &change_list, &dir->entry );
    sigprocmask( SIG_UNBLOCK, &sigset, NULL );
}

/* remove change from the global list */
static inline void remove_change( struct dir *dir )
{
    sigset_t sigset;

    sigemptyset( &sigset );
    sigaddset( &sigset, SIGIO );
    sigprocmask( SIG_BLOCK, &sigset, NULL );
    list_remove( &dir->entry );
    sigprocmask( SIG_UNBLOCK, &sigset, NULL );
}

static void dir_dump( struct object *obj, int verbose )
{
    struct dir *dir = (struct dir *)obj;
    assert( obj->ops == &dir_ops );
    fprintf( stderr, "Dirfile fd=%p filter=%08x\n", dir->fd, dir->filter );
}

/* enter here directly from SIGIO signal handler */
void do_change_notify( int unix_fd )
{
    struct dir *dir;

    /* FIXME: this is O(n) ... probably can be improved */
    LIST_FOR_EACH_ENTRY( dir, &change_list, struct dir, entry )
    {
        if (get_unix_fd( dir->fd ) != unix_fd) continue;
        dir->notified = 1;
        break;
    }
}

/* SIGIO callback, called synchronously with the poll loop */
void sigio_callback(void)
{
    struct dir *dir;

    LIST_FOR_EACH_ENTRY( dir, &change_list, struct dir, entry )
    {
        if (!dir->notified) continue;
        dir->notified = 0;
        fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );
    }
}

static struct fd *dir_get_fd( struct object *obj )
{
    struct dir *dir = (struct dir *)obj;
    assert( obj->ops == &dir_ops );
    return (struct fd *)grab_object( dir->fd );
}

static int get_dir_unix_fd( struct dir *dir )
{
    return get_unix_fd( dir->fd );
}
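
/* return the directory's security descriptor, rebuilding it from the Unix mode/uid when they have changed */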
static struct security_descriptor *dir_get_sd( struct object *obj )
{
    struct dir *dir = (struct dir *)obj;
    int unix_fd;
    struct stat st;
    struct security_descriptor *sd;

    assert( obj->ops == &dir_ops );

    unix_fd = get_dir_unix_fd( dir );

    if (unix_fd == -1 || fstat( unix_fd, &st ) == -1)
        return obj->sd;

    /* mode and uid the same? if so, no need to re-generate security descriptor */
    if (obj->sd &&
        (st.st_mode & (S_IRWXU|S_IRWXO)) == (dir->mode & (S_IRWXU|S_IRWXO)) &&
        (st.st_uid == dir->uid))
        return obj->sd;

    sd = mode_to_sd( st.st_mode,
                     security_unix_uid_to_sid( st.st_uid ),
                     token_get_primary_group( current->process->token ));
    if (!sd) return obj->sd;

    dir->mode = st.st_mode;
    dir->uid = st.st_uid;
    free( obj->sd );
    obj->sd = sd;
    return sd;
}
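
/* apply a new security descriptor: the DACL is mapped back to a Unix file mode via fchmod() */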
static int dir_set_sd( struct object *obj, const struct security_descriptor *sd,
                       unsigned int set_info )
{
    struct dir *dir = (struct dir *)obj;
    const SID *owner;
    struct stat st;
    mode_t mode;
    int unix_fd;

    assert( obj->ops == &dir_ops );

    unix_fd = get_dir_unix_fd( dir );

    if (unix_fd == -1 || fstat( unix_fd, &st ) == -1) return 1;

    if (set_info & OWNER_SECURITY_INFORMATION)
    {
        owner = sd_get_owner( sd );
        if (!owner)
        {
            set_error( STATUS_INVALID_SECURITY_DESCR );
            return 0;
        }
        if (!obj->sd || !security_equal_sid( owner, sd_get_owner( obj->sd ) ))
        {
            /* FIXME: get Unix uid and call fchown */
        }
    }
    else if (obj->sd)
        owner = sd_get_owner( obj->sd );
    else
        owner = token_get_user( current->process->token );

    if (set_info & DACL_SECURITY_INFORMATION)
    {
        /* keep the bits that we don't map to access rights in the ACL */
        mode = st.st_mode & (S_ISUID|S_ISGID|S_ISVTX);
        mode |= sd_to_mode( sd, owner );

        if (((st.st_mode ^ mode) & (S_IRWXU|S_IRWXG|S_IRWXO)) && fchmod( unix_fd, mode ) == -1)
        {
            file_set_error();
            return 0;
        }
    }
    return 1;
}

static struct change_record *get_first_change_record( struct dir *dir )
{
    struct list *ptr = list_head( &dir->change_records );
    if (!ptr) return NULL;
    list_remove( ptr );
    return LIST_ENTRY( ptr, struct change_record, entry );
}

static int dir_close_handle( struct object *obj, struct process *process, obj_handle_t handle )
{
    struct dir *dir = (struct dir *)obj;

    if (obj->handle_count == 1) release_dir_cache_entry( dir ); /* closing last handle, release cache */
    return 1;  /* ok to close */
}

static void dir_destroy( struct object *obj )
{
    struct change_record *record;
    struct dir *dir = (struct dir *)obj;
    assert (obj->ops == &dir_ops);

    if (dir->filter)
        remove_change( dir );

    if (dir->inode)
    {
        list_remove( &dir->in_entry );
        free_inode( dir->inode );
    }

    while ((record = get_first_change_record( dir ))) free( record );

    release_dir_cache_entry( dir );
    release_object( dir->fd );

    if (inotify_fd && list_empty( &change_list ))
    {
        release_object( inotify_fd );
        inotify_fd = NULL;
    }
}

struct dir *get_dir_obj( struct process *process, obj_handle_t handle, unsigned int access )
{
    return (struct dir *)get_handle_obj( process, handle, access, &dir_ops );
}

static int dir_get_poll_events( struct fd *fd )
{
    return 0;
}

static enum server_fd_type dir_get_fd_type( struct fd *fd )
{
    return FD_TYPE_DIR;
}

#ifdef HAVE_SYS_INOTIFY_H

#define HASH_SIZE 31
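
/* each watched Unix inode is tracked once per (dev, ino) pair; inodes form a tree
 * mirroring the watched directory hierarchy, and are hashed both by (dev, ino) and
 * by inotify watch descriptor */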
struct inode {
    struct list ch_entry;    /* entry in the children list */
    struct list children;    /* children of this inode */
    struct inode *parent;    /* parent of this inode */
    struct list dirs;        /* directory handles watching this inode */
    struct list ino_entry;   /* entry in the inode hash */
    struct list wd_entry;    /* entry in the watch descriptor hash */
    dev_t dev;               /* device number */
    ino_t ino;               /* device's inode number */
    int wd;                  /* inotify's watch descriptor */
    char *name;              /* base name of the inode */
};

static struct list inode_hash[ HASH_SIZE ];
static struct list wd_hash[ HASH_SIZE ];

static int inotify_add_dir( char *path, unsigned int filter );

static struct inode *inode_from_wd( int wd )
{
    struct list *bucket = &wd_hash[ wd % HASH_SIZE ];
    struct inode *inode;

    LIST_FOR_EACH_ENTRY( inode, bucket, struct inode, wd_entry )
        if (inode->wd == wd)
            return inode;

    return NULL;
}

static inline struct list *get_hash_list( dev_t dev, ino_t ino )
{
    return &inode_hash[ (ino ^ dev) % HASH_SIZE ];
}

static struct inode *find_inode( dev_t dev, ino_t ino )
{
    struct list *bucket = get_hash_list( dev, ino );
    struct inode *inode;

    LIST_FOR_EACH_ENTRY( inode, bucket, struct inode, ino_entry )
        if (inode->ino == ino && inode->dev == dev)
            return inode;

    return NULL;
}

static struct inode *create_inode( dev_t dev, ino_t ino )
{
    struct inode *inode;

    inode = malloc( sizeof *inode );
    if (inode)
    {
        list_init( &inode->children );
        list_init( &inode->dirs );
        inode->ino = ino;
        inode->dev = dev;
        inode->wd = -1;
        inode->parent = NULL;
        inode->name = NULL;
        list_add_tail( get_hash_list( dev, ino ), &inode->ino_entry );
    }
    return inode;
}

static struct inode *get_inode( dev_t dev, ino_t ino )
{
    struct inode *inode;

    inode = find_inode( dev, ino );
    if (inode)
        return inode;

    return create_inode( dev, ino );
}

static void inode_set_wd( struct inode *inode, int wd )
{
    if (inode->wd != -1)
        list_remove( &inode->wd_entry );
    inode->wd = wd;
    list_add_tail( &wd_hash[ wd % HASH_SIZE ], &inode->wd_entry );
}

static void inode_set_name( struct inode *inode, const char *name )
{
    free (inode->name);
    inode->name = name ? strdup( name ) : NULL;
}
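
/* release an inode: unwatched children of a top-level inode are freed recursively;
 * the inode itself is freed only once no directory handle watches it, after detaching
 * any remaining children and removing its inotify watch */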
static void free_inode( struct inode *inode )
{
    int subtree = 0, watches = 0;
    struct inode *tmp, *next;
    struct dir *dir;

    LIST_FOR_EACH_ENTRY( dir, &inode->dirs, struct dir, in_entry )
    {
        subtree |= dir->subtree;
        watches++;
    }

    if (!subtree && !inode->parent)
    {
        LIST_FOR_EACH_ENTRY_SAFE( tmp, next, &inode->children,
                                  struct inode, ch_entry )
        {
            assert( tmp != inode );
            assert( tmp->parent == inode );
            free_inode( tmp );
        }
    }

    if (watches)
        return;

    if (inode->parent)
        list_remove( &inode->ch_entry );

    /* disconnect remaining children from the parent */
    LIST_FOR_EACH_ENTRY_SAFE( tmp, next, &inode->children, struct inode, ch_entry )
    {
        list_remove( &tmp->ch_entry );
        tmp->parent = NULL;
    }

    if (inode->wd != -1)
    {
        inotify_rm_watch( get_unix_fd( inotify_fd ), inode->wd );
        list_remove( &inode->wd_entry );
    }

    list_remove( &inode->ino_entry );
    free( inode->name );
    free( inode );
}

static struct inode *inode_add( struct inode *parent,
                                dev_t dev, ino_t ino, const char *name )
{
    struct inode *inode;

    inode = get_inode( dev, ino );
    if (!inode)
        return NULL;

    if (!inode->parent)
    {
        list_add_tail( &parent->children, &inode->ch_entry );
        inode->parent = parent;
        assert( inode != parent );
    }
    inode_set_name( inode, name );

    return inode;
}

static struct inode *inode_from_name( struct inode *inode, const char *name )
{
    struct inode *i;

    LIST_FOR_EACH_ENTRY( i, &inode->children, struct inode, ch_entry )
        if (i->name && !strcmp( i->name, name ))
            return i;
    return NULL;
}

static int inotify_get_poll_events( struct fd *fd );
static void inotify_poll_event( struct fd *fd, int event );

static const struct fd_ops inotify_fd_ops =
{
    inotify_get_poll_events,     /* get_poll_events */
    inotify_poll_event,          /* poll_event */
    NULL,                        /* get_fd_type */
    NULL,                        /* read */
    NULL,                        /* write */
    NULL,                        /* flush */
    NULL,                        /* get_file_info */
    NULL,                        /* get_volume_info */
    NULL,                        /* ioctl */
    NULL,                        /* queue_async */
    NULL                         /* reselect_async */
};

static int inotify_get_poll_events( struct fd *fd )
{
    return POLLIN;
}

static void inotify_do_change_notify( struct dir *dir, unsigned int action,
                                      unsigned int cookie, const char *relpath )
{
    struct change_record *record;

    assert( dir->obj.ops == &dir_ops );

    if (dir->want_data)
    {
        size_t len = strlen(relpath);
        record = malloc( offsetof(struct change_record, event.name[len]) );
        if (!record)
            return;

        record->cookie = cookie;
        record->event.action = action;
        memcpy( record->event.name, relpath, len );
        record->event.len = len;

        list_add_tail( &dir->change_records, &record->entry );
    }

    fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );
}
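
/* map an inotify event mask to the FILE_NOTIFY_CHANGE_* filter bits it can satisfy */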
static unsigned int filter_from_event( struct inotify_event *ie )
{
    unsigned int filter = 0;

    if (ie->mask & (IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE | IN_CREATE))
        filter |= FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME;
    if (ie->mask & IN_MODIFY)
        filter |= FILE_NOTIFY_CHANGE_SIZE | FILE_NOTIFY_CHANGE_LAST_WRITE | FILE_NOTIFY_CHANGE_LAST_ACCESS;
    if (ie->mask & IN_ATTRIB)
        filter |= FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY;
    if (ie->mask & IN_CREATE)
        filter |= FILE_NOTIFY_CHANGE_CREATION;

    if (ie->mask & IN_ISDIR)
        filter &= ~FILE_NOTIFY_CHANGE_FILE_NAME;
    else
        filter &= ~FILE_NOTIFY_CHANGE_DIR_NAME;

    return filter;
}

/* scan up the parent directories for watches */
static unsigned int filter_from_inode( struct inode *inode, int is_parent )
{
    unsigned int filter = 0;
    struct dir *dir;

    /* combine filters from parents watching subtrees */
    while (inode)
    {
        LIST_FOR_EACH_ENTRY( dir, &inode->dirs, struct dir, in_entry )
            if (dir->subtree || !is_parent)
                filter |= dir->filter;
        is_parent = 1;
        inode = inode->parent;
    }

    return filter;
}
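
/* build a Unix path for an inode: the nearest watched ancestor is reached through
 * /proc/self/fd, then the child names are appended; sz reserves room for a name
 * that the caller wants to append */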
static char *inode_get_path( struct inode *inode, int sz )
{
    struct list *head;
    char *path;
    int len;

    if (!inode)
        return NULL;

    head = list_head( &inode->dirs );
    if (head)
    {
        int unix_fd = get_unix_fd( LIST_ENTRY( head, struct dir, in_entry )->fd );
        path = malloc ( 32 + sz );
        if (path)
            sprintf( path, "/proc/self/fd/%u/", unix_fd );
        return path;
    }

    if (!inode->name)
        return NULL;

    len = strlen( inode->name );
    path = inode_get_path( inode->parent, sz + len + 1 );
    if (!path)
        return NULL;

    strcat( path, inode->name );
    strcat( path, "/" );

    return path;
}
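
/* a directory was created under a watched tree: add an inode for it and start
 * watching it if some ancestor's filter requires it */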
static void inode_check_dir( struct inode *parent, const char *name )
{
    char *path;
    unsigned int filter;
    struct inode *inode;
    struct stat st;
    int wd = -1;

    path = inode_get_path( parent, strlen(name) );
    if (!path)
        return;

    strcat( path, name );

    if (stat( path, &st ) < 0)
        goto end;

    filter = filter_from_inode( parent, 1 );
    if (!filter)
        goto end;

    inode = inode_add( parent, st.st_dev, st.st_ino, name );
    if (!inode || inode->wd != -1)
        goto end;

    wd = inotify_add_dir( path, filter );
    if (wd != -1)
        inode_set_wd( inode, wd );
    else
        free_inode( inode );

end:
    free( path );
}
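
/* prepend a path segment (plus a '/' separator) to *path, reallocating the buffer;
 * returns 0 on allocation failure */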
static int prepend( char **path, const char *segment )
{
    int extra;
    char *p;

    extra = strlen( segment ) + 1;
    if (*path)
    {
        int len = strlen( *path ) + 1;
        p = realloc( *path, len + extra );
        if (!p) return 0;
        memmove( &p[ extra ], p, len );
        p[ extra - 1 ] = '/';
        memcpy( p, segment, extra - 1 );
    }
    else
    {
        p = malloc( extra );
        if (!p) return 0;
        memcpy( p, segment, extra );
    }
    *path = p;
    return 1;
}
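
/* dispatch a single inotify event: walk up the inode tree, extending the relative
 * path as we go, and notify every watch whose filter matches */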
static void inotify_notify_all( struct inotify_event *ie )
{
    unsigned int filter, action;
    struct inode *inode, *i;
    char *path = NULL;
    struct dir *dir;

    inode = inode_from_wd( ie->wd );
    if (!inode)
    {
        fprintf( stderr, "no inode matches %d\n", ie->wd);
        return;
    }

    filter = filter_from_event( ie );

    if (ie->mask & IN_CREATE)
    {
        if (ie->mask & IN_ISDIR)
            inode_check_dir( inode, ie->name );

        action = FILE_ACTION_ADDED;
    }
    else if (ie->mask & IN_DELETE)
        action = FILE_ACTION_REMOVED;
    else if (ie->mask & IN_MOVED_FROM)
        action = FILE_ACTION_RENAMED_OLD_NAME;
    else if (ie->mask & IN_MOVED_TO)
        action = FILE_ACTION_RENAMED_NEW_NAME;
    else
        action = FILE_ACTION_MODIFIED;

    /*
     * Work our way up the inode hierarchy
     *  extending the relative path as we go
     *  and notifying all recursive watches.
     */
    if (!prepend( &path, ie->name ))
        return;

    for (i = inode; i; i = i->parent)
    {
        LIST_FOR_EACH_ENTRY( dir, &i->dirs, struct dir, in_entry )
            if ((filter & dir->filter) && (i==inode || dir->subtree))
                inotify_do_change_notify( dir, action, ie->cookie, path );

        if (!i->name || !prepend( &path, i->name ))
            break;
    }
    free( path );

    if (ie->mask & IN_DELETE)
    {
        i = inode_from_name( inode, ie->name );
        if (i)
            free_inode( i );
    }
}
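
/* the inotify fd is readable: read a batch of events from the kernel and dispatch each one */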
static void inotify_poll_event( struct fd *fd, int event )
{
    int r, ofs, unix_fd;
    char buffer[0x1000];
    struct inotify_event *ie;

    unix_fd = get_unix_fd( fd );
    r = read( unix_fd, buffer, sizeof buffer );
    if (r < 0)
    {
        fprintf(stderr,"inotify_poll_event(): inotify read failed!\n");
        return;
    }

    for( ofs = 0; ofs < r - offsetof(struct inotify_event, name); )
    {
        ie = (struct inotify_event*) &buffer[ofs];
        ofs += offsetof( struct inotify_event, name[ie->len] );
        if (ofs > r) break;
        if (ie->len) inotify_notify_all( ie );
    }
}

static inline struct fd *create_inotify_fd( void )
{
    int unix_fd;

    unix_fd = inotify_init();
    if (unix_fd<0)
        return NULL;
    return create_anonymous_fd( &inotify_fd_ops, unix_fd, NULL, 0 );
}
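
/* map the Win32 notification filter to an inotify event mask */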
static int map_flags( unsigned int filter )
{
    unsigned int mask;

    /* always watch these so we can track subdirectories in recursive watches */
    mask = (IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE | IN_CREATE | IN_DELETE_SELF);

    if (filter & FILE_NOTIFY_CHANGE_ATTRIBUTES)
        mask |= IN_ATTRIB;
    if (filter & FILE_NOTIFY_CHANGE_SIZE)
        mask |= IN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_WRITE)
        mask |= IN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_LAST_ACCESS)
        mask |= IN_MODIFY;
    if (filter & FILE_NOTIFY_CHANGE_SECURITY)
        mask |= IN_ATTRIB;

    return mask;
}

static int inotify_add_dir( char *path, unsigned int filter )
{
    int wd = inotify_add_watch( get_unix_fd( inotify_fd ),
                                path, map_flags( filter ) );
    if (wd != -1)
        set_fd_events( inotify_fd, POLLIN );
    return wd;
}

static int init_inotify( void )
{
    int i;

    if (inotify_fd)
        return 1;

    inotify_fd = create_inotify_fd();
    if (!inotify_fd)
        return 0;

    for (i=0; i<HASH_SIZE; i++)
    {
        list_init( &inode_hash[i] );
        list_init( &wd_hash[i] );
    }

    return 1;
}
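
/* (re)register the inotify watch for a directory handle, creating and linking its inode on first use */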
static int inotify_adjust_changes( struct dir *dir )
{
    unsigned int filter;
    struct inode *inode;
    struct stat st;
    char path[32];
    int wd, unix_fd;

    if (!inotify_fd)
        return 0;

    unix_fd = get_unix_fd( dir->fd );

    inode = dir->inode;
    if (!inode)
    {
        /* check if this fd is already being watched */
        if (-1 == fstat( unix_fd, &st ))
            return 0;

        inode = get_inode( st.st_dev, st.st_ino );
        if (!inode)
            inode = create_inode( st.st_dev, st.st_ino );
        if (!inode)
            return 0;
        list_add_tail( &inode->dirs, &dir->in_entry );
        dir->inode = inode;
    }

    filter = filter_from_inode( inode, 0 );

    sprintf( path, "/proc/self/fd/%u", unix_fd );
    wd = inotify_add_dir( path, filter );
    if (wd == -1) return 0;

    inode_set_wd( inode, wd );

    return 1;
}
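
/* return the base name of the file that a symlink (here, a /proc/self/fd entry) points to */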
static char *get_basename( const char *link )
{
    char *buffer, *name = NULL;
    int r, n = 0x100;

    while (1)
    {
        buffer = malloc( n );
        if (!buffer) return NULL;

        r = readlink( link, buffer, n );
        if (r < 0)
            break;

        if (r < n)
        {
            name = buffer;
            break;
        }
        free( buffer );
        n *= 2;
    }

    if (name)
    {
        while (r > 0 && name[ r - 1 ] == '/' )
            r--;
        name[ r ] = 0;

        name = strrchr( name, '/' );
        if (name)
            name = strdup( &name[1] );
    }

    free( buffer );
    return name;
}
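
/* if this directory's parent is already watched recursively, add an inode for it
 * under the existing watch tree instead of creating a new top-level watch */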
static int dir_add_to_existing_notify( struct dir *dir )
{
    struct inode *inode, *parent;
    unsigned int filter = 0;
    struct stat st, st_new;
    char link[35], *name;
    int wd, unix_fd;

    if (!inotify_fd)
        return 0;

    unix_fd = get_unix_fd( dir->fd );

    /* check if it's in the list of inodes we want to watch */
    if (-1 == fstat( unix_fd, &st_new ))
        return 0;
    inode = find_inode( st_new.st_dev, st_new.st_ino );
    if (inode)
        return 0;

    /* lookup the parent */
    sprintf( link, "/proc/self/fd/%u/..", unix_fd );
    if (-1 == stat( link, &st ))
        return 0;

    /*
     * If there's no parent, stop.  We could keep going adding
     *  ../ to the path until we hit the root of the tree or
     *  find a recursively watched ancestor.
     * Assume it's too expensive to search up the tree for now.
     */
    parent = find_inode( st.st_dev, st.st_ino );
    if (!parent)
        return 0;

    if (parent->wd == -1)
        return 0;

    filter = filter_from_inode( parent, 1 );
    if (!filter)
        return 0;

    sprintf( link, "/proc/self/fd/%u", unix_fd );
    name = get_basename( link );
    if (!name)
        return 0;
    inode = inode_add( parent, st_new.st_dev, st_new.st_ino, name );
    free( name );
    if (!inode)
        return 0;

    /* Couldn't find this inode at the start of the function, must be new */
    assert( inode->wd == -1 );

    wd = inotify_add_dir( link, filter );
    if (wd != -1)
        inode_set_wd( inode, wd );

    return 1;
}

#else

static int init_inotify( void )
{
    return 0;
}

static int inotify_adjust_changes( struct dir *dir )
{
    return 0;
}

static void free_inode( struct inode *inode )
{
    assert( 0 );
}

static int dir_add_to_existing_notify( struct dir *dir )
{
    return 0;
}

#endif  /* HAVE_SYS_INOTIFY_H */
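
/* create the server object backing a directory handle that can receive change notifications */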
struct object *create_dir_obj( struct fd *fd, unsigned int access, mode_t mode )
{
    struct dir *dir;

    dir = alloc_object( &dir_ops );
    if (!dir)
        return NULL;

    list_init( &dir->change_records );
    dir->filter = 0;
    dir->notified = 0;
    dir->want_data = 0;
    dir->inode = NULL;
    grab_object( fd );
    dir->fd = fd;
    dir->mode = mode;
    dir->uid = ~(uid_t)0;
    dir->client_process = NULL;
    set_fd_user( fd, &dir_fd_ops, &dir->obj );

    dir_add_to_existing_notify( dir );

    return &dir->obj;
}

/* retrieve (or allocate) the client-side directory cache entry */
DECL_HANDLER(get_directory_cache_entry)
{
    struct dir *dir;
    int *free_entries;
    data_size_t free_size;

    if (!(dir = get_dir_obj( current->process, req->handle, 0 ))) return;

    if (!dir->client_process)
    {
        if ((dir->client_entry = alloc_dir_cache_entry( dir, current->process )) == -1) goto done;
        dir->client_process = (struct process *)grab_object( current->process );
    }

    if (dir->client_process == current->process) reply->entry = dir->client_entry;
    else set_error( STATUS_SHARING_VIOLATION );

done:  /* allow freeing entries even on failure */
    free_size = get_reply_max_size();
    free_entries = get_free_dir_cache_entries( current->process, &free_size );
    if (free_entries) set_reply_data_ptr( free_entries, free_size );
    release_object( dir );
}

/* enable change notifications for a directory */
DECL_HANDLER(read_directory_changes)
{
    struct dir *dir;
    struct async *async;

    if (!req->filter)
    {
        set_error(STATUS_INVALID_PARAMETER);
        return;
    }

    dir = get_dir_obj( current->process, req->async.handle, 0 );
    if (!dir)
        return;

    /* requests don't timeout */
    if (!(async = create_async( dir->fd, current, &req->async, NULL ))) goto end;
    fd_queue_async( dir->fd, async, ASYNC_TYPE_WAIT );

    /* assign it once */
    if (!dir->filter)
    {
        init_inotify();
        insert_change( dir );
        dir->filter = req->filter;
        dir->subtree = req->subtree;
        dir->want_data = req->want_data;
    }

    /* if there's already a change in the queue, send it */
    if (!list_empty( &dir->change_records ))
        fd_async_wake_up( dir->fd, ASYNC_TYPE_WAIT, STATUS_ALERTED );

    /* setup the real notification */
    if (!inotify_adjust_changes( dir ))
        dnotify_adjust_changes( dir );

    set_error(STATUS_PENDING);

    release_object( async );
end:
    release_object( dir );
}
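
/* return the queued change notification records for a directory */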
DECL_HANDLER(read_change)
{
    struct change_record *record, *next;
    struct dir *dir;
    struct list events;
    char *data, *event;
    int size = 0;

    dir = get_dir_obj( current->process, req->handle, 0 );
    if (!dir)
        return;

    list_init( &events );
    list_move_tail( &events, &dir->change_records );
    release_object( dir );

    if (list_empty( &events ))
    {
        set_error( STATUS_NO_DATA_DETECTED );
        return;
    }

    LIST_FOR_EACH_ENTRY( record, &events, struct change_record, entry )
    {
        size += (offsetof(struct filesystem_event, name[record->event.len])
                 + sizeof(int)-1) / sizeof(int) * sizeof(int);
    }

    if (size > get_reply_max_size())
        set_error( STATUS_BUFFER_TOO_SMALL );
    else if ((data = mem_alloc( size )) != NULL)
    {
        event = data;
        LIST_FOR_EACH_ENTRY( record, &events, struct change_record, entry )
        {
            data_size_t len = offsetof( struct filesystem_event, name[record->event.len] );

            /* FIXME: rename events are sometimes reported as delete/create */
            if (record->event.action == FILE_ACTION_RENAMED_OLD_NAME)
            {
                struct list *elem = list_next( &events, &record->entry );
                if (elem)
                    next = LIST_ENTRY(elem, struct change_record, entry);

                if (elem && next->cookie == record->cookie)
                    next->cookie = 0;
                else
                    record->event.action = FILE_ACTION_REMOVED;
            }
            else if (record->event.action == FILE_ACTION_RENAMED_NEW_NAME && record->cookie)
                record->event.action = FILE_ACTION_ADDED;

            memcpy( event, &record->event, len );
            event += len;
            if (len % sizeof(int))
            {
                memset( event, 0, sizeof(int) - len % sizeof(int) );
                event += sizeof(int) - len % sizeof(int);
            }
        }
        set_reply_data_ptr( data, size );
    }

    LIST_FOR_EACH_ENTRY_SAFE( record, next, &events, struct change_record, entry )
    {
        list_remove( &record->entry );
        free( record );
    }
}