gsync.c
/* Copyright (C) 2016 Free Software Foundation, Inc.
   Contributed by Agustina Arzille <avarzille@riseup.net>, 2016.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either
   version 2 of the license, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public
   License along with this program; if not, see
   <http://www.gnu.org/licenses/>. */

#include <kern/gsync.h>
#include <kern/kmutex.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
#include <kern/list.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

/* An entry in the global hash table. */
struct gsync_hbucket
{
  struct list entries;
  struct kmutex lock;
};

/* A key used to uniquely identify an address that a thread is
 * waiting on. Its members' values depend on whether said
 * address is shared or task-local. Note that different types of keys
 * should never compare equal, since a task map should never have
 * the same address as a VM object. */
union gsync_key
{
  struct
    {
      vm_map_t map;
      vm_offset_t addr;
    } local;

  struct
    {
      vm_object_t obj;
      vm_offset_t off;
    } shared;

  struct
    {
      unsigned long u;
      unsigned long v;
    } any;
};
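
/* The 'any' member gives a type-agnostic view of whichever variant is
 * in use; the comparison and hashing functions below operate on it
 * rather than on the individual variants. */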

/* A thread that is blocked on an address with 'gsync_wait'. */
struct gsync_waiter
{
  struct list link;
  union gsync_key key;
  thread_t waiter;
};

/* Needed data for temporary mappings. */
struct vm_args
{
  vm_object_t obj;
  vm_offset_t off;
};
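
/* The global hash table. A waiter is queued on the bucket selected by
 * hashing its key; each bucket is protected by its own sleeping lock. */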
#define GSYNC_NBUCKETS 512
static struct gsync_hbucket gsync_buckets[GSYNC_NBUCKETS];

void gsync_setup (void)
{
  int i;
  for (i = 0; i < GSYNC_NBUCKETS; ++i)
    {
      list_init (&gsync_buckets[i].entries);
      kmutex_init (&gsync_buckets[i].lock);
    }
}

/* Convenience comparison functions for gsync_key objects. */

static inline int
gsync_key_eq (const union gsync_key *lp,
  const union gsync_key *rp)
{
  return (lp->any.u == rp->any.u && lp->any.v == rp->any.v);
}

static inline int
gsync_key_lt (const union gsync_key *lp,
  const union gsync_key *rp)
{
  return (lp->any.u < rp->any.u ||
    (lp->any.u == rp->any.u && lp->any.v < rp->any.v));
}
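
/* One mixing step of the hash below: effectively a 5-bit left rotation
 * of the 32-bit accumulator X, XOR'ed with the new word Y. */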
#define MIX2_LL(x, y) ((((x) << 5) | ((x) >> 27)) ^ (y))

static inline unsigned int
gsync_key_hash (const union gsync_key *keyp)
{
  unsigned int ret = sizeof (void *);
#ifndef __LP64__
  ret = MIX2_LL (ret, keyp->any.u);
  ret = MIX2_LL (ret, keyp->any.v);
#else
  ret = MIX2_LL (ret, keyp->any.u & ~0U);
  ret = MIX2_LL (ret, keyp->any.u >> 32);
  ret = MIX2_LL (ret, keyp->any.v & ~0U);
  ret = MIX2_LL (ret, keyp->any.v >> 32);
#endif
  return (ret);
}

/* Perform a VM lookup for the address in the map. The FLAGS
 * parameter is used to specify some attributes for the address,
 * such as protection. Place the corresponding VM object/offset pair
 * in VAP. Returns 0 if successful, -1 otherwise. On success, the
 * VM object in VAP is returned locked. */
static int
probe_address (vm_map_t map, vm_offset_t addr,
  int flags, struct vm_args *vap)
{
  vm_prot_t prot = VM_PROT_READ |
    ((flags & GSYNC_MUTATE) ? VM_PROT_WRITE : 0);
  vm_map_version_t ver;
  vm_prot_t rprot;
  boolean_t wired_p;

  if (vm_map_lookup (&map, addr, prot, &ver,
      &vap->obj, &vap->off, &rprot, &wired_p) != KERN_SUCCESS)
    return (-1);
  else if ((rprot & prot) != prot)
    {
      vm_object_unlock (vap->obj);
      return (-1);
    }

  return (0);
}

/* Initialize the key with its needed members, depending on whether the
 * address is local or shared. Also stores the VM object and offset inside
 * the argument VAP for future use. Returns the index of the hash bucket
 * for the key, or -1 on failure. */
static int
gsync_prepare_key (task_t task, vm_offset_t addr, int flags,
  union gsync_key *keyp, struct vm_args *vap)
{
  if (probe_address (task->map, addr, flags, vap) < 0)
    return (-1);
  else if (flags & GSYNC_SHARED)
    {
      /* For a shared address, we need the VM object
       * and offset as the keys. */
      keyp->shared.obj = vap->obj;
      keyp->shared.off = vap->off;
    }
  else
    {
      /* Task-local address. The keys are the task's map and
       * the virtual address itself. */
      keyp->local.map = task->map;
      keyp->local.addr = addr;
    }

  return ((int)(gsync_key_hash (keyp) % GSYNC_NBUCKETS));
}

static inline struct gsync_waiter*
node_to_waiter (struct list *nodep)
{
  return (list_entry (nodep, struct gsync_waiter, link));
}
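
/* Find the first node in ENTRIES whose key compares greater than or
 * equal to KEYP. If an exact match is found and EXACTP is non-null,
 * set *EXACTP to 1. Returns the list's end node when every entry
 * compares less than KEYP. */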
static inline struct list*
gsync_find_key (const struct list *entries,
  const union gsync_key *keyp, int *exactp)
{
  /* Look for a key that matches. We take advantage of the fact
   * that the entries are sorted to break out of the loop as
   * early as possible. */
  struct list *runp;
  list_for_each (entries, runp)
    {
      struct gsync_waiter *p = node_to_waiter (runp);
      if (gsync_key_lt (keyp, &p->key))
        break;
      else if (gsync_key_eq (keyp, &p->key))
        {
          if (exactp != 0)
            *exactp = 1;
          break;
        }
    }

  return (runp);
}

/* Create a temporary mapping in the kernel. Returns the kernel virtual
 * address of the mapping, or 0 on failure. The caller is expected to
 * undo the mapping with 'vm_map_remove' when done with it; doing so
 * also drops a reference to the VM object. */
static inline vm_offset_t
temp_mapping (struct vm_args *vap, vm_offset_t addr, vm_prot_t prot)
{
  vm_offset_t paddr;
  /* Adjust the offset for addresses that aren't page-aligned. */
  vm_offset_t off = vap->off - (addr - trunc_page (addr));

  if (vm_map_enter (kernel_map, &paddr, PAGE_SIZE,
      0, TRUE, vap->obj, off, FALSE, prot, VM_PROT_ALL,
      VM_INHERIT_DEFAULT) != KERN_SUCCESS)
    paddr = 0;

  return (paddr);
}
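
/* Block the current thread on the address ADDR in TASK, provided the
 * word at ADDR still holds the expected value LO (and, with GSYNC_QUAD,
 * the adjacent word holds HI). With GSYNC_TIMED, the wait gives up
 * after MSEC milliseconds. Returns KERN_INVALID_ARGUMENT when the
 * values don't match, KERN_INTERRUPTED or KERN_TIMEDOUT when the sleep
 * was aborted, and KERN_SUCCESS on a regular wakeup. */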
kern_return_t gsync_wait (task_t task, vm_offset_t addr,
  unsigned int lo, unsigned int hi, natural_t msec, int flags)
{
  if (task == 0)
    return (KERN_INVALID_TASK);
  else if (addr % sizeof (int) != 0)
    return (KERN_INVALID_ADDRESS);

  vm_map_lock_read (task->map);

  struct gsync_waiter w;
  struct vm_args va;
  boolean_t remote = task != current_task ();
  int bucket = gsync_prepare_key (task, addr, flags, &w.key, &va);

  if (bucket < 0)
    {
      vm_map_unlock_read (task->map);
      return (KERN_INVALID_ADDRESS);
    }
  else if (remote)
    /* The VM object is returned locked. However, we are about to acquire
     * a sleeping lock for a bucket, so we must not hold any simple
     * locks. To prevent this object from going away, we add a reference
     * to it when the task is remote. */
    vm_object_reference_locked (va.obj);

  /* We no longer need the lock on the VM object. */
  vm_object_unlock (va.obj);

  struct gsync_hbucket *hbp = gsync_buckets + bucket;
  kmutex_lock (&hbp->lock, FALSE);

  /* Before doing any work, check that the expected value(s)
   * match the contents of the address. Otherwise, the waiting
   * thread could potentially miss a wakeup. */

  boolean_t equal;
  if (! remote)
    equal = ((unsigned int *)addr)[0] == lo &&
      ((flags & GSYNC_QUAD) == 0 ||
       ((unsigned int *)addr)[1] == hi);
  else
    {
      vm_offset_t paddr = temp_mapping (&va, addr, VM_PROT_READ);
      if (unlikely (paddr == 0))
        {
          kmutex_unlock (&hbp->lock);
          vm_map_unlock_read (task->map);
          /* Make sure to remove the reference we added. */
          vm_object_deallocate (va.obj);
          return (KERN_MEMORY_FAILURE);
        }

      vm_offset_t off = addr & (PAGE_SIZE - 1);
      paddr += off;
      equal = ((unsigned int *)paddr)[0] == lo &&
        ((flags & GSYNC_QUAD) == 0 ||
         ((unsigned int *)paddr)[1] == hi);
      paddr -= off;

      /* Note that the call to 'vm_map_remove' will unreference
       * the VM object, so we don't have to do it ourselves. */
      vm_map_remove (kernel_map, paddr, paddr + PAGE_SIZE);
    }

  /* Done with the task's map. */
  vm_map_unlock_read (task->map);

  if (! equal)
    {
      kmutex_unlock (&hbp->lock);
      return (KERN_INVALID_ARGUMENT);
    }

  /* Look for the first entry in the hash bucket that
   * compares strictly greater than this waiter. */
  struct list *runp;
  list_for_each (&hbp->entries, runp)
    if (gsync_key_lt (&w.key, &node_to_waiter(runp)->key))
      break;

  /* Finally, add ourselves to the list and go to sleep. */
  list_add (runp->prev, runp, &w.link);
  w.waiter = current_thread ();

  if (flags & GSYNC_TIMED)
    thread_will_wait_with_timeout (w.waiter, msec);
  else
    thread_will_wait (w.waiter);

  kmutex_unlock (&hbp->lock);
  thread_block (thread_no_continuation);

  /* We're back. */
  kern_return_t ret = KERN_SUCCESS;
  if (current_thread ()->wait_result != THREAD_AWAKENED)
    {
      /* We were interrupted or timed out. */
      kmutex_lock (&hbp->lock, FALSE);
      if (!list_node_unlinked (&w.link))
        list_remove (&w.link);
      kmutex_unlock (&hbp->lock);

      /* Map the error code. */
      ret = current_thread ()->wait_result == THREAD_INTERRUPTED ?
        KERN_INTERRUPTED : KERN_TIMEDOUT;
    }

  return (ret);
}
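
/* A hypothetical user-space sketch of the futex-style pattern that
 * 'gsync_wait' and 'gsync_wake' (below) implement, assuming stubs
 * taking the same arguments as the kernel functions ('flag' being a
 * shared 'unsigned int'):
 *
 *   while (__atomic_load_n (&flag, __ATOMIC_ACQUIRE) == 0)
 *     gsync_wait (mach_task_self (), (vm_offset_t)&flag, 0, 0, 0, 0);
 *
 * and on the waking side:
 *
 *   gsync_wake (mach_task_self (), (vm_offset_t)&flag, 1, GSYNC_MUTATE);
 *
 * Because the kernel re-checks the expected value under the bucket
 * lock, a wake occurring between the load and the wait is not lost. */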

/* Remove a waiter from the queue, wake it up, and
 * return the next node. */
static inline struct list*
dequeue_waiter (struct list *nodep)
{
  struct list *nextp = list_next (nodep);
  list_remove (nodep);
  list_node_init (nodep);
  clear_wait (node_to_waiter(nodep)->waiter,
    THREAD_AWAKENED, FALSE);
  return (nextp);
}
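
/* Wake up a thread waiting on the address ADDR in TASK. With
 * GSYNC_BROADCAST, wake every waiter on ADDR; with GSYNC_MUTATE, first
 * write VAL to ADDR, whether or not any thread is woken. Returns
 * KERN_INVALID_ARGUMENT when there are no waiters. */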
kern_return_t gsync_wake (task_t task,
  vm_offset_t addr, unsigned int val, int flags)
{
  if (task == 0)
    return (KERN_INVALID_TASK);
  else if (addr % sizeof (int) != 0)
    return (KERN_INVALID_ADDRESS);

  vm_map_lock_read (task->map);

  union gsync_key key;
  struct vm_args va;
  int bucket = gsync_prepare_key (task, addr, flags, &key, &va);

  if (bucket < 0)
    {
      vm_map_unlock_read (task->map);
      return (KERN_INVALID_ADDRESS);
    }
  else if (current_task () != task && (flags & GSYNC_MUTATE) != 0)
    /* See above on why we do this. */
    vm_object_reference_locked (va.obj);

  /* Done with the VM object lock. */
  vm_object_unlock (va.obj);

  kern_return_t ret = KERN_INVALID_ARGUMENT;
  struct gsync_hbucket *hbp = gsync_buckets + bucket;

  kmutex_lock (&hbp->lock, FALSE);

  if (flags & GSYNC_MUTATE)
    {
      /* Set the contents of the address to the specified value,
       * even if we don't end up waking any threads. Note that
       * the bucket locks give us atomicity. */

      if (task != current_task ())
        {
          vm_offset_t paddr = temp_mapping (&va, addr,
            VM_PROT_READ | VM_PROT_WRITE);

          if (paddr == 0)
            {
              kmutex_unlock (&hbp->lock);
              vm_map_unlock_read (task->map);
              vm_object_deallocate (va.obj);
              return (KERN_MEMORY_FAILURE);
            }

          addr = paddr + (addr & (PAGE_SIZE - 1));
        }

      *(unsigned int *)addr = val;
      if (task != current_task ())
        vm_map_remove (kernel_map, addr, addr + sizeof (int));
    }

  vm_map_unlock_read (task->map);

  int found = 0;
  struct list *runp = gsync_find_key (&hbp->entries, &key, &found);
  if (found)
    {
      do
        runp = dequeue_waiter (runp);
      while ((flags & GSYNC_BROADCAST) &&
        !list_end (&hbp->entries, runp) &&
        gsync_key_eq (&node_to_waiter(runp)->key, &key));

      ret = KERN_SUCCESS;
    }

  kmutex_unlock (&hbp->lock);
  return (ret);
}
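
/* Move threads waiting on the address SRC in TASK over to the address
 * DST, so that a later wake on DST reaches them. With WAKE_ONE, the
 * first waiter is woken instead of requeued. Unless GSYNC_BROADCAST is
 * set, at most one waiter is moved to DST. Returns
 * KERN_INVALID_ARGUMENT when there are no waiters on SRC. */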
kern_return_t gsync_requeue (task_t task, vm_offset_t src,
  vm_offset_t dst, boolean_t wake_one, int flags)
{
  if (task == 0)
    return (KERN_INVALID_TASK);
  else if (src % sizeof (int) != 0 || dst % sizeof (int) != 0)
    return (KERN_INVALID_ADDRESS);

  union gsync_key src_k, dst_k;
  struct vm_args va;

  int src_bkt = gsync_prepare_key (task, src, flags, &src_k, &va);
  if (src_bkt < 0)
    return (KERN_INVALID_ADDRESS);

  /* Unlock the VM object before the second lookup. */
  vm_object_unlock (va.obj);

  int dst_bkt = gsync_prepare_key (task, dst, flags, &dst_k, &va);
  if (dst_bkt < 0)
    return (KERN_INVALID_ADDRESS);

  /* We never create any temporary mappings in 'requeue', so we
   * can unlock the VM object right now. */
  vm_object_unlock (va.obj);

  /* If we're asked to unconditionally wake up a waiter, then
   * we need to remove a maximum of two threads from the queue. */
  unsigned int nw = 1 + wake_one;
  struct gsync_hbucket *bp1 = gsync_buckets + src_bkt;
  struct gsync_hbucket *bp2 = gsync_buckets + dst_bkt;

  /* Acquire the locks in order, to prevent any potential deadlock. */
  if (bp1 == bp2)
    kmutex_lock (&bp1->lock, FALSE);
  else if ((unsigned long)bp1 < (unsigned long)bp2)
    {
      kmutex_lock (&bp1->lock, FALSE);
      kmutex_lock (&bp2->lock, FALSE);
    }
  else
    {
      kmutex_lock (&bp2->lock, FALSE);
      kmutex_lock (&bp1->lock, FALSE);
    }

  kern_return_t ret = KERN_SUCCESS;
  int exact;
  struct list *inp = gsync_find_key (&bp1->entries, &src_k, &exact);

  if (! exact)
    /* There are no waiters in the source queue. */
    ret = KERN_INVALID_ARGUMENT;
  else
    {
      struct list *outp = gsync_find_key (&bp2->entries, &dst_k, 0);

      /* We're going to need a node that points one past the
       * end of the waiters in the source queue. */
      struct list *endp = inp;

      do
        {
          /* Modify the keys while iterating. */
          node_to_waiter(endp)->key = dst_k;
          endp = list_next (endp);
        }
      while (((flags & GSYNC_BROADCAST) || --nw != 0) &&
        !list_end (&bp1->entries, endp) &&
        gsync_key_eq (&node_to_waiter(endp)->key, &src_k));

      /* Splice the list by removing waiters from the source queue
       * and inserting them into the destination queue. */
      inp->prev->next = endp;
      endp->prev->next = outp->next;
      endp->prev = inp->prev;

      outp->next = inp;
      inp->prev = outp;

      if (wake_one)
        (void)dequeue_waiter (inp);
    }

  /* Release the locks and we're done. */
  kmutex_unlock (&bp1->lock);
  if (bp1 != bp2)
    kmutex_unlock (&bp2->lock);

  return (ret);
}