/*
 * Mach Operating System
 * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: David B. Golub, Carnegie Mellon University
 * Date:   3/89
 */
/*
 * Mach device server routines (i386at version).
 *
 * Copyright (c) 1996 The University of Utah and
 * the Computer Systems Laboratory at the University of Utah (CSL).
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software is hereby
 * granted provided that (1) source code retains these copyright, permission,
 * and disclaimer notices, and (2) redistributions including binaries
 * reproduce the notices in supporting documentation, and (3) all advertising
 * materials mentioning features or use of this software display the following
 * acknowledgement: ``This product includes software developed by the
 * Computer Systems Laboratory at the University of Utah.''
 *
 * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
 * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 * Author: Shantanu Goel, University of Utah CSL
 */

#include <kern/printf.h>
#include <string.h>

#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/port.h>
#include <mach/vm_param.h>
#include <mach/notify.h>

#include <machine/locore.h>
#include <machine/machspl.h>	/* spl definitions */

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <kern/ast.h>
#include <kern/counters.h>
#include <kern/debug.h>
#include <kern/printf.h>
#include <kern/queue.h>
#include <kern/slab.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/sched_prim.h>

#include <vm/memory_object.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_user.h>

#include <device/device_types.h>
#include <device/dev_hdr.h>
#include <device/conf.h>
#include <device/io_req.h>
#include <device/ds_routines.h>
#include <device/net_status.h>
#include <device/device_port.h>
#include <device/device_reply.user.h>
#include <device/device_emul.h>

#include <machine/machspl.h>

#ifdef LINUX_DEV
extern struct device_emulation_ops linux_block_emulation_ops;
#ifdef CONFIG_INET
extern struct device_emulation_ops linux_net_emulation_ops;
extern void free_skbuffs (void);
#ifdef CONFIG_PCMCIA
extern struct device_emulation_ops linux_pcmcia_emulation_ops;
#endif /* CONFIG_PCMCIA */
#endif /* CONFIG_INET */
#endif /* LINUX_DEV */
#ifdef MACH_HYP
extern struct device_emulation_ops hyp_block_emulation_ops;
extern struct device_emulation_ops hyp_net_emulation_ops;
#endif /* MACH_HYP */
extern struct device_emulation_ops mach_device_emulation_ops;

/* List of emulations.  */
static struct device_emulation_ops *emulation_list[] =
{
#ifdef LINUX_DEV
    &linux_block_emulation_ops,
#ifdef CONFIG_INET
    &linux_net_emulation_ops,
#ifdef CONFIG_PCMCIA
    &linux_pcmcia_emulation_ops,
#endif /* CONFIG_PCMCIA */
#endif /* CONFIG_INET */
#endif /* LINUX_DEV */
#ifdef MACH_HYP
    &hyp_block_emulation_ops,
    &hyp_net_emulation_ops,
#endif /* MACH_HYP */
    &mach_device_emulation_ops,
};

static struct vm_map device_io_map_store;
vm_map_t device_io_map = &device_io_map_store;

#define NUM_EMULATION (sizeof (emulation_list) / sizeof (emulation_list[0]))
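
/*
 * ds_device_open (below) probes the emulations in the order they appear
 * in emulation_list; the first open routine that returns anything other
 * than D_NO_SUCH_DEVICE claims the device.
 */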
io_return_t
ds_device_open (ipc_port_t open_port, ipc_port_t reply_port,
                mach_msg_type_name_t reply_port_type, dev_mode_t mode,
                char *name, device_t *devp)
{
    unsigned i;
    io_return_t err;

    /* Open must be called on the master device port.  */
    if (open_port != master_device_port)
        return D_INVALID_OPERATION;

    /* There must be a reply port.  */
    if (! IP_VALID (reply_port))
    {
        printf ("ds_* invalid reply port\n");
        SoftDebugger ("ds_* reply_port");
        return MIG_NO_REPLY;
    }

    /* Call each emulation's open routine to find the device.  */
    for (i = 0; i < NUM_EMULATION; i++)
    {
        err = (*emulation_list[i]->open) (reply_port, reply_port_type,
                                          mode, name, devp);
        if (err != D_NO_SUCH_DEVICE)
            break;
    }

    return err;
}

io_return_t
ds_device_close (device_t dev)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return D_NO_SUCH_DEVICE;

    return (dev->emul_ops->close
            ? (*dev->emul_ops->close) (dev->emul_data)
            : D_SUCCESS);
}

io_return_t
ds_device_write (device_t dev, ipc_port_t reply_port,
                 mach_msg_type_name_t reply_port_type, dev_mode_t mode,
                 recnum_t recnum, io_buf_ptr_t data, unsigned int count,
                 int *bytes_written)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return D_NO_SUCH_DEVICE;

    if (data == 0)
        return D_INVALID_SIZE;

    if (! dev->emul_ops->write)
        return D_INVALID_OPERATION;

    return (*dev->emul_ops->write) (dev->emul_data, reply_port,
                                    reply_port_type, mode, recnum,
                                    data, count, bytes_written);
}

io_return_t
ds_device_write_inband (device_t dev, ipc_port_t reply_port,
                        mach_msg_type_name_t reply_port_type,
                        dev_mode_t mode, recnum_t recnum,
                        io_buf_ptr_inband_t data, unsigned count,
                        int *bytes_written)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return D_NO_SUCH_DEVICE;

    if (data == 0)
        return D_INVALID_SIZE;

    if (! dev->emul_ops->write_inband)
        return D_INVALID_OPERATION;

    return (*dev->emul_ops->write_inband) (dev->emul_data, reply_port,
                                           reply_port_type, mode, recnum,
                                           data, count, bytes_written);
}

io_return_t
ds_device_read (device_t dev, ipc_port_t reply_port,
                mach_msg_type_name_t reply_port_type, dev_mode_t mode,
                recnum_t recnum, int count, io_buf_ptr_t *data,
                unsigned *bytes_read)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return D_NO_SUCH_DEVICE;

    if (! dev->emul_ops->read)
        return D_INVALID_OPERATION;

    return (*dev->emul_ops->read) (dev->emul_data, reply_port,
                                   reply_port_type, mode, recnum,
                                   count, data, bytes_read);
}

io_return_t
ds_device_read_inband (device_t dev, ipc_port_t reply_port,
                       mach_msg_type_name_t reply_port_type, dev_mode_t mode,
                       recnum_t recnum, int count, char *data,
                       unsigned *bytes_read)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return D_NO_SUCH_DEVICE;

    if (! dev->emul_ops->read_inband)
        return D_INVALID_OPERATION;

    return (*dev->emul_ops->read_inband) (dev->emul_data, reply_port,
                                          reply_port_type, mode, recnum,
                                          count, data, bytes_read);
}

io_return_t
ds_device_set_status (device_t dev, dev_flavor_t flavor,
                      dev_status_t status, mach_msg_type_number_t status_count)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return D_NO_SUCH_DEVICE;

    if (! dev->emul_ops->set_status)
        return D_INVALID_OPERATION;

    return (*dev->emul_ops->set_status) (dev->emul_data, flavor, status,
                                         status_count);
}

io_return_t
ds_device_get_status (device_t dev, dev_flavor_t flavor, dev_status_t status,
                      mach_msg_type_number_t *status_count)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return D_NO_SUCH_DEVICE;

    if (! dev->emul_ops->get_status)
        return D_INVALID_OPERATION;

    return (*dev->emul_ops->get_status) (dev->emul_data, flavor, status,
                                         status_count);
}

io_return_t
ds_device_set_filter (device_t dev, ipc_port_t receive_port, int priority,
                      filter_t *filter, unsigned filter_count)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return D_NO_SUCH_DEVICE;

    if (! dev->emul_ops->set_filter)
        return D_INVALID_OPERATION;

    return (*dev->emul_ops->set_filter) (dev->emul_data, receive_port,
                                         priority, filter, filter_count);
}

io_return_t
ds_device_map (device_t dev, vm_prot_t prot, vm_offset_t offset,
               vm_size_t size, ipc_port_t *pager, boolean_t unmap)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return D_NO_SUCH_DEVICE;

    if (! dev->emul_ops->map)
        return D_INVALID_OPERATION;

    return (*dev->emul_ops->map) (dev->emul_data, prot,
                                  offset, size, pager, unmap);
}

boolean_t
ds_notify (mach_msg_header_t *msg)
{
    if (msg->msgh_id == MACH_NOTIFY_NO_SENDERS)
    {
        device_t dev;
        mach_no_senders_notification_t *ns;

        ns = (mach_no_senders_notification_t *) msg;
        dev = dev_port_lookup((ipc_port_t) ns->not_header.msgh_remote_port);
        assert(dev);
        if (dev->emul_ops->no_senders)
            (*dev->emul_ops->no_senders) (ns);
        return TRUE;
    }

    printf ("ds_notify: strange notification %d\n", msg->msgh_id);
    return FALSE;
}

io_return_t
ds_device_write_trap (device_t dev, dev_mode_t mode,
                      recnum_t recnum, vm_offset_t data, vm_size_t count)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return D_NO_SUCH_DEVICE;

    if (! dev->emul_ops->write_trap)
        return D_INVALID_OPERATION;

    return (*dev->emul_ops->write_trap) (dev->emul_data,
                                         mode, recnum, data, count);
}

io_return_t
ds_device_writev_trap (device_t dev, dev_mode_t mode,
                       recnum_t recnum, io_buf_vec_t *iovec, vm_size_t count)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return D_NO_SUCH_DEVICE;

    if (! dev->emul_ops->writev_trap)
        return D_INVALID_OPERATION;

    return (*dev->emul_ops->writev_trap) (dev->emul_data,
                                          mode, recnum, iovec, count);
}

void
device_reference (device_t dev)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return;

    if (dev->emul_ops->reference)
        (*dev->emul_ops->reference) (dev->emul_data);
}

void
device_deallocate (device_t dev)
{
    /* Refuse if device is dead or not completely open.  */
    if (dev == DEVICE_NULL)
        return;

    if (dev->emul_ops->dealloc)
        (*dev->emul_ops->dealloc) (dev->emul_data);
}

/*
 * What follows is the interface for the native Mach devices.
 */
ipc_port_t
mach_convert_device_to_port (mach_device_t device)
{
    ipc_port_t port;

    if (! device)
        return IP_NULL;

    device_lock(device);
    if (device->state == DEV_STATE_OPEN)
        port = ipc_port_make_send(device->port);
    else
        port = IP_NULL;
    device_unlock(device);

    mach_device_deallocate(device);
    return port;
}

static io_return_t
device_open(const ipc_port_t reply_port,
            mach_msg_type_name_t reply_port_type,
            dev_mode_t mode,
            char *name,
            device_t *device_p)
{
    mach_device_t device;
    kern_return_t result;
    io_req_t ior;
    ipc_port_t notify;

    /*
     * Find the device.
     */
    device = device_lookup(name);
    if (device == MACH_DEVICE_NULL)
        return (D_NO_SUCH_DEVICE);

    /*
     * If the device is being opened or closed,
     * wait for that operation to finish.
     */
    device_lock(device);
    while (device->state == DEV_STATE_OPENING ||
           device->state == DEV_STATE_CLOSING) {
        device->io_wait = TRUE;
        thread_sleep((event_t)device, simple_lock_addr(device->lock), TRUE);
        device_lock(device);
    }

    /*
     * If the device is already open, increment the open count
     * and return.
     */
    if (device->state == DEV_STATE_OPEN) {

        if (device->flag & D_EXCL_OPEN) {
            /*
             * Cannot open a second time.
             */
            device_unlock(device);
            mach_device_deallocate(device);
            return (D_ALREADY_OPEN);
        }

        device->open_count++;
        device_unlock(device);
        *device_p = &device->dev;
        return (D_SUCCESS);
        /*
         * Return deallocates device reference while acquiring
         * port.
         */
    }

    /*
     * Allocate the device port and register the device before
     * opening it.
     */
    device->state = DEV_STATE_OPENING;
    device_unlock(device);

    /*
     * Allocate port, keeping a reference for it.
     */
    device->port = ipc_port_alloc_kernel();
    if (device->port == IP_NULL) {
        device_lock(device);
        device->state = DEV_STATE_INIT;
        device->port = IP_NULL;
        if (device->io_wait) {
            device->io_wait = FALSE;
            thread_wakeup((event_t)device);
        }
        device_unlock(device);
        mach_device_deallocate(device);
        return (KERN_RESOURCE_SHORTAGE);
    }

    dev_port_enter(device);

    /*
     * Request no-senders notifications on device port.
     */
    notify = ipc_port_make_sonce(device->port);
    ip_lock(device->port);
    ipc_port_nsrequest(device->port, 1, notify, &notify);
    assert(notify == IP_NULL);

    /*
     * Open the device.
     */
    io_req_alloc(ior, 0);

    ior->io_device = device;
    ior->io_unit = device->dev_number;
    ior->io_op = IO_OPEN | IO_CALL;
    ior->io_mode = mode;
    ior->io_error = 0;
    ior->io_done = ds_open_done;
    ior->io_reply_port = reply_port;
    ior->io_reply_port_type = reply_port_type;

    result = (*device->dev_ops->d_open)(device->dev_number, (int)mode, ior);
    if (result == D_IO_QUEUED)
        return (MIG_NO_REPLY);

    /*
     * Return result via ds_open_done.
     */
    ior->io_error = result;
    (void) ds_open_done(ior);
    io_req_free(ior);

    return (MIG_NO_REPLY);      /* reply already sent */
}

boolean_t
ds_open_done(const io_req_t ior)
{
    kern_return_t result;
    mach_device_t device;

    device = ior->io_device;
    result = ior->io_error;

    if (result != D_SUCCESS) {
        /*
         * Open failed.  Deallocate port and device.
         */
        dev_port_remove(device);
        ipc_port_dealloc_kernel(device->port);
        device->port = IP_NULL;

        device_lock(device);
        device->state = DEV_STATE_INIT;
        if (device->io_wait) {
            device->io_wait = FALSE;
            thread_wakeup((event_t)device);
        }
        device_unlock(device);

        mach_device_deallocate(device);
        device = MACH_DEVICE_NULL;
    }
    else {
        /*
         * Open succeeded.
         */
        device_lock(device);
        device->state = DEV_STATE_OPEN;
        device->open_count = 1;
        if (device->io_wait) {
            device->io_wait = FALSE;
            thread_wakeup((event_t)device);
        }
        device_unlock(device);

        /* donate device reference to get port */
    }

    /*
     * Must explicitly convert device to port, since
     * device_reply interface is built as 'user' side
     * (thus cannot get translation).
     */
    if (IP_VALID(ior->io_reply_port)) {
        (void) ds_device_open_reply(ior->io_reply_port,
                                    ior->io_reply_port_type,
                                    result,
                                    mach_convert_device_to_port(device));
    } else
        mach_device_deallocate(device);

    return (TRUE);
}
static io_return_t
device_close(void *dev)
{
    mach_device_t device = dev;

    device_lock(device);

    /*
     * If device will remain open, do nothing.
     */
    if (--device->open_count > 0) {
        device_unlock(device);
        return (D_SUCCESS);
    }

    /*
     * If device is being closed, do nothing.
     */
    if (device->state == DEV_STATE_CLOSING) {
        device_unlock(device);
        return (D_SUCCESS);
    }

    /*
     * Mark device as closing, to prevent new IO.
     * Outstanding IO will still be in progress.
     */
    device->state = DEV_STATE_CLOSING;
    device_unlock(device);

    /*
     * ? wait for IO to end ?
     *   only if device wants to
     */

    /*
     * Remove the device-port association.
     */
    dev_port_remove(device);
    ipc_port_dealloc_kernel(device->port);

    /*
     * Close the device.
     */
    (*device->dev_ops->d_close)(device->dev_number, 0);

    /*
     * Finally mark it closed.  If someone else is trying
     * to open it, the open can now proceed.
     */
    device_lock(device);
    device->state = DEV_STATE_INIT;
    if (device->io_wait) {
        device->io_wait = FALSE;
        thread_wakeup((event_t)device);
    }
    device_unlock(device);

    return (D_SUCCESS);
}

/*
 * Write to a device.
 */
static io_return_t
device_write(void *dev,
             const ipc_port_t reply_port,
             mach_msg_type_name_t reply_port_type,
             dev_mode_t mode,
             recnum_t recnum,
             const io_buf_ptr_t data,
             unsigned int data_count,
             int *bytes_written)
{
    mach_device_t device = dev;
    io_req_t ior;
    io_return_t result;

    if (device->state != DEV_STATE_OPEN)
        return (D_NO_SUCH_DEVICE);

    /*
     * XXX Need logic to reject ridiculously big requests.
     */

    /* XXX note that a CLOSE may proceed at any point */

    /*
     * Package the write request for the device driver.
     */
    io_req_alloc(ior, data_count);

    ior->io_device = device;
    ior->io_unit = device->dev_number;
    ior->io_op = IO_WRITE | IO_CALL;
    ior->io_mode = mode;
    ior->io_recnum = recnum;
    ior->io_data = data;
    ior->io_count = data_count;
    ior->io_total = data_count;
    ior->io_alloc_size = 0;
    ior->io_residual = 0;
    ior->io_error = 0;
    ior->io_done = ds_write_done;
    ior->io_reply_port = reply_port;
    ior->io_reply_port_type = reply_port_type;
    ior->io_copy = VM_MAP_COPY_NULL;

    /*
     * The ior keeps an extra reference for the device.
     */
    mach_device_reference(device);

    /*
     * And do the write ...
     *
     * device_write_dealloc returns false if there's more
     * to do; it has updated the ior appropriately and expects
     * its caller to reinvoke it on the device.
     */
    do {
        result = (*device->dev_ops->d_write)(device->dev_number, ior);

        /*
         * If the IO was queued, delay reply until it is finished.
         */
        if (result == D_IO_QUEUED)
            return (MIG_NO_REPLY);

        /*
         * Discard the local mapping of the data.
         */
    } while (!device_write_dealloc(ior));

    /*
     * Return the number of bytes actually written.
     */
    *bytes_written = ior->io_total - ior->io_residual;

    /*
     * Remove the extra reference.
     */
    mach_device_deallocate(device);

    io_req_free(ior);

    return (result);
}

/*
 * Write to a device, but memory is in message.
 */
static io_return_t
device_write_inband(void *dev,
                    const ipc_port_t reply_port,
                    mach_msg_type_name_t reply_port_type,
                    dev_mode_t mode,
                    recnum_t recnum,
                    io_buf_ptr_inband_t data,
                    unsigned int data_count,
                    int *bytes_written)
{
    mach_device_t device = dev;
    io_req_t ior;
    io_return_t result;

    if (device->state != DEV_STATE_OPEN)
        return (D_NO_SUCH_DEVICE);

    /* XXX note that a CLOSE may proceed at any point */

    /*
     * Package the write request for the device driver.
     */
    io_req_alloc(ior, 0);

    ior->io_device = device;
    ior->io_unit = device->dev_number;
    ior->io_op = IO_WRITE | IO_CALL | IO_INBAND;
    ior->io_mode = mode;
    ior->io_recnum = recnum;
    ior->io_data = data;
    ior->io_count = data_count;
    ior->io_total = data_count;
    ior->io_alloc_size = 0;
    ior->io_residual = 0;
    ior->io_error = 0;
    ior->io_done = ds_write_done;
    ior->io_reply_port = reply_port;
    ior->io_reply_port_type = reply_port_type;

    /*
     * The ior keeps an extra reference for the device.
     */
    mach_device_reference(device);

    /*
     * And do the write.
     */
    result = (*device->dev_ops->d_write)(device->dev_number, ior);

    /*
     * If the IO was queued, delay reply until it is finished.
     */
    if (result == D_IO_QUEUED)
        return (MIG_NO_REPLY);

    /*
     * Return the number of bytes actually written.
     */
    *bytes_written = ior->io_total - ior->io_residual;

    /*
     * Remove the extra reference.
     */
    mach_device_deallocate(device);

    io_req_free(ior);

    return (result);
}
/*
 * Wire down incoming memory to give to device.
 */
kern_return_t
device_write_get(
    io_req_t    ior,
    boolean_t   *wait)
{
    vm_map_copy_t io_copy;
    vm_offset_t new_addr;
    kern_return_t result;
    int bsize;
    vm_size_t min_size;

    /*
     * By default, caller does not have to wait.
     */
    *wait = FALSE;

    /*
     * Nothing to do if no data.
     */
    if (ior->io_count == 0)
        return (KERN_SUCCESS);

    /*
     * Loaned iors already have valid data.
     */
    if (ior->io_op & IO_LOANED)
        return (KERN_SUCCESS);

    /*
     * Inband case.
     */
    if (ior->io_op & IO_INBAND) {
        assert(ior->io_count <= sizeof (io_buf_ptr_inband_t));
        new_addr = kmem_cache_alloc(&io_inband_cache);
        memcpy((void*)new_addr, ior->io_data, ior->io_count);
        ior->io_data = (io_buf_ptr_t)new_addr;
        ior->io_alloc_size = sizeof (io_buf_ptr_inband_t);

        return (KERN_SUCCESS);
    }

    /*
     * Figure out how much data to move this time.  If the device
     * won't return a block size, then we have to do the whole
     * request in one shot (ditto if this is a block fragment),
     * otherwise, move at least one block's worth.
     */
    result = (*ior->io_device->dev_ops->d_dev_info)(
                        ior->io_device->dev_number,
                        D_INFO_BLOCK_SIZE,
                        &bsize);

    if (result != KERN_SUCCESS || ior->io_count < (vm_size_t) bsize)
        min_size = (vm_size_t) ior->io_count;
    else
        min_size = (vm_size_t) bsize;

    /*
     * Map the pages from this page list into memory.
     * io_data records location of data.
     * io_alloc_size is the vm size of the region to deallocate.
     */
    io_copy = (vm_map_copy_t) ior->io_data;
    result = kmem_io_map_copyout(device_io_map,
                                 (vm_offset_t*)&ior->io_data, &new_addr,
                                 &ior->io_alloc_size, io_copy, min_size);
    if (result != KERN_SUCCESS)
        return (result);

    if ((ior->io_data + ior->io_count) >
        (((char *)new_addr) + ior->io_alloc_size)) {

        /*
         * Operation has to be split.  Reset io_count for how
         * much we can do this time.
         */
        assert(vm_map_copy_has_cont(io_copy));
        assert(ior->io_count == io_copy->size);
        ior->io_count = ior->io_alloc_size -
            (ior->io_data - ((char *)new_addr));

        /*
         * Caller must wait synchronously.
         */
        ior->io_op &= ~IO_CALL;
        *wait = TRUE;
    }

    ior->io_copy = io_copy;             /* vm_map_copy to discard */

    return (KERN_SUCCESS);
}

/*
 * Clean up memory allocated for IO.
 */
boolean_t
device_write_dealloc(io_req_t ior)
{
    vm_map_copy_t new_copy = VM_MAP_COPY_NULL;
    vm_map_copy_t io_copy;
    kern_return_t result;
    vm_offset_t size_to_do;
    int bsize;

    if (ior->io_alloc_size == 0)
        return (TRUE);

    /*
     * Inband case.
     */
    if (ior->io_op & IO_INBAND) {
        kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
        return (TRUE);
    }

    if ((io_copy = ior->io_copy) == VM_MAP_COPY_NULL)
        return (TRUE);

    /*
     * To prevent a possible deadlock with the default pager,
     * we have to release space in the device_io_map before
     * we allocate any memory.  (Which vm_map_copy_invoke_cont
     * might do.)  See the discussion in mach_device_init.
     */
    kmem_io_map_deallocate(device_io_map,
                           trunc_page(ior->io_data),
                           ior->io_alloc_size);

    if (vm_map_copy_has_cont(io_copy)) {

        /*
         * Remember how much is left, then
         * invoke or abort the continuation.
         */
        size_to_do = io_copy->size - ior->io_count;
        if (ior->io_error == 0) {
            vm_map_copy_invoke_cont(io_copy, &new_copy, &result);
        }
        else {
            vm_map_copy_abort_cont(io_copy);
            result = KERN_FAILURE;
        }

        if (result == KERN_SUCCESS && new_copy != VM_MAP_COPY_NULL) {
            int res;

            /*
             * We have a new continuation, reset the ior to
             * represent the remainder of the request.  Must
             * adjust the recnum because drivers assume
             * that the residual is zero.
             */
            ior->io_op &= ~IO_DONE;
            ior->io_op |= IO_CALL;

            res = (*ior->io_device->dev_ops->d_dev_info)(
                        ior->io_device->dev_number,
                        D_INFO_BLOCK_SIZE,
                        &bsize);

            if (res != D_SUCCESS)
                panic("device_write_dealloc: No block size");

            ior->io_recnum += ior->io_count/bsize;
            ior->io_count = new_copy->size;
        }
        else {

            /*
             * No continuation.  Add the amount we didn't get
             * to the residual.
             */
            ior->io_residual += size_to_do;
        }
    }

    /*
     * Clean up the state for the IO that just completed.
     */
    vm_map_copy_discard(ior->io_copy);
    ior->io_copy = VM_MAP_COPY_NULL;
    ior->io_data = (char *) new_copy;

    /*
     * Return FALSE if there's more IO to do.
     */
    return (new_copy == VM_MAP_COPY_NULL);
}
/*
 * Send write completion message to client, and discard the data.
 */
boolean_t
ds_write_done(const io_req_t ior)
{
    /*
     * device_write_dealloc discards the data that has been
     * written, but may decide that there is more to write.
     */
    while (!device_write_dealloc(ior)) {
        io_return_t result;
        mach_device_t device;

        /*
         * More IO to do -- invoke it.
         */
        device = ior->io_device;
        result = (*device->dev_ops->d_write)(device->dev_number, ior);

        /*
         * If the IO was queued, return FALSE -- not done yet.
         */
        if (result == D_IO_QUEUED)
            return (FALSE);
    }

    /*
     * Now the write is really complete.  Send reply.
     */
    if (IP_VALID(ior->io_reply_port)) {
        (void) (*((ior->io_op & IO_INBAND) ?
                  ds_device_write_reply_inband :
                  ds_device_write_reply))(ior->io_reply_port,
                                          ior->io_reply_port_type,
                                          ior->io_error,
                                          (int) (ior->io_total -
                                                 ior->io_residual));
    }

    mach_device_deallocate(ior->io_device);

    return (TRUE);
}

/*
 * Read from a device.
 */
static io_return_t
device_read(void *dev,
            const ipc_port_t reply_port,
            mach_msg_type_name_t reply_port_type,
            dev_mode_t mode,
            recnum_t recnum,
            int bytes_wanted,
            io_buf_ptr_t *data,
            unsigned int *data_count)
{
    mach_device_t device = dev;
    io_req_t ior;
    io_return_t result;

    if (device->state != DEV_STATE_OPEN)
        return (D_NO_SUCH_DEVICE);

    /* XXX note that a CLOSE may proceed at any point */

    /*
     * There must be a reply port.
     */
    if (!IP_VALID(reply_port)) {
        printf("ds_* invalid reply port\n");
        SoftDebugger("ds_* reply_port");
        return (MIG_NO_REPLY);  /* no sense in doing anything */
    }

    /*
     * Package the read request for the device driver.
     */
    io_req_alloc(ior, 0);

    ior->io_device = device;
    ior->io_unit = device->dev_number;
    ior->io_op = IO_READ | IO_CALL;
    ior->io_mode = mode;
    ior->io_recnum = recnum;
    ior->io_data = 0;           /* driver must allocate data */
    ior->io_count = bytes_wanted;
    ior->io_alloc_size = 0;     /* no data allocated yet */
    ior->io_residual = 0;
    ior->io_error = 0;
    ior->io_done = ds_read_done;
    ior->io_reply_port = reply_port;
    ior->io_reply_port_type = reply_port_type;

    /*
     * The ior keeps an extra reference for the device.
     */
    mach_device_reference(device);

    /*
     * And do the read.
     */
    result = (*device->dev_ops->d_read)(device->dev_number, ior);

    /*
     * If the IO was queued, delay reply until it is finished.
     */
    if (result == D_IO_QUEUED)
        return (MIG_NO_REPLY);

    /*
     * Return result via ds_read_done.
     */
    ior->io_error = result;
    (void) ds_read_done(ior);
    io_req_free(ior);

    return (MIG_NO_REPLY);      /* reply has already been sent. */
}

/*
 * Read from a device, but return the data 'inband.'
 */
static io_return_t
device_read_inband(void *dev,
                   const ipc_port_t reply_port,
                   mach_msg_type_name_t reply_port_type,
                   dev_mode_t mode,
                   recnum_t recnum,
                   int bytes_wanted,
                   char *data,
                   unsigned int *data_count)
{
    mach_device_t device = dev;
    io_req_t ior;
    io_return_t result;

    if (device->state != DEV_STATE_OPEN)
        return (D_NO_SUCH_DEVICE);

    /* XXX note that a CLOSE may proceed at any point */

    /*
     * There must be a reply port.
     */
    if (!IP_VALID(reply_port)) {
        printf("ds_* invalid reply port\n");
        SoftDebugger("ds_* reply_port");
        return (MIG_NO_REPLY);  /* no sense in doing anything */
    }

    /*
     * Package the read for the device driver.
     */
    io_req_alloc(ior, 0);

    ior->io_device = device;
    ior->io_unit = device->dev_number;
    ior->io_op = IO_READ | IO_CALL | IO_INBAND;
    ior->io_mode = mode;
    ior->io_recnum = recnum;
    ior->io_data = 0;           /* driver must allocate data */
    ior->io_count =
        ((bytes_wanted < sizeof(io_buf_ptr_inband_t)) ?
         bytes_wanted : sizeof(io_buf_ptr_inband_t));
    ior->io_alloc_size = 0;     /* no data allocated yet */
    ior->io_residual = 0;
    ior->io_error = 0;
    ior->io_done = ds_read_done;
    ior->io_reply_port = reply_port;
    ior->io_reply_port_type = reply_port_type;

    /*
     * The ior keeps an extra reference for the device.
     */
    mach_device_reference(device);

    /*
     * Do the read.
     */
    result = (*device->dev_ops->d_read)(device->dev_number, ior);

    /*
     * If the io was queued, delay reply until it is finished.
     */
    if (result == D_IO_QUEUED)
        return (MIG_NO_REPLY);

    /*
     * Return result, via ds_read_done.
     */
    ior->io_error = result;
    (void) ds_read_done(ior);
    io_req_free(ior);

    return (MIG_NO_REPLY);      /* reply has already been sent. */
}
/*
 * Allocate wired-down memory for device read.
 */
kern_return_t device_read_alloc(
    io_req_t    ior,
    vm_size_t   size)
{
    vm_offset_t addr;
    kern_return_t kr;

    /*
     * Nothing to do if no data.
     */
    if (ior->io_count == 0)
        return (KERN_SUCCESS);

    if (ior->io_op & IO_INBAND) {
        ior->io_data = (io_buf_ptr_t) kmem_cache_alloc(&io_inband_cache);
        ior->io_alloc_size = sizeof(io_buf_ptr_inband_t);
    } else {
        size = round_page(size);
        kr = kmem_alloc(kernel_map, &addr, size);
        if (kr != KERN_SUCCESS)
            return (kr);

        ior->io_data = (io_buf_ptr_t) addr;
        ior->io_alloc_size = size;
    }

    return (KERN_SUCCESS);
}

boolean_t ds_read_done(const io_req_t ior)
{
    vm_offset_t start_data, end_data;
    vm_offset_t start_sent, end_sent;
    vm_size_t size_read;

    if (ior->io_error)
        size_read = 0;
    else
        size_read = ior->io_count - ior->io_residual;

    start_data = (vm_offset_t)ior->io_data;
    end_data = start_data + size_read;

    start_sent = (ior->io_op & IO_INBAND) ? start_data :
                                            trunc_page(start_data);
    end_sent = (ior->io_op & IO_INBAND) ?
        start_data + ior->io_alloc_size : round_page(end_data);

    /*
     * Zero memory that the device did not fill.
     */
    if (start_sent < start_data)
        memset((void *)start_sent, 0, start_data - start_sent);
    if (end_sent > end_data)
        memset((void *)end_data, 0, end_sent - end_data);

    /*
     * Touch the data being returned, to mark it dirty.
     * If the pages were filled by DMA, the pmap module
     * may think that they are clean.
     */
    {
        vm_offset_t touch;
        int c;

        for (touch = start_sent; touch < end_sent; touch += PAGE_SIZE) {
            c = *(volatile char *)touch;
            *(volatile char *)touch = c;
        }
    }

    /*
     * Send the data to the reply port - this
     * unwires and deallocates it.
     */
    if (ior->io_op & IO_INBAND) {
        (void)ds_device_read_reply_inband(ior->io_reply_port,
                                          ior->io_reply_port_type,
                                          ior->io_error,
                                          (char *) start_data,
                                          size_read);
    } else {
        vm_map_copy_t copy;
        kern_return_t kr;

        kr = vm_map_copyin_page_list(kernel_map, start_data,
                                     size_read, TRUE, TRUE,
                                     &copy, FALSE);

        if (kr != KERN_SUCCESS)
            panic("read_done: vm_map_copyin_page_list failed");

        (void)ds_device_read_reply(ior->io_reply_port,
                                   ior->io_reply_port_type,
                                   ior->io_error,
                                   (char *) copy,
                                   size_read);
    }

    /*
     * Free any memory that was allocated but not sent.
     */
    if (ior->io_count != 0) {
        if (ior->io_op & IO_INBAND) {
            if (ior->io_alloc_size > 0)
                kmem_cache_free(&io_inband_cache, (vm_offset_t)ior->io_data);
        } else {
            vm_offset_t end_alloc;

            end_alloc = start_sent + round_page(ior->io_alloc_size);
            if (end_alloc > end_sent)
                (void) vm_deallocate(kernel_map,
                                     end_sent,
                                     end_alloc - end_sent);
        }
    }

    mach_device_deallocate(ior->io_device);

    return (TRUE);
}
static io_return_t
device_set_status(
    void                *dev,
    dev_flavor_t        flavor,
    dev_status_t        status,
    mach_msg_type_number_t status_count)
{
    mach_device_t device = dev;

    if (device->state != DEV_STATE_OPEN)
        return (D_NO_SUCH_DEVICE);

    /* XXX note that a CLOSE may proceed at any point */

    return ((*device->dev_ops->d_setstat)(device->dev_number,
                                          flavor,
                                          status,
                                          status_count));
}

io_return_t
mach_device_get_status(
    void                *dev,
    dev_flavor_t        flavor,
    dev_status_t        status,                 /* pointer to OUT array */
    mach_msg_type_number_t *status_count)       /* out */
{
    mach_device_t device = dev;

    if (device->state != DEV_STATE_OPEN)
        return (D_NO_SUCH_DEVICE);

    /* XXX note that a CLOSE may proceed at any point */

    return ((*device->dev_ops->d_getstat)(device->dev_number,
                                          flavor,
                                          status,
                                          status_count));
}

static io_return_t
device_set_filter(void *dev,
                  const ipc_port_t receive_port,
                  int priority,
                  filter_t filter[],
                  unsigned int filter_count)
{
    mach_device_t device = dev;

    if (device->state != DEV_STATE_OPEN)
        return (D_NO_SUCH_DEVICE);

    /* XXX note that a CLOSE may proceed at any point */

    /*
     * Request is absurd if no receive port is specified.
     */
    if (!IP_VALID(receive_port))
        return (D_INVALID_OPERATION);

    return ((*device->dev_ops->d_async_in)(device->dev_number,
                                           receive_port,
                                           priority,
                                           filter,
                                           filter_count));
}

static io_return_t
device_map(
    void                *dev,
    vm_prot_t           protection,
    vm_offset_t         offset,
    vm_size_t           size,
    ipc_port_t          *pager,         /* out */
    boolean_t           unmap)          /* ? */
{
    mach_device_t device = dev;

    if (protection & ~VM_PROT_ALL)
        return (KERN_INVALID_ARGUMENT);

    if (device->state != DEV_STATE_OPEN)
        return (D_NO_SUCH_DEVICE);

    /* XXX note that a CLOSE may proceed at any point */

    return (device_pager_setup(device, protection, offset, size,
                               (mach_port_t*)pager));
}

/*
 * Doesn't do anything (yet).
 */
static void
ds_no_senders(mach_no_senders_notification_t *notification)
{
    printf("ds_no_senders called! device_port=0x%lx count=%d\n",
           notification->not_header.msgh_remote_port,
           notification->not_count);
}
queue_head_t io_done_list;
decl_simple_lock_data(, io_done_list_lock)

#define splio splsched          /* XXX must block ALL io devices */
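
/*
 * iodone() places completed asynchronous requests (those marked IO_CALL)
 * on io_done_list; the io_done thread below dequeues each one and invokes
 * its io_done completion routine.  Requests without IO_CALL are completed
 * in place and their waiter is woken via iowait().
 */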
void iodone(io_req_t ior)
{
    spl_t s;

    /*
     * If this ior was loaned to us, return it directly.
     */
    if (ior->io_op & IO_LOANED) {
        (*ior->io_done)(ior);
        return;
    }

    /*
     * If !IO_CALL, some thread is waiting for this.  Must lock
     * structure to interlock correctly with iowait().  Else can
     * toss on queue for io_done thread to call completion.
     */
    s = splio();
    if ((ior->io_op & IO_CALL) == 0) {
        ior_lock(ior);
        ior->io_op |= IO_DONE;
        ior->io_op &= ~IO_WANTED;
        ior_unlock(ior);
        thread_wakeup((event_t)ior);
    } else {
        ior->io_op |= IO_DONE;
        simple_lock(&io_done_list_lock);
        enqueue_tail(&io_done_list, (queue_entry_t)ior);
        thread_wakeup((event_t)&io_done_list);
        simple_unlock(&io_done_list_lock);
    }
    splx(s);
}

void __attribute__ ((noreturn)) io_done_thread_continue(void)
{
    for (;;) {
        spl_t s;
        io_req_t ior;

#if defined (LINUX_DEV) && defined (CONFIG_INET)
        free_skbuffs ();
#endif
        s = splio();
        simple_lock(&io_done_list_lock);
        while ((ior = (io_req_t)dequeue_head(&io_done_list)) != 0) {
            simple_unlock(&io_done_list_lock);
            (void) splx(s);

            if ((*ior->io_done)(ior)) {
                /*
                 * IO done - free io_req_elt
                 */
                io_req_free(ior);
            }
            /* else routine has re-queued it somewhere */

            s = splio();
            simple_lock(&io_done_list_lock);
        }

        assert_wait(&io_done_list, FALSE);
        simple_unlock(&io_done_list_lock);
        (void) splx(s);
        counter(c_io_done_thread_block++);
        thread_block(io_done_thread_continue);
    }
}

void io_done_thread(void)
{
    /*
     * Set thread privileges and highest priority.
     */
    current_thread()->vm_privilege = 1;
    stack_privilege(current_thread());
    thread_set_own_priority(0);

    io_done_thread_continue();
    /*NOTREACHED*/
}
#define DEVICE_IO_MAP_SIZE      (16 * 1024 * 1024)

static void mach_device_trap_init(void);        /* forward */

void mach_device_init(void)
{
    vm_offset_t device_io_min, device_io_max;

    queue_init(&io_done_list);
    simple_lock_init(&io_done_list_lock);

    kmem_submap(device_io_map, kernel_map, &device_io_min, &device_io_max,
                DEVICE_IO_MAP_SIZE);

    /*
     * If the kernel receives many device_write requests, the
     * device_io_map might run out of space.  To prevent
     * device_write_get from failing in this case, we enable
     * wait_for_space on the map.  This causes kmem_io_map_copyout
     * to block until there is sufficient space.
     * (XXX Large writes may be starved by small writes.)
     *
     * There is a potential deadlock problem with this solution,
     * if a device_write from the default pager has to wait
     * for the completion of a device_write which needs to wait
     * for memory allocation.  Hence, once device_write_get
     * allocates space in device_io_map, no blocking memory
     * allocations should happen until device_write_dealloc
     * frees the space.  (XXX A large write might starve
     * a small write from the default pager.)
     */
    device_io_map->wait_for_space = TRUE;

    kmem_cache_init(&io_inband_cache, "io_buf_ptr_inband",
                    sizeof(io_buf_ptr_inband_t), 0, NULL, 0);

    mach_device_trap_init();
}

void iowait(io_req_t ior)
{
    spl_t s;

    s = splio();
    ior_lock(ior);
    while ((ior->io_op & IO_DONE) == 0) {
        assert_wait((event_t)ior, FALSE);
        ior_unlock(ior);
        thread_block((void (*)()) 0);
        ior_lock(ior);
    }
    ior_unlock(ior);
    splx(s);
}

/*
 * Device trap support.
 */

/*
 * Memory Management
 *
 * This currently has a single pool of 2k wired buffers
 * since we only handle writes to an ethernet device.
 * Should be more general.
 */
#define IOTRAP_REQSIZE 2048
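
/*
 * Each io_trap_cache buffer holds a struct io_req followed immediately by
 * the copied-in user data: device_write_trap and device_writev_trap point
 * io_data just past the io_req header (see below).
 */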
struct kmem_cache io_trap_cache;

/*
 * Initialization.  Called from mach_device_init().
 */
static void
mach_device_trap_init(void)
{
    kmem_cache_init(&io_trap_cache, "io_req", IOTRAP_REQSIZE, 0,
                    NULL, 0);
}

/*
 * Allocate an io_req_t.
 * Currently allocates from io_trap_cache.
 *
 * Could have lists of different size caches.
 * Could call a device-specific routine.
 */
io_req_t
ds_trap_req_alloc(const mach_device_t device, vm_size_t data_size)
{
    return (io_req_t) kmem_cache_alloc(&io_trap_cache);
}

/*
 * Called by iodone to release ior.
 */
boolean_t
ds_trap_write_done(const io_req_t ior)
{
    mach_device_t dev;

    dev = ior->io_device;

    /*
     * Should look at reply port and maybe send a message.
     */
    kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);

    /*
     * Give up device reference from ds_write_trap.
     */
    mach_device_deallocate(dev);

    return TRUE;
}

/*
 * Like device_write except that data is in user space.
 */
static io_return_t
device_write_trap (mach_device_t device, dev_mode_t mode,
                   recnum_t recnum, vm_offset_t data, vm_size_t data_count)
{
    io_req_t ior;
    io_return_t result;

    if (device->state != DEV_STATE_OPEN)
        return (D_NO_SUCH_DEVICE);

    /* XXX note that a CLOSE may proceed at any point */

    /*
     * Get a buffer to hold the ioreq.
     */
    ior = ds_trap_req_alloc(device, data_count);

    /*
     * Package the write request for the device driver.
     */
    ior->io_device = device;
    ior->io_unit = device->dev_number;
    ior->io_op = IO_WRITE | IO_CALL | IO_LOANED;
    ior->io_mode = mode;
    ior->io_recnum = recnum;
    ior->io_data = (io_buf_ptr_t)
        (vm_offset_t)ior + sizeof(struct io_req);
    ior->io_count = data_count;
    ior->io_total = data_count;
    ior->io_alloc_size = 0;
    ior->io_residual = 0;
    ior->io_error = 0;
    ior->io_done = ds_trap_write_done;
    ior->io_reply_port = IP_NULL;       /* XXX */
    ior->io_reply_port_type = 0;        /* XXX */

    /*
     * Copy the data from user space.
     */
    if (data_count > 0)
        copyin((void *)data, ior->io_data, data_count);

    /*
     * The ior keeps an extra reference for the device.
     */
    mach_device_reference(device);

    /*
     * And do the write.
     */
    result = (*device->dev_ops->d_write)(device->dev_number, ior);

    /*
     * If the IO was queued, delay reply until it is finished.
     */
    if (result == D_IO_QUEUED)
        return (MIG_NO_REPLY);

    /*
     * Remove the extra reference.
     */
    mach_device_deallocate(device);

    kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);

    return (result);
}

static io_return_t
device_writev_trap (mach_device_t device, dev_mode_t mode,
                    recnum_t recnum, io_buf_vec_t *iovec, vm_size_t iocount)
{
    io_req_t ior;
    io_return_t result;
    io_buf_vec_t stack_iovec[16];       /* XXX */
    vm_size_t data_count;
    unsigned i;

    if (device->state != DEV_STATE_OPEN)
        return (D_NO_SUCH_DEVICE);

    /* XXX note that a CLOSE may proceed at any point */

    /*
     * Copyin user addresses.
     */
    if (iocount > 16)
        return KERN_INVALID_VALUE;      /* lame */

    copyin(iovec,
           stack_iovec,
           iocount * sizeof(io_buf_vec_t));

    for (data_count = 0, i = 0; i < iocount; i++)
        data_count += stack_iovec[i].count;

    /*
     * Get a buffer to hold the ioreq.
     */
    ior = ds_trap_req_alloc(device, data_count);

    /*
     * Package the write request for the device driver.
     */
    ior->io_device = device;
    ior->io_unit = device->dev_number;
    ior->io_op = IO_WRITE | IO_CALL | IO_LOANED;
    ior->io_mode = mode;
    ior->io_recnum = recnum;
    ior->io_data = (io_buf_ptr_t)
        (vm_offset_t)ior + sizeof(struct io_req);
    ior->io_count = data_count;
    ior->io_total = data_count;
    ior->io_alloc_size = 0;
    ior->io_residual = 0;
    ior->io_error = 0;
    ior->io_done = ds_trap_write_done;
    ior->io_reply_port = IP_NULL;       /* XXX */
    ior->io_reply_port_type = 0;        /* XXX */

    /*
     * Copy the data from user space.
     */
    if (data_count > 0) {
        vm_offset_t p;

        p = (vm_offset_t) ior->io_data;
        for (i = 0; i < iocount; i++) {
            copyin((void *) stack_iovec[i].data,
                   (void *) p,
                   stack_iovec[i].count);
            p += stack_iovec[i].count;
        }
    }

    /*
     * The ior keeps an extra reference for the device.
     */
    mach_device_reference(device);

    /*
     * And do the write.
     */
    result = (*device->dev_ops->d_write)(device->dev_number, ior);

    /*
     * If the IO was queued, delay reply until it is finished.
     */
    if (result == D_IO_QUEUED)
        return (MIG_NO_REPLY);

    /*
     * Remove the extra reference.
     */
    mach_device_deallocate(device);

    kmem_cache_free(&io_trap_cache, (vm_offset_t) ior);

    return (result);
}

struct device_emulation_ops mach_device_emulation_ops =
{
    (void*) mach_device_reference,
    (void*) mach_device_deallocate,
    (void*) mach_convert_device_to_port,
    device_open,
    device_close,
    device_write,
    device_write_inband,
    device_read,
    device_read_inband,
    device_set_status,
    mach_device_get_status,
    device_set_filter,
    device_map,
    ds_no_senders,
    (void*) device_write_trap,
    (void*) device_writev_trap
};