chario.c

/*
 * Mach Operating System
 * Copyright (c) 1993-1988 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: David B. Golub, Carnegie Mellon University
 * Date:   8/88
 *
 * TTY io.
 * Compatibility with old TTY device drivers.
 */
#include <mach/kern_return.h>
#include <mach/mig_errors.h>
#include <mach/vm_param.h>
#include <machine/machspl.h>            /* spl definitions */
#include <ipc/ipc_port.h>
#include <kern/lock.h>
#include <kern/queue.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_user.h>
#include <device/device_types.h>
#include <device/io_req.h>
#include <device/ds_routines.h>
#include <device/device_reply.user.h>
#include <device/chario.h>
#include <device/tty.h>
/* If you change these, check that tty_outq_size and tty_inq_size
 * are greater than the largest tthiwat entry.
 */
short tthiwat[NSPEEDS] =
    { 100, 100, 100, 100, 100, 100, 100, 200, 200, 400, 400, 400, 650, 650,
      1300, 2000, 2000, 2000 };
short ttlowat[NSPEEDS] =
    {  30,  30,  30,  30,  30,  30,  30,  50,  50, 120, 120, 120, 125, 125,
       125, 125, 125, 125 };
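
/*
 * tthiwat/ttlowat are per-speed high and low water marks for the output
 * queue: char_write delays its reply while t_outq.c_cc is above
 * TTHIWAT(tp), and ttstart/tty_output complete delayed writes once the
 * queue drains to TTLOWAT(tp) or below.
 */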

/*
 * Fake 'line discipline' switch for the benefit of old code
 * that wants to call through it.
 */
struct ldisc_switch linesw[] = {
    {
        char_read,
        char_write,
        ttyinput,
        ttymodem,
        tty_output
    }
};

/*
 * Sizes for input and output circular buffers.
 */
const unsigned int tty_inq_size = 4096;    /* big nuf */
const unsigned int tty_outq_size = 2048;   /* must be bigger than tthiwat */
boolean_t pdma_default = TRUE;             /* turn pseudo dma on by default */

/*
 * compute pseudo-dma tables
 */
int pdma_timeouts[NSPEEDS];                /* how many ticks in timeout */
int pdma_water_mark[NSPEEDS];
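
/*
 * The pseudo-dma (TS_MIN) scheme batches input: rather than completing a
 * pending read on every character, ttyinput lets characters accumulate in
 * t_inq until either pdma_water_mark[speed] characters are buffered or the
 * pdma_timeouts[speed] timeout (ttypush) fires without new input arriving.
 */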

void chario_init(void)
{
    /* The basic idea with the timeouts is to allow enough
       time for a character to show up if data is coming in at full data
       rate, plus a little slack.  2 ticks is considered slack.
       Below 300 baud we just glob a character at a time. */
#define _PR(x) ((hz/x) + 2)

    int i;

    for (i = B0; i < B300; i++)
        pdma_timeouts[i] = 0;

    pdma_timeouts[B300] = _PR(30);
    pdma_timeouts[B600] = _PR(60);
    pdma_timeouts[B1200] = _PR(120);
    pdma_timeouts[B1800] = _PR(180);
    pdma_timeouts[B2400] = _PR(240);
    pdma_timeouts[B4800] = _PR(480);
    pdma_timeouts[B9600] = _PR(960);
    pdma_timeouts[EXTA] = _PR(1440);      /* >14400 baud */
    pdma_timeouts[EXTB] = _PR(1920);      /* >19200 baud */
    pdma_timeouts[B57600] = _PR(5760);
    pdma_timeouts[B115200] = _PR(11520);
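
    /*
     * _PR's argument is the character rate (roughly baud/10): e.g. 9600
     * baud is about 960 characters per second, so the timeout is
     * hz/960 + 2 ticks.  With hz = 100 (a common tick rate) the division
     * truncates to zero and only the 2 ticks of slack remain.
     */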

    for (i = B0; i < B300; i++)
        pdma_water_mark[i] = 0;

    /* for the slow speeds, we try to buffer 0.02 of the baud rate
       (20% of the character rate).  For the faster lines,
       we try to buffer 1/2 the input queue size */
#undef  _PR
#define _PR(x) (0.20 * x)

    pdma_water_mark[B300] = _PR(120);
    pdma_water_mark[B600] = _PR(120);
    pdma_water_mark[B1200] = _PR(120);
    pdma_water_mark[B1800] = _PR(180);
    pdma_water_mark[B2400] = _PR(240);
    pdma_water_mark[B4800] = _PR(480);

    i = tty_inq_size/2;
    pdma_water_mark[B9600] = i;
    pdma_water_mark[EXTA] = i;            /* >14400 baud */
    pdma_water_mark[EXTB] = i;            /* >19200 baud */
    pdma_water_mark[B57600] = i;
    pdma_water_mark[B115200] = i;
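
    /*
     * For example, at 2400 baud the mark is 0.20 * 240 = 48 characters;
     * at 9600 baud and above it is simply tty_inq_size/2 = 2048.
     */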

    return;
}

/*
 * Open TTY, waiting for CARR_ON.
 * No locks may be held.
 * May run on any CPU.
 */
io_return_t char_open(
    int           dev,
    struct tty   *tp,
    dev_mode_t    mode,
    io_req_t      ior)
{
    spl_t         s;
    io_return_t   rc = D_SUCCESS;

    s = spltty();
    simple_lock(&tp->t_lock);

    tp->t_dev = dev;

    if (tp->t_mctl)
        (*tp->t_mctl)(tp, TM_DTR, DMSET);

    if (pdma_default)
        tp->t_state |= TS_MIN;

    if ((tp->t_state & TS_CARR_ON) == 0) {
        /*
         * No carrier.
         */
        if (mode & D_NODELAY) {
            tp->t_state |= TS_ONDELAY;
        }
        else {
            /*
             * Don't return from open until carrier detected.
             */
            tp->t_state |= TS_WOPEN;

            ior->io_dev_ptr = (char *)tp;

            queue_delayed_reply(&tp->t_delayed_open, ior, char_open_done);
            rc = D_IO_QUEUED;
            goto out;
        }
    }
    tp->t_state |= TS_ISOPEN;
    if (tp->t_mctl)
        (*tp->t_mctl)(tp, TM_RTS, DMBIS);
out:
    simple_unlock(&tp->t_lock);
    splx(s);
    return rc;
}
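
/*
 * Note on the delayed-reply pattern used throughout this file: returning
 * D_IO_QUEUED tells the device server not to reply yet; the ior has been
 * queued on the tty with an io_done callback (char_open_done,
 * char_read_done, char_write_done) that sends the reply once the awaited
 * condition holds.
 */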

/*
 * Retry wait for CARR_ON for open.
 * No locks may be held.
 * May run on any CPU.
 */
boolean_t char_open_done(
    io_req_t      ior)
{
    struct tty   *tp = (struct tty *)ior->io_dev_ptr;
    spl_t         s = spltty();

    simple_lock(&tp->t_lock);
    if ((tp->t_state & TS_ISOPEN) == 0) {
        queue_delayed_reply(&tp->t_delayed_open, ior, char_open_done);
        simple_unlock(&tp->t_lock);
        splx(s);
        return FALSE;
    }

    tp->t_state |= TS_ISOPEN;
    tp->t_state &= ~TS_WOPEN;

    if (tp->t_mctl)
        (*tp->t_mctl)(tp, TM_RTS, DMBIS);

    simple_unlock(&tp->t_lock);
    splx(s);

    ior->io_error = D_SUCCESS;
    (void) ds_open_done(ior);
    return TRUE;
}

boolean_t tty_close_open_reply(
    io_req_t      ior)
{
    ior->io_error = D_DEVICE_DOWN;
    (void) ds_open_done(ior);
    return TRUE;
}

/*
 * Write to TTY.
 * No locks may be held.
 * Calls device start routine; must already be on master if
 * device needs to run on master.
 */
io_return_t char_write(
    struct tty   *tp,
    io_req_t      ior)
{
    spl_t         s;
    int           count;
    char         *data;
    vm_offset_t   addr = 0;
    io_return_t   rc = D_SUCCESS;

    data  = ior->io_data;
    count = ior->io_count;
    if (count == 0)
        return rc;

    if (!(ior->io_op & IO_INBAND)) {
        /*
         * Copy out-of-line data into kernel address space.
         * Since data is copied as page list, it will be
         * accessible.
         */
        vm_map_copy_t copy = (vm_map_copy_t) data;
        kern_return_t kr;

        kr = vm_map_copyout(device_io_map, &addr, copy);
        if (kr != KERN_SUCCESS)
            return kr;
        data = (char *) addr;
    }

    /*
     * Check for tty operating.
     */
    s = spltty();
    simple_lock(&tp->t_lock);

    if ((tp->t_state & TS_CARR_ON) == 0) {

        if ((tp->t_state & TS_ONDELAY) == 0) {
            /*
             * No delayed writes - tell caller that device is down
             */
            rc = D_IO_ERROR;
            goto out;
        }

        if (ior->io_mode & D_NOWAIT) {
            rc = D_WOULD_BLOCK;
            goto out;
        }
    }

    /*
     * Copy data into the output buffer.
     * Report the amount not copied.
     */
    ior->io_residual = b_to_q(data, count, &tp->t_outq);

    /*
     * Start hardware output.
     */
    tp->t_state &= ~TS_TTSTOP;
    tty_output(tp);

    if (tp->t_outq.c_cc > TTHIWAT(tp) ||
        (tp->t_state & TS_CARR_ON) == 0) {

        /*
         * Do not send reply until some characters have been sent.
         */
        ior->io_dev_ptr = (char *)tp;

        queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done);
        rc = D_IO_QUEUED;
    }
out:
    simple_unlock(&tp->t_lock);
    splx(s);

    if (!(ior->io_op & IO_INBAND))
        (void) vm_deallocate(device_io_map, addr, ior->io_count);
    return rc;
}

/*
 * Retry wait for output queue emptied, for write.
 * No locks may be held.
 * May run on any CPU.
 */
boolean_t char_write_done(
    io_req_t      ior)
{
    struct tty   *tp = (struct tty *)ior->io_dev_ptr;
    spl_t         s = spltty();

    simple_lock(&tp->t_lock);
    if (tp->t_outq.c_cc > TTHIWAT(tp) ||
        (tp->t_state & TS_CARR_ON) == 0) {

        queue_delayed_reply(&tp->t_delayed_write, ior, char_write_done);
        simple_unlock(&tp->t_lock);
        splx(s);
        return FALSE;
    }
    simple_unlock(&tp->t_lock);
    splx(s);

    if (IP_VALID(ior->io_reply_port)) {
        (void) (*((ior->io_op & IO_INBAND) ?
                  ds_device_write_reply_inband :
                  ds_device_write_reply))(ior->io_reply_port,
                                          ior->io_reply_port_type,
                                          ior->io_error,
                                          (int) (ior->io_total -
                                                 ior->io_residual));
    }
    mach_device_deallocate(ior->io_device);
    return TRUE;
}

boolean_t tty_close_write_reply(
    io_req_t      ior)
{
    ior->io_residual = ior->io_count;
    ior->io_error = D_DEVICE_DOWN;
    (void) ds_write_done(ior);
    return TRUE;
}

/*
 * Read from TTY.
 * No locks may be held.
 * May run on any CPU - does not talk to device driver.
 */
io_return_t char_read(
    struct tty   *tp,
    io_req_t      ior)
{
    spl_t         s;
    kern_return_t rc;

    /*
     * Allocate memory for read buffer.
     */
    rc = device_read_alloc(ior, (vm_size_t)ior->io_count);
    if (rc != KERN_SUCCESS)
        return rc;

    s = spltty();
    simple_lock(&tp->t_lock);
    if ((tp->t_state & TS_CARR_ON) == 0) {

        if ((tp->t_state & TS_ONDELAY) == 0) {
            /*
             * No delayed reads - tell caller that device is down
             */
            rc = D_IO_ERROR;
            goto out;
        }

        if (ior->io_mode & D_NOWAIT) {
            rc = D_WOULD_BLOCK;
            goto out;
        }
    }

    if (tp->t_inq.c_cc <= 0 ||
        (tp->t_state & TS_CARR_ON) == 0) {

        ior->io_dev_ptr = (char *)tp;
        queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done);
        rc = D_IO_QUEUED;
        goto out;
    }

    ior->io_residual = ior->io_count - q_to_b(&tp->t_inq,
                                              ior->io_data,
                                              (int)ior->io_count);
    if (tp->t_state & TS_RTS_DOWN) {
        (*tp->t_mctl)(tp, TM_RTS, DMBIS);
        tp->t_state &= ~TS_RTS_DOWN;
    }

out:
    simple_unlock(&tp->t_lock);
    splx(s);
    return rc;
}
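
/*
 * io_residual records how much of the request was not satisfied; the
 * completion path reports io_total - io_residual bytes (see the write
 * reply above), so a short read returns only the data actually present
 * in the input queue.
 */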

/*
 * Retry wait for characters, for read.
 * No locks may be held.
 * May run on any CPU - does not talk to device driver.
 */
boolean_t char_read_done(
    io_req_t      ior)
{
    struct tty   *tp = (struct tty *)ior->io_dev_ptr;
    spl_t         s = spltty();

    simple_lock(&tp->t_lock);

    if (tp->t_inq.c_cc <= 0 ||
        (tp->t_state & TS_CARR_ON) == 0) {

        queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done);
        simple_unlock(&tp->t_lock);
        splx(s);
        return FALSE;
    }

    ior->io_residual = ior->io_count - q_to_b(&tp->t_inq,
                                              ior->io_data,
                                              (int)ior->io_count);
    if (tp->t_state & TS_RTS_DOWN) {
        (*tp->t_mctl)(tp, TM_RTS, DMBIS);
        tp->t_state &= ~TS_RTS_DOWN;
    }

    simple_unlock(&tp->t_lock);
    splx(s);

    (void) ds_read_done(ior);
    return TRUE;
}

boolean_t tty_close_read_reply(
    io_req_t      ior)
{
    ior->io_residual = ior->io_count;
    ior->io_error = D_DEVICE_DOWN;
    (void) ds_read_done(ior);
    return TRUE;
}

/*
 * Close the tty.
 * Tty must be locked (at spltty).
 * Must be on the master CPU iff modem control runs on master.
 */
void ttyclose(
    struct tty   *tp)
{
    io_req_t      ior;

    /*
     * Flush the read and write queues.  Signal
     * the open queue so that those waiting for open
     * to complete will see that the tty is closed.
     */
    while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_read)) != 0) {
        ior->io_done = tty_close_read_reply;
        iodone(ior);
    }
    while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_write)) != 0) {
        ior->io_done = tty_close_write_reply;
        iodone(ior);
    }
    while ((ior = (io_req_t)dequeue_head(&tp->t_delayed_open)) != 0) {
        ior->io_done = tty_close_open_reply;
        iodone(ior);
    }

    /* Close down modem */
    if (tp->t_mctl) {
        (*tp->t_mctl)(tp, TM_BRK|TM_RTS, DMBIC);
        if ((tp->t_state & (TS_HUPCLS|TS_WOPEN)) ||
            (tp->t_state & TS_ISOPEN) == 0)
            (*tp->t_mctl)(tp, TM_HUP, DMSET);
    }

    /* only save buffering bit, and carrier */
    tp->t_state = tp->t_state & (TS_MIN|TS_CARR_ON);
}

/*
 * Port-death routine to clean up reply messages.
 */
boolean_t
tty_queue_clean(
    queue_t           q,
    const ipc_port_t  port,
    boolean_t         (*routine)(io_req_t) )
{
    io_req_t          ior;

    ior = (io_req_t)queue_first(q);
    while (!queue_end(q, (queue_entry_t)ior)) {
        if (ior->io_reply_port == port) {
            remqueue(q, (queue_entry_t)ior);
            ior->io_done = routine;
            iodone(ior);
            return TRUE;
        }
        ior = ior->io_next;
    }
    return FALSE;
}

/*
 * Handle port-death (dead reply port) for tty.
 * No locks may be held.
 * May run on any CPU.
 */
boolean_t
tty_portdeath(
    struct tty       *tp,
    const ipc_port_t  port)
{
    spl_t             spl = spltty();
    boolean_t         result;

    simple_lock(&tp->t_lock);

    /*
     * The queues may never have been initialized
     */
    if (tp->t_delayed_read.next == 0) {
        result = FALSE;
    }
    else {
        result =
            tty_queue_clean(&tp->t_delayed_read,  port,
                            tty_close_read_reply)
         || tty_queue_clean(&tp->t_delayed_write, port,
                            tty_close_write_reply)
         || tty_queue_clean(&tp->t_delayed_open,  port,
                            tty_close_open_reply);
    }
    simple_unlock(&tp->t_lock);
    splx(spl);

    return result;
}

/*
 * Get TTY status.
 * No locks may be held.
 * May run on any CPU.
 */
io_return_t tty_get_status(
    struct tty   *tp,
    dev_flavor_t  flavor,
    int          *data,      /* pointer to OUT array */
    natural_t    *count)     /* out */
{
    spl_t s;

    switch (flavor) {
    case TTY_STATUS:
    {
        struct tty_status *tsp =
            (struct tty_status *) data;

        if (*count < TTY_STATUS_COUNT)
            return (D_INVALID_OPERATION);

        s = spltty();
        simple_lock(&tp->t_lock);

        tsp->tt_ispeed = tp->t_ispeed;
        tsp->tt_ospeed = tp->t_ospeed;
        tsp->tt_breakc = tp->t_breakc;
        tsp->tt_flags  = tp->t_flags;
        if (tp->t_state & TS_HUPCLS)
            tsp->tt_flags |= TF_HUPCLS;

        simple_unlock(&tp->t_lock);
        splx(s);

        *count = TTY_STATUS_COUNT;
        break;
    }
    default:
        return D_INVALID_OPERATION;
    }
    return D_SUCCESS;
}

/*
 * Set TTY status.
 * No locks may be held.
 * Calls device start or stop routines; must already be on master if
 * device needs to run on master.
 */
io_return_t tty_set_status(
    struct tty   *tp,
    dev_flavor_t  flavor,
    int          *data,
    natural_t     count)
{
    int s;

    switch (flavor) {
    case TTY_FLUSH:
    {
        int flags;

        if (count < TTY_FLUSH_COUNT)
            return D_INVALID_OPERATION;
        flags = *data;
        if (flags == 0)
            flags = D_READ | D_WRITE;

        s = spltty();
        simple_lock(&tp->t_lock);
        tty_flush(tp, flags);
        simple_unlock(&tp->t_lock);
        splx(s);

        break;
    }
    case TTY_STOP:
        /* stop output */
        s = spltty();
        simple_lock(&tp->t_lock);
        if ((tp->t_state & TS_TTSTOP) == 0) {
            tp->t_state |= TS_TTSTOP;
            (*tp->t_stop)(tp, 0);
        }
        simple_unlock(&tp->t_lock);
        splx(s);

        break;

    case TTY_START:
        /* start output */
        s = spltty();
        simple_lock(&tp->t_lock);
        if (tp->t_state & TS_TTSTOP) {
            tp->t_state &= ~TS_TTSTOP;
            tty_output(tp);
        }
        simple_unlock(&tp->t_lock);
        splx(s);

        break;

    case TTY_STATUS:
        /* set special characters and speed */
    {
        struct tty_status *tsp;

        if (count < TTY_STATUS_COUNT)
            return D_INVALID_OPERATION;

        tsp = (struct tty_status *)data;

        if (tsp->tt_ispeed < 0 ||
            tsp->tt_ispeed >= NSPEEDS ||
            tsp->tt_ospeed < 0 ||
            tsp->tt_ospeed >= NSPEEDS)
        {
            return D_INVALID_OPERATION;
        }

        s = spltty();
        simple_lock(&tp->t_lock);

        tp->t_ispeed = tsp->tt_ispeed;
        tp->t_ospeed = tsp->tt_ospeed;
        tp->t_breakc = tsp->tt_breakc;
        tp->t_flags  = tsp->tt_flags & ~TF_HUPCLS;
        if (tsp->tt_flags & TF_HUPCLS)
            tp->t_state |= TS_HUPCLS;

        simple_unlock(&tp->t_lock);
        splx(s);

        break;
    }
    default:
        return D_INVALID_OPERATION;
    }
    return D_SUCCESS;
}

/*
 * [internal]
 * Queue IOR on reply queue, to wait for TTY operation.
 * TTY must be locked (at spltty).
 */
void queue_delayed_reply(
    queue_t       qh,
    io_req_t      ior,
    boolean_t     (*io_done)(io_req_t) )
{
    ior->io_done = io_done;
    enqueue_tail(qh, (queue_entry_t)ior);
}

/*
 * Retry delayed IO operations for TTY.
 * TTY containing queue must be locked (at spltty).
 */
void tty_queue_completion(
    queue_t       qh)
{
    io_req_t      ior;

    while ((ior = (io_req_t)dequeue_head(qh)) != 0) {
        iodone(ior);
    }
}

/*
 * Set the default special characters.
 * Since this routine is called whenever a tty has never been opened,
 * we can initialize the queues here.
 */
void ttychars(
    struct tty   *tp)
{
    if ((tp->t_flags & TS_INIT) == 0) {
        /*
         * Initialize queues
         */
        queue_init(&tp->t_delayed_open);
        queue_init(&tp->t_delayed_read);
        queue_init(&tp->t_delayed_write);

        /*
         * Initialize character buffers
         */
        cb_alloc(&tp->t_inq, tty_inq_size);

        /* if we might do modem flow control */
        if (tp->t_mctl && tp->t_inq.c_hog > 30)
            tp->t_inq.c_hog -= 30;
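        /*
         * Leaving roughly 30 characters of headroom below the buffer
         * limit gives ttyinput room for characters still in flight
         * after it drops RTS once c_cc reaches c_hog.
         */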

        cb_alloc(&tp->t_outq, tty_outq_size);

        /*
         * Mark initialized
         */
        tp->t_state |= TS_INIT;
    }

    tp->t_breakc = 0;
}

/*
 * Flush all TTY queues.
 * Called at spltty, tty already locked.
 * Calls device STOP routine; must already be on master if
 * device needs to run on master.
 */
void tty_flush(
    struct tty   *tp,
    int           rw)
{
    if (rw & D_READ) {
        cb_clear(&tp->t_inq);
        tty_queue_completion(&tp->t_delayed_read);
    }
    if (rw & D_WRITE) {
        tp->t_state &= ~TS_TTSTOP;
        (*tp->t_stop)(tp, rw);
        cb_clear(&tp->t_outq);
        tty_queue_completion(&tp->t_delayed_write);
    }
}

/*
 * Restart character output after a delay timeout.
 * Calls device start routine - must be on master CPU.
 *
 * Timeout routines are called only on master CPU.
 * What if device runs on a different CPU?
 */
void ttrstrt(
    struct tty   *tp)
{
    spl_t s;

    s = spltty();
    simple_lock(&tp->t_lock);

    tp->t_state &= ~TS_TIMEOUT;
    ttstart(tp);

    simple_unlock(&tp->t_lock);
    splx(s);
}

/*
 * Start output on the typewriter.  It is used from the top half
 * after some characters have been put on the output queue,
 * from the interrupt routine to transmit the next
 * character, and after a timeout has finished.
 *
 * Called at spltty, tty already locked.
 * Must be on master CPU if device runs on master.
 */
void ttstart(struct tty *tp)
{
    if ((tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) == 0) {
        /*
         * Start up the hardware again
         */
        (*tp->t_start)(tp);

        /*
         * Wake up those waiting for write completion.
         */
        if (tp->t_outq.c_cc <= TTLOWAT(tp))
            tty_queue_completion(&tp->t_delayed_write);
    }
}

/*
 * Start character output, if the device is not busy or
 * stopped or waiting for a timeout.
 *
 * Called at spltty, tty already locked.
 * Must be on master CPU if device runs on master.
 */
void tty_output(
    struct tty   *tp)
{
    if ((tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) == 0) {
        /*
         * Not busy.  Start output.
         */
        (*tp->t_start)(tp);

        /*
         * Wake up those waiting for write completion.
         */
        if (tp->t_outq.c_cc <= TTLOWAT(tp))
            tty_queue_completion(&tp->t_delayed_write);
    }
}

/*
 * Send any buffered recvd chars up to user
 */
void ttypush(
    void         *_tp)
{
    struct tty   *tp = _tp;
    spl_t         s = spltty();
    int           state;

    simple_lock(&tp->t_lock);

    /*
     * The pdma timeout has gone off.
     * If no character has been received since the timeout
     * was set, push any pending characters up.
     * If any characters were received in the last interval
     * then just reset the timeout and the character received bit.
     */
    state = tp->t_state;
    if (state & TS_MIN_TO)
    {
        if (state & TS_MIN_TO_RCV)
        {   /* a character was received */
            tp->t_state = state & ~TS_MIN_TO_RCV;
            timeout(ttypush, tp, pdma_timeouts[tp->t_ispeed]);
        }
        else
        {
            tp->t_state = state & ~TS_MIN_TO;
            if (tp->t_inq.c_cc)              /* pending characters */
                tty_queue_completion(&tp->t_delayed_read);
        }
    }
    else
    {
        tp->t_state = state & ~TS_MIN_TO_RCV;    /* sanity */
    }

    simple_unlock(&tp->t_lock);
    splx(s);
}

/*
 * Put input character on input queue.
 *
 * Called at spltty, tty already locked.
 */
void ttyinput(
    unsigned int  c,
    struct tty   *tp)
{
    if (tp->t_inq.c_cc >= tp->t_inq.c_hog) {
        /*
         * Do not want to overflow input queue
         */
        if (tp->t_mctl) {
            (*tp->t_mctl)(tp, TM_RTS, DMBIC);
            tp->t_state |= TS_RTS_DOWN;
        }
        tty_queue_completion(&tp->t_delayed_read);
        return;
    }

    c &= 0xff;

    (void) putc(c, &tp->t_inq);
    if ((tp->t_state & TS_MIN) == 0 ||
        tp->t_inq.c_cc > pdma_water_mark[tp->t_ispeed])
    {
        /*
         * No input buffering, or input minimum exceeded.
         * Grab a request from input queue and queue it
         * to io_done thread.
         */
        if (tp->t_state & TS_MIN_TO) {
            tp->t_state &= ~(TS_MIN_TO|TS_MIN_TO_RCV);
            untimeout(ttypush, tp);
        }
        tty_queue_completion(&tp->t_delayed_read);
    }
    else {
        /*
         * Not enough characters.
         * If no timeout is set, initiate the timeout.
         * Otherwise set the character received during timeout
         * interval flag.
         * One alternative approach would be just to reset the timeout
         * into the future, but this involves making a timeout/untimeout
         * call on every character.
         */
        int ptime = pdma_timeouts[tp->t_ispeed];
        if (ptime > 0)
        {
            if ((tp->t_state & TS_MIN_TO) == 0)
            {
                tp->t_state |= TS_MIN_TO;
                timeout(ttypush, tp, ptime);
            }
            else
            {
                tp->t_state |= TS_MIN_TO_RCV;
            }
        }
    }
}

/*
 * Put many characters on input queue.
 *
 * Called at spltty, tty already locked.
 */
void ttyinput_many(
    struct tty   *tp,
    char         *chars,
    int           count)
{
    /*
     * Do not want to overflow input queue
     */
    if (tp->t_inq.c_cc < tp->t_inq.c_hog)
        count -= b_to_q(chars, count, &tp->t_inq);

    tty_queue_completion(&tp->t_delayed_read);
}

/*
 * Handle modem control transition on a tty.
 * Flag indicates new state of carrier.
 * Returns FALSE if the line should be turned off.
 *
 * Called at spltty, tty already locked.
 */
boolean_t ttymodem(
    struct tty   *tp,
    boolean_t     carrier_up)
{
    if ((tp->t_state & TS_WOPEN) == 0 && (tp->t_flags & MDMBUF)) {
        /*
         * Flow control by carrier.  Carrier down stops
         * output; carrier up restarts output.
         */
        if (carrier_up) {
            tp->t_state &= ~TS_TTSTOP;
            tty_output(tp);
        }
        else if ((tp->t_state & TS_TTSTOP) == 0) {
            tp->t_state |= TS_TTSTOP;
            (*tp->t_stop)(tp, 0);
        }
    }
    else if (carrier_up) {
        /*
         * Carrier now on.
         */
        tp->t_state |= TS_CARR_ON;
        tt_open_wakeup(tp);
    }
    else {
        /*
         * Lost carrier.
         */
        tp->t_state &= ~TS_CARR_ON;
        if (tp->t_state & TS_ISOPEN &&
            (tp->t_flags & NOHANG) == 0)
        {
            /*
             * Hang up TTY if carrier drops.
             * Need to alert users, somehow...
             */
            tty_flush(tp, D_READ|D_WRITE);
            return FALSE;
        }
    }
    return TRUE;
}

/*
 * Similarly, handle transitions on the ClearToSend
 * signal.  Nowadays, it is used by many modems as
 * a flow-control device: they turn it down to stop
 * us from sending more chars.  We do the same with
 * the RequestToSend signal.  [Yes, that is exactly
 * why those signals are defined in the standard.]
 *
 * Tty must be locked and on master.
 */
void
tty_cts(
    struct tty   *tp,
    boolean_t     cts_up)
{
    if (tp->t_state & TS_ISOPEN) {
        if (cts_up) {
            tp->t_state &= ~(TS_TTSTOP|TS_BUSY);
            tty_output(tp);
        } else {
            tp->t_state |= (TS_TTSTOP|TS_BUSY);
            (*tp->t_stop)(tp, D_WRITE);
        }
    }
}