2 * PPP async serial channel driver for Linux.
4 * Copyright 1999 Paul Mackerras.
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 * This driver provides the encapsulation and framing for sending
12 * and receiving PPP frames over async serial lines. It relies on
13 * the generic PPP layer to give it frames to send and to process
14 * received frames. It implements the PPP line discipline.
16 * Part of the code in this driver was inspired by the old async-only
17 * PPP driver, written by Michael Callahan and Al Longyear, and
18 * subsequently hacked by Paul Mackerras.
21 #include <linux/module.h>
22 #include <linux/kernel.h>
23 #include <linux/skbuff.h>
24 #include <linux/tty.h>
25 #include <linux/netdevice.h>
26 #include <linux/poll.h>
27 #include <linux/ppp_defs.h>
28 #include <linux/if_ppp.h>
29 #include <linux/ppp_channel.h>
30 #include <linux/spinlock.h>
31 #include <linux/init.h>
32 #include <asm/uaccess.h>
34 #define PPP_VERSION "2.4.2"
38 /* Structure for storing local state. */
/* NOTE(review): this is a partial view of struct asyncppp -- the struct
 * header and several intervening members are not visible in this listing. */
40 struct tty_struct *tty;	/* underlying tty; zeroed by ppp_asynctty_close */
47 unsigned long xmit_flags;	/* XMIT_WAKEUP / XMIT_FULL / XMIT_BUSY bits */
50 unsigned int bytes_sent;	/* transmit byte counter */
51 unsigned int bytes_rcvd;	/* receive byte counter */
58 unsigned long last_xmit;	/* jiffies of last transmit, for flag_time spacing */
62 struct sk_buff_head rqueue;	/* completed input frames awaiting the tasklet */
64 struct tasklet_struct tsk;	/* runs ppp_async_process() at softirq time */
67 struct semaphore dead_sem;	/* released once the last reference is dropped */
68 struct ppp_channel chan;	/* interface to generic ppp layer */
69 unsigned char obuf[OBUFSIZE];	/* buffer holding the encoded output frame */
72 /* Bit numbers in xmit_flags */
80 #define SC_PREV_ERROR 4
83 #define SC_RCV_BITS (SC_RCV_B7_1|SC_RCV_B7_0|SC_RCV_ODDP|SC_RCV_EVNP)
/* Minimum spacing (in jiffies) between trailing/leading FLAG characters;
 * consulted by ppp_async_encode().  Default is one second's worth of ticks. */
85 static int flag_time = HZ;
86 MODULE_PARM(flag_time, "i");
87 MODULE_PARM_DESC(flag_time, "ppp_async: interval between flagged packets (in clock ticks)");
88 MODULE_LICENSE("GPL");
89 MODULE_ALIAS_LDISC(N_PPP);
94 static int ppp_async_encode(struct asyncppp *ap);
95 static int ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb);
96 static int ppp_async_push(struct asyncppp *ap);
97 static void ppp_async_flush_output(struct asyncppp *ap);
98 static void ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
99 char *flags, int count);
100 static int ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd,
102 static void ppp_async_process(unsigned long arg);
104 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
105 int len, int inbound);
107 static struct ppp_channel_ops async_ops = {
113 * Routines implementing the PPP line discipline.
117 * We have a potential race on dereferencing tty->disc_data,
118 * because the tty layer provides no locking at all - thus one
119 * cpu could be running ppp_asynctty_receive while another
120 * calls ppp_asynctty_close, which zeroes tty->disc_data and
121 * frees the memory that ppp_asynctty_receive is using. The best
122 * way to fix this is to use a rwlock in the tty struct, but for now
123 * we use a single global rwlock for all ttys in ppp line discipline.
125 static rwlock_t disc_data_lock = RW_LOCK_UNLOCKED;
/*
 * Look up the asyncppp state attached to a tty and take a reference on it.
 * disc_data_lock prevents ppp_asynctty_close() from freeing the struct
 * while we dereference tty->disc_data (see the race comment above).
 * Balanced by ap_put().
 */
127 static struct asyncppp *ap_get(struct tty_struct *tty)
131 read_lock(&disc_data_lock);
134 atomic_inc(&ap->refcnt);
135 read_unlock(&disc_data_lock);
/*
 * Drop a reference taken by ap_get().  When the count reaches zero the
 * closer is allowed to proceed; NOTE(review): the branch body is not
 * visible here -- presumably it up()s ap->dead_sem (cf. init_MUTEX_LOCKED
 * in ppp_asynctty_open).
 */
139 static void ap_put(struct asyncppp *ap)
141 if (atomic_dec_and_test(&ap->refcnt))
146 * Called when a tty is put into PPP line discipline.
/*
 * Allocates the per-tty asyncppp state, initialises its locks, queues and
 * tasklet, then registers it as a channel with the generic PPP layer.
 */
149 ppp_asynctty_open(struct tty_struct *tty)
155 ap = kmalloc(sizeof(*ap), GFP_KERNEL);
159 /* initialize the asyncppp structure */
160 memset(ap, 0, sizeof(*ap));
163 spin_lock_init(&ap->xmit_lock);
164 spin_lock_init(&ap->recv_lock);
/* always escape 0x7d and 0x7e on transmit (cf. PPPIOCSXASYNCMAP below) */
166 ap->xaccm[3] = 0x60000000U;
172 skb_queue_head_init(&ap->rqueue);
173 tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap);
/* one reference for the ldisc itself; dead_sem starts locked and is
 * released only when the final reference goes away */
175 atomic_set(&ap->refcnt, 1);
176 init_MUTEX_LOCKED(&ap->dead_sem);
178 ap->chan.private = ap;
179 ap->chan.ops = &async_ops;
180 ap->chan.mtu = PPP_MRU;
181 err = ppp_register_channel(&ap->chan);
196 * Called when the tty is put into another line discipline
197 * or it hangs up. We have to wait for any cpu currently
198 * executing in any of the other ppp_asynctty_* routines to
199 * finish before we can call ppp_unregister_channel and free
200 * the asyncppp struct. This routine must be called from
201 * process context, not interrupt or softirq context.
204 ppp_asynctty_close(struct tty_struct *tty)
/* taking the write lock excludes all ap_get() readers while we detach */
208 write_lock(&disc_data_lock);
211 write_unlock(&disc_data_lock);
216 * We have now ensured that nobody can start using ap from now
217 * on, but we have to wait for all existing users to finish.
218 * Note that ppp_unregister_channel ensures that no calls to
219 * our channel ops (i.e. ppp_async_send/ioctl) are in progress
220 * by the time it returns.
222 if (!atomic_dec_and_test(&ap->refcnt))
/* NOTE(review): the branch body is not visible; presumably it sleeps
 * on ap->dead_sem until the last ap_put() releases it. */
224 tasklet_kill(&ap->tsk);
226 ppp_unregister_channel(&ap->chan);
/* drop any received frames that were never delivered to ppp_generic */
229 skb_queue_purge(&ap->rqueue);
236 * Read does nothing - no data is ever available this way.
237 * Pppd reads and writes packets via /dev/ppp instead.
240 ppp_asynctty_read(struct tty_struct *tty, struct file *file,
241 unsigned char *buf, size_t count)
/* NOTE(review): body not visible -- presumably returns an error
 * unconditionally, per the comment above. */
247 * Write on the tty does nothing, the packets all come in
248 * from the ppp generic stuff.
251 ppp_asynctty_write(struct tty_struct *tty, struct file *file,
252 const unsigned char *buf, size_t count)
/* NOTE(review): body not visible -- presumably returns an error
 * unconditionally, per the comment above. */
/*
 * Line-discipline ioctl handler.  Serves the PPP channel/unit queries
 * directly; everything we don't consume is forwarded to n_tty_ioctl().
 */
258 ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
259 unsigned int cmd, unsigned long arg)
261 struct asyncppp *ap = ap_get(tty);
/* PPPIOCGCHAN (presumably): report our channel index to userspace */
273 if (put_user(ppp_channel_index(&ap->chan), (int *) arg))
/* PPPIOCGUNIT (presumably): report the attached ppp unit number */
283 if (put_user(ppp_unit_number(&ap->chan), (int *) arg))
290 err = n_tty_ioctl(tty, file, cmd, arg);
294 /* flush our buffers and the serial port's buffer */
295 if (arg == TCIOFLUSH || arg == TCOFLUSH)
296 ppp_async_flush_output(ap);
297 err = n_tty_ioctl(tty, file, cmd, arg);
302 if (put_user(val, (int *) arg))
315 /* No kernel lock - fine */
317 ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
/* receive_room: how much input the ldisc will accept.  NOTE(review):
 * body not visible -- presumably returns a large constant, since
 * ppp_asynctty_receive always consumes everything and unthrottles. */
323 ppp_asynctty_room(struct tty_struct *tty)
329 * This can now be called from hard interrupt level as well
330 * as soft interrupt level or mainline.
333 ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
334 char *cflags, int count)
336 struct asyncppp *ap = ap_get(tty);
/* irqsave because this may run at hard-interrupt level (see above) */
341 spin_lock_irqsave(&ap->recv_lock, flags);
342 ppp_async_input(ap, buf, cflags, count);
343 spin_unlock_irqrestore(&ap->recv_lock, flags);
/* any frames completed by ppp_async_input are handed to ppp_generic
 * from the tasklet, i.e. at softirq time */
344 if (skb_queue_len(&ap->rqueue))
345 tasklet_schedule(&ap->tsk);
/* we consumed everything, so tell the driver to keep data coming */
347 if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
348 && tty->driver->unthrottle)
349 tty->driver->unthrottle(tty);
/*
 * The tty driver has room in its output buffer again: set XMIT_WAKEUP
 * and let the tasklet call ppp_async_push() to send more data.
 */
353 ppp_asynctty_wakeup(struct tty_struct *tty)
355 struct asyncppp *ap = ap_get(tty);
357 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
360 set_bit(XMIT_WAKEUP, &ap->xmit_flags);
361 tasklet_schedule(&ap->tsk);
/* Line-discipline entry points, registered for N_PPP in ppp_async_init. */
366 static struct tty_ldisc ppp_ldisc = {
367 .owner = THIS_MODULE,
368 .magic = TTY_LDISC_MAGIC,
370 .open = ppp_asynctty_open,
371 .close = ppp_asynctty_close,
372 .read = ppp_asynctty_read,
373 .write = ppp_asynctty_write,
374 .ioctl = ppp_asynctty_ioctl,
375 .poll = ppp_asynctty_poll,
376 .receive_room = ppp_asynctty_room,
377 .receive_buf = ppp_asynctty_receive,
378 .write_wakeup = ppp_asynctty_wakeup,
/* module init (fragment): register the PPP line discipline with the
 * tty layer; logs and NOTE(review) presumably returns err on failure */
386 err = tty_register_ldisc(N_PPP, &ppp_ldisc);
388 printk(KERN_ERR "PPP_async: error %d registering line disc.\n",
394 * The following routines provide the PPP channel interface.
/*
 * Channel-level ioctl handler invoked by ppp_generic (PPPIOC* commands
 * that concern the async encapsulation: flags, MRU, async character maps).
 */
397 ppp_async_ioctl(struct ppp_channel *chan, unsigned int cmd, unsigned long arg)
399 struct asyncppp *ap = chan->private;
/* get flags: merge in the receive-quality bits (SC_RCV_BITS) */
406 val = ap->flags | ap->rbits;
407 if (put_user(val, (int *) arg))
/* set flags: rbits are shared with the receive path, so take recv_lock */
412 if (get_user(val, (int *) arg))
414 ap->flags = val & ~SC_RCV_BITS;
415 spin_lock_irq(&ap->recv_lock);
416 ap->rbits = val & SC_RCV_BITS;
417 spin_unlock_irq(&ap->recv_lock);
/* xaccm[0]: transmit async control character map for chars 0-31 */
421 case PPPIOCGASYNCMAP:
422 if (put_user(ap->xaccm[0], (u32 *) arg))
426 case PPPIOCSASYNCMAP:
427 if (get_user(ap->xaccm[0], (u32 *) arg))
/* raccm: receive async control character map */
432 case PPPIOCGRASYNCMAP:
433 if (put_user(ap->raccm, (u32 *) arg))
437 case PPPIOCSRASYNCMAP:
438 if (get_user(ap->raccm, (u32 *) arg))
/* extended (256-bit) transmit map */
443 case PPPIOCGXASYNCMAP:
444 if (copy_to_user((void __user *) arg, ap->xaccm, sizeof(ap->xaccm)))
448 case PPPIOCSXASYNCMAP:
449 if (copy_from_user(accm, (void __user *) arg, sizeof(accm)))
451 accm[2] &= ~0x40000000U; /* can't escape 0x5e */
452 accm[3] |= 0x60000000U; /* must escape 0x7d, 0x7e */
453 memcpy(ap->xaccm, accm, sizeof(ap->xaccm));
/* get/set receive MRU */
458 if (put_user(ap->mru, (int *) arg))
463 if (get_user(val, (int *) arg))
479 * This is called at softirq level to deliver received packets
480 * to the ppp_generic code, and to tell the ppp_generic code
481 * if we can accept more output now.
/* Tasklet body (scheduled from receive/wakeup paths). */
483 static void ppp_async_process(unsigned long arg)
485 struct asyncppp *ap = (struct asyncppp *) arg;
488 /* process received packets */
489 while ((skb = skb_dequeue(&ap->rqueue)) != NULL) {
/* report a framing/FCS error on this channel before the frame itself */
491 ppp_input_error(&ap->chan, 0);
492 ppp_input(&ap->chan, skb);
495 /* try to push more stuff out */
/* if ppp_async_push() made progress, ppp_generic may queue more output */
496 if (test_bit(XMIT_WAKEUP, &ap->xmit_flags) && ppp_async_push(ap))
497 ppp_output_wakeup(&ap->chan);
501 * Procedures for encapsulation and framing.
504 u16 ppp_crc16_table[256] = {
505 0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf,
506 0x8c48, 0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7,
507 0x1081, 0x0108, 0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e,
508 0x9cc9, 0x8d40, 0xbfdb, 0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876,
509 0x2102, 0x308b, 0x0210, 0x1399, 0x6726, 0x76af, 0x4434, 0x55bd,
510 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e, 0xfae7, 0xc87c, 0xd9f5,
511 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e, 0x54b5, 0x453c,
512 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd, 0xc974,
513 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
514 0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3,
515 0x5285, 0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a,
516 0xdecd, 0xcf44, 0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72,
517 0x6306, 0x728f, 0x4014, 0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9,
518 0xef4e, 0xfec7, 0xcc5c, 0xddd5, 0xa96a, 0xb8e3, 0x8a78, 0x9bf1,
519 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3, 0x242a, 0x16b1, 0x0738,
520 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862, 0x9af9, 0x8b70,
521 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e, 0xf0b7,
522 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
523 0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036,
524 0x18c1, 0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e,
525 0xa50a, 0xb483, 0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5,
526 0x2942, 0x38cb, 0x0a50, 0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd,
527 0xb58b, 0xa402, 0x9699, 0x8710, 0xf3af, 0xe226, 0xd0bd, 0xc134,
528 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7, 0x6e6e, 0x5cf5, 0x4d7c,
529 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1, 0xa33a, 0xb2b3,
530 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72, 0x3efb,
531 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
532 0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a,
533 0xe70e, 0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1,
534 0x6b46, 0x7acf, 0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9,
535 0xf78f, 0xe606, 0xd49d, 0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330,
536 0x7bc7, 0x6a4e, 0x58d5, 0x495c, 0x3de3, 0x2c6a, 0x1ef1, 0x0f78
/* The 256-entry table above is the standard PPP FCS-16 lookup table
 * (cf. RFC 1662 Appendix C); PPP_FCS indexes it through the fcstab
 * alias.  Exported so other PPP modules can share it -- presumably the
 * sync tty driver; confirm against the rest of the tree. */
538 EXPORT_SYMBOL(ppp_crc16_table);
539 #define fcstab ppp_crc16_table /* for PPP_FCS macro */
542 * Procedure to encode the data for async serial transmission.
543 * Does octet stuffing (escaping), puts the address/control bytes
544 * on if A/C compression is disabled, and does protocol compression.
545 * Assumes ap->tpkt != 0 on entry.
546 * Returns 1 if we finished the current frame, 0 otherwise.
/* Emit byte c into buf, escaping it when it appears in the transmit
 * ACCM (xaccm) or, for LCP frames, whenever it is a control character.
 * NOTE(review): the macro's continuation lines are not visible here. */
549 #define PUT_BYTE(ap, buf, c, islcp) do { \
550 if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
551 *buf++ = PPP_ESCAPE; \
/* Encode (escape + frame) the pending packet ap->tpkt into ap->obuf;
 * see the header comment above PUT_BYTE for the contract. */
558 ppp_async_encode(struct asyncppp *ap)
560 int fcs, i, count, c, proto;
561 unsigned char *buf, *buflim;
569 data = ap->tpkt->data;
570 count = ap->tpkt->len;
/* protocol field is the first two bytes, network order */
572 proto = (data[0] << 8) + data[1];
575 * LCP packets with code values between 1 (configure-request)
576 * and 7 (code-reject) must be sent as though no options
577 * had been negotiated.
579 islcp = proto == PPP_LCP && 1 <= data[2] && data[2] <= 7;
/* snoop outbound LCP so we can react to the peer's configure-ack */
583 async_lcp_peek(ap, data, count, 0);
586 * Start of a new packet - insert the leading FLAG
587 * character if necessary.
589 if (islcp || flag_time == 0
590 || jiffies - ap->last_xmit >= flag_time)
592 ap->last_xmit = jiffies;
596 * Put in the address/control bytes if necessary
598 if ((ap->flags & SC_COMP_AC) == 0 || islcp) {
599 PUT_BYTE(ap, buf, 0xff, islcp);
600 fcs = PPP_FCS(fcs, 0xff);
601 PUT_BYTE(ap, buf, 0x03, islcp);
602 fcs = PPP_FCS(fcs, 0x03);
607 * Once we put in the last byte, we need to put in the FCS
608 * and closing flag, so make sure there is at least 7 bytes
609 * of free space in the output buffer.
611 buflim = ap->obuf + OBUFSIZE - 6;
612 while (i < count && buf < buflim) {
/* NOTE(review): c is loaded from data[i] on a line not visible here */
614 if (i == 1 && c == 0 && (ap->flags & SC_COMP_PROT))
615 continue; /* compress protocol field */
616 fcs = PPP_FCS(fcs, c);
617 PUT_BYTE(ap, buf, c, islcp);
622 * Remember where we are up to in this packet.
631 * We have finished the packet. Add the FCS and flag.
/* FCS is transmitted complemented, low byte first (standard PPP) */
635 PUT_BYTE(ap, buf, c, islcp);
636 c = (fcs >> 8) & 0xff;
637 PUT_BYTE(ap, buf, c, islcp);
647 * Transmit-side routines.
651 * Send a packet to the peer over an async tty line.
652 * Returns 1 iff the packet was accepted.
653 * If the packet was not accepted, we will call ppp_output_wakeup
654 * at some later time.
657 ppp_async_send(struct ppp_channel *chan, struct sk_buff *skb)
659 struct asyncppp *ap = chan->private;
/* XMIT_FULL marks that a packet is already pending encode/transmit */
663 if (test_and_set_bit(XMIT_FULL, &ap->xmit_flags))
664 return 0; /* already full */
673 * Push as much data as possible out to the tty.
/* Returns nonzero (done) when it has drained the pending packet --
 * NOTE(review): the return statements themselves are not visible. */
676 ppp_async_push(struct asyncppp *ap)
678 int avail, sent, done = 0;
679 struct tty_struct *tty = ap->tty;
683 * We can get called recursively here if the tty write
684 * function calls our wakeup function. This can happen
685 * for example on a pty with both the master and slave
686 * set to PPP line discipline.
687 * We use the XMIT_BUSY bit to detect this and get out,
688 * leaving the XMIT_WAKEUP bit set to tell the other
689 * instance that it may now be able to write more.
691 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
693 spin_lock_bh(&ap->xmit_lock);
695 if (test_and_clear_bit(XMIT_WAKEUP, &ap->xmit_flags))
/* push any already-encoded bytes [optr, olim) to the tty driver */
697 if (!tty_stuffed && ap->optr < ap->olim) {
698 avail = ap->olim - ap->optr;
699 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
700 sent = tty->driver->write(tty, 0, ap->optr, avail);
702 goto flush; /* error, e.g. loss of CD */
/* output buffer drained; encode the next chunk of the pending packet */
708 if (ap->optr >= ap->olim && ap->tpkt != 0) {
709 if (ppp_async_encode(ap)) {
710 /* finished processing ap->tpkt */
711 clear_bit(XMIT_FULL, &ap->xmit_flags);
717 * We haven't made any progress this time around.
718 * Clear XMIT_BUSY to let other callers in, but
719 * after doing so we have to check if anyone set
720 * XMIT_WAKEUP since we last checked it. If they
721 * did, we should try again to set XMIT_BUSY and go
722 * around again in case XMIT_BUSY was still set when
723 * the other caller tried.
725 clear_bit(XMIT_BUSY, &ap->xmit_flags);
726 /* any more work to do? if not, exit the loop */
727 if (!(test_bit(XMIT_WAKEUP, &ap->xmit_flags)
728 || (!tty_stuffed && ap->tpkt != 0)))
730 /* more work to do, see if we can do it now */
731 if (test_and_set_bit(XMIT_BUSY, &ap->xmit_flags))
734 spin_unlock_bh(&ap->xmit_lock);
/* flush: tty write failed -- discard pending output */
738 clear_bit(XMIT_BUSY, &ap->xmit_flags);
742 clear_bit(XMIT_FULL, &ap->xmit_flags);
746 spin_unlock_bh(&ap->xmit_lock);
751 * Flush output from our internal buffers.
752 * Called for the TCFLSH ioctl.
755 ppp_async_flush_output(struct asyncppp *ap)
759 spin_lock_bh(&ap->xmit_lock);
/* drop the partially-sent packet, if any, and mark the channel free */
761 if (ap->tpkt != NULL) {
764 clear_bit(XMIT_FULL, &ap->xmit_flags);
767 spin_unlock_bh(&ap->xmit_lock);
/* tell ppp_generic it may hand us a new packet */
769 ppp_output_wakeup(&ap->chan);
773 * Receive-side routines.
776 /* see how many ordinary chars there are at the start of buf */
/* "Ordinary" = not FLAG, not ESCAPE, and not a control char the peer
 * asked us to ignore via the receive ACCM (raccm). */
778 scan_ordinary(struct asyncppp *ap, const unsigned char *buf, int count)
782 for (i = 0; i < count; ++i) {
784 if (c == PPP_ESCAPE || c == PPP_FLAG
785 || (c < 0x20 && (ap->raccm & (1 << c)) != 0))
791 /* called when a flag is seen - do end-of-packet processing */
/* Validates the accumulated skb (FCS, minimum length), strips the FCS
 * and any compressed address/control/protocol fields, then queues it
 * for delivery by the tasklet. */
793 process_input_packet(struct asyncppp *ap)
797 unsigned int len, fcs, proto;
/* a pending escape at frame end, or a frame already marked bad,
 * means the whole frame is discarded */
800 if (ap->state & (SC_TOSS | SC_ESCAPE))
804 return; /* 0-length packet */
810 goto err; /* too short */
/* FCS over the whole frame including the trailing FCS bytes must
 * equal the residue constant PPP_GOODFCS */
812 for (; len > 0; --len)
813 fcs = PPP_FCS(fcs, *p++);
814 if (fcs != PPP_GOODFCS)
815 goto err; /* bad FCS */
816 skb_trim(skb, skb->len - 2);
818 /* check for address/control and protocol compression */
820 if (p[0] == PPP_ALLSTATIONS && p[1] == PPP_UI) {
821 /* chop off address/control */
824 p = skb_pull(skb, 2);
828 /* protocol is compressed */
/* restore the high protocol byte that the peer omitted */
829 skb_push(skb, 1)[0] = 0;
833 proto = (proto << 8) + p[1];
/* snoop inbound LCP so we can react to configure-acks */
834 if (proto == PPP_LCP)
835 async_lcp_peek(ap, p, skb->len, 1);
838 /* queue the frame to be processed */
/* stash the receive-quality state bits for the tasklet */
839 skb->cb[0] = ap->state;
840 skb_queue_tail(&ap->rqueue, skb);
846 /* frame had an error, remember that, reset SC_TOSS & SC_ESCAPE */
847 ap->state = SC_PREV_ERROR;
852 /* called when the tty driver has data for us. */
/* Core receive state machine: unstuffs escapes, accumulates frame bytes
 * into an skb, and calls process_input_packet() on each FLAG.
 * Runs under ap->recv_lock (see ppp_asynctty_receive). */
854 ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
855 char *flags, int count)
858 int c, i, j, n, s, f;
861 /* update bits used for 8-bit cleanness detection */
862 if (~ap->rbits & SC_RCV_BITS) {
864 for (i = 0; i < count; ++i) {
/* skip chars the driver flagged as errored */
866 if (flags != 0 && flags[i] != 0)
868 s |= (c & 0x80)? SC_RCV_B7_1: SC_RCV_B7_0;
/* parity of c via 4-bit fold + 0x6996 parity-nibble lookup */
869 c = ((c >> 4) ^ c) & 0xf;
870 s |= (0x6996 & (1 << c))? SC_RCV_ODDP: SC_RCV_EVNP;
876 /* scan through and see how many chars we can do in bulk */
/* escaped ESCAPE would unstuff to a non-ordinary char, so handle it
 * via the slow path */
877 if ((ap->state & SC_ESCAPE) && buf[0] == PPP_ESCAPE)
880 n = scan_ordinary(ap, buf, count);
883 if (flags != 0 && (ap->state & SC_TOSS) == 0) {
884 /* check the flags to see if any char had an error */
885 for (j = 0; j < n; ++j)
886 if ((f = flags[j]) != 0)
/* error in this run: discard the rest of the frame */
891 ap->state |= SC_TOSS;
893 } else if (n > 0 && (ap->state & SC_TOSS) == 0) {
894 /* stuff the chars in the skb */
/* +2 leaves room for the alignment reserve below */
897 skb = dev_alloc_skb(ap->mru + PPP_HDRLEN + 2);
900 /* Try to get the payload 4-byte aligned */
901 if (buf[0] != PPP_ALLSTATIONS)
902 skb_reserve(skb, 2 + (buf[0] & 1));
905 if (n > skb_tailroom(skb)) {
906 /* packet overflowed MRU */
907 ap->state |= SC_TOSS;
909 sp = skb_put(skb, n);
/* a pending escape applies to the first byte of this run;
 * NOTE(review): the XOR with 0x20 is on a line not visible here */
911 if (ap->state & SC_ESCAPE) {
913 ap->state &= ~SC_ESCAPE;
/* FLAG terminates the frame */
923 process_input_packet(ap);
924 } else if (c == PPP_ESCAPE) {
925 ap->state |= SC_ESCAPE;
926 } else if (I_IXON(ap->tty)) {
/* honour software flow control chars rather than framing them */
927 if (c == START_CHAR(ap->tty))
929 else if (c == STOP_CHAR(ap->tty))
932 /* otherwise it's a char in the recv ACCM */
/* allocation failure: drop the rest of this frame */
943 printk(KERN_ERR "PPPasync: no memory (input pkt)\n");
944 ap->state |= SC_TOSS;
948 * We look at LCP frames going past so that we can notice
949 * and react to the LCP configure-ack from the peer.
950 * In the situation where the peer has been sent a configure-ack
951 * already, LCP is up once it has sent its configure-ack
952 * so the immediately following packet can be sent with the
953 * configured LCP options. This allows us to process the following
954 * packet correctly without pppd needing to respond quickly.
956 * We only respond to the received configure-ack if we have just
957 * sent a configure-request, and the configure-ack contains the
958 * same data (this is checked using a 16-bit crc of the data).
960 #define CONFREQ 1 /* LCP code field values */
962 #define LCP_MRU 1 /* LCP option numbers */
963 #define LCP_ASYNCMAP 2
/* data points at the frame starting at the protocol field;
 * inbound selects received (1) vs transmitted (0) direction */
965 static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
966 int len, int inbound)
968 int dlen, fcs, i, code;
971 data += 2; /* skip protocol bytes */
973 if (len < 4) /* 4 = code, ID, length */
976 if (code != CONFACK && code != CONFREQ)
/* LCP length field, network order */
978 dlen = (data[2] << 8) + data[3];
980 return; /* packet got truncated or length is bogus */
982 if (code == (inbound? CONFACK: CONFREQ)) {
984 * sent confreq or received confack:
985 * calculate the crc of the data from the ID field on.
988 for (i = 1; i < dlen; ++i)
989 fcs = PPP_FCS(fcs, data[i]);
992 /* outbound confreq - remember the crc for later */
997 /* received confack, check the crc */
1003 return; /* not interested in received confreq */
1005 /* process the options in the confack */
1008 /* data[0] is code, data[1] is length */
1009 while (dlen >= 2 && dlen >= data[1]) {
/* LCP_MRU option: 16-bit value */
1012 val = (data[2] << 8) + data[3];
/* LCP_ASYNCMAP option: 32-bit value */
1019 val = (data[2] << 24) + (data[3] << 16)
1020 + (data[4] << 8) + data[5];
/* Module exit: unregister the N_PPP line discipline (passing a NULL
 * ldisc to tty_register_ldisc removes the registration). */
1032 static void __exit ppp_async_cleanup(void)
1034 if (tty_register_ldisc(N_PPP, NULL) != 0)
1035 printk(KERN_ERR "failed to unregister PPP line discipline\n");
1038 module_init(ppp_async_init);
1039 module_exit(ppp_async_cleanup);