* PPP driver, written by Michael Callahan and Al Longyear, and
* subsequently hacked by Paul Mackerras.
*
- * ==FILEVERSION 20020217==
+ * ==FILEVERSION 20041108==
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/devfs_fs_kernel.h>
#include <linux/netdevice.h>
#include <linux/poll.h>
#include <linux/ppp_defs.h>
#include <linux/rwsem.h>
#include <linux/stddef.h>
#include <linux/device.h>
+#include <linux/mutex.h>
#include <net/slhc_vj.h>
#include <asm/atomic.h>
/*
* Bits in flags: SC_NO_TCP_CCID, SC_CCP_OPEN, SC_CCP_UP, SC_LOOP_TRAFFIC,
- * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP.
+ * SC_MULTILINK, SC_MP_SHORTSEQ, SC_MP_XSHORTSEQ, SC_COMP_TCP, SC_REJ_COMP_TCP,
+ * SC_MUST_COMP
* Bits in rstate: SC_DECOMP_RUN, SC_DC_ERROR, SC_DC_FERROR.
* Bits in xstate: SC_COMP_RUN
*/
#define SC_FLAG_BITS (SC_NO_TCP_CCID|SC_CCP_OPEN|SC_CCP_UP|SC_LOOP_TRAFFIC \
|SC_MULTILINK|SC_MP_SHORTSEQ|SC_MP_XSHORTSEQ \
- |SC_COMP_TCP|SC_REJ_COMP_TCP)
+ |SC_COMP_TCP|SC_REJ_COMP_TCP|SC_MUST_COMP)
/*
* Private data structure for each channel.
void *ptr[CARDMAP_WIDTH];
};
static void *cardmap_get(struct cardmap *map, unsigned int nr);
-static void cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
+static int cardmap_set(struct cardmap **map, unsigned int nr, void *ptr);
static unsigned int cardmap_find_first_free(struct cardmap *map);
static void cardmap_destroy(struct cardmap **map);
/*
- * all_ppp_sem protects the all_ppp_units mapping.
+ * all_ppp_mutex protects the all_ppp_units mapping.
* It also ensures that finding a ppp unit in the all_ppp_units map
* and updating its file.refcnt field is atomic.
*/
-static DECLARE_MUTEX(all_ppp_sem);
+static DEFINE_MUTEX(all_ppp_mutex);
static struct cardmap *all_ppp_units;
static atomic_t ppp_unit_count = ATOMIC_INIT(0);
 * and the atomicity of finding a channel and updating its file.refcnt
* field.
*/
-static spinlock_t all_channels_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(all_channels_lock);
static LIST_HEAD(all_channels);
static LIST_HEAD(new_channels);
static int last_channel_index;
static int ppp_disconnect_channel(struct channel *pch);
static void ppp_destroy_channel(struct channel *pch);
-static struct class_simple *ppp_class;
+static struct class *ppp_class;
/* Translates a PPP protocol number to a NP index (NP == network protocol) */
static inline int proto_to_npindex(int proto)
PPP_MPLS_UC,
PPP_MPLS_MC,
};
-
+
/* Translates an ethertype into an NP index */
static inline int ethertype_to_npindex(int ethertype)
{
struct ppp *ppp;
if (pf != 0) {
- file->private_data = 0;
+ file->private_data = NULL;
if (pf->kind == INTERFACE) {
ppp = PF_TO_PPP(pf);
if (file == ppp->owner)
struct ppp_file *pf = file->private_data;
DECLARE_WAITQUEUE(wait, current);
ssize_t ret;
- struct sk_buff *skb = 0;
+ struct sk_buff *skb = NULL;
ret = count;
ret = 0;
if (pf->dead)
break;
+ if (pf->kind == INTERFACE) {
+ /*
+ * Return 0 (EOF) on an interface that has no
+ * channels connected, unless it is looping
+ * network traffic (demand mode).
+ */
+ struct ppp *ppp = PF_TO_PPP(pf);
+ if (ppp->n_channels == 0
+ && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+ break;
+ }
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
break;
mask |= POLLIN | POLLRDNORM;
if (pf->dead)
mask |= POLLHUP;
+ else if (pf->kind == INTERFACE) {
+ /* see comment in ppp_read */
+ struct ppp *ppp = PF_TO_PPP(pf);
+ if (ppp->n_channels == 0
+ && (ppp->flags & SC_LOOP_TRAFFIC) == 0)
+ mask |= POLLIN | POLLRDNORM;
+ }
+
return mask;
}
if (copy_from_user(&uprog, arg, sizeof(uprog)))
return -EFAULT;
- if (uprog.len > BPF_MAXINSNS)
- return -EINVAL;
-
if (!uprog.len) {
*p = NULL;
return 0;
/* Attach to an existing ppp unit */
if (get_user(unit, p))
break;
- down(&all_ppp_sem);
+ mutex_lock(&all_ppp_mutex);
err = -ENXIO;
ppp = ppp_find_unit(unit);
if (ppp != 0) {
file->private_data = &ppp->file;
err = 0;
}
- up(&all_ppp_sem);
+ mutex_unlock(&all_ppp_mutex);
break;
case PPPIOCATTCHAN:
printk(KERN_INFO "PPP generic driver version " PPP_VERSION "\n");
err = register_chrdev(PPP_MAJOR, "ppp", &ppp_device_fops);
if (!err) {
- ppp_class = class_simple_create(THIS_MODULE, "ppp");
+ ppp_class = class_create(THIS_MODULE, "ppp");
if (IS_ERR(ppp_class)) {
err = PTR_ERR(ppp_class);
goto out_chrdev;
}
- class_simple_device_add(ppp_class, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
- err = devfs_mk_cdev(MKDEV(PPP_MAJOR, 0),
- S_IFCHR|S_IRUSR|S_IWUSR, "ppp");
- if (err)
- goto out_class;
+ device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), "ppp");
}
out:
printk(KERN_ERR "failed to register PPP device (%d)\n", err);
return err;
-out_class:
- class_simple_device_remove(MKDEV(PPP_MAJOR,0));
- class_simple_destroy(ppp_class);
out_chrdev:
unregister_chrdev(PPP_MAJOR, "ppp");
goto out;
ppp_xmit_unlock(ppp);
}
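+/*
+ * Compress the frame in skb, allocating a buffer padded out for the
+ * compressor's expansion and the link-level header.  Returns a new skb
+ * holding the compressed frame, the original skb if the compressor did
+ * not compress it, or NULL if the packet should be dropped (allocation
+ * failure, or the compressor indicated the frame must not be sent, as
+ * MPPE does rather than transmit data unencrypted).
+ */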
+static inline struct sk_buff *
+pad_compress_skb(struct ppp *ppp, struct sk_buff *skb)
+{
+ struct sk_buff *new_skb;
+ int len;
+ int new_skb_size = ppp->dev->mtu +
+ ppp->xcomp->comp_extra + ppp->dev->hard_header_len;
+ int compressor_skb_size = ppp->dev->mtu +
+ ppp->xcomp->comp_extra + PPP_HDRLEN;
+ new_skb = alloc_skb(new_skb_size, GFP_ATOMIC);
+ if (!new_skb) {
+ if (net_ratelimit())
+ printk(KERN_ERR "PPP: no memory (comp pkt)\n");
+ return NULL;
+ }
+ if (ppp->dev->hard_header_len > PPP_HDRLEN)
+ skb_reserve(new_skb,
+ ppp->dev->hard_header_len - PPP_HDRLEN);
+
+ /* compressor still expects A/C bytes in hdr */
+ len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
+ new_skb->data, skb->len + 2,
+ compressor_skb_size);
+ if (len > 0 && (ppp->flags & SC_CCP_UP)) {
+ kfree_skb(skb);
+ skb = new_skb;
+ skb_put(skb, len);
+ skb_pull(skb, 2); /* pull off A/C bytes */
+ } else if (len == 0) {
+ /* didn't compress, or CCP not up yet */
+ kfree_skb(new_skb);
+ new_skb = skb;
+ } else {
+ /*
+ * (len < 0)
+ * MPPE requires that we do not send unencrypted
+ * frames. The compressor will return -1 if we
+ * should drop the frame. We cannot simply test
+ * the compress_proto because MPPE and MPPC share
+ * the same number.
+ */
+ if (net_ratelimit())
+ printk(KERN_ERR "ppp: compressor dropped pkt\n");
+ kfree_skb(skb);
+ kfree_skb(new_skb);
+ new_skb = NULL;
+ }
+ return new_skb;
+}
+
/*
* Compress and send a frame.
* The caller should have locked the xmit path,
/* check if we should pass this packet */
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
- {
- u_int16_t *p = (u_int16_t *) skb_push(skb, 2);
-
- *p = htons(4); /* indicate outbound in DLT_LINUX_SLL */;
- }
+ *skb_push(skb, 2) = 1;
if (ppp->pass_filter
&& sk_run_filter(skb, ppp->pass_filter,
ppp->pass_len) == 0) {
/* try to do packet compression */
if ((ppp->xstate & SC_COMP_RUN) && ppp->xc_state != 0
&& proto != PPP_LCP && proto != PPP_CCP) {
- new_skb = alloc_skb(ppp->dev->mtu + ppp->dev->hard_header_len,
- GFP_ATOMIC);
- if (new_skb == 0) {
- printk(KERN_ERR "PPP: no memory (comp pkt)\n");
+ if (!(ppp->flags & SC_CCP_UP) && (ppp->flags & SC_MUST_COMP)) {
+ if (net_ratelimit())
+ printk(KERN_ERR "ppp: compression required but down - pkt dropped.\n");
goto drop;
}
- if (ppp->dev->hard_header_len > PPP_HDRLEN)
- skb_reserve(new_skb,
- ppp->dev->hard_header_len - PPP_HDRLEN);
-
- /* compressor still expects A/C bytes in hdr */
- len = ppp->xcomp->compress(ppp->xc_state, skb->data - 2,
- new_skb->data, skb->len + 2,
- ppp->dev->mtu + PPP_HDRLEN);
- if (len > 0 && (ppp->flags & SC_CCP_UP)) {
- kfree_skb(skb);
- skb = new_skb;
- skb_put(skb, len);
- skb_pull(skb, 2); /* pull off A/C bytes */
- } else {
- /* didn't compress, or CCP not up yet */
- kfree_skb(new_skb);
- }
+ skb = pad_compress_skb(ppp, skb);
+ if (!skb)
+ goto drop;
}
/*
return;
drop:
- kfree_skb(skb);
+ if (skb)
+ kfree_skb(skb);
++ppp->stats.tx_errors;
}
list = &ppp->channels;
if (list_empty(list)) {
/* nowhere to send the packet, just drop it */
- ppp->xmit_pending = 0;
+ ppp->xmit_pending = NULL;
kfree_skb(skb);
return;
}
spin_lock_bh(&pch->downl);
if (pch->chan) {
if (pch->chan->ops->start_xmit(pch->chan, skb))
- ppp->xmit_pending = 0;
+ ppp->xmit_pending = NULL;
} else {
/* channel got unregistered */
kfree_skb(skb);
- ppp->xmit_pending = 0;
+ ppp->xmit_pending = NULL;
}
spin_unlock_bh(&pch->downl);
return;
return;
#endif /* CONFIG_PPP_MULTILINK */
- ppp->xmit_pending = 0;
+ ppp->xmit_pending = NULL;
kfree_skb(skb);
}
*/
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
- int nch, len, fragsize;
+ int len, fragsize;
int i, bits, hdrlen, mtu;
- int flen, fnb;
+ int flen;
+ int navail, nfree;
+ int nbigger;
unsigned char *p, *q;
struct list_head *list;
struct channel *pch;
struct sk_buff *frag;
struct ppp_channel *chan;
- nch = 0;
+ nfree = 0; /* # channels which have no packet already queued */
+ navail = 0; /* total # of usable channels (not deregistered) */
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
- list = &ppp->channels;
- while ((list = list->next) != &ppp->channels) {
- pch = list_entry(list, struct channel, clist);
- nch += pch->avail = (skb_queue_len(&pch->file.xq) == 0);
- /*
- * If a channel hasn't had a fragment yet, it has to get
- * one before we send any fragments on later channels.
- * If it can't take a fragment now, don't give any
- * to subsequent channels.
- */
- if (!pch->had_frag && !pch->avail) {
- while ((list = list->next) != &ppp->channels) {
- pch = list_entry(list, struct channel, clist);
- pch->avail = 0;
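+	/*
+	 * Classify each channel in pch->avail: 0 means it is unusable
+	 * (being deregistered), 2 means it is free to take the next
+	 * fragment (nothing queued, or it has never carried a fragment
+	 * yet), and 1 means it is usable but already has data queued.
+	 */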
+ i = 0;
+ list_for_each_entry(pch, &ppp->channels, clist) {
+ navail += pch->avail = (pch->chan != NULL);
+ if (pch->avail) {
+ if (skb_queue_empty(&pch->file.xq) ||
+ !pch->had_frag) {
+ pch->avail = 2;
+ ++nfree;
}
- break;
+ if (!pch->had_frag && i < ppp->nxchan)
+ ppp->nxchan = i;
}
+ ++i;
}
- if (nch == 0)
+
+ /*
+ * Don't start sending this packet unless at least half of
+ * the channels are free. This gives much better TCP
+ * performance if we have a lot of channels.
+ */
+ if (nfree == 0 || nfree < navail / 2)
return 0; /* can't take now, leave it in xmit_pending */
/* Do protocol field compression (XXX this should be optional) */
--len;
}
- /* decide on fragment size */
+ /*
+ * Decide on fragment size.
+ * We create a fragment for each free channel regardless of
+ * how small they are (i.e. even 0 length) in order to minimize
+ * the time that it will take to detect when a channel drops
+ * a fragment.
+ */
fragsize = len;
- if (nch > 1) {
- int maxch = ROUNDUP(len, MIN_FRAG_SIZE);
- if (nch > maxch)
- nch = maxch;
- fragsize = ROUNDUP(fragsize, nch);
- }
+ if (nfree > 1)
+ fragsize = ROUNDUP(fragsize, nfree);
+ /* nbigger channels get fragsize bytes, the rest get fragsize-1,
+ except if nbigger==0, then they all get fragsize. */
+ nbigger = len % nfree;
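+	/*
+	 * Illustrative example, assuming every channel MTU is large
+	 * enough: len = 1502 over nfree = 3 free channels gives
+	 * fragsize = 501 and nbigger = 2, so the fragments come out
+	 * as 501 + 501 + 500 bytes.
+	 */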
/* skip to the channel after the one we last used
and start at that one */
+ list = &ppp->channels;
for (i = 0; i < ppp->nxchan; ++i) {
list = list->next;
if (list == &ppp->channels) {
/* create a fragment for each channel */
bits = B;
- do {
+ while (nfree > 0 || len > 0) {
list = list->next;
if (list == &ppp->channels) {
i = 0;
if (!pch->avail)
continue;
+ /*
+ * Skip this channel if it has a fragment pending already and
+ * we haven't given a fragment to all of the free channels.
+ */
+ if (pch->avail == 1) {
+ if (nfree > 0)
+ continue;
+ } else {
+ --nfree;
+ pch->avail = 1;
+ }
+
/* check the channel's mtu and whether it is still attached. */
spin_lock_bh(&pch->downl);
- if (pch->chan == 0 || (mtu = pch->chan->mtu) < hdrlen) {
- /* can't use this channel */
+ if (pch->chan == NULL) {
+ /* can't use this channel, it's being deregistered */
spin_unlock_bh(&pch->downl);
pch->avail = 0;
- if (--nch == 0)
+ if (--navail == 0)
break;
continue;
}
/*
- * We have to create multiple fragments for this channel
- * if fragsize is greater than the channel's mtu.
+ * Create a fragment for this channel of
+ * min(max(mtu+2-hdrlen, 4), fragsize, len) bytes.
+ * If mtu+2-hdrlen < 4, that is a ridiculously small
+ * MTU, so we use mtu = 2 + hdrlen.
*/
if (fragsize > len)
fragsize = len;
- for (flen = fragsize; flen > 0; flen -= fnb) {
- fnb = flen;
- if (fnb > mtu + 2 - hdrlen)
- fnb = mtu + 2 - hdrlen;
- if (fnb >= len)
- bits |= E;
- frag = alloc_skb(fnb + hdrlen, GFP_ATOMIC);
- if (frag == 0)
- goto noskb;
- q = skb_put(frag, fnb + hdrlen);
- /* make the MP header */
- q[0] = PPP_MP >> 8;
- q[1] = PPP_MP;
- if (ppp->flags & SC_MP_XSHORTSEQ) {
- q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
- q[3] = ppp->nxseq;
- } else {
- q[2] = bits;
- q[3] = ppp->nxseq >> 16;
- q[4] = ppp->nxseq >> 8;
- q[5] = ppp->nxseq;
- }
-
- /* copy the data in */
- memcpy(q + hdrlen, p, fnb);
-
- /* try to send it down the channel */
- chan = pch->chan;
- if (!chan->ops->start_xmit(chan, frag))
- skb_queue_tail(&pch->file.xq, frag);
- pch->had_frag = 1;
- p += fnb;
- len -= fnb;
- ++ppp->nxseq;
- bits = 0;
+ flen = fragsize;
+ mtu = pch->chan->mtu + 2 - hdrlen;
+ if (mtu < 4)
+ mtu = 4;
+ if (flen > mtu)
+ flen = mtu;
+ if (flen == len && nfree == 0)
+ bits |= E;
+ frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
+ if (frag == 0)
+ goto noskb;
+ q = skb_put(frag, flen + hdrlen);
+
+ /* make the MP header */
+ q[0] = PPP_MP >> 8;
+ q[1] = PPP_MP;
+ if (ppp->flags & SC_MP_XSHORTSEQ) {
+ q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
+ q[3] = ppp->nxseq;
+ } else {
+ q[2] = bits;
+ q[3] = ppp->nxseq >> 16;
+ q[4] = ppp->nxseq >> 8;
+ q[5] = ppp->nxseq;
}
+
+ /*
+ * Copy the data in.
+ * Unfortunately there is a bug in older versions of
+ * the Linux PPP multilink reconstruction code where it
+ * drops 0-length fragments. Therefore we make sure the
+ * fragment has at least one byte of data. Any bytes
+ * we add in this situation will end up as padding on the
+ * end of the reconstructed packet.
+ */
+ if (flen == 0)
+ *skb_put(frag, 1) = 0;
+ else
+ memcpy(q + hdrlen, p, flen);
+
+ /* try to send it down the channel */
+ chan = pch->chan;
+ if (!skb_queue_empty(&pch->file.xq) ||
+ !chan->ops->start_xmit(chan, frag))
+ skb_queue_tail(&pch->file.xq, frag);
+ pch->had_frag = 1;
+ p += flen;
+ len -= flen;
+ ++ppp->nxseq;
+ bits = 0;
spin_unlock_bh(&pch->downl);
- } while (len > 0);
+
+ if (--nbigger == 0 && fragsize > 0)
+ --fragsize;
+ }
ppp->nxchan = i;
return 1;
spin_lock_bh(&pch->downl);
if (pch->chan != 0) {
- while (skb_queue_len(&pch->file.xq) > 0) {
+ while (!skb_queue_empty(&pch->file.xq)) {
skb = skb_dequeue(&pch->file.xq);
if (!pch->chan->ops->start_xmit(pch->chan, skb)) {
/* put the packet back and try again later */
}
spin_unlock_bh(&pch->downl);
/* see if there is anything from the attached unit to be sent */
- if (skb_queue_len(&pch->file.xq) == 0) {
+ if (skb_queue_empty(&pch->file.xq)) {
read_lock_bh(&pch->upl);
ppp = pch->ppp;
if (ppp != 0)
kfree_skb(skb);
return;
}
-
+
proto = PPP_PROTO(skb);
read_lock_bh(&pch->upl);
if (pch->ppp == 0 || proto >= 0xc000 || proto == PPP_CCPFRAG) {
&& (ppp->rstate & (SC_DC_FERROR | SC_DC_ERROR)) == 0)
skb = ppp_decompress_frame(ppp, skb);
+ if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR)
+ goto err;
+
proto = PPP_PROTO(skb);
switch (proto) {
case PPP_VJC_COMP:
kfree_skb(skb);
skb = ns;
}
- else if (!pskb_may_pull(skb, skb->len))
- goto err;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
len = slhc_uncompress(ppp->vj, skb->data + 2, skb->len - 2);
if (len <= 0) {
case PPP_VJC_UNCOMP:
if (ppp->vj == 0 || (ppp->flags & SC_REJ_COMP_TCP))
goto err;
-
+
	/* Until we fix the decompressor, we need to make sure
	 * the data portion is linear.
*/
- if (!pskb_may_pull(skb, skb->len))
+ if (!pskb_may_pull(skb, skb->len))
goto err;
if (slhc_remember(ppp->vj, skb->data + 2, skb->len - 2) <= 0) {
/* check if the packet passes the pass and active filters */
/* the filter instructions are constructed assuming
a four-byte PPP header on each packet */
- {
- u_int16_t *p = (u_int16_t *) skb_push(skb, 2);
-
- *p = 0; /* indicate inbound in DLT_LINUX_SLL */
- }
+ *skb_push(skb, 2) = 0;
if (ppp->pass_filter
&& sk_run_filter(skb, ppp->pass_filter,
ppp->pass_len) == 0) {
|| ppp->npmode[npi] != NPMODE_PASS) {
kfree_skb(skb);
} else {
- skb_pull(skb, 2); /* chop off protocol */
+ /* chop off protocol */
+ skb_pull_rcsum(skb, 2);
skb->dev = ppp->dev;
skb->protocol = htons(npindex_to_ethertype[npi]);
skb->mac.raw = skb->data;
ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
u32 mask, seq;
- struct list_head *l;
+ struct channel *ch;
int mphdrlen = (ppp->flags & SC_MP_SHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
- if (!pskb_may_pull(skb, mphdrlen + 1) || ppp->mrru == 0)
+ if (!pskb_may_pull(skb, mphdrlen) || ppp->mrru == 0)
goto err; /* no good, throw it away */
/* Decode sequence number and begin/end bits */
* The list of channels can't change because we have the receive
* side of the ppp unit locked.
*/
- for (l = ppp->channels.next; l != &ppp->channels; l = l->next) {
- struct channel *ch = list_entry(l, struct channel, clist);
+ list_for_each_entry(ch, &ppp->channels, clist) {
if (seq_before(ch->lastseq, seq))
seq = ch->lastseq;
}
{
struct channel *pch;
- pch = kmalloc(sizeof(struct channel), GFP_KERNEL);
+ pch = kzalloc(sizeof(struct channel), GFP_KERNEL);
if (pch == 0)
return -ENOMEM;
- memset(pch, 0, sizeof(struct channel));
pch->ppp = NULL;
pch->chan = chan;
chan->ppp = pch;
#endif /* CONFIG_PPP_MULTILINK */
init_rwsem(&pch->chan_sem);
spin_lock_init(&pch->downl);
- pch->upl = RW_LOCK_UNLOCKED;
+ rwlock_init(&pch->upl);
spin_lock_bh(&all_channels_lock);
pch->file.index = ++last_channel_index;
list_add(&pch->list, &new_channels);
if (pch == 0)
return; /* should never happen */
- chan->ppp = 0;
+ chan->ppp = NULL;
/*
* This ensures that we have returned from any calls into the
*/
down_write(&pch->chan_sem);
spin_lock_bh(&pch->downl);
- pch->chan = 0;
+ pch->chan = NULL;
spin_unlock_bh(&pch->downl);
up_write(&pch->chan_sem);
ppp_disconnect_channel(pch);
switch (CCP_CODE(dp)) {
case CCP_CONFREQ:
- /* A ConfReq starts negotiation of compression
+ /* A ConfReq starts negotiation of compression
* in one direction of transmission,
* and hence brings it down...but which way?
*
if(inbound)
/* He is proposing what I should send */
ppp->xstate &= ~SC_COMP_RUN;
- else
+ else
			/* I am proposing what he should send */
ppp->rstate &= ~SC_DECOMP_RUN;
-
+
break;
-
+
case CCP_TERMREQ:
case CCP_TERMACK:
/*
- * CCP is going down, both directions of transmission
+ * CCP is going down, both directions of transmission
*/
ppp->rstate &= ~SC_DECOMP_RUN;
ppp->xstate &= ~SC_COMP_RUN;
ppp->xstate = 0;
xcomp = ppp->xcomp;
xstate = ppp->xc_state;
- ppp->xc_state = 0;
+ ppp->xc_state = NULL;
ppp->rstate = 0;
rcomp = ppp->rcomp;
rstate = ppp->rc_state;
- ppp->rc_state = 0;
+ ppp->rc_state = NULL;
ppp_unlock(ppp);
if (xstate) {
/* List of compressors. */
static LIST_HEAD(compressor_list);
-static spinlock_t compressor_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(compressor_list_lock);
struct compressor_entry {
struct list_head list;
find_comp_entry(int proto)
{
struct compressor_entry *ce;
- struct list_head *list = &compressor_list;
- while ((list = list->next) != &compressor_list) {
- ce = list_entry(list, struct compressor_entry, list);
+ list_for_each_entry(ce, &compressor_list, list) {
if (ce->comp->compress_proto == proto)
return ce;
}
- return 0;
+ return NULL;
}
/* Register a compressor */
find_compressor(int type)
{
struct compressor_entry *ce;
- struct compressor *cp = 0;
+ struct compressor *cp = NULL;
spin_lock(&compressor_list_lock);
ce = find_comp_entry(type);
int ret = -ENOMEM;
int i;
- ppp = kmalloc(sizeof(struct ppp), GFP_KERNEL);
+ ppp = kzalloc(sizeof(struct ppp), GFP_KERNEL);
if (!ppp)
goto out;
dev = alloc_netdev(0, "", ppp_setup);
if (!dev)
goto out1;
- memset(ppp, 0, sizeof(struct ppp));
ppp->mru = PPP_MRU;
init_ppp_file(&ppp->file, INTERFACE);
dev->do_ioctl = ppp_net_ioctl;
ret = -EEXIST;
- down(&all_ppp_sem);
+ mutex_lock(&all_ppp_mutex);
if (unit < 0)
unit = cardmap_find_first_free(all_ppp_units);
else if (cardmap_get(all_ppp_units, unit) != NULL)
}
atomic_inc(&ppp_unit_count);
- cardmap_set(&all_ppp_units, unit, ppp);
- up(&all_ppp_sem);
+ ret = cardmap_set(&all_ppp_units, unit, ppp);
+ if (ret != 0)
+ goto out3;
+
+ mutex_unlock(&all_ppp_mutex);
*retp = 0;
return ppp;
+out3:
+ atomic_dec(&ppp_unit_count);
out2:
- up(&all_ppp_sem);
+ mutex_unlock(&all_ppp_mutex);
free_netdev(dev);
out1:
kfree(ppp);
{
struct net_device *dev;
- down(&all_ppp_sem);
+ mutex_lock(&all_ppp_mutex);
ppp_lock(ppp);
dev = ppp->dev;
- ppp->dev = 0;
+ ppp->dev = NULL;
ppp_unlock(ppp);
/* This will call dev_close() for us. */
if (dev) {
ppp->file.dead = 1;
ppp->owner = NULL;
wake_up_interruptible(&ppp->file.rwait);
- up(&all_ppp_sem);
+ mutex_unlock(&all_ppp_mutex);
}
/*
ppp_ccp_closed(ppp);
if (ppp->vj) {
slhc_free(ppp->vj);
- ppp->vj = 0;
+ ppp->vj = NULL;
}
skb_queue_purge(&ppp->file.xq);
skb_queue_purge(&ppp->file.rq);
skb_queue_purge(&ppp->mrq);
#endif /* CONFIG_PPP_MULTILINK */
#ifdef CONFIG_PPP_FILTER
- if (ppp->pass_filter) {
- kfree(ppp->pass_filter);
- ppp->pass_filter = NULL;
- }
- if (ppp->active_filter) {
- kfree(ppp->active_filter);
- ppp->active_filter = 0;
- }
+ kfree(ppp->pass_filter);
+ ppp->pass_filter = NULL;
+ kfree(ppp->active_filter);
+ ppp->active_filter = NULL;
#endif /* CONFIG_PPP_FILTER */
+ if (ppp->xmit_pending)
+ kfree_skb(ppp->xmit_pending);
+
kfree(ppp);
}
/*
* Locate an existing ppp unit.
- * The caller should have locked the all_ppp_sem.
+ * The caller should have locked the all_ppp_mutex.
*/
static struct ppp *
ppp_find_unit(int unit)
ppp_find_channel(int unit)
{
struct channel *pch;
- struct list_head *list;
- list = &new_channels;
- while ((list = list->next) != &new_channels) {
- pch = list_entry(list, struct channel, list);
+ list_for_each_entry(pch, &new_channels, list) {
if (pch->file.index == unit) {
- list_del(&pch->list);
- list_add(&pch->list, &all_channels);
+ list_move(&pch->list, &all_channels);
return pch;
}
}
- list = &all_channels;
- while ((list = list->next) != &all_channels) {
- pch = list_entry(list, struct channel, list);
+ list_for_each_entry(pch, &all_channels, list) {
if (pch->file.index == unit)
return pch;
}
- return 0;
+ return NULL;
}
/*
int ret = -ENXIO;
int hdrlen;
- down(&all_ppp_sem);
+ mutex_lock(&all_ppp_mutex);
ppp = ppp_find_unit(unit);
if (ppp == 0)
goto out;
outl:
write_unlock_bh(&pch->upl);
out:
- up(&all_ppp_sem);
+ mutex_unlock(&all_ppp_mutex);
return ret;
}
/* remove it from the ppp unit's list */
ppp_lock(ppp);
list_del(&pch->clist);
- --ppp->n_channels;
+ if (--ppp->n_channels == 0)
+ wake_up_interruptible(&ppp->file.rwait);
ppp_unlock(ppp);
if (atomic_dec_and_test(&ppp->file.refcnt))
ppp_destroy_interface(ppp);
cardmap_destroy(&all_ppp_units);
if (unregister_chrdev(PPP_MAJOR, "ppp") != 0)
printk(KERN_ERR "PPP: failed to unregister PPP device\n");
- devfs_remove("ppp");
- class_simple_device_remove(MKDEV(PPP_MAJOR, 0));
- class_simple_destroy(ppp_class);
+ device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
+ class_destroy(ppp_class);
}
/*
return NULL;
}
-static void cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
+static int cardmap_set(struct cardmap **pmap, unsigned int nr, void *ptr)
{
struct cardmap *p;
int i;
if (p == NULL || (nr >> p->shift) >= CARDMAP_WIDTH) {
do {
/* need a new top level */
- struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
- memset(np, 0, sizeof(*np));
+ struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
+ goto enomem;
np->ptr[0] = p;
if (p != NULL) {
np->shift = p->shift + CARDMAP_ORDER;
while (p->shift > 0) {
i = (nr >> p->shift) & CARDMAP_MASK;
if (p->ptr[i] == NULL) {
- struct cardmap *np = kmalloc(sizeof(*np), GFP_KERNEL);
- memset(np, 0, sizeof(*np));
+ struct cardmap *np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
+ goto enomem;
np->shift = p->shift - CARDMAP_ORDER;
np->parent = p;
p->ptr[i] = np;
set_bit(i, &p->inuse);
else
clear_bit(i, &p->inuse);
+ return 0;
+ enomem:
+ return -ENOMEM;
}
static unsigned int cardmap_find_first_free(struct cardmap *map)
EXPORT_SYMBOL(ppp_output_wakeup);
EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
-EXPORT_SYMBOL(all_ppp_units); /* for debugging */
-EXPORT_SYMBOL(all_channels); /* for debugging */
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV_MAJOR(PPP_MAJOR);
MODULE_ALIAS("/dev/ppp");