X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fsctp%2Fulpqueue.c;h=575e556aeb3eb58c30dd80f86c6158defb3c664f;hb=34a75f0025b9cf803b6a88db032e6ad6950c9313;hp=0b912c87e6cdab8f7005a054b1ade00f5f71bc4e;hpb=5273a3df6485dc2ad6aa7ddd441b9a21970f003b;p=linux-2.6.git

diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 0b912c87e..575e556ae 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -49,32 +49,13 @@
 #include <net/sctp/sm.h>
 
 /* Forward declarations for internal helpers. */
-static inline struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
-						     struct sctp_ulpevent *);
-static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
-						    struct sctp_ulpevent *);
+static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+					      struct sctp_ulpevent *);
+static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
+					      struct sctp_ulpevent *);
 
 /* 1st Level Abstractions */
 
-/* Create a new ULP queue. */
-struct sctp_ulpq *sctp_ulpq_new(struct sctp_association *asoc, int gfp)
-{
-	struct sctp_ulpq *ulpq;
-
-	ulpq = kmalloc(sizeof(struct sctp_ulpq), gfp);
-	if (!ulpq)
-		goto fail;
-	if (!sctp_ulpq_init(ulpq, asoc))
-		goto fail_init;
-	ulpq->malloced = 1;
-	return ulpq;
-
-fail_init:
-	kfree(ulpq);
-fail:
-	return NULL;
-}
-
 /* Initialize a ULP queue from a block of memory. */
 struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
 				 struct sctp_association *asoc)
@@ -92,17 +73,17 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
 
 
 /* Flush the reassembly and ordering queues. */
-void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
+static void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
 {
 	struct sk_buff *skb;
 	struct sctp_ulpevent *event;
 
-	while ((skb = __skb_dequeue(&ulpq->lobby))) {
+	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
 		event = sctp_skb2event(skb);
 		sctp_ulpevent_free(event);
 	}
 
-	while ((skb = __skb_dequeue(&ulpq->reasm))) {
+	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
 		event = sctp_skb2event(skb);
 		sctp_ulpevent_free(event);
 	}
@@ -119,7 +100,7 @@ void sctp_ulpq_free(struct sctp_ulpq *ulpq)
 
 /* Process an incoming DATA chunk. */
 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
-			int gfp)
+			gfp_t gfp)
 {
 	struct sk_buff_head temp;
 	sctp_data_chunk_t *hdr;
@@ -144,7 +125,9 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 		event = sctp_ulpq_order(ulpq, event);
 	}
 
-	/* Send event to the ULP. */
+	/* Send event to the ULP.  'event' is the sctp_ulpevent for
+	 * very first SKB on the 'temp' list.
+	 */
 	if (event)
 		sctp_ulpq_tail_event(ulpq, event);
 
@@ -157,8 +140,7 @@ int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
  */
 int sctp_clear_pd(struct sock *sk)
 {
-	struct sctp_opt *sp;
-	sp = sctp_sk(sk);
+	struct sctp_sock *sp = sctp_sk(sk);
 
 	sp->pd_mode = 0;
 	if (!skb_queue_empty(&sp->pd_lobby)) {
@@ -178,14 +160,18 @@ static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 	return sctp_clear_pd(ulpq->asoc->base.sk);
 }
 
-
-
+/* If the SKB of 'event' is on a list, it is the first such member
+ * of that list.
+ */
 int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 {
 	struct sock *sk = ulpq->asoc->base.sk;
-	struct sk_buff_head *queue;
+	struct sk_buff_head *queue, *skb_list;
+	struct sk_buff *skb = sctp_event2skb(event);
 	int clear_pd = 0;
 
+	skb_list = (struct sk_buff_head *) skb->prev;
+
 	/* If the socket is just going to throw this away, do not
 	 * even try to deliver it.
 	 */
@@ -217,10 +203,10 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	/* If we are harvesting multiple skbs they will be
 	 * collected on a list.
 	 */
-	if (sctp_event2skb(event)->list)
-		sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
+	if (skb_list)
+		sctp_skb_list_tail(skb_list, queue);
 	else
-		__skb_queue_tail(queue, sctp_event2skb(event));
+		__skb_queue_tail(queue, skb);
 
 	/* Did we just complete partial delivery and need to get
 	 * rolling again?  Move pending data to the receive
@@ -234,10 +220,11 @@ int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 	return 1;
 
 out_free:
-	if (sctp_event2skb(event)->list)
-		sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
+	if (skb_list)
+		sctp_queue_purge_ulpevents(skb_list);
 	else
 		sctp_ulpevent_free(event);
+
 	return 0;
 }
 
@@ -289,9 +276,10 @@ static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
  * payload was fragmented on the way and ip had to reassemble them.
  * We add the rest of skb's to the first skb's fraglist.
  */
-static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
+static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
 {
 	struct sk_buff *pos;
+	struct sk_buff *new = NULL;
 	struct sctp_ulpevent *event;
 	struct sk_buff *pnext, *last;
 	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
@@ -310,11 +298,33 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
 	 */
 	if (last)
 		last->next = pos;
-	else
-		skb_shinfo(f_frag)->frag_list = pos;
+	else {
+		if (skb_cloned(f_frag)) {
+			/* This is a cloned skb, we can't just modify
+			 * the frag_list.  We need a new skb to do that.
+			 * Instead of calling skb_unshare(), we'll do it
+			 * ourselves since we need to delay the free.
+			 */
+			new = skb_copy(f_frag, GFP_ATOMIC);
+			if (!new)
+				return NULL;	/* try again later */
+
+			new->sk = f_frag->sk;
+
+			skb_shinfo(new)->frag_list = pos;
+		} else
+			skb_shinfo(f_frag)->frag_list = pos;
+	}
 
 	/* Remove the first fragment from the reassembly queue.  */
-	__skb_unlink(f_frag, f_frag->list);
+	__skb_unlink(f_frag, queue);
+
+	/* if we did unshare, then free the old skb and re-assign */
+	if (new) {
+		kfree_skb(f_frag);
+		f_frag = new;
+	}
+
 	while (pos) {
 
 		pnext = pos->next;
@@ -324,7 +334,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
 		f_frag->data_len += pos->len;
 
 		/* Remove the fragment from the reassembly queue.  */
-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, queue);
 
 		/* Break if we have reached the last fragment.  */
 		if (pos == l_frag)
@@ -334,7 +344,7 @@ static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag,
 	};
 
 	event = sctp_skb2event(f_frag);
-	SCTP_INC_STATS(SctpReasmUsrMsgs);
+	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
 
 	return event;
 }
@@ -395,7 +405,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_u
 done:
 	return retval;
 found:
-	retval = sctp_make_reassembled_event(first_frag, pos);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
 	if (retval)
 		retval->msg_flags |= MSG_EOR;
 	goto done;
@@ -455,7 +465,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq
 	 * further.
 	 */
 done:
-	retval = sctp_make_reassembled_event(first_frag, last_frag);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
 	if (retval && is_last)
 		retval->msg_flags |= MSG_EOR;
 
 	return retval;
 }
@@ -466,8 +476,8 @@ done:
 
 /* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
  * need reassembling.
  */
-static inline struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
-						    struct sctp_ulpevent *event)
+static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
+					     struct sctp_ulpevent *event)
 {
 	struct sctp_ulpevent *retval = NULL;
 
@@ -547,7 +557,7 @@ static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *u
 	 * further.
 	 */
 done:
-	retval = sctp_make_reassembled_event(first_frag, last_frag);
+	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
 	return retval;
 }
 
@@ -557,6 +567,7 @@ done:
 static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 					      struct sctp_ulpevent *event)
 {
+	struct sk_buff_head *event_list;
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
 	struct sctp_stream *in;
@@ -567,6 +578,8 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 	ssn = event->ssn;
 	in  = &ulpq->asoc->ssnmap->in;
 
+	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
+
 	/* We are holding the chunks by stream, by SSN.  */
 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 		cevent = (struct sctp_ulpevent *) pos->cb;
@@ -587,10 +600,10 @@ static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 		/* Found it, so mark in the ssnmap. */
 		sctp_ssn_next(in, sid);
 
-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, &ulpq->lobby);
 
 		/* Attach all gathered skbs to the event.  */
-		__skb_queue_tail(sctp_event2skb(event)->list, pos);
+		__skb_queue_tail(event_list, pos);
 	}
 }
 
@@ -645,8 +658,8 @@ static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 }
 
-static inline struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
-						    struct sctp_ulpevent *event)
+static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
+					     struct sctp_ulpevent *event)
 {
 	__u16 sid, ssn;
 	struct sctp_stream *in;
@@ -687,7 +700,7 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 {
 	struct sk_buff *pos, *tmp;
 	struct sctp_ulpevent *cevent;
-	struct sctp_ulpevent *event = NULL;
+	struct sctp_ulpevent *event;
 	struct sctp_stream *in;
 	struct sk_buff_head temp;
 	__u16 csid, cssn;
@@ -695,6 +708,8 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 	in  = &ulpq->asoc->ssnmap->in;
 
 	/* We are holding the chunks by stream, by SSN.  */
+	skb_queue_head_init(&temp);
+	event = NULL;
 	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 		cevent = (struct sctp_ulpevent *) pos->cb;
 		csid = cevent->stream;
@@ -706,19 +721,20 @@ static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
 		/* Found it, so mark in the ssnmap. */
 		sctp_ssn_next(in, csid);
 
-		__skb_unlink(pos, pos->list);
+		__skb_unlink(pos, &ulpq->lobby);
 		if (!event) {
 			/* Create a temporary list to collect chunks on.  */
 			event = sctp_skb2event(pos);
-			skb_queue_head_init(&temp);
 			__skb_queue_tail(&temp, sctp_event2skb(event));
 		} else {
 			/* Attach all gathered skbs to the event.  */
-			__skb_queue_tail(sctp_event2skb(event)->list, pos);
+			__skb_queue_tail(&temp, pos);
 		}
 	}
 
-	/* Send event to the ULP. */
+	/* Send event to the ULP.  'event' is the sctp_ulpevent for
+	 * very first SKB on the 'temp' list.
+	 */
 	if (event)
 		sctp_ulpq_tail_event(ulpq, event);
 }
@@ -756,7 +772,7 @@ static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
 
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
-	while ((skb = __skb_dequeue_tail(&ulpq->lobby))) {
+	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
 		freed += skb_headlen(skb);
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
@@ -782,7 +798,7 @@ static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
 	tsnmap = &ulpq->asoc->peer.tsn_map;
 
 	/* Walk backwards through the list, reneges the newest tsns. */
-	while ((skb = __skb_dequeue_tail(&ulpq->reasm))) {
+	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
 		freed += skb_headlen(skb);
 		event = sctp_skb2event(skb);
 		tsn = event->tsn;
@@ -798,7 +814,8 @@ static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
 
 /* Partial deliver the first message as there is pressure on rwnd. */
 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
-				struct sctp_chunk *chunk, int gfp)
+				struct sctp_chunk *chunk,
+				gfp_t gfp)
 {
 	struct sctp_ulpevent *event;
 	struct sctp_association *asoc;
@@ -822,7 +839,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 
 /* Renege some packets to make room for an incoming chunk.  */
 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
-		      int gfp)
+		      gfp_t gfp)
 {
 	struct sctp_association *asoc;
 	__u16 needed, freed;
@@ -861,7 +878,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 
 /* Notify the application if an association is aborted and in
  * partial delivery mode.  Send up any pending received messages.
  */
-void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int gfp)
+void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 {
 	struct sctp_ulpevent *ev = NULL;
 	struct sock *sk;
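
Background notes on the techniques visible in this patch:

* The conversions from pos->list / f_frag->list to an explicit queue
  argument, and the (struct sk_buff_head *) skb->prev cast, follow from
  the removal of the skb->list back-pointer from struct sk_buff: an skb
  no longer records which queue holds it, so the owner must be passed
  in.  The cast is valid because these queues are circular lists and,
  per the new comment, the event's skb is always the first member, so
  its ->prev points back at the sk_buff_head itself.

* The int gfp -> gfp_t gfp parameter changes adopt the type-safe
  annotation for allocation flags; no behavior changes.

* The skb_cloned()/skb_copy() branch in sctp_make_reassembled_event()
  is a copy-before-write rule: a cloned skb shares its skb_shared_info
  with its clone, so writing skb_shinfo(f_frag)->frag_list in place
  would corrupt the clone's view of the data.

The sketch below models that copy-before-write rule in self-contained
userspace C.  It is only an illustration of the pattern, not kernel
code: struct buf, buf_clone() and buf_unshare() are hypothetical
stand-ins for skb reference counting.

/* Userspace model of the copy-before-write rule used in the
 * skb_cloned()/skb_copy() hunk above.  All names here are
 * illustrative, not kernel APIs.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	int refs;		/* clones share storage, like skb_shared_info */
	char data[32];		/* the state a writer wants to modify */
};

static struct buf *buf_clone(struct buf *b)
{
	b->refs++;		/* a clone is just another reference */
	return b;
}

/* Return a buffer that is safe to write: the original if we are the
 * sole owner, otherwise a private copy.  The caller switches to the
 * returned pointer and drops its old reference only afterwards --
 * the "delay the free" point made in the patch's comment.
 */
static struct buf *buf_unshare(struct buf *b)
{
	struct buf *copy;

	if (b->refs == 1)
		return b;		/* unshared: write in place */

	copy = malloc(sizeof(*copy));
	if (!copy)
		return NULL;		/* like "try again later" */
	memcpy(copy->data, b->data, sizeof(copy->data));
	copy->refs = 1;
	b->refs--;			/* the clone keeps its unmodified view */
	return copy;
}

int main(void)
{
	struct buf *orig = calloc(1, sizeof(*orig));
	struct buf *mine;

	if (!orig)
		return 1;
	orig->refs = 1;
	strcpy(orig->data, "frag1");

	buf_clone(orig);		/* someone else now holds a reference */
	mine = buf_unshare(orig);	/* forces a private copy */
	if (!mine)
		return 1;
	strcat(mine->data, "+frag2");	/* safe: the clone's view is untouched */

	printf("clone still sees: %s\n", orig->data);
	printf("writer now sees:  %s\n", mine->data);
	free(mine);
	free(orig);
	return 0;
}

In the same spirit, the kernel hunk avoids skb_unshare() because that
helper frees the original skb immediately, while f_frag must remain
linked on ulpq->reasm until __skb_unlink(f_frag, queue) has run; hence
the manual skb_copy() followed by the deferred kfree_skb(f_frag).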