X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=net%2Fdccp%2Foutput.c;h=3435542e96528f5cce06257980e7ee95cc28ccf8;hb=a2f44b27303a5353859d77a3e96a1d3f33f56ab7;hp=efd7ffb903a149a46d38d57e691b20bfba416c67;hpb=134734d875a0a48d994ef20b9905209b4b8b6f75;p=linux-2.6.git

diff --git a/net/dccp/output.c b/net/dccp/output.c
index efd7ffb90..3435542e9 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -1,6 +1,6 @@
 /*
  *  net/dccp/output.c
- * 
+ *
  *  An implementation of the DCCP protocol
  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  *
@@ -10,7 +10,6 @@
  *	2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
 #include <linux/dccp.h>
 #include <linux/kernel.h>
 #include <linux/skbuff.h>
@@ -27,7 +26,7 @@ static inline void dccp_event_ack_sent(struct sock *sk)
 	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
 }
 
-static inline void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
+static void dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
 {
 	skb_set_owner_w(skb, sk);
 	WARN_ON(sk->sk_send_head);
@@ -49,7 +48,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
 		struct dccp_hdr *dh;
 		/* XXX For now we're using only 48 bits sequence numbers */
-		const int dccp_header_size = sizeof(*dh) +
+		const u32 dccp_header_size = sizeof(*dh) +
 					     sizeof(struct dccp_hdr_ext) +
 					  dccp_packet_hdr_len(dcb->dccpd_type);
 		int err, set_ack = 1;
@@ -64,6 +63,10 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 		case DCCP_PKT_DATAACK:
 			break;
 
+		case DCCP_PKT_REQUEST:
+			set_ack = 0;
+			/* fall through */
+
 		case DCCP_PKT_SYNC:
 		case DCCP_PKT_SYNCACK:
 			ackno = dcb->dccpd_seq;
@@ -79,18 +82,21 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 		}
 
 		dcb->dccpd_seq = dp->dccps_gss;
-		dccp_insert_options(sk, skb);
+
+		if (dccp_insert_options(sk, skb)) {
+			kfree_skb(skb);
+			return -EPROTO;
+		}
 
-		skb->h.raw = skb_push(skb, dccp_header_size);
-		dh = dccp_hdr(skb);
-
 		/* Build DCCP header and checksum it. */
-		memset(dh, 0, dccp_header_size);
+		dh = dccp_zeroed_hdr(skb, dccp_header_size);
 		dh->dccph_type	= dcb->dccpd_type;
 		dh->dccph_sport	= inet->sport;
 		dh->dccph_dport	= inet->dport;
 		dh->dccph_doff	= (dccp_header_size + dcb->dccpd_opt_len) / 4;
 		dh->dccph_ccval	= dcb->dccpd_ccval;
+		dh->dccph_cscov = dp->dccps_pcslen;
 		/* XXX For now we're using only 48 bits sequence numbers */
 		dh->dccph_x	= 1;
@@ -110,7 +116,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 			break;
 		}
 
-		icsk->icsk_af_ops->send_check(sk, skb->len, skb);
+		icsk->icsk_af_ops->send_check(sk, 0, skb);
 
 		if (set_ack)
 			dccp_event_ack_sent(sk);
@@ -119,16 +125,7 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
 
 		memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 		err = icsk->icsk_af_ops->queue_xmit(skb, 0);
-		if (err <= 0)
-			return err;
-
-		/* NET_XMIT_CN is special. It does not guarantee,
-		 * that this packet is lost. It tells that device
-		 * is about to start to drop packets or already
-		 * drops some packets of the same priority and
-		 * invokes us to send less aggressively.
-		 */
-		return err == NET_XMIT_CN ? 0 : err;
+		return net_xmit_eval(err);
 	}
 	return -ENOBUFS;
 }
 
@@ -178,37 +175,29 @@ void dccp_write_space(struct sock *sk)
 /**
  * dccp_wait_for_ccid - Wait for ccid to tell us we can send a packet
  * @sk: socket to wait for
- * @timeo: for how long
  */
-static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb,
-			      long *timeo)
+static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	DEFINE_WAIT(wait);
-	long delay;
+	unsigned long delay;
 	int rc;
 
 	while (1) {
 		prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 
-		if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
+		if (sk->sk_err)
 			goto do_error;
-		if (!*timeo)
-			goto do_nonblock;
 		if (signal_pending(current))
 			goto do_interrupted;
 
-		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
-					    skb->len);
+		rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
 		if (rc <= 0)
 			break;
 		delay = msecs_to_jiffies(rc);
-		if (delay > *timeo || delay < 0)
-			goto do_nonblock;
-
 		sk->sk_write_pending++;
 		release_sock(sk);
-		*timeo -= schedule_timeout(delay);
+		schedule_timeout(delay);
 		lock_sock(sk);
 		sk->sk_write_pending--;
 	}
@@ -219,45 +208,70 @@ out:
 do_error:
 	rc = -EPIPE;
 	goto out;
-do_nonblock:
-	rc = -EAGAIN;
-	goto out;
 do_interrupted:
-	rc = sock_intr_errno(*timeo);
+	rc = -EINTR;
 	goto out;
 }
 
-int dccp_write_xmit(struct sock *sk, struct sk_buff *skb, long *timeo)
+static void dccp_write_xmit_timer(unsigned long data) {
+	struct sock *sk = (struct sock *)data;
+	struct dccp_sock *dp = dccp_sk(sk);
+
+	bh_lock_sock(sk);
+	if (sock_owned_by_user(sk))
+		sk_reset_timer(sk, &dp->dccps_xmit_timer, jiffies+1);
+	else
+		dccp_write_xmit(sk, 0);
+	bh_unlock_sock(sk);
+	sock_put(sk);
+}
+
+void dccp_write_xmit(struct sock *sk, int block)
 {
-	const struct dccp_sock *dp = dccp_sk(sk);
-	int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb,
-					 skb->len);
+	struct dccp_sock *dp = dccp_sk(sk);
+	struct sk_buff *skb;
 
-	if (err > 0)
-		err = dccp_wait_for_ccid(sk, skb, timeo);
+	while ((skb = skb_peek(&sk->sk_write_queue))) {
+		int err = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
+
+		if (err > 0) {
+			if (!block) {
+				sk_reset_timer(sk, &dp->dccps_xmit_timer,
+						msecs_to_jiffies(err)+jiffies);
+				break;
+			} else
+				err = dccp_wait_for_ccid(sk, skb);
+			if (err && err != -EINTR)
+				DCCP_BUG("err=%d after dccp_wait_for_ccid", err);
+		}
 
-	if (err == 0) {
-		struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
-		const int len = skb->len;
+		skb_dequeue(&sk->sk_write_queue);
+		if (err == 0) {
+			struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
+			const int len = skb->len;
 
-		if (sk->sk_state == DCCP_PARTOPEN) {
-			/* See 8.1.5. Handshake Completion */
-			inet_csk_schedule_ack(sk);
-			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+			if (sk->sk_state == DCCP_PARTOPEN) {
+				/* See 8.1.5. Handshake Completion */
+				inet_csk_schedule_ack(sk);
+				inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
 						  inet_csk(sk)->icsk_rto,
 						  DCCP_RTO_MAX);
-			dcb->dccpd_type = DCCP_PKT_DATAACK;
-		} else if (dccp_ack_pending(sk))
-			dcb->dccpd_type = DCCP_PKT_DATAACK;
-		else
-			dcb->dccpd_type = DCCP_PKT_DATA;
-
-		err = dccp_transmit_skb(sk, skb);
-		ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
-	} else
-		kfree_skb(skb);
-
-	return err;
+				dcb->dccpd_type = DCCP_PKT_DATAACK;
+			} else if (dccp_ack_pending(sk))
+				dcb->dccpd_type = DCCP_PKT_DATAACK;
+			else
+				dcb->dccpd_type = DCCP_PKT_DATA;
+
+			err = dccp_transmit_skb(sk, skb);
+			ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, 0, len);
+			if (err)
+				DCCP_BUG("err=%d after ccid_hc_tx_packet_sent",
+					 err);
+		} else {
+			dccp_pr_debug("packet discarded\n");
+			kfree(skb);
+		}
+	}
 }
 
 int dccp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
@@ -275,30 +289,32 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
 {
 	struct dccp_hdr *dh;
 	struct dccp_request_sock *dreq;
-	const int dccp_header_size = sizeof(struct dccp_hdr) +
+	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
 				     sizeof(struct dccp_hdr_ext) +
 				     sizeof(struct dccp_hdr_response);
-	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
-					       dccp_header_size, 1,
+	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
 					   GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);
+	skb_reserve(skb, sk->sk_prot->max_header);
 
 	skb->dst = dst_clone(dst);
-	skb->csum = 0;
 
 	dreq = dccp_rsk(req);
+	if (inet_rsk(req)->acked)	/* increase ISS upon retransmission */
+		dccp_inc_seqno(&dreq->dreq_iss);
 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_RESPONSE;
 	DCCP_SKB_CB(skb)->dccpd_seq  = dreq->dreq_iss;
-	dccp_insert_options(sk, skb);
 
-	skb->h.raw = skb_push(skb, dccp_header_size);
+	if (dccp_insert_options(sk, skb)) {
+		kfree_skb(skb);
+		return NULL;
+	}
 
-	dh = dccp_hdr(skb);
-	memset(dh, 0, dccp_header_size);
+	/* Build and checksum header */
+	dh = dccp_zeroed_hdr(skb, dccp_header_size);
 
 	dh->dccph_sport	= inet_sk(sk)->sport;
 	dh->dccph_dport	= inet_rsk(req)->rmt_port;
@@ -310,47 +326,46 @@ struct sk_buff *dccp_make_response(struct sock *sk, struct dst_entry *dst,
 	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_isr);
 	dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
 
-	dh->dccph_checksum = dccp_v4_checksum(skb, inet_rsk(req)->loc_addr,
-					      inet_rsk(req)->rmt_addr);
+	dccp_csum_outgoing(skb);
 
+	/* We use `acked' to remember that a Response was already sent. */
+	inet_rsk(req)->acked = 1;
 	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 	return skb;
 }
 
 EXPORT_SYMBOL_GPL(dccp_make_response);
 
-struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
-				const enum dccp_reset_codes code)
-
+static struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
+				       const enum dccp_reset_codes code)
 {
 	struct dccp_hdr *dh;
 	struct dccp_sock *dp = dccp_sk(sk);
-	const int dccp_header_size = sizeof(struct dccp_hdr) +
+	const u32 dccp_header_size = sizeof(struct dccp_hdr) +
 				     sizeof(struct dccp_hdr_ext) +
 				     sizeof(struct dccp_hdr_reset);
-	struct sk_buff *skb = sock_wmalloc(sk, MAX_HEADER + DCCP_MAX_OPT_LEN +
-					       dccp_header_size, 1,
+	struct sk_buff *skb = sock_wmalloc(sk, sk->sk_prot->max_header, 1,
 					   GFP_ATOMIC);
 	if (skb == NULL)
 		return NULL;
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, MAX_HEADER + DCCP_MAX_OPT_LEN + dccp_header_size);
+	skb_reserve(skb, sk->sk_prot->max_header);
 
 	skb->dst = dst_clone(dst);
-	skb->csum = 0;
 
 	dccp_inc_seqno(&dp->dccps_gss);
 
 	DCCP_SKB_CB(skb)->dccpd_reset_code = code;
 	DCCP_SKB_CB(skb)->dccpd_type	   = DCCP_PKT_RESET;
 	DCCP_SKB_CB(skb)->dccpd_seq	   = dp->dccps_gss;
-	dccp_insert_options(sk, skb);
 
-	skb->h.raw = skb_push(skb, dccp_header_size);
+	if (dccp_insert_options(sk, skb)) {
+		kfree_skb(skb);
+		return NULL;
+	}
 
-	dh = dccp_hdr(skb);
-	memset(dh, 0, dccp_header_size);
+	dh = dccp_zeroed_hdr(skb, dccp_header_size);
 
 	dh->dccph_sport	= inet_sk(sk)->sport;
 	dh->dccph_dport	= inet_sk(sk)->dport;
@@ -362,14 +377,33 @@ struct sk_buff *dccp_make_reset(struct sock *sk, struct dst_entry *dst,
 	dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dp->dccps_gsr);
 
 	dccp_hdr_reset(skb)->dccph_reset_code = code;
-
-	dh->dccph_checksum = dccp_v4_checksum(skb, inet_sk(sk)->saddr,
-					      inet_sk(sk)->daddr);
+	inet_csk(sk)->icsk_af_ops->send_check(sk, 0, skb);
 
 	DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
 	return skb;
 }
 
+int dccp_send_reset(struct sock *sk, enum dccp_reset_codes code)
+{
+	/*
+	 * FIXME: what if rebuild_header fails?
+	 * Should we be doing a rebuild_header here?
+	 */
+	int err = inet_sk_rebuild_header(sk);
+
+	if (err == 0) {
+		struct sk_buff *skb = dccp_make_reset(sk, sk->sk_dst_cache,
+						      code);
+		if (skb != NULL) {
+			memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+			err = inet_csk(sk)->icsk_af_ops->queue_xmit(skb, 0);
+			return net_xmit_eval(err);
+		}
+	}
+
+	return err;
+}
+
 /*
  * Do all connect socket setups that can be done AF independent.
  */
@@ -384,18 +418,25 @@ static inline void dccp_connect_init(struct sock *sk)
 
 	dccp_sync_mss(sk, dst_mtu(dst));
 
-	dccp_update_gss(sk, dp->dccps_iss);
-	/*
+	/*
 	 * SWL and AWL are initially adjusted so that they are not less than
 	 * the initial Sequence Numbers received and sent, respectively:
 	 *	SWL := max(GSR + 1 - floor(W/4), ISR),
 	 *	AWL := max(GSS - W' + 1, ISS).
 	 * These adjustments MUST be applied only at the beginning of the
 	 * connection.
- 	 */
+	 */
+	dccp_update_gss(sk, dp->dccps_iss);
 	dccp_set_seqno(&dp->dccps_awl, max48(dp->dccps_awl, dp->dccps_iss));
 
+	/* S.GAR - greatest valid acknowledgement number received on a non-Sync;
+	 *         initialized to S.ISS (sec. 8.5) */
+	dp->dccps_gar = dp->dccps_iss;
+
 	icsk->icsk_retransmits = 0;
+	init_timer(&dp->dccps_xmit_timer);
+	dp->dccps_xmit_timer.data = (unsigned long)sk;
+	dp->dccps_xmit_timer.function = dccp_write_xmit_timer;
 }
 
 int dccp_connect(struct sock *sk)
@@ -405,15 +446,14 @@ int dccp_connect(struct sock *sk)
 
 	dccp_connect_init(sk);
 
-	skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
+	skb = alloc_skb(sk->sk_prot->max_header, sk->sk_allocation);
 	if (unlikely(skb == NULL))
 		return -ENOBUFS;
 
 	/* Reserve space for headers. */
-	skb_reserve(skb, MAX_DCCP_HEADER);
+	skb_reserve(skb, sk->sk_prot->max_header);
 
 	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
-	skb->csum = 0;
 
 	dccp_skb_entail(sk, skb);
 	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
@@ -431,7 +471,8 @@ void dccp_send_ack(struct sock *sk)
 {
 	/* If we have been reset, we may not send again. */
 	if (sk->sk_state != DCCP_CLOSED) {
-		struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);
+		struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header,
+						GFP_ATOMIC);
 
 		if (skb == NULL) {
 			inet_csk_schedule_ack(sk);
@@ -443,8 +484,7 @@ void dccp_send_ack(struct sock *sk)
 		}
 
 		/* Reserve space for headers */
-		skb_reserve(skb, MAX_DCCP_HEADER);
-		skb->csum = 0;
+		skb_reserve(skb, sk->sk_prot->max_header);
 		DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_ACK;
 		dccp_transmit_skb(sk, skb);
 	}
@@ -490,21 +530,22 @@ void dccp_send_sync(struct sock *sk, const u64 seq,
 	 * dccp_transmit_skb() will set the ownership to this
 	 * sock.
 	 */
-	struct sk_buff *skb = alloc_skb(MAX_DCCP_HEADER, GFP_ATOMIC);
+	struct sk_buff *skb = alloc_skb(sk->sk_prot->max_header, GFP_ATOMIC);
 
 	if (skb == NULL)
 		/* FIXME: how to make sure the sync is sent? */
 		return;
 
 	/* Reserve space for headers and prepare control bits. */
-	skb_reserve(skb, MAX_DCCP_HEADER);
-	skb->csum = 0;
+	skb_reserve(skb, sk->sk_prot->max_header);
 	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
 	DCCP_SKB_CB(skb)->dccpd_seq = seq;
 
 	dccp_transmit_skb(sk, skb);
 }
 
+EXPORT_SYMBOL_GPL(dccp_send_sync);
+
 /*
  * Send a DCCP_PKT_CLOSE/CLOSEREQ. The caller locks the socket for us. This
  * cannot be allowed to fail queueing a DCCP_PKT_CLOSE/CLOSEREQ frame under
@@ -522,13 +563,14 @@ void dccp_send_close(struct sock *sk, const int active)
 
 	/* Reserve space for headers and prepare control bits. */
 	skb_reserve(skb, sk->sk_prot->max_header);
-	skb->csum = 0;
 	DCCP_SKB_CB(skb)->dccpd_type = dp->dccps_role == DCCP_ROLE_CLIENT ?
 					DCCP_PKT_CLOSE : DCCP_PKT_CLOSEREQ;
 
 	if (active) {
+		dccp_write_xmit(sk, 1);
 		dccp_skb_entail(sk, skb);
 		dccp_transmit_skb(sk, skb_clone(skb, prio));
+		/* FIXME do we need a retransmit timer here? */
 	} else
 		dccp_transmit_skb(sk, skb);
 }