/*
 *      TCP over IPv6
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      $Id: tcp_ipv6.c,v 1.144 2002/02/01 22:01:04 davem Exp $
 *
 *      Based on:
 *      linux/net/ipv4/tcp.c
 *      linux/net/ipv4/tcp_input.c
 *      linux/net/ipv4/tcp_output.c
 *
 *      Fixes:
 *      Hideaki YOSHIFUJI       :       sin6_scope_id support
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
 *                                      to a single port at the same time.
 *      YOSHIFUJI Hideaki @USAGI:       convert /proc/net/tcp6 to seq_file.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>

#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>

#include <asm/uaccess.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void     tcp_v6_send_reset(struct sk_buff *skb);
static void     tcp_v6_or_send_ack(struct sk_buff *skb, struct open_request *req);
static void     tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
                                  struct sk_buff *skb);

static int      tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static int      tcp_v6_xmit(struct sk_buff *skb, int ipfragok);

static struct tcp_func ipv6_mapped;
static struct tcp_func ipv6_specific;

/* I have no idea if this is a good hash for v6 or not. -DaveM */
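/* In practice: XOR the two ports with the low 32 bits of both
 * addresses, fold the result down over itself, and mask to the
 * table size.  The final mask assumes tcp_ehash_size is a power
 * of two.
 */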
static __inline__ int tcp_v6_hashfn(struct in6_addr *laddr, u16 lport,
                                    struct in6_addr *faddr, u16 fport)
{
        int hashent = (lport ^ fport);

        hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
        hashent ^= hashent>>16;
        hashent ^= hashent>>8;
        return (hashent & (tcp_ehash_size - 1));
}

static __inline__ int tcp_v6_sk_hashfn(struct sock *sk)
{
        struct inet_opt *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *laddr = &np->rcv_saddr;
        struct in6_addr *faddr = &np->daddr;
        __u16 lport = inet->num;
        __u16 fport = inet->dport;
        return tcp_v6_hashfn(laddr, lport, faddr, fport);
}

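/* A bind conflict exists when another owner of the port is bound to
 * the same (or no particular) device, the pair cannot share the port
 * (one of them lacks SO_REUSEADDR, or the existing owner is already
 * listening), and the receive addresses overlap according to
 * ipv6_rcv_saddr_equal().
 */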
static inline int tcp_v6_bind_conflict(struct sock *sk,
                                       struct tcp_bind_bucket *tb)
{
        struct sock *sk2;
        struct hlist_node *node;

        /* We must walk the whole port owner list in this case. -DaveM */
        sk_for_each_bound(sk2, node, &tb->owners) {
                if (sk != sk2 &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
                    (!sk->sk_reuse || !sk2->sk_reuse ||
                     sk2->sk_state == TCP_LISTEN) &&
                     ipv6_rcv_saddr_equal(sk, sk2))
                        break;
        }

        return node != NULL;
}

/* Grrr, addr_type already calculated by caller, but I don't want
 * to add some silly "cookie" argument to this method just for that.
 * But it doesn't matter, the recalculation is in the rarest path
 * this function ever takes.
 */
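/* Port allocation: for an anonymous bind (snum == 0) we scan forward
 * from the global tcp_port_rover through sysctl_local_port_range until
 * we hit a port with no bind bucket at all.  tb->fastreuse caches
 * "every current owner set SO_REUSEADDR and none of them listens",
 * which lets such binds succeed without walking the owner list.
 */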
static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
{
        struct tcp_bind_hashbucket *head;
        struct tcp_bind_bucket *tb;
        struct hlist_node *node;
        int ret;

        local_bh_disable();
        if (snum == 0) {
                int low = sysctl_local_port_range[0];
                int high = sysctl_local_port_range[1];
                int remaining = (high - low) + 1;
                int rover;

                spin_lock(&tcp_portalloc_lock);
                rover = tcp_port_rover;
                do {    rover++;
                        if ((rover < low) || (rover > high))
                                rover = low;
                        head = &tcp_bhash[tcp_bhashfn(rover)];
                        spin_lock(&head->lock);
                        tb_for_each(tb, node, &head->chain)
                                if (tb->port == rover)
                                        goto next;
                        break;
                next:
                        spin_unlock(&head->lock);
                } while (--remaining > 0);
                tcp_port_rover = rover;
                spin_unlock(&tcp_portalloc_lock);

                /* Exhausted local port range during search? */
                ret = 1;
                if (remaining <= 0)
                        goto fail;

                /* OK, here is the one we will use. */
                snum = rover;
        } else {
                head = &tcp_bhash[tcp_bhashfn(snum)];
                spin_lock(&head->lock);
                tb_for_each(tb, node, &head->chain)
                        if (tb->port == snum)
                                goto tb_found;
        }
        tb = NULL;
        goto tb_not_found;
tb_found:
        if (tb && !hlist_empty(&tb->owners)) {
                if (tb->fastreuse > 0 && sk->sk_reuse &&
                    sk->sk_state != TCP_LISTEN) {
                        goto success;
                } else {
                        ret = 1;
                        if (tcp_v6_bind_conflict(sk, tb))
                                goto fail_unlock;
                }
        }
tb_not_found:
        ret = 1;
        if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
                goto fail_unlock;
        if (hlist_empty(&tb->owners)) {
                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
                        tb->fastreuse = 1;
                else
                        tb->fastreuse = 0;
        } else if (tb->fastreuse &&
                   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
                tb->fastreuse = 0;

success:
        if (!tcp_sk(sk)->bind_hash)
                tcp_bind_hash(sk, tb, snum);
        BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
        ret = 0;

fail_unlock:
        spin_unlock(&head->lock);
fail:
        local_bh_enable();
        return ret;
}

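/* Insert the socket into the listening or established hash table.
 * Callers run with local BHs disabled; listening sockets are added
 * under tcp_lhash_lock, everything else under the per-chain ehash
 * write lock.
 */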
static __inline__ void __tcp_v6_hash(struct sock *sk)
{
        struct hlist_head *list;
        rwlock_t *lock;

        BUG_TRAP(sk_unhashed(sk));

        if (sk->sk_state == TCP_LISTEN) {
                list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
                lock = &tcp_lhash_lock;
                tcp_listen_wlock();
        } else {
                sk->sk_hashent = tcp_v6_sk_hashfn(sk);
                list = &tcp_ehash[sk->sk_hashent].chain;
                lock = &tcp_ehash[sk->sk_hashent].lock;
                write_lock(lock);
        }

        __sk_add_node(sk, list);
        sock_prot_inc_use(sk->sk_prot);
        write_unlock(lock);
}


static void tcp_v6_hash(struct sock *sk)
{
        if (sk->sk_state != TCP_CLOSE) {
                struct tcp_opt *tp = tcp_sk(sk);

                if (tp->af_specific == &ipv6_mapped) {
                        tcp_prot.hash(sk);
                        return;
                }
                local_bh_disable();
                __tcp_v6_hash(sk);
                local_bh_enable();
        }
}

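/* Walk the listening hash chain scoring candidates: one point for the
 * port/family match, one more for an exactly matching bound address,
 * one more for a matching bound device.  A score of 3 is an exact hit
 * and ends the search early.
 */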
static struct sock *tcp_v6_lookup_listener(struct in6_addr *daddr, unsigned short hnum, int dif)
{
        struct sock *sk;
        struct hlist_node *node;
        struct sock *result = NULL;
        int score, hiscore;

        hiscore=0;
        read_lock(&tcp_lhash_lock);
        sk_for_each(sk, node, &tcp_listening_hash[tcp_lhashfn(hnum)]) {
                if (inet_sk(sk)->num == hnum && sk->sk_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);

                        score = 1;
                        if (!ipv6_addr_any(&np->rcv_saddr)) {
                                if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
                                        continue;
                                score++;
                        }
                        if (sk->sk_bound_dev_if) {
                                if (sk->sk_bound_dev_if != dif)
                                        continue;
                                score++;
                        }
                        if (score == 3) {
                                result = sk;
                                break;
                        }
                        if (score > hiscore) {
                                hiscore = score;
                                result = sk;
                        }
                }
        }
        if (result)
                sock_hold(result);
        read_unlock(&tcp_lhash_lock);
        return result;
}

/* Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
 *
 * The sockhash lock must be held as a reader here.
 */

static inline struct sock *__tcp_v6_lookup_established(struct in6_addr *saddr, u16 sport,
                                                       struct in6_addr *daddr, u16 hnum,
                                                       int dif)
{
        struct tcp_ehash_bucket *head;
        struct sock *sk;
        struct hlist_node *node;
        __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
        int hash;

        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyways.
         */
        hash = tcp_v6_hashfn(daddr, hnum, saddr, sport);
        head = &tcp_ehash[hash];
        read_lock(&head->lock);
        sk_for_each(sk, node, &head->chain) {
                /* For IPV6 do the cheaper port and family tests first. */
                if(TCP_IPV6_MATCH(sk, saddr, daddr, ports, dif))
                        goto hit; /* You sunk my battleship! */
        }
        /* Must check for a TIME_WAIT'er before going to listener hash. */
        sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
                /* FIXME: acme: check this... */
                struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;

                if(*((__u32 *)&(tw->tw_dport))  == ports        &&
                   sk->sk_family                == PF_INET6) {
                        if(ipv6_addr_equal(&tw->tw_v6_daddr, saddr)     &&
                           ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
                           (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
                                goto hit;
                }
        }
        read_unlock(&head->lock);
        return NULL;

hit:
        sock_hold(sk);
        read_unlock(&head->lock);
        return sk;
}


static inline struct sock *__tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
                                           struct in6_addr *daddr, u16 hnum,
                                           int dif)
{
        struct sock *sk;

        sk = __tcp_v6_lookup_established(saddr, sport, daddr, hnum, dif);

        if (sk)
                return sk;

        return tcp_v6_lookup_listener(daddr, hnum, dif);
}

inline struct sock *tcp_v6_lookup(struct in6_addr *saddr, u16 sport,
                                  struct in6_addr *daddr, u16 dport,
                                  int dif)
{
        struct sock *sk;

        local_bh_disable();
        sk = __tcp_v6_lookup(saddr, sport, daddr, ntohs(dport), dif);
        local_bh_enable();

        return sk;
}

EXPORT_SYMBOL_GPL(tcp_v6_lookup);


/*
 * Open request hash tables.
 */

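/* Open-coded jhash over the remote address words, mixing in the
 * remote port and the per-listener random seed, masked down to
 * TCP_SYNQ_HSIZE (a power of two).
 */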
static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
{
        u32 a, b, c;

        a = raddr->s6_addr32[0];
        b = raddr->s6_addr32[1];
        c = raddr->s6_addr32[2];

        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
        c += rnd;
        __jhash_mix(a, b, c);

        a += raddr->s6_addr32[3];
        b += (u32) rport;
        __jhash_mix(a, b, c);

        return c & (TCP_SYNQ_HSIZE - 1);
}

static struct open_request *tcp_v6_search_req(struct tcp_opt *tp,
                                              struct open_request ***prevp,
                                              __u16 rport,
                                              struct in6_addr *raddr,
                                              struct in6_addr *laddr,
                                              int iif)
{
        struct tcp_listen_opt *lopt = tp->listen_opt;
        struct open_request *req, **prev;

        for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
                if (req->rmt_port == rport &&
                    req->class->family == AF_INET6 &&
                    ipv6_addr_equal(&req->af.v6_req.rmt_addr, raddr) &&
                    ipv6_addr_equal(&req->af.v6_req.loc_addr, laddr) &&
                    (!req->af.v6_req.iif || req->af.v6_req.iif == iif)) {
                        BUG_TRAP(req->sk == NULL);
                        *prevp = prev;
                        return req;
                }
        }

        return NULL;
}

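/* Thin wrapper: the TCP/IPv6 checksum is csum_ipv6_magic() over the
 * pseudo-header, with the partial sum of the segment passed in as
 * "base".
 */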
static __inline__ u16 tcp_v6_check(struct tcphdr *th, int len,
                                   struct in6_addr *saddr,
                                   struct in6_addr *daddr,
                                   unsigned long base)
{
        return csum_ipv6_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static __u32 tcp_v6_init_sequence(struct sock *sk, struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IPV6)) {
                return secure_tcpv6_sequence_number(skb->nh.ipv6h->daddr.s6_addr32,
                                                    skb->nh.ipv6h->saddr.s6_addr32,
                                                    skb->h.th->dest,
                                                    skb->h.th->source);
        } else {
                return secure_tcp_sequence_number(skb->nh.iph->daddr,
                                                  skb->nh.iph->saddr,
                                                  skb->h.th->dest,
                                                  skb->h.th->source);
        }
}

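/* Called on connect() with an autobound port: make sure the resulting
 * 4-tuple is not already in use.  A TIME-WAIT occupant with recent
 * timestamps is recycled (its sequence and timestamp state seed the
 * new connection); any live match fails with -EADDRNOTAVAIL.
 */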
static int tcp_v6_check_established(struct sock *sk)
{
        struct inet_opt *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct in6_addr *daddr = &np->rcv_saddr;
        struct in6_addr *saddr = &np->daddr;
        int dif = sk->sk_bound_dev_if;
        u32 ports = TCP_COMBINED_PORTS(inet->dport, inet->num);
        int hash = tcp_v6_hashfn(daddr, inet->num, saddr, inet->dport);
        struct tcp_ehash_bucket *head = &tcp_ehash[hash];
        struct sock *sk2;
        struct hlist_node *node;
        struct tcp_tw_bucket *tw;

        write_lock_bh(&head->lock);

        /* Check TIME-WAIT sockets first. */
        sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
                tw = (struct tcp_tw_bucket*)sk2;

                if(*((__u32 *)&(tw->tw_dport))  == ports        &&
                   sk2->sk_family               == PF_INET6     &&
                   ipv6_addr_equal(&tw->tw_v6_daddr, saddr)     &&
                   ipv6_addr_equal(&tw->tw_v6_rcv_saddr, daddr) &&
                   sk2->sk_bound_dev_if == sk->sk_bound_dev_if) {
                        struct tcp_opt *tp = tcp_sk(sk);

                        if (tw->tw_ts_recent_stamp) {
                                /* See comment in tcp_ipv4.c */
                                tp->write_seq = tw->tw_snd_nxt + 65535 + 2;
                                if (!tp->write_seq)
                                        tp->write_seq = 1;
                                tp->ts_recent = tw->tw_ts_recent;
                                tp->ts_recent_stamp = tw->tw_ts_recent_stamp;
                                sock_hold(sk2);
                                goto unique;
                        } else
                                goto not_unique;
                }
        }
        tw = NULL;

        /* And established part... */
        sk_for_each(sk2, node, &head->chain) {
                if(TCP_IPV6_MATCH(sk2, saddr, daddr, ports, dif))
                        goto not_unique;
        }

unique:
        BUG_TRAP(sk_unhashed(sk));
        __sk_add_node(sk, &head->chain);
        sk->sk_hashent = hash;
        sock_prot_inc_use(sk->sk_prot);
        write_unlock_bh(&head->lock);

        if (tw) {
                /* Silly. Should hash-dance instead... */
                local_bh_disable();
                tcp_tw_deschedule(tw);
                NET_INC_STATS_BH(LINUX_MIB_TIMEWAITRECYCLED);
                local_bh_enable();

                tcp_tw_put(tw);
        }
        return 0;

not_unique:
        write_unlock_bh(&head->lock);
        return -EADDRNOTAVAIL;
}

static int tcp_v6_hash_connect(struct sock *sk)
{
        struct tcp_bind_hashbucket *head;
        struct tcp_bind_bucket *tb;

        /* XXX */
        if (inet_sk(sk)->num == 0) {
                int err = tcp_v6_get_port(sk, inet_sk(sk)->num);
                if (err)
                        return err;
                inet_sk(sk)->sport = htons(inet_sk(sk)->num);
        }

        head = &tcp_bhash[tcp_bhashfn(inet_sk(sk)->num)];
        tb = tb_head(head);

        spin_lock_bh(&head->lock);

        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
                __tcp_v6_hash(sk);
                spin_unlock_bh(&head->lock);
                return 0;
        } else {
                spin_unlock_bh(&head->lock);
                return tcp_v6_check_established(sk);
        }
}

static __inline__ int tcp_v6_iif(struct sk_buff *skb)
{
        return IP6CB(skb)->iif;
}

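/* connect() proper: validate the sockaddr and flow label, hand
 * v4-mapped destinations over to tcp_v4_connect(), do the route and
 * xfrm lookups, autoselect a source address if none is bound, hash
 * the socket, pick an initial sequence number and send the SYN.
 */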
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
                          int addr_len)
{
        struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
        struct inet_opt *inet = inet_sk(sk);
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_opt *tp = tcp_sk(sk);
        struct in6_addr *saddr = NULL, *final_p = NULL, final;
        struct flowi fl;
        struct dst_entry *dst;
        int addr_type;
        int err;

        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;

        if (usin->sin6_family != AF_INET6)
                return -EAFNOSUPPORT;

        memset(&fl, 0, sizeof(fl));

        if (np->sndflow) {
                fl.fl6_flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
                IP6_ECN_flow_init(fl.fl6_flowlabel);
                if (fl.fl6_flowlabel&IPV6_FLOWLABEL_MASK) {
                        struct ip6_flowlabel *flowlabel;
                        flowlabel = fl6_sock_lookup(sk, fl.fl6_flowlabel);
                        if (flowlabel == NULL)
                                return -EINVAL;
                        ipv6_addr_copy(&usin->sin6_addr, &flowlabel->dst);
                        fl6_sock_release(flowlabel);
                }
        }

        /*
         *      connect() to INADDR_ANY means loopback (BSD'ism).
         */

        if(ipv6_addr_any(&usin->sin6_addr))
                usin->sin6_addr.s6_addr[15] = 0x1;

        addr_type = ipv6_addr_type(&usin->sin6_addr);

        if(addr_type & IPV6_ADDR_MULTICAST)
                return -ENETUNREACH;

        if (addr_type&IPV6_ADDR_LINKLOCAL) {
                if (addr_len >= sizeof(struct sockaddr_in6) &&
                    usin->sin6_scope_id) {
                        /* If interface is set while binding, indices
                         * must coincide.
                         */
                        if (sk->sk_bound_dev_if &&
                            sk->sk_bound_dev_if != usin->sin6_scope_id)
                                return -EINVAL;

                        sk->sk_bound_dev_if = usin->sin6_scope_id;
                }

                /* Connect to link-local address requires an interface */
                if (!sk->sk_bound_dev_if)
                        return -EINVAL;
        }

        if (tp->ts_recent_stamp &&
            !ipv6_addr_equal(&np->daddr, &usin->sin6_addr)) {
                tp->ts_recent = 0;
                tp->ts_recent_stamp = 0;
                tp->write_seq = 0;
        }

        ipv6_addr_copy(&np->daddr, &usin->sin6_addr);
        np->flow_label = fl.fl6_flowlabel;

        /*
         *      TCP over IPv4
         */

        if (addr_type == IPV6_ADDR_MAPPED) {
                u32 exthdrlen = tp->ext_header_len;
                struct sockaddr_in sin;

                SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

                if (__ipv6_only_sock(sk))
                        return -ENETUNREACH;

                sin.sin_family = AF_INET;
                sin.sin_port = usin->sin6_port;
                sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

                tp->af_specific = &ipv6_mapped;
                sk->sk_backlog_rcv = tcp_v4_do_rcv;

                err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

                if (err) {
                        tp->ext_header_len = exthdrlen;
                        tp->af_specific = &ipv6_specific;
                        sk->sk_backlog_rcv = tcp_v6_do_rcv;
                        goto failure;
                } else {
                        ipv6_addr_set(&np->saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->saddr);
                        ipv6_addr_set(&np->rcv_saddr, 0, 0, htonl(0x0000FFFF),
                                      inet->rcv_saddr);
                }

                return err;
        }

        if (!ipv6_addr_any(&np->rcv_saddr))
                saddr = &np->rcv_saddr;

        fl.proto = IPPROTO_TCP;
        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
        ipv6_addr_copy(&fl.fl6_src,
                       (saddr ? saddr : &np->saddr));
        fl.oif = sk->sk_bound_dev_if;
        fl.fl_ip_dport = usin->sin6_port;
        fl.fl_ip_sport = inet->sport;

        if (np->opt && np->opt->srcrt) {
                struct rt0_hdr *rt0 = (struct rt0_hdr *)np->opt->srcrt;
                ipv6_addr_copy(&final, &fl.fl6_dst);
                ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                final_p = &final;
        }

        err = ip6_dst_lookup(sk, &dst, &fl);
        if (err)
                goto failure;
        if (final_p)
                ipv6_addr_copy(&fl.fl6_dst, final_p);

        if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
                dst_release(dst);
                goto failure;
        }

        if (saddr == NULL) {
                saddr = &fl.fl6_src;
                ipv6_addr_copy(&np->rcv_saddr, saddr);
        }

        /* set the source address */
        ipv6_addr_copy(&np->saddr, saddr);
        inet->rcv_saddr = LOOPBACK4_IPV6;

        ip6_dst_store(sk, dst, NULL);
        sk->sk_route_caps = dst->dev->features &
                ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

        tp->ext_header_len = 0;
        if (np->opt)
                tp->ext_header_len = np->opt->opt_flen + np->opt->opt_nflen;
        tp->ext2_header_len = dst->header_len;

        tp->mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

        inet->dport = usin->sin6_port;

        tcp_set_state(sk, TCP_SYN_SENT);
        err = tcp_v6_hash_connect(sk);
        if (err)
                goto late_failure;

        if (!tp->write_seq)
                tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
                                                             np->daddr.s6_addr32,
                                                             inet->sport,
                                                             inet->dport);

        err = tcp_connect(sk);
        if (err)
                goto late_failure;

        return 0;

late_failure:
        tcp_set_state(sk, TCP_CLOSE);
        __sk_dst_reset(sk);
failure:
        inet->dport = 0;
        sk->sk_route_caps = 0;
        return err;
}

static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
                int type, int code, int offset, __u32 info)
{
        struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data;
        struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
        struct ipv6_pinfo *np;
        struct sock *sk;
        int err;
        struct tcp_opt *tp;
        __u32 seq;

        sk = tcp_v6_lookup(&hdr->daddr, th->dest, &hdr->saddr, th->source, skb->dev->ifindex);

        if (sk == NULL) {
                ICMP6_INC_STATS_BH(__in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
                return;
        }

        if (sk->sk_state == TCP_TIME_WAIT) {
                tcp_tw_put((struct tcp_tw_bucket*)sk);
                return;
        }

        bh_lock_sock(sk);
        if (sock_owned_by_user(sk))
                NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS);

        if (sk->sk_state == TCP_CLOSE)
                goto out;

        tp = tcp_sk(sk);
        seq = ntohl(th->seq);
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, tp->snd_una, tp->snd_nxt)) {
                NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        np = inet6_sk(sk);

        if (type == ICMPV6_PKT_TOOBIG) {
                struct dst_entry *dst = NULL;

                if (sock_owned_by_user(sk))
                        goto out;
                if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                        goto out;

                /* icmp should have updated the destination cache entry */
                dst = __sk_dst_check(sk, np->dst_cookie);

                if (dst == NULL) {
                        struct inet_opt *inet = inet_sk(sk);
                        struct flowi fl;

                        /* BUGGG_FUTURE: Again, it is not clear how
                           to handle the rthdr case. Ignore this complexity
                           for now.
                         */
                        memset(&fl, 0, sizeof(fl));
                        fl.proto = IPPROTO_TCP;
                        ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
                        ipv6_addr_copy(&fl.fl6_src, &np->saddr);
                        fl.oif = sk->sk_bound_dev_if;
                        fl.fl_ip_dport = inet->dport;
                        fl.fl_ip_sport = inet->sport;

                        if ((err = ip6_dst_lookup(sk, &dst, &fl))) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                        if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
                                sk->sk_err_soft = -err;
                                goto out;
                        }

                } else
                        dst_hold(dst);

                if (tp->pmtu_cookie > dst_pmtu(dst)) {
                        tcp_sync_mss(sk, dst_pmtu(dst));
                        tcp_simple_retransmit(sk);
                } /* else let the usual retransmit timer handle it */
                dst_release(dst);
                goto out;
        }

        icmpv6_err_convert(type, code, &err);

        /* Might be for an open_request */
        switch (sk->sk_state) {
                struct open_request *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;

                req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr,
                                        &hdr->saddr, tcp_v6_iif(skb));
                if (!req)
                        goto out;

                /* ICMPs are not backlogged, hence we cannot get
                 * an established socket here.
                 */
                BUG_TRAP(req->sk == NULL);

                if (seq != req->snt_isn) {
                        NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }

                tcp_synq_drop(sk, req, prev);
                goto out;

        case TCP_SYN_SENT:
        case TCP_SYN_RECV:  /* Cannot happen.
                               It can, if SYNs are crossed. --ANK */
                if (!sock_owned_by_user(sk)) {
                        TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
                        sk->sk_err = err;
                        sk->sk_error_report(sk);                /* Wake people up to see the error (see connect in sock.c) */

                        tcp_done(sk);
                } else
                        sk->sk_err_soft = err;
                goto out;
        }

        if (!sock_owned_by_user(sk) && np->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else
                sk->sk_err_soft = err;

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}


static int tcp_v6_send_synack(struct sock *sk, struct open_request *req,
                              struct dst_entry *dst)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct sk_buff * skb;
        struct ipv6_txoptions *opt = NULL;
        struct in6_addr * final_p = NULL, final;
        struct flowi fl;
        int err = -1;

        memset(&fl, 0, sizeof(fl));
        fl.proto = IPPROTO_TCP;
        ipv6_addr_copy(&fl.fl6_dst, &req->af.v6_req.rmt_addr);
        ipv6_addr_copy(&fl.fl6_src, &req->af.v6_req.loc_addr);
        fl.fl6_flowlabel = 0;
        fl.oif = req->af.v6_req.iif;
        fl.fl_ip_dport = req->rmt_port;
        fl.fl_ip_sport = inet_sk(sk)->sport;

        if (dst == NULL) {
                opt = np->opt;
                if (opt == NULL &&
                    np->rxopt.bits.srcrt == 2 &&
                    req->af.v6_req.pktopts) {
                        struct sk_buff *pktopts = req->af.v6_req.pktopts;
                        struct inet6_skb_parm *rxopt = IP6CB(pktopts);
                        if (rxopt->srcrt)
                                opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt));
                }

                if (opt && opt->srcrt) {
                        struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
                        ipv6_addr_copy(&final, &fl.fl6_dst);
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                        final_p = &final;
                }

                err = ip6_dst_lookup(sk, &dst, &fl);
                if (err)
                        goto done;
                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);
                if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                        goto done;
        }

        skb = tcp_make_synack(sk, dst, req);
        if (skb) {
                struct tcphdr *th = skb->h.th;

                th->check = tcp_v6_check(th, skb->len,
                                         &req->af.v6_req.loc_addr, &req->af.v6_req.rmt_addr,
                                         csum_partial((char *)th, skb->len, skb->csum));

                ipv6_addr_copy(&fl.fl6_dst, &req->af.v6_req.rmt_addr);
                err = ip6_xmit(sk, skb, &fl, opt, 0);
                if (err == NET_XMIT_CN)
                        err = 0;
        }

done:
        dst_release(dst);
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        return err;
}

static void tcp_v6_or_free(struct open_request *req)
{
        if (req->af.v6_req.pktopts)
                kfree_skb(req->af.v6_req.pktopts);
}

static struct or_calltable or_ipv6 = {
        .family         =       AF_INET6,
        .rtx_syn_ack    =       tcp_v6_send_synack,
        .send_ack       =       tcp_v6_or_send_ack,
        .destructor     =       tcp_v6_or_free,
        .send_reset     =       tcp_v6_send_reset
};

static int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct inet6_skb_parm *opt = IP6CB(skb);

        if (np->rxopt.all) {
                if ((opt->hop && np->rxopt.bits.hopopts) ||
                    ((IPV6_FLOWINFO_MASK&*(u32*)skb->nh.raw) &&
                     np->rxopt.bits.rxflow) ||
                    (opt->srcrt && np->rxopt.bits.srcrt) ||
                    ((opt->dst1 || opt->dst0) && np->rxopt.bits.dstopts))
                        return 1;
        }
        return 0;
}


static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
                              struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);

        if (skb->ip_summed == CHECKSUM_HW) {
                th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,  0);
                skb->csum = offsetof(struct tcphdr, check);
        } else {
                th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP,
                                            csum_partial((char *)th, th->doff<<2,
                                                         skb->csum));
        }
}


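/* Build and send a RST in response to a bad segment.  If the segment
 * carried an ACK, the RST takes its sequence number from it; otherwise
 * the RST ACKs the offending segment so the peer can match it.
 */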
static void tcp_v6_send_reset(struct sk_buff *skb)
{
        struct tcphdr *th = skb->h.th, *t1;
        struct sk_buff *buff;
        struct flowi fl;

        if (th->rst)
                return;

        if (!ipv6_unicast_destination(skb))
                return;

        /*
         * We need to grab some memory, and put together an RST,
         * and then put it into the queue to be sent.
         */

        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr),
                         GFP_ATOMIC);
        if (buff == NULL)
                return;

        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + sizeof(struct tcphdr));

        t1 = (struct tcphdr *) skb_push(buff, sizeof(struct tcphdr));

        /* Swap the send and the receive. */
        memset(t1, 0, sizeof(*t1));
        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = sizeof(*t1)/4;
        t1->rst = 1;

        if(th->ack) {
                t1->seq = th->ack_seq;
        } else {
                t1->ack = 1;
                t1->ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin
                                    + skb->len - (th->doff<<2));
        }

        buff->csum = csum_partial((char *)t1, sizeof(*t1), 0);

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
        ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

        t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
                                    sizeof(*t1), IPPROTO_TCP,
                                    buff->csum);

        fl.proto = IPPROTO_TCP;
        fl.oif = tcp_v6_iif(skb);
        fl.fl_ip_dport = t1->dest;
        fl.fl_ip_sport = t1->source;

        /* sk = NULL, but it is safe for now. RST socket required. */
        if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {

                if ((xfrm_lookup(&buff->dst, &fl, NULL, 0)) < 0) {
                        dst_release(buff->dst);
                        return;
                }

                ip6_xmit(NULL, buff, &fl, NULL, 0);
                TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
                TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
                return;
        }

        kfree_skb(buff);
}

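/* Send a bare ACK, optionally carrying a timestamp option, without a
 * full socket.  Used for the TIME-WAIT and open_request answers below.
 */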
static void tcp_v6_send_ack(struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 ts)
{
        struct tcphdr *th = skb->h.th, *t1;
        struct sk_buff *buff;
        struct flowi fl;
        int tot_len = sizeof(struct tcphdr);

        if (ts)
                tot_len += 3*4;

        buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
                         GFP_ATOMIC);
        if (buff == NULL)
                return;

        skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

        t1 = (struct tcphdr *) skb_push(buff, tot_len);

        /* Swap the send and the receive. */
        memset(t1, 0, sizeof(*t1));
        t1->dest = th->source;
        t1->source = th->dest;
        t1->doff = tot_len/4;
        t1->seq = htonl(seq);
        t1->ack_seq = htonl(ack);
        t1->ack = 1;
        t1->window = htons(win);

        if (ts) {
                u32 *ptr = (u32*)(t1 + 1);
                *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                               (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tcp_time_stamp);
                *ptr = htonl(ts);
        }

        buff->csum = csum_partial((char *)t1, tot_len, 0);

        memset(&fl, 0, sizeof(fl));
        ipv6_addr_copy(&fl.fl6_dst, &skb->nh.ipv6h->saddr);
        ipv6_addr_copy(&fl.fl6_src, &skb->nh.ipv6h->daddr);

        t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst,
                                    tot_len, IPPROTO_TCP,
                                    buff->csum);

        fl.proto = IPPROTO_TCP;
        fl.oif = tcp_v6_iif(skb);
        fl.fl_ip_dport = t1->dest;
        fl.fl_ip_sport = t1->source;

        if (!ip6_dst_lookup(NULL, &buff->dst, &fl)) {
                if ((xfrm_lookup(&buff->dst, &fl, NULL, 0)) < 0) {
                        dst_release(buff->dst);
                        return;
                }
                ip6_xmit(NULL, buff, &fl, NULL, 0);
                TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
                return;
        }

        kfree_skb(buff);
}

static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_tw_bucket *tw = (struct tcp_tw_bucket *)sk;

        tcp_v6_send_ack(skb, tw->tw_snd_nxt, tw->tw_rcv_nxt,
                        tw->tw_rcv_wnd >> tw->tw_rcv_wscale, tw->tw_ts_recent);

        tcp_tw_put(tw);
}

static void tcp_v6_or_send_ack(struct sk_buff *skb, struct open_request *req)
{
        tcp_v6_send_ack(skb, req->snt_isn+1, req->rcv_isn+1, req->rcv_wnd, req->ts_recent);
}


static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
{
        struct open_request *req, **prev;
        struct tcphdr *th = skb->h.th;
        struct tcp_opt *tp = tcp_sk(sk);
        struct sock *nsk;

        /* Find possible connection requests. */
        req = tcp_v6_search_req(tp, &prev, th->source, &skb->nh.ipv6h->saddr,
                                &skb->nh.ipv6h->daddr, tcp_v6_iif(skb));
        if (req)
                return tcp_check_req(sk, skb, req, prev);

        nsk = __tcp_v6_lookup_established(&skb->nh.ipv6h->saddr,
                                          th->source,
                                          &skb->nh.ipv6h->daddr,
                                          ntohs(th->dest),
                                          tcp_v6_iif(skb));

        if (nsk) {
                if (nsk->sk_state != TCP_TIME_WAIT) {
                        bh_lock_sock(nsk);
                        return nsk;
                }
                tcp_tw_put((struct tcp_tw_bucket*)nsk);
                return NULL;
        }

#if 0 /*def CONFIG_SYN_COOKIES*/
        if (!th->rst && !th->syn && th->ack)
                sk = cookie_v6_check(sk, skb, &(IPCB(skb)->opt));
#endif
        return sk;
}

static void tcp_v6_synq_add(struct sock *sk, struct open_request *req)
{
        struct tcp_opt *tp = tcp_sk(sk);
        struct tcp_listen_opt *lopt = tp->listen_opt;
        u32 h = tcp_v6_synq_hash(&req->af.v6_req.rmt_addr, req->rmt_port, lopt->hash_rnd);

        req->sk = NULL;
        req->expires = jiffies + TCP_TIMEOUT_INIT;
        req->retrans = 0;
        req->dl_next = lopt->syn_table[h];

        write_lock(&tp->syn_wait_lock);
        lopt->syn_table[h] = req;
        write_unlock(&tp->syn_wait_lock);

        tcp_synq_added(sk);
}


/* FIXME: this is substantially similar to the ipv4 code.
 * Can some kind of merge be done? -- erics
 */
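/* Handle an incoming SYN: drop under SYN-flood pressure (there are no
 * IPv6 syncookies yet), allocate an open_request, record the peer's
 * addresses, options and inbound interface, answer with a SYN|ACK and
 * park the request in the listener's SYN queue.
 */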
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        struct ipv6_pinfo *np = inet6_sk(sk);
        struct tcp_opt tmptp, *tp = tcp_sk(sk);
        struct open_request *req = NULL;
        __u32 isn = TCP_SKB_CB(skb)->when;

        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        /*
         *      There are no SYN attacks on IPv6, yet...
         */
        if (tcp_synq_is_full(sk) && !isn) {
                if (net_ratelimit())
                        printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
                goto drop;
        }

        if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
                goto drop;

        req = tcp_openreq_alloc();
        if (req == NULL)
                goto drop;

        tcp_clear_options(&tmptp);
        tmptp.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
        tmptp.user_mss = tp->user_mss;

        tcp_parse_options(skb, &tmptp, 0);

        tmptp.tstamp_ok = tmptp.saw_tstamp;
        tcp_openreq_init(req, &tmptp, skb);

        req->class = &or_ipv6;
        ipv6_addr_copy(&req->af.v6_req.rmt_addr, &skb->nh.ipv6h->saddr);
        ipv6_addr_copy(&req->af.v6_req.loc_addr, &skb->nh.ipv6h->daddr);
        TCP_ECN_create_request(req, skb->h.th);
        req->af.v6_req.pktopts = NULL;
        if (ipv6_opt_accepted(sk, skb) ||
            np->rxopt.bits.rxinfo ||
            np->rxopt.bits.rxhlim) {
                atomic_inc(&skb->users);
                req->af.v6_req.pktopts = skb;
        }
        req->af.v6_req.iif = sk->sk_bound_dev_if;

        /* So that link locals have meaning */
        if (!sk->sk_bound_dev_if &&
            ipv6_addr_type(&req->af.v6_req.rmt_addr) & IPV6_ADDR_LINKLOCAL)
                req->af.v6_req.iif = tcp_v6_iif(skb);

        if (isn == 0)
                isn = tcp_v6_init_sequence(sk, skb);

        req->snt_isn = isn;

        if (tcp_v6_send_synack(sk, req, NULL))
                goto drop;

        tcp_v6_synq_add(sk, req);

        return 0;

drop:
        if (req)
                tcp_openreq_free(req);

        TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
        return 0; /* don't send reset */
}

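/* Create the child socket once the handshake completes.  The
 * ETH_P_IP branch covers a v4-mapped connection accepted on an IPv6
 * socket: the child is built by tcp_v4_syn_recv_sock() and then
 * dressed up with mapped addresses and the ipv6_mapped ops.
 */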
static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
                                          struct open_request *req,
                                          struct dst_entry *dst)
{
        struct ipv6_pinfo *newnp, *np = inet6_sk(sk);
        struct tcp6_sock *newtcp6sk;
        struct inet_opt *newinet;
        struct tcp_opt *newtp;
        struct sock *newsk;
        struct ipv6_txoptions *opt;

        if (skb->protocol == htons(ETH_P_IP)) {
                /*
                 *      v6 mapped
                 */

                newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst);

                if (newsk == NULL)
                        return NULL;

                newtcp6sk = (struct tcp6_sock *)newsk;
                newtcp6sk->pinet6 = &newtcp6sk->inet6;

                newinet = inet_sk(newsk);
                newnp = inet6_sk(newsk);
                newtp = tcp_sk(newsk);

                memcpy(newnp, np, sizeof(struct ipv6_pinfo));

                ipv6_addr_set(&newnp->daddr, 0, 0, htonl(0x0000FFFF),
                              newinet->daddr);

                ipv6_addr_set(&newnp->saddr, 0, 0, htonl(0x0000FFFF),
                              newinet->saddr);

                ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);

                newtp->af_specific = &ipv6_mapped;
                newsk->sk_backlog_rcv = tcp_v4_do_rcv;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
                newnp->mcast_oif   = tcp_v6_iif(skb);
                newnp->mcast_hops  = skb->nh.ipv6h->hop_limit;

                /* Charge newly allocated IPv6 socket. Though it is mapped,
                 * it is still an IPv6 socket.
                 */
#ifdef INET_REFCNT_DEBUG
                atomic_inc(&inet6_sock_nr);
#endif

                /* This is a tricky place. Until this moment IPv4 tcp
                   worked with IPv6 af_tcp.af_specific.
                   Sync it now.
                 */
                tcp_sync_mss(newsk, newtp->pmtu_cookie);

                return newsk;
        }

        opt = np->opt;

        if (sk_acceptq_is_full(sk))
                goto out_overflow;

        if (np->rxopt.bits.srcrt == 2 &&
            opt == NULL && req->af.v6_req.pktopts) {
                struct inet6_skb_parm *rxopt = IP6CB(req->af.v6_req.pktopts);
                if (rxopt->srcrt)
                        opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(req->af.v6_req.pktopts->nh.raw+rxopt->srcrt));
        }

        if (dst == NULL) {
                struct in6_addr *final_p = NULL, final;
                struct flowi fl;

                memset(&fl, 0, sizeof(fl));
                fl.proto = IPPROTO_TCP;
                ipv6_addr_copy(&fl.fl6_dst, &req->af.v6_req.rmt_addr);
                if (opt && opt->srcrt) {
                        struct rt0_hdr *rt0 = (struct rt0_hdr *) opt->srcrt;
                        ipv6_addr_copy(&final, &fl.fl6_dst);
                        ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
                        final_p = &final;
                }
                ipv6_addr_copy(&fl.fl6_src, &req->af.v6_req.loc_addr);
                fl.oif = sk->sk_bound_dev_if;
                fl.fl_ip_dport = req->rmt_port;
                fl.fl_ip_sport = inet_sk(sk)->sport;

                if (ip6_dst_lookup(sk, &dst, &fl))
                        goto out;

                if (final_p)
                        ipv6_addr_copy(&fl.fl6_dst, final_p);

                if ((xfrm_lookup(&dst, &fl, sk, 0)) < 0)
                        goto out;
        }

        newsk = tcp_create_openreq_child(sk, req, skb);
        if (newsk == NULL)
                goto out;

        /* Charge newly allocated IPv6 socket */
#ifdef INET_REFCNT_DEBUG
        atomic_inc(&inet6_sock_nr);
#endif

        ip6_dst_store(newsk, dst, NULL);
        newsk->sk_route_caps = dst->dev->features &
                ~(NETIF_F_IP_CSUM | NETIF_F_TSO);

        newtcp6sk = (struct tcp6_sock *)newsk;
        newtcp6sk->pinet6 = &newtcp6sk->inet6;

        newtp = tcp_sk(newsk);
        newinet = inet_sk(newsk);
        newnp = inet6_sk(newsk);

        memcpy(newnp, np, sizeof(struct ipv6_pinfo));

        ipv6_addr_copy(&newnp->daddr, &req->af.v6_req.rmt_addr);
        ipv6_addr_copy(&newnp->saddr, &req->af.v6_req.loc_addr);
        ipv6_addr_copy(&newnp->rcv_saddr, &req->af.v6_req.loc_addr);
        newsk->sk_bound_dev_if = req->af.v6_req.iif;

        /* Now IPv6 options...

           First: no IPv4 options.
         */
        newinet->opt = NULL;

        /* Clone RX bits */
        newnp->rxopt.all = np->rxopt.all;

        /* Clone pktoptions received with SYN */
        newnp->pktoptions = NULL;
        if (req->af.v6_req.pktopts) {
                newnp->pktoptions = skb_clone(req->af.v6_req.pktopts,
                                              GFP_ATOMIC);
                kfree_skb(req->af.v6_req.pktopts);
                req->af.v6_req.pktopts = NULL;
                if (newnp->pktoptions)
                        skb_set_owner_r(newnp->pktoptions, newsk);
        }
        newnp->opt        = NULL;
        newnp->mcast_oif  = tcp_v6_iif(skb);
        newnp->mcast_hops = skb->nh.ipv6h->hop_limit;

        /* Clone native IPv6 options from listening socket (if any)

           Yes, keeping a reference count would be much more clever,
           but we do one more thing here: reattach optmem
           to newsk.
         */
        if (opt) {
                newnp->opt = ipv6_dup_options(newsk, opt);
                if (opt != np->opt)
                        sock_kfree_s(sk, opt, opt->tot_len);
        }

        newtp->ext_header_len = 0;
        if (newnp->opt)
                newtp->ext_header_len = newnp->opt->opt_nflen +
                                        newnp->opt->opt_flen;
        newtp->ext2_header_len = dst->header_len;

        tcp_sync_mss(newsk, dst_pmtu(dst));
        newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
        tcp_initialize_rcv_mss(newsk);

        newinet->daddr = newinet->saddr = newinet->rcv_saddr = LOOPBACK4_IPV6;

        __tcp_v6_hash(newsk);
        tcp_inherit_port(sk, newsk);

        return newsk;

out_overflow:
        NET_INC_STATS_BH(LINUX_MIB_LISTENOVERFLOWS);
out:
        NET_INC_STATS_BH(LINUX_MIB_LISTENDROPS);
        if (opt && opt != np->opt)
                sock_kfree_s(sk, opt, opt->tot_len);
        dst_release(dst);
        return NULL;
}

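/* Receive checksum policy: trust a verified hardware checksum,
 * verify short segments (<= 76 bytes) immediately, and for longer
 * ones only store the pseudo-header sum so the copy-to-user path
 * can complete the verification later.
 */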
1468 static int tcp_v6_checksum_init(struct sk_buff *skb)
1469 {
1470         if (skb->ip_summed == CHECKSUM_HW) {
1471                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1472                 if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1473                                   &skb->nh.ipv6h->daddr,skb->csum))
1474                         return 0;
1475                 LIMIT_NETDEBUG(printk(KERN_DEBUG "hw tcp v6 csum failed\n"));
1476         }
1477         if (skb->len <= 76) {
1478                 if (tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1479                                  &skb->nh.ipv6h->daddr,skb_checksum(skb, 0, skb->len, 0)))
1480                         return -1;
1481                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1482         } else {
1483                 skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
1484                                           &skb->nh.ipv6h->daddr,0);
1485         }
1486         return 0;
1487 }
1488
1489 /* The socket must have it's spinlock held when we get
1490  * here.
1491  *
1492  * We have a potential double-lock case here, so even when
1493  * doing backlog processing we use the BH locking scheme.
1494  * This is because we cannot sleep with the original spinlock
1495  * held.
1496  */
1497 static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1498 {
1499         struct ipv6_pinfo *np = inet6_sk(sk);
1500         struct tcp_opt *tp;
1501         struct sk_buff *opt_skb = NULL;
1502
1503         /* Imagine: the socket is IPv6, but an IPv4 packet arrives,
1504            goes to the IPv4 receive handler and is backlogged.
1505            From the backlog it always lands here. Kerboom...
1506            Fortunately, tcp_rcv_established and rcv_established
1507            handle it correctly, but that is not the case with
1508            tcp_v6_hnd_req and tcp_v6_send_reset().   --ANK
1509          */
1510
1511         if (skb->protocol == htons(ETH_P_IP))
1512                 return tcp_v4_do_rcv(sk, skb);
1513
1514         if (sk_filter(sk, skb, 0))
1515                 goto discard;
1516
1517         /*
1518          *      socket locking is here for SMP purposes as backlog rcv
1519          *      is currently called with bh processing disabled.
1520          */
1521
1522         /* Handle Stevens' IPV6_PKTOPTIONS.
1523
1524            Yes, guys, this is the only place in our code where we can
1525            do it without affecting IPv4. The rest of the code is
1526            protocol independent, and I do not like the idea of
1527            uglifying IPv4.
1528
1529            Actually, the whole idea behind IPV6_PKTOPTIONS does not
1530            look very well thought out. For now we latch the options
1531            received in the last packet enqueued by tcp. Feel free to
1532            propose a better solution.
1533                                                --ANK (980728)
1534          */
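        /*
         * Illustrative userspace sketch (assumed API of this era; exact
         * option names and semantics vary between kernel versions): an
         * application asks for latched options with something like
         *
         *      int on = 1;
         *      setsockopt(fd, SOL_IPV6, IPV6_PKTINFO, &on, sizeof(on));
         *
         * and can later read the most recently latched ancillary data
         * back via getsockopt(fd, SOL_IPV6, IPV6_PKTOPTIONS, ...).
         */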
1535         if (np->rxopt.all)
1536                 opt_skb = skb_clone(skb, GFP_ATOMIC);
1537
1538         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1539                 TCP_CHECK_TIMER(sk);
1540                 if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
1541                         goto reset;
1542                 TCP_CHECK_TIMER(sk);
1543                 if (opt_skb)
1544                         goto ipv6_pktoptions;
1545                 return 0;
1546         }
1547
1548         if (skb->len < (skb->h.th->doff<<2) || tcp_checksum_complete(skb))
1549                 goto csum_err;
1550
1551         if (sk->sk_state == TCP_LISTEN) { 
1552                 struct sock *nsk = tcp_v6_hnd_req(sk, skb);
1553                 if (!nsk)
1554                         goto discard;
1555
1556                 /*
1557                  * Queue it on the new socket if the new socket is active,
1558                  * otherwise we just short-circuit this and continue with
1559                  * the new socket.
1560                  */
1561                 if (nsk != sk) {
1562                         if (tcp_child_process(sk, nsk, skb))
1563                                 goto reset;
1564                         if (opt_skb)
1565                                 __kfree_skb(opt_skb);
1566                         return 0;
1567                 }
1568         }
1569
1570         TCP_CHECK_TIMER(sk);
1571         if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
1572                 goto reset;
1573         TCP_CHECK_TIMER(sk);
1574         if (opt_skb)
1575                 goto ipv6_pktoptions;
1576         return 0;
1577
1578 reset:
1579         tcp_v6_send_reset(skb);
1580 discard:
1581         if (opt_skb)
1582                 __kfree_skb(opt_skb);
1583         kfree_skb(skb);
1584         return 0;
1585 csum_err:
1586         TCP_INC_STATS_BH(TCP_MIB_INERRS);
1587         goto discard;
1588
1589
1590 ipv6_pktoptions:
1591         /* You may ask: what is this? We only get here when:
1592
1593            1. the skb was enqueued by tcp;
1594            2. the skb was added to the tail of the read queue, not out of order;
1595            3. the socket is not in a passive state; and
1596            4. finally, it really contains options the user wants to receive.
1597          */
1598         tp = tcp_sk(sk);
1599         if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1600             !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
1601                 if (np->rxopt.bits.rxinfo)
1602                         np->mcast_oif = tcp_v6_iif(opt_skb);
1603                 if (np->rxopt.bits.rxhlim)
1604                         np->mcast_hops = opt_skb->nh.ipv6h->hop_limit;
1605                 if (ipv6_opt_accepted(sk, opt_skb)) {
1606                         skb_set_owner_r(opt_skb, sk);
1607                         opt_skb = xchg(&np->pktoptions, opt_skb);
1608                 } else {
1609                         __kfree_skb(opt_skb);
1610                         opt_skb = xchg(&np->pktoptions, NULL);
1611                 }
1612         }
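        /* opt_skb now holds either the previous occupant of np->pktoptions
         * or the clone we never latched; drop that reference below.
         */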
1613
1614         if (opt_skb)
1615                 kfree_skb(opt_skb);
1616         return 0;
1617 }
1618
1619 static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
1620 {
1621         struct sk_buff *skb = *pskb;
1622         struct tcphdr *th;      
1623         struct sock *sk;
1624         int ret;
1625
1626         if (skb->pkt_type != PACKET_HOST)
1627                 goto discard_it;
1628
1629         /*
1630          *      Count it even if it's bad.
1631          */
1632         TCP_INC_STATS_BH(TCP_MIB_INSEGS);
1633
1634         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1635                 goto discard_it;
1636
1637         th = skb->h.th;
1638
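        /* th->doff is the header length in 32-bit words; anything shorter
         * than the 20-byte minimum TCP header is garbage.
         */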
1639         if (th->doff < sizeof(struct tcphdr)/4)
1640                 goto bad_packet;
1641         if (!pskb_may_pull(skb, th->doff*4))
1642                 goto discard_it;
1643
1644         if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
1645             tcp_v6_checksum_init(skb) < 0)
1646                 goto bad_packet;
1647
1648         th = skb->h.th;
1649         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1650         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1651                                     skb->len - th->doff*4);
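        /* SYN and FIN each consume one sequence number (RFC 793), hence
         * the th->syn + th->fin terms in end_seq above.
         */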
1652         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1653         TCP_SKB_CB(skb)->when = 0;
1654         TCP_SKB_CB(skb)->flags = ipv6_get_dsfield(skb->nh.ipv6h);
1655         TCP_SKB_CB(skb)->sacked = 0;
1656
1657         sk = __tcp_v6_lookup(&skb->nh.ipv6h->saddr, th->source,
1658                              &skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
1659
1660         if (!sk)
1661                 goto no_tcp_socket;
1662
1663 process:
1664         if (sk->sk_state == TCP_TIME_WAIT)
1665                 goto do_time_wait;
1666
1667         if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1668                 goto discard_and_relse;
1669
1670         if (sk_filter(sk, skb, 0))
1671                 goto discard_and_relse;
1672
1673         skb->dev = NULL;
1674
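        /*
         * Three delivery paths from here: if no user context holds the
         * socket lock, the segment either goes to the prequeue (to be
         * processed in the receiving process' context) or is handled
         * immediately; otherwise it is parked on the backlog and processed
         * when the socket lock is released.
         */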
1675         bh_lock_sock(sk);
1676         ret = 0;
1677         if (!sock_owned_by_user(sk)) {
1678                 if (!tcp_prequeue(sk, skb))
1679                         ret = tcp_v6_do_rcv(sk, skb);
1680         } else
1681                 sk_add_backlog(sk, skb);
1682         bh_unlock_sock(sk);
1683
1684         sock_put(sk);
1685         return ret ? -1 : 0;
1686
1687 no_tcp_socket:
1688         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1689                 goto discard_it;
1690
1691         if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1692 bad_packet:
1693                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1694         } else {
1695                 tcp_v6_send_reset(skb);
1696         }
1697
1698 discard_it:
1699
1700         /*
1701          *      Discard frame
1702          */
1703
1704         kfree_skb(skb);
1705         return 0;
1706
1707 discard_and_relse:
1708         sock_put(sk);
1709         goto discard_it;
1710
1711 do_time_wait:
1712         if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1713                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1714                 goto discard_it;
1715         }
1716
1717         if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
1718                 TCP_INC_STATS_BH(TCP_MIB_INERRS);
1719                 tcp_tw_put((struct tcp_tw_bucket *) sk);
1720                 goto discard_it;
1721         }
1722
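        /*
         * A SYN arriving for a connection in TIME_WAIT may start a new
         * incarnation of that connection: if a matching listener still
         * exists, the old bucket is descheduled and the SYN is handed to
         * the listener (the TCP_TW_SYN case below).
         */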
1723         switch (tcp_timewait_state_process((struct tcp_tw_bucket *)sk,
1724                                            skb, th, skb->len)) {
1725         case TCP_TW_SYN:
1726         {
1727                 struct sock *sk2;
1728
1729                 sk2 = tcp_v6_lookup_listener(&skb->nh.ipv6h->daddr, ntohs(th->dest), tcp_v6_iif(skb));
1730                 if (sk2 != NULL) {
1731                         tcp_tw_deschedule((struct tcp_tw_bucket *)sk);
1732                         tcp_tw_put((struct tcp_tw_bucket *)sk);
1733                         sk = sk2;
1734                         goto process;
1735                 }
1736                 /* Fall through to ACK */
1737         }
1738         case TCP_TW_ACK:
1739                 tcp_v6_timewait_ack(sk, skb);
1740                 break;
1741         case TCP_TW_RST:
1742                 goto no_tcp_socket;
1743         case TCP_TW_SUCCESS:;
1744         }
1745         goto discard_it;
1746 }
1747
1748 static int tcp_v6_rebuild_header(struct sock *sk)
1749 {
1750         int err;
1751         struct dst_entry *dst;
1752         struct ipv6_pinfo *np = inet6_sk(sk);
1753
1754         dst = __sk_dst_check(sk, np->dst_cookie);
1755
1756         if (dst == NULL) {
1757                 struct inet_opt *inet = inet_sk(sk);
1758                 struct in6_addr *final_p = NULL, final;
1759                 struct flowi fl;
1760
1761                 memset(&fl, 0, sizeof(fl));
1762                 fl.proto = IPPROTO_TCP;
1763                 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1764                 ipv6_addr_copy(&fl.fl6_src, &np->saddr);
1765                 fl.fl6_flowlabel = np->flow_label;
1766                 fl.oif = sk->sk_bound_dev_if;
1767                 fl.fl_ip_dport = inet->dport;
1768                 fl.fl_ip_sport = inet->sport;
1769
1770                 if (np->opt && np->opt->srcrt) {
1771                         struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
1772                         ipv6_addr_copy(&final, &fl.fl6_dst);
1773                         ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1774                         final_p = &final;
1775                 }
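                /*
                 * With a type 0 routing header the route lookup must
                 * target the first intermediate hop (rt0->addr); the real
                 * destination, saved in 'final' above, is restored into
                 * the flow once the lookup is done.
                 */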
1776
1777                 err = ip6_dst_lookup(sk, &dst, &fl);
1778                 if (err) {
1779                         sk->sk_route_caps = 0;
1780                         return err;
1781                 }
1782                 if (final_p)
1783                         ipv6_addr_copy(&fl.fl6_dst, final_p);
1784
1785                 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
1786                         sk->sk_err_soft = -err;
1787                         dst_release(dst);
1788                         return err;
1789                 }
1790
1791                 ip6_dst_store(sk, dst, NULL);
1792                 sk->sk_route_caps = dst->dev->features &
1793                         ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1794                 tcp_sk(sk)->ext2_header_len = dst->header_len;
1795         }
1796
1797         return 0;
1798 }
1799
1800 static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok)
1801 {
1802         struct sock *sk = skb->sk;
1803         struct inet_opt *inet = inet_sk(sk);
1804         struct ipv6_pinfo *np = inet6_sk(sk);
1805         struct flowi fl;
1806         struct dst_entry *dst;
1807         struct in6_addr *final_p = NULL, final;
1808
1809         memset(&fl, 0, sizeof(fl));
1810         fl.proto = IPPROTO_TCP;
1811         ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1812         ipv6_addr_copy(&fl.fl6_src, &np->saddr);
1813         fl.fl6_flowlabel = np->flow_label;
1814         IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel);
1815         fl.oif = sk->sk_bound_dev_if;
1816         fl.fl_ip_sport = inet->sport;
1817         fl.fl_ip_dport = inet->dport;
1818
1819         if (np->opt && np->opt->srcrt) {
1820                 struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt;
1821                 ipv6_addr_copy(&final, &fl.fl6_dst);
1822                 ipv6_addr_copy(&fl.fl6_dst, rt0->addr);
1823                 final_p = &final;
1824         }
1825
1826         dst = __sk_dst_check(sk, np->dst_cookie);
1827
1828         if (dst == NULL) {
1829                 int err = ip6_dst_lookup(sk, &dst, &fl);
1830
1831                 if (err) {
1832                         sk->sk_err_soft = -err;
1833                         return err;
1834                 }
1835
1836                 if (final_p)
1837                         ipv6_addr_copy(&fl.fl6_dst, final_p);
1838
1839                 if ((err = xfrm_lookup(&dst, &fl, sk, 0)) < 0) {
1840                         sk->sk_route_caps = 0;
1841                         dst_release(dst);
1842                         return err;
1843                 }
1844
1845                 ip6_dst_store(sk, dst, NULL);
1846                 sk->sk_route_caps = dst->dev->features &
1847                         ~(NETIF_F_IP_CSUM | NETIF_F_TSO);
1848                 tcp_sk(sk)->ext2_header_len = dst->header_len;
1849         }
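        /* ip6_dst_store() caches the route on the socket; the
         * __sk_dst_check() above revalidates that cache against
         * np->dst_cookie on every transmit.
         */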
1850
1851         skb->dst = dst_clone(dst);
1852
1853         /* Restore the final destination after routing is done */
1854         ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
1855
1856         return ip6_xmit(sk, skb, &fl, np->opt, 0);
1857 }
1858
1859 static void v6_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
1860 {
1861         struct ipv6_pinfo *np = inet6_sk(sk);
1862         struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;
1863
1864         sin6->sin6_family = AF_INET6;
1865         ipv6_addr_copy(&sin6->sin6_addr, &np->daddr);
1866         sin6->sin6_port = inet_sk(sk)->dport;
1867         /* We do not store the received flowlabel for TCP */
1868         sin6->sin6_flowinfo = 0;
1869         sin6->sin6_scope_id = 0;
1870         if (sk->sk_bound_dev_if &&
1871             ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
1872                 sin6->sin6_scope_id = sk->sk_bound_dev_if;
1873 }
1874
1875 static int tcp_v6_remember_stamp(struct sock *sk)
1876 {
1877         /* Alas, not yet... */
1878         return 0;
1879 }
1880
1881 static struct tcp_func ipv6_specific = {
1882         .queue_xmit     =       tcp_v6_xmit,
1883         .send_check     =       tcp_v6_send_check,
1884         .rebuild_header =       tcp_v6_rebuild_header,
1885         .conn_request   =       tcp_v6_conn_request,
1886         .syn_recv_sock  =       tcp_v6_syn_recv_sock,
1887         .remember_stamp =       tcp_v6_remember_stamp,
1888         .net_header_len =       sizeof(struct ipv6hdr),
1889
1890         .setsockopt     =       ipv6_setsockopt,
1891         .getsockopt     =       ipv6_getsockopt,
1892         .addr2sockaddr  =       v6_addr2sockaddr,
1893         .sockaddr_len   =       sizeof(struct sockaddr_in6)
1894 };
1895
1896 /*
1897  *      TCP over IPv4 via INET6 API
1898  */
1899
1900 static struct tcp_func ipv6_mapped = {
1901         .queue_xmit     =       ip_queue_xmit,
1902         .send_check     =       tcp_v4_send_check,
1903         .rebuild_header =       tcp_v4_rebuild_header,
1904         .conn_request   =       tcp_v6_conn_request,
1905         .syn_recv_sock  =       tcp_v6_syn_recv_sock,
1906         .remember_stamp =       tcp_v4_remember_stamp,
1907         .net_header_len =       sizeof(struct iphdr),
1908
1909         .setsockopt     =       ipv6_setsockopt,
1910         .getsockopt     =       ipv6_getsockopt,
1911         .addr2sockaddr  =       v6_addr2sockaddr,
1912         .sockaddr_len   =       sizeof(struct sockaddr_in6)
1913 };
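/*
 * A socket is switched from ipv6_specific to ipv6_mapped when it connects
 * to (or accepts a connection from) an IPv4-mapped IPv6 address
 * (::ffff:a.b.c.d): traffic then flows through the IPv4 stack while the
 * socket keeps its AF_INET6 API.
 */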
1914
1915
1916
1917 /* NOTE: A lot of things are set to zero explicitly by the call to
1918  *       sk_alloc(), so they need not be done here.
1919  */
1920 static int tcp_v6_init_sock(struct sock *sk)
1921 {
1922         struct tcp_opt *tp = tcp_sk(sk);
1923
1924         skb_queue_head_init(&tp->out_of_order_queue);
1925         tcp_init_xmit_timers(sk);
1926         tcp_prequeue_init(tp);
1927
1928         tp->rto  = TCP_TIMEOUT_INIT;
1929         tp->mdev = TCP_TIMEOUT_INIT;
1930
1931         /* So many TCP implementations out there (incorrectly) count the
1932          * initial SYN frame in their delayed-ACK and congestion control
1933          * algorithms that we must have the following bandaid to talk
1934          * efficiently to them.  -DaveM
1935          */
1936         tp->snd_cwnd = 2;
1937
1938         /* See draft-stevens-tcpca-spec-01 for discussion of the
1939          * initialization of these values.
1940          */
1941         tp->snd_ssthresh = 0x7fffffff;
1942         tp->snd_cwnd_clamp = ~0;
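        /* 536 is the conservative RFC 1122 default MSS (a 576-byte
         * datagram minus 40 bytes of headers); it is replaced by
         * tcp_sync_mss() once a real route and its PMTU are known.
         */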
1943         tp->mss_cache_std = tp->mss_cache = 536;
1944
1945         tp->reordering = sysctl_tcp_reordering;
1946
1947         sk->sk_state = TCP_CLOSE;
1948
1949         tp->af_specific = &ipv6_specific;
1950
1951         sk->sk_write_space = sk_stream_write_space;
1952         sk->sk_use_write_queue = 1;
1953
1954         sk->sk_sndbuf = sysctl_tcp_wmem[1];
1955         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
1956
1957         atomic_inc(&tcp_sockets_allocated);
1958
1959         return 0;
1960 }
1961
1962 static int tcp_v6_destroy_sock(struct sock *sk)
1963 {
1964         extern int tcp_v4_destroy_sock(struct sock *sk);
1965
1966         tcp_v4_destroy_sock(sk);
1967         return inet6_destroy_sock(sk);
1968 }
1969
1970 /* Proc filesystem TCPv6 sock list dumping. */
1971 static void get_openreq6(struct seq_file *seq, 
1972                          struct sock *sk, struct open_request *req, int i, int uid)
1973 {
1974         struct in6_addr *dest, *src;
1975         int ttd = req->expires - jiffies;
1976
1977         if (ttd < 0)
1978                 ttd = 0;
1979
1980         src = &req->af.v6_req.loc_addr;
1981         dest = &req->af.v6_req.rmt_addr;
1982         seq_printf(seq,
1983                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
1984                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
1985                    i,
1986                    src->s6_addr32[0], src->s6_addr32[1],
1987                    src->s6_addr32[2], src->s6_addr32[3],
1988                    ntohs(inet_sk(sk)->sport),
1989                    dest->s6_addr32[0], dest->s6_addr32[1],
1990                    dest->s6_addr32[2], dest->s6_addr32[3],
1991                    ntohs(req->rmt_port),
1992                    TCP_SYN_RECV,
1993                    0, 0, /* could print option size, but that is af dependent. */
1994                    1,   /* timers active (only the expire timer) */  
1995                    jiffies_to_clock_t(ttd), 
1996                    req->retrans,
1997                    uid,
1998                    0,  /* non standard timer */  
1999                    0, /* open_requests have no inode */
2000                    0, req);
2001 }
2002
2003 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2004 {
2005         struct in6_addr *dest, *src;
2006         __u16 destp, srcp;
2007         int timer_active;
2008         unsigned long timer_expires;
2009         struct inet_opt *inet = inet_sk(sp);
2010         struct tcp_opt *tp = tcp_sk(sp);
2011         struct ipv6_pinfo *np = inet6_sk(sp);
2012
2013         dest  = &np->daddr;
2014         src   = &np->rcv_saddr;
2015         destp = ntohs(inet->dport);
2016         srcp  = ntohs(inet->sport);
2017         if (tp->pending == TCP_TIME_RETRANS) {
2018                 timer_active    = 1;
2019                 timer_expires   = tp->timeout;
2020         } else if (tp->pending == TCP_TIME_PROBE0) {
2021                 timer_active    = 4;
2022                 timer_expires   = tp->timeout;
2023         } else if (timer_pending(&sp->sk_timer)) {
2024                 timer_active    = 2;
2025                 timer_expires   = sp->sk_timer.expires;
2026         } else {
2027                 timer_active    = 0;
2028                 timer_expires = jiffies;
2029         }
2030
2031         seq_printf(seq,
2032                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2033                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
2034                    i,
2035                    src->s6_addr32[0], src->s6_addr32[1],
2036                    src->s6_addr32[2], src->s6_addr32[3], srcp,
2037                    dest->s6_addr32[0], dest->s6_addr32[1],
2038                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
2039                    sp->sk_state, 
2040                    tp->write_seq - tp->snd_una, tp->rcv_nxt - tp->copied_seq,
2041                    timer_active,
2042                    jiffies_to_clock_t(timer_expires - jiffies),
2043                    tp->retransmits,
2044                    sock_i_uid(sp),
2045                    tp->probes_out,
2046                    sock_i_ino(sp),
2047                    atomic_read(&sp->sk_refcnt), sp,
2048                    tp->rto, tp->ack.ato, (tp->ack.quick << 1) | tp->ack.pingpong,
2049                    tp->snd_cwnd, tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh
2050                    );
2051 }
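/*
 * For reference, a row emitted by get_tcp6_sock() might look like this
 * (values invented for illustration, wrapped here for readability; the
 * real output is a single line, and each s6_addr32 word is printed in
 * host byte order):
 *
 *    0: 00000000000000000000000000000000:0016 00000000000000000000000000000000:0000
 *       0A 00000000:00000000 00:00000000 00000000     0        0 4711 1 ffff810002e60000 300 0 0 2 -1
 *
 * i.e. a listener bound to [::]:22 (port 0x16), state 0A == TCP_LISTEN.
 */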
2052
2053 static void get_timewait6_sock(struct seq_file *seq, 
2054                                struct tcp_tw_bucket *tw, int i)
2055 {
2056         struct in6_addr *dest, *src;
2057         __u16 destp, srcp;
2058         int ttd = tw->tw_ttd - jiffies;
2059
2060         if (ttd < 0)
2061                 ttd = 0;
2062
2063         dest  = &tw->tw_v6_daddr;
2064         src   = &tw->tw_v6_rcv_saddr;
2065         destp = ntohs(tw->tw_dport);
2066         srcp  = ntohs(tw->tw_sport);
2067
2068         seq_printf(seq,
2069                    "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
2070                    "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p\n",
2071                    i,
2072                    src->s6_addr32[0], src->s6_addr32[1],
2073                    src->s6_addr32[2], src->s6_addr32[3], srcp,
2074                    dest->s6_addr32[0], dest->s6_addr32[1],
2075                    dest->s6_addr32[2], dest->s6_addr32[3], destp,
2076                    tw->tw_substate, 0, 0,
2077                    3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2078                    atomic_read(&tw->tw_refcnt), tw);
2079 }
2080
2081 #ifdef CONFIG_PROC_FS
2082 static int tcp6_seq_show(struct seq_file *seq, void *v)
2083 {
2084         struct tcp_iter_state *st;
2085
2086         if (v == SEQ_START_TOKEN) {
2087                 seq_puts(seq,
2088                          "  sl  "
2089                          "local_address                         "
2090                          "remote_address                        "
2091                          "st tx_queue rx_queue tr tm->when retrnsmt"
2092                          "   uid  timeout inode\n");
2093                 goto out;
2094         }
2095         st = seq->private;
2096
2097         switch (st->state) {
2098         case TCP_SEQ_STATE_LISTENING:
2099         case TCP_SEQ_STATE_ESTABLISHED:
2100                 get_tcp6_sock(seq, v, st->num);
2101                 break;
2102         case TCP_SEQ_STATE_OPENREQ:
2103                 get_openreq6(seq, st->syn_wait_sk, v, st->num, st->uid);
2104                 break;
2105         case TCP_SEQ_STATE_TIME_WAIT:
2106                 get_timewait6_sock(seq, v, st->num);
2107                 break;
2108         }
2109 out:
2110         return 0;
2111 }
2112
2113 static struct file_operations tcp6_seq_fops;
2114 static struct tcp_seq_afinfo tcp6_seq_afinfo = {
2115         .owner          = THIS_MODULE,
2116         .name           = "tcp6",
2117         .family         = AF_INET6,
2118         .seq_show       = tcp6_seq_show,
2119         .seq_fops       = &tcp6_seq_fops,
2120 };
2121
2122 int __init tcp6_proc_init(void)
2123 {
2124         return tcp_proc_register(&tcp6_seq_afinfo);
2125 }
2126
2127 void tcp6_proc_exit(void)
2128 {
2129         tcp_proc_unregister(&tcp6_seq_afinfo);
2130 }
2131 #endif
2132
2133 struct proto tcpv6_prot = {
2134         .name                   = "TCPv6",
2135         .owner                  = THIS_MODULE,
2136         .close                  = tcp_close,
2137         .connect                = tcp_v6_connect,
2138         .disconnect             = tcp_disconnect,
2139         .accept                 = tcp_accept,
2140         .ioctl                  = tcp_ioctl,
2141         .init                   = tcp_v6_init_sock,
2142         .destroy                = tcp_v6_destroy_sock,
2143         .shutdown               = tcp_shutdown,
2144         .setsockopt             = tcp_setsockopt,
2145         .getsockopt             = tcp_getsockopt,
2146         .sendmsg                = tcp_sendmsg,
2147         .recvmsg                = tcp_recvmsg,
2148         .backlog_rcv            = tcp_v6_do_rcv,
2149         .hash                   = tcp_v6_hash,
2150         .unhash                 = tcp_unhash,
2151         .get_port               = tcp_v6_get_port,
2152         .enter_memory_pressure  = tcp_enter_memory_pressure,
2153         .sockets_allocated      = &tcp_sockets_allocated,
2154         .memory_allocated       = &tcp_memory_allocated,
2155         .memory_pressure        = &tcp_memory_pressure,
2156         .sysctl_mem             = sysctl_tcp_mem,
2157         .sysctl_wmem            = sysctl_tcp_wmem,
2158         .sysctl_rmem            = sysctl_tcp_rmem,
2159         .max_header             = MAX_TCP_HEADER,
2160         .slab_obj_size          = sizeof(struct tcp6_sock),
2161 };
2162
2163 static struct inet6_protocol tcpv6_protocol = {
2164         .handler        =       tcp_v6_rcv,
2165         .err_handler    =       tcp_v6_err,
2166         .flags          =       INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2167 };
2168
2169 extern struct proto_ops inet6_stream_ops;
2170
2171 static struct inet_protosw tcpv6_protosw = {
2172         .type           =       SOCK_STREAM,
2173         .protocol       =       IPPROTO_TCP,
2174         .prot           =       &tcpv6_prot,
2175         .ops            =       &inet6_stream_ops,
2176         .capability     =       -1,
2177         .no_check       =       0,
2178         .flags          =       INET_PROTOSW_PERMANENT,
2179 };
2180
2181 void __init tcpv6_init(void)
2182 {
2183         /* register inet6 protocol */
2184         if (inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP) < 0)
2185                 printk(KERN_ERR "tcpv6_init: Could not register protocol\n");
2186         inet6_register_protosw(&tcpv6_protosw);
2187 }