1 /*
2  *      Linux NET3:     Internet Group Management Protocol  [IGMP]
3  *
4  *      This code implements the IGMP protocol as defined in RFC1112. There has
5  *      since been a further revision of this protocol, which is now supported.
6  *
7  *      If you have trouble with this module, be careful which gcc you used;
8  *      the older version didn't come out right with gcc 2.5.8, and the newer
9  *      one seems to fall out with gcc 2.6.2.
10  *
11  *      Version: $Id: igmp.c,v 1.47 2002/02/01 22:01:03 davem Exp $
12  *
13  *      Authors:
14  *              Alan Cox <Alan.Cox@linux.org>
15  *
16  *      This program is free software; you can redistribute it and/or
17  *      modify it under the terms of the GNU General Public License
18  *      as published by the Free Software Foundation; either version
19  *      2 of the License, or (at your option) any later version.
20  *
21  *      Fixes:
22  *
23  *              Alan Cox        :       Added lots of __inline__ to optimise
24  *                                      the memory usage of all the tiny little
25  *                                      functions.
26  *              Alan Cox        :       Dumped the header building experiment.
27  *              Alan Cox        :       Minor tweaks ready for multicast routing
28  *                                      and extended IGMP protocol.
29  *              Alan Cox        :       Removed a load of inline directives. Gcc 2.5.8
30  *                                      writes utterly bogus code otherwise (sigh)
31  *                                      fixed IGMP loopback to behave in the manner
32  *                                      desired by mrouted, fixed the fact it has been
33  *                                      broken since 1.3.6 and cleaned up a few minor
34  *                                      points.
35  *
36  *              Chih-Jen Chang  :       Tried to revise IGMP to Version 2
37  *              Tsu-Sheng Tsao          E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu
38  *                                      The enhancements are mainly based on Steve Deering's 
39  *                                      ipmulti-3.5 source code.
40  *              Chih-Jen Chang  :       Added the igmp_get_mrouter_info and
41  *              Tsu-Sheng Tsao          igmp_set_mrouter_info to keep track of
42  *                                      the mrouted version on that device.
43  *              Chih-Jen Chang  :       Added the max_resp_time parameter to
44  *              Tsu-Sheng Tsao          igmp_heard_query(). Using this parameter
45  *                                      to identify the multicast router version
46  *                                      and do what the IGMP version 2 specified.
47  *              Chih-Jen Chang  :       Added a timer to revert to IGMP V2 router
48  *              Tsu-Sheng Tsao          if the specified time expired.
49  *              Alan Cox        :       Stop IGMP from 0.0.0.0 being accepted.
50  *              Alan Cox        :       Use GFP_ATOMIC in the right places.
51  *              Christian Daudt :       igmp timer wasn't set for local group
52  *                                      memberships but was being deleted, 
53  *                                      which caused a "del_timer() called 
54  *                                      from %p with timer not initialized\n"
55  *                                      message (960131).
56  *              Christian Daudt :       removed del_timer from 
57  *                                      igmp_timer_expire function (960205).
58  *              Christian Daudt :       igmp_heard_report now only calls
59  *                                      igmp_timer_expire if tm->running is
60  *                                      true (960216).
61  *              Malcolm Beattie :       ttl comparison wrong in igmp_rcv made
62  *                                      igmp_heard_query never trigger. Expiry
63  *                                      miscalculation fixed in igmp_heard_query
64  *                                      and random() made to return unsigned to
65  *                                      prevent negative expiry times.
66  *              Alexey Kuznetsov:       Wrong group leaving behaviour, backport
67  *                                      fix from pending 2.1.x patches.
68  *              Alan Cox:               Forgot to enable FDDI support earlier.
69  *              Alexey Kuznetsov:       Fixed leaving groups on device down.
70  *              Alexey Kuznetsov:       Accordance with the igmp-v2-06 draft.
71  *              David L Stevens:        IGMPv3 support, with help from
72  *                                      Vinay Kulkarni
73  */
74
75 #include <linux/config.h>
76 #include <linux/module.h>
77 #include <asm/uaccess.h>
78 #include <asm/system.h>
79 #include <linux/types.h>
80 #include <linux/kernel.h>
81 #include <linux/jiffies.h>
82 #include <linux/string.h>
83 #include <linux/socket.h>
84 #include <linux/sockios.h>
85 #include <linux/in.h>
86 #include <linux/inet.h>
87 #include <linux/netdevice.h>
88 #include <linux/skbuff.h>
89 #include <linux/inetdevice.h>
90 #include <linux/igmp.h>
91 #include <linux/if_arp.h>
92 #include <linux/rtnetlink.h>
93 #include <linux/times.h>
94 #include <net/ip.h>
95 #include <net/protocol.h>
96 #include <net/route.h>
97 #include <net/sock.h>
98 #include <net/checksum.h>
99 #include <linux/netfilter_ipv4.h>
100 #ifdef CONFIG_IP_MROUTE
101 #include <linux/mroute.h>
102 #endif
103 #ifdef CONFIG_PROC_FS
104 #include <linux/proc_fs.h>
105 #include <linux/seq_file.h>
106 #endif
107
108 #define IP_MAX_MEMBERSHIPS      20
109 #define IP_MAX_MSF              10
110
111 #ifdef CONFIG_IP_MULTICAST
112 /* Parameter names and values are taken from igmp-v2-06 draft */
113
114 #define IGMP_V1_Router_Present_Timeout          (400*HZ)
115 #define IGMP_V2_Router_Present_Timeout          (400*HZ)
116 #define IGMP_Unsolicited_Report_Interval        (10*HZ)
117 #define IGMP_Query_Response_Interval            (10*HZ)
118 #define IGMP_Unsolicited_Report_Count           2
119
120
121 #define IGMP_Initial_Report_Delay               (1)
122
123 /* IGMP_Initial_Report_Delay is not from the IGMP specs!
124  * The IGMP specs require reporting membership immediately after
125  * joining a group, but we delay the first report by a
126  * small interval. It seems more natural and still does not
127  * contradict the specs, provided this delay is small enough.
128  */
129
130 #define IGMP_V1_SEEN(in_dev) (ipv4_devconf.force_igmp_version == 1 || \
131                 (in_dev)->cnf.force_igmp_version == 1 || \
132                 ((in_dev)->mr_v1_seen && \
133                 time_before(jiffies, (in_dev)->mr_v1_seen)))
134 #define IGMP_V2_SEEN(in_dev) (ipv4_devconf.force_igmp_version == 2 || \
135                 (in_dev)->cnf.force_igmp_version == 2 || \
136                 ((in_dev)->mr_v2_seen && \
137                 time_before(jiffies, (in_dev)->mr_v2_seen)))
138
139 static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
140 static void igmpv3_del_delrec(struct in_device *in_dev, __u32 multiaddr);
141 static void igmpv3_clear_delrec(struct in_device *in_dev);
142 static int sf_setstate(struct ip_mc_list *pmc);
143 static void sf_markstate(struct ip_mc_list *pmc);
144 #endif
145 static void ip_mc_clear_src(struct ip_mc_list *pmc);
146 int ip_mc_add_src(struct in_device *in_dev, __u32 *pmca, int sfmode,
147         int sfcount, __u32 *psfsrc, int delta);
148
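/*
 * Drop one reference on an ip_mc_list entry; when the last reference goes,
 * release the hold on the owning in_device and free the entry. Each pending
 * timer on the entry also holds a reference (see the timer helpers below),
 * so a group cannot be freed while a report timer is still armed.
 */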
149 static void ip_ma_put(struct ip_mc_list *im)
150 {
151         if (atomic_dec_and_test(&im->refcnt)) {
152                 in_dev_put(im->interface);
153                 kfree(im);
154         }
155 }
156
157 #ifdef CONFIG_IP_MULTICAST
158
159 /*
160  *      Timer management
161  */
162
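/*
 * Refcounting rule for the report timer: a pending timer holds one
 * reference on its ip_mc_list. igmp_stop_timer() drops that reference when
 * it actually deletes a pending timer; igmp_start_timer() takes a reference
 * only when mod_timer() reports the timer was not already pending. The
 * random offset spreads reports across the interval allowed by max_delay.
 */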
163 static __inline__ void igmp_stop_timer(struct ip_mc_list *im)
164 {
165         spin_lock_bh(&im->lock);
166         if (del_timer(&im->timer))
167                 atomic_dec(&im->refcnt);
168         im->tm_running=0;
169         im->reporter = 0;
170         im->unsolicit_count = 0;
171         spin_unlock_bh(&im->lock);
172 }
173
174 /* It must be called with locked im->lock */
175 static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
176 {
177         int tv=net_random() % max_delay;
178
179         im->tm_running=1;
180         if (!mod_timer(&im->timer, jiffies+tv+2))
181                 atomic_inc(&im->refcnt);
182 }
183
184 static void igmp_gq_start_timer(struct in_device *in_dev)
185 {
186         int tv = net_random() % in_dev->mr_maxdelay;
187
188         in_dev->mr_gq_running = 1;
189         if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
190                 in_dev_hold(in_dev);
191 }
192
193 static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
194 {
195         int tv = net_random() % delay;
196
197         if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
198                 in_dev_hold(in_dev);
199 }
200
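/*
 * Reschedule the report timer in response to a query, but never push an
 * already-pending timer further out: if the remaining time is below
 * max_delay the old expiry is kept, otherwise a new random delay below
 * max_delay is chosen.
 */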
201 static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
202 {
203         spin_lock_bh(&im->lock);
204         im->unsolicit_count = 0;
205         if (del_timer(&im->timer)) {
206                 if ((long)(im->timer.expires-jiffies) < max_delay) {
207                         add_timer(&im->timer);
208                         im->tm_running=1;
209                         spin_unlock_bh(&im->lock);
210                         return;
211                 }
212                 atomic_dec(&im->refcnt);
213         }
214         igmp_start_timer(im, max_delay);
215         spin_unlock_bh(&im->lock);
216 }
217
218
219 /*
220  *      Send an IGMP report.
221  */
222
223 #define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)
224
225
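/*
 * Decide whether source 'psf' should be listed in a group record of the
 * given IGMPv3 type for 'pmc'. 'gdeleted' and 'sdeleted' indicate that the
 * group resp. source comes from a tombstone (deleted) list rather than the
 * live one; sf_crcount carries the pending retransmissions for the
 * ALLOW/BLOCK change records.
 */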
226 static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
227         int gdeleted, int sdeleted)
228 {
229         switch (type) {
230         case IGMPV3_MODE_IS_INCLUDE:
231         case IGMPV3_MODE_IS_EXCLUDE:
232                 if (gdeleted || sdeleted)
233                         return 0;
234                 return !(pmc->gsquery && !psf->sf_gsresp);
235         case IGMPV3_CHANGE_TO_INCLUDE:
236                 if (gdeleted || sdeleted)
237                         return 0;
238                 return psf->sf_count[MCAST_INCLUDE] != 0;
239         case IGMPV3_CHANGE_TO_EXCLUDE:
240                 if (gdeleted || sdeleted)
241                         return 0;
242                 if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
243                     psf->sf_count[MCAST_INCLUDE])
244                         return 0;
245                 return pmc->sfcount[MCAST_EXCLUDE] ==
246                         psf->sf_count[MCAST_EXCLUDE];
247         case IGMPV3_ALLOW_NEW_SOURCES:
248                 if (gdeleted || !psf->sf_crcount)
249                         return 0;
250                 return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
251         case IGMPV3_BLOCK_OLD_SOURCES:
252                 if (pmc->sfmode == MCAST_INCLUDE)
253                         return gdeleted || (psf->sf_crcount && sdeleted);
254                 return psf->sf_crcount && !gdeleted && !sdeleted;
255         }
256         return 0;
257 }
258
259 static int
260 igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
261 {
262         struct ip_sf_list *psf;
263         int scount = 0;
264
265         for (psf=pmc->sources; psf; psf=psf->sf_next) {
266                 if (!is_in(pmc, psf, type, gdeleted, sdeleted))
267                         continue;
268                 scount++;
269         }
270         return scount;
271 }
272
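/*
 * Allocate and prime a fresh IGMPv3 report packet: route it to the
 * all-IGMPv3-capable-routers address (IGMPV3_ALL_MCR), build an IP header
 * carrying the Router Alert option (the extra 4 bytes: IPOPT_RA, 4, 0, 0)
 * and append an empty report header. The total length and both checksums
 * are filled in later by igmpv3_sendpack().
 */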
273 static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
274 {
275         struct sk_buff *skb;
276         struct rtable *rt;
277         struct iphdr *pip;
278         struct igmpv3_report *pig;
279
280         skb = alloc_skb(size + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
281         if (skb == NULL)
282                 return 0;
283
284         {
285                 struct flowi fl = { .oif = dev->ifindex,
286                                     .nl_u = { .ip4_u = {
287                                     .daddr = IGMPV3_ALL_MCR } },
288                                     .proto = IPPROTO_IGMP };
289                 if (ip_route_output_key(&rt, &fl)) {
290                         kfree_skb(skb);
291                         return 0;
292                 }
293         }
294         if (rt->rt_src == 0) {
295                 ip_rt_put(rt);
296                 return 0;
297         }
298
299         skb->dst = &rt->u.dst;
300         skb->dev = dev;
301
302         skb_reserve(skb, LL_RESERVED_SPACE(dev));
303
304         skb->nh.iph = pip =(struct iphdr *)skb_put(skb, sizeof(struct iphdr)+4);
305
306         pip->version  = 4;
307         pip->ihl      = (sizeof(struct iphdr)+4)>>2;
308         pip->tos      = 0xc0;
309         pip->frag_off = htons(IP_DF);
310         pip->ttl      = 1;
311         pip->daddr    = rt->rt_dst;
312         pip->saddr    = rt->rt_src;
313         pip->protocol = IPPROTO_IGMP;
314         pip->tot_len  = 0;      /* filled in later */
315         ip_select_ident(pip, &rt->u.dst, NULL);
316         ((u8*)&pip[1])[0] = IPOPT_RA;
317         ((u8*)&pip[1])[1] = 4;
318         ((u8*)&pip[1])[2] = 0;
319         ((u8*)&pip[1])[3] = 0;
320
321         pig =(struct igmpv3_report *)skb_put(skb, sizeof(*pig));
322         skb->h.igmph = (struct igmphdr *)pig;
323         pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
324         pig->resv1 = 0;
325         pig->csum = 0;
326         pig->resv2 = 0;
327         pig->ngrec = 0;
328         return skb;
329 }
330
331 static int igmpv3_sendpack(struct sk_buff *skb)
332 {
333         struct iphdr *pip = skb->nh.iph;
334         struct igmphdr *pig = skb->h.igmph;
335         int iplen, igmplen;
336
337         iplen = skb->tail - (unsigned char *)skb->nh.iph;
338         pip->tot_len = htons(iplen);
339         ip_send_check(pip);
340
341         igmplen = skb->tail - (unsigned char *)skb->h.igmph;
342         pig->csum = ip_compute_csum((void *)skb->h.igmph, igmplen);
343
344         return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, skb->dev,
345                        dst_output);
346 }
347
348 static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
349 {
350         return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc,type,gdel,sdel);
351 }
352
353 static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
354         int type, struct igmpv3_grec **ppgr)
355 {
356         struct net_device *dev = pmc->interface->dev;
357         struct igmpv3_report *pih;
358         struct igmpv3_grec *pgr;
359
360         if (!skb)
361                 skb = igmpv3_newpack(dev, dev->mtu);
362         if (!skb)
363                 return 0;
364         pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
365         pgr->grec_type = type;
366         pgr->grec_auxwords = 0;
367         pgr->grec_nsrcs = 0;
368         pgr->grec_mca = pmc->multiaddr;
369         pih = (struct igmpv3_report *)skb->h.igmph;
370         pih->ngrec = htons(ntohs(pih->ngrec)+1);
371         *ppgr = pgr;
372         return skb;
373 }
374
375 #define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
376         skb_tailroom(skb)) : 0)
377
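/*
 * Append a group record of 'type' for 'pmc' to 'skb', starting or flushing
 * packets as the device MTU requires (AVAILABLE() above gives the room
 * left). With 'sdeleted' the tombstone source list is walked instead of the
 * live one, and ALLOW/BLOCK sources whose retransmit count drops to zero
 * are unlinked and freed.
 */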
378 static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
379         int type, int gdeleted, int sdeleted)
380 {
381         struct net_device *dev = pmc->interface->dev;
382         struct igmpv3_report *pih;
383         struct igmpv3_grec *pgr = 0;
384         struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
385         int scount, first, isquery, truncate;
386
387         if (pmc->multiaddr == IGMP_ALL_HOSTS)
388                 return skb;
389
390         isquery = type == IGMPV3_MODE_IS_INCLUDE ||
391                   type == IGMPV3_MODE_IS_EXCLUDE;
392         truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
393                     type == IGMPV3_CHANGE_TO_EXCLUDE;
394
395         psf_list = sdeleted ? &pmc->tomb : &pmc->sources;
396
397         if (!*psf_list) {
398                 if (type == IGMPV3_ALLOW_NEW_SOURCES ||
399                     type == IGMPV3_BLOCK_OLD_SOURCES)
400                         return skb;
401                 if (pmc->crcount || isquery) {
402                         /* make sure we have room for group header and at
403                          * least one source.
404                          */
405                         if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)+
406                             sizeof(__u32)) {
407                                 igmpv3_sendpack(skb);
408                                 skb = 0; /* add_grhead will get a new one */
409                         }
410                         skb = add_grhead(skb, pmc, type, &pgr);
411                 }
412                 return skb;
413         }
414         pih = skb ? (struct igmpv3_report *)skb->h.igmph : 0;
415
416         /* EX and TO_EX get a fresh packet, if needed */
417         if (truncate) {
418                 if (pih && pih->ngrec &&
419                     AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
420                         if (skb)
421                                 igmpv3_sendpack(skb);
422                         skb = igmpv3_newpack(dev, dev->mtu);
423                 }
424         }
425         first = 1;
426         scount = 0;
427         psf_prev = 0;
428         for (psf=*psf_list; psf; psf=psf_next) {
429                 u32 *psrc;
430
431                 psf_next = psf->sf_next;
432
433                 if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
434                         psf_prev = psf;
435                         continue;
436                 }
437
438                 /* clear marks on query responses */
439                 if (isquery)
440                         psf->sf_gsresp = 0;
441
442                 if (AVAILABLE(skb) < sizeof(u32) +
443                     first*sizeof(struct igmpv3_grec)) {
444                         if (truncate && !first)
445                                 break;   /* truncate these */
446                         if (pgr)
447                                 pgr->grec_nsrcs = htons(scount);
448                         if (skb)
449                                 igmpv3_sendpack(skb);
450                         skb = igmpv3_newpack(dev, dev->mtu);
451                         first = 1;
452                         scount = 0;
453                 }
454                 if (first) {
455                         skb = add_grhead(skb, pmc, type, &pgr);
456                         first = 0;
457                 }
458                 psrc = (u32 *)skb_put(skb, sizeof(u32));
459                 *psrc = psf->sf_inaddr;
460                 scount++;
461                 if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
462                      type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
463                         psf->sf_crcount--;
464                         if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
465                                 if (psf_prev)
466                                         psf_prev->sf_next = psf->sf_next;
467                                 else
468                                         *psf_list = psf->sf_next;
469                                 kfree(psf);
470                                 continue;
471                         }
472                 }
473                 psf_prev = psf;
474         }
475         if (pgr)
476                 pgr->grec_nsrcs = htons(scount);
477
478         if (isquery)
479                 pmc->gsquery = 0;       /* clear query state on report */
480         return skb;
481 }
482
483 static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
484 {
485         struct sk_buff *skb = 0;
486         int type;
487
488         if (!pmc) {
489                 read_lock(&in_dev->lock);
490                 for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
491                         if (pmc->multiaddr == IGMP_ALL_HOSTS)
492                                 continue;
493                         spin_lock_bh(&pmc->lock);
494                         if (pmc->sfcount[MCAST_EXCLUDE])
495                                 type = IGMPV3_MODE_IS_EXCLUDE;
496                         else
497                                 type = IGMPV3_MODE_IS_INCLUDE;
498                         skb = add_grec(skb, pmc, type, 0, 0);
499                         spin_unlock_bh(&pmc->lock);
500                 }
501                 read_unlock(&in_dev->lock);
502         } else {
503                 spin_lock_bh(&pmc->lock);
504                 if (pmc->sfcount[MCAST_EXCLUDE])
505                         type = IGMPV3_MODE_IS_EXCLUDE;
506                 else
507                         type = IGMPV3_MODE_IS_INCLUDE;
508                 skb = add_grec(skb, pmc, type, 0, 0);
509                 spin_unlock_bh(&pmc->lock);
510         }
511         if (!skb)
512                 return 0;
513         return igmpv3_sendpack(skb);
514 }
515
516 /*
517  * remove zero-count source records from a source filter list
518  */
519 static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
520 {
521         struct ip_sf_list *psf_prev, *psf_next, *psf;
522
523         psf_prev = 0;
524         for (psf=*ppsf; psf; psf = psf_next) {
525                 psf_next = psf->sf_next;
526                 if (psf->sf_crcount == 0) {
527                         if (psf_prev)
528                                 psf_prev->sf_next = psf->sf_next;
529                         else
530                                 *ppsf = psf->sf_next;
531                         kfree(psf);
532                 } else
533                         psf_prev = psf;
534         }
535 }
536
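/*
 * Build and send the pending IGMPv3 change reports for an interface:
 * first records for groups on the tomb (deleted) list, freeing entries
 * whose retransmit count has run out, then ALLOW/BLOCK source changes and
 * filter-mode changes for the live groups. Records are packed into packets
 * as they fill and the final packet is sent at the end.
 */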
537 static void igmpv3_send_cr(struct in_device *in_dev)
538 {
539         struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
540         struct sk_buff *skb = 0;
541         int type, dtype;
542
543         read_lock(&in_dev->lock);
544         write_lock_bh(&in_dev->mc_lock);
545
546         /* deleted MCA's */
547         pmc_prev = 0;
548         for (pmc=in_dev->mc_tomb; pmc; pmc=pmc_next) {
549                 pmc_next = pmc->next;
550                 if (pmc->sfmode == MCAST_INCLUDE) {
551                         type = IGMPV3_BLOCK_OLD_SOURCES;
552                         dtype = IGMPV3_BLOCK_OLD_SOURCES;
553                         skb = add_grec(skb, pmc, type, 1, 0);
554                         skb = add_grec(skb, pmc, dtype, 1, 1);
555                 }
556                 if (pmc->crcount) {
557                         pmc->crcount--;
558                         if (pmc->sfmode == MCAST_EXCLUDE) {
559                                 type = IGMPV3_CHANGE_TO_INCLUDE;
560                                 skb = add_grec(skb, pmc, type, 1, 0);
561                         }
562                         if (pmc->crcount == 0) {
563                                 igmpv3_clear_zeros(&pmc->tomb);
564                                 igmpv3_clear_zeros(&pmc->sources);
565                         }
566                 }
567                 if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
568                         if (pmc_prev)
569                                 pmc_prev->next = pmc_next;
570                         else
571                                 in_dev->mc_tomb = pmc_next;
572                         in_dev_put(pmc->interface);
573                         kfree(pmc);
574                 } else
575                         pmc_prev = pmc;
576         }
577         write_unlock_bh(&in_dev->mc_lock);
578
579         /* change recs */
580         for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
581                 spin_lock_bh(&pmc->lock);
582                 if (pmc->sfcount[MCAST_EXCLUDE]) {
583                         type = IGMPV3_BLOCK_OLD_SOURCES;
584                         dtype = IGMPV3_ALLOW_NEW_SOURCES;
585                 } else {
586                         type = IGMPV3_ALLOW_NEW_SOURCES;
587                         dtype = IGMPV3_BLOCK_OLD_SOURCES;
588                 }
589                 skb = add_grec(skb, pmc, type, 0, 0);
590                 skb = add_grec(skb, pmc, dtype, 0, 1);  /* deleted sources */
591
592                 /* filter mode changes */
593                 if (pmc->crcount) {
594                         pmc->crcount--;
595                         if (pmc->sfmode == MCAST_EXCLUDE)
596                                 type = IGMPV3_CHANGE_TO_EXCLUDE;
597                         else
598                                 type = IGMPV3_CHANGE_TO_INCLUDE;
599                         skb = add_grec(skb, pmc, type, 0, 0);
600                 }
601                 spin_unlock_bh(&pmc->lock);
602         }
603         read_unlock(&in_dev->lock);
604         if (!skb)
605                 return;
606         (void) igmpv3_sendpack(skb);
607 }
608
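/*
 * Send a single IGMPv1/v2 report or leave message for pmc's group.
 * IGMPv3 reports are delegated to igmpv3_send_report(); a leave is
 * addressed to the all-routers group, anything else to the group itself.
 * The packet carries the same Router Alert IP option as the v3 reports.
 */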
609 static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
610         int type)
611 {
612         struct sk_buff *skb;
613         struct iphdr *iph;
614         struct igmphdr *ih;
615         struct rtable *rt;
616         struct net_device *dev = in_dev->dev;
617         u32     group = pmc ? pmc->multiaddr : 0;
618         u32     dst;
619
620         if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
621                 return igmpv3_send_report(in_dev, pmc);
622         else if (type == IGMP_HOST_LEAVE_MESSAGE)
623                 dst = IGMP_ALL_ROUTER;
624         else
625                 dst = group;
626
627         {
628                 struct flowi fl = { .oif = dev->ifindex,
629                                     .nl_u = { .ip4_u = { .daddr = dst } },
630                                     .proto = IPPROTO_IGMP };
631                 if (ip_route_output_key(&rt, &fl))
632                         return -1;
633         }
634         if (rt->rt_src == 0) {
635                 ip_rt_put(rt);
636                 return -1;
637         }
638
639         skb=alloc_skb(IGMP_SIZE+LL_RESERVED_SPACE(dev), GFP_ATOMIC);
640         if (skb == NULL) {
641                 ip_rt_put(rt);
642                 return -1;
643         }
644
645         skb->dst = &rt->u.dst;
646
647         skb_reserve(skb, LL_RESERVED_SPACE(dev));
648
649         skb->nh.iph = iph = (struct iphdr *)skb_put(skb, sizeof(struct iphdr)+4);
650
651         iph->version  = 4;
652         iph->ihl      = (sizeof(struct iphdr)+4)>>2;
653         iph->tos      = 0xc0;
654         iph->frag_off = htons(IP_DF);
655         iph->ttl      = 1;
656         iph->daddr    = dst;
657         iph->saddr    = rt->rt_src;
658         iph->protocol = IPPROTO_IGMP;
659         iph->tot_len  = htons(IGMP_SIZE);
660         ip_select_ident(iph, &rt->u.dst, NULL);
661         ((u8*)&iph[1])[0] = IPOPT_RA;
662         ((u8*)&iph[1])[1] = 4;
663         ((u8*)&iph[1])[2] = 0;
664         ((u8*)&iph[1])[3] = 0;
665         ip_send_check(iph);
666
667         ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
668         ih->type=type;
669         ih->code=0;
670         ih->csum=0;
671         ih->group=group;
672         ih->csum=ip_compute_csum((void *)ih, sizeof(struct igmphdr));
673
674         return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
675                        dst_output);
676 }
677
678 static void igmp_gq_timer_expire(unsigned long data)
679 {
680         struct in_device *in_dev = (struct in_device *)data;
681
682         in_dev->mr_gq_running = 0;
683         igmpv3_send_report(in_dev, 0);
684         __in_dev_put(in_dev);
685 }
686
687 static void igmp_ifc_timer_expire(unsigned long data)
688 {
689         struct in_device *in_dev = (struct in_device *)data;
690
691         igmpv3_send_cr(in_dev);
692         if (in_dev->mr_ifc_count) {
693                 in_dev->mr_ifc_count--;
694                 igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
695         }
696         __in_dev_put(in_dev);
697 }
698
699 static void igmp_ifc_event(struct in_device *in_dev)
700 {
701         if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
702                 return;
703         in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv : 
704                 IGMP_Unsolicited_Report_Count;
705         igmp_ifc_start_timer(in_dev, 1);
706 }
707
708
709 static void igmp_timer_expire(unsigned long data)
710 {
711         struct ip_mc_list *im=(struct ip_mc_list *)data;
712         struct in_device *in_dev = im->interface;
713
714         spin_lock(&im->lock);
715         im->tm_running=0;
716
717         if (im->unsolicit_count) {
718                 im->unsolicit_count--;
719                 igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
720         }
721         im->reporter = 1;
722         spin_unlock(&im->lock);
723
724         if (IGMP_V1_SEEN(in_dev))
725                 igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
726         else if (IGMP_V2_SEEN(in_dev))
727                 igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
728         else
729                 igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);
730
731         ip_ma_put(im);
732 }
733
734 static void igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __u32 *srcs)
735 {
736         struct ip_sf_list *psf;
737         int i, scount;
738
739         scount = 0;
740         for (psf=pmc->sources; psf; psf=psf->sf_next) {
741                 if (scount == nsrcs)
742                         break;
743                 for (i=0; i<nsrcs; i++)
744                         if (srcs[i] == psf->sf_inaddr) {
745                                 psf->sf_gsresp = 1;
746                                 scount++;
747                                 break;
748                         }
749         }
750 }
751
752 static void igmp_heard_report(struct in_device *in_dev, u32 group)
753 {
754         struct ip_mc_list *im;
755
756         /* Timers are only set for non-local groups */
757
758         if (group == IGMP_ALL_HOSTS)
759                 return;
760
761         read_lock(&in_dev->lock);
762         for (im=in_dev->mc_list; im!=NULL; im=im->next) {
763                 if (im->multiaddr == group) {
764                         igmp_stop_timer(im);
765                         break;
766                 }
767         }
768         read_unlock(&in_dev->lock);
769 }
770
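/*
 * Process a received membership query. The querier's version is inferred
 * from the packet: an 8-byte query with code 0 is IGMPv1 and one with a
 * non-zero code is IGMPv2 (either starts the matching "older querier
 * present" timeout), anything shorter than 12 bytes is bogus, and the rest
 * is parsed as IGMPv3, honouring its max response code, QRV and source
 * list. Matching group timers are then started or shortened below.
 */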
771 static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
772         int len)
773 {
774         struct igmphdr          *ih = skb->h.igmph;
775         struct igmpv3_query *ih3 = (struct igmpv3_query *)ih;
776         struct ip_mc_list       *im;
777         u32                     group = ih->group;
778         int                     max_delay;
779         int                     mark = 0;
780
781
782         if (len == 8) {
783                 if (ih->code == 0) {
784                         /* Alas, an old v1 router is present here. */
785         
786                         max_delay = IGMP_Query_Response_Interval;
787                         in_dev->mr_v1_seen = jiffies +
788                                 IGMP_V1_Router_Present_Timeout;
789                         group = 0;
790                 } else {
791                         /* v2 router present */
792                         max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
793                         in_dev->mr_v2_seen = jiffies +
794                                 IGMP_V2_Router_Present_Timeout;
795                 }
796                 /* cancel the interface change timer */
797                 in_dev->mr_ifc_count = 0;
798                 if (del_timer(&in_dev->mr_ifc_timer))
799                         __in_dev_put(in_dev);
800                 /* clear deleted report items */
801                 igmpv3_clear_delrec(in_dev);
802         } else if (len < 12) {
803                 return; /* ignore bogus packet; freed by caller */
804         } else { /* v3 */
805                 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
806                         return;
807                 
808                 ih3 = (struct igmpv3_query *) skb->h.raw;
809                 if (ih3->nsrcs) {
810                         if (!pskb_may_pull(skb, sizeof(struct igmpv3_query) 
811                                            + ntohs(ih3->nsrcs)*sizeof(__u32)))
812                                 return;
813                         ih3 = (struct igmpv3_query *) skb->h.raw;
814                 }
815
816                 max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
817                 if (!max_delay)
818                         max_delay = 1;  /* can't mod w/ 0 */
819                 in_dev->mr_maxdelay = max_delay;
820                 if (ih3->qrv)
821                         in_dev->mr_qrv = ih3->qrv;
822                 if (!group) { /* general query */
823                         if (ih3->nsrcs)
824                                 return; /* no sources allowed */
825                         igmp_gq_start_timer(in_dev);
826                         return;
827                 }
828                 /* mark sources to include, if group & source-specific */
829                 mark = ih3->nsrcs != 0;
830         }
831
832         /*
833          * - Start the timers in all of our membership records
834          *   that the query applies to for the interface on
835          *   which the query arrived excl. those that belong
836          *   to a "local" group (224.0.0.X)
837          * - For timers already running check if they need to
838          *   be reset.
839          * - Use the igmp->igmp_code field as the maximum
840          *   delay possible
841          */
842         read_lock(&in_dev->lock);
843         for (im=in_dev->mc_list; im!=NULL; im=im->next) {
844                 if (group && group != im->multiaddr)
845                         continue;
846                 if (im->multiaddr == IGMP_ALL_HOSTS)
847                         continue;
848                 spin_lock_bh(&im->lock);
849                 if (im->tm_running)
850                         im->gsquery = im->gsquery && mark;
851                 else
852                         im->gsquery = mark;
853                 if (im->gsquery)
854                         igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
855                 spin_unlock_bh(&im->lock);
856                 igmp_mod_timer(im, max_delay);
857         }
858         read_unlock(&in_dev->lock);
859 }
860
861 int igmp_rcv(struct sk_buff *skb)
862 {
863         /* This basically follows the spec line by line -- see RFC1112 */
864         struct igmphdr *ih;
865         struct in_device *in_dev = in_dev_get(skb->dev);
866         int len = skb->len;
867
868         if (in_dev==NULL) {
869                 kfree_skb(skb);
870                 return 0;
871         }
872
873         if (!pskb_may_pull(skb, sizeof(struct igmphdr)) || 
874             (u16)csum_fold(skb_checksum(skb, 0, len, 0))) {
875                 in_dev_put(in_dev);
876                 kfree_skb(skb);
877                 return 0;
878         }
879
880         ih = skb->h.igmph;
881         switch (ih->type) {
882         case IGMP_HOST_MEMBERSHIP_QUERY:
883                 igmp_heard_query(in_dev, skb, len);
884                 break;
885         case IGMP_HOST_MEMBERSHIP_REPORT:
886         case IGMPV2_HOST_MEMBERSHIP_REPORT:
887         case IGMPV3_HOST_MEMBERSHIP_REPORT:
888                 /* Is it our report looped back? */
889                 if (((struct rtable*)skb->dst)->fl.iif == 0)
890                         break;
891                 igmp_heard_report(in_dev, ih->group);
892                 break;
893         case IGMP_PIM:
894 #ifdef CONFIG_IP_PIMSM_V1
895                 in_dev_put(in_dev);
896                 return pim_rcv_v1(skb);
897 #endif
898         case IGMP_DVMRP:
899         case IGMP_TRACE:
900         case IGMP_HOST_LEAVE_MESSAGE:
901         case IGMP_MTRACE:
902         case IGMP_MTRACE_RESP:
903                 break;
904         default:
905                 NETDEBUG(printk(KERN_DEBUG "New IGMP type=%d, why do we not know about it?\n", ih->type));
906         }
907         in_dev_put(in_dev);
908         kfree_skb(skb);
909         return 0;
910 }
911
912 #endif
913
914
915 /*
916  *      Add a filter to a device
917  */
918
919 static void ip_mc_filter_add(struct in_device *in_dev, u32 addr)
920 {
921         char buf[MAX_ADDR_LEN];
922         struct net_device *dev = in_dev->dev;
923
924         /* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
925            We will get multicast token leakage when IFF_MULTICAST
926            is changed. This check should be done in dev->set_multicast_list
927            routine. Something like:
928            if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
929            --ANK
930            */
931         if (arp_mc_map(addr, buf, dev, 0) == 0)
932                 dev_mc_add(dev,buf,dev->addr_len,0);
933 }
934
935 /*
936  *      Remove a filter from a device
937  */
938
939 static void ip_mc_filter_del(struct in_device *in_dev, u32 addr)
940 {
941         char buf[MAX_ADDR_LEN];
942         struct net_device *dev = in_dev->dev;
943
944         if (arp_mc_map(addr, buf, dev, 0) == 0)
945                 dev_mc_delete(dev,buf,dev->addr_len,0);
946 }
947
948 #ifdef CONFIG_IP_MULTICAST
949 /*
950  * deleted ip_mc_list manipulation
951  */
952 static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
953 {
954         struct ip_mc_list *pmc;
955
956         /* this is an "ip_mc_list" for convenience; only the fields below
957          * are actually used. In particular, the refcnt and users are not
958          * used for management of the delete list. Using the same structure
959          * for deleted items allows change reports to use common code with
960          * non-deleted or query-response MCA's.
961          */
962         pmc = (struct ip_mc_list *)kmalloc(sizeof(*pmc), GFP_KERNEL);
963         if (!pmc)
964                 return;
965         memset(pmc, 0, sizeof(*pmc));
966         spin_lock_bh(&im->lock);
967         pmc->interface = im->interface;
968         in_dev_hold(in_dev);
969         pmc->multiaddr = im->multiaddr;
970         pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
971                 IGMP_Unsolicited_Report_Count;
972         pmc->sfmode = im->sfmode;
973         if (pmc->sfmode == MCAST_INCLUDE) {
974                 struct ip_sf_list *psf;
975
976                 pmc->tomb = im->tomb;
977                 pmc->sources = im->sources;
978                 im->tomb = im->sources = 0;
979                 for (psf=pmc->sources; psf; psf=psf->sf_next)
980                         psf->sf_crcount = pmc->crcount;
981         }
982         spin_unlock_bh(&im->lock);
983
984         write_lock_bh(&in_dev->mc_lock);
985         pmc->next = in_dev->mc_tomb;
986         in_dev->mc_tomb = pmc;
987         write_unlock_bh(&in_dev->mc_lock);
988 }
989
990 static void igmpv3_del_delrec(struct in_device *in_dev, __u32 multiaddr)
991 {
992         struct ip_mc_list *pmc, *pmc_prev;
993         struct ip_sf_list *psf, *psf_next;
994
995         write_lock_bh(&in_dev->mc_lock);
996         pmc_prev = 0;
997         for (pmc=in_dev->mc_tomb; pmc; pmc=pmc->next) {
998                 if (pmc->multiaddr == multiaddr)
999                         break;
1000                 pmc_prev = pmc;
1001         }
1002         if (pmc) {
1003                 if (pmc_prev)
1004                         pmc_prev->next = pmc->next;
1005                 else
1006                         in_dev->mc_tomb = pmc->next;
1007         }
1008         write_unlock_bh(&in_dev->mc_lock);
1009         if (pmc) {
1010                 for (psf=pmc->tomb; psf; psf=psf_next) {
1011                         psf_next = psf->sf_next;
1012                         kfree(psf);
1013                 }
1014                 in_dev_put(pmc->interface);
1015                 kfree(pmc);
1016         }
1017 }
1018
1019 static void igmpv3_clear_delrec(struct in_device *in_dev)
1020 {
1021         struct ip_mc_list *pmc, *nextpmc;
1022
1023         write_lock_bh(&in_dev->mc_lock);
1024         pmc = in_dev->mc_tomb;
1025         in_dev->mc_tomb = 0;
1026         write_unlock_bh(&in_dev->mc_lock);
1027
1028         for (; pmc; pmc = nextpmc) {
1029                 nextpmc = pmc->next;
1030                 ip_mc_clear_src(pmc);
1031                 in_dev_put(pmc->interface);
1032                 kfree(pmc);
1033         }
1034         /* clear dead sources, too */
1035         read_lock(&in_dev->lock);
1036         for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
1037                 struct ip_sf_list *psf, *psf_next;
1038
1039                 spin_lock_bh(&pmc->lock);
1040                 psf = pmc->tomb;
1041                 pmc->tomb = 0;
1042                 spin_unlock_bh(&pmc->lock);
1043                 for (; psf; psf=psf_next) {
1044                         psf_next = psf->sf_next;
1045                         kfree(psf);
1046                 }
1047         }
1048         read_unlock(&in_dev->lock);
1049 }
1050 #endif
1051
1052 static void igmp_group_dropped(struct ip_mc_list *im)
1053 {
1054         struct in_device *in_dev = im->interface;
1055 #ifdef CONFIG_IP_MULTICAST
1056         int reporter;
1057 #endif
1058
1059         if (im->loaded) {
1060                 im->loaded = 0;
1061                 ip_mc_filter_del(in_dev, im->multiaddr);
1062         }
1063
1064 #ifdef CONFIG_IP_MULTICAST
1065         if (im->multiaddr == IGMP_ALL_HOSTS)
1066                 return;
1067
1068         reporter = im->reporter;
1069         igmp_stop_timer(im);
1070
1071         if (!in_dev->dead) {
1072                 if (IGMP_V1_SEEN(in_dev))
1073                         goto done;
1074                 if (IGMP_V2_SEEN(in_dev)) {
1075                         if (reporter)
1076                                 igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
1077                         goto done;
1078                 }
1079                 /* IGMPv3 */
1080                 igmpv3_add_delrec(in_dev, im);
1081
1082                 igmp_ifc_event(in_dev);
1083         }
1084 done:
1085 #endif
1086         ip_mc_clear_src(im);
1087 }
1088
1089 static void igmp_group_added(struct ip_mc_list *im)
1090 {
1091         struct in_device *in_dev = im->interface;
1092
1093         if (im->loaded == 0) {
1094                 im->loaded = 1;
1095                 ip_mc_filter_add(in_dev, im->multiaddr);
1096         }
1097
1098 #ifdef CONFIG_IP_MULTICAST
1099         if (im->multiaddr == IGMP_ALL_HOSTS)
1100                 return;
1101
1102         if (in_dev->dead)
1103                 return;
1104         if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
1105                 spin_lock_bh(&im->lock);
1106                 igmp_start_timer(im, IGMP_Initial_Report_Delay);
1107                 spin_unlock_bh(&im->lock);
1108                 return;
1109         }
1110         /* else, v3 */
1111
1112         im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
1113                 IGMP_Unsolicited_Report_Count;
1114         igmp_ifc_event(in_dev);
1115 #endif
1116 }
1117
1118
1119 /*
1120  *      Multicast list managers
1121  */
1122
1123
1124 /*
1125  *      A socket has joined a multicast group on device dev.
1126  */
1127
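/*
 * ip_mc_inc_group() is the device-level join: it either bumps the user
 * count on an existing ip_mc_list entry or creates a new one in
 * (EXCLUDE, empty) mode and announces it via igmp_group_added(). It is
 * typically reached from ip_mc_join_group() below when an application
 * joins a group; roughly (illustrative userspace sketch, field values
 * are placeholders):
 *
 *      struct ip_mreqn mr = { .imr_multiaddr.s_addr = group,
 *                             .imr_ifindex          = ifindex };
 *      setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mr, sizeof(mr));
 */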
1128 void ip_mc_inc_group(struct in_device *in_dev, u32 addr)
1129 {
1130         struct ip_mc_list *im;
1131
1132         ASSERT_RTNL();
1133
1134         for (im=in_dev->mc_list; im; im=im->next) {
1135                 if (im->multiaddr == addr) {
1136                         im->users++;
1137                         ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, 0, 0);
1138                         goto out;
1139                 }
1140         }
1141
1142         im = (struct ip_mc_list *)kmalloc(sizeof(*im), GFP_KERNEL);
1143         if (!im)
1144                 goto out;
1145
1146         im->users=1;
1147         im->interface=in_dev;
1148         in_dev_hold(in_dev);
1149         im->multiaddr=addr;
1150         /* initial mode is (EX, empty) */
1151         im->sfmode = MCAST_EXCLUDE;
1152         im->sfcount[MCAST_INCLUDE] = 0;
1153         im->sfcount[MCAST_EXCLUDE] = 1;
1154         im->sources = 0;
1155         im->tomb = 0;
1156         im->crcount = 0;
1157         atomic_set(&im->refcnt, 1);
1158         spin_lock_init(&im->lock);
1159 #ifdef CONFIG_IP_MULTICAST
1160         im->tm_running=0;
1161         init_timer(&im->timer);
1162         im->timer.data=(unsigned long)im;
1163         im->timer.function=&igmp_timer_expire;
1164         im->unsolicit_count = IGMP_Unsolicited_Report_Count;
1165         im->reporter = 0;
1166         im->gsquery = 0;
1167 #endif
1168         im->loaded = 0;
1169         write_lock_bh(&in_dev->lock);
1170         im->next=in_dev->mc_list;
1171         in_dev->mc_list=im;
1172         write_unlock_bh(&in_dev->lock);
1173 #ifdef CONFIG_IP_MULTICAST
1174         igmpv3_del_delrec(in_dev, im->multiaddr);
1175 #endif
1176         igmp_group_added(im);
1177         if (!in_dev->dead)
1178                 ip_rt_multicast_event(in_dev);
1179 out:
1180         return;
1181 }
1182
1183 /*
1184  *      A socket has left a multicast group on device dev
1185  */
1186
1187 void ip_mc_dec_group(struct in_device *in_dev, u32 addr)
1188 {
1189         struct ip_mc_list *i, **ip;
1190         
1191         ASSERT_RTNL();
1192         
1193         for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
1194                 if (i->multiaddr==addr) {
1195                         if (--i->users == 0) {
1196                                 write_lock_bh(&in_dev->lock);
1197                                 *ip = i->next;
1198                                 write_unlock_bh(&in_dev->lock);
1199                                 igmp_group_dropped(i);
1200
1201                                 if (!in_dev->dead)
1202                                         ip_rt_multicast_event(in_dev);
1203
1204                                 ip_ma_put(i);
1205                                 return;
1206                         }
1207                         break;
1208                 }
1209         }
1210 }
1211
1212 /* Device going down */
1213
1214 void ip_mc_down(struct in_device *in_dev)
1215 {
1216         struct ip_mc_list *i;
1217
1218         ASSERT_RTNL();
1219
1220         for (i=in_dev->mc_list; i; i=i->next)
1221                 igmp_group_dropped(i);
1222
1223 #ifdef CONFIG_IP_MULTICAST
1224         in_dev->mr_ifc_count = 0;
1225         if (del_timer(&in_dev->mr_ifc_timer))
1226                 __in_dev_put(in_dev);
1227         in_dev->mr_gq_running = 0;
1228         if (del_timer(&in_dev->mr_gq_timer))
1229                 __in_dev_put(in_dev);
1230         igmpv3_clear_delrec(in_dev);
1231 #endif
1232
1233         ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
1234 }
1235
1236 void ip_mc_init_dev(struct in_device *in_dev)
1237 {
1238         ASSERT_RTNL();
1239
1240         in_dev->mc_tomb = 0;
1241 #ifdef CONFIG_IP_MULTICAST
1242         in_dev->mr_gq_running = 0;
1243         init_timer(&in_dev->mr_gq_timer);
1244         in_dev->mr_gq_timer.data=(unsigned long) in_dev;
1245         in_dev->mr_gq_timer.function=&igmp_gq_timer_expire;
1246         in_dev->mr_ifc_count = 0;
1247         init_timer(&in_dev->mr_ifc_timer);
1248         in_dev->mr_ifc_timer.data=(unsigned long) in_dev;
1249         in_dev->mr_ifc_timer.function=&igmp_ifc_timer_expire;
1250         in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
1251 #endif
1252
1253         in_dev->mc_lock = RW_LOCK_UNLOCKED;
1254 }
1255
1256 /* Device going up */
1257
1258 void ip_mc_up(struct in_device *in_dev)
1259 {
1260         struct ip_mc_list *i;
1261
1262         ASSERT_RTNL();
1263
1264         ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
1265
1266         for (i=in_dev->mc_list; i; i=i->next)
1267                 igmp_group_added(i);
1268 }
1269
1270 /*
1271  *      Device is about to be destroyed: clean up.
1272  */
1273
1274 void ip_mc_destroy_dev(struct in_device *in_dev)
1275 {
1276         struct ip_mc_list *i;
1277
1278         ASSERT_RTNL();
1279
1280         /* Deactivate timers */
1281         ip_mc_down(in_dev);
1282
1283         write_lock_bh(&in_dev->lock);
1284         while ((i = in_dev->mc_list) != NULL) {
1285                 in_dev->mc_list = i->next;
1286                 write_unlock_bh(&in_dev->lock);
1287
1288                 igmp_group_dropped(i);
1289                 ip_ma_put(i);
1290
1291                 write_lock_bh(&in_dev->lock);
1292         }
1293         write_unlock_bh(&in_dev->lock);
1294 }
1295
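/*
 * Resolve the in_device a membership request refers to: an explicit
 * ifindex wins, otherwise a configured local address is looked up, and as
 * a last resort the multicast address itself is routed to pick a device.
 */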
1296 static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
1297 {
1298         struct flowi fl = { .nl_u = { .ip4_u =
1299                                       { .daddr = imr->imr_multiaddr.s_addr } } };
1300         struct rtable *rt;
1301         struct net_device *dev = NULL;
1302         struct in_device *idev = NULL;
1303
1304         if (imr->imr_ifindex) {
1305                 idev = inetdev_by_index(imr->imr_ifindex);
1306                 if (idev)
1307                         __in_dev_put(idev);
1308                 return idev;
1309         }
1310         if (imr->imr_address.s_addr) {
1311                 dev = ip_dev_find(imr->imr_address.s_addr);
1312                 if (!dev)
1313                         return NULL;
1314                 __dev_put(dev);
1315         }
1316
1317         if (!dev && !ip_route_output_key(&rt, &fl)) {
1318                 dev = rt->u.dst.dev;
1319                 ip_rt_put(rt);
1320         }
1321         if (dev) {
1322                 imr->imr_ifindex = dev->ifindex;
1323                 idev = __in_dev_get(dev);
1324         }
1325         return idev;
1326 }
1327
1328 /*
1329  *      Join a socket to a group
1330  */
1331 int sysctl_igmp_max_memberships = IP_MAX_MEMBERSHIPS;
1332 int sysctl_igmp_max_msf = IP_MAX_MSF;
1333
1334
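/*
 * Drop one reference to source *psfsrc from pmc's filter list for the
 * given mode. When both the INCLUDE and EXCLUDE counts reach zero the
 * source record is freed or, when IGMPv3 is in use and the source was
 * previously passing the filter, moved to the tombstone list so a BLOCK
 * record can still be retransmitted. Returns 1 if a change record is
 * needed, 0 otherwise, or -ESRCH if the source is not on the list.
 */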
1335 static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
1336         __u32 *psfsrc)
1337 {
1338         struct ip_sf_list *psf, *psf_prev;
1339         int rv = 0;
1340
1341         psf_prev = 0;
1342         for (psf=pmc->sources; psf; psf=psf->sf_next) {
1343                 if (psf->sf_inaddr == *psfsrc)
1344                         break;
1345                 psf_prev = psf;
1346         }
1347         if (!psf || psf->sf_count[sfmode] == 0) {
1348                 /* source filter not found, or count wrong =>  bug */
1349                 return -ESRCH;
1350         }
1351         psf->sf_count[sfmode]--;
1352         if (psf->sf_count[sfmode] == 0) {
1353                 ip_rt_multicast_event(pmc->interface);
1354         }
1355         if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
1356 #ifdef CONFIG_IP_MULTICAST
1357                 struct in_device *in_dev = pmc->interface;
1358 #endif
1359
1360                 /* no more filters for this source */
1361                 if (psf_prev)
1362                         psf_prev->sf_next = psf->sf_next;
1363                 else
1364                         pmc->sources = psf->sf_next;
1365 #ifdef CONFIG_IP_MULTICAST
1366                 if (psf->sf_oldin &&
1367                     !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
1368                         psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv : 
1369                                 IGMP_Unsolicited_Report_Count;
1370                         psf->sf_next = pmc->tomb;
1371                         pmc->tomb = psf;
1372                         rv = 1;
1373                 } else
1374 #endif
1375                         kfree(psf);
1376         }
1377         return rv;
1378 }
1379
1380 #ifndef CONFIG_IP_MULTICAST
1381 #define igmp_ifc_event(x)       do { } while (0)
1382 #endif
1383
1384 int ip_mc_del_src(struct in_device *in_dev, __u32 *pmca, int sfmode,
1385         int sfcount, __u32 *psfsrc, int delta)
1386 {
1387         struct ip_mc_list *pmc;
1388         int     changerec = 0;
1389         int     i, err;
1390
1391         if (!in_dev)
1392                 return -ENODEV;
1393         read_lock(&in_dev->lock);
1394         for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
1395                 if (*pmca == pmc->multiaddr)
1396                         break;
1397         }
1398         if (!pmc) {
1399                 /* MCA not found?? bug */
1400                 read_unlock(&in_dev->lock);
1401                 return -ESRCH;
1402         }
1403         spin_lock_bh(&pmc->lock);
1404         read_unlock(&in_dev->lock);
1405 #ifdef CONFIG_IP_MULTICAST
1406         sf_markstate(pmc);
1407 #endif
1408         if (!delta) {
1409                 err = -EINVAL;
1410                 if (!pmc->sfcount[sfmode])
1411                         goto out_unlock;
1412                 pmc->sfcount[sfmode]--;
1413         }
1414         err = 0;
1415         for (i=0; i<sfcount; i++) {
1416                 int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
1417
1418                 changerec |= rv > 0;
1419                 if (!err && rv < 0)
1420                         err = rv;
1421         }
1422         if (pmc->sfmode == MCAST_EXCLUDE &&
1423             pmc->sfcount[MCAST_EXCLUDE] == 0 &&
1424             pmc->sfcount[MCAST_INCLUDE]) {
1425 #ifdef CONFIG_IP_MULTICAST
1426                 struct ip_sf_list *psf;
1427 #endif
1428
1429                 /* filter mode change */
1430                 pmc->sfmode = MCAST_INCLUDE;
1431 #ifdef CONFIG_IP_MULTICAST
1432                 pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : 
1433                         IGMP_Unsolicited_Report_Count;
1434                 in_dev->mr_ifc_count = pmc->crcount;
1435                 for (psf=pmc->sources; psf; psf = psf->sf_next)
1436                         psf->sf_crcount = 0;
1437                 igmp_ifc_event(pmc->interface);
1438         } else if (sf_setstate(pmc) || changerec) {
1439                 igmp_ifc_event(pmc->interface);
1440 #endif
1441         }
1442 out_unlock:
1443         spin_unlock_bh(&pmc->lock);
1444         return err;
1445 }
1446
1447 /*
1448  * Add multicast single-source filter to the interface list
1449  */
1450 static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
1451         __u32 *psfsrc, int delta)
1452 {
1453         struct ip_sf_list *psf, *psf_prev;
1454
1455         psf_prev = 0;
1456         for (psf=pmc->sources; psf; psf=psf->sf_next) {
1457                 if (psf->sf_inaddr == *psfsrc)
1458                         break;
1459                 psf_prev = psf;
1460         }
1461         if (!psf) {
1462                 psf = (struct ip_sf_list *)kmalloc(sizeof(*psf), GFP_ATOMIC);
1463                 if (!psf)
1464                         return -ENOBUFS;
1465                 memset(psf, 0, sizeof(*psf));
1466                 psf->sf_inaddr = *psfsrc;
1467                 if (psf_prev) {
1468                         psf_prev->sf_next = psf;
1469                 } else
1470                         pmc->sources = psf;
1471         }
1472         psf->sf_count[sfmode]++;
1473         if (psf->sf_count[sfmode] == 1) {
1474                 ip_rt_multicast_event(pmc->interface);
1475         }
1476         return 0;
1477 }
1478
1479 #ifdef CONFIG_IP_MULTICAST
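/*
 * sf_markstate() records, per source, whether it currently passes the
 * group's filter (sf_oldin); after the filter has been changed,
 * sf_setstate() recomputes that state and arms qrv retransmissions
 * (sf_crcount) for every source whose state flipped, returning how many
 * sources changed.
 */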
1480 static void sf_markstate(struct ip_mc_list *pmc)
1481 {
1482         struct ip_sf_list *psf;
1483         int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
1484
1485         for (psf=pmc->sources; psf; psf=psf->sf_next)
1486                 if (pmc->sfcount[MCAST_EXCLUDE]) {
1487                         psf->sf_oldin = mca_xcount ==
1488                                 psf->sf_count[MCAST_EXCLUDE] &&
1489                                 !psf->sf_count[MCAST_INCLUDE];
1490                 } else
1491                         psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
1492 }
1493
1494 static int sf_setstate(struct ip_mc_list *pmc)
1495 {
1496         struct ip_sf_list *psf;
1497         int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
1498         int qrv = pmc->interface->mr_qrv;
1499         int new_in, rv;
1500
1501         rv = 0;
1502         for (psf=pmc->sources; psf; psf=psf->sf_next) {
1503                 if (pmc->sfcount[MCAST_EXCLUDE]) {
1504                         new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
1505                                 !psf->sf_count[MCAST_INCLUDE];
1506                 } else
1507                         new_in = psf->sf_count[MCAST_INCLUDE] != 0;
1508                 if (new_in != psf->sf_oldin) {
1509                         psf->sf_crcount = qrv;
1510                         rv++;
1511                 }
1512         }
1513         return rv;
1514 }
1515 #endif
1516
1517 /*
1518  * Add multicast source filter list to the interface list
1519  */
1520 int ip_mc_add_src(struct in_device *in_dev, __u32 *pmca, int sfmode,
1521         int sfcount, __u32 *psfsrc, int delta)
1522 {
1523         struct ip_mc_list *pmc;
1524         int     isexclude;
1525         int     i, err;
1526
1527         if (!in_dev)
1528                 return -ENODEV;
1529         read_lock(&in_dev->lock);
1530         for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
1531                 if (*pmca == pmc->multiaddr)
1532                         break;
1533         }
1534         if (!pmc) {
1535                 /* MCA not found?? bug */
1536                 read_unlock(&in_dev->lock);
1537                 return -ESRCH;
1538         }
1539         spin_lock_bh(&pmc->lock);
1540         read_unlock(&in_dev->lock);
1541
1542 #ifdef CONFIG_IP_MULTICAST
1543         sf_markstate(pmc);
1544 #endif
1545         isexclude = pmc->sfmode == MCAST_EXCLUDE;
1546         if (!delta)
1547                 pmc->sfcount[sfmode]++;
1548         err = 0;
1549         for (i=0; i<sfcount; i++) {
1550                 err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i], delta);
1551                 if (err)
1552                         break;
1553         }
1554         if (err) {
1555                 int j;
1556                 if (!delta)
1557                         pmc->sfcount[sfmode]--;
1558                 for (j=0; j<i; j++)
1559                         (void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
1560         } else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
1561 #ifdef CONFIG_IP_MULTICAST
1562                 struct in_device *in_dev = pmc->interface;
1563                 struct ip_sf_list *psf;
1564 #endif
1565
1566                 /* filter mode change */
1567                 if (pmc->sfcount[MCAST_EXCLUDE])
1568                         pmc->sfmode = MCAST_EXCLUDE;
1569                 else if (pmc->sfcount[MCAST_INCLUDE])
1570                         pmc->sfmode = MCAST_INCLUDE;
1571 #ifdef CONFIG_IP_MULTICAST
1572                 /* else no filters; keep old mode for reports */
1573
1574                 pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv : 
1575                         IGMP_Unsolicited_Report_Count;
1576                 in_dev->mr_ifc_count = pmc->crcount;
1577                 for (psf=pmc->sources; psf; psf = psf->sf_next)
1578                         psf->sf_crcount = 0;
1579                 igmp_ifc_event(in_dev);
1580         } else if (sf_setstate(pmc)) {
1581                 igmp_ifc_event(in_dev);
1582 #endif
1583         }
1584         spin_unlock_bh(&pmc->lock);
1585         return err;
1586 }
1587
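/*
 * Free all per-source state for a group (both the active list and the tomb
 * list of deleted-but-unreported sources) and reset it to the default
 * exclude-all filter mode.
 */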
1588 static void ip_mc_clear_src(struct ip_mc_list *pmc)
1589 {
1590         struct ip_sf_list *psf, *nextpsf;
1591
1592         for (psf=pmc->tomb; psf; psf=nextpsf) {
1593                 nextpsf = psf->sf_next;
1594                 kfree(psf);
1595         }
1596         pmc->tomb = 0;
1597         for (psf=pmc->sources; psf; psf=nextpsf) {
1598                 nextpsf = psf->sf_next;
1599                 kfree(psf);
1600         }
1601         pmc->sources = 0;
1602         pmc->sfmode = MCAST_EXCLUDE;
1603         pmc->sfcount[MCAST_INCLUDE] = 0;
1604         pmc->sfcount[MCAST_EXCLUDE] = 1;
1605 }
1606
1607
1608 /*
1609  * Join a multicast group
1610  */
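/*
 * Reached from ip_setsockopt() for IP_ADD_MEMBERSHIP (and the newer
 * MCAST_JOIN_GROUP option).  A minimal userspace sketch, assuming an
 * already-created UDP socket "fd" and the usual <netinet/in.h> and
 * <arpa/inet.h> headers -- illustrative only; imr_ifindex == 0 lets the
 * routing table pick the device:
 *
 *	struct ip_mreqn mreq;
 *
 *	memset(&mreq, 0, sizeof(mreq));
 *	mreq.imr_multiaddr.s_addr = inet_addr("224.0.0.251");
 *	mreq.imr_ifindex = 0;
 *	if (setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0)
 *		perror("IP_ADD_MEMBERSHIP");
 */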
1611 int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
1612 {
1613         int err;
1614         u32 addr = imr->imr_multiaddr.s_addr;
1615         struct ip_mc_socklist *iml, *i;
1616         struct in_device *in_dev;
1617         struct inet_opt *inet = inet_sk(sk);
1618         int count = 0;
1619
1620         if (!MULTICAST(addr))
1621                 return -EINVAL;
1622
1623         rtnl_shlock();
1624
1625         in_dev = ip_mc_find_dev(imr);
1626
1627         if (!in_dev) {
1628                 iml = NULL;
1629                 err = -ENODEV;
1630                 goto done;
1631         }
1632
1633         iml = (struct ip_mc_socklist *)sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
1634
1635         err = -EADDRINUSE;
1636         for (i = inet->mc_list; i; i = i->next) {
1637                 if (memcmp(&i->multi, imr, sizeof(*imr)) == 0) {
1638                         /* New style additions are reference counted */
1639                         if (imr->imr_address.s_addr == 0) {
1640                                 i->count++;
1641                                 err = 0;
1642                         }
1643                         goto done;
1644                 }
1645                 count++;
1646         }
1647         err = -ENOBUFS;
1648         if (iml == NULL || count >= sysctl_igmp_max_memberships)
1649                 goto done;
1650         memcpy(&iml->multi, imr, sizeof(*imr));
1651         iml->next = inet->mc_list;
1652         iml->count = 1;
1653         iml->sflist = NULL;
1654         iml->sfmode = MCAST_EXCLUDE;
1655         inet->mc_list = iml;
1656         ip_mc_inc_group(in_dev, addr);
1657         iml = NULL;
1658         err = 0;
1659
1660 done:
1661         rtnl_shunlock();
1662         if (iml)
1663                 sock_kfree_s(sk, iml, sizeof(*iml));
1664         return err;
1665 }
1666
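/*
 * Remove one socket membership's contribution to the interface filter
 * state: with no per-source list this is the plain any-source exclude
 * case, otherwise every source the socket listed is withdrawn and the
 * list freed.
 */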
1667 int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
1668         struct in_device *in_dev)
1669 {
1670         int err;
1671
1672         if (iml->sflist == 0) {
1673                 /* any-source empty exclude case */
1674                 return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1675                         iml->sfmode, 0, 0, 0);
1676         }
1677         err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
1678                         iml->sfmode, iml->sflist->sl_count,
1679                         iml->sflist->sl_addr, 0);
1680         sock_kfree_s(sk, iml->sflist, IP_SFLSIZE(iml->sflist->sl_max));
1681         iml->sflist = 0;
1682         return err;
1683 }
1684
1685 /*
1686  *      Ask a socket to leave a group.
1687  */
1688
1689 int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1690 {
1691         struct inet_opt *inet = inet_sk(sk);
1692         struct ip_mc_socklist *iml, **imlp;
1693
1694         rtnl_lock();
1695         for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
1696                 if (iml->multi.imr_multiaddr.s_addr==imr->imr_multiaddr.s_addr &&
1697                     iml->multi.imr_address.s_addr==imr->imr_address.s_addr &&
1698                     (!imr->imr_ifindex || iml->multi.imr_ifindex==imr->imr_ifindex)) {
1699                         struct in_device *in_dev;
1700
1701                         in_dev = inetdev_by_index(iml->multi.imr_ifindex);
1702                         if (in_dev)
1703                                 (void) ip_mc_leave_src(sk, iml, in_dev);
1704                         if (--iml->count) {
1705                                 rtnl_unlock();
1706                                 if (in_dev)
1707                                         in_dev_put(in_dev);
1708                                 return 0;
1709                         }
1710
1711                         *imlp = iml->next;
1712
1713                         if (in_dev) {
1714                                 ip_mc_dec_group(in_dev, imr->imr_multiaddr.s_addr);
1715                                 in_dev_put(in_dev);
1716                         }
1717                         rtnl_unlock();
1718                         sock_kfree_s(sk, iml, sizeof(*iml));
1719                         return 0;
1720                 }
1721         }
1722         rtnl_unlock();
1723         return -EADDRNOTAVAIL;
1724 }
1725
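/*
 * Per-source join/leave, reached from ip_setsockopt() for
 * IP_ADD/DROP_SOURCE_MEMBERSHIP, IP_BLOCK/UNBLOCK_SOURCE and their MCAST_*
 * equivalents: add != 0 inserts mreqs->imr_sourceaddr into the socket's
 * sorted source list for the group and add == 0 removes it; omode is the
 * filter mode implied by the option used.
 */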
1726 int ip_mc_source(int add, int omode, struct sock *sk, struct
1727         ip_mreq_source *mreqs, int ifindex)
1728 {
1729         int err;
1730         struct ip_mreqn imr;
1731         u32 addr = mreqs->imr_multiaddr;
1732         struct ip_mc_socklist *pmc;
1733         struct in_device *in_dev = 0;
1734         struct inet_opt *inet = inet_sk(sk);
1735         struct ip_sf_socklist *psl;
1736         int i, j, rv;
1737
1738         if (!MULTICAST(addr))
1739                 return -EINVAL;
1740
1741         rtnl_shlock();
1742
1743         imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
1744         imr.imr_address.s_addr = mreqs->imr_interface;
1745         imr.imr_ifindex = ifindex;
1746         in_dev = ip_mc_find_dev(&imr);
1747
1748         if (!in_dev) {
1749                 err = -ENODEV;
1750                 goto done;
1751         }
1752         err = -EADDRNOTAVAIL;
1753
1754         for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
1755                 if (memcmp(&pmc->multi, mreqs, 2*sizeof(__u32)) == 0)
1756                         break;
1757         }
1758         if (!pmc)               /* must have a prior join */
1759                 goto done;
1760         /* if a source filter was set, must be the same mode as before */
1761         if (pmc->sflist) {
1762                 if (pmc->sfmode != omode)
1763                         goto done;
1764         } else if (pmc->sfmode != omode) {
1765                 /* allow mode switches for empty-set filters */
1766                 ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, 0, 0);
1767                 ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0, 
1768                         0, 0);
1769                 pmc->sfmode = omode;
1770         }
1771
1772         psl = pmc->sflist;
1773         if (!add) {
1774                 if (!psl)
1775                         goto done;
1776                 rv = !0;
1777                 for (i=0; i<psl->sl_count; i++) {
1778                         rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
1779                                 sizeof(__u32));
1780                         if (rv >= 0)
1781                                 break;
1782                 }
1783                 if (rv)         /* source not found */
1784                         goto done;
1785
1786                 /* update the interface filter */
1787                 ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1, 
1788                         &mreqs->imr_sourceaddr, 1);
1789
1790                 for (j=i+1; j<psl->sl_count; j++)
1791                         psl->sl_addr[j-1] = psl->sl_addr[j];
1792                 psl->sl_count--;
1793                 err = 0;
1794                 goto done;
1795         }
1796         /* else, add a new source to the filter */
1797
1798         if (psl && psl->sl_count >= sysctl_igmp_max_msf) {
1799                 err = -ENOBUFS;
1800                 goto done;
1801         }
1802         if (!psl || psl->sl_count == psl->sl_max) {
1803                 struct ip_sf_socklist *newpsl;
1804                 int count = IP_SFBLOCK;
1805
1806                 if (psl)
1807                         count += psl->sl_max;
1808                 newpsl = (struct ip_sf_socklist *)sock_kmalloc(sk,
1809                         IP_SFLSIZE(count), GFP_KERNEL);
1810                 if (!newpsl) {
1811                         err = -ENOBUFS;
1812                         goto done;
1813                 }
1814                 newpsl->sl_max = count;
1815                 newpsl->sl_count = count - IP_SFBLOCK;
1816                 if (psl) {
1817                         for (i=0; i<psl->sl_count; i++)
1818                                 newpsl->sl_addr[i] = psl->sl_addr[i];
1819                         sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max));
1820                 }
1821                 pmc->sflist = psl = newpsl;
1822         }
1823         rv = 1; /* > 0 for insert logic below if sl_count is 0 */
1824         for (i=0; i<psl->sl_count; i++) {
1825                 rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
1826                         sizeof(__u32));
1827                 if (rv >= 0)
1828                         break;
1829         }
1830         if (rv == 0)            /* address already there is an error */
1831                 goto done;
1832         for (j=psl->sl_count-1; j>=i; j--)
1833                 psl->sl_addr[j+1] = psl->sl_addr[j];
1834         psl->sl_addr[i] = mreqs->imr_sourceaddr;
1835         psl->sl_count++;
1836         err = 0;
1837         /* update the interface list */
1838         ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1, 
1839                 &mreqs->imr_sourceaddr, 1);
1840 done:
1841         rtnl_shunlock();
1842         return err;
1843 }
1844
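/*
 * Replace the socket's whole filter for a group (setsockopt IP_MSFILTER):
 * install the new source list and filter mode, fold the change into the
 * interface state, and free the old list.
 */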
1845 int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
1846 {
1847         int err;
1848         struct ip_mreqn imr;
1849         u32 addr = msf->imsf_multiaddr;
1850         struct ip_mc_socklist *pmc;
1851         struct in_device *in_dev;
1852         struct inet_opt *inet = inet_sk(sk);
1853         struct ip_sf_socklist *newpsl, *psl;
1854
1855         if (!MULTICAST(addr))
1856                 return -EINVAL;
1857         if (msf->imsf_fmode != MCAST_INCLUDE &&
1858             msf->imsf_fmode != MCAST_EXCLUDE)
1859                 return -EINVAL;
1860
1861         rtnl_shlock();
1862
1863         imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
1864         imr.imr_address.s_addr = msf->imsf_interface;
1865         imr.imr_ifindex = ifindex;
1866         in_dev = ip_mc_find_dev(&imr);
1867
1868         if (!in_dev) {
1869                 err = -ENODEV;
1870                 goto done;
1871         }
1872         err = -EADDRNOTAVAIL;
1873
1874         for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
1875                 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
1876                     pmc->multi.imr_ifindex == imr.imr_ifindex)
1877                         break;
1878         }
1879         if (!pmc)               /* must have a prior join */
1880                 goto done;
1881         if (msf->imsf_numsrc) {
1882                 newpsl = (struct ip_sf_socklist *)sock_kmalloc(sk,
1883                                 IP_SFLSIZE(msf->imsf_numsrc), GFP_KERNEL);
1884                 if (!newpsl) {
1885                         err = -ENOBUFS;
1886                         goto done;
1887                 }
1888                 newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
1889                 memcpy(newpsl->sl_addr, msf->imsf_slist,
1890                         msf->imsf_numsrc * sizeof(msf->imsf_slist[0]));
1891                 err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
1892                         msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0);
1893                 if (err) {
1894                         sock_kfree_s(sk, newpsl, IP_SFLSIZE(newpsl->sl_max));
1895                         goto done;
1896                 }
1897         } else
1898                 newpsl = 0;
1899         psl = pmc->sflist;
1900         if (psl) {
1901                 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
1902                         psl->sl_count, psl->sl_addr, 0);
1903                 sock_kfree_s(sk, psl, IP_SFLSIZE(psl->sl_max));
1904         } else
1905                 (void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
1906                         0, 0, 0);
1907         pmc->sflist = newpsl;
1908         pmc->sfmode = msf->imsf_fmode;
1909 done:
1910         rtnl_shunlock();
1911         return err;
1912 }
1913
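/*
 * getsockopt(IP_MSFILTER): return the current filter mode and source count
 * for a group, copying out at most the caller-supplied imsf_numsrc
 * addresses.
 */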
1914 int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
1915         struct ip_msfilter *optval, int *optlen)
1916 {
1917         int err, len, count, copycount;
1918         struct ip_mreqn imr;
1919         u32 addr = msf->imsf_multiaddr;
1920         struct ip_mc_socklist *pmc;
1921         struct in_device *in_dev;
1922         struct inet_opt *inet = inet_sk(sk);
1923         struct ip_sf_socklist *psl;
1924
1925         if (!MULTICAST(addr))
1926                 return -EINVAL;
1927
1928         rtnl_shlock();
1929
1930         imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
1931         imr.imr_address.s_addr = msf->imsf_interface;
1932         imr.imr_ifindex = 0;
1933         in_dev = ip_mc_find_dev(&imr);
1934
1935         if (!in_dev) {
1936                 err = -ENODEV;
1937                 goto done;
1938         }
1939         err = -EADDRNOTAVAIL;
1940
1941         for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
1942                 if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
1943                     pmc->multi.imr_ifindex == imr.imr_ifindex)
1944                         break;
1945         }
1946         if (!pmc)               /* must have a prior join */
1947                 goto done;
1948         msf->imsf_fmode = pmc->sfmode;
1949         psl = pmc->sflist;
1950         rtnl_shunlock();
1951         if (!psl) {
1952                 len = 0;
1953                 count = 0;
1954         } else {
1955                 count = psl->sl_count;
1956         }
1957         copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc;
1958         len = copycount * sizeof(psl->sl_addr[0]);
1959         msf->imsf_numsrc = count;
1960         if (put_user(IP_MSFILTER_SIZE(copycount), optlen) ||
1961             copy_to_user((void *)optval, msf, IP_MSFILTER_SIZE(0))) {
1962                 return -EFAULT;
1963         }
1964         if (len &&
1965             copy_to_user((void *)&optval->imsf_slist[0], psl->sl_addr, len))
1966                 return -EFAULT;
1967         return 0;
1968 done:
1969         rtnl_shunlock();
1970         return err;
1971 }
1972
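/*
 * getsockopt(MCAST_MSFILTER): protocol-independent variant of the above,
 * returning the sources as AF_INET sockaddr_storage entries inside a
 * struct group_filter.
 */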
1973 int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
1974         struct group_filter *optval, int *optlen)
1975 {
1976         int err, i, count, copycount;
1977         struct sockaddr_in *psin;
1978         u32 addr;
1979         struct ip_mc_socklist *pmc;
1980         struct inet_opt *inet = inet_sk(sk);
1981         struct ip_sf_socklist *psl;
1982
1983         psin = (struct sockaddr_in *)&gsf->gf_group;
1984         if (psin->sin_family != AF_INET)
1985                 return -EINVAL;
1986         addr = psin->sin_addr.s_addr;
1987         if (!MULTICAST(addr))
1988                 return -EINVAL;
1989
1990         rtnl_shlock();
1991
1992         err = -EADDRNOTAVAIL;
1993
1994         for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
1995                 if (pmc->multi.imr_multiaddr.s_addr == addr &&
1996                     pmc->multi.imr_ifindex == gsf->gf_interface)
1997                         break;
1998         }
1999         if (!pmc)               /* must have a prior join */
2000                 goto done;
2001         gsf->gf_fmode = pmc->sfmode;
2002         psl = pmc->sflist;
2003         rtnl_shunlock();
2004         count = psl ? psl->sl_count : 0;
2005         copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
2006         gsf->gf_numsrc = count;
2007         if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
2008             copy_to_user((void *)optval, gsf, GROUP_FILTER_SIZE(0))) {
2009                 return -EFAULT;
2010         }
2011         for (i=0; i<copycount; i++) {
2012                 struct sockaddr_in *psin;
2013                 struct sockaddr_storage ss;
2014
2015                 psin = (struct sockaddr_in *)&ss;
2016                 memset(&ss, 0, sizeof(ss));
2017                 psin->sin_family = AF_INET;
2018                 psin->sin_addr.s_addr = psl->sl_addr[i];
2019                 if (copy_to_user((void *)&optval->gf_slist[i], &ss, sizeof(ss)))
2020                         return -EFAULT;
2021         }
2022         return 0;
2023 done:
2024         rtnl_shunlock();
2025         return err;
2026 }
2027
2028 /*
2029  * check if a multicast source filter allows delivery for a given <src,dst,intf>
2030  */
2031 int ip_mc_sf_allow(struct sock *sk, u32 loc_addr, u32 rmt_addr, int dif)
2032 {
2033         struct inet_opt *inet = inet_sk(sk);
2034         struct ip_mc_socklist *pmc;
2035         struct ip_sf_socklist *psl;
2036         int i;
2037
2038         if (!MULTICAST(loc_addr))
2039                 return 1;
2040
2041         for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
2042                 if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
2043                     pmc->multi.imr_ifindex == dif)
2044                         break;
2045         }
2046         if (!pmc)
2047                 return 1;
2048         psl = pmc->sflist;
2049         if (!psl)
2050                 return pmc->sfmode == MCAST_EXCLUDE;
2051
2052         for (i=0; i<psl->sl_count; i++) {
2053                 if (psl->sl_addr[i] == rmt_addr)
2054                         break;
2055         }
2056         if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
2057                 return 0;
2058         if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
2059                 return 0;
2060         return 1;
2061 }
2062
2063 /*
2064  *      A socket is closing.
2065  */
2066
2067 void ip_mc_drop_socket(struct sock *sk)
2068 {
2069         struct inet_opt *inet = inet_sk(sk);
2070         struct ip_mc_socklist *iml;
2071
2072         if (inet->mc_list == NULL)
2073                 return;
2074
2075         rtnl_lock();
2076         while ((iml = inet->mc_list) != NULL) {
2077                 struct in_device *in_dev;
2078                 inet->mc_list = iml->next;
2079
2080                 if ((in_dev = inetdev_by_index(iml->multi.imr_ifindex)) != NULL) {
2081                         (void) ip_mc_leave_src(sk, iml, in_dev);
2082                         ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
2083                         in_dev_put(in_dev);
2084                 }
2085                 sock_kfree_s(sk, iml, sizeof(*iml));
2086
2087         }
2088         rtnl_unlock();
2089 }
2090
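/*
 * Receive-path check: may a packet for mc_addr from src_addr be delivered
 * on this interface?  IGMP itself is always accepted; otherwise the
 * aggregated per-interface source filters decide.
 */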
2091 int ip_check_mc(struct in_device *in_dev, u32 mc_addr, u32 src_addr, u16 proto)
2092 {
2093         struct ip_mc_list *im;
2094         struct ip_sf_list *psf;
2095         int rv = 0;
2096
2097         read_lock(&in_dev->lock);
2098         for (im=in_dev->mc_list; im; im=im->next) {
2099                 if (im->multiaddr == mc_addr)
2100                         break;
2101         }
2102         if (im && proto == IPPROTO_IGMP) {
2103                 rv = 1;
2104         } else if (im) {
2105                 if (src_addr) {
2106                         for (psf=im->sources; psf; psf=psf->sf_next) {
2107                                 if (psf->sf_inaddr == src_addr)
2108                                         break;
2109                         }
2110                         if (psf)
2111                                 rv = psf->sf_count[MCAST_INCLUDE] ||
2112                                         psf->sf_count[MCAST_EXCLUDE] !=
2113                                         im->sfcount[MCAST_EXCLUDE];
2114                         else
2115                                 rv = im->sfcount[MCAST_EXCLUDE] != 0;
2116                 } else
2117                         rv = 1; /* unspecified source; tentatively allow */
2118         }
2119         read_unlock(&in_dev->lock);
2120         return rv;
2121 }
2122
2123 #if defined(CONFIG_PROC_FS)
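/*
 * seq_file iterators backing /proc/net/igmp (per-device group memberships)
 * and /proc/net/mcfilter (per-group source filters).
 */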
2124 struct igmp_mc_iter_state {
2125         struct net_device *dev;
2126         struct in_device *in_dev;
2127 };
2128
2129 #define igmp_mc_seq_private(seq)        ((struct igmp_mc_iter_state *)(seq)->private)
2130
2131 static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
2132 {
2133         struct ip_mc_list *im = NULL;
2134         struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2135
2136         for (state->dev = dev_base, state->in_dev = NULL;
2137              state->dev; 
2138              state->dev = state->dev->next) {
2139                 struct in_device *in_dev;
2140                 in_dev = in_dev_get(state->dev);
2141                 if (!in_dev)
2142                         continue;
2143                 read_lock(&in_dev->lock);
2144                 im = in_dev->mc_list;
2145                 if (im) {
2146                         state->in_dev = in_dev;
2147                         break;
2148                 }
2149                 read_unlock(&in_dev->lock);
2150                 in_dev_put(in_dev);
2151         }
2152         return im;
2153 }
2154
2155 static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
2156 {
2157         struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2158         im = im->next;
2159         while (!im) {
2160                 if (likely(state->in_dev != NULL)) {
2161                         read_unlock(&state->in_dev->lock);
2162                         in_dev_put(state->in_dev);
2163                 }
2164                 state->dev = state->dev->next;
2165                 if (!state->dev) {
2166                         state->in_dev = NULL;
2167                         break;
2168                 }
2169                 state->in_dev = in_dev_get(state->dev);
2170                 if (!state->in_dev)
2171                         continue;
2172                 read_lock(&state->in_dev->lock);
2173                 im = state->in_dev->mc_list;
2174         }
2175         return im;
2176 }
2177
2178 static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
2179 {
2180         struct ip_mc_list *im = igmp_mc_get_first(seq);
2181         if (im)
2182                 while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
2183                         --pos;
2184         return pos ? NULL : im;
2185 }
2186
2187 static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
2188 {
2189         read_lock(&dev_base_lock);
2190         return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2191 }
2192
2193 static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2194 {
2195         struct ip_mc_list *im;
2196         if (v == SEQ_START_TOKEN)
2197                 im = igmp_mc_get_first(seq);
2198         else
2199                 im = igmp_mc_get_next(seq, v);
2200         ++*pos;
2201         return im;
2202 }
2203
2204 static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
2205 {
2206         struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2207         if (likely(state->in_dev != NULL)) {
2208                 read_unlock(&state->in_dev->lock);
2209                 in_dev_put(state->in_dev);
2210                 state->in_dev = NULL;
2211         }
2212         state->dev = NULL;
2213         read_unlock(&dev_base_lock);
2214 }
2215
2216 static int igmp_mc_seq_show(struct seq_file *seq, void *v)
2217 {
2218         if (v == SEQ_START_TOKEN)
2219                 seq_printf(seq, 
2220                            "Idx\tDevice    : Count Querier\tGroup    Users Timer\tReporter\n");
2221         else {
2222                 struct ip_mc_list *im = (struct ip_mc_list *)v;
2223                 struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
2224                 char   *querier;
2225 #ifdef CONFIG_IP_MULTICAST
2226                 querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
2227                           IGMP_V2_SEEN(state->in_dev) ? "V2" :
2228                           "V3";
2229 #else
2230                 querier = "NONE";
2231 #endif
2232
2233                 if (state->in_dev->mc_list == im) {
2234                         seq_printf(seq, "%d\t%-10s: %5d %7s\n",
2235                                    state->dev->ifindex, state->dev->name, state->dev->mc_count, querier);
2236                 }
2237
2238                 seq_printf(seq,
2239                            "\t\t\t\t%08lX %5d %d:%08lX\t\t%d\n",
2240                            im->multiaddr, im->users,
2241                            im->tm_running, im->tm_running ?
2242                            jiffies_to_clock_t(im->timer.expires-jiffies) : 0,
2243                            im->reporter);
2244         }
2245         return 0;
2246 }
2247
2248 static struct seq_operations igmp_mc_seq_ops = {
2249         .start  =       igmp_mc_seq_start,
2250         .next   =       igmp_mc_seq_next,
2251         .stop   =       igmp_mc_seq_stop,
2252         .show   =       igmp_mc_seq_show,
2253 };
2254
2255 static int igmp_mc_seq_open(struct inode *inode, struct file *file)
2256 {
2257         struct seq_file *seq;
2258         int rc = -ENOMEM;
2259         struct igmp_mc_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
2260
2261         if (!s)
2262                 goto out;
2263         rc = seq_open(file, &igmp_mc_seq_ops);
2264         if (rc)
2265                 goto out_kfree;
2266
2267         seq = file->private_data;
2268         seq->private = s;
2269         memset(s, 0, sizeof(*s));
2270 out:
2271         return rc;
2272 out_kfree:
2273         kfree(s);
2274         goto out;
2275 }
2276
2277 static struct file_operations igmp_mc_seq_fops = {
2278         .owner          =       THIS_MODULE,
2279         .open           =       igmp_mc_seq_open,
2280         .read           =       seq_read,
2281         .llseek         =       seq_lseek,
2282         .release        =       seq_release_private,
2283 };
2284
2285 struct igmp_mcf_iter_state {
2286         struct net_device *dev;
2287         struct in_device *idev;
2288         struct ip_mc_list *im;
2289 };
2290
2291 #define igmp_mcf_seq_private(seq)       ((struct igmp_mcf_iter_state *)(seq)->private)
2292
2293 static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
2294 {
2295         struct ip_sf_list *psf = NULL;
2296         struct ip_mc_list *im = NULL;
2297         struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2298
2299         for (state->dev = dev_base, state->idev = NULL, state->im = NULL;
2300              state->dev; 
2301              state->dev = state->dev->next) {
2302                 struct in_device *idev;
2303                 idev = in_dev_get(state->dev);
2304                 if (unlikely(idev == NULL))
2305                         continue;
2306                 read_lock_bh(&idev->lock);
2307                 im = idev->mc_list;
2308                 if (likely(im != NULL)) {
2309                         spin_lock_bh(&im->lock);
2310                         psf = im->sources;
2311                         if (likely(psf != NULL)) {
2312                                 state->im = im;
2313                                 state->idev = idev;
2314                                 break;
2315                         }
2316                         spin_unlock_bh(&im->lock);
2317                 }
2318                 read_unlock_bh(&idev->lock);
2319                 in_dev_put(idev);
2320         }
2321         return psf;
2322 }
2323
2324 static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
2325 {
2326         struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2327
2328         psf = psf->sf_next;
2329         while (!psf) {
2330                 spin_unlock_bh(&state->im->lock);
2331                 state->im = state->im->next;
2332                 while (!state->im) {
2333                         if (likely(state->idev != NULL)) {
2334                                 read_unlock_bh(&state->idev->lock);
2335                                 in_dev_put(state->idev);
2336                         }
2337                         state->dev = state->dev->next;
2338                         if (!state->dev) {
2339                                 state->idev = NULL;
2340                                 goto out;
2341                         }
2342                         state->idev = in_dev_get(state->dev);
2343                         if (!state->idev)
2344                                 continue;
2345                         read_lock_bh(&state->idev->lock);
2346                         state->im = state->idev->mc_list;
2347                 }
2348                 if (!state->im)
2349                         break;
2350                 spin_lock_bh(&state->im->lock);
2351                 psf = state->im->sources;
2352         }
2353 out:
2354         return psf;
2355 }
2356
2357 static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
2358 {
2359         struct ip_sf_list *psf = igmp_mcf_get_first(seq);
2360         if (psf)
2361                 while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
2362                         --pos;
2363         return pos ? NULL : psf;
2364 }
2365
2366 static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
2367 {
2368         read_lock(&dev_base_lock);
2369         return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2370 }
2371
2372 static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2373 {
2374         struct ip_sf_list *psf;
2375         if (v == SEQ_START_TOKEN)
2376                 psf = igmp_mcf_get_first(seq);
2377         else
2378                 psf = igmp_mcf_get_next(seq, v);
2379         ++*pos;
2380         return psf;
2381 }
2382
2383 static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
2384 {
2385         struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2386         if (likely(state->im != NULL)) {
2387                 spin_unlock_bh(&state->im->lock);
2388                 state->im = NULL;
2389         }
2390         if (likely(state->idev != NULL)) {
2391                 read_unlock_bh(&state->idev->lock);
2392                 in_dev_put(state->idev);
2393                 state->idev = NULL;
2394         }
2395         state->dev = NULL;
2396         read_unlock(&dev_base_lock);
2397 }
2398
2399 static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
2400 {
2401         struct ip_sf_list *psf = (struct ip_sf_list *)v;
2402         struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
2403
2404         if (v == SEQ_START_TOKEN) {
2405                 seq_printf(seq, 
2406                            "%3s %6s "
2407                            "%10s %10s %6s %6s\n", "Idx",
2408                            "Device", "MCA",
2409                            "SRC", "INC", "EXC");
2410         } else {
2411                 seq_printf(seq,
2412                            "%3d %6.6s 0x%08x "
2413                            "0x%08x %6lu %6lu\n", 
2414                            state->dev->ifindex, state->dev->name, 
2415                            ntohl(state->im->multiaddr),
2416                            ntohl(psf->sf_inaddr),
2417                            psf->sf_count[MCAST_INCLUDE],
2418                            psf->sf_count[MCAST_EXCLUDE]);
2419         }
2420         return 0;
2421 }
2422
2423 static struct seq_operations igmp_mcf_seq_ops = {
2424         .start  =       igmp_mcf_seq_start,
2425         .next   =       igmp_mcf_seq_next,
2426         .stop   =       igmp_mcf_seq_stop,
2427         .show   =       igmp_mcf_seq_show,
2428 };
2429
2430 static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
2431 {
2432         struct seq_file *seq;
2433         int rc = -ENOMEM;
2434         struct igmp_mcf_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);
2435
2436         if (!s)
2437                 goto out;
2438         rc = seq_open(file, &igmp_mcf_seq_ops);
2439         if (rc)
2440                 goto out_kfree;
2441
2442         seq = file->private_data;
2443         seq->private = s;
2444         memset(s, 0, sizeof(*s));
2445 out:
2446         return rc;
2447 out_kfree:
2448         kfree(s);
2449         goto out;
2450 }
2451
2452 static struct file_operations igmp_mcf_seq_fops = {
2453         .owner          =       THIS_MODULE,
2454         .open           =       igmp_mcf_seq_open,
2455         .read           =       seq_read,
2456         .llseek         =       seq_lseek,
2457         .release        =       seq_release_private,
2458 };
2459
2460 int __init igmp_mc_proc_init(void)
2461 {
2462         proc_net_fops_create("igmp", S_IRUGO, &igmp_mc_seq_fops);
2463         proc_net_fops_create("mcfilter", S_IRUGO, &igmp_mcf_seq_fops);
2464         return 0;
2465 }
2466 #endif
2467
2468 EXPORT_SYMBOL(ip_mc_dec_group);
2469 EXPORT_SYMBOL(ip_mc_inc_group);
2470 EXPORT_SYMBOL(ip_mc_join_group);