static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
+static void ip6_dst_destroy(struct dst_entry *);
+static void ip6_dst_ifdown(struct dst_entry *,
+ struct net_device *dev, int how);
static int ip6_dst_gc(void);
static int ip6_pkt_discard(struct sk_buff *skb);
+static int ip6_pkt_discard_out(struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
.gc = ip6_dst_gc,
.gc_thresh = 1024,
.check = ip6_dst_check,
+ .destroy = ip6_dst_destroy,
+ .ifdown = ip6_dst_ifdown,
.negative_advice = ip6_negative_advice,
.link_failure = ip6_link_failure,
.update_pmtu = ip6_rt_update_pmtu,
.error = -ENETUNREACH,
.metrics = { [RTAX_HOPLIMIT - 1] = 255, },
.input = ip6_pkt_discard,
- .output = ip6_pkt_discard,
+ .output = ip6_pkt_discard_out,
.ops = &ip6_dst_ops,
.path = (struct dst_entry*)&ip6_null_entry,
}
/* Protects all the ip6 fib */
-rwlock_t rt6_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(rt6_lock);
/* allocate dst with ip6_dst_ops */
static __inline__ struct rt6_info *ip6_dst_alloc(void)
{
- return dst_alloc(&ip6_dst_ops);
+ return (struct rt6_info *)dst_alloc(&ip6_dst_ops);
+}
+
+static void ip6_dst_destroy(struct dst_entry *dst)
+{
+ struct rt6_info *rt = (struct rt6_info *)dst;
+ struct inet6_dev *idev = rt->rt6i_idev;
+
+ if (idev != NULL) {
+ rt->rt6i_idev = NULL;
+ in6_dev_put(idev);
+ }
+}
+
+static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+ int how)
+{
+ struct rt6_info *rt = (struct rt6_info *)dst;
+ struct inet6_dev *idev = rt->rt6i_idev;
+
+ if (dev != &loopback_dev && idev != NULL && idev->dev == dev) {
+ struct inet6_dev *loopback_idev = in6_dev_get(&loopback_dev);
+ if (loopback_idev != NULL) {
+ rt->rt6i_idev = loopback_idev;
+ in6_dev_put(idev);
+ }
+ }
+}
+
+static __inline__ int rt6_check_expired(const struct rt6_info *rt)
+{
+ return (rt->rt6i_flags & RTF_EXPIRES &&
+ time_after(jiffies, rt->rt6i_expires));
}
/*
struct net_device *dev = sprt->rt6i_dev;
if (dev->ifindex == oif)
return sprt;
- if (dev->flags&IFF_LOOPBACK)
+ if (dev->flags & IFF_LOOPBACK) {
+ if (sprt->rt6i_idev == NULL ||
+ sprt->rt6i_idev->dev->ifindex != oif) {
+ if (strict && oif)
+ continue;
+ if (local && (!oif ||
+ local->rt6i_idev->dev->ifindex == oif))
+ continue;
+ }
local = sprt;
+ }
}
if (local)
* pointer to the last default router chosen. BH is disabled locally.
*/
static struct rt6_info *rt6_dflt_pointer;
-static spinlock_t rt6_dflt_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rt6_dflt_lock);
+
+void rt6_reset_dflt_pointer(struct rt6_info *rt)
+{
+ spin_lock_bh(&rt6_dflt_lock);
+ if (rt == NULL || rt == rt6_dflt_pointer) {
+ RT6_TRACE("reset default router: %p->NULL\n", rt6_dflt_pointer);
+ rt6_dflt_pointer = NULL;
+ }
+ spin_unlock_bh(&rt6_dflt_lock);
+}
/* Default Router Selection (RFC 2461 6.3.6) */
static struct rt6_info *rt6_best_dflt(struct rt6_info *rt, int oif)
sprt->rt6i_dev->ifindex == oif))
m += 8;
+ if (rt6_check_expired(sprt))
+ continue;
+
if (sprt == rt6_dflt_pointer)
m += 4;
for (sprt = rt6_dflt_pointer->u.next;
sprt; sprt = sprt->u.next) {
if (sprt->u.dst.obsolete <= 0 &&
- sprt->u.dst.error == 0) {
+ sprt->u.dst.error == 0 &&
+ !rt6_check_expired(sprt)) {
match = sprt;
break;
}
!match && sprt;
sprt = sprt->u.next) {
if (sprt->u.dst.obsolete <= 0 &&
- sprt->u.dst.error == 0) {
+ sprt->u.dst.error == 0 &&
+ !rt6_check_expired(sprt)) {
match = sprt;
break;
}
*/
for (sprt = ip6_routing_table.leaf;
sprt; sprt = sprt->u.next) {
- if ((sprt->rt6i_flags & RTF_DEFAULT) &&
+ if (!rt6_check_expired(sprt) &&
+ (sprt->rt6i_flags & RTF_DEFAULT) &&
(!oif ||
(sprt->rt6i_dev &&
sprt->rt6i_dev->ifindex == oif))) {
return NULL;
}
-/* rt6_ins is called with FREE rt6_lock.
+/* ip6_ins_rt is called with FREE rt6_lock.
It takes new route entry, the addition fails by any reason the
route is freed. In any case, if caller does not hold it, it may
be destroyed.
*/
-static int rt6_ins(struct rt6_info *rt, struct nlmsghdr *nlh, void *_rtattr)
+int ip6_ins_rt(struct rt6_info *rt, struct nlmsghdr *nlh, void *_rtattr)
{
int err;
dst_hold(&rt->u.dst);
- err = rt6_ins(rt, NULL, NULL);
+ err = ip6_ins_rt(rt, NULL, NULL);
if (err == 0)
return rt;
BACKTRACK();
if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) {
+ struct rt6_info *nrt;
+ dst_hold(&rt->u.dst);
read_unlock_bh(&rt6_lock);
- rt = rt6_cow(rt, &skb->nh.ipv6h->daddr,
- &skb->nh.ipv6h->saddr);
-
+ nrt = rt6_cow(rt, &skb->nh.ipv6h->daddr,
+ &skb->nh.ipv6h->saddr);
+
+ dst_release(&rt->u.dst);
+ rt = nrt;
+
if (rt->u.dst.error != -EEXIST || --attempts <= 0)
goto out2;
+
/* Race condition! In the gap, when rt6_lock was
released someone could insert this route. Relookup.
*/
}
if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) {
+ struct rt6_info *nrt;
+ dst_hold(&rt->u.dst);
read_unlock_bh(&rt6_lock);
- rt = rt6_cow(rt, &fl->fl6_dst, &fl->fl6_src);
+ nrt = rt6_cow(rt, &fl->fl6_dst, &fl->fl6_src);
+
+ dst_release(&rt->u.dst);
+ rt = nrt;
if (rt->u.dst.error != -EEXIST || --attempts <= 0)
goto out2;
if (mtu < dst_pmtu(dst) && rt6->rt6i_dst.plen == 128) {
rt6->rt6i_flags |= RTF_MODIFIED;
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
dst->metrics[RTAX_MTU-1] = mtu;
}
}
/* Protected by rt6_lock. */
static struct dst_entry *ndisc_dst_gc_list;
+static int ipv6_get_mtu(struct net_device *dev);
+
+static inline unsigned int ipv6_advmss(unsigned int mtu)
+{
+ mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
+
+ if (mtu < ip6_rt_min_advmss)
+ mtu = ip6_rt_min_advmss;
+
+ /*
+ * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
+ * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
+ * IPV6_MAXPLEN is also valid and means: "any MSS,
+ * rely only on pmtu discovery"
+ */
+ if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
+ mtu = IPV6_MAXPLEN;
+ return mtu;
+}
struct dst_entry *ndisc_dst_alloc(struct net_device *dev,
struct neighbour *neigh,
struct in6_addr *addr,
int (*output)(struct sk_buff *))
{
- struct rt6_info *rt = ip6_dst_alloc();
+ struct rt6_info *rt;
+ struct inet6_dev *idev = in6_dev_get(dev);
- if (unlikely(rt == NULL))
+ if (unlikely(idev == NULL))
+ return NULL;
+
+ rt = ip6_dst_alloc();
+ if (unlikely(rt == NULL)) {
+ in6_dev_put(idev);
goto out;
+ }
- if (dev)
- dev_hold(dev);
+ dev_hold(dev);
if (neigh)
neigh_hold(neigh);
else
neigh = ndisc_get_neigh(dev, addr);
rt->rt6i_dev = dev;
+ rt->rt6i_idev = idev;
rt->rt6i_nexthop = neigh;
- rt->rt6i_expires = 0;
- rt->rt6i_flags = RTF_LOCAL;
- rt->rt6i_metric = 0;
atomic_set(&rt->u.dst.__refcnt, 1);
rt->u.dst.metrics[RTAX_HOPLIMIT-1] = 255;
+ rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
+ rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_pmtu(&rt->u.dst));
rt->u.dst.output = output;
+#if 0 /* there's no chance to use these for ndisc */
+ rt->u.dst.flags = ipv6_addr_type(addr) & IPV6_ADDR_UNICAST
+ ? DST_HOST
+ : 0;
+ ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
+ rt->rt6i_dst.plen = 128;
+#endif
+
write_lock_bh(&rt6_lock);
rt->u.dst.next = ndisc_dst_gc_list;
ndisc_dst_gc_list = &rt->u.dst;
return mtu;
}
-static inline unsigned int ipv6_advmss(unsigned int mtu)
-{
- mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
-
- if (mtu < ip6_rt_min_advmss)
- mtu = ip6_rt_min_advmss;
-
- /*
- * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
- * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
- * IPV6_MAXPLEN is also valid and means: "any MSS,
- * rely only on pmtu discovery"
- */
- if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
- mtu = IPV6_MAXPLEN;
- return mtu;
-}
-
static int ipv6_get_hoplimit(struct net_device *dev)
{
int hoplimit = ipv6_devconf.hop_limit;
int err;
struct rtmsg *r;
struct rtattr **rta;
- struct rt6_info *rt;
+ struct rt6_info *rt = NULL;
struct net_device *dev = NULL;
+ struct inet6_dev *idev = NULL;
int addr_type;
rta = (struct rtattr **) _rtattr;
if (rtmsg->rtmsg_src_len)
return -EINVAL;
#endif
+ if (rtmsg->rtmsg_ifindex) {
+ err = -ENODEV;
+ dev = dev_get_by_index(rtmsg->rtmsg_ifindex);
+ if (!dev)
+ goto out;
+ idev = in6_dev_get(dev);
+ if (!idev)
+ goto out;
+ }
+
if (rtmsg->rtmsg_metric == 0)
rtmsg->rtmsg_metric = IP6_RT_PRIO_USER;
rt = ip6_dst_alloc();
- if (rt == NULL)
- return -ENOMEM;
+ if (rt == NULL) {
+ err = -ENOMEM;
+ goto out;
+ }
rt->u.dst.obsolete = -1;
rt->rt6i_expires = clock_t_to_jiffies(rtmsg->rtmsg_info);
rt->u.dst.output = ip6_output;
- if (rtmsg->rtmsg_ifindex) {
- dev = dev_get_by_index(rtmsg->rtmsg_ifindex);
- err = -ENODEV;
- if (dev == NULL)
- goto out;
- }
-
ipv6_addr_prefix(&rt->rt6i_dst.addr,
&rtmsg->rtmsg_dst, rtmsg->rtmsg_dst_len);
rt->rt6i_dst.plen = rtmsg->rtmsg_dst_len;
*/
if ((rtmsg->rtmsg_flags&RTF_REJECT) ||
(dev && (dev->flags&IFF_LOOPBACK) && !(addr_type&IPV6_ADDR_LOOPBACK))) {
- if (dev)
- dev_put(dev);
- dev = &loopback_dev;
- dev_hold(dev);
- rt->u.dst.output = ip6_pkt_discard;
+ /* hold loopback dev/idev if we haven't done so. */
+ if (dev != &loopback_dev) {
+ if (dev) {
+ dev_put(dev);
+ in6_dev_put(idev);
+ }
+ dev = &loopback_dev;
+ dev_hold(dev);
+ idev = in6_dev_get(dev);
+ if (!idev) {
+ err = -ENODEV;
+ goto out;
+ }
+ }
+ rt->u.dst.output = ip6_pkt_discard_out;
rt->u.dst.input = ip6_pkt_discard;
rt->u.dst.error = -ENETUNREACH;
rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
}
} else {
dev = grt->rt6i_dev;
+ idev = grt->rt6i_idev;
dev_hold(dev);
+ in6_dev_hold(grt->rt6i_idev);
}
if (!(grt->rt6i_flags&RTF_GATEWAY))
err = 0;
if (!rt->u.dst.metrics[RTAX_ADVMSS-1])
rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_pmtu(&rt->u.dst));
rt->u.dst.dev = dev;
- return rt6_ins(rt, nlh, _rtattr);
+ rt->rt6i_idev = idev;
+ return ip6_ins_rt(rt, nlh, _rtattr);
out:
if (dev)
dev_put(dev);
- dst_free((struct dst_entry *) rt);
+ if (idev)
+ in6_dev_put(idev);
+ if (rt)
+ dst_free((struct dst_entry *) rt);
return err;
}
write_lock_bh(&rt6_lock);
- spin_lock_bh(&rt6_dflt_lock);
- rt6_dflt_pointer = NULL;
- spin_unlock_bh(&rt6_dflt_lock);
+ rt6_reset_dflt_pointer(NULL);
+ err = fib6_del(rt, nlh, _rtattr);
dst_release(&rt->u.dst);
- err = fib6_del(rt, nlh, _rtattr);
write_unlock_bh(&rt6_lock);
return err;
rt->rt6i_dev->ifindex != rtmsg->rtmsg_ifindex))
continue;
if (rtmsg->rtmsg_flags&RTF_GATEWAY &&
- ipv6_addr_cmp(&rtmsg->rtmsg_gateway, &rt->rt6i_gateway))
+ !ipv6_addr_equal(&rtmsg->rtmsg_gateway, &rt->rt6i_gateway))
continue;
if (rtmsg->rtmsg_metric &&
rtmsg->rtmsg_metric != rt->rt6i_metric)
* Handle redirects
*/
void rt6_redirect(struct in6_addr *dest, struct in6_addr *saddr,
- struct neighbour *neigh, int on_link)
+ struct neighbour *neigh, u8 *lladdr, int on_link)
{
struct rt6_info *rt, *nrt;
if (neigh->dev != rt->rt6i_dev)
goto out;
- /* Redirect received -> path was valid.
- Look, redirects are sent only in response to data packets,
- so that this nexthop apparently is reachable. --ANK
- */
- dst_confirm(&rt->u.dst);
-
- /* Duplicate redirect: silently ignore. */
- if (neigh == rt->u.dst.neighbour)
- goto out;
-
- /* Current route is on-link; redirect is always invalid.
-
- Seems, previous statement is not true. It could
- be node, which looks for us as on-link (f.e. proxy ndisc)
- But then router serving it might decide, that we should
- know truth 8)8) --ANK (980726).
+ /*
+ * Current route is on-link; redirect is always invalid.
+ *
+ * Seems, previous statement is not true. It could
+ * be node, which looks for us as on-link (f.e. proxy ndisc)
+ * But then router serving it might decide, that we should
+ * know truth 8)8) --ANK (980726).
*/
if (!(rt->rt6i_flags&RTF_GATEWAY))
goto out;
* is a bit fuzzy and one might need to check all default
* routers.
*/
-
- if (ipv6_addr_cmp(saddr, &rt->rt6i_gateway)) {
+ if (!ipv6_addr_equal(saddr, &rt->rt6i_gateway)) {
if (rt->rt6i_flags & RTF_DEFAULT) {
struct rt6_info *rt1;
read_lock(&rt6_lock);
for (rt1 = ip6_routing_table.leaf; rt1; rt1 = rt1->u.next) {
- if (!ipv6_addr_cmp(saddr, &rt1->rt6i_gateway)) {
+ if (ipv6_addr_equal(saddr, &rt1->rt6i_gateway)) {
dst_hold(&rt1->u.dst);
dst_release(&rt->u.dst);
read_unlock(&rt6_lock);
* We have finally decided to accept it.
*/
+ neigh_update(neigh, lladdr, NUD_STALE,
+ NEIGH_UPDATE_F_WEAK_OVERRIDE|
+ NEIGH_UPDATE_F_OVERRIDE|
+ (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
+ NEIGH_UPDATE_F_ISROUTER))
+ );
+
+ /*
+ * Redirect received -> path was valid.
+ * Look, redirects are sent only in response to data packets,
+ * so that this nexthop apparently is reachable. --ANK
+ */
+ dst_confirm(&rt->u.dst);
+
+ /* Duplicate redirect: silently ignore. */
+ if (neigh == rt->u.dst.neighbour)
+ goto out;
+
nrt = ip6_rt_copy(rt);
if (nrt == NULL)
goto out;
nrt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
nrt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_pmtu(&nrt->u.dst));
- if (rt6_ins(nrt, NULL, NULL))
+ if (ip6_ins_rt(nrt, NULL, NULL))
goto out;
if (rt->rt6i_flags&RTF_CACHE) {
dst_set_expires(&nrt->u.dst, ip6_rt_mtu_expires);
nrt->rt6i_flags |= RTF_DYNAMIC|RTF_CACHE|RTF_EXPIRES;
nrt->u.dst.metrics[RTAX_MTU-1] = pmtu;
- rt6_ins(nrt, NULL, NULL);
+ ip6_ins_rt(nrt, NULL, NULL);
}
out:
rt->u.dst.dev = ort->u.dst.dev;
if (rt->u.dst.dev)
dev_hold(rt->u.dst.dev);
+ rt->rt6i_idev = ort->rt6i_idev;
+ if (rt->rt6i_idev)
+ in6_dev_hold(rt->rt6i_idev);
rt->u.dst.lastuse = jiffies;
rt->rt6i_expires = 0;
write_lock_bh(&rt6_lock);
for (rt = fn->leaf; rt; rt=rt->u.next) {
if (dev == rt->rt6i_dev &&
- ipv6_addr_cmp(&rt->rt6i_gateway, addr) == 0)
+ ipv6_addr_equal(&rt->rt6i_gateway, addr))
break;
}
if (rt)
rtmsg.rtmsg_type = RTMSG_NEWROUTE;
ipv6_addr_copy(&rtmsg.rtmsg_gateway, gwaddr);
rtmsg.rtmsg_metric = 1024;
- rtmsg.rtmsg_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | RTF_UP;
+ rtmsg.rtmsg_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT | RTF_UP | RTF_EXPIRES;
rtmsg.rtmsg_ifindex = dev->ifindex;
return rt6_get_dflt_router(gwaddr, dev);
}
-void rt6_purge_dflt_routers(int last_resort)
+void rt6_purge_dflt_routers(void)
{
struct rt6_info *rt;
- u32 flags;
-
- if (last_resort)
- flags = RTF_ALLONLINK;
- else
- flags = RTF_DEFAULT | RTF_ADDRCONF;
restart:
read_lock_bh(&rt6_lock);
for (rt = ip6_routing_table.leaf; rt; rt = rt->u.next) {
- if (rt->rt6i_flags & flags) {
+ if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) {
dst_hold(&rt->u.dst);
- spin_lock_bh(&rt6_dflt_lock);
- rt6_dflt_pointer = NULL;
- spin_unlock_bh(&rt6_dflt_lock);
+ rt6_reset_dflt_pointer(NULL);
read_unlock_bh(&rt6_lock);
read_unlock_bh(&rt6_lock);
}
-int ipv6_route_ioctl(unsigned int cmd, void *arg)
+int ipv6_route_ioctl(unsigned int cmd, void __user *arg)
{
struct in6_rtmsg rtmsg;
int err;
/*
 * Catch-all input handler for unroutable packets: bump the no-route
 * counter, report destination-unreachable to the sender, drop the skb.
 */
int ip6_pkt_discard(struct sk_buff *skb)
{
	IP6_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_NOROUTE, 0, skb->dev);
	kfree_skb(skb);
	return 0;
}
+int ip6_pkt_discard_out(struct sk_buff *skb)
+{
+ skb->dev = skb->dst->dev;
+ return ip6_pkt_discard(skb);
+}
+
/*
- * Add address
+ * Allocate a dst for local (unicast / anycast) address.
*/
-int ip6_rt_addr_add(struct in6_addr *addr, struct net_device *dev, int anycast)
+struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
+ const struct in6_addr *addr,
+ int anycast)
{
struct rt6_info *rt = ip6_dst_alloc();
if (rt == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
dev_hold(&loopback_dev);
+ in6_dev_hold(idev);
rt->u.dst.flags = DST_HOST;
rt->u.dst.input = ip6_input;
rt->u.dst.output = ip6_output;
rt->rt6i_dev = &loopback_dev;
+ rt->rt6i_idev = idev;
rt->u.dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
rt->u.dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dst_pmtu(&rt->u.dst));
rt->u.dst.metrics[RTAX_HOPLIMIT-1] = ipv6_get_hoplimit(rt->rt6i_dev);
rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_gateway);
if (rt->rt6i_nexthop == NULL) {
dst_free((struct dst_entry *) rt);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
ipv6_addr_copy(&rt->rt6i_dst.addr, addr);
rt->rt6i_dst.plen = 128;
- rt6_ins(rt, NULL, NULL);
-
- return 0;
-}
-
-/* Delete address. Warning: you should check that this address
- disappeared before calling this function.
- */
-int ip6_rt_addr_del(struct in6_addr *addr, struct net_device *dev)
-{
- struct rt6_info *rt;
- int err = -ENOENT;
-
- rt = rt6_lookup(addr, NULL, loopback_dev.ifindex, 1);
- if (rt) {
- if (rt->rt6i_dst.plen == 128)
- err = ip6_del_rt(rt, NULL, NULL);
- else
- dst_release(&rt->u.dst);
- }
+ atomic_set(&rt->u.dst.__refcnt, 1);
- return err;
+ return rt;
}
static int fib6_ifdown(struct rt6_info *rt, void *arg)
rtm->rtm_protocol = rt->rt6i_protocol;
if (rt->rt6i_flags&RTF_DYNAMIC)
rtm->rtm_protocol = RTPROT_REDIRECT;
- else if (rt->rt6i_flags&(RTF_ADDRCONF|RTF_ALLONLINK))
+ else if (rt->rt6i_flags & RTF_ADDRCONF)
rtm->rtm_protocol = RTPROT_KERNEL;
else if (rt->rt6i_flags&RTF_DEFAULT)
rtm->rtm_protocol = RTPROT_RA;
static
int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
- void *buffer, size_t *lenp)
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
if (write) {
- proc_dointvec(ctl, write, filp, buffer, lenp);
+ proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
if (flush_delay < 0)
flush_delay = 0;
fib6_run_gc((unsigned long)flush_delay);