2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Definitions for the IP module.
8 * Version: @(#)ip.h 1.0.2 05/07/93
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Alan Cox, <gw4pts@gw4pts.ampr.org>
15 * Mike McLagan : Routing by source
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
25 #include <linux/config.h>
26 #include <linux/types.h>
27 #include <linux/socket.h>
30 #include <linux/netdevice.h>
31 #include <linux/inetdevice.h>
32 #include <linux/in_route.h>
33 #include <net/route.h>
40 #include <net/sock.h> /* struct sock */
44 struct ip_options opt; /* Compiled IP options */
47 #define IPSKB_MASQUERADED 1
48 #define IPSKB_TRANSLATED 2
49 #define IPSKB_FORWARDED 4
50 #define IPSKB_XFRM_TUNNEL_SIZE 8
57 struct ip_options *opt;
60 #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
64 struct ip_ra_chain *next;
66 void (*destructor)(struct sock *);
69 extern struct ip_ra_chain *ip_ra_chain;
70 extern rwlock_t ip_ra_lock;
73 #define IP_CE 0x8000 /* Flag: "Congestion" */
74 #define IP_DF 0x4000 /* Flag: "Don't Fragment" */
75 #define IP_MF 0x2000 /* Flag: "More Fragments" */
76 #define IP_OFFSET 0x1FFF /* "Fragment Offset" part */
78 #define IP_FRAG_TIME (30 * HZ) /* fragment lifetime */
80 extern void ip_mc_dropsocket(struct sock *);
81 extern void ip_mc_dropdevice(struct net_device *dev);
82 extern int igmp_mc_proc_init(void);
85 * Functions provided by ip.c
88 extern int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
90 struct ip_options *opt);
91 extern int ip_rcv(struct sk_buff *skb, struct net_device *dev,
92 struct packet_type *pt);
93 extern int ip_local_deliver(struct sk_buff *skb);
94 extern int ip_mr_input(struct sk_buff *skb);
95 extern int ip_output(struct sk_buff *skb);
96 extern int ip_mc_output(struct sk_buff *skb);
97 extern int ip_fragment(struct sk_buff *skb, int (*out)(struct sk_buff*));
98 extern int ip_do_nat(struct sk_buff *skb);
99 extern void ip_send_check(struct iphdr *ip);
100 extern int ip_queue_xmit(struct sk_buff *skb, int ipfragok);
101 extern void ip_init(void);
102 extern int ip_append_data(struct sock *sk,
103 int getfrag(void *from, char *to, int offset, int len,
104 int odd, struct sk_buff *skb),
105 void *from, int len, int protolen,
106 struct ipcm_cookie *ipc,
109 extern int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb);
110 extern ssize_t ip_append_page(struct sock *sk, struct page *page,
111 int offset, size_t size, int flags);
112 extern int ip_push_pending_frames(struct sock *sk);
113 extern void ip_flush_pending_frames(struct sock *sk);
117 * Map a multicast IP onto multicast MAC for type Token Ring.
118 * This conforms to RFC1469 Option 2 Multicasting i.e.
119 * using a functional address to transmit / receive
123 static inline void ip_tr_mc_map(u32 addr, char *buf)
133 struct ip_reply_arg {
136 int csumoffset; /* u16 offset of csum in iov[0].iov_base */
137 /* -1 if not needed */
/* Build and send a reply to the packet in 'skb' (e.g. a TCP RST/ACK),
 * swapping source/destination and echoing route/options as needed. */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len);
143 extern int ip_finish_output(struct sk_buff *skb);
152 extern struct ipv4_config ipv4_config;
153 DECLARE_SNMP_STAT(struct ip_mib, ip_statistics);
154 #define IP_INC_STATS(field) SNMP_INC_STATS(ip_statistics, field)
155 #define IP_INC_STATS_BH(field) SNMP_INC_STATS_BH(ip_statistics, field)
156 #define IP_INC_STATS_USER(field) SNMP_INC_STATS_USER(ip_statistics, field)
157 DECLARE_SNMP_STAT(struct linux_mib, net_statistics);
158 #define NET_INC_STATS(field) SNMP_INC_STATS(net_statistics, field)
159 #define NET_INC_STATS_BH(field) SNMP_INC_STATS_BH(net_statistics, field)
160 #define NET_INC_STATS_USER(field) SNMP_INC_STATS_USER(net_statistics, field)
161 #define NET_ADD_STATS_BH(field, adnd) SNMP_ADD_STATS_BH(net_statistics, field, adnd)
162 #define NET_ADD_STATS_USER(field, adnd) SNMP_ADD_STATS_USER(net_statistics, field, adnd)
164 extern int sysctl_local_port_range[2];
165 extern int sysctl_ip_default_ttl;
168 /* The function in 2.2 was invalid, producing wrong result for
169 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625) */
171 int ip_decrease_ttl(struct iphdr *iph)
173 u32 check = iph->check;
174 check += htons(0x0100);
175 iph->check = check + (check>=0xFFFF);
180 int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
182 return (inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO ||
183 (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT &&
184 !(dst_metric(dst, RTAX_LOCK)&(1<<RTAX_MTU))));
187 extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more);
189 static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
191 if (iph->frag_off & htons(IP_DF)) {
192 /* This is only to work around buggy Windows95/2000
193 * VJ compression implementations. If the ID field
194 * does not change, they drop every other packet in
195 * a TCP stream using header compression.
197 iph->id = (sk && inet_sk(sk)->daddr) ?
198 htons(inet_sk(sk)->id++) : 0;
200 __ip_select_ident(iph, dst, 0);
203 static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more)
205 if (iph->frag_off & htons(IP_DF)) {
206 if (sk && inet_sk(sk)->daddr) {
207 iph->id = htons(inet_sk(sk)->id);
208 inet_sk(sk)->id += 1 + more;
212 __ip_select_ident(iph, dst, more);
216 * Map a multicast IP onto multicast MAC for type ethernet.
219 static inline void ip_eth_mc_map(u32 addr, char *buf)
232 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
233 #include <linux/ipv6.h>
236 static __inline__ void inet_reset_saddr(struct sock *sk)
238 inet_sk(sk)->rcv_saddr = inet_sk(sk)->saddr = 0;
239 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
240 if (sk->sk_family == PF_INET6) {
241 struct ipv6_pinfo *np = inet6_sk(sk);
243 memset(&np->saddr, 0, sizeof(np->saddr));
244 memset(&np->rcv_saddr, 0, sizeof(np->rcv_saddr));
251 extern int ip_call_ra_chain(struct sk_buff *skb);
254 * Functions provided by ip_fragment.o
257 struct sk_buff *ip_defrag(struct sk_buff *skb);
258 extern int ip_frag_nqueues;
259 extern atomic_t ip_frag_mem;
262 * Functions provided by ip_forward.c
265 extern int ip_forward(struct sk_buff *skb);
266 extern int ip_net_unreachable(struct sk_buff *skb);
269 * Functions provided by ip_options.c
272 extern void ip_options_build(struct sk_buff *skb, struct ip_options *opt, u32 daddr, struct rtable *rt, int is_frag);
273 extern int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb);
274 extern void ip_options_fragment(struct sk_buff *skb);
275 extern int ip_options_compile(struct ip_options *opt, struct sk_buff *skb);
276 extern int ip_options_get(struct ip_options **optp, unsigned char *data, int optlen, int user);
277 extern void ip_options_undo(struct ip_options * opt);
278 extern void ip_forward_options(struct sk_buff *skb);
279 extern int ip_options_rcv_srr(struct sk_buff *skb);
282 * Functions provided by ip_sockglue.c
285 extern void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
286 extern int ip_cmsg_send(struct msghdr *msg, struct ipcm_cookie *ipc);
287 extern int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, int optlen);
288 extern int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen);
289 extern int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *));
291 extern int ip_recv_error(struct sock *sk, struct msghdr *msg, int len);
292 extern void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
293 u16 port, u32 info, u8 *payload);
294 extern void ip_local_error(struct sock *sk, int err, u32 daddr, u16 dport,
297 extern int ipv4_proc_init(void);
299 /* sysctl helpers - any sysctl which holds a value that ends up being
300 * fed into the routing cache should use these handlers.
302 int ipv4_doint_and_flush(ctl_table *ctl, int write,
303 struct file* filp, void __user *buffer,
305 int ipv4_doint_and_flush_strategy(ctl_table *table, int __user *name, int nlen,
306 void __user *oldval, size_t __user *oldlenp,
307 void __user *newval, size_t newlen,