1 /* ip_nat_helper.c - generic support functions for NAT helpers
3 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
4 * (C) 2003-2004 Netfilter Core Team <coreteam@netfilter.org>
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * 14 Jan 2002 Harald Welte <laforge@gnumonks.org>:
11 * - add support for SACK adjustment
12 * 14 Mar 2002 Harald Welte <laforge@gnumonks.org>:
13 * - merge SACK support into newnat API
14 * 16 Aug 2002 Brian J. Murrell <netfilter@interlinx.bc.ca>:
15 * - make ip_nat_resize_packet more generic (TCP and UDP)
16 * - add ip_nat_mangle_udp_packet
18 #include <linux/config.h>
19 #include <linux/module.h>
20 #include <linux/kmod.h>
21 #include <linux/types.h>
22 #include <linux/timer.h>
23 #include <linux/skbuff.h>
24 #include <linux/netfilter_ipv4.h>
25 #include <net/checksum.h>
31 #define ASSERT_READ_LOCK(x) MUST_BE_READ_LOCKED(&ip_nat_lock)
32 #define ASSERT_WRITE_LOCK(x) MUST_BE_WRITE_LOCKED(&ip_nat_lock)
34 #include <linux/netfilter_ipv4/ip_conntrack.h>
35 #include <linux/netfilter_ipv4/ip_conntrack_helper.h>
36 #include <linux/netfilter_ipv4/ip_nat.h>
37 #include <linux/netfilter_ipv4/ip_nat_protocol.h>
38 #include <linux/netfilter_ipv4/ip_nat_core.h>
39 #include <linux/netfilter_ipv4/ip_nat_helper.h>
40 #include <linux/netfilter_ipv4/listhelp.h>
/* Debug helpers.  NOTE(review): the #ifdef DEBUG / #else / #endif guards
 * around these two variants appear to be elided from this excerpt —
 * DUMP_OFFSET is defined twice; presumably the printk variant is the
 * DEBUG build and the empty ones the production build.  Confirm against
 * the full file. */
44 #define DUMP_OFFSET(x) printk("offset_before=%d, offset_after=%d, correction_pos=%u\n", x->offset_before, x->offset_after, x->correction_pos);
46 #define DEBUGP(format, args...)
47 #define DUMP_OFFSET(x)
/* List of registered NAT helpers; writers take ip_nat_lock (see the
 * ASSERT_READ/WRITE_LOCK macros above). */
50 static LIST_HEAD(helpers);
/* Serializes updates to the per-conntrack TCP sequence-offset records
 * manipulated in adjust_tcp_sequence(). */
51 DECLARE_LOCK(ip_nat_seqofs_lock);
53 /* Setup TCP sequence correction given this change at this sequence */
/* NOTE(review): this excerpt elides several lines of the definition —
 * the return type, the size-difference parameter ('sizediff', used
 * below), the 'dir' declaration, and the braces.  Comments cover only
 * the visible logic. */
55 adjust_tcp_sequence(u32 seq,
57 struct ip_conntrack *ct,
58 enum ip_conntrack_info ctinfo)
/* Per-direction sequence-offset records kept in the conntrack entry. */
61 struct ip_nat_seq *this_way, *other_way;
63 DEBUGP("ip_nat_resize_packet: old_size = %u, new_size = %u\n",
64 (*skb)->len, new_size);
66 dir = CTINFO2DIR(ctinfo);
68 this_way = &ct->nat.info.seq[dir];
69 other_way = &ct->nat.info.seq[!dir];
71 DEBUGP("ip_nat_resize_packet: Seq_offset before: ");
72 DUMP_OFFSET(this_way);
/* Offset records are updated under ip_nat_seqofs_lock with BHs off. */
74 LOCK_BH(&ip_nat_seqofs_lock);
76 /* SYN adjust. If it's uninitialized, or this is after last
77 * correction, record it: we don't handle more than one
78 * adjustment in the window, but do deal with common case of a
/* New correction point: remember the accumulated offset that applies
 * before 'seq' (offset_before) and fold the new size change into the
 * offset applied at/after it (offset_after). */
80 if (this_way->offset_before == this_way->offset_after
81 || before(this_way->correction_pos, seq)) {
82 this_way->correction_pos = seq;
83 this_way->offset_before = this_way->offset_after;
84 this_way->offset_after += sizediff;
86 UNLOCK_BH(&ip_nat_seqofs_lock);
88 DEBUGP("ip_nat_resize_packet: Seq_offset after: ");
89 DUMP_OFFSET(this_way);
92 /* Frobs data inside this packet, which is linear. */
/* Replaces match_len bytes at match_offset (relative to 'dataoff' past
 * the IP header) with rep_len bytes from rep_buffer, then fixes up skb
 * length and the IP header checksum.  NOTE(review): the 'dataoff' and
 * 'rep_len' parameters, the local 'data' declaration, and braces are
 * elided from this excerpt. */
93 static void mangle_contents(struct sk_buff *skb,
95 unsigned int match_offset,
96 unsigned int match_len,
97 const char *rep_buffer,
/* Callers must hand us a linear skb (see SKB_LINEAR_ASSERT in callers). */
102 BUG_ON(skb_is_nonlinear(skb));
103 data = (unsigned char *)skb->nh.iph + dataoff;
105 /* move post-replacement */
/* Shift the packet tail so the replacement fits exactly; memmove since
 * source and destination may overlap. */
106 memmove(data + match_offset + rep_len,
107 data + match_offset + match_len,
108 skb->tail - (data + match_offset + match_len));
110 /* insert data from buffer */
111 memcpy(data + match_offset, rep_buffer, rep_len);
113 /* update skb info */
114 if (rep_len > match_len) {
115 DEBUGP("ip_nat_mangle_packet: Extending packet by "
116 "%u from %u bytes\n", rep_len - match_len,
/* Grow: caller ensured tailroom (or enlarged the skb) beforehand. */
118 skb_put(skb, rep_len - match_len);
120 DEBUGP("ip_nat_mangle_packet: Shrinking packet from "
121 "%u from %u bytes\n", match_len - rep_len,
/* Shrink: rep_len < match_len, so this trims the difference. */
123 __skb_trim(skb, skb->len + rep_len - match_len);
126 /* fix IP hdr checksum information */
127 skb->nh.iph->tot_len = htons(skb->len);
128 ip_send_check(skb->nh.iph);
131 /* Unusual, but possible case. */
/* Grows *pskb's tailroom by 'extra' bytes via a copy.  NOTE(review):
 * the error returns, the free of the old skb, and the pointer swap
 * into *pskb are elided from this excerpt. */
132 static int enlarge_skb(struct sk_buff **pskb, unsigned int extra)
134 struct sk_buff *nskb;
/* IP total-length field is 16 bits; never grow past 65535 bytes. */
136 if ((*pskb)->len + extra > 65535)
/* Atomic allocation: we may be called from softirq context. */
139 nskb = skb_copy_expand(*pskb, skb_headroom(*pskb), extra, GFP_ATOMIC);
143 /* Transfer socket to new skb. */
145 skb_set_owner_w(nskb, (*pskb)->sk);
146 #ifdef CONFIG_NETFILTER_DEBUG
147 nskb->nf_debug = (*pskb)->nf_debug;
154 /* Generic function for mangling variable-length address changes inside
155 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
158 * Takes care about all the nasty sequence number changes, checksumming,
159 * skb enlargement, ...
/* NOTE(review): the return type, local declarations (iph, tcph,
 * datalen), the failure returns, and the trailing adjust_tcp_sequence
 * arguments (ct, ctinfo) plus the success return are elided here. */
163 ip_nat_mangle_tcp_packet(struct sk_buff **pskb,
164 struct ip_conntrack *ct,
165 enum ip_conntrack_info ctinfo,
166 unsigned int match_offset,
167 unsigned int match_len,
168 const char *rep_buffer,
169 unsigned int rep_len)
/* We rewrite the whole packet, so make it all writable up front. */
175 if (!skb_ip_make_writable(pskb, (*pskb)->len))
/* If the replacement grows the packet beyond current tailroom, copy
 * into a larger skb first. */
178 if (rep_len > match_len
179 && rep_len - match_len > skb_tailroom(*pskb)
180 && !enlarge_skb(pskb, rep_len - match_len))
183 SKB_LINEAR_ASSERT(*pskb);
185 iph = (*pskb)->nh.iph;
186 tcph = (void *)iph + iph->ihl*4;
/* Splice the replacement into the TCP payload (past IP + TCP headers). */
188 mangle_contents(*pskb, iph->ihl*4 + tcph->doff*4,
189 match_offset, match_len, rep_buffer, rep_len);
/* Recompute the TCP checksum over the (possibly resized) segment. */
191 datalen = (*pskb)->len - iph->ihl*4;
193 tcph->check = tcp_v4_check(tcph, datalen, iph->saddr, iph->daddr,
194 csum_partial((char *)tcph, datalen, 0));
/* Record the size change so later packets get their seq/ack fixed. */
196 adjust_tcp_sequence(ntohl(tcph->seq),
197 (int)rep_len - (int)match_len,
202 /* Generic function for mangling variable-length address changes inside
203 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
204 * command in the Amanda protocol)
206 * Takes care about all the nasty sequence number changes, checksumming,
207 * skb enlargement, ...
209 * XXX - This function could be merged with ip_nat_mangle_tcp_packet which
210 * should be fairly easy to do.
/* NOTE(review): return type, local declarations (iph, udph), failure
 * returns, the 'if (udph->check)' guard implied by the comment at the
 * bottom, and the checksum-call tail are elided from this excerpt. */
213 ip_nat_mangle_udp_packet(struct sk_buff **pskb,
214 struct ip_conntrack *ct,
215 enum ip_conntrack_info ctinfo,
216 unsigned int match_offset,
217 unsigned int match_len,
218 const char *rep_buffer,
219 unsigned int rep_len)
224 /* UDP helpers might accidentally mangle the wrong packet */
/* Sanity check: the match region must lie inside the datagram. */
225 iph = (*pskb)->nh.iph;
226 if ((*pskb)->len < iph->ihl*4 + sizeof(*udph) +
227 match_offset + match_len)
230 if (!skb_ip_make_writable(pskb, (*pskb)->len))
/* Grow the skb first if the replacement won't fit in the tailroom. */
233 if (rep_len > match_len
234 && rep_len - match_len > skb_tailroom(*pskb)
235 && !enlarge_skb(pskb, rep_len - match_len))
238 iph = (*pskb)->nh.iph;
239 udph = (void *)iph + iph->ihl*4;
/* Splice the replacement into the UDP payload (past IP + UDP headers). */
240 mangle_contents(*pskb, iph->ihl*4 + sizeof(*udph),
241 match_offset, match_len, rep_buffer, rep_len);
243 /* update the length of the UDP packet */
244 udph->len = htons((*pskb)->len - iph->ihl*4);
246 /* fix udp checksum if udp checksum was previously calculated */
248 int datalen = (*pskb)->len - iph->ihl * 4;
250 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
251 datalen, IPPROTO_UDP,
252 csum_partial((char *)udph,
259 /* Adjust one found SACK option including checksum correction */
/* Walks the SACK blocks in [sackoff, sackend) and rewrites each
 * start/end sequence number by the recorded NAT offset.  NOTE(review):
 * the return type, a tcph parameter (checksum is patched via
 * ip_nat_cheat_check below), the 'else' keywords between the paired
 * assignments, and closing braces are elided from this excerpt. */
261 sack_adjust(struct sk_buff *skb,
263 unsigned int sackoff,
264 unsigned int sackend,
265 struct ip_nat_seq *natseq)
267 while (sackoff < sackend) {
268 struct tcp_sack_block *sack;
269 u_int32_t new_start_seq, new_end_seq;
271 sack = (void *)skb->data + sackoff;
/* Pick offset_after for seqs at/after the correction point,
 * offset_before for seqs before it (mirrors ip_nat_seq_adjust). */
272 if (after(ntohl(sack->start_seq) - natseq->offset_before,
273 natseq->correction_pos))
274 new_start_seq = ntohl(sack->start_seq)
275 - natseq->offset_after;
277 new_start_seq = ntohl(sack->start_seq)
278 - natseq->offset_before;
279 new_start_seq = htonl(new_start_seq);
281 if (after(ntohl(sack->end_seq) - natseq->offset_before,
282 natseq->correction_pos))
283 new_end_seq = ntohl(sack->end_seq)
284 - natseq->offset_after;
286 new_end_seq = ntohl(sack->end_seq)
287 - natseq->offset_before;
288 new_end_seq = htonl(new_end_seq);
290 DEBUGP("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
291 ntohl(sack->start_seq), new_start_seq,
292 ntohl(sack->end_seq), new_end_seq);
/* Incrementally patch the TCP checksum for both rewritten fields
 * rather than recomputing it over the whole segment. */
295 ip_nat_cheat_check(~sack->start_seq, new_start_seq,
296 ip_nat_cheat_check(~sack->end_seq,
299 sack->start_seq = new_start_seq;
300 sack->end_seq = new_end_seq;
301 sackoff += sizeof(*sack);
305 /* TCP SACK sequence number adjustment */
/* Scans the TCP option space for SACK options and hands each one to
 * sack_adjust() with the reverse direction's offset record.
 * NOTE(review): a tcph parameter, the failure return after
 * skb_ip_make_writable, the single-byte-option handling between lines
 * 324 and 333, the 'optoff += op[1]' style advance, and the success
 * return are elided from this excerpt. */
306 static inline unsigned int
307 ip_nat_sack_adjust(struct sk_buff **pskb,
309 struct ip_conntrack *ct,
310 enum ip_conntrack_info ctinfo)
312 unsigned int dir, optoff, optend;
/* Option space runs from end of the fixed TCP header to doff*4. */
314 optoff = (*pskb)->nh.iph->ihl*4 + sizeof(struct tcphdr);
315 optend = (*pskb)->nh.iph->ihl*4 + tcph->doff*4;
317 if (!skb_ip_make_writable(pskb, optend))
320 dir = CTINFO2DIR(ctinfo);
322 while (optoff < optend) {
323 /* Usually: option, length. */
324 unsigned char *op = (*pskb)->data + optoff;
333 /* no partial options */
334 if (optoff + 1 == optend
335 || optoff + op[1] > optend
/* A well-formed SACK option: kind, len, then whole 8-byte blocks. */
338 if (op[0] == TCPOPT_SACK
339 && op[1] >= 2+TCPOLEN_SACK_PERBLOCK
340 && ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
/* SACKs acknowledge the peer's data, so use the !dir offsets. */
341 sack_adjust(*pskb, tcph, optoff+2,
343 &ct->nat.info.seq[!dir]);
350 /* TCP sequence number adjustment.  Returns 1 on success, 0 on failure */
/* NOTE(review): the return type line, a tcph declaration, the early
 * 'return 1' for the no-op case, failure returns, the tcph->seq
 * assignment paired with line 397, and the final return are elided
 * from this excerpt. */
352 ip_nat_seq_adjust(struct sk_buff **pskb,
353 struct ip_conntrack *ct,
354 enum ip_conntrack_info ctinfo)
357 int dir, newseq, newack;
358 struct ip_nat_seq *this_way, *other_way;
360 dir = CTINFO2DIR(ctinfo);
362 this_way = &ct->nat.info.seq[dir];
363 other_way = &ct->nat.info.seq[!dir];
365 /* No adjustments to make?  Very common case. */
366 if (!this_way->offset_before && !this_way->offset_after
367 && !other_way->offset_before && !other_way->offset_after)
/* Only the TCP header needs to be writable here. */
370 if (!skb_ip_make_writable(pskb, (*pskb)->nh.iph->ihl*4+sizeof(*tcph)))
373 tcph = (void *)(*pskb)->data + (*pskb)->nh.iph->ihl*4;
/* Outgoing seq: add our own direction's offset (after/before the
 * correction point respectively). */
374 if (after(ntohl(tcph->seq), this_way->correction_pos))
375 newseq = ntohl(tcph->seq) + this_way->offset_after;
377 newseq = ntohl(tcph->seq) + this_way->offset_before;
378 newseq = htonl(newseq);
/* Incoming ack: subtract the reverse direction's offset. */
380 if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
381 other_way->correction_pos))
382 newack = ntohl(tcph->ack_seq) - other_way->offset_after;
384 newack = ntohl(tcph->ack_seq) - other_way->offset_before;
385 newack = htonl(newack);
/* Patch the checksum incrementally for both rewritten fields. */
387 tcph->check = ip_nat_cheat_check(~tcph->seq, newseq,
388 ip_nat_cheat_check(~tcph->ack_seq,
392 DEBUGP("Adjusting sequence number from %u->%u, ack from %u->%u\n",
393 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
397 tcph->ack_seq = newack;
/* SACK blocks carry sequence numbers too; fix them as well. */
399 if (!ip_nat_sack_adjust(pskb, tcph, ct, ctinfo))
/* Let conntrack's TCP window tracking see the adjusted numbers. */
402 ip_conntrack_tcp_update(*pskb, ct, dir);
/* List-search predicate: does this helper's tuple+mask match 'tuple'?
 * NOTE(review): the return type line is elided from this excerpt. */
408 helper_cmp(const struct ip_nat_helper *helper,
409 const struct ip_conntrack_tuple *tuple)
411 return ip_ct_tuple_mask_cmp(tuple, &helper->tuple, &helper->mask);
/* Registers a NAT helper, refusing duplicates whose tuple/mask would
 * clash with an already-registered one.  NOTE(review): the local 'ret'
 * handling, the duplicate-case unlock/return, and the final return are
 * elided from this excerpt. */
414 int ip_nat_helper_register(struct ip_nat_helper *me)
418 WRITE_LOCK(&ip_nat_lock);
/* Reject a second helper matching the same tuple pattern. */
419 if (LIST_FIND(&helpers, helper_cmp, struct ip_nat_helper *,&me->tuple))
422 list_prepend(&helpers, me);
423 WRITE_UNLOCK(&ip_nat_lock);
/* Lock-free lookup variant: caller must already hold ip_nat_lock. */
428 struct ip_nat_helper *
429 __ip_nat_find_helper(const struct ip_conntrack_tuple *tuple)
431 return LIST_FIND(&helpers, helper_cmp, struct ip_nat_helper *, tuple);
/* Locked lookup: takes the read lock around __ip_nat_find_helper().
 * NOTE(review): the 'return h;' tail is elided from this excerpt. */
434 struct ip_nat_helper *
435 ip_nat_find_helper(const struct ip_conntrack_tuple *tuple)
437 struct ip_nat_helper *h;
439 READ_LOCK(&ip_nat_lock);
440 h = __ip_nat_find_helper(tuple);
441 READ_UNLOCK(&ip_nat_lock);
/* Cleanup predicate: true for conntracks still bound to 'helper'.
 * NOTE(review): return type, 'ret' declaration, and 'return ret;' are
 * elided from this excerpt. */
447 kill_helper(const struct ip_conntrack *i, void *helper)
/* Read lock taken so i->nat.info.helper can't change underneath us. */
451 READ_LOCK(&ip_nat_lock);
452 ret = (i->nat.info.helper == helper);
453 READ_UNLOCK(&ip_nat_lock);
/* Unregisters a NAT helper and drops every connection still using it.
 * NOTE(review): the closing brace of the if-block, the synchronization
 * call implied by the bh comment (presumably synchronize_net() or
 * similar — confirm against the full file), and the tail of the final
 * comment are elided from this excerpt. */
458 void ip_nat_helper_unregister(struct ip_nat_helper *me)
460 WRITE_LOCK(&ip_nat_lock);
461 /* Autoloading conntrack helper might have failed */
462 if (LIST_FIND(&helpers, helper_cmp, struct ip_nat_helper *,&me->tuple)) {
463 LIST_DELETE(&helpers, me);
465 WRITE_UNLOCK(&ip_nat_lock);
467 /* Someone could be still looking at the helper in a bh. */
470 /* Find anything using it, and umm, kill them.  We can't turn
471 them into normal connections: if we've adjusted SYNs, then
472 they'll ackstorm.  So we just drop it.  We used to just
473 bump module count when a connection existed, but that
474 forces admins to gen fake RSTs or bounce box, either of
475 which is just a long-winded way of making things
477 ip_ct_selective_cleanup(kill_helper, me);