3 * Distributed under the terms of the GNU GPL version 2.
4 * Copyright (c) 2008 Nicira Networks
7 #include <linux/netdevice.h>
8 #include <linux/netfilter.h>
9 #include <linux/netfilter_ipv4.h>
12 #include <linux/icmp.h>
13 #include <linux/if_ether.h>
15 #include <net/route.h>
19 #include "nx_act_snat.h"
22 /* We need these fake structures to make netfilter happy --
23 * lots of places assume that skb->dst != NULL, which isn't
24 * all that unreasonable.
26 * Currently, we fill in the PMTU entry because netfilter
27 * refragmentation needs it, and the rt_flags entry because
28 * ipt_REJECT needs it. Future netfilter modules might
29 * require us to fill additional fields. */
/* Minimal fake device backing __fake_rtable below; only the header
 * length is populated since that is all netfilter reads from it. */
30 static struct net_device __fake_net_device = {
31 .hard_header_len = ETH_HLEN
34 static struct rtable __fake_rtable = {
/* Permanent refcount so the fake route is never freed by dst_release(). */
37 .__refcnt = ATOMIC_INIT(1),
38 .dev = &__fake_net_device,
/* .path points back at our own dst, as it would for a real route. */
39 .path = &__fake_rtable.u.dst,
/* PMTU entry needed by netfilter refragmentation (see comment above). */
40 .metrics = {[RTAX_MTU - 1] = 1500},
47 /* Define ARP for IP since the Linux headers don't do it cleanly. */
/* Ethernet/IPv4 ARP packet layout: fixed header plus sender/target
 * hardware (MAC) and protocol (IP) addresses; 28 bytes total, packed. */
54 uint8_t ar_sha[ETH_ALEN];
56 uint8_t ar_tha[ETH_ALEN];
58 } __attribute__((packed));
59 OFP_ASSERT(sizeof(struct ip_arphdr) == 28);
62 /* Push the Ethernet header back on and transmit the packet. */
64 dp_xmit_skb_push(struct sk_buff *skb)
/* Earlier processing pulled ETH_HLEN off for the IP-level netfilter
 * hooks; restore it before handing to the datapath transmit path. */
66 skb_push(skb, ETH_HLEN);
67 return dp_xmit_skb(skb);
70 /* Perform maintenance related to a SNAT'd interface. Currently, this only
71 * checks whether MAC->IP bindings have expired.
73 * Called with the RCU read lock */
75 snat_maint(struct net_bridge_port *p)
78 struct snat_mapping *m, *n;
80 unsigned long timeout;
/* p->lock protects p->snat and its mappings list. */
82 spin_lock_irqsave(&p->lock, flags);
/* Convert the configured timeout (seconds) to jiffies. */
87 timeout = sc->mac_timeout * HZ;
/* _safe variant: expired entries are unlinked while walking. */
89 list_for_each_entry_safe (m, n, &sc->mappings, node) {
90 if (time_after(jiffies, m->used + timeout)) {
97 spin_unlock_irqrestore(&p->lock, flags);
100 /* When the packet is bound for a local interface, strip off the fake
103 void snat_local_in(struct sk_buff *skb)
/* Drop our reference on the fake route so the normal IP stack can
 * attach a genuine dst entry to the skb. */
105 if (skb->dst == (struct dst_entry *)&__fake_rtable) {
106 dst_release(skb->dst);
111 /* Check whether destination IP's address is in the IP->MAC mappings.
112 * If it is, then overwrite the destination MAC with the value from the
115 * Returns -1 if there is a problem, otherwise 0. */
117 dnat_mac(struct net_bridge_port *p, struct sk_buff *skb)
119 struct snat_conf *sc;
120 struct iphdr *iph = ip_hdr(skb);
121 struct ethhdr *eh = eth_hdr(skb);
122 struct snat_mapping *m;
/* Lock protects p->snat and the mappings list throughout the lookup. */
125 spin_lock_irqsave(&p->lock, flags);
128 spin_unlock_irqrestore(&p->lock, flags);
/* Only IPv4 frames carry addresses we rewrite. */
132 if (skb->protocol != htons(ETH_P_IP)) {
133 spin_unlock_irqrestore(&p->lock, flags);
/* Linear scan of the bindings; daddr is compared in network order. */
137 list_for_each_entry (m, &sc->mappings, node) {
138 if (m->ip_addr == iph->daddr){
/* The skb may be shared/cloned; get a private copy before writing. */
140 if (!make_writable(&skb)) {
142 printk("make_writable failed\n");
143 spin_unlock_irqrestore(&p->lock, flags);
147 memcpy(eh->h_dest, m->hw_addr, ETH_ALEN);
152 spin_unlock_irqrestore(&p->lock, flags);
/* Continuation run after the NF_INET_PRE_ROUTING hook accepts the skb. */
157 snat_pre_route_finish(struct sk_buff *skb)
159 struct net_bridge_port *p = skb->dev->br_port;
/* Attach the fake route so netfilter code that assumes
 * skb->dst != NULL keeps working (see __fake_rtable above). */
161 skb->dst = (struct dst_entry *)&__fake_rtable;
164 /* If SNAT is configured for this input device, check the IP->MAC
165 * mappings to see if we should update the destination MAC. */
167 dnat_mac(skb->dev->br_port, skb);
172 /* Checks whether 'skb' is an ARP request for an SNAT'd interface. If
173 * so, it will generate a response.
175 * Returns 0 if the packet was not handled. Otherwise, -1 is returned
176 * and the caller is responsible for freeing 'skb'. */
178 handle_arp_snat(struct sk_buff *skb)
180 struct net_bridge_port *p = skb->dev->br_port;
181 struct ip_arphdr *ah = (struct ip_arphdr *)arp_hdr(skb);
184 struct snat_conf *sc;
/* Only Ethernet/IPv4 ARP requests are interesting. */
186 if ((ah->ar_op != htons(ARPOP_REQUEST))
187 || ah->ar_hln != ETH_ALEN
188 || ah->ar_pro != htons(ETH_P_IP)
/* Target IP converted to host order for range comparison below. */
192 ip_addr = ntohl(ah->ar_tip);
193 spin_lock_irqsave(&p->lock, flags);
196 /* We're only interested in addresses we rewrite. */
/* NOTE(review): the 'sc &&' below is redundant once '!sc ||' has
 * short-circuited; kept as-is to avoid touching logic. */
197 if (!sc || (sc && ((ip_addr < sc->ip_addr_start)
198 || (ip_addr > sc->ip_addr_end)))) {
199 spin_unlock_irqrestore(&p->lock, flags);
202 spin_unlock_irqrestore(&p->lock, flags);
/* Answer on behalf of the SNAT range with the OF interface's MAC. */
204 arp_send(ARPOP_REPLY, ETH_P_ARP, ah->ar_sip, skb->dev, ah->ar_tip,
205 ah->ar_sha, p->dp->netdev->dev_addr, ah->ar_sha);
210 /* Checks whether 'skb' is a ping request for an SNAT'd interface. If
211 * so, it will generate a response.
213 * Returns 0 if the packet was not handled. Otherwise, -1 is returned
214 * and the caller is responsible for freeing 'skb'. */
216 handle_icmp_snat(struct sk_buff *skb)
218 struct net_bridge_port *p = skb->dev->br_port;
219 struct snat_conf *sc;
221 struct iphdr *iph = ip_hdr(skb);
223 struct icmphdr *icmph;
224 unsigned int datalen;
225 uint8_t tmp_eth[ETH_ALEN];
227 struct sk_buff *nskb;
/* Destination converted to host order for range comparison below. */
231 ip_addr = ntohl(iph->daddr);
232 spin_lock_irqsave(&p->lock, flags);
235 /* We're only interested in addresses we rewrite. */
/* NOTE(review): the 'sc &&' below is redundant once '!sc ||' has
 * short-circuited; kept as-is to avoid touching logic. */
236 if (!sc || (sc && ((ip_addr < sc->ip_addr_start)
237 || (ip_addr > sc->ip_addr_end)))) {
238 spin_unlock_irqrestore(&p->lock, flags);
241 spin_unlock_irqrestore(&p->lock, flags);
/* Locate the ICMP header just past the (ihl * 4)-byte IP header. */
243 icmph = (struct icmphdr *) ((u_int32_t *)iph + iph->ihl);
244 datalen = skb->len - iph->ihl * 4;
246 /* Drop fragments and packets not long enough to hold the ICMP
248 if (((ntohs(iph->frag_off) & IP_OFFSET) != 0) || datalen < 4)
251 /* We only respond to echo requests to our address. Continue
252 * processing replies and other ICMP messages since they may be
253 * intended for NAT'd hosts. */
254 if (icmph->type != ICMP_ECHO)
257 /* Send an echo reply in response */
258 nskb = skb_copy(skb, GFP_ATOMIC);
261 printk("skb copy failed for icmp reply\n");
/* Recompute header pointer for the copy before mutating it. */
267 icmph = (struct icmphdr *) ((u_int32_t *)iph + iph->ihl);
/* Swap source/destination at both the IP and Ethernet layers so the
 * reply retraces the request's path. */
270 iph->daddr = iph->saddr;
273 memcpy(tmp_eth, eh->h_dest, ETH_ALEN);
274 memcpy(eh->h_dest, eh->h_source, ETH_ALEN);
275 memcpy(eh->h_source, tmp_eth, ETH_ALEN);
277 icmph->type = ICMP_ECHOREPLY;
279 dp_xmit_skb_push(nskb);
284 /* Check if any SNAT maintenance needs to be done on 'skb' before it's
285 * checked against the datapath's tables. This includes DNAT
286 * modification based on prior SNAT action and responding to ARP and
287 * echo requests for the SNAT interface.
289 * Returns 0 if 'skb' should continue to be processed by the caller.
290 * Returns -1 if the packet was handled, and the caller should free
294 snat_pre_route(struct sk_buff *skb)
299 if (skb->protocol == htons(ETH_P_ARP))
300 return handle_arp_snat(skb);
301 else if (skb->protocol != htons(ETH_P_IP))
/* Basic IPv4 header sanity checks before trusting any field. */
305 if (iph->ihl < 5 || iph->version != 4)
/* Make sure the full IP header is linear in the skb. */
308 if (!pskb_may_pull(skb, iph->ihl*4))
311 /* Check if we need to echo reply for this address */
312 if ((iph->protocol == IPPROTO_ICMP) && (handle_icmp_snat(skb)))
/* Nonzero checksum result means a corrupt header; drop below. */
315 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
318 len = ntohs(iph->tot_len);
319 if ((skb->len < len) || len < (iph->ihl*4))
/* Trim any link-layer padding past the IP total length. */
322 if (pskb_trim_rcsum(skb, len))
/* Hand off to netfilter PRE_ROUTING; snat_pre_route_finish runs if
 * the hook verdict is ACCEPT. */
325 return NF_HOOK(PF_INET, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
326 snat_pre_route_finish);
/* Continuation for the FORWARD hook: run POST_ROUTING (where netfilter
 * performs the actual SNAT rewriting) and then transmit. */
334 snat_skb_finish(struct sk_buff *skb)
336 NF_HOOK(PF_INET, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
342 /* Update the MAC->IP mappings for the private side of the SNAT'd
345 update_mapping(struct net_bridge_port *p, struct sk_buff *skb)
348 struct snat_conf *sc;
349 struct iphdr *iph = ip_hdr(skb);
350 struct ethhdr *eh = eth_hdr(skb);
351 struct snat_mapping *m;
353 spin_lock_irqsave(&p->lock, flags);
358 list_for_each_entry (m, &sc->mappings, node) {
359 if (m->ip_addr == iph->saddr){
360 if (memcmp(m->hw_addr, eh->h_source, ETH_ALEN)) {
361 memcpy(m->hw_addr, eh->h_source, ETH_ALEN);
368 m = kmalloc(sizeof *m, GFP_ATOMIC);
369 m->ip_addr = iph->saddr;
370 memcpy(m->hw_addr, eh->h_source, ETH_ALEN);
373 list_add(&m->node, &sc->mappings);
376 spin_unlock_irqrestore(&p->lock, flags);
379 /* Perform SNAT modification on 'skb' and send out 'out_port'. If the
380 * port was not configured for SNAT, it will be sent through the interface
381 * unmodified. 'skb' is not consumed, so caller will need to free it.
384 snat_skb(struct datapath *dp, struct sk_buff *skb, int out_port)
386 struct net_bridge_port *p = dp->ports[out_port];
387 struct sk_buff *nskb;
/* Work on a copy; the caller retains ownership of the original skb. */
392 nskb = skb_copy(skb, GFP_ATOMIC);
398 /* We only SNAT IP, so just send it on its way if not */
399 if (skb->protocol != htons(ETH_P_IP)) {
404 /* Set the source MAC to the OF interface */
405 memcpy(eth_hdr(nskb)->h_source, dp->netdev->dev_addr, ETH_ALEN);
/* Remember the inside sender so replies can be DNAT'd back. */
407 update_mapping(p, skb);
409 /* Take the Ethernet header back off for netfilter hooks. */
410 skb_pull(nskb, ETH_HLEN);
412 NF_HOOK(PF_INET, NF_INET_FORWARD, nskb, skb->dev, nskb->dev,
416 /* Remove SNAT configuration on port 'p'.
418 * NB: The caller must hold the port's spinlock. */
420 snat_free_conf(struct net_bridge_port *p)
422 struct snat_conf *sc = p->snat;
427 /* Free existing mapping entries */
/* Pop entries from the head until the list is empty. */
428 while (!list_empty(&sc->mappings)) {
429 struct snat_mapping *m = list_entry(sc->mappings.next,
430 struct snat_mapping, node);
441 /* Remove SNAT configuration from an interface. */
443 snat_del_port(struct datapath *dp, uint16_t port)
446 struct net_bridge_port *p = dp->ports[port];
450 printk("Attempt to remove snat on non-existent port: %d\n", port);
454 spin_lock_irqsave(&p->lock, flags);
/* snat_free_conf() returning nonzero means nothing was configured. */
455 if (snat_free_conf(p)) {
456 /* SNAT not configured on this port */
457 spin_unlock_irqrestore(&p->lock, flags);
459 printk("Attempt to remove snat on non-snat port: %d\n", port);
463 spin_unlock_irqrestore(&p->lock, flags);
468 /* Add SNAT configuration to an interface. */
470 snat_add_port(struct datapath *dp, uint16_t port,
471 uint32_t ip_addr_start, uint32_t ip_addr_end,
472 uint16_t mac_timeout)
475 struct net_bridge_port *p = dp->ports[port];
476 struct snat_conf *sc;
479 if (mac_timeout == 0)
480 mac_timeout = MAC_TIMEOUT_DEFAULT;
484 printk("Attempt to add snat on non-existent port: %d\n", port);
488 /* If SNAT is already configured on the port, check whether the same
489 * IP addresses are used. If so, just update the mac timeout
490 * configuration. Otherwise, drop all SNAT configuration and
492 spin_lock_irqsave(&p->lock, flags);
494 if ((p->snat->ip_addr_start == ip_addr_start)
495 && (p->snat->ip_addr_end = ip_addr_end)) {
496 p->snat->mac_timeout = mac_timeout;
497 spin_unlock_irqrestore(&p->lock, flags);
501 /* Free the existing configuration and mappings. */
505 sc = kzalloc(sizeof *sc, GFP_ATOMIC);
507 spin_unlock_irqrestore(&p->lock, flags);
511 sc->ip_addr_start = ip_addr_start;
512 sc->ip_addr_end = ip_addr_end;
513 sc->mac_timeout = mac_timeout;
514 INIT_LIST_HEAD(&sc->mappings);
517 spin_unlock_irqrestore(&p->lock, flags);
522 /* Handle a SNAT configuration message.
524 * Returns 0 if no problems are found. Otherwise, a negative errno. */
526 snat_mod_config(struct datapath *dp, const struct nx_act_config *nac)
/* Number of nx_snat_config entries trailing the message header. */
528 int n_entries = (ntohs(nac->header.header.length) - sizeof *nac)
529 / sizeof (struct nx_snat_config);
533 for (i=0; i<n_entries; i++) {
534 const struct nx_snat_config *sc = &nac->snat[i];
535 uint16_t port = ntohs(sc->port);
/* All multi-byte fields arrive in network byte order. */
538 if (sc->command == NXSC_ADD)
539 r = snat_add_port(dp, port,
540 ntohl(sc->ip_addr_start), ntohl(sc->ip_addr_end),
541 ntohs(sc->mac_timeout));
543 r = snat_del_port(dp, port);