/*
 * Distributed under the terms of the GNU GPL version 2.
 * Copyright (c) 2007, 2008, 2009, 2010 Nicira Networks.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

#include "flow.h"
#include "datapath.h"
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/llc_pdu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/llc.h>
#include <linux/module.h>
#include <linux/in.h>
#include <linux/rcupdate.h>
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <net/inet_ecn.h>
#include <net/ip.h>

struct kmem_cache *flow_cache;
static unsigned int hash_seed;

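/* This mirrors struct arphdr from <linux/if_arp.h>, with the optional
 * Ethernet+IPv4 address members spelled out so that flow_extract() can read
 * the sender and target addresses directly instead of doing pointer
 * arithmetic past the fixed header. */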
struct arp_eth_header {
	__be16 ar_hrd;			/* format of hardware address */
	__be16 ar_pro;			/* format of protocol address */
	unsigned char ar_hln;		/* length of hardware address */
	unsigned char ar_pln;		/* length of protocol address */
	__be16 ar_op;			/* ARP opcode (command) */

	/* Ethernet+IPv4 specific members. */
	unsigned char ar_sha[ETH_ALEN];	/* sender hardware address */
	unsigned char ar_sip[4];	/* sender IP address */
	unsigned char ar_tha[ETH_ALEN];	/* target hardware address */
	unsigned char ar_tip[4];	/* target IP address */
} __attribute__((packed));

static inline int arphdr_ok(struct sk_buff *skb)
{
	int nh_ofs = skb_network_offset(skb);
	return pskb_may_pull(skb, nh_ofs + sizeof(struct arp_eth_header));
}

static inline int iphdr_ok(struct sk_buff *skb)
{
	int nh_ofs = skb_network_offset(skb);
	if (skb->len >= nh_ofs + sizeof(struct iphdr)) {
		int ip_len = ip_hdrlen(skb);
		return (ip_len >= sizeof(struct iphdr)
			&& pskb_may_pull(skb, nh_ofs + ip_len));
	}
	return 0;
}

static inline int tcphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	if (pskb_may_pull(skb, th_ofs + sizeof(struct tcphdr))) {
		int tcp_len = tcp_hdrlen(skb);
		return (tcp_len >= sizeof(struct tcphdr)
			&& skb->len >= th_ofs + tcp_len);
	}
	return 0;
}

static inline int udphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	return pskb_may_pull(skb, th_ofs + sizeof(struct udphdr));
}

static inline int icmphdr_ok(struct sk_buff *skb)
{
	int th_ofs = skb_transport_offset(skb);
	return pskb_may_pull(skb, th_ofs + sizeof(struct icmphdr));
}

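/* TCP_FLAGS_OFFSET is the offset of the byte within the TCP header that
 * holds the flag bits; TCP_FLAG_MASK keeps its low six bits (FIN, SYN, RST,
 * PSH, ACK, URG) and discards the two reserved high-order bits. */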
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f

void flow_used(struct sw_flow *flow, struct sk_buff *skb)
{
	u8 tcp_flags = 0;

	if (flow->key.dl_type == htons(ETH_P_IP) &&
	    flow->key.nw_proto == IPPROTO_TCP) {
		u8 *tcp = (u8 *)tcp_hdr(skb);
		tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK;
	}

	spin_lock_bh(&flow->lock);
	flow->used = jiffies;
	flow->packet_count++;
	flow->byte_count += skb->len;
	flow->tcp_flags |= tcp_flags;
	spin_unlock_bh(&flow->lock);
}

struct sw_flow_actions *flow_actions_alloc(size_t n_actions)
{
	struct sw_flow_actions *sfa;

	if (n_actions > (PAGE_SIZE - sizeof *sfa) / sizeof(union xflow_action))
		return ERR_PTR(-EINVAL);

	sfa = kmalloc(sizeof *sfa + n_actions * sizeof(union xflow_action),
		      GFP_KERNEL);
	if (!sfa)
		return ERR_PTR(-ENOMEM);

	sfa->n_actions = n_actions;
	return sfa;
}

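/* Example (sketch, illustrative only): callers follow the usual kernel
 * ERR_PTR convention when allocating an actions block.
 *
 *	struct sw_flow_actions *acts = flow_actions_alloc(n);
 *	if (IS_ERR(acts))
 *		return PTR_ERR(acts);
 */
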
/* Frees 'flow' immediately. */
static void flow_free(struct sw_flow *flow)
{
	kfree(flow->sf_acts);
	kmem_cache_free(flow_cache, flow);
}

void flow_free_tbl(struct tbl_node *node)
{
	struct sw_flow *flow = flow_cast(node);
	flow_free(flow);
}

/* RCU callback used by flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
	flow_free(flow);
}

/* Schedules 'flow' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free(struct sw_flow *flow)
{
	call_rcu(&flow->rcu, rcu_free_flow_callback);
}

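/* Example (sketch, illustrative only): the deferred free keeps concurrent
 * RCU readers safe once a flow has been unlinked from its lookup structure.
 *
 *	rcu_read_lock();
 *	...unlink 'flow' from the flow table...
 *	flow_deferred_free(flow);
 *	rcu_read_unlock();
 */
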
/* RCU callback used by flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
	struct sw_flow_actions *sf_acts = container_of(rcu,
			struct sw_flow_actions, rcu);
	kfree(sf_acts);
}

/* Schedules 'sf_acts' to be freed after the next RCU grace period.
 * The caller must hold rcu_read_lock for this to be sensible. */
void flow_deferred_free_acts(struct sw_flow_actions *sf_acts)
{
	call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}

#define SNAP_OUI_LEN 3

struct eth_snap_hdr {
	struct ethhdr eth;
	u8 dsap;		/* Always 0xAA */
	u8 ssap;		/* Always 0xAA */
	u8 ctrl;
	u8 oui[SNAP_OUI_LEN];
	__be16 ethertype;
} __attribute__ ((packed));

static int is_snap(const struct eth_snap_hdr *esh)
{
	return (esh->dsap == LLC_SAP_SNAP
		&& esh->ssap == LLC_SAP_SNAP
		&& !memcmp(esh->oui, "\0\0\0", 3));
}

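/* A SNAP header whose OUI is all zeros carries a plain Ethertype in its
 * 'ethertype' field, which is why flow_extract() can classify such frames
 * the same way as ordinary Ethernet II frames. */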
/* Parses the Ethernet frame in 'skb', which was received on 'in_port',
 * and initializes 'key' to match.  Returns 1 if 'skb' contains an IP
 * fragment, 0 otherwise. */
int flow_extract(struct sk_buff *skb, u16 in_port, struct xflow_key *key)
{
	struct ethhdr *eth;
	struct eth_snap_hdr *esh;
	int retval = 0;
	int nh_ofs;

	memset(key, 0, sizeof *key);
	key->tun_id = OVS_CB(skb)->tun_id;
	key->in_port = in_port;
	key->dl_tci = htons(0);

	if (skb->len < sizeof *eth)
		return 0;
	if (!pskb_may_pull(skb, skb->len >= 64 ? 64 : skb->len))
		return 0;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);
	esh = (struct eth_snap_hdr *) eth;
	nh_ofs = sizeof *eth;
	if (likely(ntohs(eth->h_proto) >= XFLOW_DL_TYPE_ETH2_CUTOFF))
		key->dl_type = eth->h_proto;
	else if (skb->len >= sizeof *esh && is_snap(esh)) {
		key->dl_type = esh->ethertype;
		nh_ofs = sizeof *esh;
	} else {
		key->dl_type = htons(XFLOW_DL_TYPE_NOT_ETH_TYPE);
		if (skb->len >= nh_ofs + sizeof(struct llc_pdu_un))
			nh_ofs += sizeof(struct llc_pdu_un);
	}

	/* Check for a VLAN tag. */
	if (key->dl_type == htons(ETH_P_8021Q) &&
	    skb->len >= nh_ofs + sizeof(struct vlan_hdr)) {
		struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data + nh_ofs);
		key->dl_type = vh->h_vlan_encapsulated_proto;
		key->dl_tci = vh->h_vlan_TCI | htons(XFLOW_TCI_PRESENT);
		nh_ofs += sizeof(struct vlan_hdr);
	}
	memcpy(key->dl_src, eth->h_source, ETH_ALEN);
	memcpy(key->dl_dst, eth->h_dest, ETH_ALEN);
	skb_set_network_header(skb, nh_ofs);

	/* Network layer. */
	if (key->dl_type == htons(ETH_P_IP) && iphdr_ok(skb)) {
		struct iphdr *nh = ip_hdr(skb);
		int th_ofs = nh_ofs + nh->ihl * 4;
		key->nw_src = nh->saddr;
		key->nw_dst = nh->daddr;
		key->nw_tos = nh->tos & ~INET_ECN_MASK;
		key->nw_proto = nh->protocol;
		skb_set_transport_header(skb, th_ofs);

		/* Transport layer. */
		if (!(nh->frag_off & htons(IP_MF | IP_OFFSET))) {
			if (key->nw_proto == IPPROTO_TCP) {
				if (tcphdr_ok(skb)) {
					struct tcphdr *tcp = tcp_hdr(skb);
					key->tp_src = tcp->source;
					key->tp_dst = tcp->dest;
				} else {
					/* Avoid tricking other code into
					 * thinking that this packet has an L4
					 * header. */
					key->nw_proto = 0;
				}
			} else if (key->nw_proto == IPPROTO_UDP) {
				if (udphdr_ok(skb)) {
					struct udphdr *udp = udp_hdr(skb);
					key->tp_src = udp->source;
					key->tp_dst = udp->dest;
				} else {
					/* Avoid tricking other code into
					 * thinking that this packet has an L4
					 * header. */
					key->nw_proto = 0;
				}
			} else if (key->nw_proto == IPPROTO_ICMP) {
				if (icmphdr_ok(skb)) {
					struct icmphdr *icmp = icmp_hdr(skb);
					/* The ICMP type and code fields use the
					 * 16-bit transport port fields, so we
					 * need to store them in 16-bit network
					 * byte order. */
					key->tp_src = htons(icmp->type);
					key->tp_dst = htons(icmp->code);
				} else {
					/* Avoid tricking other code into
					 * thinking that this packet has an L4
					 * header. */
					key->nw_proto = 0;
				}
			}
		} else {
			retval = 1;
		}
	} else if (key->dl_type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp->ar_hrd == htons(ARPHRD_ETHER)
				&& arp->ar_pro == htons(ETH_P_IP)
				&& arp->ar_hln == ETH_ALEN
				&& arp->ar_pln == 4) {

			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->nw_proto = ntohs(arp->ar_op);

			if (key->nw_proto == ARPOP_REQUEST
					|| key->nw_proto == ARPOP_REPLY) {
				memcpy(&key->nw_src, arp->ar_sip, sizeof(key->nw_src));
				memcpy(&key->nw_dst, arp->ar_tip, sizeof(key->nw_dst));
			}
		}
	} else {
		skb_reset_transport_header(skb);
	}
	return retval;
}

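/* Example (sketch, illustrative only): a receive path would typically build
 * the key for an incoming packet and then look it up, treating IP fragments
 * specially since their L4 fields are not extracted.
 *
 *	struct xflow_key key;
 *	int is_frag = flow_extract(skb, in_port, &key);
 *	...look up 'key' in the flow table...
 */
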
u32 flow_hash(const struct xflow_key *key)
{
	return jhash2((u32 *)key, sizeof *key / sizeof(u32), hash_seed);
}

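/* Both flow_hash() and flow_cmp() below treat the key as opaque bytes, which
 * is only sound because flow_extract() memsets the whole structure first:
 * that zeroes any padding that jhash2() or memcmp() would otherwise read as
 * garbage.  jhash2() additionally assumes that sizeof(struct xflow_key) is a
 * multiple of sizeof(u32). */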
int flow_cmp(const struct tbl_node *node, void *key2_)
{
	const struct xflow_key *key1 = &flow_cast(node)->key;
	const struct xflow_key *key2 = key2_;

	return !memcmp(key1, key2, sizeof(struct xflow_key));
}

/* Initializes the flow module.
 * Returns zero if successful or a negative error code. */
int flow_init(void)
{
	flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
				       0, NULL);
	if (flow_cache == NULL)
		return -ENOMEM;

	get_random_bytes(&hash_seed, sizeof hash_seed);

	return 0;
}

/* Uninitializes the flow module. */
void flow_exit(void)
{
	kmem_cache_destroy(flow_cache);
}