From: Ben Pfaff
Date: Tue, 31 Aug 2010 17:41:00 +0000 (-0700)
Subject: Merge "master" into "wdp".
X-Git-Url: http://git.onelab.eu/?a=commitdiff_plain;h=d90c986fefa7f171b49e9130dad7d7b9c943ee58;p=sliver-openvswitch.git

Merge "master" into "wdp".
---
d90c986fefa7f171b49e9130dad7d7b9c943ee58
diff --cc datapath/Modules.mk
index 8e8dd3376,b632297b8..fd6a5ffeb
--- a/datapath/Modules.mk
+++ b/datapath/Modules.mk
@@@ -30,7 -32,9 +32,8 @@@ openvswitch_headers = 
 datapath.h \
 dp_sysfs.h \
 flow.h \
- odp-compat.h \
 table.h \
+ tunnel.h \
 vport.h \
 vport-generic.h \
 vport-internal_dev.h \
diff --cc datapath/actions.c
index 7d39cd75e,b75aecdac..b83c1f84d
--- a/datapath/actions.c
+++ b/datapath/actions.c
@@@ -82,10 -91,16 +83,16 @@@ static struct sk_buff *modify_vlan_tci(
 if (skb->protocol == htons(ETH_P_8021Q)) {
 /* Modify vlan id, but maintain other TCI values */
- struct vlan_ethhdr *vh = vlan_eth_hdr(skb);
- __be16 old_tci = vh->h_vlan_TCI;
+ struct vlan_ethhdr *vh;
+ __be16 old_tci;
+
+ if (skb->len < VLAN_ETH_HLEN)
+ return skb;
+
+ vh = vlan_eth_hdr(skb);
+ old_tci = vh->h_vlan_TCI;

- vh->h_vlan_TCI = htons((ntohs(vh->h_vlan_TCI) & ~mask) | tci);
+ vh->h_vlan_TCI = (vh->h_vlan_TCI & ~mask) | tci;

 if (OVS_CB(skb)->ip_summed == OVS_CSUM_COMPLETE) {
 __be16 diff[] = { ~old_tci, vh->h_vlan_TCI };
@@@ -229,40 -244,60 +236,60 @@@ static void update_csum(__sum16 *sum, s
 csum_unfold(*sum)));
 }

-static bool is_ip(struct sk_buff *skb, const struct odp_flow_key *key)
++static bool is_ip(struct sk_buff *skb, const struct xflow_key *key)
+ {
+ return (key->dl_type == htons(ETH_P_IP) &&
+ skb->transport_header > skb->network_header);
+ }
+
-static __sum16 *get_l4_checksum(struct sk_buff *skb, const struct odp_flow_key *key)
++static __sum16 *get_l4_checksum(struct sk_buff *skb, const struct xflow_key *key)
+ {
+ int transport_len = skb->len - skb_transport_offset(skb);
+ if (key->nw_proto == IPPROTO_TCP) {
+ if (likely(transport_len >= sizeof(struct tcphdr)))
+ return &tcp_hdr(skb)->check;
+ } else if (key->nw_proto == IPPROTO_UDP) {
+ if (likely(transport_len >= sizeof(struct udphdr)))
+ return &udp_hdr(skb)->check;
+ }
+ return NULL;
+ }
+
 static struct sk_buff *set_nw_addr(struct sk_buff *skb,
- const struct odp_flow_key *key,
- const struct odp_action_nw_addr *a,
+ const struct xflow_key *key,
+ const struct xflow_action_nw_addr *a,
 gfp_t gfp)
 {
- if (key->dl_type != htons(ETH_P_IP))
+ struct iphdr *nh;
+ __sum16 *check;
+ __be32 *nwaddr;
+
+ if (unlikely(!is_ip(skb, key)))
 return skb;

 skb = make_writable(skb, 0, gfp);
- if (skb) {
- struct iphdr *nh = ip_hdr(skb);
- u32 *f = a->type == XFLOWAT_SET_NW_SRC ? &nh->saddr : &nh->daddr;
- u32 old = *f;
- u32 new = a->nw_addr;
-
- if (key->nw_proto == IPPROTO_TCP) {
- struct tcphdr *th = tcp_hdr(skb);
- update_csum(&th->check, skb, old, new, 1);
- } else if (key->nw_proto == IPPROTO_UDP) {
- struct udphdr *th = udp_hdr(skb);
- update_csum(&th->check, skb, old, new, 1);
- }
- update_csum(&nh->check, skb, old, new, 0);
- *f = new;
- }
+ if (unlikely(!skb))
+ return NULL;
+
+ nh = ip_hdr(skb);
- nwaddr = a->type == ODPAT_SET_NW_SRC ? &nh->saddr : &nh->daddr;
++ nwaddr = a->type == XFLOWAT_SET_NW_SRC ?
&nh->saddr : &nh->daddr; + + check = get_l4_checksum(skb, key); + if (likely(check)) + update_csum(check, skb, *nwaddr, a->nw_addr, 1); + update_csum(&nh->check, skb, *nwaddr, a->nw_addr, 0); + + *nwaddr = a->nw_addr; + return skb; } static struct sk_buff *set_nw_tos(struct sk_buff *skb, - const struct odp_flow_key *key, - const struct odp_action_nw_tos *a, + const struct xflow_key *key, + const struct xflow_action_nw_tos *a, gfp_t gfp) { - if (key->dl_type != htons(ETH_P_IP)) + if (unlikely(!is_ip(skb, key))) return skb; skb = make_writable(skb, 0, gfp); @@@ -282,35 -317,69 +309,70 @@@ } static struct sk_buff *set_tp_port(struct sk_buff *skb, - const struct odp_flow_key *key, - const struct odp_action_tp_port *a, gfp_t gfp) + const struct xflow_key *key, + const struct xflow_action_tp_port *a, + gfp_t gfp) { - int check_ofs; + struct udphdr *th; + __sum16 *check; + __be16 *port; - if (key->dl_type != htons(ETH_P_IP)) + if (unlikely(!is_ip(skb, key))) return skb; - if (key->nw_proto == IPPROTO_TCP) - check_ofs = offsetof(struct tcphdr, check); - else if (key->nw_proto == IPPROTO_UDP) - check_ofs = offsetof(struct udphdr, check); - else + skb = make_writable(skb, 0, gfp); + if (unlikely(!skb)) + return NULL; + + /* Must follow make_writable() since that can move the skb data. */ + check = get_l4_checksum(skb, key); + if (unlikely(!check)) return skb; - skb = make_writable(skb, 0, gfp); - if (skb) { - struct udphdr *th = udp_hdr(skb); - u16 *f = a->type == XFLOWAT_SET_TP_SRC ? &th->source : &th->dest; - u16 old = *f; - u16 new = a->tp_port; - update_csum((u16*)(skb_transport_header(skb) + check_ofs), - skb, old, new, 0); - *f = new; - } + /* + * Update port and checksum. + * + * This is OK because source and destination port numbers are at the + * same offsets in both UDP and TCP headers, and get_l4_checksum() only + * supports those protocols. + */ + th = udp_hdr(skb); - port = a->type == ODPAT_SET_TP_SRC ? &th->source : &th->dest; ++ port = a->type == XFLOWAT_SET_TP_SRC ? &th->source : &th->dest; + update_csum(check, skb, *port, a->tp_port, 0); + *port = a->tp_port; + return skb; } + /** + * is_spoofed_arp - check for invalid ARP packet + * + * @skb: skbuff containing an Ethernet packet, with network header pointing + * just past the Ethernet and optional 802.1Q header. + * @key: flow key extracted from @skb by flow_extract() + * + * Returns true if @skb is an invalid Ethernet+IPv4 ARP packet: one with screwy + * or truncated header fields or one whose inner and outer Ethernet address + * differ. 
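+ *
+ * For illustration, the intended use is the one execute_actions() makes of
+ * this function below, in the new XFLOWAT_DROP_SPOOFED_ARP case:
+ *
+ *	if (unlikely(is_spoofed_arp(skb, key)))
+ *		goto exit;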
+ */ -static bool is_spoofed_arp(struct sk_buff *skb, const struct odp_flow_key *key) ++static bool is_spoofed_arp(struct sk_buff *skb, const struct xflow_key *key) + { + struct arp_eth_header *arp; + + if (key->dl_type != htons(ETH_P_ARP)) + return false; + + if (skb_network_offset(skb) + sizeof(struct arp_eth_header) > skb->len) + return true; + + arp = (struct arp_eth_header *)skb_network_header(skb); + return (arp->ar_hrd != htons(ARPHRD_ETHER) || + arp->ar_pro != htons(ETH_P_IP) || + arp->ar_hln != ETH_ALEN || + arp->ar_pln != 4 || + compare_ether_addr(arp->ar_sha, eth_hdr(skb)->h_source)); + } + static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port) { struct dp_port *p; @@@ -473,9 -543,14 +535,14 @@@ int execute_actions(struct datapath *dp skb->priority = a->priority.priority; break; - case ODPAT_POP_PRIORITY: + case XFLOWAT_POP_PRIORITY: skb->priority = priority; break; + - case ODPAT_DROP_SPOOFED_ARP: ++ case XFLOWAT_DROP_SPOOFED_ARP: + if (unlikely(is_spoofed_arp(skb, key))) + goto exit; + break; } if (!skb) return -ENOMEM; diff --cc datapath/datapath.c index bb2486e42,5ee915740..131dcafcf --- a/datapath/datapath.c +++ b/datapath/datapath.c @@@ -551,12 -554,16 +554,16 @@@ void dp_process_received_packet(struct OVS_CB(skb)->dp_port = p; /* Extract flow from 'skb' into 'key'. */ - if (flow_extract(skb, p ? p->port_no : XFLOWP_NONE, &key)) { - if (dp->drop_frags) { - kfree_skb(skb); - stats_counter_off = offsetof(struct dp_stats_percpu, n_frags); - goto out; - } - error = flow_extract(skb, p ? p->port_no : ODPP_NONE, &key); ++ error = flow_extract(skb, p ? p->port_no : XFLOWP_NONE, &key); + if (unlikely(error)) { + kfree_skb(skb); + return; + } + + if (OVS_CB(skb)->is_frag && dp->drop_frags) { + kfree_skb(skb); + stats_counter_off = offsetof(struct dp_stats_percpu, n_frags); + goto out; } /* Look up flow. */ diff --cc datapath/flow.c index d1941796c,dbfe5dd73..48bebca52 --- a/datapath/flow.c +++ b/datapath/flow.c @@@ -218,23 -213,61 +212,60 @@@ static __be16 parse_ethertype(struct sk return llc->ethertype; } - /* Parses the Ethernet frame in 'skb', which was received on 'in_port', - * and initializes 'key' to match. Returns 1 if 'skb' contains an IP - * fragment, 0 otherwise. */ + /** + * flow_extract - extracts a flow key from an Ethernet frame. + * @skb: sk_buff that contains the frame, with skb->data pointing to the + * Ethernet header + * @in_port: port number on which @skb was received. + * @key: output flow key + * + * The caller must ensure that skb->len >= ETH_HLEN. + * + * Returns 0 if successful, otherwise a negative errno value. + * + * Initializes @skb header pointers as follows: + * + * - skb->mac_header: the Ethernet header. + * + * - skb->network_header: just past the Ethernet header, or just past the + * VLAN header, to the first byte of the Ethernet payload. + * + * - skb->transport_header: If key->dl_type is ETH_P_IP on output, then just + * past the IPv4 header, if one is present and of a correct length, + * otherwise the same as skb->network_header. For other key->dl_type + * values it is left untouched. + * + * Sets OVS_CB(skb)->is_frag to %true if @skb is an IPv4 fragment, otherwise to + * %false. 
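+ *
+ * A sketch of the expected call pattern, as in dp_process_received_packet()
+ * in this same merge:
+ *
+ *	error = flow_extract(skb, p ? p->port_no : XFLOWP_NONE, &key);
+ *	if (unlikely(error)) {
+ *		kfree_skb(skb);
+ *		return;
+ *	}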
+ */ -int flow_extract(struct sk_buff *skb, u16 in_port, struct odp_flow_key *key) +int flow_extract(struct sk_buff *skb, u16 in_port, struct xflow_key *key) { struct ethhdr *eth; - int retval = 0; memset(key, 0, sizeof *key); key->tun_id = OVS_CB(skb)->tun_id; key->in_port = in_port; - key->dl_tci = htons(0); - - if (skb->len < sizeof *eth) - return 0; - if (!pskb_may_pull(skb, skb->len >= 64 ? 64 : skb->len)) - return 0; - key->dl_vlan = htons(ODP_VLAN_NONE); + OVS_CB(skb)->is_frag = false; + + /* + * We would really like to pull as many bytes as we could possibly + * want to parse into the linear data area. Currently that is: + * + * 14 Ethernet header + * 4 VLAN header + * 60 max IP header with options + * 20 max TCP/UDP/ICMP header (don't care about options) + * -- + * 98 + * + * But Xen only allocates 64 or 72 bytes for the linear data area in + * netback, which means that we would reallocate and copy the skb's + * linear data on every packet if we did that. So instead just pull 64 + * bytes, which is always sufficient without IP options, and then check + * whether we need to pull more later when we look at the IP header. + */ + if (!pskb_may_pull(skb, min(skb->len, 64u))) + return -ENOMEM; skb_reset_mac_header(skb); @@@ -324,13 -352,11 +350,11 @@@ memcpy(&key->nw_dst, arp->ar_tip, sizeof(key->nw_dst)); } } - } else { - skb_reset_transport_header(skb); } - return retval; + return 0; } -u32 flow_hash(const struct odp_flow_key *key) +u32 flow_hash(const struct xflow_key *key) { return jhash2((u32*)key, sizeof *key / sizeof(u32), hash_seed); } diff --cc datapath/vport-netdev.c index 7bf56f522,f6709e2a3..0f447f58b --- a/datapath/vport-netdev.c +++ b/datapath/vport-netdev.c @@@ -182,9 -208,14 +208,14 @@@ struct kobject *netdev_get_kobj(const s return &netdev_vport->dev->NETDEV_DEV_MEMBER.kobj; } -int netdev_get_stats(const struct vport *vport, struct odp_vport_stats *stats) +int netdev_get_stats(const struct vport *vport, struct xflow_vport_stats *stats) { const struct netdev_vport *netdev_vport = netdev_vport_priv(vport); + #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36) + struct rtnl_link_stats64 *netdev_stats, storage; + + netdev_stats = dev_get_stats(netdev_vport->dev, &storage); + #else const struct net_device_stats *netdev_stats; netdev_stats = dev_get_stats(netdev_vport->dev); diff --cc include/openvswitch/automake.mk index 61859979f,f97c1b27a..9c77a6c48 --- a/include/openvswitch/automake.mk +++ b/include/openvswitch/automake.mk @@@ -1,5 -1,5 +1,5 @@@ noinst_HEADERS += \ - include/openvswitch/gre.h \ include/openvswitch/brcompat-netlink.h \ - include/openvswitch/datapath-protocol.h \ - include/openvswitch/tunnel.h ++ include/openvswitch/tunnel.h \ + include/openvswitch/xflow.h diff --cc include/openvswitch/xflow.h index a705927c6,000000000..82e467b17 mode 100644,000000..100644 --- a/include/openvswitch/xflow.h +++ b/include/openvswitch/xflow.h @@@ -1,437 -1,0 +1,438 @@@ +/* + * Copyright (c) 2009, 2010 Nicira Networks. + * + * This file is offered under your choice of two licenses: Apache 2.0 or GNU + * GPL 2.0 or later. The permission statements for each of these licenses is + * given below. You may license your modifications to this file under either + * of these licenses or both. If you wish to license your modifications under + * only one of these licenses, delete the permission text for the other + * license. 
+ *
+ * ----------------------------------------------------------------------
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ----------------------------------------------------------------------
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * ----------------------------------------------------------------------
+ */
+
+/* Protocol between userspace and kernel datapath.
+ *
+ * Be sure to update datapath/xflow-compat.h if you change any of the
+ * structures in here. */
+
+#ifndef XFLOW_H
+#define XFLOW_H 1
+
+/* The ovs_be types indicate that an object is in big-endian, not
+ * native-endian, byte order. They are otherwise equivalent to uint<N>_t.
+ * The Linux kernel already has __be<N> types for this, which take on
+ * additional semantics when the "sparse" static checker is used, so we use
+ * those types when compiling the kernel. */
+#ifdef __KERNEL__
+#include <linux/types.h>
+#define ovs_be16 __be16
+#define ovs_be32 __be32
+#define ovs_be64 __be64
+#else
+#include <stdint.h>
+#define ovs_be16 uint16_t
+#define ovs_be32 uint32_t
+#define ovs_be64 uint64_t
+#endif
+
+#define XFLOW_MAX 256 /* Maximum number of datapaths. 
*/ + +#define XFLOW_DP_CREATE _IO('O', 0) +#define XFLOW_DP_DESTROY _IO('O', 1) +#define XFLOW_DP_STATS _IOW('O', 2, struct xflow_stats) + +#define XFLOW_GET_DROP_FRAGS _IOW('O', 3, int) +#define XFLOW_SET_DROP_FRAGS _IOR('O', 4, int) + +#define XFLOW_GET_LISTEN_MASK _IOW('O', 5, int) +#define XFLOW_SET_LISTEN_MASK _IOR('O', 6, int) + +#define XFLOW_PORT_ATTACH _IOR('O', 7, struct xflow_port) +#define XFLOW_PORT_DETACH _IOR('O', 8, int) +#define XFLOW_PORT_QUERY _IOWR('O', 9, struct xflow_port) +#define XFLOW_PORT_LIST _IOWR('O', 10, struct xflow_portvec) + +#define XFLOW_PORT_GROUP_SET _IOR('O', 11, struct xflow_port_group) +#define XFLOW_PORT_GROUP_GET _IOWR('O', 12, struct xflow_port_group) + +#define XFLOW_FLOW_GET _IOWR('O', 13, struct xflow_flow) +#define XFLOW_FLOW_PUT _IOWR('O', 14, struct xflow_flow) +#define XFLOW_FLOW_LIST _IOWR('O', 15, struct xflow_flowvec) +#define XFLOW_FLOW_FLUSH _IO('O', 16) +#define XFLOW_FLOW_DEL _IOWR('O', 17, struct xflow_flow) + +#define XFLOW_EXECUTE _IOR('O', 18, struct xflow_execute) + +#define XFLOW_SET_SFLOW_PROBABILITY _IOR('O', 19, int) +#define XFLOW_GET_SFLOW_PROBABILITY _IOW('O', 20, int) + +#define XFLOW_VPORT_ADD _IOR('O', 21, struct xflow_vport_add) +#define XFLOW_VPORT_MOD _IOR('O', 22, struct xflow_vport_mod) +#define XFLOW_VPORT_DEL _IO('O', 23) +#define XFLOW_VPORT_STATS_GET _IOWR('O', 24, struct xflow_vport_stats_req) +#define XFLOW_VPORT_ETHER_GET _IOWR('O', 25, struct xflow_vport_ether) +#define XFLOW_VPORT_ETHER_SET _IOW('O', 26, struct xflow_vport_ether) +#define XFLOW_VPORT_MTU_GET _IOWR('O', 27, struct xflow_vport_mtu) +#define XFLOW_VPORT_MTU_SET _IOW('O', 28, struct xflow_vport_mtu) +#define XFLOW_VPORT_STATS_SET _IOWR('O', 29, struct xflow_vport_stats_req) + +struct xflow_stats { + /* Flows. */ + uint32_t n_flows; /* Number of flows in flow table. */ + uint32_t cur_capacity; /* Current flow table capacity. */ + uint32_t max_capacity; /* Maximum expansion of flow table capacity. */ + + /* Ports. */ + uint32_t n_ports; /* Current number of ports. */ + uint32_t max_ports; /* Maximum supported number of ports. */ + uint16_t max_groups; /* Maximum number of port groups. */ + uint16_t reserved; + + /* Lookups. */ + uint64_t n_frags; /* Number of dropped IP fragments. */ + uint64_t n_hit; /* Number of flow table matches. */ + uint64_t n_missed; /* Number of flow table misses. */ + uint64_t n_lost; /* Number of misses not sent to userspace. */ + + /* Queues. */ + uint16_t max_miss_queue; /* Max length of XFLOWL_MISS queue. */ + uint16_t max_action_queue; /* Max length of XFLOWL_ACTION queue. */ + uint16_t max_sflow_queue; /* Max length of XFLOWL_SFLOW queue. */ +}; + +/* Logical ports. */ +#define XFLOWP_LOCAL ((uint16_t)0) +#define XFLOWP_NONE ((uint16_t)-1) +#define XFLOWP_NORMAL ((uint16_t)-2) + +/* Listening channels. */ +#define _XFLOWL_MISS_NR 0 /* Packet missed in flow table. */ +#define XFLOWL_MISS (1 << _XFLOWL_MISS_NR) +#define _XFLOWL_ACTION_NR 1 /* Packet output to XFLOWP_CONTROLLER. */ +#define XFLOWL_ACTION (1 << _XFLOWL_ACTION_NR) +#define _XFLOWL_SFLOW_NR 2 /* sFlow samples. */ +#define XFLOWL_SFLOW (1 << _XFLOWL_SFLOW_NR) +#define XFLOWL_ALL (XFLOWL_MISS | XFLOWL_ACTION | XFLOWL_SFLOW) + +/** + * struct xflow_msg - format of messages read from datapath fd. + * @type: One of the %_XFLOWL_* constants. + * @length: Total length of message, including this header. + * @port: Port that received the packet embedded in this message. + * @reserved: Not currently used. Should be set to 0. 
+ * @arg: Argument value whose meaning depends on @type.
+ *
+ * For @type == %_XFLOWL_MISS_NR, the header is followed by packet data. The
+ * @arg member is the ID (in network byte order) of the tunnel that
+ * encapsulated this packet. It is 0 if the packet was not received on a
+ * tunnel.
+ *
+ * For @type == %_XFLOWL_ACTION_NR, the header is followed by packet data. The
+ * @arg member is copied from the &struct xflow_action_controller that caused
+ * the &struct xflow_msg to be composed.
+ *
+ * For @type == %_XFLOWL_SFLOW_NR, the header is followed by &struct
+ * xflow_sflow_sample_header, then by an array of &union xflow_action (the
+ * number of which is specified in &struct xflow_sflow_sample_header), then by
+ * packet data.
+ */
+struct xflow_msg {
+ uint32_t type;
+ uint32_t length;
+ uint16_t port;
+ uint16_t reserved;
+ uint32_t arg;
+};
+
+/**
+ * struct xflow_sflow_sample_header - header added to sFlow sampled packet.
+ * @sample_pool: Number of packets that were candidates for sFlow sampling,
+ * regardless of whether they were actually chosen and sent down to userspace.
+ * @n_actions: Number of "union xflow_action"s immediately following this
+ * header.
+ *
+ * This header follows &struct xflow_msg when that structure's @type is
+ * %_XFLOWL_SFLOW_NR, and it is itself followed by an array of &union
+ * xflow_action (the number of which is specified in @n_actions) and then by
+ * packet data.
+ */
+struct xflow_sflow_sample_header {
+ uint32_t sample_pool;
+ uint32_t n_actions;
+};
+
+#define XFLOW_PORT_INTERNAL (1 << 0) /* This port is simulated. */
+struct xflow_port {
+ char devname[16]; /* IFNAMSIZ */
+ uint16_t port;
+ uint16_t flags;
+ uint32_t reserved2;
+};
+
+struct xflow_portvec {
+ struct xflow_port *ports;
+ uint32_t n_ports;
+};
+
+struct xflow_port_group {
+ uint16_t *ports;
+ uint16_t n_ports; /* Number of ports. */
+ uint16_t group; /* Group number. */
+};
+
+struct xflow_flow_stats {
+ uint64_t n_packets; /* Number of matched packets. */
+ uint64_t n_bytes; /* Number of matched bytes. */
+ uint64_t used_sec; /* Time last used, in system monotonic time. */
+ uint32_t used_nsec;
+ uint8_t tcp_flags;
+ uint8_t reserved;
+ uint16_t error; /* Used by XFLOW_FLOW_GET. */
+};
+
+/*
+ * The datapath protocol adopts the Linux convention for TCI fields: if an
+ * 802.1Q header is present then its TCI value is used verbatim except that the
+ * CFI bit (0x1000) is always set to 1, and all-bits-zero indicates no 802.1Q
+ * header.
+ */
+#define XFLOW_TCI_PRESENT 0x1000 /* CFI bit */
+
+struct xflow_key {
+ ovs_be32 tun_id; /* Encapsulating tunnel ID. */
+ ovs_be32 nw_src; /* IP source address. */
+ ovs_be32 nw_dst; /* IP destination address. */
+ uint16_t in_port; /* Input switch port. */
+ ovs_be16 dl_tci; /* All zeros if 802.1Q header absent,
+ * XFLOW_TCI_PRESENT set if present. */
+ ovs_be16 dl_type; /* Ethernet frame type. */
+ ovs_be16 tp_src; /* TCP/UDP source port. */
+ ovs_be16 tp_dst; /* TCP/UDP destination port. */
+ uint8_t dl_src[6]; /* Ethernet source address. */
+ uint8_t dl_dst[6]; /* Ethernet destination address. */
+ uint8_t nw_proto; /* IP protocol or low 8 bits of ARP opcode. */
+ uint8_t nw_tos; /* IP ToS (DSCP field, 6 bits). */
+};
+
+/* Flags for XFLOW_FLOW. */
+#define XFLOWFF_ZERO_TCP_FLAGS (1 << 0) /* Zero the TCP flags. */
+
+struct xflow_flow {
+ struct xflow_flow_stats stats;
+ struct xflow_key key;
+ union xflow_action *actions;
+ uint32_t n_actions;
+ uint32_t flags;
+};
+
+/* Flags for XFLOW_FLOW_PUT. */
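+/*
+ * XFLOWPF_CREATE and XFLOWPF_MODIFY may be set together to request "create
+ * the flow if it does not exist, otherwise modify it"; xfif_netdev_flow_put()
+ * in lib/xfif-netdev.c below shows one implementation of these semantics.
+ */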
+#define XFLOWPF_CREATE (1 << 0) /* Allow creating a new flow. */
+#define XFLOWPF_MODIFY (1 << 1) /* Allow modifying an existing flow. */
+#define XFLOWPF_ZERO_STATS (1 << 2) /* Zero the stats of existing flow. */
+
+/* XFLOW_FLOW_PUT argument. */
+struct xflow_flow_put {
+ struct xflow_flow flow;
+ uint32_t flags;
+};
+
+struct xflow_flowvec {
+ struct xflow_flow *flows;
+ uint32_t n_flows;
+};
+
+/* Action types. */
+#define XFLOWAT_OUTPUT 0 /* Output to switch port. */
+#define XFLOWAT_OUTPUT_GROUP 1 /* Output to all ports in group. */
+#define XFLOWAT_CONTROLLER 2 /* Send copy to controller. */
+#define XFLOWAT_SET_DL_TCI 3 /* Set the 802.1q VLAN VID and/or PCP. */
+#define XFLOWAT_STRIP_VLAN 4 /* Strip the 802.1q header. */
+#define XFLOWAT_SET_DL_SRC 5 /* Ethernet source address. */
+#define XFLOWAT_SET_DL_DST 6 /* Ethernet destination address. */
+#define XFLOWAT_SET_NW_SRC 7 /* IP source address. */
+#define XFLOWAT_SET_NW_DST 8 /* IP destination address. */
+#define XFLOWAT_SET_NW_TOS 9 /* IP ToS/DSCP field (6 bits). */
+#define XFLOWAT_SET_TP_SRC 10 /* TCP/UDP source port. */
+#define XFLOWAT_SET_TP_DST 11 /* TCP/UDP destination port. */
+#define XFLOWAT_SET_TUNNEL 12 /* Set the encapsulating tunnel ID. */
+#define XFLOWAT_SET_PRIORITY 14 /* Set skb->priority. */
+#define XFLOWAT_POP_PRIORITY 15 /* Restore original skb->priority. */
- #define XFLOWAT_N_ACTIONS 16
++#define XFLOWAT_DROP_SPOOFED_ARP 16 /* Drop ARPs with spoofed source MAC. */
++#define XFLOWAT_N_ACTIONS 17
+
+struct xflow_action_output {
+ uint16_t type; /* XFLOWAT_OUTPUT. */
+ uint16_t port; /* Output port. */
+ uint16_t reserved1;
+ uint16_t reserved2;
+};
+
+struct xflow_action_output_group {
+ uint16_t type; /* XFLOWAT_OUTPUT_GROUP. */
+ uint16_t group; /* Group number. */
+ uint16_t reserved1;
+ uint16_t reserved2;
+};
+
+struct xflow_action_controller {
+ uint16_t type; /* XFLOWAT_CONTROLLER. */
+ uint16_t reserved;
+ uint32_t arg; /* Copied to struct xflow_msg 'arg' member. */
+};
+
+struct xflow_action_tunnel {
+ uint16_t type; /* XFLOWAT_SET_TUNNEL. */
+ uint16_t reserved;
+ ovs_be32 tun_id; /* Tunnel ID. */
+};
+
+/* Action structure for XFLOWAT_SET_DL_TCI. */
+struct xflow_action_dl_tci {
+ uint16_t type; /* XFLOWAT_SET_DL_TCI. */
+ ovs_be16 tci; /* New TCI. Bits not in mask must be zero. */
+ ovs_be16 mask; /* 0x0fff to set VID, 0xe000 to set PCP,
+ * or 0xefff to set both. */
+ uint16_t reserved;
+};
+
+/* Action structure for XFLOWAT_SET_DL_SRC/DST. */
+struct xflow_action_dl_addr {
+ uint16_t type; /* XFLOWAT_SET_DL_SRC/DST. */
+ uint8_t dl_addr[6]; /* Ethernet address. */
+};
+
+/* Action structure for XFLOWAT_SET_NW_SRC/DST. */
+struct xflow_action_nw_addr {
+ uint16_t type; /* XFLOWAT_SET_NW_SRC/DST. */
+ uint16_t reserved;
+ ovs_be32 nw_addr; /* IP address. */
+};
+
+struct xflow_action_nw_tos {
+ uint16_t type; /* XFLOWAT_SET_NW_TOS. */
+ uint8_t nw_tos; /* IP ToS/DSCP field (6 bits). */
+ uint8_t reserved1;
+ uint16_t reserved2;
+ uint16_t reserved3;
+};
+
+/* Action structure for XFLOWAT_SET_PRIORITY. */
+struct xflow_action_priority {
+ uint16_t type; /* XFLOWAT_SET_PRIORITY. */
+ uint16_t reserved;
+ uint32_t priority; /* skb->priority value. */
+};
+
+/* Action structure for XFLOWAT_SET_TP_SRC/DST. */
+struct xflow_action_tp_port {
+ uint16_t type; /* XFLOWAT_SET_TP_SRC/DST. */
+ ovs_be16 tp_port; /* TCP/UDP port. 
*/ + uint16_t reserved1; + uint16_t reserved2; +}; + +union xflow_action { + uint16_t type; + struct xflow_action_output output; + struct xflow_action_output_group output_group; + struct xflow_action_controller controller; + struct xflow_action_tunnel tunnel; + struct xflow_action_dl_tci dl_tci; + struct xflow_action_dl_addr dl_addr; + struct xflow_action_nw_addr nw_addr; + struct xflow_action_nw_tos nw_tos; + struct xflow_action_tp_port tp_port; + struct xflow_action_priority priority; +}; + +struct xflow_execute { + uint16_t in_port; + uint16_t reserved1; + uint32_t reserved2; + + union xflow_action *actions; + uint32_t n_actions; + + const void *data; + uint32_t length; +}; + +#define VPORT_TYPE_SIZE 16 +struct xflow_vport_add { + char port_type[VPORT_TYPE_SIZE]; + char devname[16]; /* IFNAMSIZ */ + void *config; +}; + +struct xflow_vport_mod { + char devname[16]; /* IFNAMSIZ */ + void *config; +}; + +struct xflow_vport_stats { + uint64_t rx_packets; + uint64_t tx_packets; + uint64_t rx_bytes; + uint64_t tx_bytes; + uint64_t rx_dropped; + uint64_t tx_dropped; + uint64_t rx_errors; + uint64_t tx_errors; + uint64_t rx_frame_err; + uint64_t rx_over_err; + uint64_t rx_crc_err; + uint64_t collisions; +}; + +struct xflow_vport_stats_req { + char devname[16]; /* IFNAMSIZ */ + struct xflow_vport_stats stats; +}; + +struct xflow_vport_ether { + char devname[16]; /* IFNAMSIZ */ + unsigned char ether_addr[6]; +}; + +struct xflow_vport_mtu { + char devname[16]; /* IFNAMSIZ */ + uint16_t mtu; +}; + +/* Values below this cutoff are 802.3 packets and the two bytes + * following MAC addresses are used as a frame length. Otherwise, the + * two bytes are used as the Ethernet type. + */ +#define XFLOW_DL_TYPE_ETH2_CUTOFF 0x0600 + +/* Value of dl_type to indicate that the frame does not include an + * Ethernet type. + */ +#define XFLOW_DL_TYPE_NOT_ETH_TYPE 0x05ff + +#endif /* openvswitch/xflow.h */ diff --cc lib/automake.mk index d29a9e5f0,efb84c5a0..a14462917 --- a/lib/automake.mk +++ b/lib/automake.mk @@@ -175,9 -174,10 +175,9 @@@ endi if HAVE_NETLINK lib_libopenvswitch_a_SOURCES += \ - lib/netdev-gre.c \ - lib/dpif-linux.c \ lib/netdev-linux.c \ lib/netdev-patch.c \ + lib/netdev-tunnel.c \ lib/netdev-vport.c \ lib/netdev-vport.h \ lib/netlink-protocol.h \ diff --cc lib/classifier.c index d0d21d541,378faf8e0..f844f5232 --- a/lib/classifier.c +++ b/lib/classifier.c @@@ -383,7 -379,7 +383,7 @@@ classifier_rule_overlaps(const struct c LIST_FOR_EACH (rule, struct cls_rule, node.list, &bucket->rules) { - if (rule->flow.priority == target->priority - if (rule->priority == priority ++ if (rule->flow.priority == target->priority && rules_match_2wild(rule, &target_rule, 0)) { return true; } @@@ -868,10 -828,10 +868,10 @@@ static boo rules_match_2wild(const struct cls_rule *wild1, const struct cls_rule *wild2, int field_idx) { - return rules_match(wild1, wild2, - wild1->flow.wildcards | wild2->flow.wildcards, + return rules_match(wild1, wild2, - wild1->wc.wildcards | wild2->wc.wildcards, ++ wild1->flow.wildcards | wild2->flow.wildcards, wild1->wc.nw_src_mask & wild2->wc.nw_src_mask, - wild1->wc.nw_dst_mask & wild2->wc.nw_dst_mask, + wild1->wc.nw_dst_mask & wild2->wc.nw_dst_mask, field_idx); } diff --cc lib/flow.c index 4b844b469,462df08c2..96ec4988b --- a/lib/flow.c +++ b/lib/flow.c @@@ -120,9 -120,23 +120,23 @@@ parse_ethertype(struct ofpbuf *b return llc->snap.snap_type; } - /* Returns 1 if 'packet' is an IP fragment, 0 otherwise. - * 'tun_id' is in network byte order, while 'in_port' is in host byte order. 
- * These byte orders are the same as they are in struct xflow_key. */ + /* 'tun_id' is in network byte order, while 'in_port' is in host byte order. - * These byte orders are the same as they are in struct odp_flow_key. ++ * These byte orders are the same as they are in struct xflow_key. + * + * Initializes packet header pointers as follows: + * + * - packet->l2 to the start of the Ethernet header. + * + * - packet->l3 to just past the Ethernet header, or just past the + * vlan_header if one is present, to the first byte of the payload of the + * Ethernet frame. + * + * - packet->l4 to just past the IPv4 header, if one is present and has a + * correct length, and otherwise NULL. + * + * - packet->l7 to just past the TCP or UDP or ICMP header, if one is + * present and has a correct length, and otherwise NULL. + */ int flow_extract(struct ofpbuf *packet, uint32_t tun_id, uint16_t in_port, flow_t *flow) @@@ -233,8 -235,8 +235,8 @@@ * arguments must have been initialized through a call to flow_extract(). */ void - flow_extract_stats(const flow_t *flow, struct ofpbuf *packet, + flow_extract_stats(const flow_t *flow, struct ofpbuf *packet, - struct odp_flow_stats *stats) + struct xflow_flow_stats *stats) { memset(stats, '\0', sizeof(*stats)); diff --cc lib/flow.h index b963afdd1,603c4aceb..42abef37c --- a/lib/flow.h +++ b/lib/flow.h @@@ -36,40 -31,15 +36,40 @@@ struct ds struct ofp_match; struct ofpbuf; -typedef struct odp_flow_key flow_t; +typedef struct flow flow_t; +struct flow { + uint32_t wildcards; /* Wildcards. */ + uint32_t priority; /* Priority. */ + uint32_t tun_id; /* Encapsulating tunnel ID. */ + uint32_t nw_src; /* IP source address. */ + uint32_t nw_dst; /* IP destination address. */ + uint16_t in_port; /* Input switch port. */ + uint16_t dl_vlan; /* Input VLAN. */ + uint16_t dl_type; /* Ethernet frame type. */ + uint16_t tp_src; /* TCP/UDP source port. */ + uint16_t tp_dst; /* TCP/UDP destination port. */ + uint8_t dl_src[ETH_ADDR_LEN]; /* Ethernet source address. */ + uint8_t dl_dst[ETH_ADDR_LEN]; /* Ethernet destination address. */ + uint8_t nw_proto; /* IP protocol or low 8 bits of ARP opcode. */ + uint8_t dl_vlan_pcp; /* Input VLAN priority. */ + uint8_t nw_tos; /* IP ToS (DSCP field, 6 bits). */ +}; + +/* Assert that there are FLOW_SIG_SIZE bytes of significant data in "struct + * flow", followed by FLOW_PAD_SIZE bytes of padding. 
*/ +#define FLOW_SIG_SIZE 45 +#define FLOW_PAD_SIZE 3 +BUILD_ASSERT_DECL(offsetof(struct flow, nw_tos) == FLOW_SIG_SIZE - 1); +BUILD_ASSERT_DECL(sizeof(((struct flow *)0)->nw_tos) == 1); +BUILD_ASSERT_DECL(sizeof(struct flow) == FLOW_SIG_SIZE + FLOW_PAD_SIZE); int flow_extract(struct ofpbuf *, uint32_t tun_id, uint16_t in_port, flow_t *); - void flow_extract_stats(const flow_t *flow, struct ofpbuf *packet, + void flow_extract_stats(const flow_t *flow, struct ofpbuf *packet, - struct odp_flow_stats *stats); -void flow_to_match(const flow_t *, uint32_t wildcards, bool tun_id_cookie, - struct ofp_match *); -void flow_from_match(const struct ofp_match *, bool tun_id_from_cookie, - uint64_t cookie, flow_t *, uint32_t *wildcards); + struct xflow_flow_stats *stats); +void flow_to_match(const flow_t *, + bool tun_id_from_cookie, struct ofp_match *); +void flow_from_match(const struct ofp_match *, uint32_t priority, + bool tun_id_from_cookie, uint64_t cookie, flow_t *); char *flow_to_string(const flow_t *); void flow_format(struct ds *, const flow_t *); void flow_print(FILE *, const flow_t *); diff --cc lib/netdev-tunnel.c index 46355fb18,d0ecd98e2..9ce0f74dc --- a/lib/netdev-tunnel.c +++ b/lib/netdev-tunnel.c @@@ -23,8 -23,8 +23,8 @@@ #include "netdev-provider.h" #include "netdev-vport.h" #include "openflow/openflow.h" - #include "openvswitch/gre.h" -#include "openvswitch/datapath-protocol.h" + #include "openvswitch/tunnel.h" +#include "openvswitch/xflow.h" #include "packets.h" #include "socket-util.h" #include "vlog.h" @@@ -134,15 -135,15 +135,15 @@@ parse_config(const char *name, const ch } static int - netdev_gre_create(const char *name, const char *type OVS_UNUSED, - const struct shash *args, struct netdev_dev **netdev_devp) + netdev_tunnel_create(const char *name, const char *type, + const struct shash *args, struct netdev_dev **netdev_devp) { int err; - struct odp_vport_add ova; + struct xflow_vport_add ova; - struct gre_port_config port_config; - struct netdev_dev_gre *netdev_dev; + struct tnl_port_config port_config; + struct netdev_dev_tunnel *netdev_dev; - ovs_strlcpy(ova.port_type, "gre", sizeof ova.port_type); + ovs_strlcpy(ova.port_type, type, sizeof ova.port_type); ovs_strlcpy(ova.devname, name, sizeof ova.devname); ova.config = &port_config; @@@ -175,11 -181,11 +181,11 @@@ } static int - netdev_gre_reconfigure(struct netdev_dev *netdev_dev_, const struct shash *args) + netdev_tunnel_reconfigure(struct netdev_dev *netdev_dev_, const struct shash *args) { const char *name = netdev_dev_get_name(netdev_dev_); - struct odp_vport_mod ovm; + struct xflow_vport_mod ovm; - struct gre_port_config port_config; + struct tnl_port_config port_config; int err; ovs_strlcpy(ovm.devname, name, sizeof ovm.devname); @@@ -194,11 -201,11 +201,11 @@@ } static void - netdev_gre_destroy(struct netdev_dev *netdev_dev_) + netdev_tunnel_destroy(struct netdev_dev *netdev_dev_) { - struct netdev_dev_gre *netdev_dev = netdev_dev_gre_cast(netdev_dev_); + struct netdev_dev_tunnel *netdev_dev = netdev_dev_tunnel_cast(netdev_dev_); - netdev_vport_do_ioctl(ODP_VPORT_DEL, (char *)netdev_dev_get_name(netdev_dev_)); + netdev_vport_do_ioctl(XFLOW_VPORT_DEL, (char *)netdev_dev_get_name(netdev_dev_)); free(netdev_dev); } diff --cc lib/ofp-print.c index 7a2f881b8,870487816..e44cf36a1 --- a/lib/ofp-print.c +++ b/lib/ofp-print.c @@@ -766,13 -765,10 +766,13 @@@ ofp_print_flow_mod(struct ds *string, c ds_put_cstr(string, " DEL_STRICT: "); break; default: - ds_put_format(string, " cmd:%d ", ntohs(ofm->command)); + 
ds_put_format(string, " cmd:%u ", command); + } + if (command & 0xff00) { + ds_put_format(string, "table_id:%u ", command >> 8); } ds_put_format(string, "cookie:0x%"PRIx64" idle:%d hard:%d pri:%d " - "buf:%#x flags:%"PRIx16" ", ntohll(ofm->cookie), + "buf:%#x flags:%"PRIx16" ", ntohll(ofm->cookie), ntohs(ofm->idle_timeout), ntohs(ofm->hard_timeout), ofm->match.wildcards ? ntohs(ofm->priority) : (uint16_t)-1, ntohl(ofm->buffer_id), ntohs(ofm->flags)); diff --cc lib/vlog-modules.def index 9cfe59e4a,03f310375..4487662fa --- a/lib/vlog-modules.def +++ b/lib/vlog-modules.def @@@ -25,9 -25,11 +25,8 @@@ VLOG_MODULE(daemon VLOG_MODULE(dhcp) VLOG_MODULE(dhcp_client) VLOG_MODULE(discovery) -VLOG_MODULE(dpif) -VLOG_MODULE(dpif_linux) -VLOG_MODULE(dpif_netdev) VLOG_MODULE(dpctl) VLOG_MODULE(entropy) - VLOG_MODULE(ezio_term) VLOG_MODULE(fail_open) VLOG_MODULE(fatal_signal) VLOG_MODULE(flow) @@@ -74,24 -73,18 +73,21 @@@ VLOG_MODULE(stream_fd VLOG_MODULE(stream_ssl) VLOG_MODULE(stream_tcp) VLOG_MODULE(stream_unix) -VLOG_MODULE(stream) -VLOG_MODULE(status) VLOG_MODULE(svec) - VLOG_MODULE(switchui) - VLOG_MODULE(terminal) VLOG_MODULE(timeval) - VLOG_MODULE(tty) + VLOG_MODULE(socket_util) VLOG_MODULE(unixctl) VLOG_MODULE(util) -VLOG_MODULE(vconn_stream) VLOG_MODULE(vconn) -VLOG_MODULE(vsctl) +VLOG_MODULE(vconn_stream) VLOG_MODULE(vlog) +VLOG_MODULE(vsctl) VLOG_MODULE(vswitchd) - VLOG_MODULE(vt) +VLOG_MODULE(wdp) +VLOG_MODULE(wdp_xflow) VLOG_MODULE(xenserver) +VLOG_MODULE(xfif) +VLOG_MODULE(xfif_linux) +VLOG_MODULE(xfif_netdev) #undef VLOG_MODULE diff --cc lib/xfif-netdev.c index 8ae9fc52a,000000000..1afb08c81 mode 100644,000000..100644 --- a/lib/xfif-netdev.c +++ b/lib/xfif-netdev.c @@@ -1,1363 -1,0 +1,1402 @@@ +/* + * Copyright (c) 2009, 2010 Nicira Networks. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "xfif.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "csum.h" +#include "flow.h" +#include "hmap.h" +#include "list.h" +#include "netdev.h" +#include "xflow-util.h" +#include "ofp-print.h" +#include "ofpbuf.h" +#include "packets.h" +#include "poll-loop.h" +#include "queue.h" +#include "timeval.h" +#include "util.h" +#include "vlog.h" +#include "xfif-provider.h" + +VLOG_DEFINE_THIS_MODULE(xfif_netdev) + +/* Configuration parameters. */ +enum { N_QUEUES = 2 }; /* Number of queues for xfif_recv(). */ +enum { MAX_QUEUE_LEN = 100 }; /* Maximum number of packets per queue. */ +enum { N_GROUPS = 16 }; /* Number of port groups. */ +enum { MAX_PORTS = 256 }; /* Maximum number of ports. */ +enum { MAX_FLOWS = 65536 }; /* Maximum number of flows in flow table. */ + +/* Enough headroom to add a vlan tag, plus an extra 2 bytes to allow IP + * headers to be aligned on a 4-byte boundary. */ +enum { XF_NETDEV_HEADROOM = 2 + VLAN_HEADER_LEN }; + +/* Datapath based on the network device interface from netdev.h. 
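+ *
+ * This is the userspace implementation of the xflow interface: flows are
+ * kept in 'flow_table', packets are polled from each port's netdev in
+ * xf_netdev_run(), and misses are queued for xfif_recv() instead of being
+ * handed to a kernel module.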
*/ +struct xf_netdev { + struct list node; + int xf_idx; + int open_cnt; + bool destroyed; + + bool drop_frags; /* Drop all IP fragments, if true. */ + struct ovs_queue queues[N_QUEUES]; /* Messages queued for xfif_recv(). */ + struct hmap flow_table; /* Flow table. */ + struct xflow_port_group groups[N_GROUPS]; + + /* Statistics. */ + long long int n_frags; /* Number of dropped IP fragments. */ + long long int n_hit; /* Number of flow table matches. */ + long long int n_missed; /* Number of flow table misses. */ + long long int n_lost; /* Number of misses not passed to client. */ + + /* Ports. */ + int n_ports; + struct xf_netdev_port *ports[MAX_PORTS]; + struct list port_list; + unsigned int serial; +}; + +/* A port in a netdev-based datapath. */ +struct xf_netdev_port { + int port_no; /* Index into xf_netdev's 'ports'. */ + struct list node; /* Element in xf_netdev's 'port_list'. */ + struct netdev *netdev; + bool internal; /* Internal port (as XFLOW_PORT_INTERNAL)? */ +}; + +/* A flow in xf_netdev's 'flow_table'. */ +struct xf_netdev_flow { + struct hmap_node node; /* Element in xf_netdev's 'flow_table'. */ + struct xflow_key key; + + /* Statistics. */ + struct timespec used; /* Last used time. */ + long long int packet_count; /* Number of packets matched. */ + long long int byte_count; /* Number of bytes matched. */ + uint16_t tcp_ctl; /* Bitwise-OR of seen tcp_ctl values. */ + + /* Actions. */ + union xflow_action *actions; + unsigned int n_actions; +}; + +/* Interface to netdev-based datapath. */ +struct xfif_netdev { + struct xfif xfif; + struct xf_netdev *xf; + int listen_mask; + unsigned int xf_serial; +}; + +/* All netdev-based datapaths. */ +static struct xf_netdev *xf_netdevs[256]; +struct list xf_netdev_list = LIST_INITIALIZER(&xf_netdev_list); +enum { N_XF_NETDEVS = ARRAY_SIZE(xf_netdevs) }; + +/* Maximum port MTU seen so far. 
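+ *
+ * Updated by do_add_port() and used by xf_netdev_run() to size the shared
+ * receive buffer (XF_NETDEV_HEADROOM + max_mtu).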
*/ +static int max_mtu = ETH_PAYLOAD_MAX; + +static int get_port_by_number(struct xf_netdev *, uint16_t port_no, + struct xf_netdev_port **portp); +static int get_port_by_name(struct xf_netdev *, const char *devname, + struct xf_netdev_port **portp); +static void xf_netdev_free(struct xf_netdev *); +static void xf_netdev_flow_flush(struct xf_netdev *); +static int do_add_port(struct xf_netdev *, const char *devname, uint16_t flags, + uint16_t port_no); +static int do_del_port(struct xf_netdev *, uint16_t port_no); +static int xf_netdev_output_control(struct xf_netdev *, const struct ofpbuf *, + int queue_no, int port_no, uint32_t arg); +static int xf_netdev_execute_actions(struct xf_netdev *, + struct ofpbuf *, struct xflow_key *, + const union xflow_action *, int n); + +static struct xfif_netdev * +xfif_netdev_cast(const struct xfif *xfif) +{ + xfif_assert_class(xfif, &xfif_netdev_class); + return CONTAINER_OF(xfif, struct xfif_netdev, xfif); +} + +static struct xf_netdev * +get_xf_netdev(const struct xfif *xfif) +{ + return xfif_netdev_cast(xfif)->xf; +} + +static int +name_to_xf_idx(const char *name) +{ + if (!strncmp(name, "xf", 2) && isdigit((unsigned char)name[2])) { + int xf_idx = atoi(name + 2); + if (xf_idx >= 0 && xf_idx < N_XF_NETDEVS) { + return xf_idx; + } + } + return -1; +} + +static struct xf_netdev * +find_xf_netdev(const char *name) +{ + int xf_idx; + size_t i; + + xf_idx = name_to_xf_idx(name); + if (xf_idx >= 0) { + return xf_netdevs[xf_idx]; + } + + for (i = 0; i < N_XF_NETDEVS; i++) { + struct xf_netdev *xf = xf_netdevs[i]; + if (xf) { + struct xf_netdev_port *port; + if (!get_port_by_name(xf, name, &port)) { + return xf; + } + } + } + return NULL; +} + +static struct xfif * +create_xfif_netdev(struct xf_netdev *xf) +{ + struct xfif_netdev *xfif; + char *xfname; + + xf->open_cnt++; + + xfname = xasprintf("xf%d", xf->xf_idx); + xfif = xmalloc(sizeof *xfif); + xfif_init(&xfif->xfif, &xfif_netdev_class, xfname, xf->xf_idx, xf->xf_idx); + xfif->xf = xf; + xfif->listen_mask = 0; + xfif->xf_serial = xf->serial; + free(xfname); + + return &xfif->xfif; +} + +static int +create_xf_netdev(const char *name, int xf_idx, struct xfif **xfifp) +{ + struct xf_netdev *xf; + int error; + int i; + + if (xf_netdevs[xf_idx]) { + return EBUSY; + } + + /* Create datapath. */ + xf_netdevs[xf_idx] = xf = xzalloc(sizeof *xf); + list_push_back(&xf_netdev_list, &xf->node); + xf->xf_idx = xf_idx; + xf->open_cnt = 0; + xf->drop_frags = false; + for (i = 0; i < N_QUEUES; i++) { + queue_init(&xf->queues[i]); + } + hmap_init(&xf->flow_table); + for (i = 0; i < N_GROUPS; i++) { + xf->groups[i].ports = NULL; + xf->groups[i].n_ports = 0; + xf->groups[i].group = i; + } + list_init(&xf->port_list); + error = do_add_port(xf, name, XFLOW_PORT_INTERNAL, XFLOWP_LOCAL); + if (error) { + xf_netdev_free(xf); + return ENODEV; + } + + *xfifp = create_xfif_netdev(xf); + return 0; +} + +static int +xfif_netdev_open(const char *name, const char *type OVS_UNUSED, bool create, + struct xfif **xfifp) +{ + if (create) { + if (find_xf_netdev(name)) { + return EEXIST; + } else { + int xf_idx = name_to_xf_idx(name); + if (xf_idx >= 0) { + return create_xf_netdev(name, xf_idx, xfifp); + } else { + /* Scan for unused xf_idx number. */ + for (xf_idx = 0; xf_idx < N_XF_NETDEVS; xf_idx++) { + int error = create_xf_netdev(name, xf_idx, xfifp); + if (error != EBUSY) { + return error; + } + } + + /* All datapath numbers in use. 
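+                 *
+                 * (create_xf_netdev() returns EBUSY for a taken xf_idx, so
+                 * reaching here means all N_XF_NETDEVS numbers are in use.)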
*/ + return ENOBUFS; + } + } + } else { + struct xf_netdev *xf = find_xf_netdev(name); + if (xf) { + *xfifp = create_xfif_netdev(xf); + return 0; + } else { + return ENODEV; + } + } +} + +static void +xf_netdev_free(struct xf_netdev *xf) +{ + int i; + + xf_netdev_flow_flush(xf); + while (xf->n_ports > 0) { + struct xf_netdev_port *port = CONTAINER_OF( + xf->port_list.next, struct xf_netdev_port, node); + do_del_port(xf, port->port_no); + } + for (i = 0; i < N_QUEUES; i++) { + queue_destroy(&xf->queues[i]); + } + hmap_destroy(&xf->flow_table); + for (i = 0; i < N_GROUPS; i++) { + free(xf->groups[i].ports); + } + xf_netdevs[xf->xf_idx] = NULL; + list_remove(&xf->node); + free(xf); +} + +static void +xfif_netdev_close(struct xfif *xfif) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + assert(xf->open_cnt > 0); + if (--xf->open_cnt == 0 && xf->destroyed) { + xf_netdev_free(xf); + } + free(xfif); +} + +static int +xfif_netdev_destroy(struct xfif *xfif) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + xf->destroyed = true; + return 0; +} + +static int +xfif_netdev_get_stats(const struct xfif *xfif, struct xflow_stats *stats) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + memset(stats, 0, sizeof *stats); + stats->n_flows = hmap_count(&xf->flow_table); + stats->cur_capacity = hmap_capacity(&xf->flow_table); + stats->max_capacity = MAX_FLOWS; + stats->n_ports = xf->n_ports; + stats->max_ports = MAX_PORTS; + stats->max_groups = N_GROUPS; + stats->n_frags = xf->n_frags; + stats->n_hit = xf->n_hit; + stats->n_missed = xf->n_missed; + stats->n_lost = xf->n_lost; + stats->max_miss_queue = MAX_QUEUE_LEN; + stats->max_action_queue = MAX_QUEUE_LEN; + return 0; +} + +static int +xfif_netdev_get_drop_frags(const struct xfif *xfif, bool *drop_fragsp) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + *drop_fragsp = xf->drop_frags; + return 0; +} + +static int +xfif_netdev_set_drop_frags(struct xfif *xfif, bool drop_frags) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + xf->drop_frags = drop_frags; + return 0; +} + +static int +do_add_port(struct xf_netdev *xf, const char *devname, uint16_t flags, + uint16_t port_no) +{ + bool internal = (flags & XFLOW_PORT_INTERNAL) != 0; + struct xf_netdev_port *port; + struct netdev_options netdev_options; + struct netdev *netdev; + int mtu; + int error; + + /* XXX reject devices already in some xf_netdev. */ + + /* Open and validate network device. 
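+     *
+     * Internal ports are backed by a "tap" netdev so that the host network
+     * stack can send and receive on them.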
*/ + memset(&netdev_options, 0, sizeof netdev_options); + netdev_options.name = devname; + netdev_options.ethertype = NETDEV_ETH_TYPE_ANY; + if (internal) { + netdev_options.type = "tap"; + } + + error = netdev_open(&netdev_options, &netdev); + if (error) { + return error; + } + /* XXX reject loopback devices */ + /* XXX reject non-Ethernet devices */ + + error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, false); + if (error) { + netdev_close(netdev); + return error; + } + + port = xmalloc(sizeof *port); + port->port_no = port_no; + port->netdev = netdev; + port->internal = internal; + + netdev_get_mtu(netdev, &mtu); + if (mtu > max_mtu) { + max_mtu = mtu; + } + + list_push_back(&xf->port_list, &port->node); + xf->ports[port_no] = port; + xf->n_ports++; + xf->serial++; + + return 0; +} + +static int +xfif_netdev_port_add(struct xfif *xfif, const char *devname, uint16_t flags, + uint16_t *port_nop) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + int port_no; + + for (port_no = 0; port_no < MAX_PORTS; port_no++) { + if (!xf->ports[port_no]) { + *port_nop = port_no; + return do_add_port(xf, devname, flags, port_no); + } + } + return EFBIG; +} + +static int +xfif_netdev_port_del(struct xfif *xfif, uint16_t port_no) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + return port_no == XFLOWP_LOCAL ? EINVAL : do_del_port(xf, port_no); +} + +static bool +is_valid_port_number(uint16_t port_no) +{ + return port_no < MAX_PORTS; +} + +static int +get_port_by_number(struct xf_netdev *xf, + uint16_t port_no, struct xf_netdev_port **portp) +{ + if (!is_valid_port_number(port_no)) { + *portp = NULL; + return EINVAL; + } else { + *portp = xf->ports[port_no]; + return *portp ? 0 : ENOENT; + } +} + +static int +get_port_by_name(struct xf_netdev *xf, + const char *devname, struct xf_netdev_port **portp) +{ + struct xf_netdev_port *port; + + LIST_FOR_EACH (port, struct xf_netdev_port, node, &xf->port_list) { + if (!strcmp(netdev_get_name(port->netdev), devname)) { + *portp = port; + return 0; + } + } + return ENOENT; +} + +static int +do_del_port(struct xf_netdev *xf, uint16_t port_no) +{ + struct xf_netdev_port *port; + char *name; + int error; + + error = get_port_by_number(xf, port_no, &port); + if (error) { + return error; + } + + list_remove(&port->node); + xf->ports[port->port_no] = NULL; + xf->n_ports--; + xf->serial++; + + name = xstrdup(netdev_get_name(port->netdev)); + netdev_close(port->netdev); + + free(name); + free(port); + + return 0; +} + +static void +answer_port_query(const struct xf_netdev_port *port, struct xflow_port *xflow_port) +{ + memset(xflow_port, 0, sizeof *xflow_port); + ovs_strlcpy(xflow_port->devname, netdev_get_name(port->netdev), + sizeof xflow_port->devname); + xflow_port->port = port->port_no; + xflow_port->flags = port->internal ? 
XFLOW_PORT_INTERNAL : 0; +} + +static int +xfif_netdev_port_query_by_number(const struct xfif *xfif, uint16_t port_no, + struct xflow_port *xflow_port) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + struct xf_netdev_port *port; + int error; + + error = get_port_by_number(xf, port_no, &port); + if (!error) { + answer_port_query(port, xflow_port); + } + return error; +} + +static int +xfif_netdev_port_query_by_name(const struct xfif *xfif, const char *devname, + struct xflow_port *xflow_port) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + struct xf_netdev_port *port; + int error; + + error = get_port_by_name(xf, devname, &port); + if (!error) { + answer_port_query(port, xflow_port); + } + return error; +} + +static void +xf_netdev_free_flow(struct xf_netdev *xf, struct xf_netdev_flow *flow) +{ + hmap_remove(&xf->flow_table, &flow->node); + free(flow->actions); + free(flow); +} + +static void +xf_netdev_flow_flush(struct xf_netdev *xf) +{ + struct xf_netdev_flow *flow, *next; + + HMAP_FOR_EACH_SAFE (flow, next, struct xf_netdev_flow, node, + &xf->flow_table) { + xf_netdev_free_flow(xf, flow); + } +} + +static int +xfif_netdev_flow_flush(struct xfif *xfif) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + xf_netdev_flow_flush(xf); + return 0; +} + +static int +xfif_netdev_port_list(const struct xfif *xfif, struct xflow_port *ports, int n) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + struct xf_netdev_port *port; + int i; + + i = 0; + LIST_FOR_EACH (port, struct xf_netdev_port, node, &xf->port_list) { + struct xflow_port *xflow_port = &ports[i]; + if (i >= n) { + break; + } + answer_port_query(port, xflow_port); + i++; + } + return xf->n_ports; +} + +static int +xfif_netdev_port_poll(const struct xfif *xfif_, char **devnamep OVS_UNUSED) +{ + struct xfif_netdev *xfif = xfif_netdev_cast(xfif_); + if (xfif->xf_serial != xfif->xf->serial) { + xfif->xf_serial = xfif->xf->serial; + return ENOBUFS; + } else { + return EAGAIN; + } +} + +static void +xfif_netdev_port_poll_wait(const struct xfif *xfif_) +{ + struct xfif_netdev *xfif = xfif_netdev_cast(xfif_); + if (xfif->xf_serial != xfif->xf->serial) { + poll_immediate_wake(); + } +} + +static int +get_port_group(const struct xfif *xfif, int group_no, + struct xflow_port_group **groupp) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + + if (group_no >= 0 && group_no < N_GROUPS) { + *groupp = &xf->groups[group_no]; + return 0; + } else { + *groupp = NULL; + return EINVAL; + } +} + +static int +xfif_netdev_port_group_get(const struct xfif *xfif, int group_no, + uint16_t ports[], int n) +{ + struct xflow_port_group *group; + int error; + + if (n < 0) { + return -EINVAL; + } + + error = get_port_group(xfif, group_no, &group); + if (!error) { + memcpy(ports, group->ports, MIN(n, group->n_ports) * sizeof *ports); + return group->n_ports; + } else { + return -error; + } +} + +static int +xfif_netdev_port_group_set(struct xfif *xfif, int group_no, + const uint16_t ports[], int n) +{ + struct xflow_port_group *group; + int error; + + if (n < 0 || n > MAX_PORTS) { + return EINVAL; + } + + error = get_port_group(xfif, group_no, &group); + if (!error) { + free(group->ports); + group->ports = xmemdup(ports, n * sizeof *group->ports); + group->n_ports = n; + group->group = group_no; + } + return error; +} + +static struct xf_netdev_flow * +xf_netdev_lookup_flow(const struct xf_netdev *xf, + const struct xflow_key *key) +{ + struct xf_netdev_flow *flow; + + HMAP_FOR_EACH_WITH_HASH (flow, struct xf_netdev_flow, node, + xflow_key_hash(key, 0), 
&xf->flow_table) { + if (xflow_key_equal(&flow->key, key)) { + return flow; + } + } + return NULL; +} + +static void +answer_flow_query(struct xf_netdev_flow *flow, uint32_t query_flags, + struct xflow_flow *xflow_flow) +{ + if (flow) { + xflow_flow->key = flow->key; + xflow_flow->stats.n_packets = flow->packet_count; + xflow_flow->stats.n_bytes = flow->byte_count; + xflow_flow->stats.used_sec = flow->used.tv_sec; + xflow_flow->stats.used_nsec = flow->used.tv_nsec; + xflow_flow->stats.tcp_flags = TCP_FLAGS(flow->tcp_ctl); + xflow_flow->stats.reserved = 0; + xflow_flow->stats.error = 0; + if (xflow_flow->n_actions > 0) { + unsigned int n = MIN(xflow_flow->n_actions, flow->n_actions); + memcpy(xflow_flow->actions, flow->actions, + n * sizeof *xflow_flow->actions); + xflow_flow->n_actions = flow->n_actions; + } + + if (query_flags & XFLOWFF_ZERO_TCP_FLAGS) { + flow->tcp_ctl = 0; + } + + } else { + xflow_flow->stats.error = ENOENT; + } +} + +static int +xfif_netdev_flow_get(const struct xfif *xfif, struct xflow_flow flows[], int n) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + int i; + + for (i = 0; i < n; i++) { + struct xflow_flow *xflow_flow = &flows[i]; + answer_flow_query(xf_netdev_lookup_flow(xf, &xflow_flow->key), + xflow_flow->flags, xflow_flow); + } + return 0; +} + +static int +xfif_netdev_validate_actions(const union xflow_action *actions, int n_actions, + bool *mutates) +{ + unsigned int i; + + *mutates = false; + for (i = 0; i < n_actions; i++) { + const union xflow_action *a = &actions[i]; + switch (a->type) { + case XFLOWAT_OUTPUT: + if (a->output.port >= MAX_PORTS) { + return EINVAL; + } + break; + + case XFLOWAT_OUTPUT_GROUP: + *mutates = true; + if (a->output_group.group >= N_GROUPS) { + return EINVAL; + } + break; + + case XFLOWAT_CONTROLLER: + break; + + case XFLOWAT_SET_DL_TCI: + *mutates = true; + if (a->dl_tci.mask != htons(VLAN_VID_MASK) + && a->dl_tci.mask != htons(VLAN_PCP_MASK) + && a->dl_tci.mask != htons(VLAN_VID_MASK | VLAN_PCP_MASK)) { + return EINVAL; + } + if (a->dl_tci.tci & ~a->dl_tci.mask){ + return EINVAL; + } + break; + + case XFLOWAT_SET_NW_TOS: + *mutates = true; + if (a->nw_tos.nw_tos & IP_ECN_MASK) { + return EINVAL; + } + break; + + case XFLOWAT_STRIP_VLAN: + case XFLOWAT_SET_DL_SRC: + case XFLOWAT_SET_DL_DST: + case XFLOWAT_SET_NW_SRC: + case XFLOWAT_SET_NW_DST: + case XFLOWAT_SET_TP_SRC: + case XFLOWAT_SET_TP_DST: + *mutates = true; + break; + + default: + return EOPNOTSUPP; + } + } + return 0; +} + +static int +set_flow_actions(struct xf_netdev_flow *flow, struct xflow_flow *xflow_flow) +{ + size_t n_bytes; + bool mutates; + int error; + + if (xflow_flow->n_actions >= 4096 / sizeof *xflow_flow->actions) { + return EINVAL; + } + error = xfif_netdev_validate_actions(xflow_flow->actions, + xflow_flow->n_actions, &mutates); + if (error) { + return error; + } + + n_bytes = xflow_flow->n_actions * sizeof *flow->actions; + flow->actions = xrealloc(flow->actions, n_bytes); + flow->n_actions = xflow_flow->n_actions; + memcpy(flow->actions, xflow_flow->actions, n_bytes); + return 0; +} + +static int +add_flow(struct xfif *xfif, struct xflow_flow *xflow_flow) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + struct xf_netdev_flow *flow; + int error; + + flow = xzalloc(sizeof *flow); + flow->key = xflow_flow->key; + + error = set_flow_actions(flow, xflow_flow); + if (error) { + free(flow); + return error; + } + + hmap_insert(&xf->flow_table, &flow->node, + xflow_key_hash(&flow->key, 0)); + return 0; +} + +static void +clear_stats(struct xf_netdev_flow 
*flow) +{ + flow->used.tv_sec = 0; + flow->used.tv_nsec = 0; + flow->packet_count = 0; + flow->byte_count = 0; + flow->tcp_ctl = 0; +} + +static int +xfif_netdev_flow_put(struct xfif *xfif, struct xflow_flow_put *put) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + struct xf_netdev_flow *flow; + + flow = xf_netdev_lookup_flow(xf, &put->flow.key); + if (!flow) { + if (put->flags & XFLOWPF_CREATE) { + if (hmap_count(&xf->flow_table) < MAX_FLOWS) { + return add_flow(xfif, &put->flow); + } else { + return EFBIG; + } + } else { + return ENOENT; + } + } else { + if (put->flags & XFLOWPF_MODIFY) { + int error = set_flow_actions(flow, &put->flow); + if (!error && put->flags & XFLOWPF_ZERO_STATS) { + clear_stats(flow); + } + return error; + } else { + return EEXIST; + } + } +} + + +static int +xfif_netdev_flow_del(struct xfif *xfif, struct xflow_flow *xflow_flow) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + struct xf_netdev_flow *flow; + + flow = xf_netdev_lookup_flow(xf, &xflow_flow->key); + if (flow) { + answer_flow_query(flow, 0, xflow_flow); + xf_netdev_free_flow(xf, flow); + return 0; + } else { + return ENOENT; + } +} + +static int +xfif_netdev_flow_list(const struct xfif *xfif, struct xflow_flow flows[], int n) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + struct xf_netdev_flow *flow; + int i; + + i = 0; + HMAP_FOR_EACH (flow, struct xf_netdev_flow, node, &xf->flow_table) { + if (i >= n) { + break; + } + answer_flow_query(flow, 0, &flows[i++]); + } + return hmap_count(&xf->flow_table); +} + +static int +xfif_netdev_execute(struct xfif *xfif, uint16_t in_port, + const union xflow_action actions[], int n_actions, + const struct ofpbuf *packet) +{ + struct xf_netdev *xf = get_xf_netdev(xfif); + struct ofpbuf copy; + bool mutates; + struct xflow_key key; + flow_t flow; + int error; + + if (packet->size < ETH_HEADER_LEN || packet->size > UINT16_MAX) { + return EINVAL; + } + + error = xfif_netdev_validate_actions(actions, n_actions, &mutates); + if (error) { + return error; + } + + if (mutates) { + /* We need a deep copy of 'packet' since we're going to modify its + * data. */ + ofpbuf_init(©, XF_NETDEV_HEADROOM + packet->size); + copy.data = (char*)copy.base + XF_NETDEV_HEADROOM; + ofpbuf_put(©, packet->data, packet->size); + } else { + /* We still need a shallow copy of 'packet', even though we won't + * modify its data, because flow_extract() modifies packet->l2, etc. + * We could probably get away with modifying those but it's more polite + * if we don't. 
*/ + copy = *packet; + } + flow_extract(©, 0, in_port, &flow); + xflow_key_from_flow(&key, &flow); + error = xf_netdev_execute_actions(xf, ©, &key, actions, n_actions); + if (mutates) { + ofpbuf_uninit(©); + } + return error; +} + +static int +xfif_netdev_recv_get_mask(const struct xfif *xfif, int *listen_mask) +{ + struct xfif_netdev *xfif_netdev = xfif_netdev_cast(xfif); + *listen_mask = xfif_netdev->listen_mask; + return 0; +} + +static int +xfif_netdev_recv_set_mask(struct xfif *xfif, int listen_mask) +{ + struct xfif_netdev *xfif_netdev = xfif_netdev_cast(xfif); + if (!(listen_mask & ~XFLOWL_ALL)) { + xfif_netdev->listen_mask = listen_mask; + return 0; + } else { + return EINVAL; + } +} + +static struct ovs_queue * +find_nonempty_queue(struct xfif *xfif) +{ + struct xfif_netdev *xfif_netdev = xfif_netdev_cast(xfif); + struct xf_netdev *xf = get_xf_netdev(xfif); + int mask = xfif_netdev->listen_mask; + int i; + + for (i = 0; i < N_QUEUES; i++) { + struct ovs_queue *q = &xf->queues[i]; + if (q->n && mask & (1u << i)) { + return q; + } + } + return NULL; +} + +static int +xfif_netdev_recv(struct xfif *xfif, struct ofpbuf **bufp) +{ + struct ovs_queue *q = find_nonempty_queue(xfif); + if (q) { + *bufp = queue_pop_head(q); + return 0; + } else { + return EAGAIN; + } +} + +static void +xfif_netdev_recv_wait(struct xfif *xfif) +{ + struct ovs_queue *q = find_nonempty_queue(xfif); + if (q) { + poll_immediate_wake(); + } else { + /* No messages ready to be received, and xf_wait() will ensure that we + * wake up to queue new messages, so there is nothing to do. */ + } +} + +static void +xf_netdev_flow_used(struct xf_netdev_flow *flow, + const struct xflow_key *key, + const struct ofpbuf *packet) +{ + time_timespec(&flow->used); + flow->packet_count++; + flow->byte_count += packet->size; + if (key->dl_type == htons(ETH_TYPE_IP) && key->nw_proto == IPPROTO_TCP) { + struct tcp_header *th = packet->l4; + flow->tcp_ctl |= th->tcp_ctl; + } +} + +static void +xf_netdev_port_input(struct xf_netdev *xf, struct xf_netdev_port *port, + struct ofpbuf *packet) +{ + struct xf_netdev_flow *flow; + struct xflow_key key; + flow_t f; + + if (packet->size < ETH_HEADER_LEN) { + return; + } + if (flow_extract(packet, 0, port->port_no, &f) && xf->drop_frags) { + xf->n_frags++; + return; + } + xflow_key_from_flow(&key, &f); + + flow = xf_netdev_lookup_flow(xf, &key); + if (flow) { + xf_netdev_flow_used(flow, &key, packet); + xf_netdev_execute_actions(xf, packet, &key, + flow->actions, flow->n_actions); + xf->n_hit++; + } else { + xf->n_missed++; + xf_netdev_output_control(xf, packet, _XFLOWL_MISS_NR, port->port_no, 0); + } +} + +static void +xf_netdev_run(void) +{ + struct ofpbuf packet; + struct xf_netdev *xf; + + ofpbuf_init(&packet, XF_NETDEV_HEADROOM + max_mtu); + LIST_FOR_EACH (xf, struct xf_netdev, node, &xf_netdev_list) { + struct xf_netdev_port *port; + + LIST_FOR_EACH (port, struct xf_netdev_port, node, &xf->port_list) { + int error; + + /* Reset packet contents. 
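+             * The XF_NETDEV_HEADROOM left in front of the data gives
+             * actions such as xf_netdev_set_dl_tci() room to prepend an
+             * 802.1Q header without reallocating the buffer.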
*/
+            packet.data = (char*)packet.base + XF_NETDEV_HEADROOM;
+            packet.size = 0;
+
+            error = netdev_recv(port->netdev, &packet);
+            if (!error) {
+                xf_netdev_port_input(xf, port, &packet);
+            } else if (error != EAGAIN) {
+                struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+                VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
+                            netdev_get_name(port->netdev), strerror(error));
+            }
+        }
+    }
+    ofpbuf_uninit(&packet);
+}
+
+static void
+xf_netdev_wait(void)
+{
+    struct xf_netdev *xf;
+
+    LIST_FOR_EACH (xf, struct xf_netdev, node, &xf_netdev_list) {
+        struct xf_netdev_port *port;
+        LIST_FOR_EACH (port, struct xf_netdev_port, node, &xf->port_list) {
+            netdev_recv_wait(port->netdev);
+        }
+    }
+}
+
+
+/* Modify or add an 802.1Q header in 'packet' according to 'a'. */
+static void
+xf_netdev_set_dl_tci(struct ofpbuf *packet,
+                     const struct xflow_action_dl_tci *a)
+{
+    struct vlan_eth_header *veh;
+    struct eth_header *eh;
+
+    eh = packet->l2;
+    if (packet->size >= sizeof(struct vlan_eth_header)
+        && eh->eth_type == htons(ETH_TYPE_VLAN)) {
+        veh = packet->l2;
+        veh->veth_tci = (veh->veth_tci & ~a->mask) | a->tci;
+    } else {
+        /* Insert new 802.1Q header. */
+        struct vlan_eth_header tmp;
+        memcpy(tmp.veth_dst, eh->eth_dst, ETH_ADDR_LEN);
+        memcpy(tmp.veth_src, eh->eth_src, ETH_ADDR_LEN);
+        tmp.veth_type = htons(ETH_TYPE_VLAN);
+        tmp.veth_tci = a->tci;      /* 'tci' is already in network byte
+                                     * order, so no htons() here. */
+        tmp.veth_next_type = eh->eth_type;
+
+        veh = ofpbuf_push_uninit(packet, VLAN_HEADER_LEN);
+        memcpy(veh, &tmp, sizeof tmp);
+        packet->l2 = (char*)packet->l2 - VLAN_HEADER_LEN;
+    }
+}
+
+static void
+xf_netdev_strip_vlan(struct ofpbuf *packet, struct xflow_key *key)
+{
+    struct vlan_eth_header *veh = packet->l2;
+    if (packet->size >= sizeof *veh
+        && veh->veth_type == htons(ETH_TYPE_VLAN)) {
+        struct eth_header tmp;
+
+        memcpy(tmp.eth_dst, veh->veth_dst, ETH_ADDR_LEN);
+        memcpy(tmp.eth_src, veh->veth_src, ETH_ADDR_LEN);
+        tmp.eth_type = veh->veth_next_type;
+
+        packet->size -= VLAN_HEADER_LEN;
+        packet->data = (char*)packet->data + VLAN_HEADER_LEN;
+        packet->l2 = (char*)packet->l2 + VLAN_HEADER_LEN;
+        memcpy(packet->data, &tmp, sizeof tmp);
+
+        key->dl_tci = htons(0);
+    }
+}
+
+static void
+xf_netdev_set_dl_src(struct ofpbuf *packet,
+                     const uint8_t dl_addr[ETH_ADDR_LEN])
+{
+    struct eth_header *eh = packet->l2;
+    memcpy(eh->eth_src, dl_addr, sizeof eh->eth_src);
+}
+
+static void
+xf_netdev_set_dl_dst(struct ofpbuf *packet,
+                     const uint8_t dl_addr[ETH_ADDR_LEN])
+{
+    struct eth_header *eh = packet->l2;
+    memcpy(eh->eth_dst, dl_addr, sizeof eh->eth_dst);
+}
+
++static bool
++is_ip(const struct ofpbuf *packet, const struct xflow_key *key)
++{
++    return key->dl_type == htons(ETH_TYPE_IP) && packet->l4;
++}
++
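+/* A note on checksums: the set_nw_*() and set_tp_port() helpers below
+ * update checksums incrementally (RFC 1624 style) rather than recomputing
+ * them over the whole header.  Conceptually, when a field changes from m
+ * to m', the old sum HC becomes
+ *
+ *     HC' = ~(~HC + ~m + m')
+ *
+ * which is what recalc_csum16() and recalc_csum32() from lib/csum.h
+ * implement.  (Sketch for orientation; see lib/csum.h for the real code.) */
+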
+static void
+xf_netdev_set_nw_addr(struct ofpbuf *packet, const struct xflow_key *key,
+                      const struct xflow_action_nw_addr *a)
+{
-    if (key->dl_type == htons(ETH_TYPE_IP)) {
++    if (is_ip(packet, key)) {
+        struct ip_header *nh = packet->l3;
+        uint32_t *field;
+
+        field = a->type == XFLOWAT_SET_NW_SRC ? &nh->ip_src : &nh->ip_dst;
-        if (key->nw_proto == IP_TYPE_TCP) {
++        if (key->nw_proto == IP_TYPE_TCP && packet->l7) {
+            struct tcp_header *th = packet->l4;
+            th->tcp_csum = recalc_csum32(th->tcp_csum, *field, a->nw_addr);
-        } else if (key->nw_proto == IP_TYPE_UDP) {
++        } else if (key->nw_proto == IP_TYPE_UDP && packet->l7) {
+            struct udp_header *uh = packet->l4;
+            if (uh->udp_csum) {
+                uh->udp_csum = recalc_csum32(uh->udp_csum, *field, a->nw_addr);
+                if (!uh->udp_csum) {
+                    uh->udp_csum = 0xffff;
+                }
+            }
+        }
+        nh->ip_csum = recalc_csum32(nh->ip_csum, *field, a->nw_addr);
+        *field = a->nw_addr;
+    }
+}
+
+static void
+xf_netdev_set_nw_tos(struct ofpbuf *packet, const struct xflow_key *key,
+                     const struct xflow_action_nw_tos *a)
+{
-    if (key->dl_type == htons(ETH_TYPE_IP)) {
++    if (is_ip(packet, key)) {
+        struct ip_header *nh = packet->l3;
+        uint8_t *field = &nh->ip_tos;
+
+        /* Set the DSCP bits and preserve the ECN bits. */
+        uint8_t new = a->nw_tos | (nh->ip_tos & IP_ECN_MASK);
+
+        /* Fold the full new value (including the preserved ECN bits) into
+         * the checksum, not just a->nw_tos. */
+        nh->ip_csum = recalc_csum16(nh->ip_csum, htons((uint16_t)*field),
+                                    htons((uint16_t)new));
+        *field = new;
+    }
+}
+
+static void
+xf_netdev_set_tp_port(struct ofpbuf *packet, const struct xflow_key *key,
+                      const struct xflow_action_tp_port *a)
+{
-    if (key->dl_type == htons(ETH_TYPE_IP)) {
++    if (is_ip(packet, key)) {
+        uint16_t *field;
-        if (key->nw_proto == IPPROTO_TCP) {
++        if (key->nw_proto == IPPROTO_TCP && packet->l7) {
+            struct tcp_header *th = packet->l4;
+            field = a->type == XFLOWAT_SET_TP_SRC ? &th->tcp_src : &th->tcp_dst;
+            th->tcp_csum = recalc_csum16(th->tcp_csum, *field, a->tp_port);
+            *field = a->tp_port;
-        } else if (key->nw_proto == IPPROTO_UDP) {
++        } else if (key->nw_proto == IPPROTO_UDP && packet->l7) {
+            struct udp_header *uh = packet->l4;
+            field = a->type == XFLOWAT_SET_TP_SRC ? &uh->udp_src : &uh->udp_dst;
+            uh->udp_csum = recalc_csum16(uh->udp_csum, *field, a->tp_port);
+            *field = a->tp_port;
+        } else {
+            return;
+        }
+    }
+}
+
+static void
+xf_netdev_output_port(struct xf_netdev *xf, struct ofpbuf *packet,
+                      uint16_t out_port)
+{
+    struct xf_netdev_port *p = xf->ports[out_port];
+    if (p) {
+        netdev_send(p->netdev, packet);
+    }
+}
+
+static void
+xf_netdev_output_group(struct xf_netdev *xf, uint16_t group, uint16_t in_port,
+                       struct ofpbuf *packet)
+{
+    struct xflow_port_group *g = &xf->groups[group];
+    int i;
+
+    for (i = 0; i < g->n_ports; i++) {
+        uint16_t out_port = g->ports[i];
+        if (out_port != in_port) {
+            xf_netdev_output_port(xf, packet, out_port);
+        }
+    }
+}
+
+static int
+xf_netdev_output_control(struct xf_netdev *xf, const struct ofpbuf *packet,
+                         int queue_no, int port_no, uint32_t arg)
+{
+    struct ovs_queue *q = &xf->queues[queue_no];
+    struct xflow_msg *header;
+    struct ofpbuf *msg;
+    size_t msg_size;
+
+    if (q->n >= MAX_QUEUE_LEN) {
+        xf->n_lost++;
+        return ENOBUFS;
+    }
+
+    msg_size = sizeof *header + packet->size;
+    msg = ofpbuf_new(msg_size + XFIF_RECV_MSG_PADDING);
+    /* Reserve the headroom before putting any data into the buffer:
+     * ofpbuf_reserve() requires an empty ofpbuf. */
+    ofpbuf_reserve(msg, XFIF_RECV_MSG_PADDING);
+    header = ofpbuf_put_uninit(msg, sizeof *header);
+    header->type = queue_no;
+    header->length = msg_size;
+    header->port = port_no;
+    header->arg = arg;
+    ofpbuf_put(msg, packet->data, packet->size);
+    queue_push_tail(q, msg);
+
+    return 0;
+}
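+
+/* A message queued by xf_netdev_output_control() is laid out like this
+ * (sketch):
+ *
+ *     XFIF_RECV_MSG_PADDING bytes of headroom
+ *     struct xflow_msg header          (type, length, port, arg)
+ *     packet data                      (copied from 'packet')
+ *
+ * The headroom lets a consumer of xfif_netdev_recv() replace the xflow_msg
+ * header with a "struct ofp_packet_in" in place; see the
+ * XFIF_RECV_MSG_PADDING comment in lib/xfif.h. */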
++/* Returns true if 'packet' is an invalid Ethernet+IPv4 ARP packet: one with
++ * screwy or truncated header fields or one whose inner and outer Ethernet
++ * addresses differ. */
++static bool
++xf_netdev_is_spoofed_arp(struct ofpbuf *packet, const struct xflow_key *key)
++{
++    struct arp_eth_header *arp;
++    struct eth_header *eth;
++    ptrdiff_t l3_size;
++
++    if (key->dl_type != htons(ETH_TYPE_ARP)) {
++        return false;
++    }
++
++    l3_size = (char *) ofpbuf_end(packet) - (char *) packet->l3;
++    if (l3_size < sizeof(struct arp_eth_header)) {
++        return true;
++    }
++
++    eth = packet->l2;
++    arp = packet->l3;
++    return (arp->ar_hrd != htons(ARP_HRD_ETHERNET)
++            || arp->ar_pro != htons(ARP_PRO_IP)
++            || arp->ar_hln != ETH_ADDR_LEN   /* hardware address length */
++            || arp->ar_pln != 4
++            || !eth_addr_equals(arp->ar_sha, eth->eth_src));
++}
++
+static int
+xf_netdev_execute_actions(struct xf_netdev *xf,
+                          struct ofpbuf *packet, struct xflow_key *key,
+                          const union xflow_action *actions, int n_actions)
+{
+    int i;
+    for (i = 0; i < n_actions; i++) {
+        const union xflow_action *a = &actions[i];
+
+        switch (a->type) {
+        case XFLOWAT_OUTPUT:
+            xf_netdev_output_port(xf, packet, a->output.port);
+            break;
+
+        case XFLOWAT_OUTPUT_GROUP:
+            xf_netdev_output_group(xf, a->output_group.group, key->in_port,
+                                   packet);
+            break;
+
+        case XFLOWAT_CONTROLLER:
+            xf_netdev_output_control(xf, packet, _XFLOWL_ACTION_NR,
+                                     key->in_port, a->controller.arg);
+            break;
+
+        case XFLOWAT_SET_DL_TCI:
+            xf_netdev_set_dl_tci(packet, &a->dl_tci);
+            break;
+
+        case XFLOWAT_STRIP_VLAN:
+            xf_netdev_strip_vlan(packet, key);
+            break;
+
+        case XFLOWAT_SET_DL_SRC:
+            xf_netdev_set_dl_src(packet, a->dl_addr.dl_addr);
+            break;
+
+        case XFLOWAT_SET_DL_DST:
+            xf_netdev_set_dl_dst(packet, a->dl_addr.dl_addr);
+            break;
+
+        case XFLOWAT_SET_NW_SRC:
+        case XFLOWAT_SET_NW_DST:
+            xf_netdev_set_nw_addr(packet, key, &a->nw_addr);
+            break;
+
+        case XFLOWAT_SET_NW_TOS:
+            xf_netdev_set_nw_tos(packet, key, &a->nw_tos);
+            break;
+
+        case XFLOWAT_SET_TP_SRC:
+        case XFLOWAT_SET_TP_DST:
+            xf_netdev_set_tp_port(packet, key, &a->tp_port);
+            break;
++
++        case XFLOWAT_DROP_SPOOFED_ARP:
++            if (xf_netdev_is_spoofed_arp(packet, key)) {
++                return 0;
++            }
++            break;
+        }
+    }
+    return 0;
+}
+
+const struct xfif_class xfif_netdev_class = {
+    "netdev",
+    xf_netdev_run,
+    xf_netdev_wait,
+    NULL,                       /* enumerate */
+    xfif_netdev_open,
+    xfif_netdev_close,
+    NULL,                       /* get_all_names */
+    xfif_netdev_destroy,
+    xfif_netdev_get_stats,
+    xfif_netdev_get_drop_frags,
+    xfif_netdev_set_drop_frags,
+    xfif_netdev_port_add,
+    xfif_netdev_port_del,
+    xfif_netdev_port_query_by_number,
+    xfif_netdev_port_query_by_name,
+    xfif_netdev_port_list,
+    xfif_netdev_port_poll,
+    xfif_netdev_port_poll_wait,
+    xfif_netdev_port_group_get,
+    xfif_netdev_port_group_set,
+    xfif_netdev_flow_get,
+    xfif_netdev_flow_put,
+    xfif_netdev_flow_del,
+    xfif_netdev_flow_flush,
+    xfif_netdev_flow_list,
+    xfif_netdev_execute,
+    xfif_netdev_recv_get_mask,
+    xfif_netdev_recv_set_mask,
+    NULL,                       /* get_sflow_probability */
+    NULL,                       /* set_sflow_probability */
+    NULL,                       /* queue_to_priority */
+    xfif_netdev_recv,
+    xfif_netdev_recv_wait,
+};
diff --cc lib/xfif-provider.h
index 8eceba038,1106db888..baf8bc92e
--- a/lib/xfif-provider.h
+++ b/lib/xfif-provider.h
@@@ -319,23 -317,15 +319,15 @@@ struct xfif_class
       *
       * This function must not block.  If no message is ready to be received
       * when it is called, it should return EAGAIN without blocking.
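       *
       * A typical caller loop might look like this (sketch only, error
       * handling omitted; the caller takes ownership of each packet):
       *
       *     struct ofpbuf *packet;
       *     while (!xfif_recv(xfif, &packet)) {
       *         ...process 'packet'...
       *         ofpbuf_delete(packet);
       *     }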
*/ - int (*recv)(struct dpif *dpif, struct ofpbuf **packetp); + int (*recv)(struct xfif *xfif, struct ofpbuf **packetp); - /* Arranges for the poll loop to wake up when 'dpif' has a message queued + /* Arranges for the poll loop to wake up when 'xfif' has a message queued * to be received with the recv member function. */ - void (*recv_wait)(struct dpif *dpif); + void (*recv_wait)(struct xfif *xfif); }; - /* Minimum number of bytes of headroom for a packet returned by the 'recv' - * member function (see above). This headroom allows "struct xflow_msg" to be - * replaced by "struct ofp_packet_in" without copying the buffer. */ - #define XFIF_RECV_MSG_PADDING (sizeof(struct ofp_packet_in) \ - - sizeof(struct xflow_msg)) - BUILD_ASSERT_DECL(sizeof(struct ofp_packet_in) > sizeof(struct xflow_msg)); - BUILD_ASSERT_DECL(XFIF_RECV_MSG_PADDING % 4 == 0); - -extern const struct dpif_class dpif_linux_class; -extern const struct dpif_class dpif_netdev_class; +extern const struct xfif_class xfif_linux_class; +extern const struct xfif_class xfif_netdev_class; #ifdef __cplusplus } diff --cc lib/xfif.c index 50c47801f,01e905d93..2ba8dd049 --- a/lib/xfif.c +++ b/lib/xfif.c @@@ -314,16 -313,16 +314,16 @@@ xfif_create_and_open(const char *name, return error; } -/* Closes and frees the connection to 'dpif'. Does not destroy the datapath - * itself; call dpif_delete() first, instead, if that is desirable. */ +/* Closes and frees the connection to 'xfif'. Does not destroy the datapath + * itself; call xfif_delete() first, instead, if that is desirable. */ void -dpif_close(struct dpif *dpif) +xfif_close(struct xfif *xfif) { - if (dpif) { - struct registered_dpif_class *registered_class; + if (xfif) { + struct registered_xfif_class *registered_class; - registered_class = shash_find_data(&xfif_classes, - registered_class = shash_find_data(&dpif_classes, - dpif->dpif_class->type); ++ registered_class = shash_find_data(&xfif_classes, + xfif->xfif_class->type); assert(registered_class); assert(registered_class->refcount); diff --cc lib/xfif.h index 05115f103,000000000..aeabb11cb mode 100644,000000..100644 --- a/lib/xfif.h +++ b/lib/xfif.h @@@ -1,111 -1,0 +1,121 @@@ +/* + * Copyright (c) 2008, 2009, 2010 Nicira Networks. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at: + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+
+#ifndef XFIF_H
+#define XFIF_H 1
+
- #include "openvswitch/xflow.h"
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
++#include "openflow/openflow.h"
++#include "openvswitch/xflow.h"
++#include "util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct xfif;
+struct ofpbuf;
+struct svec;
+struct xfif_class;
+
+void xf_run(void);
+void xf_wait(void);
+
+int xf_register_provider(const struct xfif_class *);
+int xf_unregister_provider(const char *type);
+void xf_enumerate_types(struct svec *types);
+
+int xf_enumerate_names(const char *type, struct svec *names);
+void xf_parse_name(const char *datapath_name, char **name, char **type);
+
+int xfif_open(const char *name, const char *type, struct xfif **);
+int xfif_create(const char *name, const char *type, struct xfif **);
+int xfif_create_and_open(const char *name, const char *type, struct xfif **);
+void xfif_close(struct xfif *);
+
+const char *xfif_name(const struct xfif *);
+const char *xfif_base_name(const struct xfif *);
+int xfif_get_all_names(const struct xfif *, struct svec *);
+
+int xfif_delete(struct xfif *);
+
+int xfif_get_xf_stats(const struct xfif *, struct xflow_stats *);
+int xfif_get_drop_frags(const struct xfif *, bool *drop_frags);
+int xfif_set_drop_frags(struct xfif *, bool drop_frags);
+
+int xfif_port_add(struct xfif *, const char *devname, uint16_t flags,
+                  uint16_t *port_no);
+int xfif_port_del(struct xfif *, uint16_t port_no);
+int xfif_port_query_by_number(const struct xfif *, uint16_t port_no,
+                              struct xflow_port *);
+int xfif_port_query_by_name(const struct xfif *, const char *devname,
+                            struct xflow_port *);
+int xfif_port_get_name(struct xfif *, uint16_t port_no,
+                       char *name, size_t name_size);
+int xfif_port_list(const struct xfif *, struct xflow_port **, size_t *n_ports);
+
+int xfif_port_poll(const struct xfif *, char **devnamep);
+void xfif_port_poll_wait(const struct xfif *);
+
+int xfif_port_group_get(const struct xfif *, uint16_t group,
+                        uint16_t **ports, size_t *n_ports);
+int xfif_port_group_set(struct xfif *, uint16_t group,
+                        const uint16_t ports[], size_t n_ports);
+
+int xfif_flow_flush(struct xfif *);
+int xfif_flow_put(struct xfif *, struct xflow_flow_put *);
+int xfif_flow_del(struct xfif *, struct xflow_flow *);
+int xfif_flow_get(const struct xfif *, struct xflow_flow *);
+int xfif_flow_get_multiple(const struct xfif *, struct xflow_flow[], size_t n);
+int xfif_flow_list(const struct xfif *, struct xflow_flow[], size_t n,
+                   size_t *n_out);
+int xfif_flow_list_all(const struct xfif *,
+                       struct xflow_flow **flowsp, size_t *np);
+
+int xfif_execute(struct xfif *, uint16_t in_port,
+                 const union xflow_action[], size_t n_actions,
+                 const struct ofpbuf *);
+
++/* Minimum number of bytes of headroom for a packet returned by xfif_recv().
++ * This headroom allows "struct xflow_msg" to be replaced by
++ * "struct ofp_packet_in" without copying the buffer.
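++ *
++ * For example, a received message can be converted in place (sketch):
++ *
++ *     ofpbuf_pull(msg, sizeof(struct xflow_msg));
++ *     ofpbuf_push_uninit(msg, sizeof(struct ofp_packet_in));
++ *
++ * After the pull, the headroom is XFIF_RECV_MSG_PADDING + sizeof(struct
++ * xflow_msg) == sizeof(struct ofp_packet_in), exactly enough for the push.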
*/ ++#define XFIF_RECV_MSG_PADDING (sizeof(struct ofp_packet_in) \ ++ - sizeof(struct xflow_msg)) ++BUILD_ASSERT_DECL(sizeof(struct ofp_packet_in) > sizeof(struct xflow_msg)); ++BUILD_ASSERT_DECL(XFIF_RECV_MSG_PADDING % 4 == 0); ++ +int xfif_recv_get_mask(const struct xfif *, int *listen_mask); +int xfif_recv_set_mask(struct xfif *, int listen_mask); +int xfif_get_sflow_probability(const struct xfif *, uint32_t *probability); +int xfif_set_sflow_probability(struct xfif *, uint32_t probability); +int xfif_recv(struct xfif *, struct ofpbuf **); +int xfif_recv_purge(struct xfif *); +void xfif_recv_wait(struct xfif *); + +void xfif_get_netflow_ids(const struct xfif *, + uint8_t *engine_type, uint8_t *engine_id); + +int xfif_queue_to_priority(const struct xfif *, uint32_t queue_id, + uint32_t *priority); + +#ifdef __cplusplus +} +#endif + +#endif /* xfif.h */ diff --cc ofproto/in-band.h index 55724669b,12f57147d..ba3cc3bb9 --- a/ofproto/in-band.h +++ b/ofproto/in-band.h @@@ -37,10 -37,10 +37,10 @@@ void in_band_set_remotes(struct in_ban void in_band_run(struct in_band *); void in_band_wait(struct in_band *); - bool in_band_msg_in_hook(struct in_band *, const flow_t *, + bool in_band_msg_in_hook(struct in_band *, const flow_t *, const struct ofpbuf *packet); bool in_band_rule_check(struct in_band *, const flow_t *, - const struct odp_actions *); + const struct xflow_actions *); void in_band_flushed(struct in_band *); #endif /* in-band.h */ diff --cc ofproto/ofproto.c index 90bdf393b,adc52827c..3c6ba6a83 --- a/ofproto/ofproto.c +++ b/ofproto/ofproto.c @@@ -63,28 -63,81 +63,28 @@@ VLOG_DEFINE_THIS_MODULE(ofproto #include "sflow_api.h" -enum { - TABLEID_HASH = 0, - TABLEID_CLASSIFIER = 1 -}; - -struct ofport { - struct netdev *netdev; - struct ofp_phy_port opp; /* In host byte order. */ -}; - -static void ofport_free(struct ofport *); -static void hton_ofp_phy_port(struct ofp_phy_port *); - -static int xlate_actions(const union ofp_action *in, size_t n_in, - const flow_t *flow, struct ofproto *ofproto, - const struct ofpbuf *packet, - struct odp_actions *out, tag_type *tags, - bool *may_set_up_flow, uint16_t *nf_output_iface); - -struct rule { - struct cls_rule cr; - +struct ofproto_rule { - uint64_t flow_cookie; /* Controller-issued identifier. + uint64_t flow_cookie; /* Controller-issued identifier. (Kept in network-byte order.) */ - uint16_t idle_timeout; /* In seconds from time of last use. */ - uint16_t hard_timeout; /* In seconds from time of creation. */ bool send_flow_removed; /* Send a flow removed message? */ - long long int used; /* Last-used time (0 if never used). */ - long long int created; /* Creation time. */ - uint64_t packet_count; /* Number of packets received. */ - uint64_t byte_count; /* Number of bytes received. */ - uint64_t accounted_bytes; /* Number of bytes passed to account_cb. */ tag_type tags; /* Tags (set only by hooks). */ - struct netflow_flow nf_flow; /* Per-flow NetFlow tracking data. */ - - /* If 'super' is non-NULL, this rule is a subrule, that is, it is an - * exact-match rule (having cr.wc.wildcards of 0) generated from the - * wildcard rule 'super'. In this case, 'list' is an element of the - * super-rule's list. - * - * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of - * a list of subrules. A super-rule with no wildcards (where - * cr.wc.wildcards is 0) will never have any subrules. */ - struct rule *super; - struct list list; +}; - /* OpenFlow actions. - * - * 'n_actions' is the number of elements in the 'actions' array. 
A single - * action may take up more more than one element's worth of space. - * - * A subrule has no actions (it uses the super-rule's actions). */ - int n_actions; - union ofp_action *actions; +static struct ofproto_rule * +ofproto_rule_cast(const struct wdp_rule *wdp_rule) +{ + return wdp_rule->client_data; +} - /* Datapath actions. - * - * A super-rule with wildcard fields never has ODP actions (since the - * datapath only supports exact-match flows). */ - bool installed; /* Installed in datapath? */ - bool may_install; /* True ordinarily; false if actions must - * be reassessed for every packet. */ - int n_odp_actions; - union odp_action *odp_actions; -}; +static void +ofproto_rule_init(struct wdp_rule *wdp_rule) +{ + wdp_rule->client_data = xzalloc(sizeof(struct ofproto_rule)); +} static inline bool -rule_is_hidden(const struct rule *rule) +rule_is_hidden(const struct wdp_rule *rule) { - /* Subrules are merely an implementation detail, so hide them from the - * controller. */ - if (rule->super != NULL) { - return true; - } - /* Rules with priority higher than UINT16_MAX are set up by ofproto itself * (e.g. by in-band control) and are intentionally hidden from the * controller. */ @@@ -1822,10 -3265,10 +1822,10 @@@ handle_flow_stats_request(struct ofprot cbdata.ofconn = ofconn; cbdata.out_port = fsr->out_port; cbdata.msg = start_stats_reply(osr, 1024); - cls_rule_from_match(&fsr->match, 0, false, 0, &target); - classifier_for_each_match(&p->cls, &target, - table_id_to_include(fsr->table_id), - flow_stats_cb, &cbdata); + flow_from_match(&fsr->match, 0, false, 0, &target); - wdp_flow_for_each_match(p->wdp, &target, ++ wdp_flow_for_each_match(p->wdp, &target, + table_id_to_include(fsr->table_id), + flow_stats_cb, &cbdata); queue_tx(cbdata.msg, ofconn, ofconn->reply_counter); return 0; } @@@ -1856,11 -3305,9 +1856,11 @@@ flow_stats_ds_cb(struct wdp_rule *rule ofp_print_match(results, &match, true); ofp_print_actions(results, &rule->actions->header, act_len); ds_put_cstr(results, "\n"); + + return 0; } - /* Adds a pretty-printed description of all flows to 'results', including + /* Adds a pretty-printed description of all flows to 'results', including * those marked hidden by secchan (e.g., by in-band control). */ void ofproto_get_all_flows(struct ofproto *p, struct ds *results) diff --cc ofproto/pktbuf.c index 63f349128,67adb5606..7e5981d5c --- a/ofproto/pktbuf.c +++ b/ofproto/pktbuf.c @@@ -153,8 -154,11 +155,11 @@@ pktbuf_get_null(void * datapath port number on which the packet was received in '*in_port'. The * caller becomes responsible for freeing the buffer. However, if 'id' * identifies a "null" packet buffer (created with pktbuf_get_null()), stores - * NULL in '*bufferp' and UINT16_max in '*in_port'. + * NULL in '*bufferp' and OFPP_NONE in '*in_port'. * + * A returned packet will have at least sizeof(struct ofp_packet_in) bytes of + * headroom. + * * On failure, stores NULL in in '*bufferp' and UINT16_MAX in '*in_port'. */ int pktbuf_retrieve(struct pktbuf *pb, uint32_t id, struct ofpbuf **bufferp, diff --cc ofproto/wdp-xflow.c index 32eace386,000000000..ce7ff2f1c mode 100644,000000..100644 --- a/ofproto/wdp-xflow.c +++ b/ofproto/wdp-xflow.c @@@ -1,2549 -1,0 +1,2659 @@@ +/* + * Copyright (c) 2010 Nicira Networks. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "wdp-xflow.h"
+
+#include <assert.h>
+#include <inttypes.h>
+
+#include "coverage.h"
+#include "dhcp.h"
+#include "mac-learning.h"
+#include "netdev.h"
+#include "netflow.h"
+#include "ofp-util.h"
+#include "ofpbuf.h"
+#include "ofproto.h"
+#include "openflow/nicira-ext.h"
+#include "openflow/openflow.h"
+#include "packets.h"
+#include "poll-loop.h"
+#include "port-array.h"
++#include "queue.h"
+#include "shash.h"
+#include "svec.h"
+#include "timeval.h"
+#include "util.h"
+#include "vconn.h"
+#include "wdp-provider.h"
+#include "xfif.h"
+#include "xflow-util.h"
+#include "vlog.h"
+#include "xtoxll.h"
+
+VLOG_DEFINE_THIS_MODULE(wdp_xflow)
+
+enum {
+    TABLEID_HASH = 0,
+    TABLEID_CLASSIFIER = 1
+};
+
+static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+
+/* Maximum number of rules. */
+#define WX_MAX_WILD     65536   /* Wildcarded rules. */
+#define WX_MAX_EXACT    1048576 /* Exact-match rules. */
+
+struct wx {
+    struct list list_node;
+    struct wdp wdp;
+    struct xfif *xfif;
+    struct classifier cls;
+    struct netdev_monitor *netdev_monitor;
+    struct port_array ports;    /* Index is xflow port nr;
+                                 * wdp_port->opp.port_no is OFP port nr. */
+    struct shash port_by_name;
+    long long int next_expiration;
++    int wdp_listen_mask;
+
+    /* Rules that might need to be revalidated. */
+    bool need_revalidate;          /* Revalidate all subrules? */
+    bool revalidate_all;           /* Revalidate all subrules and other rules? */
+    struct tag_set revalidate_set; /* Tag set of (sub)rules to revalidate. */
+
+    /* Hooks for ovs-vswitchd. */
+    const struct ofhooks *ofhooks;
+    void *aux;
+
+    /* Used by default ofhooks. */
+    struct mac_learning *ml;
++
++    /* List of "struct wdp_packet"s queued for the controller by
++     * execute_xflow_actions(). */
++#define MAX_CTL_PACKETS 50
++    struct list ctl_packets;
++    int n_ctl_packets;
+};
+
+static const struct ofhooks default_ofhooks;
+
+static struct list all_wx = LIST_INITIALIZER(&all_wx);
+
+static int wx_port_init(struct wx *);
+static void wx_port_process_change(struct wx *wx, int error, char *devname,
+                                   wdp_port_poll_cb_func *cb, void *aux);
+static void wx_port_refresh_groups(struct wx *);
+
++static void wx_purge_ctl_packets__(struct wx *);
++
+enum {
+    WX_GROUP_FLOOD = 0,
+    WX_GROUP_ALL = 1
+};
+
+static struct wx *
+wx_cast(const struct wdp *wdp)
+{
+    return CONTAINER_OF(wdp, struct wx, wdp);
+}
+
+static int
+wx_xlate_actions(struct wx *, const union ofp_action *, size_t n,
+                 const flow_t *flow, const struct ofpbuf *packet,
+                 tag_type *tags, struct xflow_actions *out,
+                 bool *may_set_up_flow);
+
+struct wx_rule {
+    struct wdp_rule wr;
+
+    uint64_t packet_count;      /* Number of packets received. */
+    uint64_t byte_count;        /* Number of bytes received. */
+    uint64_t accounted_bytes;   /* Number of bytes passed to account_cb. */
+    long long int used;         /* Last-used time (0 if never used). */
+    tag_type tags;              /* Tags (set only by hooks). */
+
+    /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
+     * exact-match rule (having cr.wc.wildcards of 0) generated from the
+     * wildcard rule 'super'. 
In this case, 'list' is an element of the + * super-rule's list. + * + * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of + * a list of subrules. A super-rule with no wildcards (where + * cr.wc.wildcards is 0) will never have any subrules. */ + struct wx_rule *super; + struct list list; + + /* Datapath actions. + * + * A super-rule with wildcard fields never has xflow actions (since the + * datapath only supports exact-match flows). */ + bool installed; /* Installed in datapath? */ + bool may_install; /* True ordinarily; false if actions must + * be reassessed for every packet. */ + int n_xflow_actions; + union xflow_action *xflow_actions; +}; + +static void wx_rule_destroy(struct wx *, struct wx_rule *); +static void wx_rule_update_actions(struct wx *, struct wx_rule *); +static void wx_rule_execute(struct wx *, struct wx_rule *, + struct ofpbuf *packet, const flow_t *); +static bool wx_rule_make_actions(struct wx *, struct wx_rule *, + const struct ofpbuf *packet); +static void wx_rule_install(struct wx *, struct wx_rule *, + struct wx_rule *displaced_rule); + +static struct wx_rule * +wx_rule_cast(const struct cls_rule *cls_rule) +{ + return cls_rule ? CONTAINER_OF(cls_rule, struct wx_rule, wr.cr) : NULL; +} + +/* Returns true if 'rule' is merely an implementation detail that should be + * hidden from the client. */ +static inline bool +wx_rule_is_hidden(const struct wx_rule *rule) +{ + return rule->super != NULL; +} + +static void +wx_rule_free(struct wx_rule *rule) +{ + wdp_rule_uninit(&rule->wr); + free(rule->xflow_actions); + free(rule); +} + +static void +wx_rule_account(struct wx *wx OVS_UNUSED, struct wx_rule *rule OVS_UNUSED, + uint64_t extra_bytes OVS_UNUSED) +{ + /* XXX call account_cb hook */ +} + +static void +wx_rule_post_uninstall(struct wx *wx, struct wx_rule *rule) +{ + struct wx_rule *super = rule->super; + + wx_rule_account(wx, rule, 0); + + /* XXX netflow expiration */ + + if (super) { + super->packet_count += rule->packet_count; + super->byte_count += rule->byte_count; + + /* Reset counters to prevent double counting if the rule ever gets + * reinstalled. */ + rule->packet_count = 0; + rule->byte_count = 0; + rule->accounted_bytes = 0; + + //XXX netflow_flow_clear(&rule->nf_flow); + } +} + +static long long int +xflow_flow_stats_to_msec(const struct xflow_flow_stats *stats) +{ + return (stats->used_sec + ? 
stats->used_sec * 1000 + stats->used_nsec / 1000000
+            : 0);
+}
+
+static void
+wx_rule_update_time(struct wx *wx OVS_UNUSED, struct wx_rule *rule,
+                    const struct xflow_flow_stats *stats)
+{
+    long long int used = xflow_flow_stats_to_msec(stats);
+    if (used > rule->used) {
+        rule->used = used;
+        if (rule->super && used > rule->super->used) {
+            rule->super->used = used;
+        }
+        //XXX netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
+    }
+}
+
+static void
+wx_rule_update_stats(struct wx *wx, struct wx_rule *rule,
+                     const struct xflow_flow_stats *stats)
+{
+    if (stats->n_packets) {
+        wx_rule_update_time(wx, rule, stats);
+        rule->packet_count += stats->n_packets;
+        rule->byte_count += stats->n_bytes;
+        /* XXX netflow_flow_update_flags(&rule->nf_flow, stats->tcp_flags); */
+    }
+}
+
+static void
+wx_rule_uninstall(struct wx *wx, struct wx_rule *rule)
+{
+    assert(!rule->wr.cr.flow.wildcards);
+    if (rule->installed) {
+        struct xflow_flow xflow_flow;
+
+        xflow_key_from_flow(&xflow_flow.key, &rule->wr.cr.flow);
+        xflow_flow.actions = NULL;
+        xflow_flow.n_actions = 0;
+        xflow_flow.flags = 0;
+        if (!xfif_flow_del(wx->xfif, &xflow_flow)) {
+            wx_rule_update_stats(wx, rule, &xflow_flow.stats);
+        }
+        rule->installed = false;
+
+        wx_rule_post_uninstall(wx, rule);
+    }
+}
+
+#if 0
+static bool
+is_controller_rule(struct wx_rule *rule)
+{
+    /* If the only action is send to the controller then don't report
+     * NetFlow expiration messages since it is just part of the control
+     * logic for the network and not real traffic. */
+
+    return (rule
+            && rule->super
+            && rule->super->n_actions == 1
+            && action_outputs_to_port(&rule->super->actions[0],
+                                      htons(OFPP_CONTROLLER)));
+}
+#endif
+
+static void
+wx_rule_remove(struct wx *wx, struct wx_rule *rule)
+{
+    if (rule->wr.cr.flow.wildcards) {
+        COVERAGE_INC(wx_del_wc_flow);
+        wx->need_revalidate = true;
+    } else {
+        wx_rule_uninstall(wx, rule);
+    }
+    classifier_remove(&wx->cls, &rule->wr.cr);
+    wx_rule_destroy(wx, rule);
+}
+
+static bool
+wx_rule_revalidate(struct wx *wx, struct wx_rule *rule)
+{
+    const flow_t *flow = &rule->wr.cr.flow;
+
+    COVERAGE_INC(wx_rule_revalidate);
+    if (rule->super) {
+        struct wx_rule *super;
+        super = wx_rule_cast(classifier_lookup_wild(&wx->cls, flow));
+        if (!super) {
+            wx_rule_remove(wx, rule);
+            return false;
+        } else if (super != rule->super) {
+            COVERAGE_INC(wx_revalidate_moved);
+            list_remove(&rule->list);
+            list_push_back(&super->list, &rule->list);
+            rule->super = super;
+            rule->wr.hard_timeout = super->wr.hard_timeout;
+            rule->wr.idle_timeout = super->wr.idle_timeout;
+            rule->wr.created = super->wr.created;
+            rule->used = 0;
+        }
+    }
+
+    wx_rule_update_actions(wx, rule);
+    return true;
+}
+
+/* Destroys 'rule'.  If 'rule' is a subrule, also removes it from its
+ * super-rule's list of subrules.  If 'rule' is a super-rule, also iterates
+ * through all of its subrules and revalidates them, destroying any that no
+ * longer have a super-rule (which is probably all of them).
+ *
+ * Before calling this function, the caller must have removed 'rule' from the
+ * classifier.  If 'rule' is an exact-match rule, the caller is also
+ * responsible for ensuring that it has been uninstalled from the datapath. 
*/ +static void +wx_rule_destroy(struct wx *wx, struct wx_rule *rule) +{ + if (!rule->super) { + struct wx_rule *subrule, *next; + LIST_FOR_EACH_SAFE (subrule, next, struct wx_rule, list, &rule->list) { + wx_rule_revalidate(wx, subrule); + } + } else { + list_remove(&rule->list); + } + wx_rule_free(rule); +} + +#if 0 +static bool +wx_rule_has_out_port(const struct wx_rule *rule, uint16_t out_port) +{ + const union ofp_action *oa; + struct actions_iterator i; + + if (out_port == htons(OFPP_NONE)) { + return true; + } + for (oa = actions_first(&i, rule->wr.actions, + rule->wr.n_actions); + oa; + oa = actions_next(&i)) { + if (oa->type == htons(OFPAT_OUTPUT) && oa->output.port == out_port) { + return true; + } + } + return false; +} +#endif + +/* Caller is responsible for initializing the 'cr' and ofp_table_id members of + * the returned rule. */ +static struct wx_rule * +wx_rule_create(struct wx_rule *super, + const union ofp_action *actions, size_t n_actions, + uint16_t idle_timeout, uint16_t hard_timeout) +{ + struct wx_rule *rule = xzalloc(sizeof *rule); + wdp_rule_init(&rule->wr, actions, n_actions); + rule->wr.idle_timeout = idle_timeout; + rule->wr.hard_timeout = hard_timeout; + rule->used = rule->wr.created; + rule->super = super; + if (super) { + list_push_back(&super->list, &rule->list); + } else { + list_init(&rule->list); + } +#if 0 + netflow_flow_clear(&rule->nf_flow); + netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created); +#endif + + return rule; +} + ++/* Executes, within 'wx', the 'n_actions' actions in 'actions' on 'packet', ++ * which arrived on 'in_port'. ++ * ++ * Takes ownership of 'packet'. */ ++static bool ++execute_xflow_actions(struct wx *wx, uint16_t in_port, ++ const union xflow_action *actions, size_t n_actions, ++ struct ofpbuf *packet) ++{ ++ if (n_actions == 1 && actions[0].type == XFLOWAT_CONTROLLER ++ && wx->n_ctl_packets < MAX_CTL_PACKETS) { ++ /* As an optimization, avoid a round-trip from userspace to kernel to ++ * userspace. This also avoids possibly filling up kernel packet ++ * buffers along the way. */ ++ struct wdp_packet *wdp_packet; ++ ++ if (!(wx->wdp_listen_mask & WDP_CHAN_ACTION)) { ++ return true; ++ } ++ ++ wdp_packet = xmalloc(sizeof *wdp_packet); ++ wdp_packet->channel = WDP_CHAN_ACTION; ++ wdp_packet->tun_id = 0; ++ wdp_packet->in_port = in_port; ++ wdp_packet->send_len = actions[0].controller.arg; ++ wdp_packet->payload = packet; ++ ++ list_push_back(&wx->ctl_packets, &wdp_packet->list); ++ ++ return true; ++ } else { ++ int error; ++ ++ error = xfif_execute(wx->xfif, in_port, actions, n_actions, packet); ++ ofpbuf_delete(packet); ++ return !error; ++ } ++} ++ +/* Executes the actions indicated by 'rule' on 'packet', which is in flow - * 'flow' and is considered to have arrived on xflow port 'in_port'. ++ * 'flow' and is considered to have arrived on xflow port 'in_port'. 'packet' ++ * must have at least sizeof(struct ofp_packet_in) bytes of headroom. + * + * The flow that 'packet' actually contains does not need to actually match + * 'rule'; the actions in 'rule' will be applied to it either way. Likewise, + * the packet and byte counters for 'rule' will be credited for the packet sent + * out whether or not the packet actually matches 'rule'. + * + * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow, + * the caller must already have accurately composed xflow actions for it given + * 'packet' using rule_make_actions(). 
If 'rule' is a wildcard rule, or if + * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this + * function will compose a set of xflow actions based on 'rule''s OpenFlow - * actions and apply them to 'packet'. */ ++ * actions and apply them to 'packet'. ++ * ++ * Takes ownership of 'packet'. */ +static void +wx_rule_execute(struct wx *wx, struct wx_rule *rule, + struct ofpbuf *packet, const flow_t *flow) +{ + const union xflow_action *actions; ++ struct xflow_flow_stats stats; + size_t n_actions; + struct xflow_actions a; + ++ assert(ofpbuf_headroom(packet) >= sizeof(struct ofp_packet_in)); ++ + /* Grab or compose the xflow actions. + * + * The special case for an exact-match 'rule' where 'flow' is not the + * rule's flow is important to avoid, e.g., sending a packet out its input + * port simply because the xflow actions were composed for the wrong + * scenario. */ + if (rule->wr.cr.flow.wildcards + || !flow_equal_headers(flow, &rule->wr.cr.flow)) + { + struct wx_rule *super = rule->super ? rule->super : rule; + if (wx_xlate_actions(wx, super->wr.actions, super->wr.n_actions, flow, + packet, NULL, &a, NULL)) { ++ ofpbuf_delete(packet); + return; + } + actions = a.actions; + n_actions = a.n_actions; + } else { + actions = rule->xflow_actions; + n_actions = rule->n_xflow_actions; + } + + /* Execute the xflow actions. */ - if (!xfif_execute(wx->xfif, flow->in_port, - actions, n_actions, packet)) { - struct xflow_flow_stats stats; - flow_extract_stats(flow, packet, &stats); ++ flow_extract_stats(flow, packet, &stats); ++ if (!execute_xflow_actions(wx, flow->in_port, ++ actions, n_actions, packet)) { + wx_rule_update_stats(wx, rule, &stats); + rule->used = time_msec(); + //XXX netflow_flow_update_time(wx->netflow, &rule->nf_flow, rule->used); + } +} + ++/* Inserts 'rule' into 'p''s flow table. ++ * ++ * If 'packet' is nonnull, takes ownership of 'packet', executes 'rule''s ++ * actions on it and credits the statistics for sending the packet to 'rule'. ++ * 'packet' must have at least sizeof(struct ofp_packet_in) bytes of ++ * headroom. */ +static void +wx_rule_insert(struct wx *wx, struct wx_rule *rule, struct ofpbuf *packet, + uint16_t in_port) +{ + struct wx_rule *displaced_rule; + + /* Insert the rule in the classifier. */ + displaced_rule = wx_rule_cast(classifier_insert(&wx->cls, &rule->wr.cr)); + if (!rule->wr.cr.flow.wildcards) { + wx_rule_make_actions(wx, rule, packet); + } + + /* Send the packet and credit it to the rule. */ + if (packet) { + flow_t flow; + flow_extract(packet, 0, in_port, &flow); + wx_rule_execute(wx, rule, packet, &flow); + } + + /* Install the rule in the datapath only after sending the packet, to + * avoid packet reordering. */ + if (rule->wr.cr.flow.wildcards) { + COVERAGE_INC(wx_add_wc_flow); + wx->need_revalidate = true; + } else { + wx_rule_install(wx, rule, displaced_rule); + } + + /* Free the rule that was displaced, if any. */ + if (displaced_rule) { + rule->wr.client_data = displaced_rule->wr.client_data; + wx_rule_destroy(wx, displaced_rule); + } +} + +static struct wx_rule * +wx_rule_create_subrule(struct wx *wx, struct wx_rule *rule, const flow_t *flow) +{ + struct wx_rule *subrule; + + subrule = wx_rule_create(rule, NULL, 0, + rule->wr.idle_timeout, + rule->wr.hard_timeout); + /* Subrules aren't really in any OpenFlow table, so don't bother with + * subrule->wr.ofp_table_id. 
*/ + COVERAGE_INC(wx_subrule_create); + cls_rule_from_flow(flow, &subrule->wr.cr); + classifier_insert_exact(&wx->cls, &subrule->wr.cr); + + return subrule; +} + +/* Returns true if the actions changed, false otherwise. */ +static bool +wx_rule_make_actions(struct wx *wx, struct wx_rule *rule, + const struct ofpbuf *packet) +{ + const struct wx_rule *super; + struct xflow_actions a; + size_t actions_len; + + assert(!rule->wr.cr.flow.wildcards); + + super = rule->super ? rule->super : rule; + wx_xlate_actions(wx, super->wr.actions, super->wr.n_actions, + &rule->wr.cr.flow, packet, + &rule->tags, &a, &rule->may_install); + + actions_len = a.n_actions * sizeof *a.actions; + if (rule->n_xflow_actions != a.n_actions + || memcmp(rule->xflow_actions, a.actions, actions_len)) { + COVERAGE_INC(wx_xflow_unchanged); + free(rule->xflow_actions); + rule->n_xflow_actions = a.n_actions; + rule->xflow_actions = xmemdup(a.actions, actions_len); + return true; + } else { + return false; + } +} + +static int +do_put_flow(struct wx *wx, struct wx_rule *rule, int flags, + struct xflow_flow_put *put) +{ + memset(&put->flow.stats, 0, sizeof put->flow.stats); + xflow_key_from_flow(&put->flow.key, &rule->wr.cr.flow); + put->flow.actions = rule->xflow_actions; + put->flow.n_actions = rule->n_xflow_actions; + put->flow.flags = 0; + put->flags = flags; + return xfif_flow_put(wx->xfif, put); +} + +static void +wx_rule_install(struct wx *wx, struct wx_rule *rule, struct wx_rule *displaced_rule) +{ + assert(!rule->wr.cr.flow.wildcards); + + if (rule->may_install) { + struct xflow_flow_put put; + if (!do_put_flow(wx, rule, + XFLOWPF_CREATE | XFLOWPF_MODIFY | XFLOWPF_ZERO_STATS, + &put)) { + rule->installed = true; + if (displaced_rule) { + wx_rule_update_stats(wx, displaced_rule, &put.flow.stats); + wx_rule_post_uninstall(wx, displaced_rule); + } + } + } else if (displaced_rule) { + wx_rule_uninstall(wx, displaced_rule); + } +} + +static void +wx_rule_reinstall(struct wx *wx, struct wx_rule *rule) +{ + if (rule->installed) { + struct xflow_flow_put put; + COVERAGE_INC(wx_dp_missed); + do_put_flow(wx, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY, &put); + } else { + wx_rule_install(wx, rule, NULL); + } +} + +static void +wx_rule_update_actions(struct wx *wx, struct wx_rule *rule) +{ + bool actions_changed; +#if 0 + uint16_t new_out_iface, old_out_iface; + + old_out_iface = rule->nf_flow.output_iface; +#endif + actions_changed = wx_rule_make_actions(wx, rule, NULL); + + if (rule->may_install) { + if (rule->installed) { + if (actions_changed) { + struct xflow_flow_put put; + do_put_flow(wx, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY + | XFLOWPF_ZERO_STATS, &put); + wx_rule_update_stats(wx, rule, &put.flow.stats); +#if 0 + /* Temporarily set the old output iface so that NetFlow + * messages have the correct output interface for the old + * stats. 
*/ + new_out_iface = rule->nf_flow.output_iface; + rule->nf_flow.output_iface = old_out_iface; +#endif + wx_rule_post_uninstall(wx, rule); + //rule->nf_flow.output_iface = new_out_iface; + } + } else { + wx_rule_install(wx, rule, NULL); + } + } else { + wx_rule_uninstall(wx, rule); + } +} + +static void +add_output_group_action(struct xflow_actions *actions, uint16_t group, + uint16_t *nf_output_iface) +{ + xflow_actions_add(actions, XFLOWAT_OUTPUT_GROUP)->output_group.group = group; + + if (group == WX_GROUP_ALL || group == WX_GROUP_FLOOD) { + *nf_output_iface = NF_OUT_FLOOD; + } +} + +static void +add_controller_action(struct xflow_actions *actions, uint16_t max_len) +{ + union xflow_action *a = xflow_actions_add(actions, XFLOWAT_CONTROLLER); + a->controller.arg = max_len; +} + +struct wx_xlate_ctx { + /* Input. */ + flow_t flow; /* Flow to which these actions correspond. */ + int recurse; /* Recursion level, via xlate_table_action. */ + struct wx *wx; + const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a + * null pointer if we are revalidating + * without a packet to refer to. */ + + /* Output. */ + struct xflow_actions *out; /* Datapath actions. */ + tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */ + bool may_set_up_flow; /* True ordinarily; false if the actions must + * be reassessed for every packet. */ + uint16_t nf_output_iface; /* Output interface index for NetFlow. */ +}; + +static void do_xlate_actions(const union ofp_action *in, size_t n_in, + struct wx_xlate_ctx *ctx); + +static void +add_output_action(struct wx_xlate_ctx *ctx, uint16_t port) +{ + const struct wdp_port *wdp_port = port_array_get(&ctx->wx->ports, port); + + if (wdp_port) { + if (wdp_port->opp.config & OFPPC_NO_FWD) { + /* Forwarding disabled on port. */ + return; + } + } else { + /* + * We don't have an ofport record for this port, but it doesn't hurt to + * allow forwarding to it anyhow. Maybe such a port will appear later + * and we're pre-populating the flow table. + */ + } + + xflow_actions_add(ctx->out, XFLOWAT_OUTPUT)->output.port = port; + //ctx->nf_output_iface = port; +} + +static struct wx_rule * +wx_rule_lookup_valid(struct wx *wx, const flow_t *flow) +{ + struct wx_rule *rule = wx_rule_cast(classifier_lookup(&wx->cls, flow)); + + /* The rule we found might not be valid, since we could be in need of + * revalidation. If it is not valid, don't return it. */ + if (rule + && rule->super + && wx->need_revalidate + && !wx_rule_revalidate(wx, rule)) { + COVERAGE_INC(wx_invalidated); + return NULL; + } + + return rule; +} + +static void +xlate_table_action(struct wx_xlate_ctx *ctx, uint16_t in_port) +{ + if (!ctx->recurse) { + uint16_t old_in_port; + struct wx_rule *rule; + + /* Look up a flow with 'in_port' as the input port. Then restore the + * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will + * have surprising behavior). 
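+         * (For example, an OFPP_IN_PORT output in the looked-up rule should
+         * still send the packet out its real ingress port, not the port
+         * substituted here for the lookup.)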
*/ + old_in_port = ctx->flow.in_port; + ctx->flow.in_port = in_port; + rule = wx_rule_lookup_valid(ctx->wx, &ctx->flow); + ctx->flow.in_port = old_in_port; + + if (rule) { + if (rule->super) { + rule = rule->super; + } + + ctx->recurse++; + do_xlate_actions(rule->wr.actions, rule->wr.n_actions, ctx); + ctx->recurse--; + } + } +} + +static void +xlate_output_action__(struct wx_xlate_ctx *ctx, + uint16_t port, uint16_t max_len) +{ + uint16_t xflow_port; + uint16_t prev_nf_output_iface = ctx->nf_output_iface; + + ctx->nf_output_iface = NF_OUT_DROP; + + switch (port) { + case OFPP_IN_PORT: + add_output_action(ctx, ctx->flow.in_port); + break; + case OFPP_TABLE: + xlate_table_action(ctx, ctx->flow.in_port); + break; + case OFPP_NORMAL: + if (!ctx->wx->ofhooks->normal_cb(&ctx->flow, ctx->packet, + ctx->out, ctx->tags, + &ctx->nf_output_iface, + ctx->wx->aux)) { + COVERAGE_INC(wx_uninstallable); + ctx->may_set_up_flow = false; + } + break; + + case OFPP_FLOOD: + add_output_group_action(ctx->out, WX_GROUP_FLOOD, + &ctx->nf_output_iface); + break; + case OFPP_ALL: + add_output_group_action(ctx->out, WX_GROUP_ALL, &ctx->nf_output_iface); + break; + case OFPP_CONTROLLER: + add_controller_action(ctx->out, max_len); + break; + case OFPP_LOCAL: + add_output_action(ctx, XFLOWP_LOCAL); + break; + default: + xflow_port = ofp_port_to_xflow_port(port); + if (xflow_port != ctx->flow.in_port) { + add_output_action(ctx, xflow_port); + } + break; + } + + if (prev_nf_output_iface == NF_OUT_FLOOD) { + ctx->nf_output_iface = NF_OUT_FLOOD; + } else if (ctx->nf_output_iface == NF_OUT_DROP) { + ctx->nf_output_iface = prev_nf_output_iface; + } else if (prev_nf_output_iface != NF_OUT_DROP && + ctx->nf_output_iface != NF_OUT_FLOOD) { + ctx->nf_output_iface = NF_OUT_MULTI; + } +} + +static void +xlate_output_action(struct wx_xlate_ctx *ctx, + const struct ofp_action_output *oao) +{ + xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len)); +} + +/* If the final xflow action in 'ctx' is "pop priority", drop it, as an + * optimization, because we're going to add another action that sets the + * priority immediately after, or because there are no actions following the + * pop. */ +static void +remove_pop_action(struct wx_xlate_ctx *ctx) +{ + size_t n = ctx->out->n_actions; + if (n > 0 && ctx->out->actions[n - 1].type == XFLOWAT_POP_PRIORITY) { + ctx->out->n_actions--; + } +} + +static void +xlate_enqueue_action(struct wx_xlate_ctx *ctx, + const struct ofp_action_enqueue *oae) +{ + uint16_t ofp_port, xflow_port; + uint32_t priority; + int error; + + error = xfif_queue_to_priority(ctx->wx->xfif, ntohl(oae->queue_id), + &priority); + if (error) { + /* Fall back to ordinary output action. */ + xlate_output_action__(ctx, ntohs(oae->port), 0); + return; + } + + /* Figure out xflow output port. */ + ofp_port = ntohs(oae->port); + if (ofp_port != OFPP_IN_PORT) { + xflow_port = ofp_port_to_xflow_port(ofp_port); + } else { + xflow_port = ctx->flow.in_port; + } + + /* Add xflow actions. */ + remove_pop_action(ctx); + xflow_actions_add(ctx->out, XFLOWAT_SET_PRIORITY)->priority.priority + = priority; + add_output_action(ctx, xflow_port); + xflow_actions_add(ctx->out, XFLOWAT_POP_PRIORITY); + + /* Update NetFlow output port. 
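+     * (One output interface so far is reported as-is; a second distinct
+     * output downgrades the flow to NF_OUT_MULTI, since a NetFlow record
+     * can carry only a single egress interface.)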
*/ + if (ctx->nf_output_iface == NF_OUT_DROP) { + ctx->nf_output_iface = xflow_port; + } else if (ctx->nf_output_iface != NF_OUT_FLOOD) { + ctx->nf_output_iface = NF_OUT_MULTI; + } +} + +static void +xlate_nicira_action(struct wx_xlate_ctx *ctx, + const struct nx_action_header *nah) +{ + const struct nx_action_resubmit *nar; + const struct nx_action_set_tunnel *nast; + union xflow_action *oa; + int subtype = ntohs(nah->subtype); + + assert(nah->vendor == htonl(NX_VENDOR_ID)); + switch (subtype) { + case NXAST_RESUBMIT: + nar = (const struct nx_action_resubmit *) nah; + xlate_table_action(ctx, ofp_port_to_xflow_port(ntohs(nar->in_port))); + break; + + case NXAST_SET_TUNNEL: + nast = (const struct nx_action_set_tunnel *) nah; + oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TUNNEL); + ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id; + break; + ++ case NXAST_DROP_SPOOFED_ARP: ++ if (ctx->flow.dl_type == htons(ETH_TYPE_ARP)) { ++ xflow_actions_add(ctx->out, XFLOWAT_DROP_SPOOFED_ARP); ++ } ++ break; ++ + /* If you add a new action here that modifies flow data, don't forget to + * update the flow key in ctx->flow at the same time. */ + + default: + VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype); + break; + } +} + +static void +do_xlate_actions(const union ofp_action *in, size_t n_in, + struct wx_xlate_ctx *ctx) +{ + struct actions_iterator iter; + const union ofp_action *ia; + const struct wdp_port *port; + + port = port_array_get(&ctx->wx->ports, ctx->flow.in_port); + if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) && + port->opp.config & (eth_addr_equals(ctx->flow.dl_dst, eth_addr_stp) + ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) { + /* Drop this flow. */ + return; + } + + for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) { + uint16_t type = ntohs(ia->type); + union xflow_action *oa; + + switch (type) { + case OFPAT_OUTPUT: + xlate_output_action(ctx, &ia->output); + break; + + case OFPAT_SET_VLAN_VID: + oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI); + oa->dl_tci.tci = ia->vlan_vid.vlan_vid & htons(VLAN_VID_MASK); + oa->dl_tci.mask = htons(VLAN_VID_MASK); + ctx->flow.dl_vlan = ia->vlan_vid.vlan_vid; + break; + + case OFPAT_SET_VLAN_PCP: + oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI); + oa->dl_tci.tci = htons((ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT) + & VLAN_PCP_MASK); + oa->dl_tci.mask = htons(VLAN_PCP_MASK); + + if (ctx->flow.dl_vlan == htons(OFP_VLAN_NONE)) { + ctx->flow.dl_vlan = htons(0); + } + ctx->flow.dl_vlan_pcp = ia->vlan_pcp.vlan_pcp; + break; + + case OFPAT_STRIP_VLAN: + xflow_actions_add(ctx->out, XFLOWAT_STRIP_VLAN); + ctx->flow.dl_vlan = htons(OFP_VLAN_NONE); + ctx->flow.dl_vlan_pcp = 0; + break; + + case OFPAT_SET_DL_SRC: + oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_SRC); + memcpy(oa->dl_addr.dl_addr, + ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN); + memcpy(ctx->flow.dl_src, + ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN); + break; + + case OFPAT_SET_DL_DST: + oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_DST); + memcpy(oa->dl_addr.dl_addr, + ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN); + memcpy(ctx->flow.dl_dst, + ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN); + break; + + case OFPAT_SET_NW_SRC: + oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_SRC); + ctx->flow.nw_src = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr; + break; + + case OFPAT_SET_NW_DST: + oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_DST); + ctx->flow.nw_dst = oa->nw_addr.nw_addr = 
ia->nw_addr.nw_addr; + break; + + case OFPAT_SET_NW_TOS: + oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_TOS); + ctx->flow.nw_tos = oa->nw_tos.nw_tos = ia->nw_tos.nw_tos; + break; + + case OFPAT_SET_TP_SRC: + oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_SRC); + ctx->flow.tp_src = oa->tp_port.tp_port = ia->tp_port.tp_port; + break; + + case OFPAT_SET_TP_DST: + oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_DST); + ctx->flow.tp_dst = oa->tp_port.tp_port = ia->tp_port.tp_port; + break; + + case OFPAT_ENQUEUE: + xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia); + break; + + case OFPAT_VENDOR: + xlate_nicira_action(ctx, (const struct nx_action_header *) ia); + break; + + default: + VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type); + break; + } + } +} + +/* Returns true if 'flow' and 'actions' may be set up as a flow in the kernel. + * This is true most of the time, but we don't allow flows that would prevent + * DHCP replies from being seen by the local port to be set up in the + * kernel. + * + * We only need this, strictly speaking, when in-band control is turned on. */ +static bool +wx_may_set_up(const flow_t *flow, const struct xflow_actions *actions) +{ + if (flow->dl_type == htons(ETH_TYPE_IP) + && flow->nw_proto == IP_TYPE_UDP + && flow->tp_src == htons(DHCP_SERVER_PORT) + && flow->tp_dst == htons(DHCP_CLIENT_PORT)) { + int i; + + for (i = 0; i < actions->n_actions; i++) { + const struct xflow_action_output *oao = &actions->actions[i].output; + if (oao->type == XFLOWAT_OUTPUT && oao->port == XFLOWP_LOCAL) { + return true; + } + } + return false; + } + + return true; +} + +static int +wx_xlate_actions(struct wx *wx, const union ofp_action *in, size_t n_in, + const flow_t *flow, const struct ofpbuf *packet, + tag_type *tags, struct xflow_actions *out, + bool *may_set_up_flow) +{ + tag_type no_tags = 0; + struct wx_xlate_ctx ctx; + COVERAGE_INC(wx_ofp2xflow); + xflow_actions_init(out); + ctx.flow = *flow; + ctx.recurse = 0; + ctx.wx = wx; + ctx.packet = packet; + ctx.out = out; + ctx.tags = tags ? 
tags : &no_tags; + ctx.may_set_up_flow = true; + ctx.nf_output_iface = NF_OUT_DROP; + do_xlate_actions(in, n_in, &ctx); + remove_pop_action(&ctx); + + if (may_set_up_flow) { + *may_set_up_flow = ctx.may_set_up_flow && wx_may_set_up(flow, out); + } +#if 0 + if (nf_output_iface) { + *nf_output_iface = ctx.nf_output_iface; + } +#endif + if (xflow_actions_overflow(out)) { + COVERAGE_INC(xflow_overflow); + xflow_actions_init(out); + return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY); + } + return 0; +} + +static void +update_used(struct wx *wx) +{ + struct xflow_flow *flows; + size_t n_flows; + size_t i; + int error; + + error = xfif_flow_list_all(wx->xfif, &flows, &n_flows); + if (error) { + return; + } + + for (i = 0; i < n_flows; i++) { + struct xflow_flow *f = &flows[i]; + struct wx_rule *rule; + flow_t flow; + + xflow_key_to_flow(&f->key, &flow); + rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, &flow)); + if (!rule || !rule->installed) { + COVERAGE_INC(wx_unexpected_rule); + xfif_flow_del(wx->xfif, f); + continue; + } + + wx_rule_update_time(wx, rule, &f->stats); + wx_rule_account(wx, rule, f->stats.n_bytes); + } + free(flows); +} + +static void +uninstall_idle_flow(struct wx *wx, struct wx_rule *rule) +{ + assert(rule->installed); + assert(!rule->wr.cr.flow.wildcards); + + if (rule->super) { + wx_rule_remove(wx, rule); + } else { + wx_rule_uninstall(wx, rule); + } +} + +static int +expire_rule(struct cls_rule *cls_rule, void *wx_) +{ + struct wx *wx = wx_; + struct wx_rule *rule = wx_rule_cast(cls_rule); + long long int hard_expire, idle_expire, expire, now; + + hard_expire = (rule->wr.hard_timeout + ? rule->wr.created + rule->wr.hard_timeout * 1000 + : LLONG_MAX); + idle_expire = (rule->wr.idle_timeout + && (rule->super || list_is_empty(&rule->list)) + ? rule->used + rule->wr.idle_timeout * 1000 + : LLONG_MAX); + expire = MIN(hard_expire, idle_expire); + + now = time_msec(); + if (now < expire) { + if (rule->installed && now >= rule->used + 5000) { + uninstall_idle_flow(wx, rule); + } else if (!rule->wr.cr.flow.wildcards) { + //XXX active_timeout(wx, rule); + } + + return 0; + } + + COVERAGE_INC(wx_expired); + + /* Update stats. This code will be a no-op if the rule expired + * due to an idle timeout. */ + if (rule->wr.cr.flow.wildcards) { + struct wx_rule *subrule, *next; + LIST_FOR_EACH_SAFE (subrule, next, struct wx_rule, list, &rule->list) { + wx_rule_remove(wx, subrule); + } + } else { + wx_rule_uninstall(wx, rule); + } + +#if 0 /* XXX */ + if (!wx_rule_is_hidden(rule)) { + send_flow_removed(wx, rule, now, + (now >= hard_expire + ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT)); + } +#endif + wx_rule_remove(wx, rule); + + return 0; +} + +struct revalidate_cbdata { + struct wx *wx; + bool revalidate_all; /* Revalidate all exact-match rules? */ + bool revalidate_subrules; /* Revalidate all exact-match subrules? */ + struct tag_set revalidate_set; /* Set of tags to revalidate. 
+static void
+update_used(struct wx *wx)
+{
+    struct xflow_flow *flows;
+    size_t n_flows;
+    size_t i;
+    int error;
+
+    error = xfif_flow_list_all(wx->xfif, &flows, &n_flows);
+    if (error) {
+        return;
+    }
+
+    for (i = 0; i < n_flows; i++) {
+        struct xflow_flow *f = &flows[i];
+        struct wx_rule *rule;
+        flow_t flow;
+
+        xflow_key_to_flow(&f->key, &flow);
+        rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, &flow));
+        if (!rule || !rule->installed) {
+            COVERAGE_INC(wx_unexpected_rule);
+            xfif_flow_del(wx->xfif, f);
+            continue;
+        }
+
+        wx_rule_update_time(wx, rule, &f->stats);
+        wx_rule_account(wx, rule, f->stats.n_bytes);
+    }
+    free(flows);
+}
+
+static void
+uninstall_idle_flow(struct wx *wx, struct wx_rule *rule)
+{
+    assert(rule->installed);
+    assert(!rule->wr.cr.flow.wildcards);
+
+    if (rule->super) {
+        wx_rule_remove(wx, rule);
+    } else {
+        wx_rule_uninstall(wx, rule);
+    }
+}
+
+static int
+expire_rule(struct cls_rule *cls_rule, void *wx_)
+{
+    struct wx *wx = wx_;
+    struct wx_rule *rule = wx_rule_cast(cls_rule);
+    long long int hard_expire, idle_expire, expire, now;
+
+    hard_expire = (rule->wr.hard_timeout
+                   ? rule->wr.created + rule->wr.hard_timeout * 1000
+                   : LLONG_MAX);
+    idle_expire = (rule->wr.idle_timeout
+                   && (rule->super || list_is_empty(&rule->list))
+                   ? rule->used + rule->wr.idle_timeout * 1000
+                   : LLONG_MAX);
+    expire = MIN(hard_expire, idle_expire);
+
+    now = time_msec();
+    if (now < expire) {
+        if (rule->installed && now >= rule->used + 5000) {
+            uninstall_idle_flow(wx, rule);
+        } else if (!rule->wr.cr.flow.wildcards) {
+            //XXX active_timeout(wx, rule);
+        }
+
+        return 0;
+    }
+
+    COVERAGE_INC(wx_expired);
+
+    /* Update stats.  This code will be a no-op if the rule expired
+     * due to an idle timeout. */
+    if (rule->wr.cr.flow.wildcards) {
+        struct wx_rule *subrule, *next;
+        LIST_FOR_EACH_SAFE (subrule, next, struct wx_rule, list, &rule->list) {
+            wx_rule_remove(wx, subrule);
+        }
+    } else {
+        wx_rule_uninstall(wx, rule);
+    }
+
+#if 0 /* XXX */
+    if (!wx_rule_is_hidden(rule)) {
+        send_flow_removed(wx, rule, now,
+                          (now >= hard_expire
+                           ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
+    }
+#endif
+    wx_rule_remove(wx, rule);
+
+    return 0;
+}
+
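expire_rule() mixes second-granularity timeouts with millisecond timestamps: a timeout of 0 disables that kind of expiration, and the rule expires at the earlier of the hard and idle deadlines. A standalone sketch of the same arithmetic, with simplified types:

/* Illustrative sketch of the expiration computation above; not the OVS
 * types.  Timeouts are in seconds, timestamps in milliseconds. */
#include <limits.h>

static long long int
expiration_time(long long int created_ms, long long int used_ms,
                int hard_timeout_s, int idle_timeout_s)
{
    long long int hard = hard_timeout_s
                         ? created_ms + hard_timeout_s * 1000LL : LLONG_MAX;
    long long int idle = idle_timeout_s
                         ? used_ms + idle_timeout_s * 1000LL : LLONG_MAX;
    return hard < idle ? hard : idle;
}

/* E.g., a rule created at t=10,000 ms and last used at t=42,000 ms with a
 * 60 s hard timeout and a 5 s idle timeout expires at
 * MIN(10,000 + 60,000, 42,000 + 5,000) = 47,000 ms. */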
+struct revalidate_cbdata {
+    struct wx *wx;
+    bool revalidate_all;           /* Revalidate all exact-match rules? */
+    bool revalidate_subrules;      /* Revalidate all exact-match subrules? */
+    struct tag_set revalidate_set; /* Set of tags to revalidate. */
+};
+
+static bool
+revalidate_rule(struct wx *wx, struct wx_rule *rule)
+{
+    const flow_t *flow = &rule->wr.cr.flow;
+
+    COVERAGE_INC(wx_revalidate_rule);
+    if (rule->super) {
+        struct wx_rule *super;
+        super = wx_rule_cast(classifier_lookup_wild(&wx->cls, flow));
+        if (!super) {
+            wx_rule_remove(wx, rule);
+            return false;
+        } else if (super != rule->super) {
+            COVERAGE_INC(wx_revalidate_moved);
+            list_remove(&rule->list);
+            list_push_back(&super->list, &rule->list);
+            rule->super = super;
+            rule->wr.hard_timeout = super->wr.hard_timeout;
+            rule->wr.idle_timeout = super->wr.idle_timeout;
+            rule->wr.created = super->wr.created;
+            rule->used = 0;
+        }
+    }
+
+    wx_rule_update_actions(wx, rule);
+    return true;
+}
+
+static int
+revalidate_cb(struct cls_rule *sub_, void *cbdata_)
+{
+    struct wx_rule *sub = wx_rule_cast(sub_);
+    struct revalidate_cbdata *cbdata = cbdata_;
+
+    if (cbdata->revalidate_all
+        || (cbdata->revalidate_subrules && sub->super)
+        || tag_set_intersects(&cbdata->revalidate_set, sub->tags)) {
+        revalidate_rule(cbdata->wx, sub);
+    }
+    return 0;
+}
+
+static void
+wx_run_one(struct wx *wx)
+{
+    if (time_msec() >= wx->next_expiration) {
+        COVERAGE_INC(wx_expiration);
+        wx->next_expiration = time_msec() + 1000;
+        update_used(wx);
+
+        classifier_for_each(&wx->cls, CLS_INC_ALL, expire_rule, wx);
+
+        /* XXX account_checkpoint_cb */
+    }
+
+    if (wx->need_revalidate || !tag_set_is_empty(&wx->revalidate_set)) {
+        struct revalidate_cbdata cbdata;
+        cbdata.wx = wx;
+        cbdata.revalidate_all = wx->revalidate_all;
+        cbdata.revalidate_subrules = wx->need_revalidate;
+        cbdata.revalidate_set = wx->revalidate_set;
+        tag_set_init(&wx->revalidate_set);
+        COVERAGE_INC(wx_revalidate);
+        classifier_for_each(&wx->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
+        wx->need_revalidate = false;
+    }
+}
+
+static void
+wx_run(void)
+{
+    struct wx *wx;
+
+    LIST_FOR_EACH (wx, struct wx, list_node, &all_wx) {
+        wx_run_one(wx);
+    }
+    xf_run();
+}
+
+static void
+wx_wait_one(struct wx *wx)
+{
+    if (wx->need_revalidate || !tag_set_is_empty(&wx->revalidate_set)) {
+        poll_immediate_wake();
+    } else if (wx->next_expiration != LLONG_MAX) {
+        poll_timer_wait_until(wx->next_expiration);
+    }
+}
+
+static void
+wx_wait(void)
+{
+    struct wx *wx;
+
+    LIST_FOR_EACH (wx, struct wx, list_node, &all_wx) {
+        wx_wait_one(wx);
+    }
+    xf_wait();
+}
+
+static int wx_flow_flush(struct wdp *);
+
+static int
+wx_enumerate(const struct wdp_class *wdp_class, struct svec *all_wdps)
+{
+    struct svec names = SVEC_EMPTY_INITIALIZER;
+    int error = xf_enumerate_names(wdp_class->type, &names);
+    svec_move(all_wdps, &names);
+    return error;
+}
+
+static int
+wx_open(const struct wdp_class *wdp_class, const char *name, bool create,
+        struct wdp **wdpp)
+{
+    struct xfif *xfif;
+    int error;
+
+    error = (create
+             ? xfif_create_and_open(name, wdp_class->type, &xfif)
+             : xfif_open(name, wdp_class->type, &xfif));
+    if (!error) {
+        struct wx *wx;
+
+        wx = xzalloc(sizeof *wx);
+        list_push_back(&all_wx, &wx->list_node);
+        wdp_init(&wx->wdp, wdp_class, name, 0, 0);
+        wx->xfif = xfif;
+        classifier_init(&wx->cls);
+        wx->netdev_monitor = netdev_monitor_create();
+        port_array_init(&wx->ports);
+        shash_init(&wx->port_by_name);
+        wx->next_expiration = time_msec() + 1000;
+        tag_set_init(&wx->revalidate_set);
+
+        wx_port_init(wx);
+
+        wx->ofhooks = &default_ofhooks;
+        wx->aux = wx;
+        wx->ml = mac_learning_create();
+
++        list_init(&wx->ctl_packets);
++
+        *wdpp = &wx->wdp;
+    }
+
+    return error;
+}
+
+static void
+wx_close(struct wdp *wdp)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    wx_flow_flush(wdp);
+    xfif_close(wx->xfif);
+    classifier_destroy(&wx->cls);
+    netdev_monitor_destroy(wx->netdev_monitor);
+    list_remove(&wx->list_node);
+    mac_learning_destroy(wx->ml);
+    free(wx);
+}
+
+static int
+wx_get_all_names(const struct wdp *wdp, struct svec *all_names)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    return xfif_get_all_names(wx->xfif, all_names);
+}
+
+static int
+wx_destroy(struct wdp *wdp)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    return xfif_delete(wx->xfif);
+}
+
+static int
+wx_get_features(const struct wdp *wdp, struct ofpbuf **featuresp)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct ofp_switch_features *osf;
+    struct ofpbuf *buf;
+    unsigned int port_no;
+    struct wdp_port *port;
+
+    buf = ofpbuf_new(sizeof *osf);
+    osf = ofpbuf_put_zeros(buf, sizeof *osf);
+    osf->n_tables = 2;
+    osf->capabilities = htonl(OFPC_ARP_MATCH_IP);
+    osf->actions = htonl((1u << OFPAT_OUTPUT) |
+                         (1u << OFPAT_SET_VLAN_VID) |
+                         (1u << OFPAT_SET_VLAN_PCP) |
+                         (1u << OFPAT_STRIP_VLAN) |
+                         (1u << OFPAT_SET_DL_SRC) |
+                         (1u << OFPAT_SET_DL_DST) |
+                         (1u << OFPAT_SET_NW_SRC) |
+                         (1u << OFPAT_SET_NW_DST) |
+                         (1u << OFPAT_SET_NW_TOS) |
+                         (1u << OFPAT_SET_TP_SRC) |
+                         (1u << OFPAT_SET_TP_DST) |
+                         (1u << OFPAT_ENQUEUE));
+
+    PORT_ARRAY_FOR_EACH (port, &wx->ports, port_no) {
+        hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
+    }
+
+    *featuresp = buf;
+    return 0;
+}
+
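The 'actions' field built above is a bitmap in which bit N is set iff OpenFlow action type N is supported, stored in network byte order. A standalone sketch of how a peer would test it (the helper name is illustrative, not part of the diff):

/* Illustrative sketch: decode the advertised action bitmap from
 * ofp_switch_features.  Bit N set means action type N is supported. */
#include <stdbool.h>
#include <stdint.h>
#include <arpa/inet.h>

static bool
action_supported(uint32_t actions_be, unsigned int ofpat_type)
{
    return (ntohl(actions_be) & (1u << ofpat_type)) != 0;
}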
+static int
+count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
+{
+    struct wx_rule *rule = wx_rule_cast(cls_rule);
+    int *n_subrules = n_subrules_;
+
+    if (rule->super) {
+        (*n_subrules)++;
+    }
+    return 0;
+}
+
+static int
+wx_get_stats(const struct wdp *wdp, struct wdp_stats *stats)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct xflow_stats xflow_stats;
+    int error;
+
+    error = xfif_get_xf_stats(wx->xfif, &xflow_stats);
+    stats->max_ports = xflow_stats.max_ports;
+    return error;
+}
+
+static int
+wx_get_table_stats(const struct wdp *wdp, struct ofpbuf *stats)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct xflow_stats xflow_stats;
+    struct ofp_table_stats *exact, *wild;
+    int n_subrules;
+
+    xfif_get_xf_stats(wx->xfif, &xflow_stats);
+    /* XXX should pass up errors, but there are no appropriate OpenFlow error
+     * codes. */
+
+    n_subrules = 0;
+    classifier_for_each(&wx->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
+
+    exact = ofpbuf_put_zeros(stats, sizeof *exact);
+    exact->table_id = TABLEID_HASH;
+    strcpy(exact->name, "exact");
+    exact->wildcards = htonl(0);
+    exact->max_entries = htonl(MIN(WX_MAX_EXACT, xflow_stats.max_capacity));
+    exact->active_count = htonl(classifier_count_exact(&wx->cls) - n_subrules);
+    exact->lookup_count = htonll(xflow_stats.n_hit + xflow_stats.n_missed);
+    exact->matched_count = htonll(xflow_stats.n_hit);
+
+    wild = ofpbuf_put_zeros(stats, sizeof *exact);
+    wild->table_id = TABLEID_CLASSIFIER;
+    strcpy(wild->name, "classifier");
+    wild->wildcards = htonl(OVSFW_ALL);
+    wild->max_entries = htonl(WX_MAX_WILD);
+    wild->active_count = htonl(classifier_count_wild(&wx->cls));
+    wild->lookup_count = htonll(0);     /* XXX */
+    wild->matched_count = htonll(0);    /* XXX */
+
+    return 0;
+}
+
+static int
+wx_get_drop_frags(const struct wdp *wdp, bool *drop_frags)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    return xfif_get_drop_frags(wx->xfif, drop_frags);
+}
+
+static int
+wx_set_drop_frags(struct wdp *wdp, bool drop_frags)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    return xfif_set_drop_frags(wx->xfif, drop_frags);
+}
+
+static int
+wx_port_add(struct wdp *wdp, const char *devname,
+            bool internal, uint16_t *port_no)
+{
+    struct wx *wx = wx_cast(wdp);
+    uint16_t xflow_flags = internal ? XFLOW_PORT_INTERNAL : 0;
+    return xfif_port_add(wx->xfif, devname, xflow_flags, port_no);
+}
+
+static int
+wx_port_del(struct wdp *wdp, uint16_t port_no)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    return xfif_port_del(wx->xfif, port_no);
+}
+
+static int
+wx_answer_port_query(const struct wdp_port *port, struct wdp_port *portp)
+{
+    if (port) {
+        wdp_port_copy(portp, port);
+        return 0;
+    } else {
+        return ENOENT;
+    }
+}
+
+static int
+wx_port_query_by_number(const struct wdp *wdp, uint16_t port_no,
+                        struct wdp_port *portp)
+{
+    struct wx *wx = wx_cast(wdp);
+    const struct wdp_port *port;
+
+    port = port_array_get(&wx->ports, ofp_port_to_xflow_port(port_no));
+    return wx_answer_port_query(port, portp);
+}
+
+static int
+wx_port_query_by_name(const struct wdp *wdp, const char *devname,
+                      struct wdp_port *portp)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    return wx_answer_port_query(shash_find_data(&wx->port_by_name, devname),
+                                portp);
+}
+
+static int
+wx_port_set_config(struct wdp *wdp, uint16_t port_no, uint32_t config)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct wdp_port *port;
+    uint32_t changes;
+
+    port = port_array_get(&wx->ports, ofp_port_to_xflow_port(port_no));
+    if (!port) {
+        return ENOENT;
+    }
+    changes = config ^ port->opp.config;
+
+    if (changes & OFPPC_PORT_DOWN) {
+        int error;
+        if (config & OFPPC_PORT_DOWN) {
+            error = netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
+        } else {
+            error = netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
+        }
+        if (!error) {
+            port->opp.config ^= OFPPC_PORT_DOWN;
+        }
+    }
+
+#define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
+    if (changes & REVALIDATE_BITS) {
+        COVERAGE_INC(wx_costly_flags);
+        port->opp.config ^= changes & REVALIDATE_BITS;
+        wx->need_revalidate = true;
+    }
+#undef REVALIDATE_BITS
+
+    if (changes & OFPPC_NO_FLOOD) {
+        port->opp.config ^= OFPPC_NO_FLOOD;
+        wx_port_refresh_groups(wx);
+    }
+
+    if (changes & OFPPC_NO_PACKET_IN) {
+        port->opp.config ^= OFPPC_NO_PACKET_IN;
+    }
+
+    return 0;
+}
+
+static int
+wx_port_list(const struct wdp *wdp, struct wdp_port **portsp, size_t *n_portsp)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct wdp_port *ports, *port;
+    unsigned int port_no;
+    size_t n_ports, i;
+
+    *n_portsp = n_ports = port_array_count(&wx->ports);
+    *portsp = ports = xmalloc(n_ports * sizeof *ports);
+    i = 0;
+    PORT_ARRAY_FOR_EACH (port, &wx->ports, port_no) {
+        wdp_port_copy(&ports[i++], port);
+    }
+    assert(i == n_ports);
+
+    return 0;
+}
+
+static int
+wx_port_poll(struct wdp *wdp, wdp_port_poll_cb_func *cb, void *aux)
+{
+    struct wx *wx = wx_cast(wdp);
+    char *devname;
+    int retval;
+    int error;
+
+    retval = 0;
+    while ((error = xfif_port_poll(wx->xfif, &devname)) != EAGAIN) {
+        wx_port_process_change(wx, error, devname, cb, aux);
+        if (error && error != ENOBUFS) {
+            retval = error;
+        }
+    }
+    while ((error = netdev_monitor_poll(wx->netdev_monitor,
+                                        &devname)) != EAGAIN) {
+        wx_port_process_change(wx, error, devname, cb, aux);
+        if (error && error != ENOBUFS) {
+            retval = error;
+        }
+    }
+    return retval;
+}
+
+static int
+wx_port_poll_wait(const struct wdp *wdp)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    xfif_port_poll_wait(wx->xfif);
+    netdev_monitor_poll_wait(wx->netdev_monitor);
+    return 0;
+}
+
+static struct wdp_rule *
+wx_flow_get(const struct wdp *wdp, const flow_t *flow, unsigned int include)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct wx_rule *rule;
+    int table_id;
+
+    table_id = flow->wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
+    if (!(include & (1u << table_id))) {
+        return NULL;
+    }
+
+    rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, flow));
+    return rule && !wx_rule_is_hidden(rule) ? &rule->wr : NULL;
+}
+
+static struct wdp_rule *
+wx_flow_match(const struct wdp *wdp, const flow_t *flow)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct wx_rule *rule;
+
+    rule = wx_rule_cast(classifier_lookup(&wx->cls, flow));
+    if (rule) {
+        if (wx_rule_is_hidden(rule)) {
+            rule = rule->super;
+        }
+        return &rule->wr;
+    } else {
+        return NULL;
+    }
+}
+
+struct wx_for_each_thunk_aux {
+    wdp_flow_cb_func *client_callback;
+    void *client_aux;
+};
+
+static int
+wx_for_each_thunk(struct cls_rule *cls_rule, void *aux_)
+{
+    struct wx_for_each_thunk_aux *aux = aux_;
+    struct wx_rule *rule = wx_rule_cast(cls_rule);
+
+    if (!wx_rule_is_hidden(rule)) {
+        return aux->client_callback(&rule->wr, aux->client_aux);
+    }
+    return 0;
+}
+
+static int
+wx_flow_for_each_match(const struct wdp *wdp, const flow_t *target,
+                       unsigned int include,
+                       wdp_flow_cb_func *client_callback, void *client_aux)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct wx_for_each_thunk_aux aux;
+    int cls_include;
+
+    cls_include = 0;
+    if (include & (1u << TABLEID_HASH)) {
+        cls_include |= CLS_INC_EXACT;
+    }
+    if (include & (1u << TABLEID_CLASSIFIER)) {
+        cls_include |= CLS_INC_WILD;
+    }
+
+    aux.client_callback = client_callback;
+    aux.client_aux = client_aux;
+    return classifier_for_each_match(&wx->cls, target, cls_include,
+                                     wx_for_each_thunk, &aux);
+}
+
+/* Obtains statistic counters for 'rule' within 'wx' and stores them into
+ * '*stats'.  If 'rule' is a wildcarded rule, the returned statistics include
+ * statistics for all of 'rule''s subrules. */
+static void
+query_stats(struct wx *wx, struct wx_rule *rule, struct wdp_flow_stats *stats)
+{
+    struct wx_rule *subrule;
+    struct xflow_flow *xflow_flows;
+    size_t n_xflow_flows;
+
+    /* Start from historical data for 'rule' itself that are no longer tracked
+     * by the datapath.  This counts, for example, subrules that have
+     * expired. */
+    stats->n_packets = rule->packet_count;
+    stats->n_bytes = rule->byte_count;
+    stats->inserted = rule->wr.created;
+    stats->used = LLONG_MIN;
+    stats->tcp_flags = 0;
+    stats->ip_tos = 0;
+
+    /* Prepare to ask the datapath for statistics on 'rule', or if it is
+     * wildcarded then on all of its subrules.
+     *
+     * Also, add any statistics that are not tracked by the datapath for each
+     * subrule.  This includes, for example, statistics for packets that were
+     * executed "by hand" by ofproto via xfif_execute() but must be accounted
+     * to a flow. */
+    n_xflow_flows = rule->wr.cr.flow.wildcards ? list_size(&rule->list) : 1;
+    xflow_flows = xzalloc(n_xflow_flows * sizeof *xflow_flows);
+    if (rule->wr.cr.flow.wildcards) {
+        size_t i = 0;
+        LIST_FOR_EACH (subrule, struct wx_rule, list, &rule->list) {
+            xflow_key_from_flow(&xflow_flows[i++].key, &subrule->wr.cr.flow);
+            stats->n_packets += subrule->packet_count;
+            stats->n_bytes += subrule->byte_count;
+        }
+    } else {
+        xflow_key_from_flow(&xflow_flows[0].key, &rule->wr.cr.flow);
+    }
+
+    /* Fetch up-to-date statistics from the datapath and add them in. */
+    if (!xfif_flow_get_multiple(wx->xfif, xflow_flows, n_xflow_flows)) {
+        size_t i;
+        for (i = 0; i < n_xflow_flows; i++) {
+            struct xflow_flow *xflow_flow = &xflow_flows[i];
+            long long int used;
+
+            stats->n_packets += xflow_flow->stats.n_packets;
+            stats->n_bytes += xflow_flow->stats.n_bytes;
+            used = xflow_flow_stats_to_msec(&xflow_flow->stats);
+            if (used > stats->used) {
+                stats->used = used;
+            }
+            stats->tcp_flags |= xflow_flow->stats.tcp_flags;
+        }
+    }
+    free(xflow_flows);
+}
+
+static int
+wx_flow_get_stats(const struct wdp *wdp,
+                  const struct wdp_rule *wdp_rule,
+                  struct wdp_flow_stats *stats)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
+
+    query_stats(wx, rule, stats);
+    return 0;
+}
+
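query_stats() follows a simple aggregation pattern: start from the counters retained in user space (which cover expired subrules and hand-executed packets), add the datapath's live counters, and keep the most recent "last used" timestamp. A standalone sketch of the merge step, with simplified stand-in types:

/* Illustrative sketch of the per-flow statistics merge above; 'struct
 * mini_stats' is a hypothetical stand-in for struct wdp_flow_stats. */
#include <stdint.h>

struct mini_stats {
    uint64_t n_packets;
    uint64_t n_bytes;
    long long int used_ms;      /* Most recent use, in milliseconds. */
};

static void
mini_stats_merge(struct mini_stats *total, const struct mini_stats *live)
{
    total->n_packets += live->n_packets;
    total->n_bytes += live->n_bytes;
    if (live->used_ms > total->used_ms) {
        total->used_ms = live->used_ms;
    }
}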
+static bool
+wx_flow_overlaps(const struct wdp *wdp, const flow_t *flow)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    /* XXX overlap with a subrule? */
+    return classifier_rule_overlaps(&wx->cls, flow);
+}
+
+static int
+wx_flow_put(struct wdp *wdp, const struct wdp_flow_put *put,
+            struct wdp_flow_stats *old_stats, struct wdp_rule **rulep)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct wx_rule *rule;
+    uint8_t ofp_table_id;
+
+    ofp_table_id = put->flow->wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
+    if (put->ofp_table_id != 0xff && put->ofp_table_id != ofp_table_id) {
+        return ofp_mkerr_nicira(OFPET_FLOW_MOD_FAILED, NXFMFC_BAD_TABLE_ID);
+    }
+
+    rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, put->flow));
+    if (rule && wx_rule_is_hidden(rule)) {
+        rule = NULL;
+    }
+
+    if (rule) {
+        if (!(put->flags & WDP_PUT_MODIFY)) {
+            return EEXIST;
+        }
+    } else {
+        if (!(put->flags & WDP_PUT_CREATE)) {
+            return EINVAL;
+        }
+        if ((put->flow->wildcards
+             ? classifier_count_wild(&wx->cls) >= WX_MAX_WILD
+             : classifier_count_exact(&wx->cls) >= WX_MAX_EXACT)) {
+            /* XXX subrules should not count against exact-match limit */
+            return ofp_mkerr(OFPET_FLOW_MOD_FAILED, OFPFMFC_ALL_TABLES_FULL);
+        }
+    }
+
+    rule = wx_rule_create(NULL, put->actions, put->n_actions,
+                          put->idle_timeout, put->hard_timeout);
+    cls_rule_from_flow(put->flow, &rule->wr.cr);
+    rule->wr.ofp_table_id = ofp_table_id;
+    wx_rule_insert(wx, rule, NULL, 0);
+
+    if (old_stats) {
+        /* XXX */
+        memset(old_stats, 0, sizeof *old_stats);
+    }
+    if (rulep) {
+        *rulep = &rule->wr;
+    }
+
+    return 0;
+}
+
+static int
+wx_flow_delete(struct wdp *wdp, struct wdp_rule *wdp_rule,
+               struct wdp_flow_stats *final_stats)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
+
+    wx_rule_remove(wx, rule);
+    if (final_stats) {
+        memset(final_stats, 0, sizeof *final_stats);  /* XXX */
+    }
+    return 0;
+}
+
+static int
+wx_flush_rule(struct cls_rule *cls_rule, void *wx_)
+{
+    struct wx_rule *rule = wx_rule_cast(cls_rule);
+    struct wx *wx = wx_;
+
+    /* Mark the flow as not installed, even though it might really be
+     * installed, so that wx_rule_remove() doesn't bother trying to uninstall
+     * it.  There is no point in uninstalling it individually since we are
+     * about to blow away all the flows with xfif_flow_flush(). */
+    rule->installed = false;
+
+    wx_rule_remove(wx, rule);
+
+    return 0;
+}
+
+static int
+wx_flow_flush(struct wdp *wdp)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    COVERAGE_INC(wx_flow_flush);
+    classifier_for_each(&wx->cls, CLS_INC_ALL, wx_flush_rule, wx);
+    xfif_flow_flush(wx->xfif);
+    return 0;
+}
+
+static int
+wx_execute(struct wdp *wdp, uint16_t in_port,
+           const union ofp_action actions[], int n_actions,
+           const struct ofpbuf *packet)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct xflow_actions xflow_actions;
+    flow_t flow;
+    int error;
+
+    flow_extract((struct ofpbuf *) packet, 0, in_port, &flow);
+    error = wx_xlate_actions(wx, actions, n_actions, &flow, packet,
+                             NULL, &xflow_actions, NULL);
+    if (error) {
+        return error;
+    }
+    return xfif_execute(wx->xfif, ofp_port_to_xflow_port(in_port),
+                        xflow_actions.actions, xflow_actions.n_actions,
+                        packet);
+}
+
+static int
+wx_flow_inject(struct wdp *wdp, struct wdp_rule *wdp_rule,
+               uint16_t in_port, const struct ofpbuf *packet)
+{
+    struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
+    int error;
+
+    error = wx_execute(wdp, in_port, rule->wr.actions, rule->wr.n_actions,
+                       packet);
+    if (!error) {
+        rule->packet_count++;
+        rule->byte_count += packet->size;
+        rule->used = time_msec();
+    }
+    return error;
+}
+
+static int
+wx_recv_get_mask(const struct wdp *wdp, int *listen_mask)
+{
+    struct wx *wx = wx_cast(wdp);
+    int xflow_listen_mask;
+    int error;
+
+    error = xfif_recv_get_mask(wx->xfif, &xflow_listen_mask);
+    if (!error) {
+        *listen_mask = 0;
+        if (xflow_listen_mask & XFLOWL_MISS) {
+            *listen_mask |= 1 << WDP_CHAN_MISS;
+        }
+        if (xflow_listen_mask & XFLOWL_ACTION) {
+            *listen_mask |= 1 << WDP_CHAN_ACTION;
+        }
+        if (xflow_listen_mask & XFLOWL_SFLOW) {
+            *listen_mask |= 1 << WDP_CHAN_SFLOW;
+        }
+    }
+    return error;
+}
+
+static int
+wx_recv_set_mask(struct wdp *wdp, int listen_mask)
+{
+    struct wx *wx = wx_cast(wdp);
+    int xflow_listen_mask;
+
++    wx->wdp_listen_mask = listen_mask;
++
+    xflow_listen_mask = 0;
+    if (listen_mask & (1 << WDP_CHAN_MISS)) {
+        xflow_listen_mask |= XFLOWL_MISS;
+    }
+    if (listen_mask & (1 << WDP_CHAN_ACTION)) {
+        xflow_listen_mask |= XFLOWL_ACTION;
++    } else {
++        wx_purge_ctl_packets__(wx);
+    }
+    if (listen_mask & (1 << WDP_CHAN_SFLOW)) {
+        xflow_listen_mask |= XFLOWL_SFLOW;
+    }
+
+    return xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
+}
+
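wx_recv_get_mask() and wx_recv_set_mask() translate between two bitmasks whose bits are numbered differently (WDP_CHAN_* channel bits versus XFLOWL_* datapath bits), one bit at a time. A standalone sketch of that pattern, with an illustrative table type:

/* Illustrative sketch of per-bit mask translation; the table type is
 * hypothetical, not part of the diff. */
struct bit_map {
    int from;   /* Bit value in the source mask. */
    int to;     /* Corresponding bit value in the destination mask. */
};

static int
translate_mask(int src, const struct bit_map map[], int n)
{
    int dst = 0;
    int i;

    for (i = 0; i < n; i++) {
        if (src & map[i].from) {
            dst |= map[i].to;
        }
    }
    return dst;
}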
+static int
+wx_get_sflow_probability(const struct wdp *wdp, uint32_t *probability)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    return xfif_get_sflow_probability(wx->xfif, probability);
+}
+
+static int
+wx_set_sflow_probability(struct wdp *wdp, uint32_t probability)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    return xfif_set_sflow_probability(wx->xfif, probability);
+}
+
+static int
+wx_translate_xflow_msg(struct xflow_msg *msg, struct ofpbuf *payload,
+                       struct wdp_packet *packet)
+{
+    packet->in_port = xflow_port_to_ofp_port(msg->port);
+    packet->send_len = 0;
+    packet->tun_id = 0;
+
+    switch (msg->type) {
+    case _XFLOWL_MISS_NR:
+        packet->channel = WDP_CHAN_MISS;
+        packet->payload = payload;
+        packet->tun_id = msg->arg;
+        return 0;
+
+    case _XFLOWL_ACTION_NR:
+        packet->channel = WDP_CHAN_ACTION;
+        packet->payload = payload;
+        packet->send_len = msg->arg;
+        return 0;
+
+    case _XFLOWL_SFLOW_NR:
+        /* XXX */
+        ofpbuf_delete(payload);
+        return ENOSYS;
+
+    default:
+        VLOG_WARN_RL(&rl, "received XFLOW message of unexpected type %"PRIu32,
+                     msg->type);
+        ofpbuf_delete(payload);
+        return ENOSYS;
+    }
+}
+
+static const uint8_t *
+get_local_mac(const struct wx *wx)
+{
+    const struct wdp_port *port = port_array_get(&wx->ports, XFLOWP_LOCAL);
+    return port ? port->opp.hw_addr : NULL;
+}
+
+/* Returns true if 'packet' is a DHCP reply to the local port.  Such a reply
+ * should be sent to the local port regardless of the flow table.
+ *
+ * We only need this, strictly speaking, when in-band control is turned on. */
+static bool
+wx_is_local_dhcp_reply(const struct wx *wx,
+                       const flow_t *flow, const struct ofpbuf *packet)
+{
+    if (flow->dl_type == htons(ETH_TYPE_IP)
+        && flow->nw_proto == IP_TYPE_UDP
+        && flow->tp_src == htons(DHCP_SERVER_PORT)
+        && flow->tp_dst == htons(DHCP_CLIENT_PORT)
+        && packet->l7)
+    {
+        const uint8_t *local_mac = get_local_mac(wx);
+        struct dhcp_header *dhcp = ofpbuf_at(
+            packet, (char *)packet->l7 - (char *)packet->data, sizeof *dhcp);
+        return dhcp && local_mac && eth_addr_equals(dhcp->chaddr, local_mac);
+    }
+
+    return false;
+}
+
++/* Determines whether 'payload' that arrived on 'in_port' matches any of the
++ * flows in 'wx''s OpenFlow flow table.  If so, then it adds a corresponding
++ * flow to the xfif's exact-match flow table, taking ownership of 'payload',
++ * and returns true.  If not, it returns false and the caller retains
++ * ownership of 'payload'. */
+static bool
- wx_explode_rule(struct wx *wx, struct xflow_msg *msg, struct ofpbuf *payload)
++wx_explode_rule(struct wx *wx, uint16_t in_port, struct ofpbuf *payload)
+{
+    struct wx_rule *rule;
+    flow_t flow;
+
-     flow_extract(payload, 0, xflow_port_to_ofp_port(msg->port), &flow);
++    flow_extract(payload, 0, xflow_port_to_ofp_port(in_port), &flow);
+
+    if (wx_is_local_dhcp_reply(wx, &flow, payload)) {
+        union xflow_action action;
+
+        memset(&action, 0, sizeof(action));
+        action.output.type = XFLOWAT_OUTPUT;
+        action.output.port = XFLOWP_LOCAL;
-         xfif_execute(wx->xfif, msg->port, &action, 1, payload);
++        xfif_execute(wx->xfif, in_port, &action, 1, payload);
+    }
+
+    rule = wx_rule_lookup_valid(wx, &flow);
+    if (!rule) {
+        return false;
+    }
+
+    if (rule->wr.cr.flow.wildcards) {
+        rule = wx_rule_create_subrule(wx, rule, &flow);
+        wx_rule_make_actions(wx, rule, payload);
+    } else {
+        if (!rule->may_install) {
+            /* The rule is not installable, that is, we need to process every
+             * packet, so process the current packet and set its actions into
+             * 'subrule'. */
+            wx_rule_make_actions(wx, rule, payload);
+        } else {
+            /* XXX revalidate rule if it needs it */
+        }
+    }
+
+    wx_rule_execute(wx, rule, payload, &flow);
+    wx_rule_reinstall(wx, rule);
+
+    return true;
+}
+
+static int
+wx_recv(struct wdp *wdp, struct wdp_packet *packet)
+{
+    struct wx *wx = wx_cast(wdp);
+    int i;
+
++    if (wx->n_ctl_packets) {
++        struct wdp_packet *wdp_packet;
++
++        wdp_packet = CONTAINER_OF(list_pop_front(&wx->ctl_packets),
++                                  struct wdp_packet, list);
++        wx->n_ctl_packets--;
++
++        *packet = *wdp_packet;
++        free(wdp_packet);
++
++        return 0;
++    }
++
+    /* XXX need to avoid 50*50 potential cost for caller. */
+    for (i = 0; i < 50; i++) {
+        struct xflow_msg *msg;
+        struct ofpbuf *buf;
+        int error;
+
+        error = xfif_recv(wx->xfif, &buf);
+        if (error) {
+            return error;
+        }
+
+        msg = ofpbuf_pull(buf, sizeof *msg);
-         if (msg->type != _XFLOWL_MISS_NR || !wx_explode_rule(wx, msg, buf)) {
++        if (msg->type != _XFLOWL_MISS_NR
++            || !wx_explode_rule(wx, msg->port, buf)) {
+            return wx_translate_xflow_msg(msg, buf, packet);
+        }
-         ofpbuf_delete(buf);
+    }
+    return EAGAIN;
+}
+
+static void
+wx_recv_purge_queue__(struct wx *wx, int max, int xflow_listen_mask,
+                      int *errorp)
+{
+    int error;
+
+    error = xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
+    if (!error) {
+        struct ofpbuf *buf;
+
+        while (max > 0 && (error = xfif_recv(wx->xfif, &buf)) == 0) {
+            ofpbuf_delete(buf);
+            max--;
+        }
+    }
+    if (error && error != EAGAIN) {
+        *errorp = error;
+    }
+}
+
++static void
++wx_purge_ctl_packets__(struct wx *wx)
++{
++    struct wdp_packet *this, *next;
++
++    LIST_FOR_EACH_SAFE (this, next, struct wdp_packet, list,
++                        &wx->ctl_packets) {
++        list_remove(&this->list);
++        ofpbuf_delete(this->payload);
++        free(this);
++    }
++    wx->n_ctl_packets = 0;
++}
++
+static int
+wx_recv_purge(struct wdp *wdp)
+{
+    struct wx *wx = wx_cast(wdp);
+    struct xflow_stats xflow_stats;
+    int xflow_listen_mask;
+    int retval, error;
+
+    xfif_get_xf_stats(wx->xfif, &xflow_stats);
+
+    error = xfif_recv_get_mask(wx->xfif, &xflow_listen_mask);
+    if (error || !(xflow_listen_mask & XFLOWL_ALL)) {
+        return error;
+    }
+
+    if (xflow_listen_mask & XFLOWL_MISS) {
+        wx_recv_purge_queue__(wx, xflow_stats.max_miss_queue, XFLOWL_MISS,
+                              &error);
+    }
+    if (xflow_listen_mask & XFLOWL_ACTION) {
+        wx_recv_purge_queue__(wx, xflow_stats.max_action_queue, XFLOWL_ACTION,
+                              &error);
++        wx_purge_ctl_packets__(wx);
+    }
+    if (xflow_listen_mask & XFLOWL_SFLOW) {
+        wx_recv_purge_queue__(wx, xflow_stats.max_sflow_queue, XFLOWL_SFLOW,
+                              &error);
+    }
+
+    retval = xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
+    return retval ? retval : error;
+}
+
+
+static void
+wx_recv_wait(struct wdp *wdp)
+{
+    struct wx *wx = wx_cast(wdp);
+
-     xfif_recv_wait(wx->xfif);
++    if (wx->n_ctl_packets) {
++        poll_immediate_wake();
++    } else {
++        xfif_recv_wait(wx->xfif);
++    }
+}
+
+static int
+wx_set_ofhooks(struct wdp *wdp, const struct ofhooks *ofhooks, void *aux)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    if (wx->ofhooks == &default_ofhooks) {
+        mac_learning_destroy(wx->ml);
+        wx->ml = NULL;
+    }
+
+    wx->ofhooks = ofhooks;
+    wx->aux = aux;
+    return 0;
+}
+
+static void
+wx_revalidate(struct wdp *wdp, tag_type tag)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    tag_set_add(&wx->revalidate_set, tag);
+}
+
+static void
+wx_revalidate_all(struct wdp *wdp)
+{
+    struct wx *wx = wx_cast(wdp);
+
+    wx->revalidate_all = true;
+}
+
+static void wx_port_update(struct wx *, const char *devname,
+                           wdp_port_poll_cb_func *cb, void *aux);
+static void wx_port_reinit(struct wx *, wdp_port_poll_cb_func *cb, void *aux);
+
+static void
+wx_port_process_change(struct wx *wx, int error, char *devname,
+                       wdp_port_poll_cb_func *cb, void *aux)
+{
+    if (error == ENOBUFS) {
+        wx_port_reinit(wx, cb, aux);
+    } else if (!error) {
+        wx_port_update(wx, devname, cb, aux);
+        free(devname);
+    }
+}
+
+static size_t
+wx_port_refresh_group(struct wx *wx, unsigned int group)
+{
+    uint16_t *ports;
+    size_t n_ports;
+    struct wdp_port *port;
+    unsigned int port_no;
+
+    assert(group == WX_GROUP_ALL || group == WX_GROUP_FLOOD);
+
+    ports = xmalloc(port_array_count(&wx->ports) * sizeof *ports);
+    n_ports = 0;
+    PORT_ARRAY_FOR_EACH (port, &wx->ports, port_no) {
+        if (group == WX_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) {
+            ports[n_ports++] = port_no;
+        }
+    }
+    xfif_port_group_set(wx->xfif, group, ports, n_ports);
+    free(ports);
+
+    return n_ports;
+}
+
+static void
+wx_port_refresh_groups(struct wx *wx)
+{
+    wx_port_refresh_group(wx, WX_GROUP_FLOOD);
+    wx_port_refresh_group(wx, WX_GROUP_ALL);
+}
+
+static void
+wx_port_reinit(struct wx *wx, wdp_port_poll_cb_func *cb, void *aux)
+{
+    struct svec devnames;
+    struct wdp_port *wdp_port;
+    unsigned int port_no;
+    struct xflow_port *xflow_ports;
+    size_t n_xflow_ports;
+    size_t i;
+
+    svec_init(&devnames);
+    PORT_ARRAY_FOR_EACH (wdp_port, &wx->ports, port_no) {
+        svec_add(&devnames, (char *) wdp_port->opp.name);
+    }
+    xfif_port_list(wx->xfif, &xflow_ports, &n_xflow_ports);
+    for (i = 0; i < n_xflow_ports; i++) {
+        svec_add(&devnames, xflow_ports[i].devname);
+    }
+    free(xflow_ports);
+
+    svec_sort_unique(&devnames);
+    for (i = 0; i < devnames.n; i++) {
+        wx_port_update(wx, devnames.names[i], cb, aux);
+    }
+    svec_destroy(&devnames);
+
+    wx_port_refresh_groups(wx);
+}
+
+static struct wdp_port *
+make_wdp_port(const struct xflow_port *xflow_port)
+{
+    struct netdev_options netdev_options;
+    enum netdev_flags flags;
+    struct wdp_port *wdp_port;
+    struct netdev *netdev;
+    bool carrier;
+    int error;
+
+    memset(&netdev_options, 0, sizeof netdev_options);
+    netdev_options.name = xflow_port->devname;
+    netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;
+
+    error = netdev_open(&netdev_options, &netdev);
+    if (error) {
+        VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
+                     "cannot be opened (%s)",
+                     xflow_port->devname, xflow_port->port,
+                     xflow_port->devname, strerror(error));
+        return NULL;
+    }
+
+    wdp_port = xmalloc(sizeof *wdp_port);
+    wdp_port->netdev = netdev;
+    wdp_port->opp.port_no = xflow_port_to_ofp_port(xflow_port->port);
+    netdev_get_etheraddr(netdev, wdp_port->opp.hw_addr);
+    strncpy((char *) wdp_port->opp.name, xflow_port->devname,
+            sizeof wdp_port->opp.name);
+    wdp_port->opp.name[sizeof wdp_port->opp.name - 1] = '\0';
+
+    netdev_get_flags(netdev, &flags);
+    wdp_port->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;
+
+    netdev_get_carrier(netdev, &carrier);
+    wdp_port->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;
+
+    netdev_get_features(netdev,
+                        &wdp_port->opp.curr, &wdp_port->opp.advertised,
+                        &wdp_port->opp.supported, &wdp_port->opp.peer);
+
+    wdp_port->devname = xstrdup(xflow_port->devname);
+    wdp_port->internal = (xflow_port->flags & XFLOW_PORT_INTERNAL) != 0;
+    return wdp_port;
+}
+
+static bool
+wx_port_conflicts(const struct wx *wx, const struct xflow_port *xflow_port)
+{
+    if (port_array_get(&wx->ports, xflow_port->port)) {
+        VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
+                     xflow_port->port);
+        return true;
+    } else if (shash_find(&wx->port_by_name, xflow_port->devname)) {
+        VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
+                     xflow_port->devname);
+        return true;
+    } else {
+        return false;
+    }
+}
+
+static int
+wdp_port_equal(const struct wdp_port *a_, const struct wdp_port *b_)
+{
+    const struct ofp_phy_port *a = &a_->opp;
+    const struct ofp_phy_port *b = &b_->opp;
+
+    BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
+    return (a->port_no == b->port_no
+            && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
+            && !strcmp((char *) a->name, (char *) b->name)
+            && a->state == b->state
+            && a->config == b->config
+            && a->curr == b->curr
+            && a->advertised == b->advertised
+            && a->supported == b->supported
+            && a->peer == b->peer);
+}
+
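wdp_port_equal() compares ofp_phy_port field by field, and the BUILD_ASSERT_DECL guards it: if the struct ever grows past 48 bytes, the build breaks and a maintainer knows to update the comparison. A standalone sketch of one classic plain-C way to build such a compile-time assertion (the macro name here is hypothetical, not OVS's definition):

/* Illustrative sketch: a negative array size makes the declaration
 * ill-formed when 'expr' is false, so the compiler rejects the build. */
#define MY_BUILD_ASSERT(expr) \
    extern int (*my_build_assert(void))[sizeof(char[(expr) ? 1 : -1])]

struct example { int a; int b; };
MY_BUILD_ASSERT(sizeof(struct example) == 2 * sizeof(int)); /* Typical ABIs. */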
+static void
+wx_port_install(struct wx *wx, struct wdp_port *wdp_port)
+{
+    uint16_t xflow_port = ofp_port_to_xflow_port(wdp_port->opp.port_no);
+    const char *netdev_name = (const char *) wdp_port->opp.name;
+
+    netdev_monitor_add(wx->netdev_monitor, wdp_port->netdev);
+    port_array_set(&wx->ports, xflow_port, wdp_port);
+    shash_add(&wx->port_by_name, netdev_name, wdp_port);
+}
+
+static void
+wx_port_remove(struct wx *wx, struct wdp_port *wdp_port)
+{
+    uint16_t xflow_port = ofp_port_to_xflow_port(wdp_port->opp.port_no);
+
+    netdev_monitor_remove(wx->netdev_monitor, wdp_port->netdev);
+    port_array_delete(&wx->ports, xflow_port);
+    shash_delete(&wx->port_by_name,
+                 shash_find(&wx->port_by_name, (char *) wdp_port->opp.name));
+}
+
+static void
+wx_port_free(struct wdp_port *wdp_port)
+{
+    wdp_port_free(wdp_port);
+    free(wdp_port);
+}
+
+static void
+wx_port_update(struct wx *wx, const char *devname,
+               wdp_port_poll_cb_func *cb, void *aux)
+{
+    struct xflow_port xflow_port;
+    struct wdp_port *old_wdp_port;
+    struct wdp_port *new_wdp_port;
+    int error;
+
+    COVERAGE_INC(wx_update_port);
+
+    /* Query the datapath for port information. */
+    error = xfif_port_query_by_name(wx->xfif, devname, &xflow_port);
+
+    /* Find the old wdp_port. */
+    old_wdp_port = shash_find_data(&wx->port_by_name, devname);
+    if (!error) {
+        if (!old_wdp_port) {
+            /* There's no port named 'devname' but there might be a port with
+             * the same port number.  This could happen if a port is deleted
+             * and then a new one added in its place very quickly, or if a port
+             * is renamed.  In the former case we want to send an OFPPR_DELETE
+             * and an OFPPR_ADD, and in the latter case we want to send a
+             * single OFPPR_MODIFY.  We can distinguish the cases by comparing
+             * the old port's ifindex against the new port, or perhaps less
+             * reliably but more portably by comparing the old port's MAC
+             * against the new port's MAC.  However, this code isn't that smart
+             * and always sends an OFPPR_MODIFY (XXX). */
+            old_wdp_port = port_array_get(&wx->ports, xflow_port.port);
+        }
+    } else if (error != ENOENT && error != ENODEV) {
+        VLOG_WARN_RL(&rl, "xfif_port_query_by_name returned unexpected error "
+                     "%s", strerror(error));
+        return;
+    }
+
+    /* Create a new wdp_port. */
+    new_wdp_port = !error ? make_wdp_port(&xflow_port) : NULL;
+
+    /* Eliminate a few pathological cases. */
+    if (!old_wdp_port && !new_wdp_port) {
+        return;
+    } else if (old_wdp_port && new_wdp_port) {
+        /* Most of the 'config' bits are OpenFlow soft state, but
+         * OFPPC_PORT_DOWN is maintained by the kernel.  So transfer the
+         * OpenFlow bits from old_wdp_port.  (make_wdp_port() only sets
+         * OFPPC_PORT_DOWN and leaves the other bits 0.) */
+        new_wdp_port->opp.config |= old_wdp_port->opp.config & ~OFPPC_PORT_DOWN;
+
+        if (wdp_port_equal(old_wdp_port, new_wdp_port)) {
+            /* False alarm--no change. */
+            wx_port_free(new_wdp_port);
+            return;
+        }
+    }
+
+    /* Now deal with the normal cases. */
+    if (old_wdp_port) {
+        wx_port_remove(wx, old_wdp_port);
+    }
+    if (new_wdp_port) {
+        wx_port_install(wx, new_wdp_port);
+    }
+
+    /* Call back. */
+    if (!old_wdp_port) {
+        (*cb)(&new_wdp_port->opp, OFPPR_ADD, aux);
+    } else if (!new_wdp_port) {
+        (*cb)(&old_wdp_port->opp, OFPPR_DELETE, aux);
+    } else {
+        (*cb)(&new_wdp_port->opp, OFPPR_MODIFY, aux);
+    }
+
+    /* Update port groups. */
+    wx_port_refresh_groups(wx);
+
+    /* Clean up. */
+    wx_port_free(old_wdp_port);
+}
+
+static int
+wx_port_init(struct wx *wx)
+{
+    struct xflow_port *ports;
+    size_t n_ports;
+    size_t i;
+    int error;
+
+    error = xfif_port_list(wx->xfif, &ports, &n_ports);
+    if (error) {
+        return error;
+    }
+
+    for (i = 0; i < n_ports; i++) {
+        const struct xflow_port *xflow_port = &ports[i];
+        if (!wx_port_conflicts(wx, xflow_port)) {
+            struct wdp_port *wdp_port = make_wdp_port(xflow_port);
+            if (wdp_port) {
+                wx_port_install(wx, wdp_port);
+            }
+        }
+    }
+    free(ports);
+    wx_port_refresh_groups(wx);
+    return 0;
+}
+
+void
+wdp_xflow_register(void)
+{
+    static const struct wdp_class wdp_xflow_class = {
+        NULL,                   /* name */
+        wx_run,
+        wx_wait,
+        wx_enumerate,
+        wx_open,
+        wx_close,
+        wx_get_all_names,
+        wx_destroy,
+        wx_get_features,
+        wx_get_stats,
+        wx_get_table_stats,
+        wx_get_drop_frags,
+        wx_set_drop_frags,
+        wx_port_add,
+        wx_port_del,
+        wx_port_query_by_number,
+        wx_port_query_by_name,
+        wx_port_list,
+        wx_port_set_config,
+        wx_port_poll,
+        wx_port_poll_wait,
+        wx_flow_get,
+        wx_flow_match,
+        wx_flow_for_each_match,
+        wx_flow_get_stats,
+        wx_flow_overlaps,
+        wx_flow_put,
+        wx_flow_delete,
+        wx_flow_flush,
+        wx_flow_inject,
+        wx_execute,
+        wx_recv_get_mask,
+        wx_recv_set_mask,
+        wx_get_sflow_probability,
+        wx_set_sflow_probability,
+        wx_recv,
+        wx_recv_purge,
+        wx_recv_wait,
+        wx_set_ofhooks,
+        wx_revalidate,
+        wx_revalidate_all,
+    };
+
+    static bool inited = false;
+
+    struct svec types;
+    const char *type;
+    bool registered;
+    int i;
+
+    if (inited) {
+        return;
+    }
+    inited = true;
+
+    svec_init(&types);
+    xf_enumerate_types(&types);
+
+    registered = false;
+    SVEC_FOR_EACH (i, type, &types) {
+        struct wdp_class *class;
+
+        class = xmalloc(sizeof *class);
+        *class = wdp_xflow_class;
+        class->type = xstrdup(type);
+        if (registered) {
+            class->run = NULL;
+            class->wait = NULL;
+        }
+        if (!wdp_register_provider(class)) {
+            registered = true;
+        }
+    }
+
+    svec_destroy(&types);
+}
+
+static bool
+default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
+                         struct xflow_actions *actions, tag_type *tags,
+                         uint16_t *nf_output_iface, void *wx_)
+{
+    struct wx *wx = wx_;
+    int out_port;
+
+    /* Drop frames for reserved multicast addresses. */
+    if (eth_addr_is_reserved(flow->dl_dst)) {
+        return true;
+    }
+
+    /* Learn source MAC (but don't try to learn from revalidation). */
+    if (packet != NULL) {
+        tag_type rev_tag = mac_learning_learn(wx->ml, flow->dl_src,
+                                              0, flow->in_port,
+                                              GRAT_ARP_LOCK_NONE);
+        if (rev_tag) {
+            /* The log messages here could actually be useful in debugging,
+             * so keep the rate limit relatively high. */
+            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
+            VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
+                        ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
+            tag_set_add(&wx->revalidate_set, rev_tag);
+        }
+    }
+
+    /* Determine output port. */
+    out_port = mac_learning_lookup_tag(wx->ml, flow->dl_dst, 0, tags,
+                                       NULL);
+    if (out_port < 0) {
+        add_output_group_action(actions, WX_GROUP_FLOOD, nf_output_iface);
+    } else if (out_port != flow->in_port) {
+        xflow_actions_add(actions, XFLOWAT_OUTPUT)->output.port = out_port;
+        *nf_output_iface = out_port;
+    } else {
+        /* Drop. */
+    }
+
+    return true;
+}
+
+static const struct ofhooks default_ofhooks = {
+    NULL,
+    default_normal_ofhook_cb,
+    NULL,
+    NULL
+};
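default_normal_ofhook_cb() implements the classic learning-switch decision: learn the source MAC, then look up the destination; an unknown destination floods, a known destination on another port is unicast, and a destination learned on the ingress port is dropped to avoid a hairpin. A standalone sketch of just that decision, with illustrative encodings rather than the OVS ones:

/* Illustrative sketch of the L2 forwarding decision above.  A negative
 * 'learned_port' stands in for a failed MAC-table lookup; OUT_FLOOD and
 * OUT_DROP are hypothetical encodings. */
enum { OUT_FLOOD = -2, OUT_DROP = -3 };

static int
l2_output_port(int learned_port /* < 0 if unknown */, int in_port)
{
    if (learned_port < 0) {
        return OUT_FLOOD;           /* Unknown destination: flood. */
    } else if (learned_port != in_port) {
        return learned_port;        /* Known: unicast out that port. */
    } else {
        return OUT_DROP;            /* Would hairpin back out the ingress. */
    }
}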