* It is safe to access the datapath and dp_port structures with just
* dp_mutex.
*/
- static struct datapath *dps[ODP_MAX];
+ static struct datapath *dps[XFLOW_MAX];
static DEFINE_MUTEX(dp_mutex);
-/* Number of milliseconds between runs of the maintenance thread. */
-#define MAINT_SLEEP_MSECS 1000
+/* We limit the number of times that we pass into dp_process_received_packet()
+ * to avoid blowing out the stack in the event that we have a loop. */
+struct loop_counter {
+ int count; /* Recursion depth. */
+ bool looping; /* Loop detected? */
+};
+
+#define DP_MAX_LOOPS 5
+
+/* We use a separate counter for each CPU for both interrupt and non-interrupt
+ * context in order to keep the limit deterministic for a given packet. */
+struct percpu_loop_counters {
+ struct loop_counter counters[2];
+};
+
+static DEFINE_PER_CPU(struct percpu_loop_counters, dp_loop_counters);
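The receive path below bails out through suppress_loop() once this limit is hit. That helper is defined elsewhere in the patch and does not appear in this hunk; a plausible minimal sketch of what it is assumed to do (emit a rate-limited warning and empty the flow's action list so the recursion unwinds), assuming the dp_name() helper from datapath.h, is:

    static void suppress_loop(struct datapath *dp, struct sw_flow_actions *actions)
    {
        /* Rate-limited warning so a looping flow does not flood the log. */
        if (net_ratelimit())
            printk(KERN_WARNING "%s: flow looped %d times, dropping\n",
                   dp_name(dp), DP_MAX_LOOPS);

        /* Emptying the action list makes this flow drop the packet. */
        actions->n_actions = 0;
    }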
- static int new_dp_port(struct datapath *, struct odp_port *, int port_no);
+ static int new_dp_port(struct datapath *, struct xflow_port *, int port_no);
/* Must be called with rcu_read_lock or dp_mutex. */
struct datapath *get_dp(int dp_idx)
struct datapath *dp = p->dp;
struct dp_stats_percpu *stats;
int stats_counter_off;
- struct odp_flow_key key;
+ struct xflow_key key;
struct tbl_node *flow_node;
-
- WARN_ON_ONCE(skb_shared(skb));
- skb_warn_if_lro(skb);
+ struct sw_flow *flow;
+ struct sw_flow_actions *acts;
+ struct loop_counter *loop;
OVS_CB(skb)->dp_port = p;
- if (flow_extract(skb, p ? p->port_no : ODPP_NONE, &key)) {
+ /* Extract flow from 'skb' into 'key'. */
+ if (flow_extract(skb, p ? p->port_no : XFLOWP_NONE, &key)) {
if (dp->drop_frags) {
kfree_skb(skb);
stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
}
}
+ /* Look up flow. */
flow_node = tbl_lookup(rcu_dereference(dp->table), &key, flow_hash(&key), flow_cmp);
- if (flow_node) {
- struct sw_flow *flow = flow_cast(flow_node);
- struct sw_flow_actions *acts = rcu_dereference(flow->sf_acts);
- flow_used(flow, skb);
- execute_actions(dp, skb, &key, acts->actions, acts->n_actions,
- GFP_ATOMIC);
- stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
- } else {
- stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
+ if (unlikely(!flow_node)) {
- dp_output_control(dp, skb, _ODPL_MISS_NR, OVS_CB(skb)->tun_id);
+ dp_output_control(dp, skb, _XFLOWL_MISS_NR, OVS_CB(skb)->tun_id);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
+ goto out;
}
+ flow = flow_cast(flow_node);
+ flow_used(flow, skb);
+
+ acts = rcu_dereference(flow->sf_acts);
+
+ /* Check whether we've looped too much. */
+ loop = &get_cpu_var(dp_loop_counters).counters[!!in_interrupt()];
+ if (unlikely(++loop->count > DP_MAX_LOOPS))
+ loop->looping = true;
+ if (unlikely(loop->looping)) {
+ suppress_loop(dp, acts);
+ goto out_loop;
+ }
+
+ /* Execute actions. */
+ execute_actions(dp, skb, &key, acts->actions, acts->n_actions, GFP_ATOMIC);
+ stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
+
+ /* Check whether sub-actions looped too much. */
+ if (unlikely(loop->looping))
+ suppress_loop(dp, acts);
+
+out_loop:
+ /* Decrement loop counter. */
+ if (!--loop->count)
+ loop->looping = false;
+ put_cpu_var(dp_loop_counters);
+
out:
+ /* Update datapath statistics. */
local_bh_disable();
stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
(*(u64 *)((u8 *)stats + stats_counter_off))++;
return ERR_PTR(error);
}
-static void get_stats(struct sw_flow *flow, struct xflow_flow_stats *stats)
+static struct timespec get_time_offset(void)
+{
+ struct timespec now_mono, now_jiffies;
+
+ ktime_get_ts(&now_mono);
+ jiffies_to_timespec(jiffies, &now_jiffies);
+ return timespec_sub(now_mono, now_jiffies);
+}
+
- static void get_stats(struct sw_flow *flow, struct odp_flow_stats *stats,
++static void get_stats(struct sw_flow *flow, struct xflow_flow_stats *stats,
+ struct timespec time_offset)
{
- if (flow->used.tv_sec) {
- stats->used_sec = flow->used.tv_sec;
- stats->used_nsec = flow->used.tv_nsec;
+ if (flow->used) {
+ struct timespec flow_ts, used;
+
+ jiffies_to_timespec(flow->used, &flow_ts);
+ set_normalized_timespec(&used, flow_ts.tv_sec + time_offset.tv_sec,
+ flow_ts.tv_nsec + time_offset.tv_nsec);
+
+ stats->used_sec = used.tv_sec;
+ stats->used_nsec = used.tv_nsec;
} else {
stats->used_sec = 0;
stats->used_nsec = 0;
/* Fetch stats, then clear them if necessary. */
spin_lock_bh(&flow->lock);
- get_stats(flow, stats);
+ get_stats(flow, stats, get_time_offset());
- if (uf->flags & ODPPF_ZERO_STATS)
+ if (uf->flags & XFLOWPF_ZERO_STATS)
clear_stats(flow);
spin_unlock_bh(&flow->lock);
}
}
static int do_answer_query(struct sw_flow *flow, u32 query_flags,
- struct odp_flow_stats __user *ustats,
- union odp_action __user *actions,
+ struct timespec time_offset,
+ struct xflow_flow_stats __user *ustats,
+ union xflow_action __user *actions,
u32 __user *n_actionsp)
{
struct sw_flow_actions *sf_acts;
u32 n_actions;
spin_lock_bh(&flow->lock);
- get_stats(flow, &stats);
+ get_stats(flow, &stats, time_offset);
- if (query_flags & ODPFF_ZERO_TCP_FLAGS)
+ if (query_flags & XFLOWFF_ZERO_TCP_FLAGS)
flow->tcp_flags = 0;
spin_unlock_bh(&flow->lock);
}
static int answer_query(struct sw_flow *flow, u32 query_flags,
- struct odp_flow __user *ufp)
+ struct timespec time_offset,
+ struct xflow_flow __user *ufp)
{
- union odp_action *actions;
+ union xflow_action *actions;
if (get_user(actions, &ufp->actions))
return -EFAULT;
return error;
}
- static int do_query_flows(struct datapath *dp, const struct odp_flowvec *flowvec)
+ static int do_query_flows(struct datapath *dp, const struct xflow_flowvec *flowvec)
{
struct tbl *table = rcu_dereference(dp->table);
+ struct timespec time_offset;
u32 i;
+ time_offset = get_time_offset();
+
for (i = 0; i < flowvec->n_flows; i++) {
- struct odp_flow __user *ufp = &flowvec->flows[i];
- struct odp_flow uf;
+ struct xflow_flow __user *ufp = &flowvec->flows[i];
+ struct xflow_flow uf;
struct tbl_node *flow_node;
int error;
}
struct list_flows_cbdata {
- struct odp_flow __user *uflows;
+ struct xflow_flow __user *uflows;
u32 n_flows;
u32 listed_flows;
+ struct timespec time_offset;
};
static int list_flow(struct tbl_node *node, void *cbdata_)
}
static int compat_answer_query(struct sw_flow *flow, u32 query_flags,
- struct compat_odp_flow __user *ufp)
+ struct timespec time_offset,
+ struct compat_xflow_flow __user *ufp)
{
compat_uptr_t actions;
return error;
}
- static int compat_query_flows(struct datapath *dp, struct compat_odp_flow *flows, u32 n_flows)
+ static int compat_query_flows(struct datapath *dp, struct compat_xflow_flow *flows, u32 n_flows)
{
struct tbl *table = rcu_dereference(dp->table);
+ struct timespec time_offset;
u32 i;
+ time_offset = get_time_offset();
+
for (i = 0; i < n_flows; i++) {
- struct compat_odp_flow __user *ufp = &flows[i];
- struct odp_flow uf;
+ struct compat_xflow_flow __user *ufp = &flows[i];
+ struct xflow_flow uf;
struct tbl_node *flow_node;
int error;
}
struct compat_list_flows_cbdata {
- struct compat_odp_flow __user *uflows;
+ struct compat_xflow_flow __user *uflows;
u32 n_flows;
u32 listed_flows;
+ struct timespec time_offset;
};
static int compat_list_flow(struct tbl_node *node, void *cbdata_)
return retval;
}
- u32 flow_hash(const struct odp_flow_key *key)
-struct sw_flow *flow_cast(const struct tbl_node *node)
-{
- return container_of(node, struct sw_flow, tbl_node);
-}
-
+ u32 flow_hash(const struct xflow_key *key)
{
return jhash2((u32*)key, sizeof *key / sizeof(u32), hash_seed);
}
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>
+#include <linux/jiffies.h>
#include <linux/time.h>
- #include "openvswitch/datapath-protocol.h"
+ #include "openvswitch/xflow.h"
#include "table.h"
struct sk_buff;
struct rcu_head rcu;
struct tbl_node tbl_node;
- struct odp_flow_key key;
+ struct xflow_key key;
struct sw_flow_actions *sf_acts;
- struct timespec used; /* Last used time. */
-
- u8 ip_tos; /* IP TOS value. */
-
spinlock_t lock; /* Lock for values below. */
+ unsigned long used; /* Last used time (in jiffies). */
u64 packet_count; /* Number of packets matched. */
u64 byte_count; /* Number of bytes matched. */
u8 tcp_flags; /* Union of seen TCP flags. */
struct sw_flow_actions *flow_actions_alloc(size_t n_actions);
void flow_deferred_free(struct sw_flow *);
void flow_deferred_free_acts(struct sw_flow_actions *);
- int flow_extract(struct sk_buff *, u16 in_port, struct odp_flow_key *);
+ int flow_extract(struct sk_buff *, u16 in_port, struct xflow_key *);
void flow_used(struct sw_flow *, struct sk_buff *);
- u32 flow_hash(const struct odp_flow_key *key);
-struct sw_flow *flow_cast(const struct tbl_node *);
+ u32 flow_hash(const struct xflow_key *key);
int flow_cmp(const struct tbl_node *, void *target);
void flow_free_tbl(struct tbl_node *);
--- /dev/null
- uint8_t ip_tos;
+ /*
+ * Copyright (c) 2009, 2010 Nicira Networks.
+ *
+ * This file is offered under your choice of two licenses: Apache 2.0 or GNU
+ * GPL 2.0 or later. The permission statements for each of these licenses is
+ * given below. You may license your modifications to this file under either
+ * of these licenses or both. If you wish to license your modifications under
+ * only one of these licenses, delete the permission text for the other
+ * license.
+ *
+ * ----------------------------------------------------------------------
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * ----------------------------------------------------------------------
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * ----------------------------------------------------------------------
+ */
+
+ /* Protocol between userspace and kernel datapath.
+ *
+ * Be sure to update datapath/xflow-compat.h if you change any of the
+ * structures in here. */
+
+ #ifndef XFLOW_H
+ #define XFLOW_H 1
+
+ /* The ovs_be<N> types indicate that an object is in big-endian, not
+ * native-endian, byte order. They are otherwise equivalent to uint<N>_t.
+ * The Linux kernel already has __be<N> types for this, which take on
+ * additional semantics when the "sparse" static checker is used, so we use
+ * those types when compiling the kernel. */
+ #ifdef __KERNEL__
+ #include <linux/types.h>
+ #define ovs_be16 __be16
+ #define ovs_be32 __be32
+ #define ovs_be64 __be64
+ #else
+ #include <stdint.h>
+ #define ovs_be16 uint16_t
+ #define ovs_be32 uint32_t
+ #define ovs_be64 uint64_t
+ #endif
+
+ #define XFLOW_MAX 256 /* Maximum number of datapaths. */
+
+ #define XFLOW_DP_CREATE _IO('O', 0)
+ #define XFLOW_DP_DESTROY _IO('O', 1)
+ #define XFLOW_DP_STATS _IOW('O', 2, struct xflow_stats)
+
+ #define XFLOW_GET_DROP_FRAGS _IOW('O', 3, int)
+ #define XFLOW_SET_DROP_FRAGS _IOR('O', 4, int)
+
+ #define XFLOW_GET_LISTEN_MASK _IOW('O', 5, int)
+ #define XFLOW_SET_LISTEN_MASK _IOR('O', 6, int)
+
+ #define XFLOW_PORT_ATTACH _IOR('O', 7, struct xflow_port)
+ #define XFLOW_PORT_DETACH _IOR('O', 8, int)
+ #define XFLOW_PORT_QUERY _IOWR('O', 9, struct xflow_port)
+ #define XFLOW_PORT_LIST _IOWR('O', 10, struct xflow_portvec)
+
+ #define XFLOW_PORT_GROUP_SET _IOR('O', 11, struct xflow_port_group)
+ #define XFLOW_PORT_GROUP_GET _IOWR('O', 12, struct xflow_port_group)
+
+ #define XFLOW_FLOW_GET _IOWR('O', 13, struct xflow_flow)
+ #define XFLOW_FLOW_PUT _IOWR('O', 14, struct xflow_flow)
+ #define XFLOW_FLOW_LIST _IOWR('O', 15, struct xflow_flowvec)
+ #define XFLOW_FLOW_FLUSH _IO('O', 16)
+ #define XFLOW_FLOW_DEL _IOWR('O', 17, struct xflow_flow)
+
+ #define XFLOW_EXECUTE _IOR('O', 18, struct xflow_execute)
+
+ #define XFLOW_SET_SFLOW_PROBABILITY _IOR('O', 19, int)
+ #define XFLOW_GET_SFLOW_PROBABILITY _IOW('O', 20, int)
+
+ #define XFLOW_VPORT_ADD _IOR('O', 21, struct xflow_vport_add)
+ #define XFLOW_VPORT_MOD _IOR('O', 22, struct xflow_vport_mod)
+ #define XFLOW_VPORT_DEL _IO('O', 23)
+ #define XFLOW_VPORT_STATS_GET _IOWR('O', 24, struct xflow_vport_stats_req)
+ #define XFLOW_VPORT_ETHER_GET _IOWR('O', 25, struct xflow_vport_ether)
+ #define XFLOW_VPORT_ETHER_SET _IOW('O', 26, struct xflow_vport_ether)
+ #define XFLOW_VPORT_MTU_GET _IOWR('O', 27, struct xflow_vport_mtu)
+ #define XFLOW_VPORT_MTU_SET _IOW('O', 28, struct xflow_vport_mtu)
+ #define XFLOW_VPORT_STATS_SET _IOWR('O', 29, struct xflow_vport_stats_req)
+
+ struct xflow_stats {
+ /* Flows. */
+ uint32_t n_flows; /* Number of flows in flow table. */
+ uint32_t cur_capacity; /* Current flow table capacity. */
+ uint32_t max_capacity; /* Maximum expansion of flow table capacity. */
+
+ /* Ports. */
+ uint32_t n_ports; /* Current number of ports. */
+ uint32_t max_ports; /* Maximum supported number of ports. */
+ uint16_t max_groups; /* Maximum number of port groups. */
+ uint16_t reserved;
+
+ /* Lookups. */
+ uint64_t n_frags; /* Number of dropped IP fragments. */
+ uint64_t n_hit; /* Number of flow table matches. */
+ uint64_t n_missed; /* Number of flow table misses. */
+ uint64_t n_lost; /* Number of misses not sent to userspace. */
+
+ /* Queues. */
+ uint16_t max_miss_queue; /* Max length of XFLOWL_MISS queue. */
+ uint16_t max_action_queue; /* Max length of XFLOWL_ACTION queue. */
+ uint16_t max_sflow_queue; /* Max length of XFLOWL_SFLOW queue. */
+ };
+
+ /* Logical ports. */
+ #define XFLOWP_LOCAL ((uint16_t)0)
+ #define XFLOWP_NONE ((uint16_t)-1)
+ #define XFLOWP_NORMAL ((uint16_t)-2)
+
+ /* Listening channels. */
+ #define _XFLOWL_MISS_NR 0 /* Packet missed in flow table. */
+ #define XFLOWL_MISS (1 << _XFLOWL_MISS_NR)
+ #define _XFLOWL_ACTION_NR 1 /* Packet output to XFLOWP_CONTROLLER. */
+ #define XFLOWL_ACTION (1 << _XFLOWL_ACTION_NR)
+ #define _XFLOWL_SFLOW_NR 2 /* sFlow samples. */
+ #define XFLOWL_SFLOW (1 << _XFLOWL_SFLOW_NR)
+ #define XFLOWL_ALL (XFLOWL_MISS | XFLOWL_ACTION | XFLOWL_SFLOW)
+
+ /**
+ * struct xflow_msg - format of messages read from datapath fd.
+ * @type: One of the %_XFLOWL_* constants.
+ * @length: Total length of message, including this header.
+ * @port: Port that received the packet embedded in this message.
+ * @reserved: Not currently used. Should be set to 0.
+ * @arg: Argument value whose meaning depends on @type.
+ *
+ * For @type == %_XFLOWL_MISS_NR, the header is followed by packet data. The
+ * @arg member is the ID (in network byte order) of the tunnel that
+ * encapsulated this packet. It is 0 if the packet was not received on a
+ * tunnel.
+ *
+ * For @type == %_XFLOWL_ACTION_NR, the header is followed by packet data. The
+ * @arg member is copied from the &struct xflow_action_controller that caused
+ * the &struct xflow_msg to be composed.
+ *
+ * For @type == %_XFLOWL_SFLOW_NR, the header is followed by &struct
+ * xflow_sflow_sample_header, then by an array of &union xflow_action (the
+ * number of which is specified in &struct xflow_sflow_sample_header), then by
+ * packet data.
+ */
+ struct xflow_msg {
+ uint32_t type;
+ uint32_t length;
+ uint16_t port;
+ uint16_t reserved;
+ uint32_t arg;
+ };
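For illustration, a userspace consumer might pull one message at a time off the datapath file descriptor and dispatch on the type field. This is only a sketch: the descriptor 'dp_fd', the buffer size, and the one-message-per-read() convention are assumptions, and includes and error handling are omitted.

    union {
        struct xflow_msg msg;
        char data[65536];
    } buf;
    ssize_t n = read(dp_fd, &buf, sizeof buf);

    if (n >= (ssize_t) sizeof buf.msg && buf.msg.length <= (uint32_t) n) {
        void *body = &buf.msg + 1;       /* Data following the header. */

        switch (buf.msg.type) {
        case _XFLOWL_MISS_NR:
            /* 'body' is the packet; buf.msg.arg is the tunnel ID (network order). */
            break;
        case _XFLOWL_ACTION_NR:
            /* 'body' is the packet; buf.msg.arg was copied from the controller action. */
            break;
        case _XFLOWL_SFLOW_NR:
            /* 'body' begins with a struct xflow_sflow_sample_header. */
            break;
        }
    }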
+
+ /**
+ * struct xflow_sflow_sample_header - header added to sFlow sampled packet.
+ * @sample_pool: Number of packets that were candidates for sFlow sampling,
+ * regardless of whether they were actually chosen and sent down to userspace.
+ * @n_actions: Number of "union xflow_action"s immediately following this
+ * header.
+ *
+ * This header follows &struct xflow_msg when that structure's @type is
+ * %_XFLOWL_SFLOW_NR, and it is itself followed by an array of &union
+ * xflow_action (the number of which is specified in @n_actions) and then by
+ * packet data.
+ */
+ struct xflow_sflow_sample_header {
+ uint32_t sample_pool;
+ uint32_t n_actions;
+ };
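Given that layout, the offset of the sampled packet data within the message follows directly from the two fixed headers and the action count; as a sketch, with 'hdr' (an assumed name) pointing at the sample header:

    size_t packet_ofs = sizeof(struct xflow_msg)
                        + sizeof(struct xflow_sflow_sample_header)
                        + hdr->n_actions * sizeof(union xflow_action);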
+
+ #define XFLOW_PORT_INTERNAL (1 << 0) /* This port is simulated. */
+ struct xflow_port {
+ char devname[16]; /* IFNAMSIZ */
+ uint16_t port;
+ uint16_t flags;
+ uint32_t reserved2;
+ };
+
+ struct xflow_portvec {
+ struct xflow_port *ports;
+ uint32_t n_ports;
+ };
+
+ struct xflow_port_group {
+ uint16_t *ports;
+ uint16_t n_ports; /* Number of ports. */
+ uint16_t group; /* Group number. */
+ };
+
+ struct xflow_flow_stats {
+ uint64_t n_packets; /* Number of matched packets. */
+ uint64_t n_bytes; /* Number of matched bytes. */
+ uint64_t used_sec; /* Time last used, in system monotonic time. */
+ uint32_t used_nsec;
+ uint8_t tcp_flags;
++ uint8_t reserved;
+ uint16_t error; /* Used by XFLOW_FLOW_GET. */
+ };
+
+ /*
+ * The datapath protocol adopts the Linux convention for TCI fields: if an
+ * 802.1Q header is present then its TCI value is used verbatim except that the
+ * CFI bit (0x1000) is always set to 1, and all-bits-zero indicates no 802.1Q
+ * header.
+ */
+ #define XFLOW_TCI_PRESENT 0x1000 /* CFI bit */
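For example, priority 5 on VLAN 17 is encoded as (5 << 13) | XFLOW_TCI_PRESENT | 17 == 0xb011, whereas an untagged packet simply carries an all-zero dl_tci.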
+
+ struct xflow_key {
+ ovs_be32 tun_id; /* Encapsulating tunnel ID. */
+ ovs_be32 nw_src; /* IP source address. */
+ ovs_be32 nw_dst; /* IP destination address. */
+ uint16_t in_port; /* Input switch port. */
+ ovs_be16 dl_tci; /* All zeros if 802.1Q header absent,
+ * XFLOW_TCI_PRESENT set if present. */
+ ovs_be16 dl_type; /* Ethernet frame type. */
+ ovs_be16 tp_src; /* TCP/UDP source port. */
+ ovs_be16 tp_dst; /* TCP/UDP destination port. */
+ uint8_t dl_src[6]; /* Ethernet source address. */
+ uint8_t dl_dst[6]; /* Ethernet destination address. */
+ uint8_t nw_proto; /* IP protocol or low 8 bits of ARP opcode. */
+ uint8_t nw_tos; /* IP ToS (DSCP field, 6 bits). */
+ };
+
+ /* Flags for XFLOW_FLOW. */
+ #define XFLOWFF_ZERO_TCP_FLAGS (1 << 0) /* Zero the TCP flags. */
+
+ struct xflow_flow {
+ struct xflow_flow_stats stats;
+ struct xflow_key key;
+ union xflow_action *actions;
+ uint32_t n_actions;
+ uint32_t flags;
+ };
+
+ /* Flags for XFLOW_FLOW_PUT. */
+ #define XFLOWPF_CREATE (1 << 0) /* Allow creating a new flow. */
+ #define XFLOWPF_MODIFY (1 << 1) /* Allow modifying an existing flow. */
+ #define XFLOWPF_ZERO_STATS (1 << 2) /* Zero the stats of existing flow. */
+
+ /* XFLOW_FLOW_PUT argument. */
+ struct xflow_flow_put {
+ struct xflow_flow flow;
+ uint32_t flags;
+ };
+
+ struct xflow_flowvec {
+ struct xflow_flow *flows;
+ uint32_t n_flows;
+ };
+
+ /* Action types. */
+ #define XFLOWAT_OUTPUT 0 /* Output to switch port. */
+ #define XFLOWAT_OUTPUT_GROUP 1 /* Output to all ports in group. */
+ #define XFLOWAT_CONTROLLER 2 /* Send copy to controller. */
+ #define XFLOWAT_SET_DL_TCI 3 /* Set the 802.1q VLAN VID and/or PCP. */
+ #define XFLOWAT_STRIP_VLAN 4 /* Strip the 802.1q header. */
+ #define XFLOWAT_SET_DL_SRC 5 /* Ethernet source address. */
+ #define XFLOWAT_SET_DL_DST 6 /* Ethernet destination address. */
+ #define XFLOWAT_SET_NW_SRC 7 /* IP source address. */
+ #define XFLOWAT_SET_NW_DST 8 /* IP destination address. */
+ #define XFLOWAT_SET_NW_TOS 9 /* IP ToS/DSCP field (6 bits). */
+ #define XFLOWAT_SET_TP_SRC 10 /* TCP/UDP source port. */
+ #define XFLOWAT_SET_TP_DST 11 /* TCP/UDP destination port. */
+ #define XFLOWAT_SET_TUNNEL 12 /* Set the encapsulating tunnel ID. */
+ #define XFLOWAT_SET_PRIORITY 14 /* Set skb->priority. */
+ #define XFLOWAT_POP_PRIORITY 15 /* Restore original skb->priority. */
+ #define XFLOWAT_N_ACTIONS 16
+
+ struct xflow_action_output {
+ uint16_t type; /* XFLOWAT_OUTPUT. */
+ uint16_t port; /* Output port. */
+ uint16_t reserved1;
+ uint16_t reserved2;
+ };
+
+ struct xflow_action_output_group {
+ uint16_t type; /* XFLOWAT_OUTPUT_GROUP. */
+ uint16_t group; /* Group number. */
+ uint16_t reserved1;
+ uint16_t reserved2;
+ };
+
+ struct xflow_action_controller {
+ uint16_t type; /* XFLOWAT_CONTROLLER. */
+ uint16_t reserved;
+ uint32_t arg; /* Copied to struct xflow_msg 'arg' member. */
+ };
+
+ struct xflow_action_tunnel {
+ uint16_t type; /* XFLOWAT_SET_TUNNEL. */
+ uint16_t reserved;
+ ovs_be32 tun_id; /* Tunnel ID. */
+ };
+
+ /* Action structure for XFLOWAT_SET_DL_TCI. */
+ struct xflow_action_dl_tci {
+ uint16_t type; /* XFLOWAT_SET_DL_TCI. */
+ ovs_be16 tci; /* New TCI. Bits not in mask must be zero. */
+ ovs_be16 mask; /* 0x0fff to set VID, 0xe000 to set PCP,
+ * or 0xefff to set both. */
+ uint16_t reserved;
+ };
+
+ /* Action structure for XFLOWAT_SET_DL_SRC/DST. */
+ struct xflow_action_dl_addr {
+ uint16_t type; /* XFLOWAT_SET_DL_SRC/DST. */
+ uint8_t dl_addr[6]; /* Ethernet address. */
+ };
+
+ /* Action structure for XFLOWAT_SET_NW_SRC/DST. */
+ struct xflow_action_nw_addr {
+ uint16_t type; /* XFLOWAT_SET_NW_SRC/DST. */
+ uint16_t reserved;
+ ovs_be32 nw_addr; /* IP address. */
+ };
+
+ struct xflow_action_nw_tos {
+ uint16_t type; /* XFLOWAT_SET_NW_TOS. */
+ uint8_t nw_tos; /* IP ToS/DSCP field (6 bits). */
+ uint8_t reserved1;
+ uint16_t reserved2;
+ uint16_t reserved3;
+ };
+
+ /* Action structure for XFLOWAT_SET_PRIORITY. */
+ struct xflow_action_priority {
+ uint16_t type; /* XFLOWAT_SET_PRIORITY. */
+ uint16_t reserved;
+ uint32_t priority; /* skb->priority value. */
+ };
+
+ /* Action structure for XFLOWAT_SET_TP_SRC/DST. */
+ struct xflow_action_tp_port {
+ uint16_t type; /* XFLOWAT_SET_TP_SRC/DST. */
+ ovs_be16 tp_port; /* TCP/UDP port. */
+ uint16_t reserved1;
+ uint16_t reserved2;
+ };
+
+ union xflow_action {
+ uint16_t type;
+ struct xflow_action_output output;
+ struct xflow_action_output_group output_group;
+ struct xflow_action_controller controller;
+ struct xflow_action_tunnel tunnel;
+ struct xflow_action_dl_tci dl_tci;
+ struct xflow_action_dl_addr dl_addr;
+ struct xflow_action_nw_addr nw_addr;
+ struct xflow_action_nw_tos nw_tos;
+ struct xflow_action_tp_port tp_port;
+ struct xflow_action_priority priority;
+ };
+
+ struct xflow_execute {
+ uint16_t in_port;
+ uint16_t reserved1;
+ uint32_t reserved2;
+
+ union xflow_action *actions;
+ uint32_t n_actions;
+
+ const void *data;
+ uint32_t length;
+ };
+
+ #define VPORT_TYPE_SIZE 16
+ struct xflow_vport_add {
+ char port_type[VPORT_TYPE_SIZE];
+ char devname[16]; /* IFNAMSIZ */
+ void *config;
+ };
+
+ struct xflow_vport_mod {
+ char devname[16]; /* IFNAMSIZ */
+ void *config;
+ };
+
+ struct xflow_vport_stats {
+ uint64_t rx_packets;
+ uint64_t tx_packets;
+ uint64_t rx_bytes;
+ uint64_t tx_bytes;
+ uint64_t rx_dropped;
+ uint64_t tx_dropped;
+ uint64_t rx_errors;
+ uint64_t tx_errors;
+ uint64_t rx_frame_err;
+ uint64_t rx_over_err;
+ uint64_t rx_crc_err;
+ uint64_t collisions;
+ };
+
+ struct xflow_vport_stats_req {
+ char devname[16]; /* IFNAMSIZ */
+ struct xflow_vport_stats stats;
+ };
+
+ struct xflow_vport_ether {
+ char devname[16]; /* IFNAMSIZ */
+ unsigned char ether_addr[6];
+ };
+
+ struct xflow_vport_mtu {
+ char devname[16]; /* IFNAMSIZ */
+ uint16_t mtu;
+ };
+
+ /* Values below this cutoff are 802.3 packets and the two bytes
+ * following MAC addresses are used as a frame length. Otherwise, the
+ * two bytes are used as the Ethernet type.
+ */
+ #define XFLOW_DL_TYPE_ETH2_CUTOFF 0x0600
+
+ /* Value of dl_type to indicate that the frame does not include an
+ * Ethernet type.
+ */
+ #define XFLOW_DL_TYPE_NOT_ETH_TYPE 0x05ff
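For example, a type/length field of 0x0800 (IPv4) is at or above the cutoff and is treated as an Ethernet type, while 0x0040 is below it and is an 802.3 frame length; frames that carry no Ethernet type are reported with dl_type set to XFLOW_DL_TYPE_NOT_ETH_TYPE.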
+
+ #endif /* openvswitch/xflow.h */
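As a small usage sketch of this interface, fetching datapath statistics is a single ioctl against an already-open datapath file descriptor. How 'dp_fd' is obtained (e.g. by opening the datapath's device node) is outside this header and assumed here; includes and error handling are omitted.

    struct xflow_stats stats;

    if (!ioctl(dp_fd, XFLOW_DP_STATS, &stats)) {
        printf("%"PRIu32" flows: %"PRIu64" hits, %"PRIu64" misses, %"PRIu64" lost\n",
               stats.n_flows, stats.n_hit, stats.n_missed, stats.n_lost);
    }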
lib/netdev-provider.h \
lib/netdev.c \
lib/netdev.h \
- lib/odp-util.c \
- lib/odp-util.h \
+ lib/ofp-parse.c \
+ lib/ofp-parse.h \
lib/ofp-print.c \
lib/ofp-print.h \
lib/ofp-util.c \
--- /dev/null
+/*
+ * Copyright (c) 2010 Nicira Networks.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <config.h>
+
+#include "ofp-parse.h"
+
+#include <errno.h>
+#include <stdlib.h>
+
+#include "netdev.h"
+#include "ofp-util.h"
+#include "ofpbuf.h"
+#include "openflow/openflow.h"
+#include "packets.h"
+#include "socket-util.h"
+#include "vconn.h"
+#include "vlog.h"
+
+
+VLOG_DEFINE_THIS_MODULE(ofp_parse)
+
+#define DEFAULT_IDLE_TIMEOUT 60
+
+static uint32_t
+str_to_u32(const char *str)
+{
+ char *tail;
+ uint32_t value;
+
+ errno = 0;
+ value = strtoul(str, &tail, 0);
+ if (errno == EINVAL || errno == ERANGE || *tail) {
+ ovs_fatal(0, "invalid numeric format %s", str);
+ }
+ return value;
+}
+
+static uint64_t
+str_to_u64(const char *str)
+{
+ char *tail;
+ uint64_t value;
+
+ errno = 0;
+ value = strtoull(str, &tail, 0);
+ if (errno == EINVAL || errno == ERANGE || *tail) {
+ ovs_fatal(0, "invalid numeric format %s", str);
+ }
+ return value;
+}
+
+static void
+str_to_mac(const char *str, uint8_t mac[6])
+{
+ if (sscanf(str, ETH_ADDR_SCAN_FMT, ETH_ADDR_SCAN_ARGS(mac))
+ != ETH_ADDR_SCAN_COUNT) {
+ ovs_fatal(0, "invalid mac address %s", str);
+ }
+}
+
+static uint32_t
+str_to_ip(const char *str_, uint32_t *ip)
+{
+ char *str = xstrdup(str_);
+ char *save_ptr = NULL;
+ const char *name, *netmask;
+ struct in_addr in_addr;
+ int n_wild, retval;
+
+ name = strtok_r(str, "/", &save_ptr);
+ retval = name ? lookup_ip(name, &in_addr) : EINVAL;
+ if (retval) {
+ ovs_fatal(0, "%s: could not convert to IP address", str);
+ }
+ *ip = in_addr.s_addr;
+
+ netmask = strtok_r(NULL, "/", &save_ptr);
+ if (netmask) {
+ uint8_t o[4];
+ if (sscanf(netmask, "%"SCNu8".%"SCNu8".%"SCNu8".%"SCNu8,
+ &o[0], &o[1], &o[2], &o[3]) == 4) {
+ uint32_t nm = (o[0] << 24) | (o[1] << 16) | (o[2] << 8) | o[3];
+ int i;
+
+ /* Find first 1-bit. */
+ for (i = 0; i < 32; i++) {
+ if (nm & (1u << i)) {
+ break;
+ }
+ }
+ n_wild = i;
+
+ /* Verify that the rest of the bits are 1-bits. */
+ for (; i < 32; i++) {
+ if (!(nm & (1u << i))) {
+ ovs_fatal(0, "%s: %s is not a valid netmask",
+ str, netmask);
+ }
+ }
+ } else {
+ int prefix = atoi(netmask);
+ if (prefix <= 0 || prefix > 32) {
+ ovs_fatal(0, "%s: network prefix bits not between 1 and 32",
+ str);
+ }
+ n_wild = 32 - prefix;
+ }
+ } else {
+ n_wild = 0;
+ }
+
+ free(str);
+ return n_wild;
+}
+
+static void *
+put_action(struct ofpbuf *b, size_t size, uint16_t type)
+{
+ struct ofp_action_header *ah = ofpbuf_put_zeros(b, size);
+ ah->type = htons(type);
+ ah->len = htons(size);
+ return ah;
+}
+
+static struct ofp_action_output *
+put_output_action(struct ofpbuf *b, uint16_t port)
+{
+ struct ofp_action_output *oao = put_action(b, sizeof *oao, OFPAT_OUTPUT);
+ oao->port = htons(port);
+ return oao;
+}
+
+static void
+put_enqueue_action(struct ofpbuf *b, uint16_t port, uint32_t queue)
+{
+ struct ofp_action_enqueue *oae = put_action(b, sizeof *oae, OFPAT_ENQUEUE);
+ oae->port = htons(port);
+ oae->queue_id = htonl(queue);
+}
+
+static void
+put_dl_addr_action(struct ofpbuf *b, uint16_t type, const char *addr)
+{
+ struct ofp_action_dl_addr *oada = put_action(b, sizeof *oada, type);
+ str_to_mac(addr, oada->dl_addr);
+}
+
+
+static bool
+parse_port_name(const char *name, uint16_t *port)
+{
+ struct pair {
+ const char *name;
+ uint16_t value;
+ };
+ static const struct pair pairs[] = {
+#define DEF_PAIR(NAME) {#NAME, OFPP_##NAME}
+ DEF_PAIR(IN_PORT),
+ DEF_PAIR(TABLE),
+ DEF_PAIR(NORMAL),
+ DEF_PAIR(FLOOD),
+ DEF_PAIR(ALL),
+ DEF_PAIR(CONTROLLER),
+ DEF_PAIR(LOCAL),
+ DEF_PAIR(NONE),
+#undef DEF_PAIR
+ };
+ static const int n_pairs = ARRAY_SIZE(pairs);
+ size_t i;
+
+ for (i = 0; i < n_pairs; i++) {
+ if (!strcasecmp(name, pairs[i].name)) {
+ *port = pairs[i].value;
+ return true;
+ }
+ }
+ return false;
+}
+
+static void
+str_to_action(char *str, struct ofpbuf *b)
+{
+ char *act, *arg;
+ char *saveptr = NULL;
+ bool drop = false;
+ int n_actions;
+
+ for (act = strtok_r(str, ", \t\r\n", &saveptr), n_actions = 0; act;
+ act = strtok_r(NULL, ", \t\r\n", &saveptr), n_actions++)
+ {
+ uint16_t port;
+
+ if (drop) {
+ ovs_fatal(0, "Drop actions must not be followed by other actions");
+ }
+
+ /* Arguments are separated by colons */
+ arg = strchr(act, ':');
+ if (arg) {
+ *arg = '\0';
+ arg++;
+ }
+
+ if (!strcasecmp(act, "mod_vlan_vid")) {
+ struct ofp_action_vlan_vid *va;
+ va = put_action(b, sizeof *va, OFPAT_SET_VLAN_VID);
+ va->vlan_vid = htons(str_to_u32(arg));
+ } else if (!strcasecmp(act, "mod_vlan_pcp")) {
+ struct ofp_action_vlan_pcp *va;
+ va = put_action(b, sizeof *va, OFPAT_SET_VLAN_PCP);
+ va->vlan_pcp = str_to_u32(arg);
+ } else if (!strcasecmp(act, "strip_vlan")) {
+ struct ofp_action_header *ah;
+ ah = put_action(b, sizeof *ah, OFPAT_STRIP_VLAN);
+ ah->type = htons(OFPAT_STRIP_VLAN);
+ } else if (!strcasecmp(act, "mod_dl_src")) {
+ put_dl_addr_action(b, OFPAT_SET_DL_SRC, arg);
+ } else if (!strcasecmp(act, "mod_dl_dst")) {
+ put_dl_addr_action(b, OFPAT_SET_DL_DST, arg);
+ } else if (!strcasecmp(act, "mod_nw_src")) {
+ struct ofp_action_nw_addr *na;
+ na = put_action(b, sizeof *na, OFPAT_SET_NW_SRC);
+ str_to_ip(arg, &na->nw_addr);
+ } else if (!strcasecmp(act, "mod_nw_dst")) {
+ struct ofp_action_nw_addr *na;
+ na = put_action(b, sizeof *na, OFPAT_SET_NW_DST);
+ str_to_ip(arg, &na->nw_addr);
+ } else if (!strcasecmp(act, "mod_tp_src")) {
+ struct ofp_action_tp_port *ta;
+ ta = put_action(b, sizeof *ta, OFPAT_SET_TP_SRC);
+ ta->tp_port = htons(str_to_u32(arg));
+ } else if (!strcasecmp(act, "mod_tp_dst")) {
+ struct ofp_action_tp_port *ta;
+ ta = put_action(b, sizeof *ta, OFPAT_SET_TP_DST);
+ ta->tp_port = htons(str_to_u32(arg));
+ } else if (!strcasecmp(act, "mod_nw_tos")) {
+ struct ofp_action_nw_tos *nt;
+ nt = put_action(b, sizeof *nt, OFPAT_SET_NW_TOS);
+ nt->nw_tos = str_to_u32(arg);
+ } else if (!strcasecmp(act, "resubmit")) {
+ struct nx_action_resubmit *nar;
+ nar = put_action(b, sizeof *nar, OFPAT_VENDOR);
+ nar->vendor = htonl(NX_VENDOR_ID);
+ nar->subtype = htons(NXAST_RESUBMIT);
+ nar->in_port = htons(str_to_u32(arg));
+ } else if (!strcasecmp(act, "set_tunnel")) {
+ struct nx_action_set_tunnel *nast;
+ nast = put_action(b, sizeof *nast, OFPAT_VENDOR);
+ nast->vendor = htonl(NX_VENDOR_ID);
+ nast->subtype = htons(NXAST_SET_TUNNEL);
+ nast->tun_id = htonl(str_to_u32(arg));
+ } else if (!strcasecmp(act, "output")) {
+ put_output_action(b, str_to_u32(arg));
+ } else if (!strcasecmp(act, "enqueue")) {
+ char *sp = NULL;
+ char *port = strtok_r(arg, ":q", &sp);
+ char *queue = strtok_r(NULL, "", &sp);
+ if (port == NULL || queue == NULL) {
+ ovs_fatal(0, "\"enqueue\" syntax is \"enqueue:PORT:QUEUE\"");
+ }
+ put_enqueue_action(b, str_to_u32(port), str_to_u32(queue));
+ } else if (!strcasecmp(act, "drop")) {
+ /* In OpenFlow a drop is expressed by simply not specifying
+ * any actions. */
+ drop = true;
+ if (n_actions) {
+ ovs_fatal(0, "Drop actions must not be preceded by other "
+ "actions");
+ }
+ } else if (!strcasecmp(act, "CONTROLLER")) {
+ struct ofp_action_output *oao;
+ oao = put_output_action(b, OFPP_CONTROLLER);
+
+ /* Unless a numeric argument is specified, we send the whole
+ * packet to the controller. */
+ if (arg && (strspn(arg, "0123456789") == strlen(arg))) {
+ oao->max_len = htons(str_to_u32(arg));
+ } else {
+ oao->max_len = htons(UINT16_MAX);
+ }
+ } else if (parse_port_name(act, &port)) {
+ put_output_action(b, port);
+ } else if (strspn(act, "0123456789") == strlen(act)) {
+ put_output_action(b, str_to_u32(act));
+ } else {
+ ovs_fatal(0, "Unknown action: %s", act);
+ }
+ }
+}
+
+struct protocol {
+ const char *name;
+ uint16_t dl_type;
+ uint8_t nw_proto;
+};
+
+static bool
+parse_protocol(const char *name, const struct protocol **p_out)
+{
+ static const struct protocol protocols[] = {
+ { "ip", ETH_TYPE_IP, 0 },
+ { "arp", ETH_TYPE_ARP, 0 },
+ { "icmp", ETH_TYPE_IP, IP_TYPE_ICMP },
+ { "tcp", ETH_TYPE_IP, IP_TYPE_TCP },
+ { "udp", ETH_TYPE_IP, IP_TYPE_UDP },
+ };
+ const struct protocol *p;
+
+ for (p = protocols; p < &protocols[ARRAY_SIZE(protocols)]; p++) {
+ if (!strcmp(p->name, name)) {
+ *p_out = p;
+ return true;
+ }
+ }
+ *p_out = NULL;
+ return false;
+}
+
+struct field {
+ const char *name;
+ uint32_t wildcard;
+ enum { F_U8, F_U16, F_MAC, F_IP } type;
+ size_t offset, shift;
+};
+
+static bool
+parse_field(const char *name, const struct field **f_out)
+{
+#define F_OFS(MEMBER) offsetof(struct ofp_match, MEMBER)
+ static const struct field fields[] = {
+ { "in_port", OFPFW_IN_PORT, F_U16, F_OFS(in_port), 0 },
+ { "dl_vlan", OFPFW_DL_VLAN, F_U16, F_OFS(dl_vlan), 0 },
+ { "dl_vlan_pcp", OFPFW_DL_VLAN_PCP, F_U8, F_OFS(dl_vlan_pcp), 0 },
+ { "dl_src", OFPFW_DL_SRC, F_MAC, F_OFS(dl_src), 0 },
+ { "dl_dst", OFPFW_DL_DST, F_MAC, F_OFS(dl_dst), 0 },
+ { "dl_type", OFPFW_DL_TYPE, F_U16, F_OFS(dl_type), 0 },
+ { "nw_src", OFPFW_NW_SRC_MASK, F_IP,
+ F_OFS(nw_src), OFPFW_NW_SRC_SHIFT },
+ { "nw_dst", OFPFW_NW_DST_MASK, F_IP,
+ F_OFS(nw_dst), OFPFW_NW_DST_SHIFT },
+ { "nw_proto", OFPFW_NW_PROTO, F_U8, F_OFS(nw_proto), 0 },
+ { "nw_tos", OFPFW_NW_TOS, F_U8, F_OFS(nw_tos), 0 },
+ { "tp_src", OFPFW_TP_SRC, F_U16, F_OFS(tp_src), 0 },
+ { "tp_dst", OFPFW_TP_DST, F_U16, F_OFS(tp_dst), 0 },
+ { "icmp_type", OFPFW_ICMP_TYPE, F_U16, F_OFS(icmp_type), 0 },
+ { "icmp_code", OFPFW_ICMP_CODE, F_U16, F_OFS(icmp_code), 0 }
+ };
+ const struct field *f;
+
+ for (f = fields; f < &fields[ARRAY_SIZE(fields)]; f++) {
+ if (!strcmp(f->name, name)) {
+ *f_out = f;
+ return true;
+ }
+ }
+ *f_out = NULL;
+ return false;
+}
+
+/* Convert 'string' (as described in the Flow Syntax section of the
+ * ovs-ofctl man page) into 'match'. The other arguments are optional
+ * and may be NULL if their value is not needed. If 'actions' is
+ * specified, an action must be in 'string' and may be expanded or
+ * reallocated. */
+void
+parse_ofp_str(char *string, struct ofp_match *match, struct ofpbuf *actions,
+ uint8_t *table_idx, uint16_t *out_port, uint16_t *priority,
+ uint16_t *idle_timeout, uint16_t *hard_timeout,
+ uint64_t *cookie)
+{
+ struct ofp_match normalized;
+ char *save_ptr = NULL;
+ char *name;
+ uint32_t wildcards;
+
+ if (table_idx) {
+ *table_idx = 0xff;
+ }
+ if (out_port) {
+ *out_port = OFPP_NONE;
+ }
+ if (priority) {
+ *priority = OFP_DEFAULT_PRIORITY;
+ }
+ if (idle_timeout) {
+ *idle_timeout = DEFAULT_IDLE_TIMEOUT;
+ }
+ if (hard_timeout) {
+ *hard_timeout = OFP_FLOW_PERMANENT;
+ }
+ if (cookie) {
+ *cookie = 0;
+ }
+ if (actions) {
+ char *act_str = strstr(string, "action");
+ if (!act_str) {
+ ovs_fatal(0, "must specify an action");
+ }
+ *act_str = '\0';
+
+ act_str = strchr(act_str + 1, '=');
+ if (!act_str) {
+ ovs_fatal(0, "must specify an action");
+ }
+
+ act_str++;
+
+ str_to_action(act_str, actions);
+ }
+ memset(match, 0, sizeof *match);
+ wildcards = OFPFW_ALL;
+ for (name = strtok_r(string, "=, \t\r\n", &save_ptr); name;
+ name = strtok_r(NULL, "=, \t\r\n", &save_ptr)) {
+ const struct protocol *p;
+
+ if (parse_protocol(name, &p)) {
+ wildcards &= ~OFPFW_DL_TYPE;
+ match->dl_type = htons(p->dl_type);
+ if (p->nw_proto) {
+ wildcards &= ~OFPFW_NW_PROTO;
+ match->nw_proto = p->nw_proto;
+ }
+ } else {
+ const struct field *f;
+ char *value;
+
+ value = strtok_r(NULL, ", \t\r\n", &save_ptr);
+ if (!value) {
+ ovs_fatal(0, "field %s missing value", name);
+ }
+
+ if (table_idx && !strcmp(name, "table")) {
+ *table_idx = atoi(value);
++ if (*table_idx > 31) {
++ ovs_fatal(0, "table %s is invalid, "
++ "must be between 0 and 31", value);
++ }
+ } else if (out_port && !strcmp(name, "out_port")) {
+ *out_port = atoi(value);
+ } else if (priority && !strcmp(name, "priority")) {
+ *priority = atoi(value);
+ } else if (idle_timeout && !strcmp(name, "idle_timeout")) {
+ *idle_timeout = atoi(value);
+ } else if (hard_timeout && !strcmp(name, "hard_timeout")) {
+ *hard_timeout = atoi(value);
+ } else if (cookie && !strcmp(name, "cookie")) {
+ *cookie = str_to_u64(value);
+ } else if (!strcmp(name, "tun_id_wild")) {
+ wildcards |= NXFW_TUN_ID;
+ } else if (parse_field(name, &f)) {
+ void *data = (char *) match + f->offset;
+ if (!strcmp(value, "*") || !strcmp(value, "ANY")) {
+ wildcards |= f->wildcard;
+ } else {
+ wildcards &= ~f->wildcard;
+ if (f->wildcard == OFPFW_IN_PORT
+ && parse_port_name(value, (uint16_t *) data)) {
+ /* Nothing to do. */
+ } else if (f->type == F_U8) {
+ *(uint8_t *) data = str_to_u32(value);
+ } else if (f->type == F_U16) {
+ *(uint16_t *) data = htons(str_to_u32(value));
+ } else if (f->type == F_MAC) {
+ str_to_mac(value, data);
+ } else if (f->type == F_IP) {
+ wildcards |= str_to_ip(value, data) << f->shift;
+ } else {
+ NOT_REACHED();
+ }
+ }
+ } else {
+ ovs_fatal(0, "unknown keyword %s", name);
+ }
+ }
+ }
+ match->wildcards = htonl(wildcards);
+
+ normalized = *match;
+ normalize_match(&normalized);
+ if (memcmp(match, &normalized, sizeof normalized)) {
+ char *old = ofp_match_to_literal_string(match);
+ char *new = ofp_match_to_literal_string(&normalized);
+ VLOG_WARN("The specified flow is not in normal form:");
+ VLOG_WARN(" as specified: %s", old);
+ VLOG_WARN("as normalized: %s", new);
+ free(old);
+ free(new);
+ }
+}
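As a hypothetical caller (sketch only; the flow string and buffer size are illustrative), a tool that wants both a match and actions would pass a writable string plus an ofpbuf:

    struct ofp_match match;
    struct ofpbuf actions;
    uint16_t priority, idle_timeout, hard_timeout;
    char flow[] = "tcp,nw_dst=10.0.0.1,priority=100,idle_timeout=30,"
                  "actions=mod_nw_tos:32,output:2";

    ofpbuf_init(&actions, 64);
    parse_ofp_str(flow, &match, &actions, NULL, NULL,
                  &priority, &idle_timeout, &hard_timeout, NULL);
    /* 'match', 'priority', the timeouts, and the actions in 'actions.data'
     * are now ready to be packed into an OFPT_FLOW_MOD request. */
    ofpbuf_uninit(&actions);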
VLOG_MODULE(netflow)
VLOG_MODULE(netlink)
VLOG_MODULE(ofctl)
+VLOG_MODULE(ofp_parse)
VLOG_MODULE(ofp_util)
- VLOG_MODULE(ovs_discover)
VLOG_MODULE(ofproto)
VLOG_MODULE(openflowd)
+ VLOG_MODULE(ovs_discover)
VLOG_MODULE(ovsdb_client)
VLOG_MODULE(ovsdb_error)
VLOG_MODULE(ovsdb_file)
--- /dev/null
- uint8_t ip_tos; /* IP TOS value. */
+ /*
+ * Copyright (c) 2009, 2010 Nicira Networks.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ #include <config.h>
+ #include "xfif.h"
+
+ #include <assert.h>
+ #include <ctype.h>
+ #include <errno.h>
+ #include <fcntl.h>
+ #include <inttypes.h>
+ #include <netinet/in.h>
+ #include <sys/socket.h>
+ #include <net/if.h>
+ #include <stdlib.h>
+ #include <string.h>
+ #include <sys/ioctl.h>
+ #include <sys/stat.h>
+ #include <unistd.h>
+
+ #include "csum.h"
+ #include "flow.h"
+ #include "hmap.h"
+ #include "list.h"
+ #include "netdev.h"
+ #include "xflow-util.h"
+ #include "ofp-print.h"
+ #include "ofpbuf.h"
+ #include "packets.h"
+ #include "poll-loop.h"
+ #include "queue.h"
+ #include "timeval.h"
+ #include "util.h"
+ #include "vlog.h"
+ #include "xfif-provider.h"
+
+ VLOG_DEFINE_THIS_MODULE(xfif_netdev)
+
+ /* Configuration parameters. */
+ enum { N_QUEUES = 2 }; /* Number of queues for xfif_recv(). */
+ enum { MAX_QUEUE_LEN = 100 }; /* Maximum number of packets per queue. */
+ enum { N_GROUPS = 16 }; /* Number of port groups. */
+ enum { MAX_PORTS = 256 }; /* Maximum number of ports. */
+ enum { MAX_FLOWS = 65536 }; /* Maximum number of flows in flow table. */
+
+ /* Enough headroom to add a vlan tag, plus an extra 2 bytes to allow IP
+ * headers to be aligned on a 4-byte boundary. */
+ enum { XF_NETDEV_HEADROOM = 2 + VLAN_HEADER_LEN };
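(The Ethernet header is 14 bytes, so the extra 2 bytes of headroom place the IP header at a 16-byte, i.e. 4-byte-aligned, offset from the start of the buffer.)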
+
+ /* Datapath based on the network device interface from netdev.h. */
+ struct xf_netdev {
+ struct list node;
+ int xf_idx;
+ int open_cnt;
+ bool destroyed;
+
+ bool drop_frags; /* Drop all IP fragments, if true. */
+ struct ovs_queue queues[N_QUEUES]; /* Messages queued for xfif_recv(). */
+ struct hmap flow_table; /* Flow table. */
+ struct xflow_port_group groups[N_GROUPS];
+
+ /* Statistics. */
+ long long int n_frags; /* Number of dropped IP fragments. */
+ long long int n_hit; /* Number of flow table matches. */
+ long long int n_missed; /* Number of flow table misses. */
+ long long int n_lost; /* Number of misses not passed to client. */
+
+ /* Ports. */
+ int n_ports;
+ struct xf_netdev_port *ports[MAX_PORTS];
+ struct list port_list;
+ unsigned int serial;
+ };
+
+ /* A port in a netdev-based datapath. */
+ struct xf_netdev_port {
+ int port_no; /* Index into xf_netdev's 'ports'. */
+ struct list node; /* Element in xf_netdev's 'port_list'. */
+ struct netdev *netdev;
+ bool internal; /* Internal port (as XFLOW_PORT_INTERNAL)? */
+ };
+
+ /* A flow in xf_netdev's 'flow_table'. */
+ struct xf_netdev_flow {
+ struct hmap_node node; /* Element in xf_netdev's 'flow_table'. */
+ struct xflow_key key;
+
+ /* Statistics. */
+ struct timespec used; /* Last used time. */
+ long long int packet_count; /* Number of packets matched. */
+ long long int byte_count; /* Number of bytes matched. */
- xflow_flow->stats.ip_tos = flow->ip_tos;
+ uint16_t tcp_ctl; /* Bitwise-OR of seen tcp_ctl values. */
+
+ /* Actions. */
+ union xflow_action *actions;
+ unsigned int n_actions;
+ };
+
+ /* Interface to netdev-based datapath. */
+ struct xfif_netdev {
+ struct xfif xfif;
+ struct xf_netdev *xf;
+ int listen_mask;
+ unsigned int xf_serial;
+ };
+
+ /* All netdev-based datapaths. */
+ static struct xf_netdev *xf_netdevs[256];
+ struct list xf_netdev_list = LIST_INITIALIZER(&xf_netdev_list);
+ enum { N_XF_NETDEVS = ARRAY_SIZE(xf_netdevs) };
+
+ /* Maximum port MTU seen so far. */
+ static int max_mtu = ETH_PAYLOAD_MAX;
+
+ static int get_port_by_number(struct xf_netdev *, uint16_t port_no,
+ struct xf_netdev_port **portp);
+ static int get_port_by_name(struct xf_netdev *, const char *devname,
+ struct xf_netdev_port **portp);
+ static void xf_netdev_free(struct xf_netdev *);
+ static void xf_netdev_flow_flush(struct xf_netdev *);
+ static int do_add_port(struct xf_netdev *, const char *devname, uint16_t flags,
+ uint16_t port_no);
+ static int do_del_port(struct xf_netdev *, uint16_t port_no);
+ static int xf_netdev_output_control(struct xf_netdev *, const struct ofpbuf *,
+ int queue_no, int port_no, uint32_t arg);
+ static int xf_netdev_execute_actions(struct xf_netdev *,
+ struct ofpbuf *, struct xflow_key *,
+ const union xflow_action *, int n);
+
+ static struct xfif_netdev *
+ xfif_netdev_cast(const struct xfif *xfif)
+ {
+ xfif_assert_class(xfif, &xfif_netdev_class);
+ return CONTAINER_OF(xfif, struct xfif_netdev, xfif);
+ }
+
+ static struct xf_netdev *
+ get_xf_netdev(const struct xfif *xfif)
+ {
+ return xfif_netdev_cast(xfif)->xf;
+ }
+
+ static int
+ name_to_xf_idx(const char *name)
+ {
+ if (!strncmp(name, "xf", 2) && isdigit((unsigned char)name[2])) {
+ int xf_idx = atoi(name + 2);
+ if (xf_idx >= 0 && xf_idx < N_XF_NETDEVS) {
+ return xf_idx;
+ }
+ }
+ return -1;
+ }
+
+ static struct xf_netdev *
+ find_xf_netdev(const char *name)
+ {
+ int xf_idx;
+ size_t i;
+
+ xf_idx = name_to_xf_idx(name);
+ if (xf_idx >= 0) {
+ return xf_netdevs[xf_idx];
+ }
+
+ for (i = 0; i < N_XF_NETDEVS; i++) {
+ struct xf_netdev *xf = xf_netdevs[i];
+ if (xf) {
+ struct xf_netdev_port *port;
+ if (!get_port_by_name(xf, name, &port)) {
+ return xf;
+ }
+ }
+ }
+ return NULL;
+ }
+
+ static struct xfif *
+ create_xfif_netdev(struct xf_netdev *xf)
+ {
+ struct xfif_netdev *xfif;
+ char *xfname;
+
+ xf->open_cnt++;
+
+ xfname = xasprintf("xf%d", xf->xf_idx);
+ xfif = xmalloc(sizeof *xfif);
+ xfif_init(&xfif->xfif, &xfif_netdev_class, xfname, xf->xf_idx, xf->xf_idx);
+ xfif->xf = xf;
+ xfif->listen_mask = 0;
+ xfif->xf_serial = xf->serial;
+ free(xfname);
+
+ return &xfif->xfif;
+ }
+
+ static int
+ create_xf_netdev(const char *name, int xf_idx, struct xfif **xfifp)
+ {
+ struct xf_netdev *xf;
+ int error;
+ int i;
+
+ if (xf_netdevs[xf_idx]) {
+ return EBUSY;
+ }
+
+ /* Create datapath. */
+ xf_netdevs[xf_idx] = xf = xzalloc(sizeof *xf);
+ list_push_back(&xf_netdev_list, &xf->node);
+ xf->xf_idx = xf_idx;
+ xf->open_cnt = 0;
+ xf->drop_frags = false;
+ for (i = 0; i < N_QUEUES; i++) {
+ queue_init(&xf->queues[i]);
+ }
+ hmap_init(&xf->flow_table);
+ for (i = 0; i < N_GROUPS; i++) {
+ xf->groups[i].ports = NULL;
+ xf->groups[i].n_ports = 0;
+ xf->groups[i].group = i;
+ }
+ list_init(&xf->port_list);
+ error = do_add_port(xf, name, XFLOW_PORT_INTERNAL, XFLOWP_LOCAL);
+ if (error) {
+ xf_netdev_free(xf);
+ return ENODEV;
+ }
+
+ *xfifp = create_xfif_netdev(xf);
+ return 0;
+ }
+
+ static int
+ xfif_netdev_open(const char *name, const char *type OVS_UNUSED, bool create,
+ struct xfif **xfifp)
+ {
+ if (create) {
+ if (find_xf_netdev(name)) {
+ return EEXIST;
+ } else {
+ int xf_idx = name_to_xf_idx(name);
+ if (xf_idx >= 0) {
+ return create_xf_netdev(name, xf_idx, xfifp);
+ } else {
+ /* Scan for unused xf_idx number. */
+ for (xf_idx = 0; xf_idx < N_XF_NETDEVS; xf_idx++) {
+ int error = create_xf_netdev(name, xf_idx, xfifp);
+ if (error != EBUSY) {
+ return error;
+ }
+ }
+
+ /* All datapath numbers in use. */
+ return ENOBUFS;
+ }
+ }
+ } else {
+ struct xf_netdev *xf = find_xf_netdev(name);
+ if (xf) {
+ *xfifp = create_xfif_netdev(xf);
+ return 0;
+ } else {
+ return ENODEV;
+ }
+ }
+ }
+
+ static void
+ xf_netdev_free(struct xf_netdev *xf)
+ {
+ int i;
+
+ xf_netdev_flow_flush(xf);
+ while (xf->n_ports > 0) {
+ struct xf_netdev_port *port = CONTAINER_OF(
+ xf->port_list.next, struct xf_netdev_port, node);
+ do_del_port(xf, port->port_no);
+ }
+ for (i = 0; i < N_QUEUES; i++) {
+ queue_destroy(&xf->queues[i]);
+ }
+ hmap_destroy(&xf->flow_table);
+ for (i = 0; i < N_GROUPS; i++) {
+ free(xf->groups[i].ports);
+ }
+ xf_netdevs[xf->xf_idx] = NULL;
+ list_remove(&xf->node);
+ free(xf);
+ }
+
+ static void
+ xfif_netdev_close(struct xfif *xfif)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ assert(xf->open_cnt > 0);
+ if (--xf->open_cnt == 0 && xf->destroyed) {
+ xf_netdev_free(xf);
+ }
+ free(xfif);
+ }
+
+ static int
+ xfif_netdev_destroy(struct xfif *xfif)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ xf->destroyed = true;
+ return 0;
+ }
+
+ static int
+ xfif_netdev_get_stats(const struct xfif *xfif, struct xflow_stats *stats)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ memset(stats, 0, sizeof *stats);
+ stats->n_flows = hmap_count(&xf->flow_table);
+ stats->cur_capacity = hmap_capacity(&xf->flow_table);
+ stats->max_capacity = MAX_FLOWS;
+ stats->n_ports = xf->n_ports;
+ stats->max_ports = MAX_PORTS;
+ stats->max_groups = N_GROUPS;
+ stats->n_frags = xf->n_frags;
+ stats->n_hit = xf->n_hit;
+ stats->n_missed = xf->n_missed;
+ stats->n_lost = xf->n_lost;
+ stats->max_miss_queue = MAX_QUEUE_LEN;
+ stats->max_action_queue = MAX_QUEUE_LEN;
+ return 0;
+ }
+
+ static int
+ xfif_netdev_get_drop_frags(const struct xfif *xfif, bool *drop_fragsp)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ *drop_fragsp = xf->drop_frags;
+ return 0;
+ }
+
+ static int
+ xfif_netdev_set_drop_frags(struct xfif *xfif, bool drop_frags)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ xf->drop_frags = drop_frags;
+ return 0;
+ }
+
+ static int
+ do_add_port(struct xf_netdev *xf, const char *devname, uint16_t flags,
+ uint16_t port_no)
+ {
+ bool internal = (flags & XFLOW_PORT_INTERNAL) != 0;
+ struct xf_netdev_port *port;
+ struct netdev_options netdev_options;
+ struct netdev *netdev;
+ int mtu;
+ int error;
+
+ /* XXX reject devices already in some xf_netdev. */
+
+ /* Open and validate network device. */
+ memset(&netdev_options, 0, sizeof netdev_options);
+ netdev_options.name = devname;
+ netdev_options.ethertype = NETDEV_ETH_TYPE_ANY;
+ if (internal) {
+ netdev_options.type = "tap";
+ }
+
+ error = netdev_open(&netdev_options, &netdev);
+ if (error) {
+ return error;
+ }
+ /* XXX reject loopback devices */
+ /* XXX reject non-Ethernet devices */
+
+ error = netdev_turn_flags_on(netdev, NETDEV_PROMISC, false);
+ if (error) {
+ netdev_close(netdev);
+ return error;
+ }
+
+ port = xmalloc(sizeof *port);
+ port->port_no = port_no;
+ port->netdev = netdev;
+ port->internal = internal;
+
+ netdev_get_mtu(netdev, &mtu);
+ if (mtu > max_mtu) {
+ max_mtu = mtu;
+ }
+
+ list_push_back(&xf->port_list, &port->node);
+ xf->ports[port_no] = port;
+ xf->n_ports++;
+ xf->serial++;
+
+ return 0;
+ }
+
+ static int
+ xfif_netdev_port_add(struct xfif *xfif, const char *devname, uint16_t flags,
+ uint16_t *port_nop)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ int port_no;
+
+ for (port_no = 0; port_no < MAX_PORTS; port_no++) {
+ if (!xf->ports[port_no]) {
+ *port_nop = port_no;
+ return do_add_port(xf, devname, flags, port_no);
+ }
+ }
+ return EFBIG;
+ }
+
+ static int
+ xfif_netdev_port_del(struct xfif *xfif, uint16_t port_no)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ return port_no == XFLOWP_LOCAL ? EINVAL : do_del_port(xf, port_no);
+ }
+
+ static bool
+ is_valid_port_number(uint16_t port_no)
+ {
+ return port_no < MAX_PORTS;
+ }
+
+ static int
+ get_port_by_number(struct xf_netdev *xf,
+ uint16_t port_no, struct xf_netdev_port **portp)
+ {
+ if (!is_valid_port_number(port_no)) {
+ *portp = NULL;
+ return EINVAL;
+ } else {
+ *portp = xf->ports[port_no];
+ return *portp ? 0 : ENOENT;
+ }
+ }
+
+ static int
+ get_port_by_name(struct xf_netdev *xf,
+ const char *devname, struct xf_netdev_port **portp)
+ {
+ struct xf_netdev_port *port;
+
+ LIST_FOR_EACH (port, struct xf_netdev_port, node, &xf->port_list) {
+ if (!strcmp(netdev_get_name(port->netdev), devname)) {
+ *portp = port;
+ return 0;
+ }
+ }
+ return ENOENT;
+ }
+
+ static int
+ do_del_port(struct xf_netdev *xf, uint16_t port_no)
+ {
+ struct xf_netdev_port *port;
+ char *name;
+ int error;
+
+ error = get_port_by_number(xf, port_no, &port);
+ if (error) {
+ return error;
+ }
+
+ list_remove(&port->node);
+ xf->ports[port->port_no] = NULL;
+ xf->n_ports--;
+ xf->serial++;
+
+ name = xstrdup(netdev_get_name(port->netdev));
+ netdev_close(port->netdev);
+
+ free(name);
+ free(port);
+
+ return 0;
+ }
+
+ static void
+ answer_port_query(const struct xf_netdev_port *port, struct xflow_port *xflow_port)
+ {
+ memset(xflow_port, 0, sizeof *xflow_port);
+ ovs_strlcpy(xflow_port->devname, netdev_get_name(port->netdev),
+ sizeof xflow_port->devname);
+ xflow_port->port = port->port_no;
+ xflow_port->flags = port->internal ? XFLOW_PORT_INTERNAL : 0;
+ }
+
+ static int
+ xfif_netdev_port_query_by_number(const struct xfif *xfif, uint16_t port_no,
+ struct xflow_port *xflow_port)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ struct xf_netdev_port *port;
+ int error;
+
+ error = get_port_by_number(xf, port_no, &port);
+ if (!error) {
+ answer_port_query(port, xflow_port);
+ }
+ return error;
+ }
+
+ static int
+ xfif_netdev_port_query_by_name(const struct xfif *xfif, const char *devname,
+ struct xflow_port *xflow_port)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ struct xf_netdev_port *port;
+ int error;
+
+ error = get_port_by_name(xf, devname, &port);
+ if (!error) {
+ answer_port_query(port, xflow_port);
+ }
+ return error;
+ }
+
+ static void
+ xf_netdev_free_flow(struct xf_netdev *xf, struct xf_netdev_flow *flow)
+ {
+ hmap_remove(&xf->flow_table, &flow->node);
+ free(flow->actions);
+ free(flow);
+ }
+
+ static void
+ xf_netdev_flow_flush(struct xf_netdev *xf)
+ {
+ struct xf_netdev_flow *flow, *next;
+
+ HMAP_FOR_EACH_SAFE (flow, next, struct xf_netdev_flow, node,
+ &xf->flow_table) {
+ xf_netdev_free_flow(xf, flow);
+ }
+ }
+
+ static int
+ xfif_netdev_flow_flush(struct xfif *xfif)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ xf_netdev_flow_flush(xf);
+ return 0;
+ }
+
+ static int
+ xfif_netdev_port_list(const struct xfif *xfif, struct xflow_port *ports, int n)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ struct xf_netdev_port *port;
+ int i;
+
+ i = 0;
+ LIST_FOR_EACH (port, struct xf_netdev_port, node, &xf->port_list) {
+ struct xflow_port *xflow_port = &ports[i];
+ if (i >= n) {
+ break;
+ }
+ answer_port_query(port, xflow_port);
+ i++;
+ }
+ return xf->n_ports;
+ }
+
+ static int
+ xfif_netdev_port_poll(const struct xfif *xfif_, char **devnamep OVS_UNUSED)
+ {
+ struct xfif_netdev *xfif = xfif_netdev_cast(xfif_);
+ if (xfif->xf_serial != xfif->xf->serial) {
+ xfif->xf_serial = xfif->xf->serial;
+ return ENOBUFS;
+ } else {
+ return EAGAIN;
+ }
+ }
+
+ static void
+ xfif_netdev_port_poll_wait(const struct xfif *xfif_)
+ {
+ struct xfif_netdev *xfif = xfif_netdev_cast(xfif_);
+ if (xfif->xf_serial != xfif->xf->serial) {
+ poll_immediate_wake();
+ }
+ }
+
+ static int
+ get_port_group(const struct xfif *xfif, int group_no,
+ struct xflow_port_group **groupp)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+
+ if (group_no >= 0 && group_no < N_GROUPS) {
+ *groupp = &xf->groups[group_no];
+ return 0;
+ } else {
+ *groupp = NULL;
+ return EINVAL;
+ }
+ }
+
+ static int
+ xfif_netdev_port_group_get(const struct xfif *xfif, int group_no,
+ uint16_t ports[], int n)
+ {
+ struct xflow_port_group *group;
+ int error;
+
+ if (n < 0) {
+ return -EINVAL;
+ }
+
+ error = get_port_group(xfif, group_no, &group);
+ if (!error) {
+ memcpy(ports, group->ports, MIN(n, group->n_ports) * sizeof *ports);
+ return group->n_ports;
+ } else {
+ return -error;
+ }
+ }
+
+ static int
+ xfif_netdev_port_group_set(struct xfif *xfif, int group_no,
+ const uint16_t ports[], int n)
+ {
+ struct xflow_port_group *group;
+ int error;
+
+ if (n < 0 || n > MAX_PORTS) {
+ return EINVAL;
+ }
+
+ error = get_port_group(xfif, group_no, &group);
+ if (!error) {
+ free(group->ports);
+ group->ports = xmemdup(ports, n * sizeof *group->ports);
+ group->n_ports = n;
+ group->group = group_no;
+ }
+ return error;
+ }
+
+ static struct xf_netdev_flow *
+ xf_netdev_lookup_flow(const struct xf_netdev *xf,
+ const struct xflow_key *key)
+ {
+ struct xf_netdev_flow *flow;
+
+ HMAP_FOR_EACH_WITH_HASH (flow, struct xf_netdev_flow, node,
+ xflow_key_hash(key, 0), &xf->flow_table) {
+ if (xflow_key_equal(&flow->key, key)) {
+ return flow;
+ }
+ }
+ return NULL;
+ }
+
+ static void
+ answer_flow_query(struct xf_netdev_flow *flow, uint32_t query_flags,
+ struct xflow_flow *xflow_flow)
+ {
+ if (flow) {
+ xflow_flow->key = flow->key;
+ xflow_flow->stats.n_packets = flow->packet_count;
+ xflow_flow->stats.n_bytes = flow->byte_count;
+ xflow_flow->stats.used_sec = flow->used.tv_sec;
+ xflow_flow->stats.used_nsec = flow->used.tv_nsec;
+ xflow_flow->stats.tcp_flags = TCP_FLAGS(flow->tcp_ctl);
- flow->ip_tos = 0;
++ xflow_flow->stats.reserved = 0;
+ xflow_flow->stats.error = 0;
+ if (xflow_flow->n_actions > 0) {
+ unsigned int n = MIN(xflow_flow->n_actions, flow->n_actions);
+ memcpy(xflow_flow->actions, flow->actions,
+ n * sizeof *xflow_flow->actions);
+ xflow_flow->n_actions = flow->n_actions;
+ }
+
+ if (query_flags & XFLOWFF_ZERO_TCP_FLAGS) {
+ flow->tcp_ctl = 0;
+ }
+
+ } else {
+ xflow_flow->stats.error = ENOENT;
+ }
+ }
+
+ static int
+ xfif_netdev_flow_get(const struct xfif *xfif, struct xflow_flow flows[], int n)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ int i;
+
+ for (i = 0; i < n; i++) {
+ struct xflow_flow *xflow_flow = &flows[i];
+ answer_flow_query(xf_netdev_lookup_flow(xf, &xflow_flow->key),
+ xflow_flow->flags, xflow_flow);
+ }
+ return 0;
+ }
+
+ static int
+ xfif_netdev_validate_actions(const union xflow_action *actions, int n_actions,
+ bool *mutates)
+ {
+ unsigned int i;
+
+ *mutates = false;
+ for (i = 0; i < n_actions; i++) {
+ const union xflow_action *a = &actions[i];
+ switch (a->type) {
+ case XFLOWAT_OUTPUT:
+ if (a->output.port >= MAX_PORTS) {
+ return EINVAL;
+ }
+ break;
+
+ case XFLOWAT_OUTPUT_GROUP:
+ *mutates = true;
+ if (a->output_group.group >= N_GROUPS) {
+ return EINVAL;
+ }
+ break;
+
+ case XFLOWAT_CONTROLLER:
+ break;
+
+ case XFLOWAT_SET_DL_TCI:
+ *mutates = true;
+ if (a->dl_tci.mask != htons(VLAN_VID_MASK)
+ && a->dl_tci.mask != htons(VLAN_PCP_MASK)
+ && a->dl_tci.mask != htons(VLAN_VID_MASK | VLAN_PCP_MASK)) {
+ return EINVAL;
+ }
+ if (a->dl_tci.tci & ~a->dl_tci.mask){
+ return EINVAL;
+ }
+ break;
+
+ case XFLOWAT_SET_NW_TOS:
+ *mutates = true;
+ if (a->nw_tos.nw_tos & IP_ECN_MASK) {
+ return EINVAL;
+ }
+ break;
+
+ case XFLOWAT_STRIP_VLAN:
+ case XFLOWAT_SET_DL_SRC:
+ case XFLOWAT_SET_DL_DST:
+ case XFLOWAT_SET_NW_SRC:
+ case XFLOWAT_SET_NW_DST:
+ case XFLOWAT_SET_TP_SRC:
+ case XFLOWAT_SET_TP_DST:
+ *mutates = true;
+ break;
+
+ default:
+ return EOPNOTSUPP;
+ }
+ }
+ return 0;
+ }
+
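+ /* Validates the actions in 'xflow_flow' and, on success, copies them into
+ * 'flow', replacing any actions it previously had. Returns 0 on success,
+ * otherwise a positive errno value. */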
+ static int
+ set_flow_actions(struct xf_netdev_flow *flow, struct xflow_flow *xflow_flow)
+ {
+ size_t n_bytes;
+ bool mutates;
+ int error;
+
+ if (xflow_flow->n_actions >= 4096 / sizeof *xflow_flow->actions) {
+ return EINVAL;
+ }
+ error = xfif_netdev_validate_actions(xflow_flow->actions,
+ xflow_flow->n_actions, &mutates);
+ if (error) {
+ return error;
+ }
+
+ n_bytes = xflow_flow->n_actions * sizeof *flow->actions;
+ flow->actions = xrealloc(flow->actions, n_bytes);
+ flow->n_actions = xflow_flow->n_actions;
+ memcpy(flow->actions, xflow_flow->actions, n_bytes);
+ return 0;
+ }
+
+ static int
+ add_flow(struct xfif *xfif, struct xflow_flow *xflow_flow)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ struct xf_netdev_flow *flow;
+ int error;
+
+ flow = xzalloc(sizeof *flow);
+ flow->key = xflow_flow->key;
+
+ error = set_flow_actions(flow, xflow_flow);
+ if (error) {
+ free(flow);
+ return error;
+ }
+
+ hmap_insert(&xf->flow_table, &flow->node,
+ xflow_key_hash(&flow->key, 0));
+ return 0;
+ }
+
+ static void
+ clear_stats(struct xf_netdev_flow *flow)
+ {
+ flow->used.tv_sec = 0;
+ flow->used.tv_nsec = 0;
+ flow->packet_count = 0;
+ flow->byte_count = 0;
- if (key->dl_type == htons(ETH_TYPE_IP)) {
- struct ip_header *nh = packet->l3;
- flow->ip_tos = nh->ip_tos;
-
- if (key->nw_proto == IPPROTO_TCP) {
- struct tcp_header *th = packet->l4;
- flow->tcp_ctl |= th->tcp_ctl;
- }
+ flow->tcp_ctl = 0;
+ }
+
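+ /* Implements the flow table "put" operation: creates a new flow or modifies
+ * an existing one according to 'put->flags' (XFLOWPF_CREATE, XFLOWPF_MODIFY,
+ * XFLOWPF_ZERO_STATS), enforcing the MAX_FLOWS limit on new flows. */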
+ static int
+ xfif_netdev_flow_put(struct xfif *xfif, struct xflow_flow_put *put)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ struct xf_netdev_flow *flow;
+
+ flow = xf_netdev_lookup_flow(xf, &put->flow.key);
+ if (!flow) {
+ if (put->flags & XFLOWPF_CREATE) {
+ if (hmap_count(&xf->flow_table) < MAX_FLOWS) {
+ return add_flow(xfif, &put->flow);
+ } else {
+ return EFBIG;
+ }
+ } else {
+ return ENOENT;
+ }
+ } else {
+ if (put->flags & XFLOWPF_MODIFY) {
+ int error = set_flow_actions(flow, &put->flow);
+ if (!error && put->flags & XFLOWPF_ZERO_STATS) {
+ clear_stats(flow);
+ }
+ return error;
+ } else {
+ return EEXIST;
+ }
+ }
+ }
+
+ static int
+ xfif_netdev_flow_del(struct xfif *xfif, struct xflow_flow *xflow_flow)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ struct xf_netdev_flow *flow;
+
+ flow = xf_netdev_lookup_flow(xf, &xflow_flow->key);
+ if (flow) {
+ answer_flow_query(flow, 0, xflow_flow);
+ xf_netdev_free_flow(xf, flow);
+ return 0;
+ } else {
+ return ENOENT;
+ }
+ }
+
+ static int
+ xfif_netdev_flow_list(const struct xfif *xfif, struct xflow_flow flows[], int n)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ struct xf_netdev_flow *flow;
+ int i;
+
+ i = 0;
+ HMAP_FOR_EACH (flow, struct xf_netdev_flow, node, &xf->flow_table) {
+ if (i >= n) {
+ break;
+ }
+ answer_flow_query(flow, 0, &flows[i++]);
+ }
+ return hmap_count(&xf->flow_table);
+ }
+
+ static int
+ xfif_netdev_execute(struct xfif *xfif, uint16_t in_port,
+ const union xflow_action actions[], int n_actions,
+ const struct ofpbuf *packet)
+ {
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ struct ofpbuf copy;
+ bool mutates;
+ struct xflow_key key;
+ flow_t flow;
+ int error;
+
+ if (packet->size < ETH_HEADER_LEN || packet->size > UINT16_MAX) {
+ return EINVAL;
+ }
+
+ error = xfif_netdev_validate_actions(actions, n_actions, &mutates);
+ if (error) {
+ return error;
+ }
+
+ if (mutates) {
+ /* We need a deep copy of 'packet' since we're going to modify its
+ * data. */
+ ofpbuf_init(&copy, XF_NETDEV_HEADROOM + packet->size);
+ copy.data = (char*)copy.base + XF_NETDEV_HEADROOM;
+ ofpbuf_put(&copy, packet->data, packet->size);
+ } else {
+ /* We still need a shallow copy of 'packet', even though we won't
+ * modify its data, because flow_extract() modifies packet->l2, etc.
+ * We could probably get away with modifying those but it's more polite
+ * if we don't. */
+ copy = *packet;
+ }
+ flow_extract(&copy, 0, in_port, &flow);
+ xflow_key_from_flow(&key, &flow);
+ error = xf_netdev_execute_actions(xf, &copy, &key, actions, n_actions);
+ if (mutates) {
+ ofpbuf_uninit(&copy);
+ }
+ return error;
+ }
+
+ static int
+ xfif_netdev_recv_get_mask(const struct xfif *xfif, int *listen_mask)
+ {
+ struct xfif_netdev *xfif_netdev = xfif_netdev_cast(xfif);
+ *listen_mask = xfif_netdev->listen_mask;
+ return 0;
+ }
+
+ static int
+ xfif_netdev_recv_set_mask(struct xfif *xfif, int listen_mask)
+ {
+ struct xfif_netdev *xfif_netdev = xfif_netdev_cast(xfif);
+ if (!(listen_mask & ~XFLOWL_ALL)) {
+ xfif_netdev->listen_mask = listen_mask;
+ return 0;
+ } else {
+ return EINVAL;
+ }
+ }
+
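+ /* Returns the first queue in 'xfif' that contains at least one message and
+ * whose bit is set in the configured listen mask, or a null pointer if every
+ * such queue is empty. */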
+ static struct ovs_queue *
+ find_nonempty_queue(struct xfif *xfif)
+ {
+ struct xfif_netdev *xfif_netdev = xfif_netdev_cast(xfif);
+ struct xf_netdev *xf = get_xf_netdev(xfif);
+ int mask = xfif_netdev->listen_mask;
+ int i;
+
+ for (i = 0; i < N_QUEUES; i++) {
+ struct ovs_queue *q = &xf->queues[i];
+ if (q->n && mask & (1u << i)) {
+ return q;
+ }
+ }
+ return NULL;
+ }
+
+ static int
+ xfif_netdev_recv(struct xfif *xfif, struct ofpbuf **bufp)
+ {
+ struct ovs_queue *q = find_nonempty_queue(xfif);
+ if (q) {
+ *bufp = queue_pop_head(q);
+ return 0;
+ } else {
+ return EAGAIN;
+ }
+ }
+
+ static void
+ xfif_netdev_recv_wait(struct xfif *xfif)
+ {
+ struct ovs_queue *q = find_nonempty_queue(xfif);
+ if (q) {
+ poll_immediate_wake();
+ } else {
+ /* No messages ready to be received, and xf_wait() will ensure that we
+ * wake up to queue new messages, so there is nothing to do. */
+ }
+ }
+ \f
+ static void
+ xf_netdev_flow_used(struct xf_netdev_flow *flow,
+ const struct xflow_key *key,
+ const struct ofpbuf *packet)
+ {
+ time_timespec(&flow->used);
+ flow->packet_count++;
+ flow->byte_count += packet->size;
++ if (key->dl_type == htons(ETH_TYPE_IP) && key->nw_proto == IPPROTO_TCP) {
++ struct tcp_header *th = packet->l4;
++ flow->tcp_ctl |= th->tcp_ctl;
+ }
+ }
+
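+ /* Processes 'packet' received on 'port': extracts its flow key, executes the
+ * matching flow's actions if there is one, and otherwise queues the packet on
+ * the "miss" queue for the client. IP fragments are dropped and counted when
+ * drop_frags is set. */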
+ static void
+ xf_netdev_port_input(struct xf_netdev *xf, struct xf_netdev_port *port,
+ struct ofpbuf *packet)
+ {
+ struct xf_netdev_flow *flow;
+ struct xflow_key key;
+ flow_t f;
+
+ if (flow_extract(packet, 0, port->port_no, &f) && xf->drop_frags) {
+ xf->n_frags++;
+ return;
+ }
+ xflow_key_from_flow(&key, &f);
+
+ flow = xf_netdev_lookup_flow(xf, &key);
+ if (flow) {
+ xf_netdev_flow_used(flow, &key, packet);
+ xf_netdev_execute_actions(xf, packet, &key,
+ flow->actions, flow->n_actions);
+ xf->n_hit++;
+ } else {
+ xf->n_missed++;
+ xf_netdev_output_control(xf, packet, _XFLOWL_MISS_NR, port->port_no, 0);
+ }
+ }
+
+ static void
+ xf_netdev_run(void)
+ {
+ struct ofpbuf packet;
+ struct xf_netdev *xf;
+
+ ofpbuf_init(&packet, XF_NETDEV_HEADROOM + max_mtu);
+ LIST_FOR_EACH (xf, struct xf_netdev, node, &xf_netdev_list) {
+ struct xf_netdev_port *port;
+
+ LIST_FOR_EACH (port, struct xf_netdev_port, node, &xf->port_list) {
+ int error;
+
+ /* Reset packet contents. */
+ packet.data = (char*)packet.base + XF_NETDEV_HEADROOM;
+ packet.size = 0;
+
+ error = netdev_recv(port->netdev, &packet);
+ if (!error) {
+ xf_netdev_port_input(xf, port, &packet);
+ } else if (error != EAGAIN) {
+ struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
+ netdev_get_name(port->netdev), strerror(error));
+ }
+ }
+ }
+ ofpbuf_uninit(&packet);
+ }
+
+ static void
+ xf_netdev_wait(void)
+ {
+ struct xf_netdev *xf;
+
+ LIST_FOR_EACH (xf, struct xf_netdev, node, &xf_netdev_list) {
+ struct xf_netdev_port *port;
+ LIST_FOR_EACH (port, struct xf_netdev_port, node, &xf->port_list) {
+ netdev_recv_wait(port->netdev);
+ }
+ }
+ }
+
+ /* Modify or add an 802.1Q header in 'packet' according to 'a'. */
+ static void
+ xf_netdev_set_dl_tci(struct ofpbuf *packet, struct xflow_key *key,
+ const struct xflow_action_dl_tci *a)
+ {
+ struct vlan_eth_header *veh;
+
+ if (key->dl_tci) {
+ veh = packet->l2;
+ veh->veth_tci = (veh->veth_tci & ~a->mask) | a->tci;
+ } else {
+ /* Insert new 802.1Q header. */
+ struct eth_header *eh = packet->l2;
+ struct vlan_eth_header tmp;
+ memcpy(tmp.veth_dst, eh->eth_dst, ETH_ADDR_LEN);
+ memcpy(tmp.veth_src, eh->eth_src, ETH_ADDR_LEN);
+ tmp.veth_type = htons(ETH_TYPE_VLAN);
+ tmp.veth_tci = htons(a->tci);
+ tmp.veth_next_type = eh->eth_type;
+
+ veh = ofpbuf_push_uninit(packet, VLAN_HEADER_LEN);
+ memcpy(veh, &tmp, sizeof tmp);
+ packet->l2 = (char*)packet->l2 - VLAN_HEADER_LEN;
+ }
+
+ key->dl_tci = veh->veth_tci | htons(XFLOW_TCI_PRESENT);
+ }
+
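+ /* Removes the 802.1Q header from 'packet', if it has one, and clears the
+ * VLAN TCI in 'key'. */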
+ static void
+ xf_netdev_strip_vlan(struct ofpbuf *packet, struct xflow_key *key)
+ {
+ struct vlan_eth_header *veh = packet->l2;
+ if (veh->veth_type == htons(ETH_TYPE_VLAN)) {
+ struct eth_header tmp;
+
+ memcpy(tmp.eth_dst, veh->veth_dst, ETH_ADDR_LEN);
+ memcpy(tmp.eth_src, veh->veth_src, ETH_ADDR_LEN);
+ tmp.eth_type = veh->veth_next_type;
+
+ packet->size -= VLAN_HEADER_LEN;
+ packet->data = (char*)packet->data + VLAN_HEADER_LEN;
+ packet->l2 = (char*)packet->l2 + VLAN_HEADER_LEN;
+ memcpy(packet->data, &tmp, sizeof tmp);
+
+ key->dl_tci = htons(0);
+ }
+ }
+
+ static void
+ xf_netdev_set_dl_src(struct ofpbuf *packet,
+ const uint8_t dl_addr[ETH_ADDR_LEN])
+ {
+ struct eth_header *eh = packet->l2;
+ memcpy(eh->eth_src, dl_addr, sizeof eh->eth_src);
+ }
+
+ static void
+ xf_netdev_set_dl_dst(struct ofpbuf *packet,
+ const uint8_t dl_addr[ETH_ADDR_LEN])
+ {
+ struct eth_header *eh = packet->l2;
+ memcpy(eh->eth_dst, dl_addr, sizeof eh->eth_dst);
+ }
+
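+ /* For IPv4 packets, rewrites the source or destination address selected by
+ * 'a', incrementally updating the IP checksum and, for TCP and UDP, the
+ * transport checksum. */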
+ static void
+ xf_netdev_set_nw_addr(struct ofpbuf *packet, const struct xflow_key *key,
+ const struct xflow_action_nw_addr *a)
+ {
+ if (key->dl_type == htons(ETH_TYPE_IP)) {
+ struct ip_header *nh = packet->l3;
+ uint32_t *field;
+
+ field = a->type == XFLOWAT_SET_NW_SRC ? &nh->ip_src : &nh->ip_dst;
+ if (key->nw_proto == IP_TYPE_TCP) {
+ struct tcp_header *th = packet->l4;
+ th->tcp_csum = recalc_csum32(th->tcp_csum, *field, a->nw_addr);
+ } else if (key->nw_proto == IP_TYPE_UDP) {
+ struct udp_header *uh = packet->l4;
+ if (uh->udp_csum) {
+ uh->udp_csum = recalc_csum32(uh->udp_csum, *field, a->nw_addr);
+ if (!uh->udp_csum) {
+ uh->udp_csum = 0xffff;
+ }
+ }
+ }
+ nh->ip_csum = recalc_csum32(nh->ip_csum, *field, a->nw_addr);
+ *field = a->nw_addr;
+ }
+ }
+
+ static void
+ xf_netdev_set_nw_tos(struct ofpbuf *packet, const struct xflow_key *key,
+ const struct xflow_action_nw_tos *a)
+ {
+ if (key->dl_type == htons(ETH_TYPE_IP)) {
+ struct ip_header *nh = packet->l3;
+ uint8_t *field = &nh->ip_tos;
+
+ /* Set the DSCP bits and preserve the ECN bits. */
+ uint8_t new = a->nw_tos | (nh->ip_tos & IP_ECN_MASK);
+
+ nh->ip_csum = recalc_csum16(nh->ip_csum, htons((uint16_t)*field),
+ htons((uint16_t)new));
+ *field = new;
+ }
+ }
+
+ static void
+ xf_netdev_set_tp_port(struct ofpbuf *packet, const struct xflow_key *key,
+ const struct xflow_action_tp_port *a)
+ {
+ if (key->dl_type == htons(ETH_TYPE_IP)) {
+ uint16_t *field;
+ if (key->nw_proto == IPPROTO_TCP) {
+ struct tcp_header *th = packet->l4;
+ field = a->type == XFLOWAT_SET_TP_SRC ? &th->tcp_src : &th->tcp_dst;
+ th->tcp_csum = recalc_csum16(th->tcp_csum, *field, a->tp_port);
+ *field = a->tp_port;
+ } else if (key->nw_proto == IPPROTO_UDP) {
+ struct udp_header *uh = packet->l4;
+ field = a->type == XFLOWAT_SET_TP_SRC ? &uh->udp_src : &uh->udp_dst;
+ uh->udp_csum = recalc_csum16(uh->udp_csum, *field, a->tp_port);
+ *field = a->tp_port;
+ } else {
+ return;
+ }
+ }
+ }
+
+ static void
+ xf_netdev_output_port(struct xf_netdev *xf, struct ofpbuf *packet,
+ uint16_t out_port)
+ {
+ struct xf_netdev_port *p = xf->ports[out_port];
+ if (p) {
+ netdev_send(p->netdev, packet);
+ }
+ }
+
+ static void
+ xf_netdev_output_group(struct xf_netdev *xf, uint16_t group, uint16_t in_port,
+ struct ofpbuf *packet)
+ {
+ struct xflow_port_group *g = &xf->groups[group];
+ int i;
+
+ for (i = 0; i < g->n_ports; i++) {
+ uint16_t out_port = g->ports[i];
+ if (out_port != in_port) {
+ xf_netdev_output_port(xf, packet, out_port);
+ }
+ }
+ }
+
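+ /* Copies 'packet', prefixed by an xflow_msg header that describes it, onto
+ * queue 'queue_no'. If the queue is already full, the packet is counted as
+ * lost and ENOBUFS is returned. */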
+ static int
+ xf_netdev_output_control(struct xf_netdev *xf, const struct ofpbuf *packet,
+ int queue_no, int port_no, uint32_t arg)
+ {
+ struct ovs_queue *q = &xf->queues[queue_no];
+ struct xflow_msg *header;
+ struct ofpbuf *msg;
+ size_t msg_size;
+
+ if (q->n >= MAX_QUEUE_LEN) {
+ xf->n_lost++;
+ return ENOBUFS;
+ }
+
+ msg_size = sizeof *header + packet->size;
+ msg = ofpbuf_new(msg_size + XFIF_RECV_MSG_PADDING);
+ ofpbuf_reserve(msg, XFIF_RECV_MSG_PADDING);
+ header = ofpbuf_put_uninit(msg, sizeof *header);
+ header->type = queue_no;
+ header->length = msg_size;
+ header->port = port_no;
+ header->arg = arg;
+ ofpbuf_put(msg, packet->data, packet->size);
+ queue_push_tail(q, msg);
+
+ return 0;
+ }
+
+ static int
+ xf_netdev_execute_actions(struct xf_netdev *xf,
+ struct ofpbuf *packet, struct xflow_key *key,
+ const union xflow_action *actions, int n_actions)
+ {
+ int i;
+ for (i = 0; i < n_actions; i++) {
+ const union xflow_action *a = &actions[i];
+
+ switch (a->type) {
+ case XFLOWAT_OUTPUT:
+ xf_netdev_output_port(xf, packet, a->output.port);
+ break;
+
+ case XFLOWAT_OUTPUT_GROUP:
+ xf_netdev_output_group(xf, a->output_group.group, key->in_port,
+ packet);
+ break;
+
+ case XFLOWAT_CONTROLLER:
+ xf_netdev_output_control(xf, packet, _XFLOWL_ACTION_NR,
+ key->in_port, a->controller.arg);
+ break;
+
+ case XFLOWAT_SET_DL_TCI:
+ xf_netdev_set_dl_tci(packet, key, &a->dl_tci);
+ break;
+
+ case XFLOWAT_STRIP_VLAN:
+ xf_netdev_strip_vlan(packet, key);
+ break;
+
+ case XFLOWAT_SET_DL_SRC:
+ xf_netdev_set_dl_src(packet, a->dl_addr.dl_addr);
+ break;
+
+ case XFLOWAT_SET_DL_DST:
+ xf_netdev_set_dl_dst(packet, a->dl_addr.dl_addr);
+ break;
+
+ case XFLOWAT_SET_NW_SRC:
+ case XFLOWAT_SET_NW_DST:
+ xf_netdev_set_nw_addr(packet, key, &a->nw_addr);
+ break;
+
+ case XFLOWAT_SET_NW_TOS:
+ xf_netdev_set_nw_tos(packet, key, &a->nw_tos);
+ break;
+
+ case XFLOWAT_SET_TP_SRC:
+ case XFLOWAT_SET_TP_DST:
+ xf_netdev_set_tp_port(packet, key, &a->tp_port);
+ break;
+ }
+ }
+ return 0;
+ }
+
+ const struct xfif_class xfif_netdev_class = {
+ "netdev",
+ xf_netdev_run,
+ xf_netdev_wait,
+ NULL, /* enumerate */
+ xfif_netdev_open,
+ xfif_netdev_close,
+ NULL, /* get_all_names */
+ xfif_netdev_destroy,
+ xfif_netdev_get_stats,
+ xfif_netdev_get_drop_frags,
+ xfif_netdev_set_drop_frags,
+ xfif_netdev_port_add,
+ xfif_netdev_port_del,
+ xfif_netdev_port_query_by_number,
+ xfif_netdev_port_query_by_name,
+ xfif_netdev_port_list,
+ xfif_netdev_port_poll,
+ xfif_netdev_port_poll_wait,
+ xfif_netdev_port_group_get,
+ xfif_netdev_port_group_set,
+ xfif_netdev_flow_get,
+ xfif_netdev_flow_put,
+ xfif_netdev_flow_del,
+ xfif_netdev_flow_flush,
+ xfif_netdev_flow_list,
+ xfif_netdev_execute,
+ xfif_netdev_recv_get_mask,
+ xfif_netdev_recv_set_mask,
+ NULL, /* get_sflow_probability */
+ NULL, /* set_sflow_probability */
+ NULL, /* queue_to_priority */
+ xfif_netdev_recv,
+ xfif_netdev_recv_wait,
+ };
/* OpenFlow connections. */
struct hmap controllers; /* Controller "struct ofconn"s. */
struct list all_conns; /* Contains "struct ofconn"s. */
- struct pvconn **listeners;
- size_t n_listeners;
+ enum ofproto_fail_mode fail_mode;
+
+ /* OpenFlow listeners. */
+ struct hmap services; /* Contains "struct ofservice"s. */
struct pvconn **snoops;
size_t n_snoops;
-
- /* Hooks for ovs-vswitchd. */
- const struct ofhooks *ofhooks;
- void *aux;
-
- /* Used by default ofhooks. */
- struct mac_learning *ml;
};
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
for (i = 0; i < n_controllers; i++) {
const struct ofproto_controller *c = &controllers[i];
- shash_add_once(&new_controllers, c->target, &controllers[i]);
- if (!find_controller_by_target(p, c->target)) {
- add_controller(p, c);
+ if (!vconn_verify_name(c->target) || !strcmp(c->target, "discover")) {
+ if (!find_controller_by_target(p, c->target)) {
+ add_controller(p, c);
+ }
+ } else if (!pvconn_verify_name(c->target)) {
+ if (!ofservice_lookup(p, c->target) && ofservice_create(p, c)) {
+ continue;
+ }
+ } else {
+ VLOG_WARN_RL(&rl, "%s: unsupported controller \"%s\"",
- dpif_name(p->dpif), c->target);
++ wdp_name(p->wdp), c->target);
+ continue;
}
+
+ shash_add_once(&new_controllers, c->target, &controllers[i]);
}
- fail_mode = OFPROTO_FAIL_STANDALONE;
+ /* Delete controllers that are no longer configured.
+ * Update configuration of all now-existing controllers. */
ss_exists = false;
- HMAP_FOR_EACH_SAFE (ofconn, next, struct ofconn, hmap_node,
+ HMAP_FOR_EACH_SAFE (ofconn, next_ofconn, struct ofconn, hmap_node,
&p->controllers) {
struct ofproto_controller *c;
void
ofproto_destroy(struct ofproto *p)
{
+ struct ofservice *ofservice, *next_ofservice;
struct ofconn *ofconn, *next_ofconn;
- struct ofport *ofport;
- unsigned int port_no;
size_t i;
if (!p) {
ofproto_run1(struct ofproto *p)
{
struct ofconn *ofconn, *next_ofconn;
- char *devname;
- int error;
+ struct ofservice *ofservice;
int i;
- if (shash_is_empty(&p->port_by_name)) {
- init_ports(p);
- }
-
for (i = 0; i < 50; i++) {
- struct ofpbuf *buf;
+ struct wdp_packet packet;
int error;
- error = dpif_recv(p->dpif, &buf);
+ error = wdp_recv(p->wdp, &packet);
if (error) {
if (error == ENODEV) {
/* Someone destroyed the datapath behind our back. The caller
if (p->sflow) {
ofproto_sflow_wait(p->sflow);
}
- if (!tag_set_is_empty(&p->revalidate_set)) {
- poll_immediate_wake();
- }
- if (p->need_revalidate) {
- /* Shouldn't happen, but if it does just go around again. */
- VLOG_DBG_RL(&rl, "need revalidate in ofproto_wait_cb()");
- poll_immediate_wake();
- } else if (p->next_expiration != LLONG_MAX) {
- poll_timer_wait_until(p->next_expiration);
- }
- for (i = 0; i < p->n_listeners; i++) {
- pvconn_wait(p->listeners[i]);
+ HMAP_FOR_EACH (ofservice, struct ofservice, node, &p->services) {
+ pvconn_wait(ofservice->pvconn);
}
for (i = 0; i < p->n_snoops; i++) {
pvconn_wait(p->snoops[i]);
static char *
ofconn_make_name(const struct ofproto *ofproto, const char *target)
{
- return xasprintf("%s<->%s", dpif_base_name(ofproto->dpif), target);
+ return xasprintf("%s<->%s", wdp_base_name(ofproto->wdp), target);
}
+
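+/* Configures packet-in rate limiting for 'ofconn': when 'rate' is positive,
+ * creates or updates a pinsched for each scheduler slot with the given rate
+ * and burst; otherwise destroys any existing limiter. */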
+static void
+ofconn_set_rate_limit(struct ofconn *ofconn, int rate, int burst)
+{
+ int i;
+
+ for (i = 0; i < N_SCHEDULERS; i++) {
+ struct pinsched **s = &ofconn->schedulers[i];
+
+ if (rate > 0) {
+ if (!*s) {
+ *s = pinsched_create(rate, burst,
+ ofconn->ofproto->switch_status);
+ } else {
+ pinsched_set_limits(*s, rate, burst);
+ }
+ } else {
+ pinsched_destroy(*s);
+ *s = NULL;
+ }
+ }
+}
+\f
+static void
+ofservice_reconfigure(struct ofservice *ofservice,
+ const struct ofproto_controller *c)
+{
+ ofservice->probe_interval = c->probe_interval;
+ ofservice->rate_limit = c->rate_limit;
+ ofservice->burst_limit = c->burst_limit;
+}
+
+/* Creates a new ofservice in 'ofproto'. Returns 0 if successful, otherwise a
+ * positive errno value. */
+static int
+ofservice_create(struct ofproto *ofproto, const struct ofproto_controller *c)
+{
+ struct ofservice *ofservice;
+ struct pvconn *pvconn;
+ int error;
+
+ error = pvconn_open(c->target, &pvconn);
+ if (error) {
+ return error;
+ }
+
+ ofservice = xzalloc(sizeof *ofservice);
+ hmap_insert(&ofproto->services, &ofservice->node,
+ hash_string(c->target, 0));
+ ofservice->pvconn = pvconn;
+
+ ofservice_reconfigure(ofservice, c);
+
+ return 0;
+}
+
+static void
+ofservice_destroy(struct ofproto *ofproto, struct ofservice *ofservice)
+{
+ hmap_remove(&ofproto->services, &ofservice->node);
+ pvconn_close(ofservice->pvconn);
+ free(ofservice);
+}
+
+/* Finds and returns the ofservice within 'ofproto' that has the given
+ * 'target', or a null pointer if none exists. */
+static struct ofservice *
+ofservice_lookup(struct ofproto *ofproto, const char *target)
+{
+ struct ofservice *ofservice;
+
+ HMAP_FOR_EACH_WITH_HASH (ofservice, struct ofservice, node,
+ hash_string(target, 0), &ofproto->services) {
+ if (!strcmp(pvconn_get_name(ofservice->pvconn), target)) {
+ return ofservice;
+ }
+ }
+ return NULL;
+}
\f
- /* Caller is responsible for initializing the 'cr' member of the returned
- * rule. */
- static struct rule *
- rule_create(struct ofproto *ofproto, struct rule *super,
- const union ofp_action *actions, size_t n_actions,
- uint16_t idle_timeout, uint16_t hard_timeout,
- uint64_t flow_cookie, bool send_flow_removed)
- {
- struct rule *rule = xzalloc(sizeof *rule);
- rule->idle_timeout = idle_timeout;
- rule->hard_timeout = hard_timeout;
- rule->flow_cookie = flow_cookie;
- rule->used = rule->created = time_msec();
- rule->send_flow_removed = send_flow_removed;
- rule->super = super;
- if (super) {
- list_push_back(&super->list, &rule->list);
- } else {
- list_init(&rule->list);
- }
- rule->n_actions = n_actions;
- rule->actions = xmemdup(actions, n_actions * sizeof *actions);
- netflow_flow_clear(&rule->nf_flow);
- netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);
-
- return rule;
- }
-
- static struct rule *
- rule_from_cls_rule(const struct cls_rule *cls_rule)
- {
- return cls_rule ? CONTAINER_OF(cls_rule, struct rule, cr) : NULL;
- }
-
- static void
- rule_free(struct rule *rule)
- {
- free(rule->actions);
- free(rule->odp_actions);
- free(rule);
- }
-
- /* Destroys 'rule'. If 'rule' is a subrule, also removes it from its
- * super-rule's list of subrules. If 'rule' is a super-rule, also iterates
- * through all of its subrules and revalidates them, destroying any that no
- * longer has a super-rule (which is probably all of them).
- *
- * Before calling this function, the caller must make have removed 'rule' from
- * the classifier. If 'rule' is an exact-match rule, the caller is also
- * responsible for ensuring that it has been uninstalled from the datapath. */
- static void
- rule_destroy(struct ofproto *ofproto, struct rule *rule)
- {
- if (!rule->super) {
- struct rule *subrule, *next;
- LIST_FOR_EACH_SAFE (subrule, next, struct rule, list, &rule->list) {
- revalidate_rule(ofproto, subrule);
- }
- } else {
- list_remove(&rule->list);
- }
- rule_free(rule);
- }
-
static bool
- rule_has_out_port(const struct rule *rule, uint16_t out_port)
+ rule_has_out_port(const struct wdp_rule *rule, uint16_t out_port)
{
const union ofp_action *oa;
struct actions_iterator i;
}
flags = ntohs(osc->flags);
- if (ofconn->type == OFCONN_CONTROLLER && ofconn->role != NX_ROLE_SLAVE) {
+ if (ofconn->type == OFCONN_PRIMARY && ofconn->role != NX_ROLE_SLAVE) {
switch (flags & OFPC_FRAG_MASK) {
case OFPC_FRAG_NORMAL:
- dpif_set_drop_frags(p->dpif, false);
+ wdp_set_drop_frags(p->wdp, false);
break;
case OFPC_FRAG_DROP:
- dpif_set_drop_frags(p->dpif, true);
+ wdp_set_drop_frags(p->wdp, true);
break;
default:
VLOG_WARN_RL(&rl, "requested bad fragment mode (flags=%"PRIx16")",
void (*port_changed_cb)(enum ofp_port_reason, const struct ofp_phy_port *,
void *aux);
bool (*normal_cb)(const flow_t *, const struct ofpbuf *packet,
- struct odp_actions *, tag_type *,
+ struct xflow_actions *, tag_type *,
uint16_t *nf_output_iface, void *aux);
- void (*account_flow_cb)(const flow_t *, const union xflow_action *,
- size_t n_actions, unsigned long long int n_bytes,
- void *aux);
+ void (*account_flow_cb)(const flow_t *, tag_type tags,
- const union odp_action *, size_t n_actions,
++ const union xflow_action *, size_t n_actions,
+ unsigned long long int n_bytes, void *aux);
void (*account_checkpoint_cb)(void *aux);
};
void ofproto_revalidate(struct ofproto *, tag_type);
--- /dev/null
- /* XXX netflow_flow_update_flags(&rule->nf_flow, stats->ip_tos,
- stats->tcp_flags); */
+ /*
+ * Copyright (c) 2010 Nicira Networks.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+ #include <config.h>
+
+ #include "wdp-xflow.h"
+
+ #include <errno.h>
+ #include <inttypes.h>
+
+ #include "coverage.h"
+ #include "dhcp.h"
+ #include "mac-learning.h"
+ #include "netdev.h"
+ #include "netflow.h"
+ #include "ofp-util.h"
+ #include "ofpbuf.h"
+ #include "ofproto.h"
+ #include "openflow/nicira-ext.h"
+ #include "openflow/openflow.h"
+ #include "packets.h"
+ #include "poll-loop.h"
+ #include "port-array.h"
+ #include "shash.h"
+ #include "stp.h"
+ #include "svec.h"
+ #include "timeval.h"
+ #include "util.h"
+ #include "vconn.h"
+ #include "wdp-provider.h"
+ #include "xfif.h"
+ #include "xflow-util.h"
+ #include "vlog.h"
+ #include "xtoxll.h"
+
+ VLOG_DEFINE_THIS_MODULE(wdp_xflow)
+
+ enum {
+ TABLEID_HASH = 0,
+ TABLEID_CLASSIFIER = 1
+ };
+
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+ \f
+ /* Maximum numbers of rules. */
+ #define WX_MAX_WILD 65536 /* Wildcarded rules. */
+ #define WX_MAX_EXACT 1048576 /* Exact-match rules. */
+
+ struct wx {
+ struct list list_node;
+ struct wdp wdp;
+ struct xfif *xfif;
+ struct classifier cls;
+ struct netdev_monitor *netdev_monitor;
+ struct port_array ports; /* Index is xflow port nr;
+ * wdp_port->opp.port_no is OFP port nr. */
+ struct shash port_by_name;
+ long long int next_expiration;
+
+ /* Rules that might need to be revalidated. */
+ bool need_revalidate; /* Revalidate all subrules? */
+ bool revalidate_all; /* Revalidate all subrules and other rules? */
+ struct tag_set revalidate_set; /* Tag set of (sub)rules to revalidate. */
+
+ /* Hooks for ovs-vswitchd. */
+ const struct ofhooks *ofhooks;
+ void *aux;
+
+ /* Used by default ofhooks. */
+ struct mac_learning *ml;
+ };
+
+ static const struct ofhooks default_ofhooks;
+
+ static struct list all_wx = LIST_INITIALIZER(&all_wx);
+
+ static int wx_port_init(struct wx *);
+ static void wx_port_process_change(struct wx *wx, int error, char *devname,
+ wdp_port_poll_cb_func *cb, void *aux);
+ static void wx_port_refresh_groups(struct wx *);
+
+ enum {
+ WX_GROUP_FLOOD = 0,
+ WX_GROUP_ALL = 1
+ };
+
+ static struct wx *
+ wx_cast(const struct wdp *wdp)
+ {
+ return CONTAINER_OF(wdp, struct wx, wdp);
+ }
+
+ static int
+ wx_xlate_actions(struct wx *, const union ofp_action *, size_t n,
+ const flow_t *flow, const struct ofpbuf *packet,
+ tag_type *tags, struct xflow_actions *out,
+ bool *may_set_up_flow);
+ \f
+ struct wx_rule {
+ struct wdp_rule wr;
+
+ uint64_t packet_count; /* Number of packets received. */
+ uint64_t byte_count; /* Number of bytes received. */
+ uint64_t accounted_bytes; /* Number of bytes passed to account_cb. */
+ long long int used; /* Last-used time (0 if never used). */
+ tag_type tags; /* Tags (set only by hooks). */
+
+ /* If 'super' is non-NULL, this rule is a subrule, that is, it is an
+ * exact-match rule (having cr.wc.wildcards of 0) generated from the
+ * wildcard rule 'super'. In this case, 'list' is an element of the
+ * super-rule's list.
+ *
+ * If 'super' is NULL, this rule is a super-rule, and 'list' is the head of
+ * a list of subrules. A super-rule with no wildcards (where
+ * cr.wc.wildcards is 0) will never have any subrules. */
+ struct wx_rule *super;
+ struct list list;
+
+ /* Datapath actions.
+ *
+ * A super-rule with wildcard fields never has xflow actions (since the
+ * datapath only supports exact-match flows). */
+ bool installed; /* Installed in datapath? */
+ bool may_install; /* True ordinarily; false if actions must
+ * be reassessed for every packet. */
+ int n_xflow_actions;
+ union xflow_action *xflow_actions;
+ };
+
+ static void wx_rule_destroy(struct wx *, struct wx_rule *);
+ static void wx_rule_update_actions(struct wx *, struct wx_rule *);
+ static void wx_rule_execute(struct wx *, struct wx_rule *,
+ struct ofpbuf *packet, const flow_t *);
+ static bool wx_rule_make_actions(struct wx *, struct wx_rule *,
+ const struct ofpbuf *packet);
+ static void wx_rule_install(struct wx *, struct wx_rule *,
+ struct wx_rule *displaced_rule);
+
+ static struct wx_rule *
+ wx_rule_cast(const struct cls_rule *cls_rule)
+ {
+ return cls_rule ? CONTAINER_OF(cls_rule, struct wx_rule, wr.cr) : NULL;
+ }
+
+ /* Returns true if 'rule' is merely an implementation detail that should be
+ * hidden from the client. */
+ static inline bool
+ wx_rule_is_hidden(const struct wx_rule *rule)
+ {
+ return rule->super != NULL;
+ }
+
+ static void
+ wx_rule_free(struct wx_rule *rule)
+ {
+ wdp_rule_uninit(&rule->wr);
+ free(rule->xflow_actions);
+ free(rule);
+ }
+
+ static void
+ wx_rule_account(struct wx *wx OVS_UNUSED, struct wx_rule *rule OVS_UNUSED,
+ uint64_t extra_bytes OVS_UNUSED)
+ {
+ /* XXX call account_cb hook */
+ }
+
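+ /* Runs final accounting for 'rule' and, if it is a subrule, folds its packet
+ * and byte counts into its super-rule, resetting them so they are not double
+ * counted if the flow is later reinstalled. */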
+ static void
+ wx_rule_post_uninstall(struct wx *wx, struct wx_rule *rule)
+ {
+ struct wx_rule *super = rule->super;
+
+ wx_rule_account(wx, rule, 0);
+
+ /* XXX netflow expiration */
+
+ if (super) {
+ super->packet_count += rule->packet_count;
+ super->byte_count += rule->byte_count;
+
+ /* Reset counters to prevent double counting if the rule ever gets
+ * reinstalled. */
+ rule->packet_count = 0;
+ rule->byte_count = 0;
+ rule->accounted_bytes = 0;
+
+ //XXX netflow_flow_clear(&rule->nf_flow);
+ }
+ }
+
+ static long long int
+ xflow_flow_stats_to_msec(const struct xflow_flow_stats *stats)
+ {
+ return (stats->used_sec
+ ? stats->used_sec * 1000 + stats->used_nsec / 1000000
+ : 0);
+ }
+
+ static void
+ wx_rule_update_time(struct wx *wx OVS_UNUSED, struct wx_rule *rule,
+ const struct xflow_flow_stats *stats)
+ {
+ long long int used = xflow_flow_stats_to_msec(stats);
+ if (used > rule->used) {
+ rule->used = used;
+ if (rule->super && used > rule->super->used) {
+ rule->super->used = used;
+ }
+ //XXX netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, used);
+ }
+ }
+
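+ /* If 'stats' reports any packets, updates 'rule''s last-used time and adds
+ * the reported packet and byte counts to its totals. */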
+ static void
+ wx_rule_update_stats(struct wx *wx, struct wx_rule *rule,
+ const struct xflow_flow_stats *stats)
+ {
+ if (stats->n_packets) {
+ wx_rule_update_time(wx, rule, stats);
+ rule->packet_count += stats->n_packets;
+ rule->byte_count += stats->n_bytes;
- if (xflow_flow->key.dl_type == htons(ETH_TYPE_IP)
- && xflow_flow->key.nw_proto == IP_TYPE_TCP) {
- stats->ip_tos = xflow_flow->stats.ip_tos;
- }
++ /* XXX netflow_flow_update_flags(&rule->nf_flow, stats->tcp_flags); */
+ }
+ }
+
+ static void
+ wx_rule_uninstall(struct wx *wx, struct wx_rule *rule)
+ {
+ assert(!rule->wr.cr.flow.wildcards);
+ if (rule->installed) {
+ struct xflow_flow xflow_flow;
+
+ xflow_key_from_flow(&xflow_flow.key, &rule->wr.cr.flow);
+ xflow_flow.actions = NULL;
+ xflow_flow.n_actions = 0;
+ xflow_flow.flags = 0;
+ if (!xfif_flow_del(wx->xfif, &xflow_flow)) {
+ wx_rule_update_stats(wx, rule, &xflow_flow.stats);
+ }
+ rule->installed = false;
+
+ wx_rule_post_uninstall(wx, rule);
+ }
+ }
+
+ #if 0
+ static bool
+ is_controller_rule(struct wx_rule *rule)
+ {
+ /* If the only action is send to the controller then don't report
+ * NetFlow expiration messages since it is just part of the control
+ * logic for the network and not real traffic. */
+
+ return (rule
+ && rule->super
+ && rule->super->n_actions == 1
+ && action_outputs_to_port(&rule->super->actions[0],
+ htons(OFPP_CONTROLLER)));
+ }
+ #endif
+
+ static void
+ wx_rule_remove(struct wx *wx, struct wx_rule *rule)
+ {
+ if (rule->wr.cr.flow.wildcards) {
+ COVERAGE_INC(wx_del_wc_flow);
+ wx->need_revalidate = true;
+ } else {
+ wx_rule_uninstall(wx, rule);
+ }
+ classifier_remove(&wx->cls, &rule->wr.cr);
+ wx_rule_destroy(wx, rule);
+ }
+
+ static bool
+ wx_rule_revalidate(struct wx *wx, struct wx_rule *rule)
+ {
+ const flow_t *flow = &rule->wr.cr.flow;
+
+ COVERAGE_INC(wx_rule_revalidate);
+ if (rule->super) {
+ struct wx_rule *super;
+ super = wx_rule_cast(classifier_lookup_wild(&wx->cls, flow));
+ if (!super) {
+ wx_rule_remove(wx, rule);
+ return false;
+ } else if (super != rule->super) {
+ COVERAGE_INC(wx_revalidate_moved);
+ list_remove(&rule->list);
+ list_push_back(&super->list, &rule->list);
+ rule->super = super;
+ rule->wr.hard_timeout = super->wr.hard_timeout;
+ rule->wr.idle_timeout = super->wr.idle_timeout;
+ rule->wr.created = super->wr.created;
+ rule->used = 0;
+ }
+ }
+
+ wx_rule_update_actions(wx, rule);
+ return true;
+ }
+
+ /* Destroys 'rule'. If 'rule' is a subrule, also removes it from its
+ * super-rule's list of subrules. If 'rule' is a super-rule, also iterates
+ * through all of its subrules and revalidates them, destroying any that no
+ * longer has a super-rule (which is probably all of them).
+ *
+ * Before calling this function, the caller must have removed 'rule' from
+ * the classifier. If 'rule' is an exact-match rule, the caller is also
+ * responsible for ensuring that it has been uninstalled from the datapath. */
+ static void
+ wx_rule_destroy(struct wx *wx, struct wx_rule *rule)
+ {
+ if (!rule->super) {
+ struct wx_rule *subrule, *next;
+ LIST_FOR_EACH_SAFE (subrule, next, struct wx_rule, list, &rule->list) {
+ wx_rule_revalidate(wx, subrule);
+ }
+ } else {
+ list_remove(&rule->list);
+ }
+ wx_rule_free(rule);
+ }
+
+ #if 0
+ static bool
+ wx_rule_has_out_port(const struct wx_rule *rule, uint16_t out_port)
+ {
+ const union ofp_action *oa;
+ struct actions_iterator i;
+
+ if (out_port == htons(OFPP_NONE)) {
+ return true;
+ }
+ for (oa = actions_first(&i, rule->wr.actions,
+ rule->wr.n_actions);
+ oa;
+ oa = actions_next(&i)) {
+ if (oa->type == htons(OFPAT_OUTPUT) && oa->output.port == out_port) {
+ return true;
+ }
+ }
+ return false;
+ }
+ #endif
+
+ /* Caller is responsible for initializing the 'cr' and ofp_table_id members of
+ * the returned rule. */
+ static struct wx_rule *
+ wx_rule_create(struct wx_rule *super,
+ const union ofp_action *actions, size_t n_actions,
+ uint16_t idle_timeout, uint16_t hard_timeout)
+ {
+ struct wx_rule *rule = xzalloc(sizeof *rule);
+ wdp_rule_init(&rule->wr, actions, n_actions);
+ rule->wr.idle_timeout = idle_timeout;
+ rule->wr.hard_timeout = hard_timeout;
+ rule->used = rule->wr.created;
+ rule->super = super;
+ if (super) {
+ list_push_back(&super->list, &rule->list);
+ } else {
+ list_init(&rule->list);
+ }
+ #if 0
+ netflow_flow_clear(&rule->nf_flow);
+ netflow_flow_update_time(ofproto->netflow, &rule->nf_flow, rule->created);
+ #endif
+
+ return rule;
+ }
+
+ /* Executes the actions indicated by 'rule' on 'packet', which is in flow
+ * 'flow' and is considered to have arrived on xflow port 'in_port'.
+ *
+ * The flow that 'packet' actually contains does not need to actually match
+ * 'rule'; the actions in 'rule' will be applied to it either way. Likewise,
+ * the packet and byte counters for 'rule' will be credited for the packet sent
+ * out whether or not the packet actually matches 'rule'.
+ *
+ * If 'rule' is an exact-match rule and 'flow' actually equals the rule's flow,
+ * the caller must already have accurately composed xflow actions for it given
+ * 'packet' using rule_make_actions(). If 'rule' is a wildcard rule, or if
+ * 'rule' is an exact-match rule but 'flow' is not the rule's flow, then this
+ * function will compose a set of xflow actions based on 'rule''s OpenFlow
+ * actions and apply them to 'packet'. */
+ static void
+ wx_rule_execute(struct wx *wx, struct wx_rule *rule,
+ struct ofpbuf *packet, const flow_t *flow)
+ {
+ const union xflow_action *actions;
+ size_t n_actions;
+ struct xflow_actions a;
+
+ /* Grab or compose the xflow actions.
+ *
+ * The special case for an exact-match 'rule' where 'flow' is not the
+ * rule's flow is important to avoid, e.g., sending a packet out its input
+ * port simply because the xflow actions were composed for the wrong
+ * scenario. */
+ if (rule->wr.cr.flow.wildcards
+ || !flow_equal_headers(flow, &rule->wr.cr.flow))
+ {
+ struct wx_rule *super = rule->super ? rule->super : rule;
+ if (wx_xlate_actions(wx, super->wr.actions, super->wr.n_actions, flow,
+ packet, NULL, &a, NULL)) {
+ return;
+ }
+ actions = a.actions;
+ n_actions = a.n_actions;
+ } else {
+ actions = rule->xflow_actions;
+ n_actions = rule->n_xflow_actions;
+ }
+
+ /* Execute the xflow actions. */
+ if (!xfif_execute(wx->xfif, flow->in_port,
+ actions, n_actions, packet)) {
+ struct xflow_flow_stats stats;
+ flow_extract_stats(flow, packet, &stats);
+ wx_rule_update_stats(wx, rule, &stats);
+ rule->used = time_msec();
+ //XXX netflow_flow_update_time(wx->netflow, &rule->nf_flow, rule->used);
+ }
+ }
+
+ static void
+ wx_rule_insert(struct wx *wx, struct wx_rule *rule, struct ofpbuf *packet,
+ uint16_t in_port)
+ {
+ struct wx_rule *displaced_rule;
+
+ /* Insert the rule in the classifier. */
+ displaced_rule = wx_rule_cast(classifier_insert(&wx->cls, &rule->wr.cr));
+ if (!rule->wr.cr.flow.wildcards) {
+ wx_rule_make_actions(wx, rule, packet);
+ }
+
+ /* Send the packet and credit it to the rule. */
+ if (packet) {
+ flow_t flow;
+ flow_extract(packet, 0, in_port, &flow);
+ wx_rule_execute(wx, rule, packet, &flow);
+ }
+
+ /* Install the rule in the datapath only after sending the packet, to
+ * avoid packet reordering. */
+ if (rule->wr.cr.flow.wildcards) {
+ COVERAGE_INC(wx_add_wc_flow);
+ wx->need_revalidate = true;
+ } else {
+ wx_rule_install(wx, rule, displaced_rule);
+ }
+
+ /* Free the rule that was displaced, if any. */
+ if (displaced_rule) {
+ rule->wr.client_data = displaced_rule->wr.client_data;
+ wx_rule_destroy(wx, displaced_rule);
+ }
+ }
+
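+ /* Creates an exact-match subrule of wildcarded rule 'rule' for 'flow',
+ * inheriting the parent's timeouts, and inserts it into the classifier. */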
+ static struct wx_rule *
+ wx_rule_create_subrule(struct wx *wx, struct wx_rule *rule, const flow_t *flow)
+ {
+ struct wx_rule *subrule;
+
+ subrule = wx_rule_create(rule, NULL, 0,
+ rule->wr.idle_timeout,
+ rule->wr.hard_timeout);
+ /* Subrules aren't really in any OpenFlow table, so don't bother with
+ * subrule->wr.ofp_table_id. */
+ COVERAGE_INC(wx_subrule_create);
+ cls_rule_from_flow(flow, &subrule->wr.cr);
+ classifier_insert_exact(&wx->cls, &subrule->wr.cr);
+
+ return subrule;
+ }
+
+ /* Returns true if the actions changed, false otherwise. */
+ static bool
+ wx_rule_make_actions(struct wx *wx, struct wx_rule *rule,
+ const struct ofpbuf *packet)
+ {
+ const struct wx_rule *super;
+ struct xflow_actions a;
+ size_t actions_len;
+
+ assert(!rule->wr.cr.flow.wildcards);
+
+ super = rule->super ? rule->super : rule;
+ wx_xlate_actions(wx, super->wr.actions, super->wr.n_actions,
+ &rule->wr.cr.flow, packet,
+ &rule->tags, &a, &rule->may_install);
+
+ actions_len = a.n_actions * sizeof *a.actions;
+ if (rule->n_xflow_actions != a.n_actions
+ || memcmp(rule->xflow_actions, a.actions, actions_len)) {
+ COVERAGE_INC(wx_xflow_unchanged);
+ free(rule->xflow_actions);
+ rule->n_xflow_actions = a.n_actions;
+ rule->xflow_actions = xmemdup(a.actions, actions_len);
+ return true;
+ } else {
+ return false;
+ }
+ }
+
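+ /* Composes an xflow_flow_put request for 'rule''s exact-match flow and xflow
+ * actions with the given XFLOWPF_* 'flags' and submits it to the xfif. */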
+ static int
+ do_put_flow(struct wx *wx, struct wx_rule *rule, int flags,
+ struct xflow_flow_put *put)
+ {
+ memset(&put->flow.stats, 0, sizeof put->flow.stats);
+ xflow_key_from_flow(&put->flow.key, &rule->wr.cr.flow);
+ put->flow.actions = rule->xflow_actions;
+ put->flow.n_actions = rule->n_xflow_actions;
+ put->flow.flags = 0;
+ put->flags = flags;
+ return xfif_flow_put(wx->xfif, put);
+ }
+
+ static void
+ wx_rule_install(struct wx *wx, struct wx_rule *rule, struct wx_rule *displaced_rule)
+ {
+ assert(!rule->wr.cr.flow.wildcards);
+
+ if (rule->may_install) {
+ struct xflow_flow_put put;
+ if (!do_put_flow(wx, rule,
+ XFLOWPF_CREATE | XFLOWPF_MODIFY | XFLOWPF_ZERO_STATS,
+ &put)) {
+ rule->installed = true;
+ if (displaced_rule) {
+ wx_rule_update_stats(wx, displaced_rule, &put.flow.stats);
+ wx_rule_post_uninstall(wx, displaced_rule);
+ }
+ }
+ } else if (displaced_rule) {
+ wx_rule_uninstall(wx, displaced_rule);
+ }
+ }
+
+ static void
+ wx_rule_reinstall(struct wx *wx, struct wx_rule *rule)
+ {
+ if (rule->installed) {
+ struct xflow_flow_put put;
+ COVERAGE_INC(wx_dp_missed);
+ do_put_flow(wx, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY, &put);
+ } else {
+ wx_rule_install(wx, rule, NULL);
+ }
+ }
+
+ static void
+ wx_rule_update_actions(struct wx *wx, struct wx_rule *rule)
+ {
+ bool actions_changed;
+ #if 0
+ uint16_t new_out_iface, old_out_iface;
+
+ old_out_iface = rule->nf_flow.output_iface;
+ #endif
+ actions_changed = wx_rule_make_actions(wx, rule, NULL);
+
+ if (rule->may_install) {
+ if (rule->installed) {
+ if (actions_changed) {
+ struct xflow_flow_put put;
+ do_put_flow(wx, rule, XFLOWPF_CREATE | XFLOWPF_MODIFY
+ | XFLOWPF_ZERO_STATS, &put);
+ wx_rule_update_stats(wx, rule, &put.flow.stats);
+ #if 0
+ /* Temporarily set the old output iface so that NetFlow
+ * messages have the correct output interface for the old
+ * stats. */
+ new_out_iface = rule->nf_flow.output_iface;
+ rule->nf_flow.output_iface = old_out_iface;
+ #endif
+ wx_rule_post_uninstall(wx, rule);
+ //rule->nf_flow.output_iface = new_out_iface;
+ }
+ } else {
+ wx_rule_install(wx, rule, NULL);
+ }
+ } else {
+ wx_rule_uninstall(wx, rule);
+ }
+ }
+ \f
+ static void
+ add_output_group_action(struct xflow_actions *actions, uint16_t group,
+ uint16_t *nf_output_iface)
+ {
+ xflow_actions_add(actions, XFLOWAT_OUTPUT_GROUP)->output_group.group = group;
+
+ if (group == WX_GROUP_ALL || group == WX_GROUP_FLOOD) {
+ *nf_output_iface = NF_OUT_FLOOD;
+ }
+ }
+
+ static void
+ add_controller_action(struct xflow_actions *actions, uint16_t max_len)
+ {
+ union xflow_action *a = xflow_actions_add(actions, XFLOWAT_CONTROLLER);
+ a->controller.arg = max_len;
+ }
+
+ struct wx_xlate_ctx {
+ /* Input. */
+ flow_t flow; /* Flow to which these actions correspond. */
+ int recurse; /* Recursion level, via xlate_table_action. */
+ struct wx *wx;
+ const struct ofpbuf *packet; /* The packet corresponding to 'flow', or a
+ * null pointer if we are revalidating
+ * without a packet to refer to. */
+
+ /* Output. */
+ struct xflow_actions *out; /* Datapath actions. */
+ tag_type *tags; /* Tags associated with OFPP_NORMAL actions. */
+ bool may_set_up_flow; /* True ordinarily; false if the actions must
+ * be reassessed for every packet. */
+ uint16_t nf_output_iface; /* Output interface index for NetFlow. */
+ };
+
+ static void do_xlate_actions(const union ofp_action *in, size_t n_in,
+ struct wx_xlate_ctx *ctx);
+
+ static void
+ add_output_action(struct wx_xlate_ctx *ctx, uint16_t port)
+ {
+ const struct wdp_port *wdp_port = port_array_get(&ctx->wx->ports, port);
+
+ if (wdp_port) {
+ if (wdp_port->opp.config & OFPPC_NO_FWD) {
+ /* Forwarding disabled on port. */
+ return;
+ }
+ } else {
+ /*
+ * We don't have an ofport record for this port, but it doesn't hurt to
+ * allow forwarding to it anyhow. Maybe such a port will appear later
+ * and we're pre-populating the flow table.
+ */
+ }
+
+ xflow_actions_add(ctx->out, XFLOWAT_OUTPUT)->output.port = port;
+ //ctx->nf_output_iface = port;
+ }
+
+ static struct wx_rule *
+ wx_rule_lookup_valid(struct wx *wx, const flow_t *flow)
+ {
+ struct wx_rule *rule = wx_rule_cast(classifier_lookup(&wx->cls, flow));
+
+ /* The rule we found might not be valid, since we could be in need of
+ * revalidation. If it is not valid, don't return it. */
+ if (rule
+ && rule->super
+ && wx->need_revalidate
+ && !wx_rule_revalidate(wx, rule)) {
+ COVERAGE_INC(wx_invalidated);
+ return NULL;
+ }
+
+ return rule;
+ }
+
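+ /* Implements the OFPP_TABLE output and NXAST_RESUBMIT actions: looks up the
+ * flow in 'ctx' with 'in_port' substituted as the input port and, if a valid
+ * rule matches, translates that rule's actions. Only one level of recursion
+ * is allowed; nested resubmits are ignored. */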
+ static void
+ xlate_table_action(struct wx_xlate_ctx *ctx, uint16_t in_port)
+ {
+ if (!ctx->recurse) {
+ uint16_t old_in_port;
+ struct wx_rule *rule;
+
+ /* Look up a flow with 'in_port' as the input port. Then restore the
+ * original input port (otherwise OFPP_NORMAL and OFPP_IN_PORT will
+ * have surprising behavior). */
+ old_in_port = ctx->flow.in_port;
+ ctx->flow.in_port = in_port;
+ rule = wx_rule_lookup_valid(ctx->wx, &ctx->flow);
+ ctx->flow.in_port = old_in_port;
+
+ if (rule) {
+ if (rule->super) {
+ rule = rule->super;
+ }
+
+ ctx->recurse++;
+ do_xlate_actions(rule->wr.actions, rule->wr.n_actions, ctx);
+ ctx->recurse--;
+ }
+ }
+ }
+
+ static void
+ xlate_output_action__(struct wx_xlate_ctx *ctx,
+ uint16_t port, uint16_t max_len)
+ {
+ uint16_t xflow_port;
+ uint16_t prev_nf_output_iface = ctx->nf_output_iface;
+
+ ctx->nf_output_iface = NF_OUT_DROP;
+
+ switch (port) {
+ case OFPP_IN_PORT:
+ add_output_action(ctx, ctx->flow.in_port);
+ break;
+ case OFPP_TABLE:
+ xlate_table_action(ctx, ctx->flow.in_port);
+ break;
+ case OFPP_NORMAL:
+ if (!ctx->wx->ofhooks->normal_cb(&ctx->flow, ctx->packet,
+ ctx->out, ctx->tags,
+ &ctx->nf_output_iface,
+ ctx->wx->aux)) {
+ COVERAGE_INC(wx_uninstallable);
+ ctx->may_set_up_flow = false;
+ }
+ break;
+
+ case OFPP_FLOOD:
+ add_output_group_action(ctx->out, WX_GROUP_FLOOD,
+ &ctx->nf_output_iface);
+ break;
+ case OFPP_ALL:
+ add_output_group_action(ctx->out, WX_GROUP_ALL, &ctx->nf_output_iface);
+ break;
+ case OFPP_CONTROLLER:
+ add_controller_action(ctx->out, max_len);
+ break;
+ case OFPP_LOCAL:
+ add_output_action(ctx, XFLOWP_LOCAL);
+ break;
+ default:
+ xflow_port = ofp_port_to_xflow_port(port);
+ if (xflow_port != ctx->flow.in_port) {
+ add_output_action(ctx, xflow_port);
+ }
+ break;
+ }
+
+ if (prev_nf_output_iface == NF_OUT_FLOOD) {
+ ctx->nf_output_iface = NF_OUT_FLOOD;
+ } else if (ctx->nf_output_iface == NF_OUT_DROP) {
+ ctx->nf_output_iface = prev_nf_output_iface;
+ } else if (prev_nf_output_iface != NF_OUT_DROP &&
+ ctx->nf_output_iface != NF_OUT_FLOOD) {
+ ctx->nf_output_iface = NF_OUT_MULTI;
+ }
+ }
+
+ static void
+ xlate_output_action(struct wx_xlate_ctx *ctx,
+ const struct ofp_action_output *oao)
+ {
+ xlate_output_action__(ctx, ntohs(oao->port), ntohs(oao->max_len));
+ }
+
+ /* If the final xflow action in 'ctx' is "pop priority", drop it, as an
+ * optimization, because we're going to add another action that sets the
+ * priority immediately after, or because there are no actions following the
+ * pop. */
+ static void
+ remove_pop_action(struct wx_xlate_ctx *ctx)
+ {
+ size_t n = ctx->out->n_actions;
+ if (n > 0 && ctx->out->actions[n - 1].type == XFLOWAT_POP_PRIORITY) {
+ ctx->out->n_actions--;
+ }
+ }
+
+ static void
+ xlate_enqueue_action(struct wx_xlate_ctx *ctx,
+ const struct ofp_action_enqueue *oae)
+ {
+ uint16_t ofp_port, xflow_port;
+ uint32_t priority;
+ int error;
+
+ error = xfif_queue_to_priority(ctx->wx->xfif, ntohl(oae->queue_id),
+ &priority);
+ if (error) {
+ /* Fall back to ordinary output action. */
+ xlate_output_action__(ctx, ntohs(oae->port), 0);
+ return;
+ }
+
+ /* Figure out xflow output port. */
+ ofp_port = ntohs(oae->port);
+ if (ofp_port != OFPP_IN_PORT) {
+ xflow_port = ofp_port_to_xflow_port(ofp_port);
+ } else {
+ xflow_port = ctx->flow.in_port;
+ }
+
+ /* Add xflow actions. */
+ remove_pop_action(ctx);
+ xflow_actions_add(ctx->out, XFLOWAT_SET_PRIORITY)->priority.priority
+ = priority;
+ add_output_action(ctx, xflow_port);
+ xflow_actions_add(ctx->out, XFLOWAT_POP_PRIORITY);
+
+ /* Update NetFlow output port. */
+ if (ctx->nf_output_iface == NF_OUT_DROP) {
+ ctx->nf_output_iface = xflow_port;
+ } else if (ctx->nf_output_iface != NF_OUT_FLOOD) {
+ ctx->nf_output_iface = NF_OUT_MULTI;
+ }
+ }
+
+ static void
+ xlate_nicira_action(struct wx_xlate_ctx *ctx,
+ const struct nx_action_header *nah)
+ {
+ const struct nx_action_resubmit *nar;
+ const struct nx_action_set_tunnel *nast;
+ union xflow_action *oa;
+ int subtype = ntohs(nah->subtype);
+
+ assert(nah->vendor == htonl(NX_VENDOR_ID));
+ switch (subtype) {
+ case NXAST_RESUBMIT:
+ nar = (const struct nx_action_resubmit *) nah;
+ xlate_table_action(ctx, ofp_port_to_xflow_port(ntohs(nar->in_port)));
+ break;
+
+ case NXAST_SET_TUNNEL:
+ nast = (const struct nx_action_set_tunnel *) nah;
+ oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TUNNEL);
+ ctx->flow.tun_id = oa->tunnel.tun_id = nast->tun_id;
+ break;
+
+ /* If you add a new action here that modifies flow data, don't forget to
+ * update the flow key in ctx->flow at the same time. */
+
+ default:
+ VLOG_DBG_RL(&rl, "unknown Nicira action type %"PRIu16, subtype);
+ break;
+ }
+ }
+
+ static void
+ do_xlate_actions(const union ofp_action *in, size_t n_in,
+ struct wx_xlate_ctx *ctx)
+ {
+ struct actions_iterator iter;
+ const union ofp_action *ia;
+ const struct wdp_port *port;
+
+ port = port_array_get(&ctx->wx->ports, ctx->flow.in_port);
+ if (port && port->opp.config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP) &&
+ port->opp.config & (eth_addr_equals(ctx->flow.dl_dst, stp_eth_addr)
+ ? OFPPC_NO_RECV_STP : OFPPC_NO_RECV)) {
+ /* Drop this flow. */
+ return;
+ }
+
+ for (ia = actions_first(&iter, in, n_in); ia; ia = actions_next(&iter)) {
+ uint16_t type = ntohs(ia->type);
+ union xflow_action *oa;
+
+ switch (type) {
+ case OFPAT_OUTPUT:
+ xlate_output_action(ctx, &ia->output);
+ break;
+
+ case OFPAT_SET_VLAN_VID:
+ oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI);
+ oa->dl_tci.tci = ia->vlan_vid.vlan_vid & htons(VLAN_VID_MASK);
+ oa->dl_tci.mask = htons(VLAN_VID_MASK);
+ ctx->flow.dl_vlan = ia->vlan_vid.vlan_vid;
+ break;
+
+ case OFPAT_SET_VLAN_PCP:
+ oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_TCI);
+ oa->dl_tci.tci = htons((ia->vlan_pcp.vlan_pcp << VLAN_PCP_SHIFT)
+ & VLAN_PCP_MASK);
+ oa->dl_tci.mask = htons(VLAN_PCP_MASK);
+
+ if (ctx->flow.dl_vlan == htons(OFP_VLAN_NONE)) {
+ ctx->flow.dl_vlan = htons(0);
+ }
+ ctx->flow.dl_vlan_pcp = ia->vlan_pcp.vlan_pcp;
+ break;
+
+ case OFPAT_STRIP_VLAN:
+ xflow_actions_add(ctx->out, XFLOWAT_STRIP_VLAN);
+ ctx->flow.dl_vlan = htons(OFP_VLAN_NONE);
+ ctx->flow.dl_vlan_pcp = 0;
+ break;
+
+ case OFPAT_SET_DL_SRC:
+ oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_SRC);
+ memcpy(oa->dl_addr.dl_addr,
+ ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
+ memcpy(ctx->flow.dl_src,
+ ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
+ break;
+
+ case OFPAT_SET_DL_DST:
+ oa = xflow_actions_add(ctx->out, XFLOWAT_SET_DL_DST);
+ memcpy(oa->dl_addr.dl_addr,
+ ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
+ memcpy(ctx->flow.dl_dst,
+ ((struct ofp_action_dl_addr *) ia)->dl_addr, ETH_ADDR_LEN);
+ break;
+
+ case OFPAT_SET_NW_SRC:
+ oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_SRC);
+ ctx->flow.nw_src = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
+ break;
+
+ case OFPAT_SET_NW_DST:
+ oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_DST);
+ ctx->flow.nw_dst = oa->nw_addr.nw_addr = ia->nw_addr.nw_addr;
+ break;
+
+ case OFPAT_SET_NW_TOS:
+ oa = xflow_actions_add(ctx->out, XFLOWAT_SET_NW_TOS);
+ ctx->flow.nw_tos = oa->nw_tos.nw_tos = ia->nw_tos.nw_tos;
+ break;
+
+ case OFPAT_SET_TP_SRC:
+ oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_SRC);
+ ctx->flow.tp_src = oa->tp_port.tp_port = ia->tp_port.tp_port;
+ break;
+
+ case OFPAT_SET_TP_DST:
+ oa = xflow_actions_add(ctx->out, XFLOWAT_SET_TP_DST);
+ ctx->flow.tp_dst = oa->tp_port.tp_port = ia->tp_port.tp_port;
+ break;
+
+ case OFPAT_ENQUEUE:
+ xlate_enqueue_action(ctx, (const struct ofp_action_enqueue *) ia);
+ break;
+
+ case OFPAT_VENDOR:
+ xlate_nicira_action(ctx, (const struct nx_action_header *) ia);
+ break;
+
+ default:
+ VLOG_DBG_RL(&rl, "unknown action type %"PRIu16, type);
+ break;
+ }
+ }
+ }
+
+ /* Returns true if 'flow' and 'actions' may be set up as a flow in the kernel.
+ * This is true most of the time, but we don't allow flows that would prevent
+ * DHCP replies from being seen by the local port to be set up in the
+ * kernel.
+ *
+ * We only need this, strictly speaking, when in-band control is turned on. */
+ static bool
+ wx_may_set_up(const flow_t *flow, const struct xflow_actions *actions)
+ {
+ if (flow->dl_type == htons(ETH_TYPE_IP)
+ && flow->nw_proto == IP_TYPE_UDP
+ && flow->tp_src == htons(DHCP_SERVER_PORT)
+ && flow->tp_dst == htons(DHCP_CLIENT_PORT)) {
+ int i;
+
+ for (i = 0; i < actions->n_actions; i++) {
+ const struct xflow_action_output *oao = &actions->actions[i].output;
+ if (oao->type == XFLOWAT_OUTPUT && oao->port == XFLOWP_LOCAL) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ return true;
+ }
+
+ static int
+ wx_xlate_actions(struct wx *wx, const union ofp_action *in, size_t n_in,
+ const flow_t *flow, const struct ofpbuf *packet,
+ tag_type *tags, struct xflow_actions *out,
+ bool *may_set_up_flow)
+ {
+ tag_type no_tags = 0;
+ struct wx_xlate_ctx ctx;
+ COVERAGE_INC(wx_ofp2xflow);
+ xflow_actions_init(out);
+ ctx.flow = *flow;
+ ctx.recurse = 0;
+ ctx.wx = wx;
+ ctx.packet = packet;
+ ctx.out = out;
+ ctx.tags = tags ? tags : &no_tags;
+ ctx.may_set_up_flow = true;
+ ctx.nf_output_iface = NF_OUT_DROP;
+ do_xlate_actions(in, n_in, &ctx);
+ remove_pop_action(&ctx);
+
+ if (may_set_up_flow) {
+ *may_set_up_flow = ctx.may_set_up_flow && wx_may_set_up(flow, out);
+ }
+ #if 0
+ if (nf_output_iface) {
+ *nf_output_iface = ctx.nf_output_iface;
+ }
+ #endif
+ if (xflow_actions_overflow(out)) {
+ xflow_actions_init(out);
+ return ofp_mkerr(OFPET_BAD_ACTION, OFPBAC_TOO_MANY);
+ }
+ return 0;
+ }
+ \f
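+ /* Pulls the complete flow list from the xfif and uses each flow's statistics
+ * to refresh the corresponding rule's 'used' time and accounting. Datapath
+ * flows that no longer have an installed rule are deleted. */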
+ static void
+ update_used(struct wx *wx)
+ {
+ struct xflow_flow *flows;
+ size_t n_flows;
+ size_t i;
+ int error;
+
+ error = xfif_flow_list_all(wx->xfif, &flows, &n_flows);
+ if (error) {
+ return;
+ }
+
+ for (i = 0; i < n_flows; i++) {
+ struct xflow_flow *f = &flows[i];
+ struct wx_rule *rule;
+ flow_t flow;
+
+ xflow_key_to_flow(&f->key, &flow);
+ rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, &flow));
+ if (!rule || !rule->installed) {
+ COVERAGE_INC(wx_unexpected_rule);
+ xfif_flow_del(wx->xfif, f);
+ continue;
+ }
+
+ wx_rule_update_time(wx, rule, &f->stats);
+ wx_rule_account(wx, rule, f->stats.n_bytes);
+ }
+ free(flows);
+ }
+
+ static void
+ uninstall_idle_flow(struct wx *wx, struct wx_rule *rule)
+ {
+ assert(rule->installed);
+ assert(!rule->wr.cr.flow.wildcards);
+
+ if (rule->super) {
+ wx_rule_remove(wx, rule);
+ } else {
+ wx_rule_uninstall(wx, rule);
+ }
+ }
+
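+ /* Classifier callback: expires 'cls_rule' when its hard or idle timeout has
+ * passed, removing its subrules (or uninstalling its datapath flow) and then
+ * the rule itself. For rules that have not yet expired, uninstalls
+ * exact-match flows that have been idle in the datapath for more than five
+ * seconds. */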
+ static void
+ expire_rule(struct cls_rule *cls_rule, void *wx_)
+ {
+ struct wx *wx = wx_;
+ struct wx_rule *rule = wx_rule_cast(cls_rule);
+ long long int hard_expire, idle_expire, expire, now;
+
+ hard_expire = (rule->wr.hard_timeout
+ ? rule->wr.created + rule->wr.hard_timeout * 1000
+ : LLONG_MAX);
+ idle_expire = (rule->wr.idle_timeout
+ && (rule->super || list_is_empty(&rule->list))
+ ? rule->used + rule->wr.idle_timeout * 1000
+ : LLONG_MAX);
+ expire = MIN(hard_expire, idle_expire);
+
+ now = time_msec();
+ if (now < expire) {
+ if (rule->installed && now >= rule->used + 5000) {
+ uninstall_idle_flow(wx, rule);
+ } else if (!rule->wr.cr.flow.wildcards) {
+ //XXX active_timeout(wx, rule);
+ }
+
+ return;
+ }
+
+ COVERAGE_INC(wx_expired);
+
+ /* Update stats. This code will be a no-op if the rule expired
+ * due to an idle timeout. */
+ if (rule->wr.cr.flow.wildcards) {
+ struct wx_rule *subrule, *next;
+ LIST_FOR_EACH_SAFE (subrule, next, struct wx_rule, list, &rule->list) {
+ wx_rule_remove(wx, subrule);
+ }
+ } else {
+ wx_rule_uninstall(wx, rule);
+ }
+
+ #if 0 /* XXX */
+ if (!wx_rule_is_hidden(rule)) {
+ send_flow_removed(wx, rule, now,
+ (now >= hard_expire
+ ? OFPRR_HARD_TIMEOUT : OFPRR_IDLE_TIMEOUT));
+ }
+ #endif
+ wx_rule_remove(wx, rule);
+ }
+
+ struct revalidate_cbdata {
+ struct wx *wx;
+ bool revalidate_all; /* Revalidate all exact-match rules? */
+ bool revalidate_subrules; /* Revalidate all exact-match subrules? */
+ struct tag_set revalidate_set; /* Set of tags to revalidate. */
+ };
+
+ static bool
+ revalidate_rule(struct wx *wx, struct wx_rule *rule)
+ {
+ const flow_t *flow = &rule->wr.cr.flow;
+
+ COVERAGE_INC(wx_revalidate_rule);
+ if (rule->super) {
+ struct wx_rule *super;
+ super = wx_rule_cast(classifier_lookup_wild(&wx->cls, flow));
+ if (!super) {
+ wx_rule_remove(wx, rule);
+ return false;
+ } else if (super != rule->super) {
+ COVERAGE_INC(wx_revalidate_moved);
+ list_remove(&rule->list);
+ list_push_back(&super->list, &rule->list);
+ rule->super = super;
+ rule->wr.hard_timeout = super->wr.hard_timeout;
+ rule->wr.idle_timeout = super->wr.idle_timeout;
+ rule->wr.created = super->wr.created;
+ rule->used = 0;
+ }
+ }
+
+ wx_rule_update_actions(wx, rule);
+ return true;
+ }
+
+ static void
+ revalidate_cb(struct cls_rule *sub_, void *cbdata_)
+ {
+ struct wx_rule *sub = wx_rule_cast(sub_);
+ struct revalidate_cbdata *cbdata = cbdata_;
+
+ if (cbdata->revalidate_all
+ || (cbdata->revalidate_subrules && sub->super)
+ || tag_set_intersects(&cbdata->revalidate_set, sub->tags)) {
+ revalidate_rule(cbdata->wx, sub);
+ }
+ }
+
+ static void
+ wx_run_one(struct wx *wx)
+ {
+ if (time_msec() >= wx->next_expiration) {
+ COVERAGE_INC(wx_expiration);
+ wx->next_expiration = time_msec() + 1000;
+ update_used(wx);
+
+ classifier_for_each(&wx->cls, CLS_INC_ALL, expire_rule, wx);
+
+ /* XXX account_checkpoint_cb */
+ }
+
+ if (wx->need_revalidate || !tag_set_is_empty(&wx->revalidate_set)) {
+ struct revalidate_cbdata cbdata;
+ cbdata.wx = wx;
+ cbdata.revalidate_all = wx->revalidate_all;
+ cbdata.revalidate_subrules = wx->need_revalidate;
+ cbdata.revalidate_set = wx->revalidate_set;
+ tag_set_init(&wx->revalidate_set);
+ COVERAGE_INC(wx_revalidate);
+ classifier_for_each(&wx->cls, CLS_INC_EXACT, revalidate_cb, &cbdata);
+ wx->need_revalidate = false;
+ }
+ }
+
+ static void
+ wx_run(void)
+ {
+ struct wx *wx;
+
+ LIST_FOR_EACH (wx, struct wx, list_node, &all_wx) {
+ wx_run_one(wx);
+ }
+ xf_run();
+ }
+
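+ /* Arranges for the next poll_block() to wake up when wx_run_one() has work
+ * to do: immediately if revalidation is pending, otherwise at the next
+ * expiration time. */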
+ static void
+ wx_wait_one(struct wx *wx)
+ {
+ if (wx->need_revalidate || !tag_set_is_empty(&wx->revalidate_set)) {
+ poll_immediate_wake();
+ } else if (wx->next_expiration != LLONG_MAX) {
+ poll_timer_wait_until(wx->next_expiration);
+ }
+ }
+
+ static void
+ wx_wait(void)
+ {
+ struct wx *wx;
+
+ LIST_FOR_EACH (wx, struct wx, list_node, &all_wx) {
+ wx_wait_one(wx);
+ }
+ xf_wait();
+ }
+ \f
+ static int wx_flow_flush(struct wdp *);
+
+ static int
+ wx_enumerate(const struct wdp_class *wdp_class, struct svec *all_wdps)
+ {
+ struct svec names = SVEC_EMPTY_INITIALIZER;
+ int error = xf_enumerate_names(wdp_class->type, &names);
+ svec_move(all_wdps, &names);
+ return error;
+ }
+
+ static int
+ wx_open(const struct wdp_class *wdp_class, const char *name, bool create,
+ struct wdp **wdpp)
+ {
+ struct xfif *xfif;
+ int error;
+
+ error = (create
+ ? xfif_create_and_open(name, wdp_class->type, &xfif)
+ : xfif_open(name, wdp_class->type, &xfif));
+ if (!error) {
+ struct wx *wx;
+
+ wx = xzalloc(sizeof *wx);
+ list_push_back(&all_wx, &wx->list_node);
+ wdp_init(&wx->wdp, wdp_class, name, 0, 0);
+ wx->xfif = xfif;
+ classifier_init(&wx->cls);
+ wx->netdev_monitor = netdev_monitor_create();
+ port_array_init(&wx->ports);
+ shash_init(&wx->port_by_name);
+ wx->next_expiration = time_msec() + 1000;
+ tag_set_init(&wx->revalidate_set);
+
+ wx_port_init(wx);
+
+ wx->ofhooks = &default_ofhooks;
+ wx->aux = wx;
+ wx->ml = mac_learning_create();
+
+ *wdpp = &wx->wdp;
+ }
+
+ return error;
+ }
+
+ static void
+ wx_close(struct wdp *wdp)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ wx_flow_flush(wdp);
+ xfif_close(wx->xfif);
+ classifier_destroy(&wx->cls);
+ netdev_monitor_destroy(wx->netdev_monitor);
+ list_remove(&wx->list_node);
+ mac_learning_destroy(wx->ml);
+ free(wx);
+ }
+
+ static int
+ wx_get_all_names(const struct wdp *wdp, struct svec *all_names)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ return xfif_get_all_names(wx->xfif, all_names);
+ }
+
+ static int
+ wx_destroy(struct wdp *wdp)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ return xfif_delete(wx->xfif);
+ }
+
+ static int
+ wx_get_features(const struct wdp *wdp, struct ofpbuf **featuresp)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct ofp_switch_features *osf;
+ struct ofpbuf *buf;
+ unsigned int port_no;
+ struct wdp_port *port;
+
+ buf = ofpbuf_new(sizeof *osf);
+ osf = ofpbuf_put_zeros(buf, sizeof *osf);
+ osf->n_tables = 2;
+ osf->capabilities = htonl(OFPC_ARP_MATCH_IP);
+ osf->actions = htonl((1u << OFPAT_OUTPUT) |
+ (1u << OFPAT_SET_VLAN_VID) |
+ (1u << OFPAT_SET_VLAN_PCP) |
+ (1u << OFPAT_STRIP_VLAN) |
+ (1u << OFPAT_SET_DL_SRC) |
+ (1u << OFPAT_SET_DL_DST) |
+ (1u << OFPAT_SET_NW_SRC) |
+ (1u << OFPAT_SET_NW_DST) |
+ (1u << OFPAT_SET_NW_TOS) |
+ (1u << OFPAT_SET_TP_SRC) |
+ (1u << OFPAT_SET_TP_DST) |
+ (1u << OFPAT_ENQUEUE));
+
+ PORT_ARRAY_FOR_EACH (port, &wx->ports, port_no) {
+ hton_ofp_phy_port(ofpbuf_put(buf, &port->opp, sizeof port->opp));
+ }
+
+ *featuresp = buf;
+ return 0;
+ }
+
+ static void
+ count_subrules(struct cls_rule *cls_rule, void *n_subrules_)
+ {
+ struct wx_rule *rule = wx_rule_cast(cls_rule);
+ int *n_subrules = n_subrules_;
+
+ if (rule->super) {
+ (*n_subrules)++;
+ }
+ }
+
+ static int
+ wx_get_stats(const struct wdp *wdp, struct wdp_stats *stats)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct xflow_stats xflow_stats;
+ int error;
+
+ error = xfif_get_xf_stats(wx->xfif, &xflow_stats);
+ stats->max_ports = xflow_stats.max_ports;
+ return error;
+ }
+
+ static int
+ wx_get_table_stats(const struct wdp *wdp, struct ofpbuf *stats)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct xflow_stats xflow_stats;
+ struct ofp_table_stats *exact, *wild;
+ int n_subrules;
+
+ xfif_get_xf_stats(wx->xfif, &xflow_stats);
+ /* XXX should pass up errors, but there are no appropriate OpenFlow error
+ * codes. */
+
+ n_subrules = 0;
+ classifier_for_each(&wx->cls, CLS_INC_EXACT, count_subrules, &n_subrules);
+
+ exact = ofpbuf_put_zeros(stats, sizeof *exact);
+ exact->table_id = TABLEID_HASH;
+ strcpy(exact->name, "exact");
+ exact->wildcards = htonl(0);
+ exact->max_entries = htonl(MIN(WX_MAX_EXACT, xflow_stats.max_capacity));
+ exact->active_count = htonl(classifier_count_exact(&wx->cls) - n_subrules);
+ exact->lookup_count = htonll(xflow_stats.n_hit + xflow_stats.n_missed);
+ exact->matched_count = htonll(xflow_stats.n_hit);
+
+ wild = ofpbuf_put_zeros(stats, sizeof *wild);
+ wild->table_id = TABLEID_CLASSIFIER;
+ strcpy(wild->name, "classifier");
+ wild->wildcards = htonl(OVSFW_ALL);
+ wild->max_entries = htonl(WX_MAX_WILD);
+ wild->active_count = htonl(classifier_count_wild(&wx->cls));
+ wild->lookup_count = htonll(0); /* XXX */
+ wild->matched_count = htonll(0); /* XXX */
+
+ return 0;
+ }
+
+ static int
+ wx_get_drop_frags(const struct wdp *wdp, bool *drop_frags)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ return xfif_get_drop_frags(wx->xfif, drop_frags);
+ }
+
+ static int
+ wx_set_drop_frags(struct wdp *wdp, bool drop_frags)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ return xfif_set_drop_frags(wx->xfif, drop_frags);
+ }
+
+ static int
+ wx_port_add(struct wdp *wdp, const char *devname,
+ bool internal, uint16_t *port_no)
+ {
+ struct wx *wx = wx_cast(wdp);
+ uint16_t xflow_flags = internal ? XFLOW_PORT_INTERNAL : 0;
+ return xfif_port_add(wx->xfif, devname, xflow_flags, port_no);
+ }
+
+ static int
+ wx_port_del(struct wdp *wdp, uint16_t port_no)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ return xfif_port_del(wx->xfif, port_no);
+ }
+
+ static int
+ wx_answer_port_query(const struct wdp_port *port, struct wdp_port *portp)
+ {
+ if (port) {
+ wdp_port_copy(portp, port);
+ return 0;
+ } else {
+ return ENOENT;
+ }
+ }
+
+ static int
+ wx_port_query_by_number(const struct wdp *wdp, uint16_t port_no,
+ struct wdp_port *portp)
+ {
+ struct wx *wx = wx_cast(wdp);
+ const struct wdp_port *port;
+
+ port = port_array_get(&wx->ports, ofp_port_to_xflow_port(port_no));
+ return wx_answer_port_query(port, portp);
+ }
+
+ static int
+ wx_port_query_by_name(const struct wdp *wdp, const char *devname,
+ struct wdp_port *portp)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ return wx_answer_port_query(shash_find_data(&wx->port_by_name, devname),
+ portp);
+ }
+
+ static int
+ wx_port_set_config(struct wdp *wdp, uint16_t port_no, uint32_t config)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct wdp_port *port;
+ uint32_t changes;
+
+ port = port_array_get(&wx->ports, ofp_port_to_xflow_port(port_no));
+ if (!port) {
+ return ENOENT;
+ }
+ changes = config ^ port->opp.config;
+
+ if (changes & OFPPC_PORT_DOWN) {
+ int error;
+ if (config & OFPPC_PORT_DOWN) {
+ error = netdev_turn_flags_off(port->netdev, NETDEV_UP, true);
+ } else {
+ error = netdev_turn_flags_on(port->netdev, NETDEV_UP, true);
+ }
+ if (!error) {
+ port->opp.config ^= OFPPC_PORT_DOWN;
+ }
+ }
+
+ #define REVALIDATE_BITS (OFPPC_NO_RECV | OFPPC_NO_RECV_STP | OFPPC_NO_FWD)
+ if (changes & REVALIDATE_BITS) {
+ COVERAGE_INC(wx_costly_flags);
+ port->opp.config ^= changes & REVALIDATE_BITS;
+ wx->need_revalidate = true;
+ }
+ #undef REVALIDATE_BITS
+
+ if (changes & OFPPC_NO_FLOOD) {
+ port->opp.config ^= OFPPC_NO_FLOOD;
+ wx_port_refresh_groups(wx);
+ }
+
+ if (changes & OFPPC_NO_PACKET_IN) {
+ port->opp.config ^= OFPPC_NO_PACKET_IN;
+ }
+
+ return 0;
+ }
+
+ static int
+ wx_port_list(const struct wdp *wdp, struct wdp_port **portsp, size_t *n_portsp)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct wdp_port *ports, *port;
+ unsigned int port_no;
+ size_t n_ports, i;
+
+ *n_portsp = n_ports = port_array_count(&wx->ports);
+ *portsp = ports = xmalloc(n_ports * sizeof *ports);
+ i = 0;
+ PORT_ARRAY_FOR_EACH (port, &wx->ports, port_no) {
+ wdp_port_copy(&ports[i++], port);
+ }
+ assert(i == n_ports);
+
+ return 0;
+ }
+
+ static int
+ wx_port_poll(struct wdp *wdp, wdp_port_poll_cb_func *cb, void *aux)
+ {
+ struct wx *wx = wx_cast(wdp);
+ char *devname;
+ int retval;
+ int error;
+
+ retval = 0;
+ while ((error = xfif_port_poll(wx->xfif, &devname)) != EAGAIN) {
+ wx_port_process_change(wx, error, devname, cb, aux);
+ if (error && error != ENOBUFS) {
+ retval = error;
+ }
+ }
+ while ((error = netdev_monitor_poll(wx->netdev_monitor,
+ &devname)) != EAGAIN) {
+ wx_port_process_change(wx, error, devname, cb, aux);
+ if (error && error != ENOBUFS) {
+ retval = error;
+ }
+ }
+ return retval;
+ }
+
+ static int
+ wx_port_poll_wait(const struct wdp *wdp)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ xfif_port_poll_wait(wx->xfif);
+ netdev_monitor_poll_wait(wx->netdev_monitor);
+ return 0;
+ }
+
+ static struct wdp_rule *
+ wx_flow_get(const struct wdp *wdp, const flow_t *flow, unsigned int include)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct wx_rule *rule;
+ int table_id;
+
+ table_id = flow->wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
+ if (!(include & (1u << table_id))) {
+ return NULL;
+ }
+
+ rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, flow));
+ return rule && !wx_rule_is_hidden(rule) ? &rule->wr : NULL;
+ }
+
+ static struct wdp_rule *
+ wx_flow_match(const struct wdp *wdp, const flow_t *flow)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct wx_rule *rule;
+
+ rule = wx_rule_cast(classifier_lookup(&wx->cls, flow));
+ if (rule) {
+ if (wx_rule_is_hidden(rule)) {
+ rule = rule->super;
+ }
+ return &rule->wr;
+ } else {
+ return NULL;
+ }
+ }
+
+ struct wx_for_each_thunk_aux {
+ wdp_flow_cb_func *client_callback;
+ void *client_aux;
+ };
+
+ static void
+ wx_for_each_thunk(struct cls_rule *cls_rule, void *aux_)
+ {
+ struct wx_for_each_thunk_aux *aux = aux_;
+ struct wx_rule *rule = wx_rule_cast(cls_rule);
+
+ if (!wx_rule_is_hidden(rule)) {
+ aux->client_callback(&rule->wr, aux->client_aux);
+ }
+ }
+
+ static void
+ wx_flow_for_each_match(const struct wdp *wdp, const flow_t *target,
+ unsigned int include,
+ wdp_flow_cb_func *client_callback, void *client_aux)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct wx_for_each_thunk_aux aux;
+ int cls_include;
+
+ cls_include = 0;
+ if (include & (1u << TABLEID_HASH)) {
+ cls_include |= CLS_INC_EXACT;
+ }
+ if (include & (1u << TABLEID_CLASSIFIER)) {
+ cls_include |= CLS_INC_WILD;
+ }
+
+ aux.client_callback = client_callback;
+ aux.client_aux = client_aux;
+ classifier_for_each_match(&wx->cls, target, cls_include,
+ wx_for_each_thunk, &aux);
+ }
+
+ /* Obtains statistic counters for 'rule' within 'wx' and stores them into
+ * '*stats'. If 'rule' is a wildcarded rule, the returned statistics include
+ * statistics for all of 'rule''s subrules. */
+ static void
+ query_stats(struct wx *wx, struct wx_rule *rule, struct wdp_flow_stats *stats)
+ {
+ struct wx_rule *subrule;
+ struct xflow_flow *xflow_flows;
+ size_t n_xflow_flows;
+
+ /* Start from historical data for 'rule' itself that are no longer tracked
+ * by the datapath. This counts, for example, subrules that have
+ * expired. */
+ stats->n_packets = rule->packet_count;
+ stats->n_bytes = rule->byte_count;
+ stats->inserted = rule->wr.created;
+ stats->used = LLONG_MIN;
+ stats->tcp_flags = 0;
+ stats->ip_tos = 0;
+
+ /* Prepare to ask the datapath for statistics on 'rule', or if it is
+ * wildcarded then on all of its subrules.
+ *
+ * Also, add any statistics that are not tracked by the datapath for each
+ * subrule. This includes, for example, statistics for packets that were
+ * executed "by hand" by ofproto via xfif_execute() but must be accounted
+ * to a flow. */
+ n_xflow_flows = rule->wr.cr.flow.wildcards ? list_size(&rule->list) : 1;
+ xflow_flows = xzalloc(n_xflow_flows * sizeof *xflow_flows);
+ if (rule->wr.cr.flow.wildcards) {
+ size_t i = 0;
+ LIST_FOR_EACH (subrule, struct wx_rule, list, &rule->list) {
+ xflow_key_from_flow(&xflow_flows[i++].key, &subrule->wr.cr.flow);
+ stats->n_packets += subrule->packet_count;
+ stats->n_bytes += subrule->byte_count;
+ }
+ } else {
+ xflow_key_from_flow(&xflow_flows[0].key, &rule->wr.cr.flow);
+ }
+
+ /* Fetch up-to-date statistics from the datapath and add them in. */
+ if (!xfif_flow_get_multiple(wx->xfif, xflow_flows, n_xflow_flows)) {
+ size_t i;
+ for (i = 0; i < n_xflow_flows; i++) {
+ struct xflow_flow *xflow_flow = &xflow_flows[i];
+ long long int used;
+
+ stats->n_packets += xflow_flow->stats.n_packets;
+ stats->n_bytes += xflow_flow->stats.n_bytes;
+ used = xflow_flow_stats_to_msec(&xflow_flow->stats);
+ if (used > stats->used) {
+ stats->used = used;
+ }
+ stats->tcp_flags |= xflow_flow->stats.tcp_flags;
+ }
+ }
+ free(xflow_flows);
+ }
+
+ static int
+ wx_flow_get_stats(const struct wdp *wdp,
+ const struct wdp_rule *wdp_rule,
+ struct wdp_flow_stats *stats)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
+
+ query_stats(wx, rule, stats);
+ return 0;
+ }
+
+ static bool
+ wx_flow_overlaps(const struct wdp *wdp, const flow_t *flow)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ /* XXX overlap with a subrule? */
+ return classifier_rule_overlaps(&wx->cls, flow);
+ }
+
+ static int
+ wx_flow_put(struct wdp *wdp, const struct wdp_flow_put *put,
+ struct wdp_flow_stats *old_stats, struct wdp_rule **rulep)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct wx_rule *rule;
+ uint8_t ofp_table_id;
+
+ ofp_table_id = put->flow->wildcards ? TABLEID_CLASSIFIER : TABLEID_HASH;
+ if (put->ofp_table_id != 0xff && put->ofp_table_id != ofp_table_id) {
+ return EINVAL;
+ }
+
+ rule = wx_rule_cast(classifier_find_rule_exactly(&wx->cls, put->flow));
+ if (rule && wx_rule_is_hidden(rule)) {
+ rule = NULL;
+ }
+
+ if (rule) {
+ if (!(put->flags & WDP_PUT_MODIFY)) {
+ return EEXIST;
+ }
+ } else {
+ if (!(put->flags & WDP_PUT_CREATE)) {
+ return EINVAL;
+ }
+ if ((put->flow->wildcards
+ ? classifier_count_wild(&wx->cls) >= WX_MAX_WILD
+ : classifier_count_exact(&wx->cls) >= WX_MAX_EXACT)) {
+ /* XXX subrules should not count against exact-match limit */
+ return ENOBUFS;
+ }
+ }
+
+ rule = wx_rule_create(NULL, put->actions, put->n_actions,
+ put->idle_timeout, put->hard_timeout);
+ cls_rule_from_flow(put->flow, &rule->wr.cr);
+ rule->wr.ofp_table_id = ofp_table_id;
+ wx_rule_insert(wx, rule, NULL, 0);
+
+ if (old_stats) {
+ /* XXX */
+ memset(old_stats, 0, sizeof *old_stats);
+ }
+ if (rulep) {
+ *rulep = &rule->wr;
+ }
+
+ return 0;
+ }
+
+ static int
+ wx_flow_delete(struct wdp *wdp, struct wdp_rule *wdp_rule,
+ struct wdp_flow_stats *final_stats)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
+
+ wx_rule_remove(wx, rule);
+ if (final_stats) {
+ memset(final_stats, 0, sizeof *final_stats); /* XXX */
+ }
+ return 0;
+ }
+
+ static void
+ wx_flush_rule(struct cls_rule *cls_rule, void *wx_)
+ {
+ struct wx_rule *rule = wx_rule_cast(cls_rule);
+ struct wx *wx = wx_;
+
+ /* Mark the flow as not installed, even though it might really be
+ * installed, so that wx_rule_remove() doesn't bother trying to uninstall
+ * it. There is no point in uninstalling it individually since we are
+ * about to blow away all the flows with xfif_flow_flush(). */
+ rule->installed = false;
+
+ wx_rule_remove(wx, rule);
+ }
+
+ static int
+ wx_flow_flush(struct wdp *wdp)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ COVERAGE_INC(wx_flow_flush);
+ classifier_for_each(&wx->cls, CLS_INC_ALL, wx_flush_rule, wx);
+ xfif_flow_flush(wx->xfif);
+ return 0;
+ }
+
+ static int
+ wx_execute(struct wdp *wdp, uint16_t in_port,
+ const union ofp_action actions[], int n_actions,
+ const struct ofpbuf *packet)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct xflow_actions xflow_actions;
+ flow_t flow;
+ int error;
+
+ flow_extract((struct ofpbuf *) packet, 0, in_port, &flow);
+ error = wx_xlate_actions(wx, actions, n_actions, &flow, packet,
+ NULL, &xflow_actions, NULL);
+ if (error) {
+ return error;
+ }
+ xfif_execute(wx->xfif, ofp_port_to_xflow_port(in_port),
+ xflow_actions.actions, xflow_actions.n_actions, packet);
+ return 0;
+ }
+
+ static int
+ wx_flow_inject(struct wdp *wdp, struct wdp_rule *wdp_rule,
+ uint16_t in_port, const struct ofpbuf *packet)
+ {
+ struct wx_rule *rule = wx_rule_cast(&wdp_rule->cr);
+ int error;
+
+ error = wx_execute(wdp, in_port, rule->wr.actions, rule->wr.n_actions,
+ packet);
+ if (!error) {
+ rule->packet_count++;
+ rule->byte_count += packet->size;
+ rule->used = time_msec();
+ }
+ return error;
+ }
+
+ static int
+ wx_recv_get_mask(const struct wdp *wdp, int *listen_mask)
+ {
+ struct wx *wx = wx_cast(wdp);
+ int xflow_listen_mask;
+ int error;
+
+ error = xfif_recv_get_mask(wx->xfif, &xflow_listen_mask);
+ if (!error) {
+ *listen_mask = 0;
+ if (xflow_listen_mask & XFLOWL_MISS) {
+ *listen_mask |= 1 << WDP_CHAN_MISS;
+ }
+ if (xflow_listen_mask & XFLOWL_ACTION) {
+ *listen_mask |= 1 << WDP_CHAN_ACTION;
+ }
+ if (xflow_listen_mask & XFLOWL_SFLOW) {
+ *listen_mask |= 1 << WDP_CHAN_SFLOW;
+ }
+ }
+ return error;
+ }
+
+ static int
+ wx_recv_set_mask(struct wdp *wdp, int listen_mask)
+ {
+ struct wx *wx = wx_cast(wdp);
+ int xflow_listen_mask;
+
+ xflow_listen_mask = 0;
+ if (listen_mask & (1 << WDP_CHAN_MISS)) {
+ xflow_listen_mask |= XFLOWL_MISS;
+ }
+ if (listen_mask & (1 << WDP_CHAN_ACTION)) {
+ xflow_listen_mask |= XFLOWL_ACTION;
+ }
+ if (listen_mask & (1 << WDP_CHAN_SFLOW)) {
+ xflow_listen_mask |= XFLOWL_SFLOW;
+ }
+
+ return xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
+ }
+
+ static int
+ wx_get_sflow_probability(const struct wdp *wdp, uint32_t *probability)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ return xfif_get_sflow_probability(wx->xfif, probability);
+ }
+
+ static int
+ wx_set_sflow_probability(struct wdp *wdp, uint32_t probability)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ return xfif_set_sflow_probability(wx->xfif, probability);
+ }
+
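+ /* Converts xflow message 'msg', whose payload is 'payload', into the
+ * client-visible 'packet'. Returns 0 on success or ENOSYS if the message
+ * cannot be translated (in which case 'payload' is freed). */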
+ static int
+ wx_translate_xflow_msg(struct xflow_msg *msg, struct ofpbuf *payload,
+ struct wdp_packet *packet)
+ {
+ packet->in_port = xflow_port_to_ofp_port(msg->port);
+ packet->send_len = 0;
+ packet->tun_id = 0;
+
+ switch (msg->type) {
+ case _XFLOWL_MISS_NR:
+ packet->channel = WDP_CHAN_MISS;
+ packet->payload = payload;
+ packet->tun_id = msg->arg;
+ return 0;
+
+ case _XFLOWL_ACTION_NR:
+ packet->channel = WDP_CHAN_ACTION;
+ packet->payload = payload;
+ packet->send_len = msg->arg;
+ return 0;
+
+ case _XFLOWL_SFLOW_NR:
+ /* XXX */
+ ofpbuf_delete(payload);
+ return ENOSYS;
+
+ default:
+ VLOG_WARN_RL(&rl, "received XFLOW message of unexpected type %"PRIu32,
+ msg->type);
+ ofpbuf_delete(payload);
+ return ENOSYS;
+ }
+ }
+
+ static const uint8_t *
+ get_local_mac(const struct wx *wx)
+ {
+ const struct wdp_port *port = port_array_get(&wx->ports, XFLOWP_LOCAL);
+ return port ? port->opp.hw_addr : NULL;
+ }
+
+ /* Returns true if 'packet' is a DHCP reply to the local port. Such a reply
+ * should be sent to the local port regardless of the flow table.
+ *
+ * We only need this, strictly speaking, when in-band control is turned on. */
+ static bool
+ wx_is_local_dhcp_reply(const struct wx *wx,
+ const flow_t *flow, const struct ofpbuf *packet)
+ {
+ if (flow->dl_type == htons(ETH_TYPE_IP)
+ && flow->nw_proto == IP_TYPE_UDP
+ && flow->tp_src == htons(DHCP_SERVER_PORT)
+ && flow->tp_dst == htons(DHCP_CLIENT_PORT)
+ && packet->l7)
+ {
+ const uint8_t *local_mac = get_local_mac(wx);
+ struct dhcp_header *dhcp = ofpbuf_at(
+ packet, (char *)packet->l7 - (char *)packet->data, sizeof *dhcp);
+ return dhcp && local_mac && eth_addr_equals(dhcp->chaddr, local_mac);
+ }
+
+ return false;
+ }
+
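+ /* Attempts to handle the flow miss in 'msg'/'payload' by finding a matching
+ * rule, creating an exact-match subrule for a wildcarded match, and
+ * executing the rule's actions on the packet. Returns true if the miss was
+ * handled, false if it should be passed up to the client. */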
+ static bool
+ wx_explode_rule(struct wx *wx, struct xflow_msg *msg, struct ofpbuf *payload)
+ {
+ struct wx_rule *rule;
+ flow_t flow;
+
+ flow_extract(payload, 0, xflow_port_to_ofp_port(msg->port), &flow);
+
+ if (wx_is_local_dhcp_reply(wx, &flow, payload)) {
+ union xflow_action action;
+
+ memset(&action, 0, sizeof(action));
+ action.output.type = XFLOWAT_OUTPUT;
+ action.output.port = XFLOWP_LOCAL;
+ xfif_execute(wx->xfif, msg->port, &action, 1, payload);
+ }
+
+ rule = wx_rule_lookup_valid(wx, &flow);
+ if (!rule) {
+ return false;
+ }
+
+ if (rule->wr.cr.flow.wildcards) {
+ rule = wx_rule_create_subrule(wx, rule, &flow);
+ wx_rule_make_actions(wx, rule, payload);
+ } else {
+ if (!rule->may_install) {
+ /* The rule is not installable, that is, we need to process every
+ * packet, so process the current packet and set its actions into
+ * 'rule'. */
+ wx_rule_make_actions(wx, rule, payload);
+ } else {
+ /* XXX revalidate rule if it needs it */
+ }
+ }
+
+ wx_rule_execute(wx, rule, payload, &flow);
+ wx_rule_reinstall(wx, rule);
+
+ return true;
+ }
+
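+ /* Fetches the next queued packet that must be passed up to the client into
+ * '*packet'. Flow misses that wx_explode_rule() can satisfy are consumed
+ * internally; up to 50 messages are examined before giving up with EAGAIN. */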
+ static int
+ wx_recv(struct wdp *wdp, struct wdp_packet *packet)
+ {
+ struct wx *wx = wx_cast(wdp);
+ int i;
+
+ /* XXX need to avoid 50*50 potential cost for caller. */
+ for (i = 0; i < 50; i++) {
+ struct xflow_msg *msg;
+ struct ofpbuf *buf;
+ int error;
+
+ error = xfif_recv(wx->xfif, &buf);
+ if (error) {
+ return error;
+ }
+
+ msg = ofpbuf_pull(buf, sizeof *msg);
+ if (msg->type != _XFLOWL_MISS_NR || !wx_explode_rule(wx, msg, buf)) {
+ return wx_translate_xflow_msg(msg, buf, packet);
+ }
+ ofpbuf_delete(buf);
+ }
+ return EAGAIN;
+ }
+
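+ /* Helper for wx_recv_purge(): restricts the datapath listen mask to
+ * 'xflow_listen_mask', discards up to 'max' queued packets, and records any
+ * unexpected error in '*errorp'. */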
+ static void
+ wx_recv_purge_queue__(struct wx *wx, int max, int xflow_listen_mask,
+ int *errorp)
+ {
+ int error;
+
+ error = xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
+ if (!error) {
+ struct ofpbuf *buf;
+
+ while (max > 0 && (error = xfif_recv(wx->xfif, &buf)) == 0) {
+ ofpbuf_delete(buf);
+ max--;
+ }
+ }
+ if (error && error != EAGAIN) {
+ *errorp = error;
+ }
+ }
+
+ static int
+ wx_recv_purge(struct wdp *wdp)
+ {
+ struct wx *wx = wx_cast(wdp);
+ struct xflow_stats xflow_stats;
+ int xflow_listen_mask;
+ int retval, error;
+
+ xfif_get_xf_stats(wx->xfif, &xflow_stats);
+
+ error = xfif_recv_get_mask(wx->xfif, &xflow_listen_mask);
+ if (error || !(xflow_listen_mask & XFLOWL_ALL)) {
+ return error;
+ }
+
+ if (xflow_listen_mask & XFLOWL_MISS) {
+ wx_recv_purge_queue__(wx, xflow_stats.max_miss_queue, XFLOWL_MISS,
+ &error);
+ }
+ if (xflow_listen_mask & XFLOWL_ACTION) {
+ wx_recv_purge_queue__(wx, xflow_stats.max_action_queue, XFLOWL_ACTION,
+ &error);
+ }
+ if (xflow_listen_mask & XFLOWL_SFLOW) {
+ wx_recv_purge_queue__(wx, xflow_stats.max_sflow_queue, XFLOWL_SFLOW,
+ &error);
+ }
+
+ retval = xfif_recv_set_mask(wx->xfif, xflow_listen_mask);
+ return retval ? retval : error;
+ }
+
+ static void
+ wx_recv_wait(struct wdp *wdp)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ xfif_recv_wait(wx->xfif);
+ }
+
+ static int
+ wx_set_ofhooks(struct wdp *wdp, const struct ofhooks *ofhooks, void *aux)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ if (wx->ofhooks == &default_ofhooks) {
+ mac_learning_destroy(wx->ml);
+ wx->ml = NULL;
+ }
+
+ wx->ofhooks = ofhooks;
+ wx->aux = aux;
+ return 0;
+ }
+
+ static void
+ wx_revalidate(struct wdp *wdp, tag_type tag)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ tag_set_add(&wx->revalidate_set, tag);
+ }
+
+ static void
+ wx_revalidate_all(struct wdp *wdp)
+ {
+ struct wx *wx = wx_cast(wdp);
+
+ wx->revalidate_all = true;
+ }
+ \f
+ static void wx_port_update(struct wx *, const char *devname,
+ wdp_port_poll_cb_func *cb, void *aux);
+ static void wx_port_reinit(struct wx *, wdp_port_poll_cb_func *cb, void *aux);
+
+ static void
+ wx_port_process_change(struct wx *wx, int error, char *devname,
+ wdp_port_poll_cb_func *cb, void *aux)
+ {
+ if (error == ENOBUFS) {
+ wx_port_reinit(wx, cb, aux);
+ } else if (!error) {
+ wx_port_update(wx, devname, cb, aux);
+ free(devname);
+ }
+ }
+
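+ /* Rebuilds datapath port group 'group' (WX_GROUP_ALL or WX_GROUP_FLOOD)
+ * from the current set of ports, omitting no-flood ports from the flood
+ * group. Returns the number of ports in the group. */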
+ static size_t
+ wx_port_refresh_group(struct wx *wx, unsigned int group)
+ {
+ uint16_t *ports;
+ size_t n_ports;
+ struct wdp_port *port;
+ unsigned int port_no;
+
+ assert(group == WX_GROUP_ALL || group == WX_GROUP_FLOOD);
+
+ ports = xmalloc(port_array_count(&wx->ports) * sizeof *ports);
+ n_ports = 0;
+ PORT_ARRAY_FOR_EACH (port, &wx->ports, port_no) {
+ if (group == WX_GROUP_ALL || !(port->opp.config & OFPPC_NO_FLOOD)) {
+ ports[n_ports++] = port_no;
+ }
+ }
+ xfif_port_group_set(wx->xfif, group, ports, n_ports);
+ free(ports);
+
+ return n_ports;
+ }
+
+ static void
+ wx_port_refresh_groups(struct wx *wx)
+ {
+ wx_port_refresh_group(wx, WX_GROUP_FLOOD);
+ wx_port_refresh_group(wx, WX_GROUP_ALL);
+ }
+
+ static void
+ wx_port_reinit(struct wx *wx, wdp_port_poll_cb_func *cb, void *aux)
+ {
+ struct svec devnames;
+ struct wdp_port *wdp_port;
+ unsigned int port_no;
+ struct xflow_port *xflow_ports;
+ size_t n_xflow_ports;
+ size_t i;
+
+ svec_init(&devnames);
+ PORT_ARRAY_FOR_EACH (wdp_port, &wx->ports, port_no) {
+ svec_add(&devnames, (char *) wdp_port->opp.name);
+ }
+ xfif_port_list(wx->xfif, &xflow_ports, &n_xflow_ports);
+ for (i = 0; i < n_xflow_ports; i++) {
+ svec_add(&devnames, xflow_ports[i].devname);
+ }
+ free(xflow_ports);
+
+ svec_sort_unique(&devnames);
+ for (i = 0; i < devnames.n; i++) {
+ wx_port_update(wx, devnames.names[i], cb, aux);
+ }
+ svec_destroy(&devnames);
+
+ wx_port_refresh_groups(wx);
+ }
+
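+ /* Creates and returns a new wdp_port for 'xflow_port', filling in its
+ * OpenFlow port description from the network device. Returns NULL, after
+ * logging a warning, if the network device cannot be opened. */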
+ static struct wdp_port *
+ make_wdp_port(const struct xflow_port *xflow_port)
+ {
+ struct netdev_options netdev_options;
+ enum netdev_flags flags;
+ struct wdp_port *wdp_port;
+ struct netdev *netdev;
+ bool carrier;
+ int error;
+
+ memset(&netdev_options, 0, sizeof netdev_options);
+ netdev_options.name = xflow_port->devname;
+ netdev_options.ethertype = NETDEV_ETH_TYPE_NONE;
+
+ error = netdev_open(&netdev_options, &netdev);
+ if (error) {
+ VLOG_WARN_RL(&rl, "ignoring port %s (%"PRIu16") because netdev %s "
+ "cannot be opened (%s)",
+ xflow_port->devname, xflow_port->port,
+ xflow_port->devname, strerror(error));
+ return NULL;
+ }
+
+ wdp_port = xmalloc(sizeof *wdp_port);
+ wdp_port->netdev = netdev;
+ wdp_port->opp.port_no = xflow_port_to_ofp_port(xflow_port->port);
+ netdev_get_etheraddr(netdev, wdp_port->opp.hw_addr);
+ strncpy((char *) wdp_port->opp.name, xflow_port->devname,
+ sizeof wdp_port->opp.name);
+ wdp_port->opp.name[sizeof wdp_port->opp.name - 1] = '\0';
+
+ netdev_get_flags(netdev, &flags);
+ wdp_port->opp.config = flags & NETDEV_UP ? 0 : OFPPC_PORT_DOWN;
+
+ netdev_get_carrier(netdev, &carrier);
+ wdp_port->opp.state = carrier ? 0 : OFPPS_LINK_DOWN;
+
+ netdev_get_features(netdev,
+ &wdp_port->opp.curr, &wdp_port->opp.advertised,
+ &wdp_port->opp.supported, &wdp_port->opp.peer);
+
+ wdp_port->devname = xstrdup(xflow_port->devname);
+ wdp_port->internal = (xflow_port->flags & XFLOW_PORT_INTERNAL) != 0;
+ return wdp_port;
+ }
+
+ static bool
+ wx_port_conflicts(const struct wx *wx, const struct xflow_port *xflow_port)
+ {
+ if (port_array_get(&wx->ports, xflow_port->port)) {
+ VLOG_WARN_RL(&rl, "ignoring duplicate port %"PRIu16" in datapath",
+ xflow_port->port);
+ return true;
+ } else if (shash_find(&wx->port_by_name, xflow_port->devname)) {
+ VLOG_WARN_RL(&rl, "ignoring duplicate device %s in datapath",
+ xflow_port->devname);
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ static int
+ wdp_port_equal(const struct wdp_port *a_, const struct wdp_port *b_)
+ {
+ const struct ofp_phy_port *a = &a_->opp;
+ const struct ofp_phy_port *b = &b_->opp;
+
+ BUILD_ASSERT_DECL(sizeof *a == 48); /* Detect ofp_phy_port changes. */
+ return (a->port_no == b->port_no
+ && !memcmp(a->hw_addr, b->hw_addr, sizeof a->hw_addr)
+ && !strcmp((char *) a->name, (char *) b->name)
+ && a->state == b->state
+ && a->config == b->config
+ && a->curr == b->curr
+ && a->advertised == b->advertised
+ && a->supported == b->supported
+ && a->peer == b->peer);
+ }
+
+ static void
+ wx_port_install(struct wx *wx, struct wdp_port *wdp_port)
+ {
+ uint16_t xflow_port = ofp_port_to_xflow_port(wdp_port->opp.port_no);
+ const char *netdev_name = (const char *) wdp_port->opp.name;
+
+ netdev_monitor_add(wx->netdev_monitor, wdp_port->netdev);
+ port_array_set(&wx->ports, xflow_port, wdp_port);
+ shash_add(&wx->port_by_name, netdev_name, wdp_port);
+ }
+
+ static void
+ wx_port_remove(struct wx *wx, struct wdp_port *wdp_port)
+ {
+ uint16_t xflow_port = ofp_port_to_xflow_port(wdp_port->opp.port_no);
+
+ netdev_monitor_remove(wx->netdev_monitor, wdp_port->netdev);
+ port_array_delete(&wx->ports, xflow_port);
+ shash_delete(&wx->port_by_name,
+ shash_find(&wx->port_by_name, (char *) wdp_port->opp.name));
+ }
+
+ static void
+ wx_port_free(struct wdp_port *wdp_port)
+ {
+ if (wdp_port) {
+ netdev_close(wdp_port->netdev);
+ free(wdp_port);
+ }
+ }
+
+ static void
+ wx_port_update(struct wx *wx, const char *devname,
+ wdp_port_poll_cb_func *cb, void *aux)
+ {
+ struct xflow_port xflow_port;
+ struct wdp_port *old_wdp_port;
+ struct wdp_port *new_wdp_port;
+ int error;
+
+ COVERAGE_INC(wx_update_port);
+
+ /* Query the datapath for port information. */
+ error = xfif_port_query_by_name(wx->xfif, devname, &xflow_port);
+
+ /* Find the old wdp_port. */
+ old_wdp_port = shash_find_data(&wx->port_by_name, devname);
+ if (!error) {
+ if (!old_wdp_port) {
+ /* There's no port named 'devname' but there might be a port with
+ * the same port number. This could happen if a port is deleted
+ * and then a new one added in its place very quickly, or if a port
+ * is renamed. In the former case we want to send an OFPPR_DELETE
+ * and an OFPPR_ADD, and in the latter case we want to send a
+ * single OFPPR_MODIFY. We can distinguish the cases by comparing
+ * the old port's ifindex against the new port, or perhaps less
+ * reliably but more portably by comparing the old port's MAC
+ * against the new port's MAC. However, this code isn't that smart
+ * and always sends an OFPPR_MODIFY (XXX). */
+ old_wdp_port = port_array_get(&wx->ports, xflow_port.port);
+ }
+ } else if (error != ENOENT && error != ENODEV) {
+ VLOG_WARN_RL(&rl, "xfif_port_query_by_name returned unexpected error "
+ "%s", strerror(error));
+ return;
+ }
+
+ /* Create a new wdp_port. */
+ new_wdp_port = !error ? make_wdp_port(&xflow_port) : NULL;
+
+ /* Eliminate a few pathological cases. */
+ if (!old_wdp_port && !new_wdp_port) {
+ return;
+ } else if (old_wdp_port && new_wdp_port) {
+ /* Most of the 'config' bits are OpenFlow soft state, but
+ * OFPPC_PORT_DOWN is maintained by the kernel. So transfer the
+ * OpenFlow bits from old_wdp_port. (make_wdp_port() only sets
+ * OFPPC_PORT_DOWN and leaves the other bits 0.) */
+ new_wdp_port->opp.config |= old_wdp_port->opp.config & ~OFPPC_PORT_DOWN;
+
+ if (wdp_port_equal(old_wdp_port, new_wdp_port)) {
+ /* False alarm--no change. */
+ wx_port_free(new_wdp_port);
+ return;
+ }
+ }
+
+ /* Now deal with the normal cases. */
+ if (old_wdp_port) {
+ wx_port_remove(wx, old_wdp_port);
+ }
+ if (new_wdp_port) {
+ wx_port_install(wx, new_wdp_port);
+ }
+
+ /* Call back. */
+ if (!old_wdp_port) {
+ (*cb)(&new_wdp_port->opp, OFPPR_ADD, aux);
+ } else if (!new_wdp_port) {
+ (*cb)(&old_wdp_port->opp, OFPPR_DELETE, aux);
+ } else {
+ (*cb)(&new_wdp_port->opp, OFPPR_MODIFY, aux);
+ }
+
+ /* Update port groups. */
+ wx_port_refresh_groups(wx);
+
+ /* Clean up. */
+ wx_port_free(old_wdp_port);
+ }
+
+ static int
+ wx_port_init(struct wx *wx)
+ {
+ struct xflow_port *ports;
+ size_t n_ports;
+ size_t i;
+ int error;
+
+ error = xfif_port_list(wx->xfif, &ports, &n_ports);
+ if (error) {
+ return error;
+ }
+
+ for (i = 0; i < n_ports; i++) {
+ const struct xflow_port *xflow_port = &ports[i];
+ if (!wx_port_conflicts(wx, xflow_port)) {
+ struct wdp_port *wdp_port = make_wdp_port(xflow_port);
+ if (wdp_port) {
+ wx_port_install(wx, wdp_port);
+ }
+ }
+ }
+ free(ports);
+ wx_port_refresh_groups(wx);
+ return 0;
+ }
+ \f
+ void
+ wdp_xflow_register(void)
+ {
+ static const struct wdp_class wdp_xflow_class = {
+ NULL, /* name */
+ wx_run,
+ wx_wait,
+ wx_enumerate,
+ wx_open,
+ wx_close,
+ wx_get_all_names,
+ wx_destroy,
+ wx_get_features,
+ wx_get_stats,
+ wx_get_table_stats,
+ wx_get_drop_frags,
+ wx_set_drop_frags,
+ wx_port_add,
+ wx_port_del,
+ wx_port_query_by_number,
+ wx_port_query_by_name,
+ wx_port_list,
+ wx_port_set_config,
+ wx_port_poll,
+ wx_port_poll_wait,
+ wx_flow_get,
+ wx_flow_match,
+ wx_flow_for_each_match,
+ wx_flow_get_stats,
+ wx_flow_overlaps,
+ wx_flow_put,
+ wx_flow_delete,
+ wx_flow_flush,
+ wx_flow_inject,
+ wx_execute,
+ wx_recv_get_mask,
+ wx_recv_set_mask,
+ wx_get_sflow_probability,
+ wx_set_sflow_probability,
+ wx_recv,
+ wx_recv_purge,
+ wx_recv_wait,
+ wx_set_ofhooks,
+ wx_revalidate,
+ wx_revalidate_all,
+ };
+
+ static bool inited = false;
+
+ struct svec types;
+ const char *type;
+ bool registered;
+ int i;
+
+ if (inited) {
+ return;
+ }
+ inited = true;
+
+ svec_init(&types);
+ xf_enumerate_types(&types);
+
+ registered = false;
+ SVEC_FOR_EACH (i, type, &types) {
+ struct wdp_class *class;
+
+ class = xmalloc(sizeof *class);
+ *class = wdp_xflow_class;
+ class->type = xstrdup(type);
+ if (registered) {
+ class->run = NULL;
+ class->wait = NULL;
+ }
+ if (!wdp_register_provider(class)) {
+ registered = true;
+ }
+ }
+
+ svec_destroy(&types);
+ }
+ \f
+ static bool
+ default_normal_ofhook_cb(const flow_t *flow, const struct ofpbuf *packet,
+ struct xflow_actions *actions, tag_type *tags,
+ uint16_t *nf_output_iface, void *wx_)
+ {
+ struct wx *wx = wx_;
+ int out_port;
+
+ /* Drop frames for reserved multicast addresses. */
+ if (eth_addr_is_reserved(flow->dl_dst)) {
+ return true;
+ }
+
+ /* Learn source MAC (but don't try to learn from revalidation). */
+ if (packet != NULL) {
+ tag_type rev_tag = mac_learning_learn(wx->ml, flow->dl_src,
+ 0, flow->in_port,
+ GRAT_ARP_LOCK_NONE);
+ if (rev_tag) {
+ /* The log messages here could actually be useful in debugging,
+ * so keep the rate limit relatively high. */
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(30, 300);
+ VLOG_DBG_RL(&rl, "learned that "ETH_ADDR_FMT" is on port %"PRIu16,
+ ETH_ADDR_ARGS(flow->dl_src), flow->in_port);
+ tag_set_add(&wx->revalidate_set, rev_tag);
+ }
+ }
+
+ /* Determine output port. */
+ out_port = mac_learning_lookup_tag(wx->ml, flow->dl_dst, 0, tags,
+ NULL);
+ if (out_port < 0) {
+ add_output_group_action(actions, WX_GROUP_FLOOD, nf_output_iface);
+ } else if (out_port != flow->in_port) {
+ xflow_actions_add(actions, XFLOWAT_OUTPUT)->output.port = out_port;
+ *nf_output_iface = out_port;
+ } else {
+ /* Drop. */
+ }
+
+ return true;
+ }
+
+ static const struct ofhooks default_ofhooks = {
+ NULL,
+ default_normal_ofhook_cb,
+ NULL,
+ NULL
+ };
#include "command-line.h"
#include "compiler.h"
#include "dirs.h"
- #include "dpif.h"
+ #include "dynamic-string.h"
+ #include "netdev.h"
#include "netlink.h"
- #include "odp-util.h"
-#include "xflow-util.h"
+#include "ofp-parse.h"
#include "ofp-print.h"
#include "ofp-util.h"
#include "ofpbuf.h"
#include "util.h"
#include "vconn.h"
#include "vlog.h"
+ #include "xfif.h"
++#include "xflow-util.h"
#include "xtoxll.h"
VLOG_DEFINE_THIS_MODULE(ofctl)
uint16_t priority, idle_timeout, hard_timeout;
uint64_t cookie;
struct ofp_match match;
+ uint8_t table_idx;
- /* Parse and send. str_to_flow() will expand and reallocate the data in
- * 'buffer', so we can't keep pointers to across the str_to_flow() call. */
+ /* Parse and send. parse_ofp_str() will expand and reallocate the
+ * data in 'buffer', so we can't keep pointers into it across the
+ * parse_ofp_str() call. */
make_openflow(sizeof *ofm, OFPT_FLOW_MOD, &buffer);
- str_to_flow(argv[2], &match, buffer,
- &table_idx, NULL, &priority, &idle_timeout, &hard_timeout,
- &cookie);
+ parse_ofp_str(argv[2], &match, buffer,
- NULL, NULL, &priority, &idle_timeout, &hard_timeout,
++ &table_idx, NULL, &priority, &idle_timeout, &hard_timeout,
+ &cookie);
ofm = buffer->data;
ofm->match = match;
ofm->command = htons(OFPFC_ADD);
continue;
}
- /* Parse and send. str_to_flow() will expand and reallocate the data
- * in 'buffer', so we can't keep pointers to across the str_to_flow()
- * call. */
+ /* Parse and send. parse_ofp_str() will expand and reallocate
+ * the data in 'buffer', so we can't keep pointers into it across
+ * the parse_ofp_str() call. */
make_openflow(sizeof *ofm, OFPT_FLOW_MOD, &buffer);
- parse_ofp_str(line, &match, buffer,
- NULL, NULL, &priority, &idle_timeout, &hard_timeout,
- &cookie);
- str_to_flow(line, &match, buffer,
- &table_idx, NULL, &priority, &idle_timeout, &hard_timeout,
- &cookie);
++ parse_ofp_str(line, &match, buffer, &table_idx, NULL, &priority,
++ &idle_timeout, &hard_timeout, &cookie);
ofm = buffer->data;
ofm->match = match;
ofm->command = htons(OFPFC_ADD);
struct ofpbuf *buffer;
struct ofp_flow_mod *ofm;
struct ofp_match match;
+ uint8_t table_idx;
- /* Parse and send. str_to_flow() will expand and reallocate the data in
- * 'buffer', so we can't keep pointers to across the str_to_flow() call. */
+ /* Parse and send. parse_ofp_str() will expand and reallocate the
+ * data in 'buffer', so we can't keep pointers into it across the
+ * parse_ofp_str() call. */
make_openflow(sizeof *ofm, OFPT_FLOW_MOD, &buffer);
- str_to_flow(argv[2], &match, buffer,
- &table_idx, NULL, &priority, &idle_timeout, &hard_timeout,
- &cookie);
+ parse_ofp_str(argv[2], &match, buffer,
- NULL, NULL, &priority, &idle_timeout, &hard_timeout,
++ &table_idx, NULL, &priority, &idle_timeout, &hard_timeout,
+ &cookie);
ofm = buffer->data;
ofm->match = match;
if (strict) {
/* Parse and send. */
ofm = make_openflow(sizeof *ofm, OFPT_FLOW_MOD, &buffer);
- parse_ofp_str(argc > 2 ? argv[2] : "", &ofm->match, NULL, NULL,
- str_to_flow(argc > 2 ? argv[2] : "", &ofm->match, NULL, NULL,
- &out_port, &priority, NULL, NULL, NULL);
++ parse_ofp_str(argc > 2 ? argv[2] : "", &ofm->match, NULL, &table_idx,
+ &out_port, &priority, NULL, NULL, NULL);
if (strict) {
ofm->command = htons(OFPFC_DELETE_STRICT);
} else {
The mandatory \fIdatapath\fR argument specifies the local datapath
to relay. It takes one of the following forms:
.
- .so lib/dpif.man
+ .so lib/xfif.man
.
.PP
-The optional \fIcontroller\fR arguments specify how to connect to
-the OpenFlow controller. It takes one of the following forms:
+The optional \fIcontroller\fR arguments specify how to connect to the
+OpenFlow controller or controllers. Each takes one of the following
+forms:
.
.so lib/vconn-active.man
.
}
/* Local vconns. */
- dp_parse_name(argv[0], &s->dp_name, &s->dp_type);
+ xf_parse_name(argv[0], &s->dp_name, &s->dp_type);
- /* Controllers. */
- s->n_controllers = argc > 1 ? argc - 1 : 1;
+ /* Figure out controller names. */
+ if (!controllers.n) {
+ svec_add_nocopy(&controllers,
+ xasprintf("punix:%s/%s.mgmt", ovs_rundir, s->dp_name));
+ }
+ for (i = 1; i < argc; i++) {
+ svec_add(&controllers, argv[i]);
+ }
+ if (argc < 2) {
+ svec_add(&controllers, "discover");
+ }
+
+ /* Set up controllers. */
+ s->n_controllers = controllers.n;
s->controllers = xmalloc(s->n_controllers * sizeof *s->controllers);
if (argc > 1) {
size_t i;
}
}
svec_destroy(&bridge_names);
- svec_destroy(&dpif_names);
- svec_destroy(&dpif_types);
+ svec_destroy(&xfif_names);
+ svec_destroy(&xfif_types);
}
-#ifdef HAVE_OPENSSL
-static void
-bridge_configure_ssl(const struct ovsrec_ssl *ssl)
-{
- /* XXX SSL should be configurable on a per-bridge basis. */
- if (ssl) {
- stream_ssl_set_private_key_file(ssl->private_key);
- stream_ssl_set_certificate_file(ssl->certificate);
- stream_ssl_set_ca_cert_file(ssl->ca_cert, ssl->bootstrap_ca_cert);
- }
-}
-#endif
-
/* Attempt to create the network device 'iface_name' through the netdev
* library. */
static int
{
struct ovsrec_controller **controllers;
size_t n_controllers;
+ bool had_primary;
+
+ struct ofproto_controller *ocs;
+ size_t n_ocs;
+ size_t i;
ofproto_set_extra_in_band_remotes(br->ofproto, managers, n_managers);
- dpif_name(br->dpif), c->target);
+ had_primary = ofproto_has_primary_controller(br->ofproto);
+
+ n_controllers = bridge_get_controllers(br, &controllers);
+
+ ocs = xmalloc((n_controllers + 1) * sizeof *ocs);
+ n_ocs = 0;
+
+ bridge_ofproto_controller_for_mgmt(br, &ocs[n_ocs++]);
+ for (i = 0; i < n_controllers; i++) {
+ struct ovsrec_controller *c = controllers[i];
+
+ if (!strncmp(c->target, "punix:", 6)
+ || !strncmp(c->target, "unix:", 5)) {
+ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
+
+ /* Prevent remote ovsdb-server users from accessing arbitrary Unix
+ * domain sockets and overwriting arbitrary local files. */
+ VLOG_ERR_RL(&rl, "%s: not adding Unix domain socket controller "
+ "\"%s\" due to possibility for remote exploit",
++ br->name, c->target);
+ continue;
+ }
+
+ bridge_configure_local_iface_netdev(br, c);
+ bridge_ofproto_controller_from_ovsrec(c, &ocs[n_ocs++]);
+ }
+
+ ofproto_set_controllers(br->ofproto, ocs, n_ocs);
+ free(ocs[0].target); /* From bridge_ofproto_controller_for_mgmt(). */
+ free(ocs);
- n_controllers = bridge_get_controllers(ovs_cfg, br, &controllers);
- if (ofproto_has_controller(br->ofproto) != (n_controllers != 0)) {
+ if (had_primary != ofproto_has_primary_controller(br->ofproto)) {
ofproto_flush_flows(br->ofproto);
}
action.output.len = htons(sizeof action);
action.output.port = htons(OFPP_NORMAL);
memset(&flow, 0, sizeof flow);
- ofproto_add_flow(br->ofproto, &flow, OVSFW_ALL, 0, &action, 1, 0);
+ flow.wildcards = OVSFW_ALL;
+ ofproto_add_flow(br->ofproto, &flow, &action, 1, 0);
- } else {
- struct ofproto_controller *ocs;
- size_t i;
-
- ocs = xmalloc(n_controllers * sizeof *ocs);
- for (i = 0; i < n_controllers; i++) {
- struct ovsrec_controller *c = controllers[i];
- struct ofproto_controller *oc = &ocs[i];
-
- if (strcmp(c->target, "discover")) {
- struct iface *local_iface;
- struct in_addr ip;
-
- local_iface = bridge_get_local_iface(br);
- if (local_iface && c->local_ip
- && inet_aton(c->local_ip, &ip)) {
- struct netdev *netdev = local_iface->netdev;
- struct in_addr mask, gateway;
-
- if (!c->local_netmask
- || !inet_aton(c->local_netmask, &mask)) {
- mask.s_addr = 0;
- }
- if (!c->local_gateway
- || !inet_aton(c->local_gateway, &gateway)) {
- gateway.s_addr = 0;
- }
-
- netdev_turn_flags_on(netdev, NETDEV_UP, true);
- if (!mask.s_addr) {
- mask.s_addr = guess_netmask(ip.s_addr);
- }
- if (!netdev_set_in4(netdev, ip, mask)) {
- VLOG_INFO("bridge %s: configured IP address "IP_FMT", "
- "netmask "IP_FMT,
- br->name, IP_ARGS(&ip.s_addr),
- IP_ARGS(&mask.s_addr));
- }
-
- if (gateway.s_addr) {
- if (!netdev_add_router(netdev, gateway)) {
- VLOG_INFO("bridge %s: configured gateway "IP_FMT,
- br->name, IP_ARGS(&gateway.s_addr));
- }
- }
- }
- }
-
- oc->target = c->target;
- oc->max_backoff = c->max_backoff ? *c->max_backoff / 1000 : 8;
- oc->probe_interval = (c->inactivity_probe
- ? *c->inactivity_probe / 1000 : 5);
- oc->fail = (!c->fail_mode
- || !strcmp(c->fail_mode, "standalone")
- || !strcmp(c->fail_mode, "open")
- ? OFPROTO_FAIL_STANDALONE
- : OFPROTO_FAIL_SECURE);
- oc->band = (!c->connection_mode
- || !strcmp(c->connection_mode, "in-band")
- ? OFPROTO_IN_BAND
- : OFPROTO_OUT_OF_BAND);
- oc->accept_re = c->discover_accept_regex;
- oc->update_resolv_conf = c->discover_update_resolv_conf;
- oc->rate_limit = (c->controller_rate_limit
- ? *c->controller_rate_limit : 0);
- oc->burst_limit = (c->controller_burst_limit
- ? *c->controller_burst_limit : 0);
- }
- ofproto_set_controllers(br->ofproto, ocs, n_controllers);
- free(ocs);
}
}
}
static void
-bridge_account_flow_ofhook_cb(const flow_t *flow,
+bridge_account_flow_ofhook_cb(const flow_t *flow, tag_type tags,
- const union odp_action *actions,
+ const union xflow_action *actions,
size_t n_actions, unsigned long long int n_bytes,
void *br_)
{
struct bridge *br = br_;
- const union odp_action *a;
+ const union xflow_action *a;
struct port *in_port;
- tag_type tags = 0;
+ tag_type dummy = 0;
int vlan;
- /* Feed information from the active flows back into the learning table
- * to ensure that table is always in sync with what is actually flowing
- * through the datapath. */
- if (is_admissible(br, flow, false, &tags, &vlan, &in_port)) {
+ /* Feed information from the active flows back into the learning table to
+ * ensure that table is always in sync with what is actually flowing
+ * through the datapath.
+ *
+ * We test that 'tags' is nonzero to ensure that only flows that include an
+ * OFPP_NORMAL action are used for learning. This works because
+ * bridge_normal_ofhook_cb() always sets a nonzero tag value. */
+ if (tags && is_admissible(br, flow, false, &dummy, &vlan, &in_port)) {
update_learning_table(br, flow, vlan, in_port);
}
if (!br->has_bonded_ports) {
return;
}
-
for (a = actions; a < &actions[n_actions]; a++) {
- if (a->type == ODPAT_OUTPUT) {
- struct port *out_port = port_from_dp_ifidx(br, a->output.port);
+ if (a->type == XFLOWAT_OUTPUT) {
+ struct port *out_port = port_from_xf_ifidx(br, a->output.port);
if (out_port && out_port->n_ifaces >= 2) {
struct bond_entry *e = lookup_bond_entry(out_port,
flow->dl_src);