{
struct sw_flow_actions *sfa;
- if (n_actions > (PAGE_SIZE - sizeof *sfa) / sizeof(union odp_action))
+ /* At least DP_MAX_PORTS actions are required to be able to flood a
+ * packet to every port. Factor of 2 allows for setting VLAN tags,
+ * etc. */
+ if (n_actions > 2 * DP_MAX_PORTS)
return ERR_PTR(-EINVAL);
- sfa = kmalloc(sizeof *sfa + n_actions * sizeof(union odp_action),
+ sfa = kmalloc(sizeof *sfa + n_actions * sizeof(union xflow_action),
GFP_KERNEL);
if (!sfa)
return ERR_PTR(-ENOMEM);
return sfa;
}
-
-/* Frees 'flow' immediately. */
-static void flow_free(struct sw_flow *flow)
+/* Allocates a new flow from the flow_cache slab with one reference held
+ * and 'dead' cleared.  Returns the flow on success or ERR_PTR(-ENOMEM) on
+ * allocation failure.
+ *
+ * NOTE(review): 'sf_acts' is left uninitialized here; presumably the caller
+ * assigns it before the flow can reach flow_put() -- confirm. */
+struct sw_flow *flow_alloc(void)
{
-	if (unlikely(!flow))
-		return;
-	kfree(flow->sf_acts);
-	kmem_cache_free(flow_cache, flow);
+	struct sw_flow *flow;
+
+	flow = kmem_cache_alloc(flow_cache, GFP_KERNEL);
+	if (!flow)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&flow->lock);
+	atomic_set(&flow->refcnt, 1);
+	flow->dead = false;
+
+	return flow;
}
void flow_free_tbl(struct tbl_node *node)
{
	struct sw_flow *flow = flow_cast(node);
-	flow_free(flow);
+
+	/* Mark the flow dead, then drop the table's reference.  The memory is
+	 * actually released only when the last flow_put() runs. */
+	flow->dead = true;
+	flow_put(flow);
}
/* RCU callback used by flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
	struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
-	flow_free(flow);
+
+	/* Runs after an RCU grace period, so no RCU reader can still reach the
+	 * flow: mark it dead and release the owning reference. */
+	flow->dead = true;
+	flow_put(flow);
}
/* Schedules 'flow' to be freed after the next RCU grace period.
call_rcu(&flow->rcu, rcu_free_flow_callback);
}
+/* Takes an additional reference on 'flow'. */
+void flow_hold(struct sw_flow *flow)
+{
+	atomic_inc(&flow->refcnt);
+}
+
+/* Releases one reference on 'flow'.  When the last reference is dropped,
+ * frees the flow's actions and returns the flow to the flow_cache slab.
+ * A null 'flow' is a no-op. */
+void flow_put(struct sw_flow *flow)
+{
+	if (unlikely(!flow))
+		return;
+
+	if (atomic_dec_and_test(&flow->refcnt)) {
+		kfree(flow->sf_acts);
+		kmem_cache_free(flow_cache, flow);
+	}
+}
+
+
/* RCU callback used by flow_deferred_free_acts. */
static void rcu_free_acts_callback(struct rcu_head *rcu)
{
call_rcu(&sf_acts->rcu, rcu_free_acts_callback);
}
-static void parse_vlan(struct sk_buff *skb, struct odp_flow_key *key)
+static void parse_vlan(struct sk_buff *skb, struct xflow_key *key)
{
struct qtag_prefix {
__be16 eth_type; /* ETH_P_8021Q */
return;
qp = (struct qtag_prefix *) skb->data;
- key->dl_vlan = qp->tci & htons(VLAN_VID_MASK);
- key->dl_vlan_pcp = (ntohs(qp->tci) & VLAN_PCP_MASK) >> VLAN_PCP_SHIFT;
+ key->dl_tci = qp->tci | htons(XFLOW_TCI_PRESENT);
__skb_pull(skb, sizeof(struct qtag_prefix));
}
proto = *(__be16 *) skb->data;
__skb_pull(skb, sizeof(__be16));
- if (ntohs(proto) >= ODP_DL_TYPE_ETH2_CUTOFF)
+ if (ntohs(proto) >= XFLOW_DL_TYPE_ETH2_CUTOFF)
return proto;
if (unlikely(skb->len < sizeof(struct llc_snap_hdr)))
- return htons(ODP_DL_TYPE_NOT_ETH_TYPE);
+ return htons(XFLOW_DL_TYPE_NOT_ETH_TYPE);
llc = (struct llc_snap_hdr *) skb->data;
if (llc->dsap != LLC_SAP_SNAP ||
llc->ssap != LLC_SAP_SNAP ||
(llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0)
- return htons(ODP_DL_TYPE_NOT_ETH_TYPE);
+ return htons(XFLOW_DL_TYPE_NOT_ETH_TYPE);
__skb_pull(skb, sizeof(struct llc_snap_hdr));
return llc->ethertype;
* Sets OVS_CB(skb)->is_frag to %true if @skb is an IPv4 fragment, otherwise to
* %false.
*/
-int flow_extract(struct sk_buff *skb, u16 in_port, struct odp_flow_key *key)
+int flow_extract(struct sk_buff *skb, u16 in_port, struct xflow_key *key,
+ bool *is_frag)
{
struct ethhdr *eth;
memset(key, 0, sizeof *key);
key->tun_id = OVS_CB(skb)->tun_id;
key->in_port = in_port;
- key->dl_vlan = htons(ODP_VLAN_NONE);
- OVS_CB(skb)->is_frag = false;
+ *is_frag = false;
/*
* We would really like to pull as many bytes as we could possibly
memcpy(key->dl_src, eth->h_source, ETH_ALEN);
memcpy(key->dl_dst, eth->h_dest, ETH_ALEN);
- /* dl_type, dl_vlan, dl_vlan_pcp. */
+ /* dl_type, dl_tci. */
__skb_pull(skb, 2 * ETH_ALEN);
if (eth->h_proto == htons(ETH_P_8021Q))
parse_vlan(skb, key);
key->tp_dst = htons(icmp->code);
}
}
- } else {
- OVS_CB(skb)->is_frag = true;
- }
+ } else
+ *is_frag = true;
+
} else if (key->dl_type == htons(ETH_P_ARP) && arphdr_ok(skb)) {
struct arp_eth_header *arp;
&& arp->ar_pln == 4) {
/* We only match on the lower 8 bits of the opcode. */
- if (ntohs(arp->ar_op) <= 0xff) {
+ if (ntohs(arp->ar_op) <= 0xff)
key->nw_proto = ntohs(arp->ar_op);
- }
if (key->nw_proto == ARPOP_REQUEST
|| key->nw_proto == ARPOP_REPLY) {
return 0;
}
-u32 flow_hash(const struct odp_flow_key *key)
+/* Returns a hash of 'key', folding it word-by-word with jhash2() and the
+ * module's hash_seed.
+ * NOTE(review): assumes sizeof(struct xflow_key) is a multiple of
+ * sizeof(u32) and that any padding bytes in the key are zeroed -- confirm. */
+u32 flow_hash(const struct xflow_key *key)
{
	return jhash2((u32*)key, sizeof *key / sizeof(u32), hash_seed);
}
int flow_cmp(const struct tbl_node *node, void *key2_)
{
-	const struct odp_flow_key *key1 = &flow_cast(node)->key;
-	const struct odp_flow_key *key2 = key2_;
+	const struct xflow_key *key1 = &flow_cast(node)->key;
+	const struct xflow_key *key2 = key2_;

-	return !memcmp(key1, key2, sizeof(struct odp_flow_key));
+	/* Byte-wise key equality; like flow_hash(), this relies on any padding
+	 * in struct xflow_key being zeroed -- NOTE(review): confirm. */
+	return !memcmp(key1, key2, sizeof(struct xflow_key));
}
/* Initializes the flow module.