/*
- * Copyright (c) 2009, 2010, 2011 Nicira Networks.
+ * Copyright (c) 2009, 2010, 2011, 2012 Nicira Networks.
* Copyright (c) 2010 Jean Tourrilhes - HP-Labs.
*
* Licensed under the Apache License, Version 2.0 (the "License");
}
}
+/* Sets the MAC aging timeout for the OFPP_NORMAL action on 'ofproto' to
+ * 'idle_time', in seconds.  Providers that do not implement the
+ * set_mac_idle_time hook are left unchanged. */
+void
+ofproto_set_mac_idle_time(struct ofproto *ofproto, unsigned idle_time)
+{
+    if (!ofproto->ofproto_class->set_mac_idle_time) {
+        return;
+    }
+    ofproto->ofproto_class->set_mac_idle_time(ofproto, idle_time);
+}
+
void
ofproto_set_desc(struct ofproto *p,
const char *mfr_desc, const char *hw_desc,
int
ofproto_run(struct ofproto *p)
{
+ struct sset changed_netdevs;
+ const char *changed_netdev;
struct ofport *ofport;
- char *devname;
int error;
error = p->ofproto_class->run(p);
}
if (p->ofproto_class->port_poll) {
+ char *devname;
+
while ((error = p->ofproto_class->port_poll(p, &devname)) != EAGAIN) {
process_port_change(p, error, devname);
}
}
+ /* Update OpenFlow port status for any port whose netdev has changed.
+ *
+ * Refreshing a given 'ofport' can cause an arbitrary ofport to be
+ * destroyed, so it's not safe to update ports directly from the
+ * HMAP_FOR_EACH loop, or even to use HMAP_FOR_EACH_SAFE. Instead, we
+ * need this two-phase approach. */
+ sset_init(&changed_netdevs);
HMAP_FOR_EACH (ofport, hmap_node, &p->ports) {
unsigned int change_seq = netdev_change_seq(ofport->netdev);
if (ofport->change_seq != change_seq) {
ofport->change_seq = change_seq;
- update_port(p, netdev_get_name(ofport->netdev));
+ sset_add(&changed_netdevs, netdev_get_name(ofport->netdev));
}
}
+ SSET_FOR_EACH (changed_netdev, &changed_netdevs) {
+ update_port(p, changed_netdev);
+ }
+ sset_destroy(&changed_netdevs);
switch (p->state) {
shash_add(&p->port_by_name, netdev_name, ofport);
if (!netdev_get_mtu(netdev, &dev_mtu)) {
- set_internal_devs_mtu(p);
ofport->mtu = dev_mtu;
+ set_internal_devs_mtu(p);
} else {
ofport->mtu = 0;
}
* Returns 0 on success, otherwise an OpenFlow error code. */
static int
collect_rules_loose(struct ofproto *ofproto, uint8_t table_id,
- const struct cls_rule *match, uint16_t out_port,
- struct list *rules)
+ const struct cls_rule *match,
+ ovs_be64 cookie, ovs_be64 cookie_mask,
+ uint16_t out_port, struct list *rules)
{
struct classifier *cls;
int error;
if (rule->pending) {
return OFPROTO_POSTPONE;
}
- if (!rule_is_hidden(rule) && rule_has_out_port(rule, out_port)) {
+ if (!rule_is_hidden(rule) && rule_has_out_port(rule, out_port)
+ && !((rule->flow_cookie ^ cookie) & cookie_mask)) {
list_push_back(rules, &rule->ofproto_node);
}
}
* Returns 0 on success, otherwise an OpenFlow error code. */
static int
collect_rules_strict(struct ofproto *ofproto, uint8_t table_id,
- const struct cls_rule *match, uint16_t out_port,
- struct list *rules)
+ const struct cls_rule *match,
+ ovs_be64 cookie, ovs_be64 cookie_mask,
+ uint16_t out_port, struct list *rules)
{
struct classifier *cls;
int error;
if (rule->pending) {
return OFPROTO_POSTPONE;
}
- if (!rule_is_hidden(rule) && rule_has_out_port(rule, out_port)) {
+ if (!rule_is_hidden(rule) && rule_has_out_port(rule, out_port)
+ && !((rule->flow_cookie ^ cookie) & cookie_mask)) {
list_push_back(rules, &rule->ofproto_node);
}
}
}
error = collect_rules_loose(ofproto, fsr.table_id, &fsr.match,
+ fsr.cookie, fsr.cookie_mask,
fsr.out_port, &rules);
if (error) {
return error;
}
error = collect_rules_loose(ofproto, request.table_id, &request.match,
+ request.cookie, request.cookie_mask,
request.out_port, &rules);
if (error) {
return error;
struct list rules;
int error;
- error = collect_rules_loose(ofproto, fm->table_id, &fm->cr, OFPP_NONE,
- &rules);
+ error = collect_rules_loose(ofproto, fm->table_id, &fm->cr,
+ fm->cookie, fm->cookie_mask,
+ OFPP_NONE, &rules);
return (error ? error
: list_is_empty(&rules) ? add_flow(ofproto, ofconn, fm, request)
: modify_flows__(ofproto, ofconn, fm, request, &rules));
struct list rules;
int error;
- error = collect_rules_strict(ofproto, fm->table_id, &fm->cr, OFPP_NONE,
- &rules);
+ error = collect_rules_strict(ofproto, fm->table_id, &fm->cr,
+ fm->cookie, fm->cookie_mask,
+ OFPP_NONE, &rules);
return (error ? error
: list_is_empty(&rules) ? add_flow(ofproto, ofconn, fm, request)
: list_is_singleton(&rules) ? modify_flows__(ofproto, ofconn,
struct list rules;
int error;
- error = collect_rules_loose(ofproto, fm->table_id, &fm->cr, fm->out_port,
- &rules);
+ error = collect_rules_loose(ofproto, fm->table_id, &fm->cr,
+ fm->cookie, fm->cookie_mask,
+ fm->out_port, &rules);
return (error ? error
: !list_is_empty(&rules) ? delete_flows__(ofproto, ofconn, request,
&rules)
struct list rules;
int error;
- error = collect_rules_strict(ofproto, fm->table_id, &fm->cr, fm->out_port,
- &rules);
+ error = collect_rules_strict(ofproto, fm->table_id, &fm->cr,
+ fm->cookie, fm->cookie_mask,
+ fm->out_port, &rules);
return (error ? error
: list_is_singleton(&rules) ? delete_flows__(ofproto, ofconn,
request, &rules)
handle_nxt_flow_mod_table_id(struct ofconn *ofconn,
const struct ofp_header *oh)
{
- const struct nxt_flow_mod_table_id *msg
- = (const struct nxt_flow_mod_table_id *) oh;
+ const struct nx_flow_mod_table_id *msg
+ = (const struct nx_flow_mod_table_id *) oh;
ofconn_set_flow_mod_table_id(ofconn, msg->set != 0);
return 0;
static int
handle_nxt_set_flow_format(struct ofconn *ofconn, const struct ofp_header *oh)
{
- const struct nxt_set_flow_format *msg
- = (const struct nxt_set_flow_format *) oh;
+ const struct nx_set_flow_format *msg
+ = (const struct nx_set_flow_format *) oh;
uint32_t format;
format = ntohl(msg->format);
return 0;
}
+/* Handles an NXT_SET_PACKET_IN_FORMAT request: validates the requested
+ * packet-in format and records it on 'ofconn'.
+ *
+ * Returns 0 on success, an OpenFlow error for an unknown format, or
+ * OFPROTO_POSTPONE if switching formats now could deliver queued async
+ * messages in a format the controller does not yet expect. */
+static int
+handle_nxt_set_packet_in_format(struct ofconn *ofconn,
+                                const struct ofp_header *oh)
+{
+    const struct nx_set_packet_in_format *msg;
+    uint32_t format;
+
+    msg = (const struct nx_set_packet_in_format *) oh;
+    format = ntohl(msg->format);
+    /* Compare against the packet-in format enum (NXPIF_*), not the flow
+     * format enum (NXFF_*): only the two defined packet-in formats are
+     * acceptable. */
+    if (format != NXPIF_OPENFLOW10 && format != NXPIF_NXM) {
+        return ofp_mkerr(OFPET_BAD_REQUEST, OFPBRC_EPERM);
+    }
+
+    if (format != ofconn_get_packet_in_format(ofconn)
+        && ofconn_has_pending_opgroups(ofconn)) {
+        /* Avoid sending async messages in a surprising packet-in format. */
+        return OFPROTO_POSTPONE;
+    }
+
+    ofconn_set_packet_in_format(ofconn, format);
+    return 0;
+}
+
static int
handle_barrier_request(struct ofconn *ofconn, const struct ofp_header *oh)
{
case OFPUTIL_NXT_SET_FLOW_FORMAT:
return handle_nxt_set_flow_format(ofconn, oh);
+ case OFPUTIL_NXT_SET_PACKET_IN_FORMAT:
+ return handle_nxt_set_packet_in_format(ofconn, oh);
+
case OFPUTIL_NXT_FLOW_MOD:
return handle_flow_mod(ofconn, oh);
case OFPUTIL_OFPST_AGGREGATE_REPLY:
case OFPUTIL_NXT_ROLE_REPLY:
case OFPUTIL_NXT_FLOW_REMOVED:
+ case OFPUTIL_NXT_PACKET_IN:
case OFPUTIL_NXST_FLOW_REPLY:
case OFPUTIL_NXST_AGGREGATE_REPLY:
default:
if (op->victim) {
ofproto_rule_destroy__(op->victim);
}
- if (!(rule->cr.wc.vlan_tci_mask & htons(VLAN_VID_MASK))
- && ofproto->vlan_bitmap) {
- uint16_t vid = vlan_tci_to_vid(rule->cr.flow.vlan_tci);
-
- if (!bitmap_is_set(ofproto->vlan_bitmap, vid)) {
- bitmap_set1(ofproto->vlan_bitmap, vid);
+ if ((rule->cr.wc.vlan_tci_mask & htons(VLAN_VID_MASK))
+ == htons(VLAN_VID_MASK)) {
+ if (ofproto->vlan_bitmap) {
+ uint16_t vid = vlan_tci_to_vid(rule->cr.flow.vlan_tci);
+
+ if (!bitmap_is_set(ofproto->vlan_bitmap, vid)) {
+ bitmap_set1(ofproto->vlan_bitmap, vid);
+ ofproto->vlans_changed = true;
+ }
+ } else {
ofproto->vlans_changed = true;
}
}
const struct cls_table *table;
HMAP_FOR_EACH (table, hmap_node, &cls->tables) {
- if (!(table->wc.vlan_tci_mask & htons(VLAN_VID_MASK))) {
+ if ((table->wc.vlan_tci_mask & htons(VLAN_VID_MASK))
+ == htons(VLAN_VID_MASK)) {
const struct cls_rule *rule;
HMAP_FOR_EACH (rule, hmap_node, &table->rules) {