1 /* Copyright (c) 2008 The Board of Trustees of The Leland Stanford
4 * We are making the OpenFlow specification and associated documentation
5 * (Software) available for public use and benefit with the expectation
6 * that others will use, modify and enhance the Software and contribute
7 * those enhancements back to the community. However, since we would
8 * like to make the Software available for broadest use, with as few
9 * restrictions as possible permission is hereby granted, free of
10 * charge, to any person obtaining a copy of this Software to deal in
11 * the Software under the copyrights without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sublicense, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
17 * The above copyright notice and this permission notice shall be
18 * included in all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
24 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
25 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
26 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * The name and trademarks of copyright holder(s) may NOT be used in
30 * advertising or publicity pertaining to the Software or any
31 * derivatives without specific, written prior permission.
35 #include <arpa/inet.h>
46 #include "openflow/openflow.h"
47 #include "openflow/nicira-ext.h"
49 #include "poll-loop.h"
52 #include "switch-flow.h"
59 #define THIS_MODULE VLM_datapath
65 extern char serial_num;
67 /* Capabilities supported by this implementation. */
/* Advertised verbatim in the OFPT_FEATURES_REPLY 'capabilities' field (see
 * dp_send_features_reply below).  NOTE(review): this listing is truncated --
 * the continuation lines of OFP_SUPPORTED_CAPABILITIES are not visible. */
68 #define OFP_SUPPORTED_CAPABILITIES ( OFPC_FLOW_STATS \
73 /* Actions supported by this implementation. */
/* Bitmap of OFPAT_* action types; advertised in the features reply and
 * presumably checked by validate_actions() -- TODO confirm against that
 * helper, which is defined outside this listing. */
74 #define OFP_SUPPORTED_ACTIONS ( (1 << OFPAT_OUTPUT) \
75 | (1 << OFPAT_SET_VLAN_VID) \
76 | (1 << OFPAT_SET_VLAN_PCP) \
77 | (1 << OFPAT_STRIP_VLAN) \
78 | (1 << OFPAT_SET_DL_SRC) \
79 | (1 << OFPAT_SET_DL_DST) \
80 | (1 << OFPAT_SET_NW_SRC) \
81 | (1 << OFPAT_SET_NW_DST) \
82 | (1 << OFPAT_SET_TP_SRC) \
83 | (1 << OFPAT_SET_TP_DST) )
85 /* The origin of a received OpenFlow message, to enable sending a reply. */
/* NOTE(review): the 'struct sender {' header line is missing from this
 * truncated listing; the two fields below belong to struct sender. */
87 struct remote *remote; /* The device that sent the message. */
88 uint32_t xid; /* The OpenFlow transaction ID. */
91 /* A connection to a secure channel. */
/* NOTE(review): the 'struct remote {' header and its rconn/node fields are
 * missing here; the members below belong to struct remote. */
95 #define TXQ_LIMIT 128 /* Max number of packets to queue for tx. */
96 int n_txq; /* Number of packets queued for tx on rconn. */
98 /* Support for reliable, multi-message replies to requests.
100 * If an incoming request needs to have a reliable reply that might
101 * require multiple messages, it can use remote_start_dump() to set up
102 * a callback that will be called as buffer space for replies. */
/* cb_dump composes and queues one reply chunk; cb_done frees 'aux'.  See
 * remote_start_dump() for the full contract. */
103 int (*cb_dump)(struct datapath *, void *aux);
104 void (*cb_done)(void *aux);
/* Shared rate limiter for the VLOG_*_RL log calls in this file:
 * at most 60 messages per 60-second window. */
108 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 60);
/* Forward declarations for the per-controller-connection helpers. */
110 static struct remote *remote_create(struct datapath *, struct rconn *);
111 static void remote_run(struct datapath *, struct remote *);
112 static void remote_wait(struct remote *);
113 static void remote_destroy(struct remote *);
/* Forward declarations for port management. */
115 static void update_port_flags(struct datapath *, const struct ofp_port_mod *);
116 static int update_port_status(struct sw_port *p);
117 static void send_port_status(struct sw_port *p, uint8_t status);
118 static void del_switch_port(struct sw_port *p);
120 /* Buffers are identified by a 31-bit opaque ID. We divide the ID
121 * into a buffer number (low bits) and a cookie (high bits). The buffer number
122 * is an index into an array of buffers. The cookie distinguishes between
123 * different packets that have occupied a single buffer. Thus, the more
124 * buffers we have, the lower-quality the cookie... */
125 #define PKT_BUFFER_BITS 8
126 #define N_PKT_BUFFERS (1 << PKT_BUFFER_BITS)
127 #define PKT_BUFFER_MASK (N_PKT_BUFFERS - 1)
129 #define PKT_COOKIE_BITS (32 - PKT_BUFFER_BITS)
/* Packet-path entry points (non-static: also called from outside this file). */
131 int run_flow_through_tables(struct datapath *, struct ofpbuf *,
133 void fwd_port_input(struct datapath *, struct ofpbuf *, struct sw_port *);
134 int fwd_control_input(struct datapath *, const struct sender *,
135 const void *, size_t);
/* Packet-buffer bookkeeping for buffer_id-based flow setup. */
137 uint32_t save_buffer(struct ofpbuf *);
138 static struct ofpbuf *retrieve_buffer(uint32_t id);
139 static void discard_buffer(uint32_t id);
/* Maps an OpenFlow port number to its struct sw_port: physical ports index
 * directly into dp->ports[], OFPP_LOCAL maps to dp->local_port.
 * NOTE(review): the final arm of the conditional (presumably ': NULL') and
 * the braces are missing from this truncated listing. */
141 static struct sw_port *
142 lookup_port(struct datapath *dp, uint16_t port_no)
144 return (port_no < DP_MAX_PORTS ? &dp->ports[port_no]
145 : port_no == OFPP_LOCAL ? dp->local_port
149 /* Generates and returns a random datapath id. */
/* NOTE(review): return type line, braces, and the randomization of the
 * remaining ea[] bytes are missing from this listing. */
151 gen_datapath_id(void)
153 uint8_t ea[ETH_ADDR_LEN];
155 ea[0] = 0x00; /* Set Nicira OUI. */
158 return eth_addr_to_uint64(ea);
/* Allocates and initializes a new datapath, storing it in '*dp_'.  A caller-
 * supplied 'dpid' is used only if it fits in 48 bits; otherwise a random id
 * is generated.  NOTE(review): the calloc() NULL check, the chain_create()
 * failure path after the VLOG_ERR, and the return statement are missing
 * from this truncated listing. */
162 dp_new(struct datapath **dp_, uint64_t dpid)
166 dp = calloc(1, sizeof *dp);
171 dp->last_timeout = time_now();
172 list_init(&dp->remotes);
173 dp->listeners = NULL;
175 dp->id = dpid <= UINT64_C(0xffffffffffff) ? dpid : gen_datapath_id();
176 dp->chain = chain_create(dp);
178 VLOG_ERR("could not create chain");
183 list_init(&dp->port_list);
185 dp->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
/* Opens netdev 'netdev_name', optionally forces its MAC to 'new_mac', brings
 * it up in promiscuous mode, warns if it still has an IP address assigned,
 * and registers it as switch port 'port_no' on 'dp', announcing OFPPR_ADD to
 * the controller.  NOTE(review): several error-check/early-return lines are
 * missing from this truncated listing (e.g. the netdev_open and
 * netdev_set_flags failure branches). */
191 new_port(struct datapath *dp, struct sw_port *port, uint16_t port_no,
192 const char *netdev_name, const uint8_t *new_mac)
194 struct netdev *netdev;
199 error = netdev_open(netdev_name, NETDEV_ETH_TYPE_ANY, &netdev);
203 if (new_mac && !eth_addr_equals(netdev_get_etheraddr(netdev), new_mac)) {
204 /* Generally the device has to be down before we change its hardware
205 * address. Don't bother to check for an error because it's really
206 * the netdev_set_etheraddr() call below that we care about. */
207 netdev_set_flags(netdev, 0, false);
208 error = netdev_set_etheraddr(netdev, new_mac);
210 VLOG_WARN("failed to change %s Ethernet address "
211 "to "ETH_ADDR_FMT": %s",
212 netdev_name, ETH_ADDR_ARGS(new_mac), strerror(error));
215 error = netdev_set_flags(netdev, NETDEV_UP | NETDEV_PROMISC, false);
217 VLOG_ERR("failed to set promiscuous mode on %s device", netdev_name);
218 netdev_close(netdev);
/* A switch port should not also be an IP interface; warn if it is. */
221 if (netdev_get_in4(netdev, &in4)) {
222 VLOG_ERR("%s device has assigned IP address %s",
223 netdev_name, inet_ntoa(in4));
225 if (netdev_get_in6(netdev, &in6)) {
226 char in6_name[INET6_ADDRSTRLEN + 1];
227 inet_ntop(AF_INET6, &in6, in6_name, sizeof in6_name);
228 VLOG_ERR("%s device has assigned IPv6 address %s",
229 netdev_name, in6_name);
232 memset(port, '\0', sizeof *port);
235 port->netdev = netdev;
236 port->port_no = port_no;
237 list_push_back(&dp->port_list, &port->node);
239 /* Notify the ctlpath that this port has been added */
240 send_port_status(port, OFPPR_ADD);
/* Adds 'netdev' as the next free physical port on 'dp'.
 * NOTE(review): the "port free?" test inside the loop and the
 * no-free-port return are missing from this truncated listing. */
246 dp_add_port(struct datapath *dp, const char *netdev)
249 for (port_no = 0; port_no < DP_MAX_PORTS; port_no++) {
250 struct sw_port *port = &dp->ports[port_no];
252 return new_port(dp, port, port_no, netdev, NULL);
/* Adds 'netdev' as the OFPP_LOCAL port, at most once; the local port's MAC
 * is derived from the datapath id.  NOTE(review): the new_port() error path
 * (freeing 'port') and return lines are missing from this listing. */
259 dp_add_local_port(struct datapath *dp, const char *netdev)
261 if (!dp->local_port) {
262 uint8_t ea[ETH_ADDR_LEN];
263 struct sw_port *port;
266 port = xcalloc(1, sizeof *port);
267 eth_addr_from_uint64(dp->id, ea);
268 error = new_port(dp, port, OFPP_LOCAL, netdev, ea);
270 dp->local_port = port;
/* Registers a passive listener; dp_run() accepts controller connections
 * from it.  The listeners array is grown by one slot per call. */
281 dp_add_pvconn(struct datapath *dp, struct pvconn *pvconn)
283 dp->listeners = xrealloc(dp->listeners,
284 sizeof *dp->listeners * (dp->n_listeners + 1));
285 dp->listeners[dp->n_listeners++] = pvconn;
/* One iteration of the datapath's main loop: once per second expire flows
 * and refresh port status; receive and forward one packet per port; service
 * each controller connection; accept new connections from listeners.
 * NOTE(review): this truncated listing omits several lines (loop braces,
 * the netdev_recv success test, the rx_packets counter, remote_run call,
 * and the pvconn_accept success test). */
289 dp_run(struct datapath *dp)
291 time_t now = time_now();
292 struct sw_port *p, *pn;
293 struct remote *r, *rn;
294 struct ofpbuf *buffer = NULL;
/* Periodic (about once a second) maintenance work. */
297 if (now != dp->last_timeout) {
298 struct list deleted = LIST_INITIALIZER(&deleted);
299 struct sw_flow *f, *n;
301 LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
302 if (update_port_status(p)) {
303 send_port_status(p, OFPPR_MODIFY);
/* Expire flows, then notify the controller of each expiration. */
307 chain_timeout(dp->chain, &deleted);
308 LIST_FOR_EACH_SAFE (f, n, struct sw_flow, node, &deleted) {
309 dp_send_flow_end(dp, f, f->reason);
310 list_remove(&f->node);
313 dp->last_timeout = now;
315 poll_timer_wait(1000);
317 LIST_FOR_EACH_SAFE (p, pn, struct sw_port, node, &dp->port_list) {
321 /* Allocate buffer with some headroom to add headers in forwarding
322 * to the controller or adding a vlan tag, plus an extra 2 bytes to
323 * allow IP headers to be aligned on a 4-byte boundary. */
324 const int headroom = 128 + 2;
325 const int hard_header = VLAN_ETH_HEADER_LEN;
326 const int mtu = netdev_get_mtu(p->netdev);
327 buffer = ofpbuf_new(headroom + hard_header + mtu);
328 buffer->data = (char*)buffer->data + headroom;
330 error = netdev_recv(p->netdev, buffer);
333 p->rx_bytes += buffer->size;
334 fwd_port_input(dp, buffer, p);
336 } else if (error != EAGAIN) {
337 VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
338 netdev_get_name(p->netdev), strerror(error));
341 ofpbuf_delete(buffer);
343 /* Talk to remotes. */
344 LIST_FOR_EACH_SAFE (r, rn, struct remote, node, &dp->remotes) {
/* Accept new controller connections; on listener failure, the listener is
 * removed by swapping in the last array element (hence no i++ on that path). */
348 for (i = 0; i < dp->n_listeners; ) {
349 struct pvconn *pvconn = dp->listeners[i];
350 struct vconn *new_vconn;
351 int retval = pvconn_accept(pvconn, OFP_VERSION, &new_vconn);
353 remote_create(dp, rconn_new_from_vconn("passive", new_vconn));
354 } else if (retval != EAGAIN) {
355 VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
356 dp->listeners[i] = dp->listeners[--dp->n_listeners];
/* Services one controller connection: processes up to 50 incoming OpenFlow
 * messages, then drives any in-progress dump callback while transmit-queue
 * space remains, and tears the remote down if its rconn has died.
 * NOTE(review): several lines are missing from this truncated listing
 * (rconn_run call, the NULL-buffer break, sender.remote assignment, the
 * cb_dump completion/error branches, and remote_destroy at the end). */
364 remote_run(struct datapath *dp, struct remote *r)
370 /* Do some remote processing, but cap it at a reasonable amount so that
371 * other processing doesn't starve. */
372 for (i = 0; i < 50; i++) {
374 struct ofpbuf *buffer;
375 struct ofp_header *oh;
377 buffer = rconn_recv(r->rconn);
382 if (buffer->size >= sizeof *oh) {
383 struct sender sender;
387 sender.xid = oh->xid;
388 fwd_control_input(dp, &sender, buffer->data, buffer->size);
390 VLOG_WARN_RL(&rl, "received too-short OpenFlow message");
392 ofpbuf_delete(buffer);
/* Continue an in-progress multi-message reply while there is tx space. */
394 if (r->n_txq < TXQ_LIMIT) {
395 int error = r->cb_dump(dp, r->cb_aux);
398 VLOG_WARN_RL(&rl, "dump callback error: %s",
401 r->cb_done(r->cb_aux);
410 if (!rconn_is_alive(r->rconn)) {
/* Registers 'r' with the poll loop: wake when the rconn needs service or
 * has data to receive. */
416 remote_wait(struct remote *r)
418 rconn_run_wait(r->rconn);
419 rconn_recv_wait(r->rconn);
/* Destroys 'r', first letting any unfinished dump clean up its state. */
423 remote_destroy(struct remote *r)
426 if (r->cb_dump && r->cb_done) {
427 r->cb_done(r->cb_aux);
429 list_remove(&r->node);
430 rconn_destroy(r->rconn);
/* Wraps 'rconn' in a new struct remote on 'dp' with no dump in progress.
 * NOTE(review): the trailing 'return remote;' is missing from this listing. */
435 static struct remote *
436 remote_create(struct datapath *dp, struct rconn *rconn)
438 struct remote *remote = xmalloc(sizeof *remote);
439 list_push_back(&dp->remotes, &remote->node);
440 remote->rconn = rconn;
441 remote->cb_dump = NULL;
446 /* Starts a callback-based, reliable, possibly multi-message reply to a
447 * request made by 'remote'.
449 * 'dump' designates a function that will be called when the 'remote' send
450 * queue has an empty slot. It should compose a message and send it on
451 * 'remote'. On success, it should return 1 if it should be called again when
452 * another send queue slot opens up, 0 if its transmissions are complete, or a
453 * negative errno value on failure.
455 * 'done' designates a function to clean up any resources allocated for the
456 * dump. It must handle being called before the dump is complete (which will
457 * happen if 'remote' is closed unexpectedly).
459 * 'aux' is passed to 'dump' and 'done'. */
/* The assert enforces that only one dump may be in progress per remote;
 * remote_run() drives the callbacks and remote_destroy() guarantees 'done'
 * is always invoked. */
461 remote_start_dump(struct remote *remote,
462 int (*dump)(struct datapath *, void *),
463 void (*done)(void *),
466 assert(!remote->cb_dump);
467 remote->cb_dump = dump;
468 remote->cb_done = done;
469 remote->cb_aux = aux;
/* Registers every event source of 'dp' (port receive, remote connections,
 * passive listeners) with the poll loop so the caller can block until one
 * becomes ready.  NOTE(review): the remote_wait(r) call inside the remotes
 * loop is missing from this truncated listing. */
473 dp_wait(struct datapath *dp)
479 LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
480 netdev_recv_wait(p->netdev);
482 LIST_FOR_EACH (r, struct remote, node, &dp->remotes) {
485 for (i = 0; i < dp->n_listeners; i++) {
486 pvconn_wait(dp->listeners[i]);
490 /* Delete 'p' from switch. */
/* Announces OFPPR_DELETE first, then releases the netdev and unlinks the
 * port from dp->port_list.  NOTE(review): lines clearing the dp->ports[]
 * slot are likely elided by the truncation -- confirm against full source. */
492 del_switch_port(struct sw_port *p)
494 send_port_status(p, OFPPR_DELETE);
495 netdev_close(p->netdev);
497 list_remove(&p->node);
/* Tears down 'dp': closes every port, then destroys the flow-table chain.
 * NOTE(review): the NULL check on 'dp' and freeing of remotes/listeners/dp
 * itself are missing from this truncated listing. */
501 dp_destroy(struct datapath *dp)
503 struct sw_port *p, *n;
509 LIST_FOR_EACH_SAFE (p, n, struct sw_port, node, &dp->port_list) {
512 chain_destroy(dp->chain);
516 /* Send packets out all the ports except the originating one. If the
517 * "flood" argument is set, don't send out ports with flooding disabled.
*/
/* Uses the classic "lag one port behind" pattern so the final transmission
 * can consume 'buffer' itself while earlier ports get clones; if no port
 * qualifies the buffer is simply freed.  NOTE(review): braces, 'continue'
 * statements, and the prev_port == -1 test are missing from this listing. */
520 output_all(struct datapath *dp, struct ofpbuf *buffer, int in_port, int flood)
526 LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
527 if (p->port_no == in_port) {
530 if (flood && p->config & OFPPC_NO_FLOOD) {
533 if (prev_port != -1) {
534 dp_output_port(dp, ofpbuf_clone(buffer), in_port, prev_port,
537 prev_port = p->port_no;
540 dp_output_port(dp, buffer, in_port, prev_port, false);
542 ofpbuf_delete(buffer);
/* Transmits 'buffer' on physical/local port 'out_port' (taking ownership),
 * updating tx counters on success; down or unknown ports just drop it. */
548 output_packet(struct datapath *dp, struct ofpbuf *buffer, uint16_t out_port)
550 struct sw_port *p = lookup_port(dp, out_port);
551 if (p && p->netdev != NULL) {
552 if (!(p->config & OFPPC_PORT_DOWN)) {
553 if (!netdev_send(p->netdev, buffer)) {
555 p->tx_bytes += buffer->size;
560 ofpbuf_delete(buffer);
564 ofpbuf_delete(buffer);
565 VLOG_DBG_RL(&rl, "can't forward to bad port %d\n", out_port);
568 /* Takes ownership of 'buffer' and transmits it to 'out_port' on 'dp'.
*/
/* Dispatches on the OpenFlow pseudo-port values: IN_PORT echoes to the
 * ingress port, TABLE re-runs the flow tables, FLOOD/ALL use output_all
 * with/without the no-flood filter, CONTROLLER raises a packet-in; a plain
 * port number forwards directly (never back out the ingress port).
 * NOTE(review): the switch(out_port) statement itself, case labels other
 * than OFPP_CONTROLLER, and break/return lines are missing from this
 * truncated listing. */
571 dp_output_port(struct datapath *dp, struct ofpbuf *buffer,
572 int in_port, int out_port, bool ignore_no_fwd)
578 output_packet(dp, buffer, in_port);
582 struct sw_port *p = lookup_port(dp, in_port);
583 if (run_flow_through_tables(dp, buffer, p)) {
584 ofpbuf_delete(buffer);
590 output_all(dp, buffer, in_port, 1);
594 output_all(dp, buffer, in_port, 0);
597 case OFPP_CONTROLLER:
598 dp_output_control(dp, buffer, in_port, 0, OFPR_ACTION);
603 if (in_port == out_port) {
604 VLOG_DBG_RL(&rl, "can't directly forward to input port");
607 output_packet(dp, buffer, out_port);
/* Allocates an OpenFlow message of 'type', echoing the requester's xid when
 * 'sender' is known (xid 0 for unsolicited messages). */
613 make_openflow_reply(size_t openflow_len, uint8_t type,
614 const struct sender *sender, struct ofpbuf **bufferp)
616 return make_openflow_xid(openflow_len, type, sender ? sender->xid : 0,
/* Queues 'buffer' on one remote's rconn, bounded by TXQ_LIMIT via the
 * shared n_txq counter; logs (rate-limited) on failure.
 * NOTE(review): the 'return retval;' line is missing from this listing. */
621 send_openflow_buffer_to_remote(struct ofpbuf *buffer, struct remote *remote)
623 int retval = rconn_send_with_limit(remote->rconn, buffer, &remote->n_txq,
626 VLOG_WARN_RL(&rl, "send to %s failed: %s",
627 rconn_get_name(remote->rconn), strerror(retval))
/* Finalizes the OpenFlow length header, then either replies to 'sender' or
 * broadcasts to every connected remote (cloning for all but the last, which
 * consumes 'buffer'; if there are no remotes the buffer is freed).
 * NOTE(review): the if (sender) test, the 'prev' bookkeeping inside the
 * loop, and the return are missing from this truncated listing. */
633 send_openflow_buffer(struct datapath *dp, struct ofpbuf *buffer,
634 const struct sender *sender)
636 update_openflow_length(buffer);
638 /* Send back to the sender. */
639 return send_openflow_buffer_to_remote(buffer, sender->remote);
641 /* Broadcast to all remotes. */
642 struct remote *r, *prev = NULL;
643 LIST_FOR_EACH (r, struct remote, node, &dp->remotes) {
645 send_openflow_buffer_to_remote(ofpbuf_clone(buffer), prev);
650 send_openflow_buffer_to_remote(buffer, prev);
652 ofpbuf_delete(buffer);
658 /* Takes ownership of 'buffer' and transmits it to 'dp''s controller. If the
659 * packet can be saved in a buffer, then only the first max_len bytes of
660 * 'buffer' are sent; otherwise, all of 'buffer' is sent. 'reason' indicates
661 * why 'buffer' is being sent. 'max_len' sets the maximum number of bytes that
662 * the caller wants to be sent; a value of 0 indicates the entire packet should
*/
665 dp_output_control(struct datapath *dp, struct ofpbuf *buffer, int in_port,
666 size_t max_len, int reason)
668 struct ofp_packet_in *opi;
/* Stash the full packet so the controller can later refer to it by
 * buffer_id (UINT32_MAX means "not buffered": send the whole packet). */
672 buffer_id = save_buffer(buffer);
673 total_len = buffer->size;
674 if (buffer_id != UINT32_MAX && max_len && buffer->size > max_len) {
675 buffer->size = max_len;
/* Prepend the ofp_packet_in header in front of the (possibly truncated)
 * packet data; total_len still reports the original length. */
678 opi = ofpbuf_push_uninit(buffer, offsetof(struct ofp_packet_in, data));
679 opi->header.version = OFP_VERSION;
680 opi->header.type = OFPT_PACKET_IN;
681 opi->header.length = htons(buffer->size);
682 opi->header.xid = htonl(0);
683 opi->buffer_id = htonl(buffer_id);
684 opi->total_len = htons(total_len);
685 opi->in_port = htons(in_port);
686 opi->reason = reason;
688 send_openflow_buffer(dp, buffer, NULL);
/* Fills an ofp_phy_port description for 'p' from its netdev: name (strncpy
 * may truncate, but the next line guarantees NUL termination), MAC, config,
 * state, and the four link-feature bitmaps, all in network byte order. */
691 static void fill_port_desc(struct datapath *dp, struct sw_port *p,
692 struct ofp_phy_port *desc)
694 desc->port_no = htons(p->port_no);
695 strncpy((char *) desc->name, netdev_get_name(p->netdev),
697 desc->name[sizeof desc->name - 1] = '\0';
698 memcpy(desc->hw_addr, netdev_get_etheraddr(p->netdev), ETH_ADDR_LEN);
699 desc->config = htonl(p->config);
700 desc->state = htonl(p->state);
701 desc->curr = htonl(netdev_get_features(p->netdev, NETDEV_FEAT_CURRENT));
702 desc->supported = htonl(netdev_get_features(p->netdev,
703 NETDEV_FEAT_SUPPORTED));
704 desc->advertised = htonl(netdev_get_features(p->netdev,
705 NETDEV_FEAT_ADVERTISED));
706 desc->peer = htonl(netdev_get_features(p->netdev, NETDEV_FEAT_PEER));
/* Composes and sends an OFPT_FEATURES_REPLY for 'sender': datapath id,
 * table/buffer counts, capability and action bitmaps, plus one ofp_phy_port
 * record per switch port.  Note n_tables is a single byte, hence no htons. */
710 dp_send_features_reply(struct datapath *dp, const struct sender *sender)
712 struct ofpbuf *buffer;
713 struct ofp_switch_features *ofr;
716 ofr = make_openflow_reply(sizeof *ofr, OFPT_FEATURES_REPLY,
718 ofr->datapath_id = htonll(dp->id);
719 ofr->n_tables = dp->chain->n_tables;
720 ofr->n_buffers = htonl(N_PKT_BUFFERS);
721 ofr->capabilities = htonl(OFP_SUPPORTED_CAPABILITIES);
722 ofr->actions = htonl(OFP_SUPPORTED_ACTIONS);
723 LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
724 struct ofp_phy_port *opp = ofpbuf_put_uninit(buffer, sizeof *opp);
725 memset(opp, 0, sizeof *opp);
726 fill_port_desc(dp, p, opp);
728 send_openflow_buffer(dp, buffer, sender);
/* Applies an OFPT_PORT_MOD: updates the masked config bits and, when the
 * OFPPC_PORT_DOWN bit is being changed, actually toggles the netdev's UP
 * flag to match.  The hw_addr comparison rejects stale requests aimed at a
 * previous occupant of the same port number. */
732 update_port_flags(struct datapath *dp, const struct ofp_port_mod *opm)
734 struct sw_port *p = lookup_port(dp, ntohs(opm->port_no));
736 /* Make sure the port id hasn't changed since this was sent */
737 if (!p || memcmp(opm->hw_addr, netdev_get_etheraddr(p->netdev),
738 ETH_ADDR_LEN) != 0) {
744 uint32_t config_mask = ntohl(opm->mask);
745 p->config &= ~config_mask;
746 p->config |= ntohl(opm->config) & config_mask;
/* Note: the comparisons below are against p->config as it was BEFORE the
 * masked update above would normally apply -- NOTE(review): lines are
 * missing from this truncated listing, so the exact ordering relative to
 * the masked update cannot be confirmed here. */
749 if (opm->mask & htonl(OFPPC_PORT_DOWN)) {
750 if ((opm->config & htonl(OFPPC_PORT_DOWN))
751 && (p->config & OFPPC_PORT_DOWN) == 0) {
752 p->config |= OFPPC_PORT_DOWN;
753 netdev_turn_flags_off(p->netdev, NETDEV_UP, true);
754 } else if ((opm->config & htonl(OFPPC_PORT_DOWN)) == 0
755 && (p->config & OFPPC_PORT_DOWN)) {
756 p->config &= ~OFPPC_PORT_DOWN;
757 netdev_turn_flags_on(p->netdev, NETDEV_UP, true);
762 /* Update the port status field of the bridge port. A non-zero return
763 * value indicates some field has changed.
765 * NB: Callers of this function may hold the RCU read lock, so any
766 * additional checks must not sleep.
*/
/* Re-derives OFPPC_PORT_DOWN from the netdev UP flag and OFPPS_LINK_DOWN
 * from link status (negative retval = unsupported query, left unchanged),
 * returning nonzero iff config or state changed. */
769 update_port_status(struct sw_port *p)
772 enum netdev_flags flags;
773 uint32_t orig_config = p->config;
774 uint32_t orig_state = p->state;
776 if (netdev_get_flags(p->netdev, &flags) < 0) {
777 VLOG_WARN_RL(&rl, "could not get netdev flags for %s",
778 netdev_get_name(p->netdev));
781 if (flags & NETDEV_UP) {
782 p->config &= ~OFPPC_PORT_DOWN;
784 p->config |= OFPPC_PORT_DOWN;
788 /* Not all cards support this getting link status, so don't warn on
*/
790 retval = netdev_get_link_status(p->netdev);
792 p->state &= ~OFPPS_LINK_DOWN;
793 } else if (retval == 0) {
794 p->state |= OFPPS_LINK_DOWN;
797 return ((orig_config != p->config) || (orig_state != p->state));
/* Broadcasts an unsolicited OFPT_PORT_STATUS (reason = OFPPR_ADD/MODIFY/
 * DELETE) describing 'p' to every connected controller (sender == NULL). */
801 send_port_status(struct sw_port *p, uint8_t status)
803 struct ofpbuf *buffer;
804 struct ofp_port_status *ops;
805 ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &buffer);
806 ops->reason = status;
807 memset(ops->pad, 0, sizeof ops->pad);
808 fill_port_desc(p->dp, p, &ops->desc);
810 send_openflow_buffer(p->dp, buffer, NULL);
/* Broadcasts a Nicira-extension NXT_FLOW_END vendor message describing an
 * expired/removed flow (match, priority, reason, stats, timestamps) -- but
 * only if the switch was configured to send flow-end notifications. */
814 dp_send_flow_end(struct datapath *dp, struct sw_flow *flow,
815 enum nx_flow_end_reason reason)
817 struct ofpbuf *buffer;
818 struct nx_flow_end *nfe;
820 if (!dp->send_flow_end) {
824 nfe = make_openflow_xid(sizeof *nfe, OFPT_VENDOR, 0, &buffer);
828 nfe->header.vendor = htonl(NX_VENDOR_ID);
829 nfe->header.subtype = htonl(NXT_FLOW_END);
831 flow_fill_match(&nfe->match, &flow->key);
833 nfe->priority = htons(flow->priority);
834 nfe->reason = reason;
/* tcp_flags/ip_tos are single bytes; no byte-order conversion needed. */
836 nfe->tcp_flags = flow->tcp_flags;
837 nfe->ip_tos = flow->ip_tos;
839 memset(nfe->pad, 0, sizeof nfe->pad);
841 nfe->init_time = htonll(flow->created);
842 nfe->used_time = htonll(flow->used);
843 nfe->end_time = htonll(time_msec());
845 nfe->packet_count = htonll(flow->packet_count);
846 nfe->byte_count = htonll(flow->byte_count);
848 send_openflow_buffer(dp, buffer, NULL);
/* Replies to 'sender' with an OFPT_ERROR carrying ('type', 'code') and the
 * first 'len' bytes of the offending request in the data field. */
852 dp_send_error_msg(struct datapath *dp, const struct sender *sender,
853 uint16_t type, uint16_t code, const void *data, size_t len)
855 struct ofpbuf *buffer;
856 struct ofp_error_msg *oem;
857 oem = make_openflow_reply(sizeof(*oem)+len, OFPT_ERROR, sender, &buffer);
858 oem->type = htons(type);
859 oem->code = htons(code);
860 memcpy(oem->data, data, len);
861 send_openflow_buffer(dp, buffer, sender);
/* Appends one variable-length ofp_flow_stats record (header, match, timing,
 * counters, then the flow's raw action list) for 'flow' to 'buffer'.
 * 'now' is in milliseconds, hence the /1000 for the seconds duration. */
865 fill_flow_stats(struct ofpbuf *buffer, struct sw_flow *flow,
866 int table_idx, uint64_t now)
868 struct ofp_flow_stats *ofs;
869 int length = sizeof *ofs + flow->sf_acts->actions_len;
870 ofs = ofpbuf_put_uninit(buffer, length);
871 ofs->length = htons(length);
872 ofs->table_id = table_idx;
/* The flow key fields below are already stored in network byte order;
 * only 'wildcards' needs conversion here. */
874 ofs->match.wildcards = htonl(flow->key.wildcards);
875 ofs->match.in_port = flow->key.flow.in_port;
876 memcpy(ofs->match.dl_src, flow->key.flow.dl_src, ETH_ADDR_LEN);
877 memcpy(ofs->match.dl_dst, flow->key.flow.dl_dst, ETH_ADDR_LEN);
878 ofs->match.dl_vlan = flow->key.flow.dl_vlan;
879 ofs->match.dl_type = flow->key.flow.dl_type;
880 ofs->match.nw_src = flow->key.flow.nw_src;
881 ofs->match.nw_dst = flow->key.flow.nw_dst;
882 ofs->match.nw_proto = flow->key.flow.nw_proto;
884 ofs->match.tp_src = flow->key.flow.tp_src;
885 ofs->match.tp_dst = flow->key.flow.tp_dst;
886 ofs->duration = htonl((now - flow->created) / 1000);
887 ofs->priority = htons(flow->priority);
888 ofs->idle_timeout = htons(flow->idle_timeout);
889 ofs->hard_timeout = htons(flow->hard_timeout);
890 memset(ofs->pad2, 0, sizeof ofs->pad2);
891 ofs->packet_count = htonll(flow->packet_count);
892 ofs->byte_count = htonll(flow->byte_count);
893 memcpy(ofs->actions, flow->sf_acts->actions, flow->sf_acts->actions_len);
897 /* 'buffer' was received on 'p', which may be a a physical switch port or a
898 * null pointer. Process it according to 'dp''s flow table. Returns 0 if
899 * successful, in which case 'buffer' is destroyed, or -ESRCH if there is no
900 * matching flow, in which case 'buffer' still belongs to the caller. */
901 int run_flow_through_tables(struct datapath *dp, struct ofpbuf *buffer,
904 struct sw_flow_key key;
905 struct sw_flow *flow;
/* Drop IP fragments outright when the switch is in OFPC_FRAG_DROP mode
 * (flow_extract's return flags fragment status). */
908 if (flow_extract(buffer, p ? p->port_no : OFPP_NONE, &key.flow)
909 && (dp->flags & OFPC_FRAG_MASK) == OFPC_FRAG_DROP) {
911 ofpbuf_delete(buffer);
/* Honor per-port NO_RECV / NO_RECV_STP: pick which bit applies based on
 * whether the destination is the STP multicast address, then test it.
 * NOTE(review): the second 'p->config &' operand is a ?:-selected bit
 * mask; precedence here is subtle -- confirm against the full source,
 * as this listing is missing surrounding lines. */
914 if (p && p->config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP)
915 && p->config & (!eth_addr_equals(key.flow.dl_dst, stp_eth_addr)
916 ? OFPPC_NO_RECV : OFPPC_NO_RECV_STP)) {
917 ofpbuf_delete(buffer);
/* On a match, update flow usage stats and run its action list. */
921 flow = chain_lookup(dp->chain, &key);
923 flow_used(flow, buffer);
924 execute_actions(dp, buffer, &key, flow->sf_acts->actions,
925 flow->sf_acts->actions_len, false);
932 /* 'buffer' was received on 'p', which may be a a physical switch port or a
933 * null pointer. Process it according to 'dp''s flow table, sending it up to
934 * the controller if no flow matches. Takes ownership of 'buffer'. */
935 void fwd_port_input(struct datapath *dp, struct ofpbuf *buffer,
938 if (run_flow_through_tables(dp, buffer, p)) {
939 dp_output_control(dp, buffer, p->port_no,
940 dp->miss_send_len, OFPR_NO_MATCH);
/* OFPT_FEATURES_REQUEST handler: just delegates to dp_send_features_reply. */
945 recv_features_request(struct datapath *dp, const struct sender *sender,
948 dp_send_features_reply(dp, sender);
/* OFPT_GET_CONFIG_REQUEST handler: reports current flags and miss_send_len. */
953 recv_get_config_request(struct datapath *dp, const struct sender *sender,
956 struct ofpbuf *buffer;
957 struct ofp_switch_config *osc;
959 osc = make_openflow_reply(sizeof *osc, OFPT_GET_CONFIG_REPLY,
962 osc->flags = htons(dp->flags);
963 osc->miss_send_len = htons(dp->miss_send_len);
965 return send_openflow_buffer(dp, buffer, sender);
/* OFPT_SET_CONFIG handler: accepts only the known flag bits and coerces an
 * unsupported fragment-handling mode to OFPC_FRAG_DROP.
 * NOTE(review): the 'dp->flags = flags;' assignment and return are missing
 * from this truncated listing. */
969 recv_set_config(struct datapath *dp, const struct sender *sender UNUSED,
972 const struct ofp_switch_config *osc = msg;
975 flags = ntohs(osc->flags) & (OFPC_SEND_FLOW_EXP | OFPC_FRAG_MASK);
976 if ((flags & OFPC_FRAG_MASK) != OFPC_FRAG_NORMAL
977 && (flags & OFPC_FRAG_MASK) != OFPC_FRAG_DROP) {
978 flags = (flags & ~OFPC_FRAG_MASK) | OFPC_FRAG_DROP;
981 dp->miss_send_len = ntohs(osc->miss_send_len);
/* OFPT_PACKET_OUT handler: validates the embedded action list length,
 * obtains the packet either from the trailing message data (buffer_id -1)
 * or from the packet-buffer store, validates the actions against the
 * extracted flow key, and executes them.  Untrusted lengths from the wire
 * are checked before use.  NOTE(review): error returns, the NULL-buffer
 * check after retrieve_buffer, and the final return are missing from this
 * truncated listing. */
986 recv_packet_out(struct datapath *dp, const struct sender *sender,
989 const struct ofp_packet_out *opo = msg;
990 struct sw_flow_key key;
992 struct ofpbuf *buffer;
993 size_t actions_len = ntohs(opo->actions_len);
995 if (actions_len > (ntohs(opo->header.length) - sizeof *opo)) {
996 VLOG_DBG_RL(&rl, "message too short for number of actions");
1000 if (ntohl(opo->buffer_id) == (uint32_t) -1) {
1001 /* FIXME: can we avoid copying data here? */
1002 int data_len = ntohs(opo->header.length) - sizeof *opo - actions_len;
1003 buffer = ofpbuf_new(data_len);
1004 ofpbuf_put(buffer, (uint8_t *)opo->actions + actions_len, data_len);
1006 buffer = retrieve_buffer(ntohl(opo->buffer_id));
1012 flow_extract(buffer, ntohs(opo->in_port), &key.flow);
1014 v_code = validate_actions(dp, &key, opo->actions, actions_len);
1015 if (v_code != ACT_VALIDATION_OK) {
1016 dp_send_error_msg(dp, sender, OFPET_BAD_ACTION, v_code,
1017 msg, ntohs(opo->header.length));
1021 execute_actions(dp, buffer, &key, opo->actions, actions_len, true);
1026 ofpbuf_delete(buffer);
/* OFPT_PORT_MOD handler: delegates to update_port_flags. */
1031 recv_port_mod(struct datapath *dp, const struct sender *sender UNUSED,
1034 const struct ofp_port_mod *opm = msg;
1036 update_port_flags(dp, opm);
/* OFPFC_ADD handler: allocates a flow, validates its actions (reporting
 * OFPET_BAD_ACTION on failure), fills in match/timeouts/counters, inserts
 * it into the chain (OFPFMFC_ALL_TABLES_FULL on -ENOBUFS), and finally
 * runs any buffered packet referenced by buffer_id through the new actions.
 * Exact-match flows (no wildcards) get the implicit highest priority (-1
 * as u16 = 0xffff).  NOTE(review): the allocation-failure branch, 'return
 * 0', the error_free_flow label/flow_free call, and the buffered-packet
 * error path are missing from this truncated listing. */
1042 add_flow(struct datapath *dp, const struct sender *sender,
1043 const struct ofp_flow_mod *ofm)
1045 int error = -ENOMEM;
1047 struct sw_flow *flow;
1048 size_t actions_len = ntohs(ofm->header.length) - sizeof *ofm;
1050 /* Allocate memory. */
1051 flow = flow_alloc(actions_len);
1055 flow_extract_match(&flow->key, &ofm->match);
1057 v_code = validate_actions(dp, &flow->key, ofm->actions, actions_len);
1058 if (v_code != ACT_VALIDATION_OK) {
1059 dp_send_error_msg(dp, sender, OFPET_BAD_ACTION, v_code,
1060 ofm, ntohs(ofm->header.length));
1061 goto error_free_flow;
1064 /* Fill out flow. */
1065 flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
1066 flow->idle_timeout = ntohs(ofm->idle_timeout);
1067 flow->hard_timeout = ntohs(ofm->hard_timeout);
1068 flow->used = flow->created = time_msec();
1069 flow->sf_acts->actions_len = actions_len;
1070 flow->byte_count = 0;
1071 flow->packet_count = 0;
1072 flow->tcp_flags = 0;
1074 memcpy(flow->sf_acts->actions, ofm->actions, actions_len);
/* Act on the new flow. */
1077 error = chain_insert(dp->chain, flow);
1078 if (error == -ENOBUFS) {
1079 dp_send_error_msg(dp, sender, OFPET_FLOW_MOD_FAILED,
1080 OFPFMFC_ALL_TABLES_FULL, ofm, ntohs(ofm->header.length));
1081 goto error_free_flow;
1083 goto error_free_flow;
/* Apply the new flow to any packet the controller buffered for it. */
1086 if (ntohl(ofm->buffer_id) != UINT32_MAX) {
1087 struct ofpbuf *buffer = retrieve_buffer(ntohl(ofm->buffer_id));
1089 struct sw_flow_key key;
1090 uint16_t in_port = ntohs(ofm->match.in_port);
1091 flow_extract(buffer, in_port, &key.flow);
1092 flow_used(flow, buffer);
1093 execute_actions(dp, buffer, &key,
1094 ofm->actions, actions_len, false);
/* Error path: also drop the referenced packet buffer, if any. */
1104 if (ntohl(ofm->buffer_id) != (uint32_t) -1)
1105 discard_buffer(ntohl(ofm->buffer_id));
/* OFPFC_MODIFY / OFPFC_MODIFY_STRICT handler: validates the replacement
 * actions, then updates all matching flows in the chain (strict = exact
 * match + priority) and, like add_flow, runs any buffered packet through
 * the new actions.  NOTE(review): the 'return 0', error returns, and the
 * NULL-buffer/discard paths are missing from this truncated listing. */
1110 mod_flow(struct datapath *dp, const struct sender *sender,
1111 const struct ofp_flow_mod *ofm)
1113 int error = -ENOMEM;
1116 struct sw_flow_key key;
1120 flow_extract_match(&key, &ofm->match);
1122 actions_len = ntohs(ofm->header.length) - sizeof *ofm;
1124 v_code = validate_actions(dp, &key, ofm->actions, actions_len);
1125 if (v_code != ACT_VALIDATION_OK) {
1126 dp_send_error_msg(dp, sender, OFPET_BAD_ACTION, v_code,
1127 ofm, ntohs(ofm->header.length));
/* Exact-match entries use the implicit top priority (-1 as u16). */
1131 priority = key.wildcards ? ntohs(ofm->priority) : -1;
1132 strict = (ofm->command == htons(OFPFC_MODIFY_STRICT)) ? 1 : 0;
1133 chain_modify(dp->chain, &key, priority, strict, ofm->actions, actions_len);
1135 if (ntohl(ofm->buffer_id) != UINT32_MAX) {
1136 struct ofpbuf *buffer = retrieve_buffer(ntohl(ofm->buffer_id));
1138 struct sw_flow_key skb_key;
1139 uint16_t in_port = ntohs(ofm->match.in_port);
1140 flow_extract(buffer, in_port, &skb_key.flow);
1141 execute_actions(dp, buffer, &skb_key,
1142 ofm->actions, actions_len, false);
1150 if (ntohl(ofm->buffer_id) != (uint32_t) -1)
1151 discard_buffer(ntohl(ofm->buffer_id));
/* OFPT_FLOW_MOD dispatcher: routes to add_flow / mod_flow, or performs
 * DELETE / DELETE_STRICT directly via chain_delete (-ESRCH when nothing
 * matched).  NOTE(review): the trailing else (unknown command) and closing
 * brace are missing from this truncated listing. */
1156 recv_flow(struct datapath *dp, const struct sender *sender,
1159 const struct ofp_flow_mod *ofm = msg;
1160 uint16_t command = ntohs(ofm->command);
1162 if (command == OFPFC_ADD) {
1163 return add_flow(dp, sender, ofm);
1164 } else if ((command == OFPFC_MODIFY) || (command == OFPFC_MODIFY_STRICT)) {
1165 return mod_flow(dp, sender, ofm);
1166 } else if (command == OFPFC_DELETE) {
1167 struct sw_flow_key key;
1168 flow_extract_match(&key, &ofm->match);
1169 return chain_delete(dp->chain, &key, ofm->out_port, 0, 0) ? 0 : -ESRCH;
1170 } else if (command == OFPFC_DELETE_STRICT) {
1171 struct sw_flow_key key;
1173 flow_extract_match(&key, &ofm->match);
1174 priority = key.wildcards ? ntohs(ofm->priority) : -1;
1175 return chain_delete(dp->chain, &key, ofm->out_port,
1176 priority, 1) ? 0 : -ESRCH;
/* OFPST_DESC dump: copies the four description strings into the reply.
 * NOTE(review): '&serial_num' with 'extern char serial_num;' (L65 of the
 * original header region) treats a single char as a string pointer; the
 * same pattern is used for mfr/hw/sw_desc.  This mirrors the upstream
 * reference switch's extern-char idiom, but strncpy here does not
 * guarantee NUL termination if the source fills the field -- verify
 * against the definitions of these externs in the full source. */
1182 static int desc_stats_dump(struct datapath *dp, void *state,
1183 struct ofpbuf *buffer)
1185 struct ofp_desc_stats *ods = ofpbuf_put_uninit(buffer, sizeof *ods);
1187 strncpy(ods->mfr_desc, &mfr_desc, sizeof ods->mfr_desc);
1188 strncpy(ods->hw_desc, &hw_desc, sizeof ods->hw_desc);
1189 strncpy(ods->sw_desc, &sw_desc, sizeof ods->sw_desc);
1190 strncpy(ods->serial_num, &serial_num, sizeof ods->serial_num);
/* Iteration state for a (possibly multi-message) OFPST_FLOW dump. */
1195 struct flow_stats_state {
1197 struct sw_table_position position;
1198 struct ofp_flow_stats_request rq;
1199 uint64_t now; /* Current time in milliseconds */
1201 struct ofpbuf *buffer;
/* Soft cap on reply payload per dump callback invocation. */
1204 #define MAX_FLOW_STATS_BYTES 4096
/* OFPST_FLOW init: snapshot the request and start at the requested table
 * (0xff means "all tables", starting from table 0).
 * NOTE(review): copying 'fsr' into s->rq and the '*state = s' / return
 * lines are missing from this truncated listing. */
1206 static int flow_stats_init(struct datapath *dp, const void *body, int body_len,
1209 const struct ofp_flow_stats_request *fsr = body;
1210 struct flow_stats_state *s = xmalloc(sizeof *s);
1211 s->table_idx = fsr->table_id == 0xff ? 0 : fsr->table_id;
1212 memset(&s->position, 0, sizeof s->position);
/* Per-flow callback for the OFPST_FLOW dump: append one record; a nonzero
 * return tells the table iterator to pause once the reply is full. */
1218 static int flow_stats_dump_callback(struct sw_flow *flow, void *private)
1220 struct flow_stats_state *s = private;
1221 fill_flow_stats(s->buffer, flow, s->table_idx, s->now);
1222 return s->buffer->size >= MAX_FLOW_STATS_BYTES;
/* OFPST_FLOW dump: walk tables from s->table_idx, resuming from s->position;
 * returns nonzero ("more to come") while the size cap keeps being hit.
 * NOTE(review): the s->buffer assignment, the table_idx++ advance, and
 * the iterate-returned-early break are missing from this listing. */
1225 static int flow_stats_dump(struct datapath *dp, void *state,
1226 struct ofpbuf *buffer)
1228 struct flow_stats_state *s = state;
1229 struct sw_flow_key match_key;
1231 flow_extract_match(&match_key, &s->rq.match);
1233 s->now = time_msec();
1234 while (s->table_idx < dp->chain->n_tables
1235 && (s->rq.table_id == 0xff || s->rq.table_id == s->table_idx))
1237 struct sw_table *table = dp->chain->tables[s->table_idx];
1239 if (table->iterate(table, &match_key, s->rq.out_port,
1240 &s->position, flow_stats_dump_callback, s))
/* Reset the position when moving on to the next table. */
1244 memset(&s->position, 0, sizeof s->position);
1246 return s->buffer->size >= MAX_FLOW_STATS_BYTES;
/* OFPST_FLOW done: frees the iteration state (body elided in listing). */
1249 static void flow_stats_done(void *state)
/* Iteration state for OFPST_AGGREGATE: just the snapshotted request. */
1254 struct aggregate_stats_state {
1255 struct ofp_aggregate_stats_request rq;
/* OFPST_AGGREGATE init: snapshot the request into fresh state.
 * NOTE(review): the memcpy into s->rq and '*state = s' are missing. */
1258 static int aggregate_stats_init(struct datapath *dp,
1259 const void *body, int body_len,
1262 const struct ofp_aggregate_stats_request *rq = body;
1263 struct aggregate_stats_state *s = xmalloc(sizeof *s);
/* Per-flow callback: accumulate packet/byte counts (host order for now;
 * converted to network order once at the end of aggregate_stats_dump). */
1269 static int aggregate_stats_dump_callback(struct sw_flow *flow, void *private)
1271 struct ofp_aggregate_stats_reply *rpy = private;
1272 rpy->packet_count += flow->packet_count;
1273 rpy->byte_count += flow->byte_count;
/* OFPST_AGGREGATE dump: iterates every requested table, summing counters
 * in host order inside the reply struct, then converts the three totals to
 * network byte order in place.  NOTE(review): the error return from
 * iterate, flow_count accumulation, and table_idx++ advance are missing
 * from this truncated listing. */
1278 static int aggregate_stats_dump(struct datapath *dp, void *state,
1279 struct ofpbuf *buffer)
1281 struct aggregate_stats_state *s = state;
1282 struct ofp_aggregate_stats_request *rq = &s->rq;
1283 struct ofp_aggregate_stats_reply *rpy;
1284 struct sw_table_position position;
1285 struct sw_flow_key match_key;
1288 rpy = ofpbuf_put_uninit(buffer, sizeof *rpy);
1289 memset(rpy, 0, sizeof *rpy);
1291 flow_extract_match(&match_key, &rq->match);
1292 table_idx = rq->table_id == 0xff ? 0 : rq->table_id;
1293 memset(&position, 0, sizeof position);
1294 while (table_idx < dp->chain->n_tables
1295 && (rq->table_id == 0xff || rq->table_id == table_idx))
1297 struct sw_table *table = dp->chain->tables[table_idx];
1300 error = table->iterate(table, &match_key, rq->out_port, &position,
1301 aggregate_stats_dump_callback, rpy);
1306 memset(&position, 0, sizeof position);
/* Convert the host-order accumulators to wire format exactly once. */
1309 rpy->packet_count = htonll(rpy->packet_count);
1310 rpy->byte_count = htonll(rpy->byte_count);
1311 rpy->flow_count = htonl(rpy->flow_count);
/* OFPST_AGGREGATE done: frees the state (body elided in listing). */
1315 static void aggregate_stats_done(void *state)
/* OFPST_TABLE dump: one ofp_table_stats record per table in the chain.
 * Note stats.name may fill ots->name without NUL termination (fixed-width
 * wire field, so that is acceptable for the protocol). */
1320 static int table_stats_dump(struct datapath *dp, void *state,
1321 struct ofpbuf *buffer)
1324 for (i = 0; i < dp->chain->n_tables; i++) {
1325 struct ofp_table_stats *ots = ofpbuf_put_uninit(buffer, sizeof *ots);
1326 struct sw_table_stats stats;
1327 dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
1328 strncpy(ots->name, stats.name, sizeof ots->name);
1330 ots->wildcards = htonl(stats.wildcards);
1331 memset(ots->pad, 0, sizeof ots->pad);
1332 ots->max_entries = htonl(stats.max_flows);
1333 ots->active_count = htonl(stats.n_flows);
1334 ots->lookup_count = htonll(stats.n_lookup);
1335 ots->matched_count = htonll(stats.n_matched);
1340 struct port_stats_state {
1344 static int port_stats_init(struct datapath *dp, const void *body, int body_len,
1347 struct port_stats_state *s = xmalloc(sizeof *s);
1354 dump_port_stats(struct sw_port *port, struct ofpbuf *buffer)
1356 struct ofp_port_stats *ops = ofpbuf_put_uninit(buffer, sizeof *ops);
1357 ops->port_no = htons(port->port_no);
1358 memset(ops->pad, 0, sizeof ops->pad);
1359 ops->rx_packets = htonll(port->rx_packets);
1360 ops->tx_packets = htonll(port->tx_packets);
1361 ops->rx_bytes = htonll(port->rx_bytes);
1362 ops->tx_bytes = htonll(port->tx_bytes);
1363 ops->rx_dropped = htonll(-1);
1364 ops->tx_dropped = htonll(port->tx_dropped);
1365 ops->rx_errors = htonll(-1);
1366 ops->tx_errors = htonll(-1);
1367 ops->rx_frame_err = htonll(-1);
1368 ops->rx_over_err = htonll(-1);
1369 ops->rx_crc_err = htonll(-1);
1370 ops->collisions = htonll(-1);
1373 static int port_stats_dump(struct datapath *dp, void *state,
1374 struct ofpbuf *buffer)
1376 struct port_stats_state *s = state;
1379 for (i = s->port; i < DP_MAX_PORTS; i++) {
1380 struct sw_port *p = &dp->ports[i];
1382 dump_port_stats(p, buffer);
1387 if (dp->local_port) {
1388 dump_port_stats(dp->local_port, buffer);
1389 s->port = OFPP_LOCAL + 1;
1394 static void port_stats_done(void *state)
1400 /* Value for 'type' member of struct ofp_stats_request. */
1403 /* Minimum and maximum acceptable number of bytes in body member of
1404 * struct ofp_stats_request. */
1405 size_t min_body, max_body;
1407 /* Prepares to dump some kind of statistics on 'dp'. 'body' and
1408 * 'body_len' are the 'body' member of the struct ofp_stats_request.
1409 * Returns zero if successful, otherwise a negative error code.
1410 * May initialize '*state' to state information. May be null if no
1411 * initialization is required.*/
1412 int (*init)(struct datapath *dp, const void *body, int body_len,
1415 /* Appends statistics for 'dp' to 'buffer', which initially contains a
1416 * struct ofp_stats_reply. On success, it should return 1 if it should be
1417 * called again later with another buffer, 0 if it is done, or a negative
1418 * errno value on failure. */
1419 int (*dump)(struct datapath *dp, void *state, struct ofpbuf *buffer);
1421 /* Cleans any state created by the init or dump functions. May be null
1422 * if no cleanup is required. */
1423 void (*done)(void *state);
1426 static const struct stats_type stats[] = {
1437 sizeof(struct ofp_flow_stats_request),
1438 sizeof(struct ofp_flow_stats_request),
1445 sizeof(struct ofp_aggregate_stats_request),
1446 sizeof(struct ofp_aggregate_stats_request),
1447 aggregate_stats_init,
1448 aggregate_stats_dump,
1449 aggregate_stats_done
1469 struct stats_dump_cb {
1471 struct ofp_stats_request *rq;
1472 struct sender sender;
1473 const struct stats_type *s;
1478 stats_dump(struct datapath *dp, void *cb_)
1480 struct stats_dump_cb *cb = cb_;
1481 struct ofp_stats_reply *osr;
1482 struct ofpbuf *buffer;
1489 osr = make_openflow_reply(sizeof *osr, OFPT_STATS_REPLY, &cb->sender,
1491 osr->type = htons(cb->s->type);
1494 err = cb->s->dump(dp, cb->state, buffer);
1500 /* Buffer might have been reallocated, so find our data again. */
1501 osr = ofpbuf_at_assert(buffer, 0, sizeof *osr);
1502 osr->flags = ntohs(OFPSF_REPLY_MORE);
1504 err2 = send_openflow_buffer(dp, buffer, &cb->sender);
1514 stats_done(void *cb_)
1516 struct stats_dump_cb *cb = cb_;
1519 cb->s->done(cb->state);
1526 recv_stats_request(struct datapath *dp, const struct sender *sender,
1529 const struct ofp_stats_request *rq = oh;
1530 size_t rq_len = ntohs(rq->header.length);
1531 const struct stats_type *st;
1532 struct stats_dump_cb *cb;
1536 type = ntohs(rq->type);
1537 for (st = stats; ; st++) {
1538 if (st >= &stats[ARRAY_SIZE(stats)]) {
1539 VLOG_WARN_RL(&rl, "received stats request of unknown type %d",
1542 } else if (type == st->type) {
1547 cb = xmalloc(sizeof *cb);
1549 cb->rq = xmemdup(rq, rq_len);
1550 cb->sender = *sender;
1554 body_len = rq_len - offsetof(struct ofp_stats_request, body);
1555 if (body_len < cb->s->min_body || body_len > cb->s->max_body) {
1556 VLOG_WARN_RL(&rl, "stats request type %d with bad body length %d",
1563 err = cb->s->init(dp, rq->body, body_len, &cb->state);
1566 "failed initialization of stats request type %d: %s",
1567 type, strerror(-err));
1572 remote_start_dump(sender->remote, stats_dump, stats_done, cb);
1582 recv_echo_request(struct datapath *dp, const struct sender *sender,
1585 return send_openflow_buffer(dp, make_echo_reply(oh), sender);
1589 recv_echo_reply(struct datapath *dp UNUSED, const struct sender *sender UNUSED,
1590 const void *oh UNUSED)
1596 recv_vendor(struct datapath *dp, const struct sender *sender,
1599 const struct ofp_vendor_header *ovh = oh;
1601 switch (ntohl(ovh->vendor))
1604 return nx_recv_msg(dp, sender, oh);
1607 VLOG_WARN_RL(&rl, "unknown vendor: 0x%x\n", ntohl(ovh->vendor));
1608 dp_send_error_msg(dp, sender, OFPET_BAD_REQUEST,
1609 OFPBRC_BAD_VENDOR, oh, ntohs(ovh->header.length));
1614 /* 'msg', which is 'length' bytes long, was received from the control path.
1615 * Apply it to 'chain'. */
1617 fwd_control_input(struct datapath *dp, const struct sender *sender,
1618 const void *msg, size_t length)
1620 int (*handler)(struct datapath *, const struct sender *, const void *);
1621 struct ofp_header *oh;
1624 /* Check encapsulated length. */
1625 oh = (struct ofp_header *) msg;
1626 if (ntohs(oh->length) > length) {
1629 assert(oh->version == OFP_VERSION);
1631 /* Figure out how to handle it. */
1633 case OFPT_FEATURES_REQUEST:
1634 min_size = sizeof(struct ofp_header);
1635 handler = recv_features_request;
1637 case OFPT_GET_CONFIG_REQUEST:
1638 min_size = sizeof(struct ofp_header);
1639 handler = recv_get_config_request;
1641 case OFPT_SET_CONFIG:
1642 min_size = sizeof(struct ofp_switch_config);
1643 handler = recv_set_config;
1645 case OFPT_PACKET_OUT:
1646 min_size = sizeof(struct ofp_packet_out);
1647 handler = recv_packet_out;
1650 min_size = sizeof(struct ofp_flow_mod);
1651 handler = recv_flow;
1654 min_size = sizeof(struct ofp_port_mod);
1655 handler = recv_port_mod;
1657 case OFPT_STATS_REQUEST:
1658 min_size = sizeof(struct ofp_stats_request);
1659 handler = recv_stats_request;
1661 case OFPT_ECHO_REQUEST:
1662 min_size = sizeof(struct ofp_header);
1663 handler = recv_echo_request;
1665 case OFPT_ECHO_REPLY:
1666 min_size = sizeof(struct ofp_header);
1667 handler = recv_echo_reply;
1670 min_size = sizeof(struct ofp_vendor_header);
1671 handler = recv_vendor;
1674 dp_send_error_msg(dp, sender, OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE,
1680 if (length < min_size)
1682 return handler(dp, sender, msg);
1685 /* Packet buffering. */
1687 #define OVERWRITE_SECS 1
1689 struct packet_buffer {
1690 struct ofpbuf *buffer;
1695 static struct packet_buffer buffers[N_PKT_BUFFERS];
1696 static unsigned int buffer_idx;
1698 uint32_t save_buffer(struct ofpbuf *buffer)
1700 struct packet_buffer *p;
1703 buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
1704 p = &buffers[buffer_idx];
1706 /* Don't buffer packet if existing entry is less than
1707 * OVERWRITE_SECS old. */
1708 if (time_now() < p->timeout) { /* FIXME */
1711 ofpbuf_delete(p->buffer);
1714 /* Don't use maximum cookie value since the all-bits-1 id is
1716 if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
1718 p->buffer = ofpbuf_clone(buffer); /* FIXME */
1719 p->timeout = time_now() + OVERWRITE_SECS; /* FIXME */
1720 id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
1725 static struct ofpbuf *retrieve_buffer(uint32_t id)
1727 struct ofpbuf *buffer = NULL;
1728 struct packet_buffer *p;
1730 p = &buffers[id & PKT_BUFFER_MASK];
1731 if (p->cookie == id >> PKT_BUFFER_BITS) {
1735 printf("cookie mismatch: %x != %x\n",
1736 id >> PKT_BUFFER_BITS, p->cookie);
1742 static void discard_buffer(uint32_t id)
1744 struct packet_buffer *p;
1746 p = &buffers[id & PKT_BUFFER_MASK];
1747 if (p->cookie == id >> PKT_BUFFER_BITS) {
1748 ofpbuf_delete(p->buffer);