1 /* Copyright (c) 2008 The Board of Trustees of The Leland Stanford
4 * We are making the OpenFlow specification and associated documentation
5 * (Software) available for public use and benefit with the expectation
6 * that others will use, modify and enhance the Software and contribute
7 * those enhancements back to the community. However, since we would
8 * like to make the Software available for broadest use, with as few
9 * restrictions as possible permission is hereby granted, free of
10 * charge, to any person obtaining a copy of this Software to deal in
11 * the Software under the copyrights without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sublicense, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
17 * The above copyright notice and this permission notice shall be
18 * included in all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
24 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
25 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
26 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * The name and trademarks of copyright holder(s) may NOT be used in
30 * advertising or publicity pertaining to the Software or any
31 * derivatives without specific, written prior permission.
35 #include <arpa/inet.h>
47 #include "openflow/openflow.h"
49 #include "poll-loop.h"
52 #include "switch-flow.h"
59 #define THIS_MODULE VLM_datapath
65 extern char serial_num;
67 /* Capabilities supported by this implementation. */
68 #define OFP_SUPPORTED_CAPABILITIES ( OFPC_FLOW_STATS \
73 /* Actions supported by this implementation. */
74 #define OFP_SUPPORTED_ACTIONS ( (1 << OFPAT_OUTPUT) \
75 | (1 << OFPAT_SET_VLAN_VID) \
76 | (1 << OFPAT_SET_VLAN_PCP) \
77 | (1 << OFPAT_STRIP_VLAN) \
78 | (1 << OFPAT_SET_DL_SRC) \
79 | (1 << OFPAT_SET_DL_DST) \
80 | (1 << OFPAT_SET_NW_SRC) \
81 | (1 << OFPAT_SET_NW_DST) \
82 | (1 << OFPAT_SET_TP_SRC) \
83 | (1 << OFPAT_SET_TP_DST) )
86 uint32_t config; /* Some subset of OFPPC_* flags. */
87 uint32_t state; /* Some subset of OFPPS_* flags. */
89 struct netdev *netdev;
90 struct list node; /* Element in datapath.ports. */
91 unsigned long long int rx_packets, tx_packets;
92 unsigned long long int rx_bytes, tx_bytes;
93 unsigned long long int tx_dropped;
97 /* The origin of a received OpenFlow message, to enable sending a reply. */
99 struct remote *remote; /* The device that sent the message. */
100 uint32_t xid; /* The OpenFlow transaction ID. */
103 /* A connection to a secure channel. */
107 #define TXQ_LIMIT 128 /* Max number of packets to queue for tx. */
108 int n_txq; /* Number of packets queued for tx on rconn. */
110 /* Support for reliable, multi-message replies to requests.
112 * If an incoming request needs to have a reliable reply that might
113 * require multiple messages, it can use remote_start_dump() to set up
114 * a callback that will be called as buffer space for replies. */
115 int (*cb_dump)(struct datapath *, void *aux);
116 void (*cb_done)(void *aux);
120 #define DP_MAX_PORTS 255
121 BUILD_ASSERT_DECL(DP_MAX_PORTS <= OFPP_MAX);
124 /* Remote connections. */
125 struct list remotes; /* All connections (including controller). */
128 struct pvconn **listeners;
133 /* Unique identifier for this datapath */
136 struct sw_chain *chain; /* Forwarding rules. */
138 /* Configuration set from controller. */
140 uint16_t miss_send_len;
143 struct sw_port ports[DP_MAX_PORTS];
144 struct sw_port *local_port; /* OFPP_LOCAL port, if any. */
145 struct list port_list; /* All ports, including local_port. */
148 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 60);
150 static struct remote *remote_create(struct datapath *, struct rconn *);
151 static void remote_run(struct datapath *, struct remote *);
152 static void remote_wait(struct remote *);
153 static void remote_destroy(struct remote *);
155 static void update_port_flags(struct datapath *, const struct ofp_port_mod *);
156 static void send_flow_expired(struct datapath *, struct sw_flow *,
157 enum ofp_flow_expired_reason);
158 static int update_port_status(struct sw_port *p);
159 static void send_port_status(struct sw_port *p, uint8_t status);
160 static void del_switch_port(struct sw_port *p);
162 /* Buffers are identified by a 31-bit opaque ID. We divide the ID
163 * into a buffer number (low bits) and a cookie (high bits). The buffer number
164 * is an index into an array of buffers. The cookie distinguishes between
165 * different packets that have occupied a single buffer. Thus, the more
166 * buffers we have, the lower-quality the cookie... */
167 #define PKT_BUFFER_BITS 8
168 #define N_PKT_BUFFERS (1 << PKT_BUFFER_BITS)
169 #define PKT_BUFFER_MASK (N_PKT_BUFFERS - 1)
171 #define PKT_COOKIE_BITS (32 - PKT_BUFFER_BITS)
173 int run_flow_through_tables(struct datapath *, struct ofpbuf *,
175 void fwd_port_input(struct datapath *, struct ofpbuf *, struct sw_port *);
176 int fwd_control_input(struct datapath *, const struct sender *,
177 const void *, size_t);
179 uint32_t save_buffer(struct ofpbuf *);
180 static struct ofpbuf *retrieve_buffer(uint32_t id);
181 static void discard_buffer(uint32_t id);
183 static struct sw_port *
/* Map an OpenFlow port number to its sw_port: physical ports index the
 * ports[] array, OFPP_LOCAL maps to local_port.  (The fall-through result
 * for other numbers is elided from this view — presumably NULL; confirm.) */
184 lookup_port(struct datapath *dp, uint16_t port_no)
186 return (port_no < DP_MAX_PORTS ? &dp->ports[port_no]
187 : port_no == OFPP_LOCAL ? dp->local_port
191 /* Generates and returns a random datapath id. */
193 gen_datapath_id(void)
195 uint8_t ea[ETH_ADDR_LEN];
/* First octet fixed at 0x00 (Nicira OUI); the remaining bytes are set by
 * code elided from this view — presumably randomized. */
197 ea[0] = 0x00;   /* Set Nicira OUI. */
200 return eth_addr_to_uint64(ea);
/* Allocate and initialize a datapath: use 'dpid' when it fits in 48 bits,
 * otherwise generate a random id; create the flow chain and port list and
 * set the default miss_send_len.  NOTE(review): the calloc failure check
 * and the error-return paths are elided from this view — confirm. */
204 dp_new(struct datapath **dp_, uint64_t dpid)
208 dp = calloc(1, sizeof *dp);
213 dp->last_timeout = time_now();
214 list_init(&dp->remotes);
215 dp->listeners = NULL;
217 dp->id = dpid <= UINT64_C(0xffffffffffff) ? dpid : gen_datapath_id();
218 dp->chain = chain_create();
220 VLOG_ERR("could not create chain");
225 list_init(&dp->port_list);
227 dp->miss_send_len = OFP_DEFAULT_MISS_SEND_LEN;
/* Open netdev 'netdev_name', optionally re-assign its MAC to 'new_mac',
 * bring it up in promiscuous mode, and register it as 'port_no' on 'dp'.
 * Announces the new port to the control path via OFPPR_ADD. */
233 new_port(struct datapath *dp, struct sw_port *port, uint16_t port_no,
234 const char *netdev_name, const uint8_t *new_mac)
236 struct netdev *netdev;
241 error = netdev_open(netdev_name, NETDEV_ETH_TYPE_ANY, &netdev);
245 if (new_mac && !eth_addr_equals(netdev_get_etheraddr(netdev), new_mac)) {
246 /* Generally the device has to be down before we change its hardware
247 * address. Don't bother to check for an error because it's really
248 * the netdev_set_etheraddr() call below that we care about. */
249 netdev_set_flags(netdev, 0, false);
250 error = netdev_set_etheraddr(netdev, new_mac);
252 VLOG_WARN("failed to change %s Ethernet address "
253 "to "ETH_ADDR_FMT": %s",
254 netdev_name, ETH_ADDR_ARGS(new_mac), strerror(error));
257 error = netdev_set_flags(netdev, NETDEV_UP | NETDEV_PROMISC, false);
259 VLOG_ERR("failed to set promiscuous mode on %s device", netdev_name);
260 netdev_close(netdev);
/* Warn when the device still carries an IPv4/IPv6 address: the datapath
 * expects exclusive use of the interface. */
263 if (netdev_get_in4(netdev, &in4)) {
264 VLOG_ERR("%s device has assigned IP address %s",
265 netdev_name, inet_ntoa(in4));
267 if (netdev_get_in6(netdev, &in6)) {
268 char in6_name[INET6_ADDRSTRLEN + 1];
269 inet_ntop(AF_INET6, &in6, in6_name, sizeof in6_name);
270 VLOG_ERR("%s device has assigned IPv6 address %s",
271 netdev_name, in6_name);
274 memset(port, '\0', sizeof *port);
277 port->netdev = netdev;
278 port->port_no = port_no;
279 list_push_back(&dp->port_list, &port->node);
281 /* Notify the ctlpath that this port has been added */
282 send_port_status(port, OFPPR_ADD);
/* Add 'netdev' to the first free physical port slot on 'dp'.  (The "slot
 * is free" test between the two visible lines is elided from this view.) */
288 dp_add_port(struct datapath *dp, const char *netdev)
291 for (port_no = 0; port_no < DP_MAX_PORTS; port_no++) {
292 struct sw_port *port = &dp->ports[port_no];
294 return new_port(dp, port, port_no, netdev, NULL);
/* Add 'netdev' as the OFPP_LOCAL port, giving it a MAC derived from the
 * datapath id.  At most one local port may exist. */
301 dp_add_local_port(struct datapath *dp, const char *netdev)
303 if (!dp->local_port) {
304 uint8_t ea[ETH_ADDR_LEN];
305 struct sw_port *port;
308 port = xcalloc(1, sizeof *port);
309 eth_addr_from_uint64(dp->id, ea);
310 error = new_port(dp, port, OFPP_LOCAL, netdev, ea);
312 dp->local_port = port;
/* Register a passive connection on which management sessions are accepted
 * by dp_run(); grows the listeners array by one. */
323 dp_add_pvconn(struct datapath *dp, struct pvconn *pvconn)
325 dp->listeners = xrealloc(dp->listeners,
326 sizeof *dp->listeners * (dp->n_listeners + 1));
327 dp->listeners[dp->n_listeners++] = pvconn;
/* One pass of the datapath main loop: once a second refresh port status
 * and expire flows; then poll each port for received packets, service
 * remote connections, and accept new passive connections. */
331 dp_run(struct datapath *dp)
333 time_t now = time_now();
334 struct sw_port *p, *pn;
335 struct remote *r, *rn;
336 struct ofpbuf *buffer = NULL;
339 if (now != dp->last_timeout) {
340 struct list deleted = LIST_INITIALIZER(&deleted);
341 struct sw_flow *f, *n;
343 LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
344 if (update_port_status(p)) {
345 send_port_status(p, OFPPR_MODIFY);
/* Expire timed-out flows and report each expiry to the controller. */
349 chain_timeout(dp->chain, &deleted);
350 LIST_FOR_EACH_SAFE (f, n, struct sw_flow, node, &deleted) {
351 send_flow_expired(dp, f, f->reason);
352 list_remove(&f->node);
355 dp->last_timeout = now;
357 poll_timer_wait(1000);
359 LIST_FOR_EACH_SAFE (p, pn, struct sw_port, node, &dp->port_list) {
363 /* Allocate buffer with some headroom to add headers in forwarding
364 * to the controller or adding a vlan tag, plus an extra 2 bytes to
365 * allow IP headers to be aligned on a 4-byte boundary. */
366 const int headroom = 128 + 2;
367 const int hard_header = VLAN_ETH_HEADER_LEN;
368 const int mtu = netdev_get_mtu(p->netdev);
369 buffer = ofpbuf_new(headroom + hard_header + mtu);
370 buffer->data = (char*)buffer->data + headroom;
372 error = netdev_recv(p->netdev, buffer);
375 p->rx_bytes += buffer->size;
/* fwd_port_input() takes ownership of 'buffer' on success. */
376 fwd_port_input(dp, buffer, p);
378 } else if (error != EAGAIN) {
379 VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
380 netdev_get_name(p->netdev), strerror(error));
383 ofpbuf_delete(buffer);
385 /* Talk to remotes. */
386 LIST_FOR_EACH_SAFE (r, rn, struct remote, node, &dp->remotes) {
/* Accept new management connections; a failed listener is removed by
 * swapping in the last array element (hence no i++ on that path). */
390 for (i = 0; i < dp->n_listeners; ) {
391 struct pvconn *pvconn = dp->listeners[i];
392 struct vconn *new_vconn;
393 int retval = pvconn_accept(pvconn, OFP_VERSION, &new_vconn);
395 remote_create(dp, rconn_new_from_vconn("passive", new_vconn));
396 } else if (retval != EAGAIN) {
397 VLOG_WARN_RL(&rl, "accept failed (%s)", strerror(retval));
398 dp->listeners[i] = dp->listeners[--dp->n_listeners];
/* Service one remote: process a bounded number of received OpenFlow
 * messages, then drive any in-progress dump callback while the transmit
 * queue has room; destroy the remote once its rconn dies. */
406 remote_run(struct datapath *dp, struct remote *r)
412 /* Do some remote processing, but cap it at a reasonable amount so that
413 * other processing doesn't starve. */
414 for (i = 0; i < 50; i++) {
416 struct ofpbuf *buffer;
417 struct ofp_header *oh;
419 buffer = rconn_recv(r->rconn);
/* A message shorter than ofp_header cannot be dispatched safely. */
424 if (buffer->size >= sizeof *oh) {
425 struct sender sender;
429 sender.xid = oh->xid;
430 fwd_control_input(dp, &sender, buffer->data, buffer->size);
432 VLOG_WARN_RL(&rl, "received too-short OpenFlow message");
434 ofpbuf_delete(buffer);
/* Continue a multi-message reply while there is room to queue more;
 * cb_done() releases the dump state when the dump finishes or fails. */
436 if (r->n_txq < TXQ_LIMIT) {
437 int error = r->cb_dump(dp, r->cb_aux);
440 VLOG_WARN_RL(&rl, "dump callback error: %s",
443 r->cb_done(r->cb_aux);
452 if (!rconn_is_alive(r->rconn)) {
/* Register this remote's wakeup events with the poll loop. */
458 remote_wait(struct remote *r)
460 rconn_run_wait(r->rconn);
461 rconn_recv_wait(r->rconn);
/* Tear down a remote, first finishing any unfinished dump so its
 * resources are released before the connection goes away. */
465 remote_destroy(struct remote *r)
468 if (r->cb_dump && r->cb_done) {
469 r->cb_done(r->cb_aux);
471 list_remove(&r->node);
472 rconn_destroy(r->rconn);
477 static struct remote *
/* Wrap 'rconn' in a remote and link it into dp->remotes.  Takes ownership
 * of 'rconn'. */
478 remote_create(struct datapath *dp, struct rconn *rconn)
480 struct remote *remote = xmalloc(sizeof *remote);
481 list_push_back(&dp->remotes, &remote->node);
482 remote->rconn = rconn;
483 remote->cb_dump = NULL;
488 /* Starts a callback-based, reliable, possibly multi-message reply to a
489 * request made by 'remote'.
491 * 'dump' designates a function that will be called when the 'remote' send
492 * queue has an empty slot. It should compose a message and send it on
493 * 'remote'. On success, it should return 1 if it should be called again when
494 * another send queue slot opens up, 0 if its transmissions are complete, or a
495 * negative errno value on failure.
497 * 'done' designates a function to clean up any resources allocated for the
498 * dump. It must handle being called before the dump is complete (which will
499 * happen if 'remote' is closed unexpectedly).
501 * 'aux' is passed to 'dump' and 'done'. */
503 remote_start_dump(struct remote *remote,
504 int (*dump)(struct datapath *, void *),
505 void (*done)(void *),
/* Only one dump may be in flight per remote at a time. */
508 assert(!remote->cb_dump);
509 remote->cb_dump = dump;
510 remote->cb_done = done;
511 remote->cb_aux = aux;
/* Register all datapath wakeup events — port receives, remotes, and
 * passive listeners — with the poll loop. */
515 dp_wait(struct datapath *dp)
521 LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
522 netdev_recv_wait(p->netdev);
524 LIST_FOR_EACH (r, struct remote, node, &dp->remotes) {
527 for (i = 0; i < dp->n_listeners; i++) {
528 pvconn_wait(dp->listeners[i]);
532 /* Delete 'p' from switch. */
/* Announces OFPPR_DELETE to the control path, closes the netdev, and
 * unlinks the port from the port list. */
534 del_switch_port(struct sw_port *p)
536 send_port_status(p, OFPPR_DELETE);
537 netdev_close(p->netdev);
539 list_remove(&p->node);
/* Release all resources held by 'dp': every port, then the flow chain. */
543 dp_destroy(struct datapath *dp)
545 struct sw_port *p, *n;
551 LIST_FOR_EACH_SAFE (p, n, struct sw_port, node, &dp->port_list) {
554 chain_destroy(dp->chain);
558 /* Send packets out all the ports except the originating one. If the
559 * "flood" argument is set, don't send out ports with flooding disabled.
562 output_all(struct datapath *dp, struct ofpbuf *buffer, int in_port, int flood)
/* 'prev_port' lags one port behind the iteration so that the final
 * transmission can consume 'buffer' itself rather than a clone. */
568 LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
569 if (p->port_no == in_port) {
572 if (flood && p->config & OFPPC_NO_FLOOD) {
575 if (prev_port != -1) {
576 dp_output_port(dp, ofpbuf_clone(buffer), in_port, prev_port,
579 prev_port = p->port_no;
582 dp_output_port(dp, buffer, in_port, prev_port, false);
584 ofpbuf_delete(buffer);
/* Transmit 'buffer' on physical port 'out_port' when it exists and is not
 * administratively down; otherwise drop.  Always consumes 'buffer'. */
590 output_packet(struct datapath *dp, struct ofpbuf *buffer, uint16_t out_port)
592 struct sw_port *p = lookup_port(dp, out_port);
593 if (p && p->netdev != NULL) {
594 if (!(p->config & OFPPC_PORT_DOWN)) {
595 if (!netdev_send(p->netdev, buffer)) {
597 p->tx_bytes += buffer->size;
602 ofpbuf_delete(buffer);
606 ofpbuf_delete(buffer);
607 VLOG_DBG_RL(&rl, "can't forward to bad port %d\n", out_port);
610 /* Takes ownership of 'buffer' and transmits it to 'out_port' on 'dp'.
/* Dispatches on the virtual OFPP_* port numbers (IN_PORT, TABLE, ALL,
 * FLOOD, CONTROLLER, ...); concrete port numbers fall through to
 * output_packet() at the bottom. */
613 dp_output_port(struct datapath *dp, struct ofpbuf *buffer,
614 int in_port, int out_port, bool ignore_no_fwd)
620 output_packet(dp, buffer, in_port);
624 struct sw_port *p = lookup_port(dp, in_port);
625 if (run_flow_through_tables(dp, buffer, p)) {
626 ofpbuf_delete(buffer);
632 output_all(dp, buffer, in_port, 1);
636 output_all(dp, buffer, in_port, 0);
639 case OFPP_CONTROLLER:
640 dp_output_control(dp, buffer, in_port, 0, OFPR_ACTION);
645 if (in_port == out_port) {
646 VLOG_DBG_RL(&rl, "can't directly forward to input port");
649 output_packet(dp, buffer, out_port);
/* Compose an OpenFlow reply of 'type', echoing the requester's xid when a
 * 'sender' is known (0 for unsolicited messages). */
655 make_openflow_reply(size_t openflow_len, uint8_t type,
656 const struct sender *sender, struct ofpbuf **bufferp)
658 return make_openflow_xid(openflow_len, type, sender ? sender->xid : 0,
/* Queue 'buffer' for transmission on 'remote', bounded by TXQ_LIMIT via
 * the n_txq counter.  Takes ownership of 'buffer'. */
663 send_openflow_buffer_to_remote(struct ofpbuf *buffer, struct remote *remote)
665 int retval = rconn_send_with_limit(remote->rconn, buffer, &remote->n_txq,
668 VLOG_WARN_RL(&rl, "send to %s failed: %s",
669 rconn_get_name(remote->rconn), strerror(retval));
/* Finalize the message length, then either send 'buffer' back to 'sender'
 * or — for asynchronous messages with no sender — broadcast a clone to
 * every remote.  Takes ownership of 'buffer'. */
675 send_openflow_buffer(struct datapath *dp, struct ofpbuf *buffer,
676 const struct sender *sender)
678 update_openflow_length(buffer);
680 /* Send back to the sender. */
681 return send_openflow_buffer_to_remote(buffer, sender->remote);
683 /* Broadcast to all remotes. */
684 struct remote *r, *prev = NULL;
685 LIST_FOR_EACH (r, struct remote, node, &dp->remotes) {
687 send_openflow_buffer_to_remote(ofpbuf_clone(buffer), prev);
/* The last remote consumes 'buffer' itself; with no remotes at all the
 * buffer is freed below. */
692 send_openflow_buffer_to_remote(buffer, prev);
694 ofpbuf_delete(buffer);
700 /* Takes ownership of 'buffer' and transmits it to 'dp''s controller. If the
701 * packet can be saved in a buffer, then only the first max_len bytes of
702 * 'buffer' are sent; otherwise, all of 'buffer' is sent. 'reason' indicates
703 * why 'buffer' is being sent. 'max_len' sets the maximum number of bytes that
704 * the caller wants to be sent; a value of 0 indicates the entire packet should
707 dp_output_control(struct datapath *dp, struct ofpbuf *buffer, int in_port,
708 size_t max_len, int reason)
710 struct ofp_packet_in *opi;
714 buffer_id = save_buffer(buffer);
715 total_len = buffer->size;
/* Truncate only when the packet was successfully buffered, so that the
 * controller can fetch the remainder by buffer_id. */
716 if (buffer_id != UINT32_MAX && max_len && buffer->size > max_len) {
717 buffer->size = max_len;
/* Prepend the ofp_packet_in header in the headroom reserved by dp_run(). */
720 opi = ofpbuf_push_uninit(buffer, offsetof(struct ofp_packet_in, data));
721 opi->header.version = OFP_VERSION;
722 opi->header.type = OFPT_PACKET_IN;
723 opi->header.length = htons(buffer->size);
724 opi->header.xid = htonl(0);
725 opi->buffer_id = htonl(buffer_id);
726 opi->total_len = htons(total_len);
727 opi->in_port = htons(in_port);
728 opi->reason = reason;
730 send_openflow_buffer(dp, buffer, NULL);
/* Fill 'desc' with the wire-format (network byte order) description of
 * port 'p', as used in features replies and port-status messages. */
733 static void fill_port_desc(struct datapath *dp, struct sw_port *p,
734 struct ofp_phy_port *desc)
736 desc->port_no = htons(p->port_no);
737 strncpy((char *) desc->name, netdev_get_name(p->netdev),
/* strncpy() does not guarantee NUL-termination; force it here. */
739 desc->name[sizeof desc->name - 1] = '\0';
740 memcpy(desc->hw_addr, netdev_get_etheraddr(p->netdev), ETH_ADDR_LEN);
741 desc->config = htonl(p->config);
742 desc->state = htonl(p->state);
743 desc->curr = htonl(netdev_get_features(p->netdev, NETDEV_FEAT_CURRENT));
744 desc->supported = htonl(netdev_get_features(p->netdev,
745 NETDEV_FEAT_SUPPORTED));
746 desc->advertised = htonl(netdev_get_features(p->netdev,
747 NETDEV_FEAT_ADVERTISED));
748 desc->peer = htonl(netdev_get_features(p->netdev, NETDEV_FEAT_PEER));
/* Reply to OFPT_FEATURES_REQUEST: datapath id, table and buffer counts,
 * capability/action bitmaps, and one ofp_phy_port record per port. */
752 dp_send_features_reply(struct datapath *dp, const struct sender *sender)
754 struct ofpbuf *buffer;
755 struct ofp_switch_features *ofr;
758 ofr = make_openflow_reply(sizeof *ofr, OFPT_FEATURES_REPLY,
760 ofr->datapath_id = htonll(dp->id);
761 ofr->n_tables = dp->chain->n_tables;
762 ofr->n_buffers = htonl(N_PKT_BUFFERS);
763 ofr->capabilities = htonl(OFP_SUPPORTED_CAPABILITIES);
764 ofr->actions = htonl(OFP_SUPPORTED_ACTIONS);
765 LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
766 struct ofp_phy_port *opp = ofpbuf_put_uninit(buffer, sizeof *opp);
767 memset(opp, 0, sizeof *opp);
768 fill_port_desc(dp, p, opp);
770 send_openflow_buffer(dp, buffer, sender);
/* Apply an OFPT_PORT_MOD: update the masked config bits, and bring the
 * underlying device up or down when OFPPC_PORT_DOWN changes. */
774 update_port_flags(struct datapath *dp, const struct ofp_port_mod *opm)
776 struct sw_port *p = lookup_port(dp, ntohs(opm->port_no));
778 /* Make sure the port id hasn't changed since this was sent */
779 if (!p || memcmp(opm->hw_addr, netdev_get_etheraddr(p->netdev),
780 ETH_ADDR_LEN) != 0) {
786 uint32_t config_mask = ntohl(opm->mask);
787 p->config &= ~config_mask;
788 p->config |= ntohl(opm->config) & config_mask;
/* The PORT_DOWN bit additionally toggles the device's NETDEV_UP flag,
 * so it is handled separately from the generic mask update above. */
791 if (opm->mask & htonl(OFPPC_PORT_DOWN)) {
792 if ((opm->config & htonl(OFPPC_PORT_DOWN))
793 && (p->config & OFPPC_PORT_DOWN) == 0) {
794 p->config |= OFPPC_PORT_DOWN;
795 netdev_turn_flags_off(p->netdev, NETDEV_UP, true);
796 } else if ((opm->config & htonl(OFPPC_PORT_DOWN)) == 0
797 && (p->config & OFPPC_PORT_DOWN)) {
798 p->config &= ~OFPPC_PORT_DOWN;
799 netdev_turn_flags_on(p->netdev, NETDEV_UP, true);
804 /* Update the port status field of the bridge port. A non-zero return
805 * value indicates some field has changed.
807 * NB: Callers of this function may hold the RCU read lock, so any
808 * additional checks must not sleep.
811 update_port_status(struct sw_port *p)
814 enum netdev_flags flags;
815 uint32_t orig_config = p->config;
816 uint32_t orig_state = p->state;
818 if (netdev_get_flags(p->netdev, &flags) < 0) {
819 VLOG_WARN_RL(&rl, "could not get netdev flags for %s",
820 netdev_get_name(p->netdev));
/* Mirror the device's administrative up/down state into OFPPC_PORT_DOWN. */
823 if (flags & NETDEV_UP) {
824 p->config &= ~OFPPC_PORT_DOWN;
826 p->config |= OFPPC_PORT_DOWN;
830 /* Not all cards support this getting link status, so don't warn on
/* A negative return (unsupported) leaves the link state bit untouched. */
832 retval = netdev_get_link_status(p->netdev);
834 p->state &= ~OFPPS_LINK_DOWN;
835 } else if (retval == 0) {
836 p->state |= OFPPS_LINK_DOWN;
839 return ((orig_config != p->config) || (orig_state != p->state));
/* Broadcast an OFPT_PORT_STATUS message for 'p'; 'status' is one of the
 * OFPPR_* reasons (ADD, DELETE, MODIFY). */
843 send_port_status(struct sw_port *p, uint8_t status)
845 struct ofpbuf *buffer;
846 struct ofp_port_status *ops;
847 ops = make_openflow_xid(sizeof *ops, OFPT_PORT_STATUS, 0, &buffer);
848 ops->reason = status;
849 memset(ops->pad, 0, sizeof ops->pad);
850 fill_port_desc(p->dp, p, &ops->desc);
852 send_openflow_buffer(p->dp, buffer, NULL);
/* Broadcast an OFPT_FLOW_EXPIRED message describing 'flow', its lifetime
 * statistics, and why it was removed. */
856 send_flow_expired(struct datapath *dp, struct sw_flow *flow,
857 enum ofp_flow_expired_reason reason)
859 struct ofpbuf *buffer;
860 struct ofp_flow_expired *ofe;
861 ofe = make_openflow_xid(sizeof *ofe, OFPT_FLOW_EXPIRED, 0, &buffer);
862 flow_fill_match(&ofe->match, &flow->key);
864 ofe->priority = htons(flow->priority);
865 ofe->reason = reason;
866 memset(ofe->pad, 0, sizeof ofe->pad);
868 ofe->duration = htonl(time_now() - flow->created);
869 memset(ofe->pad2, 0, sizeof ofe->pad2);
870 ofe->packet_count = htonll(flow->packet_count);
871 ofe->byte_count = htonll(flow->byte_count);
872 send_openflow_buffer(dp, buffer, NULL);
/* Send an OFPT_ERROR to 'sender' with the given type/code, echoing 'len'
 * bytes of the offending request in the error body. */
876 dp_send_error_msg(struct datapath *dp, const struct sender *sender,
877 uint16_t type, uint16_t code, const void *data, size_t len)
879 struct ofpbuf *buffer;
880 struct ofp_error_msg *oem;
881 oem = make_openflow_reply(sizeof(*oem)+len, OFPT_ERROR, sender, &buffer);
882 oem->type = htons(type);
883 oem->code = htons(code);
884 memcpy(oem->data, data, len);
885 send_openflow_buffer(dp, buffer, sender);
/* Append one wire-format ofp_flow_stats record — match, timers, counters,
 * and the flow's action list — for 'flow' to 'buffer'. */
889 fill_flow_stats(struct ofpbuf *buffer, struct sw_flow *flow,
890 int table_idx, time_t now)
892 struct ofp_flow_stats *ofs;
893 int length = sizeof *ofs + flow->sf_acts->actions_len;
894 ofs = ofpbuf_put_uninit(buffer, length);
895 ofs->length = htons(length);
896 ofs->table_id = table_idx;
898 ofs->match.wildcards = htonl(flow->key.wildcards);
/* The remaining match fields are copied as stored — presumably already in
 * network byte order in the flow key; confirm against flow.h. */
899 ofs->match.in_port = flow->key.flow.in_port;
900 memcpy(ofs->match.dl_src, flow->key.flow.dl_src, ETH_ADDR_LEN);
901 memcpy(ofs->match.dl_dst, flow->key.flow.dl_dst, ETH_ADDR_LEN);
902 ofs->match.dl_vlan = flow->key.flow.dl_vlan;
903 ofs->match.dl_type = flow->key.flow.dl_type;
904 ofs->match.nw_src = flow->key.flow.nw_src;
905 ofs->match.nw_dst = flow->key.flow.nw_dst;
906 ofs->match.nw_proto = flow->key.flow.nw_proto;
908 ofs->match.tp_src = flow->key.flow.tp_src;
909 ofs->match.tp_dst = flow->key.flow.tp_dst;
910 ofs->duration = htonl(now - flow->created);
911 ofs->priority = htons(flow->priority);
912 ofs->idle_timeout = htons(flow->idle_timeout);
913 ofs->hard_timeout = htons(flow->hard_timeout);
914 memset(ofs->pad2, 0, sizeof ofs->pad2);
915 ofs->packet_count = htonll(flow->packet_count);
916 ofs->byte_count = htonll(flow->byte_count);
917 memcpy(ofs->actions, flow->sf_acts->actions, flow->sf_acts->actions_len);
921 /* 'buffer' was received on 'p', which may be a a physical switch port or a
922 * null pointer. Process it according to 'dp''s flow table. Returns 0 if
923 * successful, in which case 'buffer' is destroyed, or -ESRCH if there is no
924 * matching flow, in which case 'buffer' still belongs to the caller. */
925 int run_flow_through_tables(struct datapath *dp, struct ofpbuf *buffer,
928 struct sw_flow_key key;
929 struct sw_flow *flow;
/* Drop IP fragments outright when the switch is configured with
 * OFPC_FRAG_DROP (flow_extract's truth value flags a fragment here —
 * confirm against flow_extract's contract). */
932 if (flow_extract(buffer, p ? p->port_no : OFPP_NONE, &key.flow)
933 && (dp->flags & OFPC_FRAG_MASK) == OFPC_FRAG_DROP) {
935 ofpbuf_delete(buffer);
/* Honor per-port receive suppression: OFPPC_NO_RECV for ordinary frames,
 * OFPPC_NO_RECV_STP for frames addressed to the STP multicast MAC. */
938 if (p && p->config & (OFPPC_NO_RECV | OFPPC_NO_RECV_STP)
939 && p->config & (!eth_addr_equals(key.flow.dl_dst, stp_eth_addr)
940 ? OFPPC_NO_RECV : OFPPC_NO_RECV_STP)) {
941 ofpbuf_delete(buffer);
945 flow = chain_lookup(dp->chain, &key);
947 flow_used(flow, buffer);
948 execute_actions(dp, buffer, &key, flow->sf_acts->actions,
949 flow->sf_acts->actions_len, false);
956 /* 'buffer' was received on 'p', which may be a a physical switch port or a
957 * null pointer. Process it according to 'dp''s flow table, sending it up to
958 * the controller if no flow matches. Takes ownership of 'buffer'. */
959 void fwd_port_input(struct datapath *dp, struct ofpbuf *buffer,
962 if (run_flow_through_tables(dp, buffer, p)) {
963 dp_output_control(dp, buffer, p->port_no,
964 dp->miss_send_len, OFPR_NO_MATCH);
/* OFPT_FEATURES_REQUEST handler. */
969 recv_features_request(struct datapath *dp, const struct sender *sender,
972 dp_send_features_reply(dp, sender);
/* OFPT_GET_CONFIG_REQUEST handler: report current flags and miss_send_len. */
977 recv_get_config_request(struct datapath *dp, const struct sender *sender,
980 struct ofpbuf *buffer;
981 struct ofp_switch_config *osc;
983 osc = make_openflow_reply(sizeof *osc, OFPT_GET_CONFIG_REPLY,
986 osc->flags = htons(dp->flags);
987 osc->miss_send_len = htons(dp->miss_send_len);
989 return send_openflow_buffer(dp, buffer, sender);
/* OFPT_SET_CONFIG handler: accept only the supported flag bits, coercing
 * an unsupported fragment-handling mode to OFPC_FRAG_DROP. */
993 recv_set_config(struct datapath *dp, const struct sender *sender UNUSED,
996 const struct ofp_switch_config *osc = msg;
999 flags = ntohs(osc->flags) & (OFPC_SEND_FLOW_EXP | OFPC_FRAG_MASK);
1000 if ((flags & OFPC_FRAG_MASK) != OFPC_FRAG_NORMAL
1001 && (flags & OFPC_FRAG_MASK) != OFPC_FRAG_DROP) {
1002 flags = (flags & ~OFPC_FRAG_MASK) | OFPC_FRAG_DROP;
1005 dp->miss_send_len = ntohs(osc->miss_send_len);
/* OFPT_PACKET_OUT handler: obtain the packet either from the message body
 * or from a previously saved buffer, validate the action list, and execute
 * it.  Sends OFPET_BAD_ACTION back on validation failure. */
1010 recv_packet_out(struct datapath *dp, const struct sender *sender,
1013 const struct ofp_packet_out *opo = msg;
1014 struct sw_flow_key key;
1016 struct ofpbuf *buffer;
1017 size_t actions_len = ntohs(opo->actions_len);
/* Reject a claimed actions_len larger than the actual message payload. */
1019 if (actions_len > (ntohs(opo->header.length) - sizeof *opo)) {
1020 VLOG_DBG_RL(&rl, "message too short for number of actions");
1024 if (ntohl(opo->buffer_id) == (uint32_t) -1) {
1025 /* FIXME: can we avoid copying data here? */
1026 int data_len = ntohs(opo->header.length) - sizeof *opo - actions_len;
1027 buffer = ofpbuf_new(data_len);
1028 ofpbuf_put(buffer, (uint8_t *)opo->actions + actions_len, data_len);
1030 buffer = retrieve_buffer(ntohl(opo->buffer_id));
1036 flow_extract(buffer, ntohs(opo->in_port), &key.flow);
1038 v_code = validate_actions(dp, &key, opo->actions, actions_len);
1039 if (v_code != ACT_VALIDATION_OK) {
1040 dp_send_error_msg(dp, sender, OFPET_BAD_ACTION, v_code,
1041 msg, ntohs(opo->header.length));
1045 execute_actions(dp, buffer, &key, opo->actions, actions_len, true);
1050 ofpbuf_delete(buffer);
/* OFPT_PORT_MOD handler. */
1055 recv_port_mod(struct datapath *dp, const struct sender *sender UNUSED,
1058 const struct ofp_port_mod *opm = msg;
1060 update_port_flags(dp, opm);
/* OFPFC_ADD: allocate and validate a new flow, insert it into the chain,
 * and apply it to any packet the controller buffered for this flow mod.
 * Replies with OFPET_BAD_ACTION or OFPFMFC_ALL_TABLES_FULL on failure. */
1066 add_flow(struct datapath *dp, const struct sender *sender,
1067 const struct ofp_flow_mod *ofm)
1069 int error = -ENOMEM;
1071 struct sw_flow *flow;
1072 size_t actions_len = ntohs(ofm->header.length) - sizeof *ofm;
1074 /* Allocate memory. */
1075 flow = flow_alloc(actions_len);
1079 flow_extract_match(&flow->key, &ofm->match);
1081 v_code = validate_actions(dp, &flow->key, ofm->actions, actions_len);
1082 if (v_code != ACT_VALIDATION_OK) {
1083 dp_send_error_msg(dp, sender, OFPET_BAD_ACTION, v_code,
1084 ofm, ntohs(ofm->header.length));
1085 goto error_free_flow;
1088 /* Fill out flow. */
/* An exact-match flow (no wildcards) gets -1, i.e. the highest possible
 * priority as an unsigned value. */
1089 flow->priority = flow->key.wildcards ? ntohs(ofm->priority) : -1;
1090 flow->idle_timeout = ntohs(ofm->idle_timeout);
1091 flow->hard_timeout = ntohs(ofm->hard_timeout);
1092 flow->used = flow->created = time_now();
1093 flow->sf_acts->actions_len = actions_len;
1094 flow->byte_count = 0;
1095 flow->packet_count = 0;
1096 memcpy(flow->sf_acts->actions, ofm->actions, actions_len);
1099 error = chain_insert(dp->chain, flow);
1100 if (error == -ENOBUFS) {
1101 dp_send_error_msg(dp, sender, OFPET_FLOW_MOD_FAILED,
1102 OFPFMFC_ALL_TABLES_FULL, ofm, ntohs(ofm->header.length));
1103 goto error_free_flow;
1105 goto error_free_flow;
/* Run the buffered packet (if any) through the newly installed flow. */
1108 if (ntohl(ofm->buffer_id) != UINT32_MAX) {
1109 struct ofpbuf *buffer = retrieve_buffer(ntohl(ofm->buffer_id));
1111 struct sw_flow_key key;
1112 uint16_t in_port = ntohs(ofm->match.in_port);
1113 flow_used(flow, buffer);
1114 flow_extract(buffer, in_port, &key.flow);
1115 execute_actions(dp, buffer, &key,
1116 ofm->actions, actions_len, false);
/* Error path: a referenced packet buffer is discarded so it cannot leak. */
1126 if (ntohl(ofm->buffer_id) != (uint32_t) -1)
1127 discard_buffer(ntohl(ofm->buffer_id));
/* OFPFC_MODIFY / OFPFC_MODIFY_STRICT: validate the new actions, update
 * matching flows in place via chain_modify(), and apply the actions to
 * any packet the controller buffered with this request. */
1132 mod_flow(struct datapath *dp, const struct sender *sender,
1133 const struct ofp_flow_mod *ofm)
1135 int error = -ENOMEM;
1138 struct sw_flow_key key;
1142 flow_extract_match(&key, &ofm->match);
1144 actions_len = ntohs(ofm->header.length) - sizeof *ofm;
1146 v_code = validate_actions(dp, &key, ofm->actions, actions_len);
1147 if (v_code != ACT_VALIDATION_OK) {
1148 dp_send_error_msg(dp, sender, OFPET_BAD_ACTION, v_code,
1149 ofm, ntohs(ofm->header.length));
/* Exact-match flows use maximum priority (-1 as unsigned); see add_flow(). */
1153 priority = key.wildcards ? ntohs(ofm->priority) : -1;
1154 strict = (ofm->command == htons(OFPFC_MODIFY_STRICT)) ? 1 : 0;
1155 chain_modify(dp->chain, &key, priority, strict, ofm->actions, actions_len);
1157 if (ntohl(ofm->buffer_id) != UINT32_MAX) {
1158 struct ofpbuf *buffer = retrieve_buffer(ntohl(ofm->buffer_id));
1160 struct sw_flow_key skb_key;
1161 uint16_t in_port = ntohs(ofm->match.in_port);
1162 flow_extract(buffer, in_port, &skb_key.flow);
1163 execute_actions(dp, buffer, &skb_key,
1164 ofm->actions, actions_len, false);
/* Error path: discard any referenced packet buffer so it cannot leak. */
1172 if (ntohl(ofm->buffer_id) != (uint32_t) -1)
1173 discard_buffer(ntohl(ofm->buffer_id));
/* OFPT_FLOW_MOD dispatcher: route to add/modify/delete by 'command'.
 * DELETE uses priority 0 non-strict; DELETE_STRICT matches priority. */
1178 recv_flow(struct datapath *dp, const struct sender *sender,
1181 const struct ofp_flow_mod *ofm = msg;
1182 uint16_t command = ntohs(ofm->command);
1184 if (command == OFPFC_ADD) {
1185 return add_flow(dp, sender, ofm);
1186 } else if ((command == OFPFC_MODIFY) || (command == OFPFC_MODIFY_STRICT)) {
1187 return mod_flow(dp, sender, ofm);
1188 } else if (command == OFPFC_DELETE) {
1189 struct sw_flow_key key;
1190 flow_extract_match(&key, &ofm->match);
1191 return chain_delete(dp->chain, &key, ofm->out_port, 0, 0) ? 0 : -ESRCH;
1192 } else if (command == OFPFC_DELETE_STRICT) {
1193 struct sw_flow_key key;
1195 flow_extract_match(&key, &ofm->match);
1196 priority = key.wildcards ? ntohs(ofm->priority) : -1;
1197 return chain_delete(dp->chain, &key, ofm->out_port,
1198 priority, 1) ? 0 : -ESRCH;
1204 static int desc_stats_dump(struct datapath *dp, void *state,
1205 struct ofpbuf *buffer)
1207 struct ofp_desc_stats *ods = ofpbuf_put_uninit(buffer, sizeof *ods);
1209 strncpy(ods->mfr_desc, &mfr_desc, sizeof ods->mfr_desc);
1210 strncpy(ods->hw_desc, &hw_desc, sizeof ods->hw_desc);
1211 strncpy(ods->sw_desc, &sw_desc, sizeof ods->sw_desc);
1212 strncpy(ods->serial_num, &serial_num, sizeof ods->serial_num);
1217 struct flow_stats_state {
1219 struct sw_table_position position;
1220 struct ofp_flow_stats_request rq;
1223 struct ofpbuf *buffer;
1226 #define MAX_FLOW_STATS_BYTES 4096
1228 static int flow_stats_init(struct datapath *dp, const void *body, int body_len,
1231 const struct ofp_flow_stats_request *fsr = body;
1232 struct flow_stats_state *s = xmalloc(sizeof *s);
1233 s->table_idx = fsr->table_id == 0xff ? 0 : fsr->table_id;
1234 memset(&s->position, 0, sizeof s->position);
1240 static int flow_stats_dump_callback(struct sw_flow *flow, void *private)
1242 struct flow_stats_state *s = private;
1243 fill_flow_stats(s->buffer, flow, s->table_idx, s->now);
1244 return s->buffer->size >= MAX_FLOW_STATS_BYTES;
1247 static int flow_stats_dump(struct datapath *dp, void *state,
1248 struct ofpbuf *buffer)
1250 struct flow_stats_state *s = state;
1251 struct sw_flow_key match_key;
1253 flow_extract_match(&match_key, &s->rq.match);
1255 s->now = time_now();
1256 while (s->table_idx < dp->chain->n_tables
1257 && (s->rq.table_id == 0xff || s->rq.table_id == s->table_idx))
1259 struct sw_table *table = dp->chain->tables[s->table_idx];
1261 if (table->iterate(table, &match_key, s->rq.out_port,
1262 &s->position, flow_stats_dump_callback, s))
1266 memset(&s->position, 0, sizeof s->position);
1268 return s->buffer->size >= MAX_FLOW_STATS_BYTES;
1271 static void flow_stats_done(void *state)
1276 struct aggregate_stats_state {
1277 struct ofp_aggregate_stats_request rq;
1280 static int aggregate_stats_init(struct datapath *dp,
1281 const void *body, int body_len,
1284 const struct ofp_aggregate_stats_request *rq = body;
1285 struct aggregate_stats_state *s = xmalloc(sizeof *s);
/* Per-flow iterator callback: accumulates packet and byte counts into the
 * reply (kept in host byte order here; converted to network order at the
 * end of aggregate_stats_dump()). */
1291 static int aggregate_stats_dump_callback(struct sw_flow *flow, void *private)
1293 struct ofp_aggregate_stats_reply *rpy = private;
1294 rpy->packet_count += flow->packet_count;
1295 rpy->byte_count += flow->byte_count;
/* Produces the single aggregate-stats reply: sums packet/byte/flow counts
 * over every flow matching the request, across the requested table(s). */
1300 static int aggregate_stats_dump(struct datapath *dp, void *state,
1301 struct ofpbuf *buffer)
1303 struct aggregate_stats_state *s = state;
1304 struct ofp_aggregate_stats_request *rq = &s->rq;
1305 struct ofp_aggregate_stats_reply *rpy;
1306 struct sw_table_position position;
1307 struct sw_flow_key match_key;
/* Reserve the reply in the buffer and zero all counters before summing. */
1310 rpy = ofpbuf_put_uninit(buffer, sizeof *rpy);
1311 memset(rpy, 0, sizeof *rpy);
1313 flow_extract_match(&match_key, &rq->match);
/* table_id 0xff means "all tables": begin iteration at table 0. */
1314 table_idx = rq->table_id == 0xff ? 0 : rq->table_id;
1315 memset(&position, 0, sizeof position);
1316 while (table_idx < dp->chain->n_tables
1317 && (rq->table_id == 0xff || rq->table_id == table_idx))
1319 struct sw_table *table = dp->chain->tables[table_idx];
/* The callback always returns 0, so each table is iterated fully. */
1322 error = table->iterate(table, &match_key, rq->out_port, &position,
1323 aggregate_stats_dump_callback, rpy);
/* Restart iteration position for the next table. */
1328 memset(&position, 0, sizeof position);
/* Counters were summed in host order; convert to wire format once. */
1331 rpy->packet_count = htonll(rpy->packet_count);
1332 rpy->byte_count = htonll(rpy->byte_count);
1333 rpy->flow_count = htonl(rpy->flow_count);
/* Releases the per-dump state allocated by aggregate_stats_init(). */
1337 static void aggregate_stats_done(void *state)
/* Appends one ofp_table_stats record per flow table to the reply buffer.
 * Completes in a single call (no resumable cursor is kept). */
1342 static int table_stats_dump(struct datapath *dp, void *state,
1343 struct ofpbuf *buffer)
1346 for (i = 0; i < dp->chain->n_tables; i++) {
1347 struct ofp_table_stats *ots = ofpbuf_put_uninit(buffer, sizeof *ots);
1348 struct sw_table_stats stats;
/* Ask the table implementation for its own statistics snapshot. */
1349 dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
/* NOTE(review): strncpy does not NUL-terminate if stats.name exactly
 * fills ots->name; the wire field may then be unterminated. */
1350 strncpy(ots->name, stats.name, sizeof ots->name);
1352 ots->wildcards = htonl(stats.wildcards);
1353 memset(ots->pad, 0, sizeof ots->pad);
1354 ots->max_entries = htonl(stats.max_flows);
1355 ots->active_count = htonl(stats.n_flows);
1356 ots->lookup_count = htonll(stats.n_lookup);
1357 ots->matched_count = htonll(stats.n_matched);
/* Per-dump state for OFPST_PORT; its 'port' member (used by
 * port_stats_dump() below as the resume index) is elided in this view. */
1362 struct port_stats_state {
/* Prepares per-dump state for a port-stats request; freed by
 * port_stats_done().  (Body largely elided in this fragment.) */
1366 static int port_stats_init(struct datapath *dp, const void *body, int body_len,
1369 struct port_stats_state *s = xmalloc(sizeof *s);
/* Appends one ofp_port_stats record for 'port' to 'buffer'.  Counters the
 * userspace switch does not track are reported as all-ones (-1). */
1376 dump_port_stats(struct sw_port *port, struct ofpbuf *buffer)
1378 struct ofp_port_stats *ops = ofpbuf_put_uninit(buffer, sizeof *ops);
1379 ops->port_no = htons(port->port_no);
1380 memset(ops->pad, 0, sizeof ops->pad);
/* Counters actually maintained per port by this implementation. */
1381 ops->rx_packets = htonll(port->rx_packets);
1382 ops->tx_packets = htonll(port->tx_packets);
1383 ops->rx_bytes = htonll(port->rx_bytes);
1384 ops->tx_bytes = htonll(port->tx_bytes);
/* Everything below (except tx_dropped) is unsupported here: -1 on the wire. */
1385 ops->rx_dropped = htonll(-1);
1386 ops->tx_dropped = htonll(port->tx_dropped);
1387 ops->rx_errors = htonll(-1);
1388 ops->tx_errors = htonll(-1);
1389 ops->rx_frame_err = htonll(-1);
1390 ops->rx_over_err = htonll(-1);
1391 ops->rx_crc_err = htonll(-1);
1392 ops->collisions = htonll(-1);
/* Fills 'buffer' with per-port stats, resuming from s->port so a dump can
 * span multiple replies.  The local port is emitted last. */
1395 static int port_stats_dump(struct datapath *dp, void *state,
1396 struct ofpbuf *buffer)
1398 struct port_stats_state *s = state;
/* Resume the walk over physical ports where the previous call stopped. */
1401 for (i = s->port; i < DP_MAX_PORTS; i++) {
1402 struct sw_port *p = &dp->ports[i];
1404 dump_port_stats(p, buffer);
/* The OFPP_LOCAL pseudo-port, if present, is reported after the
 * physical ports; advancing s->port past it marks the dump finished. */
1409 if (dp->local_port) {
1410 dump_port_stats(dp->local_port, buffer);
1411 s->port = OFPP_LOCAL + 1;
/* Releases the per-dump state allocated by port_stats_init(). */
1416 static void port_stats_done(void *state)
/* NOTE(review): interior of "struct stats_type" — the per-stats-kind
 * callback table used by the stats[] registry; the struct's opening line
 * is not visible in this fragment. */
1422 /* Value for 'type' member of struct ofp_stats_request. */
1425 /* Minimum and maximum acceptable number of bytes in body member of
1426 * struct ofp_stats_request. */
1427 size_t min_body, max_body;
1429 /* Prepares to dump some kind of statistics on 'dp'. 'body' and
1430 * 'body_len' are the 'body' member of the struct ofp_stats_request.
1431 * Returns zero if successful, otherwise a negative error code.
1432 * May initialize '*state' to state information. May be null if no
1433 * initialization is required.*/
1434 int (*init)(struct datapath *dp, const void *body, int body_len,
1437 /* Appends statistics for 'dp' to 'buffer', which initially contains a
1438 * struct ofp_stats_reply. On success, it should return 1 if it should be
1439 * called again later with another buffer, 0 if it is done, or a negative
1440 * errno value on failure. */
1441 int (*dump)(struct datapath *dp, void *state, struct ofpbuf *buffer);
1443 /* Cleans any state created by the init or dump functions. May be null
1444 * if no cleanup is required. */
1445 void (*done)(void *state);
/* Registry mapping each stats request type to its callbacks and acceptable
 * body-size range; recv_stats_request() searches it linearly.  Several
 * entries are elided in this fragment. */
1448 static const struct stats_type stats[] = {
/* Presumably the OFPST_FLOW entry: fixed-size request body. */
1459 sizeof(struct ofp_flow_stats_request),
1460 sizeof(struct ofp_flow_stats_request),
/* OFPST_AGGREGATE entry: fixed-size request body. */
1467 sizeof(struct ofp_aggregate_stats_request),
1468 sizeof(struct ofp_aggregate_stats_request),
1469 aggregate_stats_init,
1470 aggregate_stats_dump,
1471 aggregate_stats_done
/* Context for one in-progress stats dump, passed through the remote dump
 * machinery to stats_dump()/stats_done(). */
1491 struct stats_dump_cb {
/* Copy of the request (xmemdup'd), since the dump outlives the message. */
1493 struct ofp_stats_request *rq;
/* Where to send the replies. */
1494 struct sender sender;
/* Which stats type is being dumped. */
1495 const struct stats_type *s;
/* One round of a stats dump: builds an OFPT_STATS_REPLY, lets the type's
 * dump callback fill it, and sends it.  Invoked repeatedly via
 * remote_start_dump() until the dump callback reports it is done. */
1500 stats_dump(struct datapath *dp, void *cb_)
1502 struct stats_dump_cb *cb = cb_;
1503 struct ofp_stats_reply *osr;
1504 struct ofpbuf *buffer;
1511 osr = make_openflow_reply(sizeof *osr, OFPT_STATS_REPLY, &cb->sender,
1513 osr->type = htons(cb->s->type);
/* Positive return means "more data, call me again with another buffer". */
1516 err = cb->s->dump(dp, cb->state, buffer);
1522 /* Buffer might have been reallocated, so find our data again. */
1523 osr = ofpbuf_at_assert(buffer, 0, sizeof *osr);
/* NOTE(review): ntohs() here looks like it was meant to be htons();
 * the two are identical on the common platforms, so it works anyway. */
1524 osr->flags = ntohs(OFPSF_REPLY_MORE);
1526 err2 = send_openflow_buffer(dp, buffer, &cb->sender);
/* Tears down a stats dump: runs the type's done() hook (when non-null) to
 * free its state.  (Freeing of cb itself is elided in this fragment.) */
1536 stats_done(void *cb_)
1538 struct stats_dump_cb *cb = cb_;
1541 cb->s->done(cb->state);
/* Handles an OFPT_STATS_REQUEST: looks up the stats type, validates the
 * request body length, runs the type's init hook, and kicks off an
 * asynchronous dump via remote_start_dump(). */
1548 recv_stats_request(struct datapath *dp, const struct sender *sender,
1551 const struct ofp_stats_request *rq = oh;
1552 size_t rq_len = ntohs(rq->header.length);
1553 const struct stats_type *st;
1554 struct stats_dump_cb *cb;
/* Linear search of the stats[] registry for the requested type. */
1558 type = ntohs(rq->type);
1559 for (st = stats; ; st++) {
1560 if (st >= &stats[ARRAY_SIZE(stats)]) {
1561 VLOG_WARN_RL(&rl, "received stats request of unknown type %d",
1564 } else if (type == st->type) {
/* Copy the request and sender: the dump runs after this handler returns. */
1569 cb = xmalloc(sizeof *cb);
1571 cb->rq = xmemdup(rq, rq_len);
1572 cb->sender = *sender;
/* Body is whatever follows the fixed ofp_stats_request header; it must
 * fall within the type's declared [min_body, max_body] range. */
1576 body_len = rq_len - offsetof(struct ofp_stats_request, body);
1577 if (body_len < cb->s->min_body || body_len > cb->s->max_body) {
1578 VLOG_WARN_RL(&rl, "stats request type %d with bad body length %d",
/* init may allocate per-dump state; a negative return aborts the dump. */
1585 err = cb->s->init(dp, rq->body, body_len, &cb->state);
1588 "failed initialization of stats request type %d: %s",
1589 type, strerror(-err));
/* Replies are produced incrementally by stats_dump()/stats_done(). */
1594 remote_start_dump(sender->remote, stats_dump, stats_done, cb);
/* Handles OFPT_ECHO_REQUEST by echoing the message back to the sender. */
1604 recv_echo_request(struct datapath *dp, const struct sender *sender,
1607 return send_openflow_buffer(dp, make_echo_reply(oh), sender);
/* Handles OFPT_ECHO_REPLY: nothing to do, the reply is simply accepted. */
1611 recv_echo_reply(struct datapath *dp UNUSED, const struct sender *sender UNUSED,
1612 const void *oh UNUSED)
1617 /* 'msg', which is 'length' bytes long, was received from the control path.
1618 * Apply it to 'chain'. */
1620 fwd_control_input(struct datapath *dp, const struct sender *sender,
1621 const void *msg, size_t length)
1623 int (*handler)(struct datapath *, const struct sender *, const void *);
1624 struct ofp_header *oh;
1627 /* Check encapsulated length. */
1628 oh = (struct ofp_header *) msg;
/* Reject a message whose self-declared length exceeds the bytes received. */
1629 if (ntohs(oh->length) > length) {
1632 assert(oh->version == OFP_VERSION);
/* Dispatch on message type; each case records the minimum acceptable
 * message size, checked once after the switch. */
1634 /* Figure out how to handle it. */
1636 case OFPT_FEATURES_REQUEST:
1637 min_size = sizeof(struct ofp_header);
1638 handler = recv_features_request;
1640 case OFPT_GET_CONFIG_REQUEST:
1641 min_size = sizeof(struct ofp_header);
1642 handler = recv_get_config_request;
1644 case OFPT_SET_CONFIG:
1645 min_size = sizeof(struct ofp_switch_config);
1646 handler = recv_set_config;
1648 case OFPT_PACKET_OUT:
1649 min_size = sizeof(struct ofp_packet_out);
1650 handler = recv_packet_out;
1653 min_size = sizeof(struct ofp_flow_mod);
1654 handler = recv_flow;
1657 min_size = sizeof(struct ofp_port_mod);
1658 handler = recv_port_mod;
1660 case OFPT_STATS_REQUEST:
1661 min_size = sizeof(struct ofp_stats_request);
1662 handler = recv_stats_request;
1664 case OFPT_ECHO_REQUEST:
1665 min_size = sizeof(struct ofp_header);
1666 handler = recv_echo_request;
1668 case OFPT_ECHO_REPLY:
1669 min_size = sizeof(struct ofp_header);
1670 handler = recv_echo_reply;
/* Unknown message type: report OFPET_BAD_REQUEST/OFPBRC_BAD_TYPE. */
1673 dp_send_error_msg(dp, sender, OFPET_BAD_REQUEST, OFPBRC_BAD_TYPE,
/* Common size check, then hand off to the chosen handler. */
1679 if (length < min_size)
1681 return handler(dp, sender, msg);
1684 /* Packet buffering. */
/* Minimum age before an occupied buffer slot may be reclaimed. */
1686 #define OVERWRITE_SECS 1
/* One slot in the buffered-packet ring (cookie/timeout fields elided). */
1688 struct packet_buffer {
1689 struct ofpbuf *buffer;
/* Fixed-size ring of buffered packets, indexed round-robin. */
1694 static struct packet_buffer buffers[N_PKT_BUFFERS];
1695 static unsigned int buffer_idx;
/* Stashes a copy of 'buffer' in the packet-buffer ring and returns an id
 * combining the slot index with a per-slot cookie, so a stale id for a
 * reused slot can be detected by retrieve_buffer()/discard_buffer(). */
1697 uint32_t save_buffer(struct ofpbuf *buffer)
1699 struct packet_buffer *p;
/* Advance round-robin to the next slot. */
1702 buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
1703 p = &buffers[buffer_idx];
1705 /* Don't buffer packet if existing entry is less than
1706 * OVERWRITE_SECS old. */
1707 if (time_now() < p->timeout) { /* FIXME */
/* Slot is old enough to reclaim: drop the previous occupant. */
1710 ofpbuf_delete(p->buffer);
1713 /* Don't use maximum cookie value since the all-bits-1 id is
1715 if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
1717 p->buffer = ofpbuf_clone(buffer); /* FIXME */
1718 p->timeout = time_now() + OVERWRITE_SECS; /* FIXME */
/* Id layout: low bits = slot index, high bits = cookie. */
1719 id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
/* Looks up a buffered packet by id.  The cookie embedded in the id must
 * match the slot's current cookie, otherwise the slot was reused and NULL
 * is (presumably, in elided code) returned. */
1724 static struct ofpbuf *retrieve_buffer(uint32_t id)
1726 struct ofpbuf *buffer = NULL;
1727 struct packet_buffer *p;
/* Low bits of the id select the ring slot; high bits carry the cookie. */
1729 p = &buffers[id & PKT_BUFFER_MASK];
1730 if (p->cookie == id >> PKT_BUFFER_BITS) {
/* NOTE(review): debug output via raw printf rather than the VLOG
 * machinery used elsewhere in this file. */
1734 printf("cookie mismatch: %x != %x\n",
1735 id >> PKT_BUFFER_BITS, p->cookie);
1741 static void discard_buffer(uint32_t id)
1743 struct packet_buffer *p;
1745 p = &buffers[id & PKT_BUFFER_MASK];
1746 if (p->cookie == id >> PKT_BUFFER_BITS) {
1747 ofpbuf_delete(p->buffer);