1 /* Copyright (c) 2008 The Board of Trustees of The Leland Stanford
4 * We are making the OpenFlow specification and associated documentation
5 * (Software) available for public use and benefit with the expectation
6 * that others will use, modify and enhance the Software and contribute
7 * those enhancements back to the community. However, since we would
8 * like to make the Software available for broadest use, with as few
9 * restrictions as possible permission is hereby granted, free of
10 * charge, to any person obtaining a copy of this Software to deal in
11 * the Software under the copyrights without restriction, including
12 * without limitation the rights to use, copy, modify, merge, publish,
13 * distribute, sublicense, and/or sell copies of the Software, and to
14 * permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
17 * The above copyright notice and this permission notice shall be
18 * included in all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
21 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
23 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
24 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
25 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
26 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
29 * The name and trademarks of copyright holder(s) may NOT be used in
30 * advertising or publicity pertaining to the Software or any
31 * derivatives without specific, written prior permission.
35 #include <arpa/inet.h>
45 #include "poll-loop.h"
51 #define THIS_MODULE VLM_datapath
/* Port flag: when set on a sw_port, the port is excluded from
 * OFPP_FLOOD output (see flood()). */
#define BRIDGE_PORT_NO_FLOOD    0x00000001

/* Capabilities supported by this implementation. */
#define OFP_SUPPORTED_CAPABILITIES (OFPC_MULTI_PHY_TX)

/* Actions supported by this implementation, expressed as a bitmap
 * indexed by OFPAT_* action type (advertised in the features reply). */
#define OFP_SUPPORTED_ACTIONS ( (1 << OFPAT_OUTPUT)         \
                                | (1 << OFPAT_SET_DL_VLAN)  \
                                | (1 << OFPAT_SET_DL_SRC)   \
                                | (1 << OFPAT_SET_DL_DST)   \
                                | (1 << OFPAT_SET_NW_SRC)   \
                                | (1 << OFPAT_SET_NW_DST)   \
                                | (1 << OFPAT_SET_TP_SRC)   \
                                | (1 << OFPAT_SET_TP_DST) )
    /* One physical switch port. */
    struct netdev *netdev;                /* Underlying network device. */
    struct list node;                     /* Element in datapath.ports. */
    unsigned long long int rx_count, tx_count, drop_count; /* Port counters. */

/* The origin of a received OpenFlow message, to enable sending a reply. */
    struct remote *remote;                /* The device that sent the message. */
    uint32_t xid;                         /* The OpenFlow transaction ID. */

/* A connection to a controller or a management device. */
    /* Support for reliable, multi-message replies to requests.
     *
     * If an incoming request needs to have a reliable reply that might
     * require multiple messages, it can use remote_start_dump() to set up
     * a callback that will be called as buffer space for replies. */
    int (*cb_dump)(struct datapath *, void *aux);  /* Produces one reply chunk. */
    void (*cb_done)(void *aux);                    /* Cleans up dump state. */

    /* Remote connections. */
    struct remote *controller;            /* Connection to controller. */
    struct list remotes;                  /* All connections (including controller). */
    struct vconn *listen_vconn;           /* Accepts passive management connections. */

    /* Unique identifier for this datapath */
    struct sw_chain *chain;               /* Forwarding rules. */
    struct ofp_switch_config config;      /* Switch configuration (network byte order). */
    struct sw_port ports[OFPP_MAX];       /* Ports, indexed by port number. */
    struct list port_list;                /* List of ports, for flooding. */
/* Forward declarations. */
static struct remote *remote_create(struct datapath *, struct rconn *);
static void remote_run(struct datapath *, struct remote *);
static void remote_wait(struct remote *);
static void remote_destroy(struct remote *);

void dp_output_port(struct datapath *, struct buffer *,
                    int in_port, int out_port);
void dp_update_port_flags(struct datapath *dp, const struct ofp_phy_port *opp);
void dp_output_control(struct datapath *, struct buffer *, int in_port,
                       size_t max_len, int reason);
static void send_flow_expired(struct datapath *, struct sw_flow *);
static void send_port_status(struct sw_port *p, uint8_t status);
static void del_switch_port(struct sw_port *p);
static void execute_actions(struct datapath *, struct buffer *,
                            int in_port, const struct sw_flow_key *,
                            const struct ofp_action *, int n_actions);
static void modify_vlan(struct buffer *buffer, const struct sw_flow_key *key,
                        const struct ofp_action *a);
static void modify_nh(struct buffer *buffer, uint16_t eth_proto,
                      uint8_t nw_proto, const struct ofp_action *a);
static void modify_th(struct buffer *buffer, uint16_t eth_proto,
                      uint8_t nw_proto, const struct ofp_action *a);

/* Buffers are identified to userspace by a 31-bit opaque ID.  We divide the ID
 * into a buffer number (low bits) and a cookie (high bits).  The buffer number
 * is an index into an array of buffers.  The cookie distinguishes between
 * different packets that have occupied a single buffer.  Thus, the more
 * buffers we have, the lower-quality the cookie... */
#define PKT_BUFFER_BITS 8
#define N_PKT_BUFFERS (1 << PKT_BUFFER_BITS)
#define PKT_BUFFER_MASK (N_PKT_BUFFERS - 1)

#define PKT_COOKIE_BITS (32 - PKT_BUFFER_BITS)

void fwd_port_input(struct datapath *, struct buffer *, int in_port);
int fwd_control_input(struct datapath *, const struct sender *,
                      const void *, size_t);

uint32_t save_buffer(struct buffer *);
static struct buffer *retrieve_buffer(uint32_t id);
static void discard_buffer(uint32_t id);
/* Returns the port number of 'p' within 'dp', i.e. its index into
 * dp->ports[].  Asserts that 'p' actually points inside the array. */
static int port_no(struct datapath *dp, struct sw_port *p)
    assert(p >= dp->ports && p < &dp->ports[ARRAY_SIZE(dp->ports)]);
    return p - dp->ports;

/* Generates a unique datapath id.  It incorporates the datapath index
 * and a hardware address, if available.  If not, it generates a random
 * id. */
gen_datapath_id(void)
    /* Choose a random datapath id. */
    /* Pack one random byte per Ethernet-address octet, most significant
     * octet first, into the low 48 bits of the id. */
    for (i = 0; i < ETH_ADDR_LEN; i++) {
        id |= (uint64_t)(rand() & 0xff) << (8*(ETH_ADDR_LEN-1 - i));

/* Creates a new datapath with id 'dpid' (or a generated id if 'dpid' does
 * not fit in 48 bits), connected to the controller via 'rconn'.
 * On success stores the new datapath in '*dp_'. */
dp_new(struct datapath **dp_, uint64_t dpid, struct rconn *rconn)
    dp = calloc(1, sizeof *dp);
    dp->last_timeout = time(0);
    list_init(&dp->remotes);
    dp->controller = remote_create(dp, rconn);
    dp->listen_vconn = NULL;
    /* A datapath id must fit in 48 bits; otherwise generate one. */
    dp->id = dpid <= UINT64_C(0xffffffffffff) ? dpid : gen_datapath_id();
    dp->chain = chain_create();
    VLOG_ERR("could not create chain");
    list_init(&dp->port_list);
    dp->config.flags = 0;
    dp->config.miss_send_len = htons(OFP_DEFAULT_MISS_SEND_LEN);
/* Opens network device 'name' and adds it to 'dp' as a switch port,
 * using the first free slot in dp->ports[].  Notifies the controller
 * of the new port. */
dp_add_port(struct datapath *dp, const char *name)
    struct netdev *netdev;

    error = netdev_open(name, &netdev);
    /* Find the first unused slot in dp->ports[]. */
    for (p = dp->ports; ; p++) {
        if (p >= &dp->ports[ARRAY_SIZE(dp->ports)]) {
        } else if (!p->netdev) {
    list_push_back(&dp->port_list, &p->node);

    /* Notify the ctlpath that this port has been added */
    send_port_status(p, OFPPR_ADD);

/* Registers 'listen_vconn' as the passive vconn on which 'dp' accepts
 * new management connections.  May be called at most once. */
dp_add_listen_vconn(struct datapath *dp, struct vconn *listen_vconn)
    assert(!dp->listen_vconn);
    dp->listen_vconn = listen_vconn;
/* Performs one pass of periodic datapath work: expires flows (at most
 * once per second), receives packets from each port and forwards them,
 * services remote connections, and accepts new passive connections. */
dp_run(struct datapath *dp)
    time_t now = time(0);
    struct sw_port *p, *pn;
    struct remote *r, *rn;
    struct buffer *buffer = NULL;

    /* At most once per second: time out flows and tell the controller
     * about each expired flow before freeing it. */
    if (now != dp->last_timeout) {
        struct list deleted = LIST_INITIALIZER(&deleted);
        struct sw_flow *f, *n;

        chain_timeout(dp->chain, &deleted);
        LIST_FOR_EACH_SAFE (f, n, struct sw_flow, node, &deleted) {
            send_flow_expired(dp, f);
            list_remove(&f->node);
        dp->last_timeout = now;
    poll_timer_wait(1000);

    LIST_FOR_EACH_SAFE (p, pn, struct sw_port, node, &dp->port_list) {
        /* Allocate buffer with some headroom to add headers in forwarding
         * to the controller or adding a vlan tag, plus an extra 2 bytes to
         * allow IP headers to be aligned on a 4-byte boundary. */
        const int headroom = 128 + 2;
        const int hard_header = VLAN_ETH_HEADER_LEN;
        const int mtu = netdev_get_mtu(p->netdev);
        buffer = buffer_new(headroom + hard_header + mtu);
        buffer->data += headroom;
        error = netdev_recv(p->netdev, buffer);
        fwd_port_input(dp, buffer, port_no(dp, p));
        } else if (error != EAGAIN) {
            VLOG_ERR("Error receiving data from %s: %s",
                     netdev_get_name(p->netdev), strerror(error));
    buffer_delete(buffer);

    /* Talk to remotes. */
    LIST_FOR_EACH_SAFE (r, rn, struct remote, node, &dp->remotes) {
    /* Accept a new passive management connection, if one is pending. */
    if (dp->listen_vconn) {
        struct vconn *new_vconn;
        retval = vconn_accept(dp->listen_vconn, &new_vconn);
        if (retval != EAGAIN) {
            VLOG_WARN("accept failed (%s)", strerror(retval));
        remote_create(dp, rconn_new_from_vconn("passive", 128, new_vconn));
/* Performs periodic work for remote connection 'r': processes a bounded
 * number of incoming OpenFlow messages and continues any in-progress
 * dump, destroying 'r' if its connection has died. */
remote_run(struct datapath *dp, struct remote *r)
    /* Do some remote processing, but cap it at a reasonable amount so that
     * other processing doesn't starve. */
    for (i = 0; i < 50; i++) {
        struct buffer *buffer;
        struct ofp_header *oh;

        buffer = rconn_recv(r->rconn);
        /* Dispatch only messages at least as long as an OpenFlow header. */
        if (buffer->size >= sizeof *oh) {
            struct sender sender;
            sender.xid = oh->xid;
            fwd_control_input(dp, &sender, buffer->data, buffer->size);
            VLOG_WARN("received too-short OpenFlow message");
        buffer_delete(buffer);
        /* Continue a multi-message dump whenever the send queue has room. */
        if (!rconn_is_full(r->rconn)) {
            int error = r->cb_dump(dp, r->cb_aux);
            VLOG_WARN("dump callback error: %s", strerror(-error));
            r->cb_done(r->cb_aux);
    if (!rconn_is_alive(r->rconn)) {

/* Arranges to wake up when 'r' has work to do. */
remote_wait(struct remote *r)
    rconn_run_wait(r->rconn);
    rconn_recv_wait(r->rconn);

/* Destroys 'r', finishing any in-progress dump first so its resources
 * are released. */
remote_destroy(struct remote *r)
    if (r->cb_dump && r->cb_done) {
        r->cb_done(r->cb_aux);
    list_remove(&r->node);
    rconn_destroy(r->rconn);

/* Creates a remote wrapping 'rconn' and adds it to 'dp''s list of
 * remote connections.  Takes ownership of 'rconn'. */
static struct remote *
remote_create(struct datapath *dp, struct rconn *rconn)
    struct remote *remote = xmalloc(sizeof *remote);
    list_push_back(&dp->remotes, &remote->node);
    remote->rconn = rconn;
    remote->cb_dump = NULL;

/* Starts a callback-based, reliable, possibly multi-message reply to a
 * request made by 'remote'.
 *
 * 'dump' designates a function that will be called when the 'remote' send
 * queue has an empty slot.  It should compose a message and send it on
 * 'remote'.  On success, it should return 1 if it should be called again when
 * another send queue slot opens up, 0 if its transmissions are complete, or a
 * negative errno value on failure.
 *
 * 'done' designates a function to clean up any resources allocated for the
 * dump.  It must handle being called before the dump is complete (which will
 * happen if 'remote' is closed unexpectedly).
 *
 * 'aux' is passed to 'dump' and 'done'. */
remote_start_dump(struct remote *remote,
                  int (*dump)(struct datapath *, void *),
                  void (*done)(void *),
    /* Only one dump may be in progress per remote at a time. */
    assert(!remote->cb_dump);
    remote->cb_dump = dump;
    remote->cb_done = done;
    remote->cb_aux = aux;

/* Arranges to wake up when any of 'dp''s ports, remotes, or listening
 * vconn has activity. */
dp_wait(struct datapath *dp)
    LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
        netdev_recv_wait(p->netdev);
    LIST_FOR_EACH (r, struct remote, node, &dp->remotes) {
    if (dp->listen_vconn) {
        vconn_accept_wait(dp->listen_vconn);
/* Delete 'p' from switch. */
del_switch_port(struct sw_port *p)
    /* Tell the controller before tearing the port down. */
    send_port_status(p, OFPPR_DELETE);
    netdev_close(p->netdev);
    list_remove(&p->node);

/* Destroys 'dp', closing all its ports and freeing its flow table. */
dp_destroy(struct datapath *dp)
    struct sw_port *p, *n;

    LIST_FOR_EACH_SAFE (p, n, struct sw_port, node, &dp->port_list) {
    chain_destroy(dp->chain);

/* Sends 'buffer' out of every port except 'in_port' and ports marked
 * BRIDGE_PORT_NO_FLOOD.  Takes ownership of 'buffer'.  Clones the buffer
 * for every port but the last, so the original is consumed exactly once. */
flood(struct datapath *dp, struct buffer *buffer, int in_port)
    LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
        if (port_no(dp, p) == in_port || p->flags & BRIDGE_PORT_NO_FLOOD) {
        /* Send a clone to the previously remembered port; keep the
         * original for the final transmission below. */
        if (prev_port != -1) {
            dp_output_port(dp, buffer_clone(buffer), in_port, prev_port);
        prev_port = port_no(dp, p);
    dp_output_port(dp, buffer, in_port, prev_port);
    buffer_delete(buffer);
/* Transmits 'buffer' on physical port 'out_port' of 'dp', if that port
 * is valid and open.  Takes ownership of 'buffer' (deleted on failure). */
output_packet(struct datapath *dp, struct buffer *buffer, int out_port)
    if (out_port >= 0 && out_port < OFPP_MAX) {
        struct sw_port *p = &dp->ports[out_port];
        if (p->netdev != NULL) {
            if (!netdev_send(p->netdev, buffer)) {
    buffer_delete(buffer);
    /* FIXME: ratelimit */
    VLOG_DBG("can't forward to bad port %d\n", out_port);

/* Takes ownership of 'buffer' and transmits it to 'out_port' on 'dp'.
 * Handles the virtual ports OFPP_FLOOD (flood), OFPP_CONTROLLER (send to
 * controller), and OFPP_TABLE (re-run the flow table); anything else is
 * treated as a physical port. */
dp_output_port(struct datapath *dp, struct buffer *buffer,
               int in_port, int out_port)
    if (out_port == OFPP_FLOOD) {
        flood(dp, buffer, in_port);
    } else if (out_port == OFPP_CONTROLLER) {
        dp_output_control(dp, buffer, in_port, 0, OFPR_ACTION);
    } else if (out_port == OFPP_TABLE) {
        struct sw_flow_key key;
        struct sw_flow *flow;

        /* Look the packet up in the flow table and apply the matching
         * flow's actions, as if it had arrived on 'in_port'. */
        flow_extract(buffer, in_port, &key.flow);
        flow = chain_lookup(dp->chain, &key);
        flow_used(flow, buffer);
        execute_actions(dp, buffer, in_port, &key,
                        flow->actions, flow->n_actions);
        output_packet(dp, buffer, out_port);
/* Allocates a buffer of 'openflow_len' bytes, stores it in '*bufferp',
 * and initializes its OpenFlow header with 'type' and the transaction id
 * taken from 'sender' (0 if 'sender' is null).  The length field is left
 * for send_openflow_buffer() to fill in. */
alloc_openflow_buffer(struct datapath *dp, size_t openflow_len, uint8_t type,
                      const struct sender *sender, struct buffer **bufferp)
    struct buffer *buffer;
    struct ofp_header *oh;

    buffer = *bufferp = buffer_new(openflow_len);
    oh = buffer_put_uninit(buffer, openflow_len);
    oh->version = OFP_VERSION;
    oh->length = 0;             /* Filled in by send_openflow_buffer(). */
    oh->xid = sender ? sender->xid : 0;

/* Finalizes the OpenFlow length field of 'buffer' and sends it on the
 * connection that 'sender' arrived on, or to the controller if 'sender'
 * is null.  Takes ownership of 'buffer'. */
send_openflow_buffer(struct datapath *dp, struct buffer *buffer,
                     const struct sender *sender)
    struct remote *remote = sender ? sender->remote : dp->controller;
    struct rconn *rconn = remote->rconn;
    struct ofp_header *oh;

    oh = buffer_at_assert(buffer, 0, sizeof *oh);
    oh->length = htons(buffer->size);
    retval = rconn_send(rconn, buffer);
    VLOG_WARN("send to %s failed: %s",
              rconn_get_name(rconn), strerror(retval));
    buffer_delete(buffer);

/* Takes ownership of 'buffer' and transmits it to 'dp''s controller.  If the
 * packet can be saved in a buffer, then only the first max_len bytes of
 * 'buffer' are sent; otherwise, all of 'buffer' is sent.  'reason' indicates
 * why 'buffer' is being sent. 'max_len' sets the maximum number of bytes that
 * the caller wants to be sent; a value of 0 indicates the entire packet should
 * be sent. */
dp_output_control(struct datapath *dp, struct buffer *buffer, int in_port,
                  size_t max_len, int reason)
    struct ofp_packet_in *opi;

    buffer_id = save_buffer(buffer);
    total_len = buffer->size;
    /* Truncate to 'max_len' only if the packet was buffered, so the
     * controller can fetch the rest later. */
    if (buffer_id != UINT32_MAX && buffer->size > max_len) {
        buffer->size = max_len;

    /* Prepend the packet_in header in front of the packet data. */
    opi = buffer_push_uninit(buffer, offsetof(struct ofp_packet_in, data));
    opi->header.version = OFP_VERSION;
    opi->header.type = OFPT_PACKET_IN;
    opi->header.length = htons(buffer->size);
    opi->header.xid = htonl(0);
    opi->buffer_id = htonl(buffer_id);
    opi->total_len = htons(total_len);
    opi->in_port = htons(in_port);
    opi->reason = reason;
    send_openflow_buffer(dp, buffer, NULL);
/* Fills '*desc' with an OpenFlow physical-port description of 'p',
 * converting multi-byte fields to network byte order. */
static void fill_port_desc(struct datapath *dp, struct sw_port *p,
                           struct ofp_phy_port *desc)
    desc->port_no = htons(port_no(dp, p));
    /* NOTE(review): strncpy may leave desc->name unterminated, hence the
     * explicit termination on the next visible line. */
    strncpy((char *) desc->name, netdev_get_name(p->netdev),
    desc->name[sizeof desc->name - 1] = '\0';
    memcpy(desc->hw_addr, netdev_get_etheraddr(p->netdev), ETH_ADDR_LEN);
    desc->flags = htonl(p->flags);
    desc->features = htonl(netdev_get_features(p->netdev));
    desc->speed = htonl(netdev_get_speed(p->netdev));

/* Sends an OFPT_FEATURES_REPLY describing 'dp' (datapath id, table sizes,
 * buffering, capabilities, actions, and one ofp_phy_port per port) to
 * 'sender'. */
dp_send_features_reply(struct datapath *dp, const struct sender *sender)
    struct buffer *buffer;
    struct ofp_switch_features *ofr;

    ofr = alloc_openflow_buffer(dp, sizeof *ofr, OFPT_FEATURES_REPLY,
    ofr->datapath_id = htonll(dp->id);
    ofr->n_exact = htonl(2 * TABLE_HASH_MAX_FLOWS);
    ofr->n_compression = 0;             /* Not supported */
    ofr->n_general = htonl(TABLE_LINEAR_MAX_FLOWS);
    ofr->buffer_mb = htonl(UINT32_MAX);
    ofr->n_buffers = htonl(N_PKT_BUFFERS);
    ofr->capabilities = htonl(OFP_SUPPORTED_CAPABILITIES);
    ofr->actions = htonl(OFP_SUPPORTED_ACTIONS);
    /* Append a port description for every port. */
    LIST_FOR_EACH (p, struct sw_port, node, &dp->port_list) {
        struct ofp_phy_port *opp = buffer_put_uninit(buffer, sizeof *opp);
        memset(opp, 0, sizeof *opp);
        fill_port_desc(dp, p, opp);
    send_openflow_buffer(dp, buffer, sender);

/* Updates the flags of the port described by 'opp' (fields in network
 * byte order), after verifying the hardware address still matches. */
dp_update_port_flags(struct datapath *dp, const struct ofp_phy_port *opp)
    /* NOTE(review): htons()/htonl() here are used where ntohs()/ntohl()
     * would be conventional; the byte-swap is numerically identical on
     * the usual platforms, but confirm intent. */
    p = &dp->ports[htons(opp->port_no)];

    /* Make sure the port id hasn't changed since this was sent */
    if (!p || memcmp(opp->hw_addr, netdev_get_etheraddr(p->netdev),
    p->flags = htonl(opp->flags);

/* Sends an OFPT_PORT_STATUS message announcing that port 'p' changed
 * ('status' is one of OFPPR_ADD / OFPPR_DELETE / OFPPR_MOD). */
send_port_status(struct sw_port *p, uint8_t status)
    struct buffer *buffer;
    struct ofp_port_status *ops;
    ops = alloc_openflow_buffer(p->dp, sizeof *ops, OFPT_PORT_STATUS, NULL,
    ops->reason = status;
    fill_port_desc(p->dp, p, &ops->desc);
    send_openflow_buffer(p->dp, buffer, NULL);
/* Notifies the controller that 'flow' has expired, reporting its match,
 * lifetime, and counters in an OFPT_FLOW_EXPIRED message. */
send_flow_expired(struct datapath *dp, struct sw_flow *flow)
    struct buffer *buffer;
    struct ofp_flow_expired *ofe;
    ofe = alloc_openflow_buffer(dp, sizeof *ofe, OFPT_FLOW_EXPIRED, NULL,
    flow_fill_match(&ofe->match, &flow->key);
    /* timeout - max_idle is the time the flow was last used; subtracting
     * 'created' yields the flow's lifetime in seconds. */
    ofe->duration = htonl(flow->timeout - flow->max_idle - flow->created);
    ofe->packet_count = htonll(flow->packet_count);
    ofe->byte_count = htonll(flow->byte_count);
    send_openflow_buffer(dp, buffer, NULL);

/* Sends an OFPT_ERROR_MSG with the given 'type' and 'code' to 'sender',
 * echoing back the 'len' bytes of 'data' (typically the offending
 * request). */
dp_send_error_msg(struct datapath *dp, const struct sender *sender,
                  uint16_t type, uint16_t code, const uint8_t *data, size_t len)
    struct buffer *buffer;
    struct ofp_error_msg *oem;
    oem = alloc_openflow_buffer(dp, sizeof(*oem)+len, OFPT_ERROR_MSG,
    oem->type = htons(type);
    oem->code = htons(code);
    memcpy(oem->data, data, len);
    send_openflow_buffer(dp, buffer, sender);

/* Fills '*ofs' with statistics for 'flow' in table 'table_idx', for a
 * flow-stats reply.  Match fields stored in the key are already in
 * network byte order and are copied through unchanged. */
fill_flow_stats(struct ofp_flow_stats *ofs, struct sw_flow *flow,
                int table_idx, time_t now)
    ofs->match.wildcards = htons(flow->key.wildcards);
    ofs->match.in_port   = flow->key.flow.in_port;
    memcpy(ofs->match.dl_src, flow->key.flow.dl_src, ETH_ADDR_LEN);
    memcpy(ofs->match.dl_dst, flow->key.flow.dl_dst, ETH_ADDR_LEN);
    ofs->match.dl_vlan   = flow->key.flow.dl_vlan;
    ofs->match.dl_type   = flow->key.flow.dl_type;
    ofs->match.nw_src    = flow->key.flow.nw_src;
    ofs->match.nw_dst    = flow->key.flow.nw_dst;
    ofs->match.nw_proto  = flow->key.flow.nw_proto;
    memset(ofs->match.pad, 0, sizeof ofs->match.pad);
    ofs->match.tp_src    = flow->key.flow.tp_src;
    ofs->match.tp_dst    = flow->key.flow.tp_dst;
    ofs->duration     = htonl(now - flow->created);
    ofs->packet_count = htonll(flow->packet_count);
    ofs->byte_count   = htonll(flow->byte_count);
    ofs->priority     = htons(flow->priority);
    ofs->table_id     = table_idx;
    memset(ofs->pad, 0, sizeof ofs->pad);
/* 'buffer' was received on 'in_port', a physical switch port between 0 and
 * OFPP_MAX.  Process it according to 'chain'. */
void fwd_port_input(struct datapath *dp, struct buffer *buffer, int in_port)
    struct sw_flow_key key;
    struct sw_flow *flow;

    flow_extract(buffer, in_port, &key.flow);
    flow = chain_lookup(dp->chain, &key);
    /* On a match, update the flow's counters and run its actions. */
    flow_used(flow, buffer);
    execute_actions(dp, buffer, in_port, &key,
                    flow->actions, flow->n_actions);
    /* Table miss: forward up to 'miss_send_len' bytes to the controller. */
    dp_output_control(dp, buffer, in_port, ntohs(dp->config.miss_send_len),

/* Executes a single OFPAT_OUTPUT: ordinary ports go through
 * dp_output_port(); OFPP_CONTROLLER becomes a packet_in capped at
 * 'max_len' bytes.  Takes ownership of 'buffer'. */
do_output(struct datapath *dp, struct buffer *buffer, int in_port,
          size_t max_len, int out_port)
    if (out_port != OFPP_CONTROLLER) {
        dp_output_port(dp, buffer, in_port, out_port);
        dp_output_control(dp, buffer, in_port, max_len, OFPR_ACTION);
/* Applies the 'n_actions' actions in 'actions' to 'buffer', which arrived
 * on 'in_port' and matched flow key 'key'.  Field-modify actions rewrite
 * the packet in place; output actions are deferred one step so that each
 * output before the last sends a clone and the final one consumes the
 * original buffer.  Takes ownership of 'buffer'. */
execute_actions(struct datapath *dp, struct buffer *buffer,
                int in_port, const struct sw_flow_key *key,
                const struct ofp_action *actions, int n_actions)
    /* Every output action needs a separate clone of 'buffer', but the common
     * case is just a single output action, so that doing a clone and then
     * freeing the original buffer is wasteful.  So the following code is
     * slightly obscure just to avoid that. */
    size_t max_len=0;        /* Initialize to make compiler happy */

    eth_proto = ntohs(key->flow.dl_type);

    for (i = 0; i < n_actions; i++) {
        const struct ofp_action *a = &actions[i];
        struct eth_header *eh = buffer->l2;

        /* Flush the previously remembered output with a clone; the
         * original buffer stays live for later actions. */
        if (prev_port != -1) {
            do_output(dp, buffer_clone(buffer), in_port, max_len, prev_port);

        switch (ntohs(a->type)) {
            /* Remember the output; it is performed on the next iteration
             * or after the loop. */
            prev_port = ntohs(a->arg.output.port);
            max_len = ntohs(a->arg.output.max_len);

        case OFPAT_SET_DL_VLAN:
            modify_vlan(buffer, key, a);

        case OFPAT_SET_DL_SRC:
            memcpy(eh->eth_src, a->arg.dl_addr, sizeof eh->eth_src);

        case OFPAT_SET_DL_DST:
            memcpy(eh->eth_dst, a->arg.dl_addr, sizeof eh->eth_dst);

        case OFPAT_SET_NW_SRC:
        case OFPAT_SET_NW_DST:
            modify_nh(buffer, eth_proto, key->flow.nw_proto, a);

        case OFPAT_SET_TP_SRC:
        case OFPAT_SET_TP_DST:
            modify_th(buffer, eth_proto, key->flow.nw_proto, a);

    /* Perform the final (or only) output with the original buffer, or
     * delete it if there was no output action. */
    do_output(dp, buffer, in_port, max_len, prev_port);
    buffer_delete(buffer);
/* Returns the new checksum for a packet in which the checksum field previously
 * contained 'old_csum' and in which a field that contained 'old_u16' was
 * changed to contain 'new_u16'. */
recalc_csum16(uint16_t old_csum, uint16_t old_u16, uint16_t new_u16)
    /* Ones-complement arithmetic is endian-independent, so this code does not
     * use htons() or ntohs().
     *
     * See RFC 1624 for formula and explanation. */
    uint16_t hc_complement = ~old_csum;
    uint16_t m_complement = ~old_u16;
    uint16_t m_prime = new_u16;
    /* Fold the carry out of the 16-bit sum back in (ones-complement add). */
    uint32_t sum = hc_complement + m_complement + m_prime;
    uint16_t hc_prime_complement = sum + (sum >> 16);
    return ~hc_prime_complement;

/* Returns the new checksum for a packet in which the checksum field previously
 * contained 'old_csum' and in which a field that contained 'old_u32' was
 * changed to contain 'new_u32'. */
recalc_csum32(uint16_t old_csum, uint32_t old_u32, uint32_t new_u32)
    /* A 32-bit change is two independent 16-bit changes. */
    return recalc_csum16(recalc_csum16(old_csum, old_u32, new_u32),
                         old_u32 >> 16, new_u32 >> 16);
/* Rewrites the IPv4 source or destination address of 'buffer' (per
 * a->type) and incrementally updates the IP checksum, plus the TCP or
 * UDP checksum since both cover the address via the pseudo-header.
 * No-op for non-IP packets. */
static void modify_nh(struct buffer *buffer, uint16_t eth_proto,
                      uint8_t nw_proto, const struct ofp_action *a)
    if (eth_proto == ETH_TYPE_IP) {
        struct ip_header *nh = buffer->l3;
        uint32_t new, *field;

        new = a->arg.nw_addr;
        field = a->type == OFPAT_SET_NW_SRC ? &nh->ip_src : &nh->ip_dst;
        if (nw_proto == IP_TYPE_TCP) {
            struct tcp_header *th = buffer->l4;
            th->tcp_csum = recalc_csum32(th->tcp_csum, *field, new);
        } else if (nw_proto == IP_TYPE_UDP) {
            struct udp_header *th = buffer->l4;
            th->udp_csum = recalc_csum32(th->udp_csum, *field, new);
            /* A UDP checksum of 0 means "none"; 0xffff is its
             * transmitted equivalent after recalculation. */
            th->udp_csum = 0xffff;
        nh->ip_csum = recalc_csum32(nh->ip_csum, *field, new);

/* Rewrites the TCP or UDP source or destination port of 'buffer' (per
 * a->type) and incrementally updates the transport checksum.  No-op for
 * non-IP packets. */
static void modify_th(struct buffer *buffer, uint16_t eth_proto,
                      uint8_t nw_proto, const struct ofp_action *a)
    if (eth_proto == ETH_TYPE_IP) {
        uint16_t new, *field;

        if (nw_proto == IP_TYPE_TCP) {
            struct tcp_header *th = buffer->l4;
            field = a->type == OFPAT_SET_TP_SRC ? &th->tcp_src : &th->tcp_dst;
            th->tcp_csum = recalc_csum16(th->tcp_csum, *field, new);
        } else if (nw_proto == IP_TYPE_UDP) {
            struct udp_header *th = buffer->l4;
            field = a->type == OFPAT_SET_TP_SRC ? &th->udp_src : &th->udp_dst;
            th->udp_csum = recalc_csum16(th->udp_csum, *field, new);
/* Applies an OFPAT_SET_DL_VLAN action to 'buffer': sets the VLAN id of
 * an existing tag, inserts a new 802.1Q header if the packet is untagged,
 * or strips the tag entirely when the action's id is OFP_VLAN_NONE. */
modify_vlan(struct buffer *buffer,
            const struct sw_flow_key *key, const struct ofp_action *a)
    uint16_t new_id = a->arg.vlan_id;
    struct vlan_eth_header *veh;

    if (new_id != OFP_VLAN_NONE) {
        if (key->flow.dl_vlan != htons(OFP_VLAN_NONE)) {
            /* Modify vlan id, but maintain other TCI values */
            veh->veth_tci &= ~htons(VLAN_VID);
            veh->veth_tci |= htons(new_id);
            /* Insert new vlan id. */
            /* Build the tagged header in 'tmp', then push room for the
             * extra 4 bytes and copy it into place. */
            struct eth_header *eh = buffer->l2;
            struct vlan_eth_header tmp;
            memcpy(tmp.veth_dst, eh->eth_dst, ETH_ADDR_LEN);
            memcpy(tmp.veth_src, eh->eth_src, ETH_ADDR_LEN);
            tmp.veth_type = htons(ETH_TYPE_VLAN);
            tmp.veth_tci = new_id;
            tmp.veth_next_type = eh->eth_type;

            veh = buffer_push_uninit(buffer, VLAN_HEADER_LEN);
            memcpy(veh, &tmp, sizeof tmp);
            buffer->l2 -= VLAN_HEADER_LEN;
        /* Remove an existing vlan header if it exists */
        if (veh->veth_type == htons(ETH_TYPE_VLAN)) {
            struct eth_header tmp;

            /* Rebuild the plain Ethernet header, then shift the packet
             * start forward over the 4-byte tag. */
            memcpy(tmp.eth_dst, veh->veth_dst, ETH_ADDR_LEN);
            memcpy(tmp.eth_src, veh->veth_src, ETH_ADDR_LEN);
            tmp.eth_type = veh->veth_next_type;
            buffer->size -= VLAN_HEADER_LEN;
            buffer->data += VLAN_HEADER_LEN;
            buffer->l2 += VLAN_HEADER_LEN;
            memcpy(buffer->data, &tmp, sizeof tmp);
/* Handles an OFPT_FEATURES_REQUEST by replying with the switch features. */
recv_features_request(struct datapath *dp, const struct sender *sender,
    dp_send_features_reply(dp, sender);

/* Handles an OFPT_GET_CONFIG_REQUEST by echoing back dp->config (already
 * kept in network byte order) minus its OpenFlow header. */
recv_get_config_request(struct datapath *dp, const struct sender *sender,
    struct buffer *buffer;
    struct ofp_switch_config *osc;

    osc = alloc_openflow_buffer(dp, sizeof *osc, OFPT_GET_CONFIG_REPLY,
    assert(sizeof *osc == sizeof dp->config);
    /* Copy everything after the header from the stored configuration. */
    memcpy(((char *)osc) + sizeof osc->header,
           ((char *)&dp->config) + sizeof dp->config.header,
           sizeof dp->config - sizeof dp->config.header);

    return send_openflow_buffer(dp, buffer, sender);

/* Handles an OFPT_SET_CONFIG, updating dp->config from the message. */
recv_set_config(struct datapath *dp, const struct sender *sender UNUSED,
    const struct ofp_switch_config *osc = msg;

/* Handles an OFPT_PACKET_OUT: sends either the packet data carried in the
 * message (buffer_id == -1) or a previously buffered packet, applying the
 * message's actions in the latter case. */
recv_packet_out(struct datapath *dp, const struct sender *sender UNUSED,
    const struct ofp_packet_out *opo = msg;

    if (ntohl(opo->buffer_id) == (uint32_t) -1) {
        /* FIXME: can we avoid copying data here? */
        int data_len = ntohs(opo->header.length) - sizeof *opo;
        struct buffer *buffer = buffer_new(data_len);
        buffer_put(buffer, opo->u.data, data_len);
        dp_output_port(dp, buffer,
                       ntohs(opo->in_port), ntohs(opo->out_port));
        struct sw_flow_key key;
        struct buffer *buffer;

        buffer = retrieve_buffer(ntohl(opo->buffer_id));
        /* The action list occupies the rest of the message. */
        n_acts = (ntohs(opo->header.length) - sizeof *opo)
                 / sizeof *opo->u.actions;
        flow_extract(buffer, ntohs(opo->in_port), &key.flow);
        execute_actions(dp, buffer, ntohs(opo->in_port),
                        &key, opo->u.actions, n_acts);

/* Handles an OFPT_PORT_MOD by updating the named port's flags. */
recv_port_mod(struct datapath *dp, const struct sender *sender UNUSED,
    const struct ofp_port_mod *opm = msg;

    dp_update_port_flags(dp, &opm->desc);
/* Adds the flow described by 'ofm' to 'dp''s flow table, validating the
 * action list first.  If the message names a buffered packet, that packet
 * is run through the new flow's actions (or discarded on failure).
 * Returns 0 on success or a negative errno value. */
add_flow(struct datapath *dp, const struct ofp_flow_mod *ofm)
    int error = -ENOMEM;
    struct sw_flow *flow;

    /* Check number of actions. */
    n_acts = (ntohs(ofm->header.length) - sizeof *ofm) / sizeof *ofm->actions;
    if (n_acts > MAX_ACTIONS) {

    /* To prevent loops, make sure there's no action to send to the
     * OFP_TABLE virtual port.
     */
    for (i=0; i<n_acts; i++) {
        const struct ofp_action *a = &ofm->actions[i];

        if (a->type == htons(OFPAT_OUTPUT)
            && a->arg.output.port == htons(OFPP_TABLE)) {
            /* xxx Send fancy new error message? */

    /* Allocate memory. */
    flow = flow_alloc(n_acts);

    /* Fill out flow. */
    flow_extract_match(&flow->key, &ofm->match);
    flow->max_idle = ntohs(ofm->max_idle);
    flow->priority = ntohs(ofm->priority);
    flow->timeout = time(0) + flow->max_idle;   /* FIXME */
    flow->n_actions = n_acts;
    flow->created = time(0);                    /* FIXME */
    flow->byte_count = 0;
    flow->packet_count = 0;
    memcpy(flow->actions, ofm->actions, n_acts * sizeof *flow->actions);

    error = chain_insert(dp->chain, flow);
    goto error_free_flow;

    /* If the message references a buffered packet, execute the new flow's
     * actions on it now. */
    if (ntohl(ofm->buffer_id) != UINT32_MAX) {
        struct buffer *buffer = retrieve_buffer(ntohl(ofm->buffer_id));
        struct sw_flow_key key;
        uint16_t in_port = ntohs(ofm->match.in_port);
        flow_used(flow, buffer);
        flow_extract(buffer, in_port, &key.flow);
        execute_actions(dp, buffer, in_port, &key, ofm->actions, n_acts);
    /* Error path: drop any referenced buffered packet. */
    if (ntohl(ofm->buffer_id) != (uint32_t) -1)
        discard_buffer(ntohl(ofm->buffer_id));

/* Handles an OFPT_FLOW_MOD, dispatching on its command: add a flow, or
 * delete matching flows (loosely or strictly).  Returns 0 on success,
 * -ESRCH if a delete matched nothing. */
recv_flow(struct datapath *dp, const struct sender *sender UNUSED,
    const struct ofp_flow_mod *ofm = msg;
    uint16_t command = ntohs(ofm->command);

    if (command == OFPFC_ADD) {
        return add_flow(dp, ofm);
    } else if (command == OFPFC_DELETE) {
        struct sw_flow_key key;
        flow_extract_match(&key, &ofm->match);
        return chain_delete(dp->chain, &key, 0, 0) ? 0 : -ESRCH;
    } else if (command == OFPFC_DELETE_STRICT) {
        struct sw_flow_key key;
        flow_extract_match(&key, &ofm->match);
        /* Strict delete also requires an exact priority match. */
        return chain_delete(dp->chain, &key,
                            ntohs(ofm->priority), 1) ? 0 : -ESRCH;
/* Iteration state carried across calls of a multi-message flow-stats dump. */
struct flow_stats_state {
    struct sw_table_position position;   /* Resume point within the table. */
    struct ofp_flow_stats_request rq;    /* Copy of the original request. */
    struct buffer *buffer;               /* Reply buffer being filled. */
    int n_flows, max_flows;              /* Flows written / per-buffer cap. */

/* Initializes flow-stats dump state from the request body. */
static int flow_stats_init(struct datapath *dp, const void *body, int body_len,
    const struct ofp_flow_stats_request *fsr = body;
    struct flow_stats_state *s = xmalloc(sizeof *s);
    /* table_id 0xff means "all tables": start iterating from table 0. */
    s->table_idx = fsr->table_id == 0xff ? 0 : fsr->table_id;
    memset(&s->position, 0, sizeof s->position);

/* Per-flow iterator callback: appends one ofp_flow_stats record and stops
 * the iteration (returns nonzero) once the per-buffer cap is reached. */
static int flow_stats_dump_callback(struct sw_flow *flow, void *private)
    struct flow_stats_state *s = private;
    struct ofp_flow_stats *ofs = buffer_put_uninit(s->buffer, sizeof *ofs);
    fill_flow_stats(ofs, flow, s->table_idx, s->now);
    return ++s->n_flows >= s->max_flows;

/* Fills 'buffer' with as many flow-stats records as fit, resuming from the
 * saved position.  Returns nonzero if more records remain (call again). */
static int flow_stats_dump(struct datapath *dp, void *state,
                           struct buffer *buffer)
    struct flow_stats_state *s = state;
    struct ofp_flow_stats *ofs;
    struct sw_flow_key match_key;

    s->max_flows = 4096 / sizeof *ofs;
    flow_extract_match(&match_key, &s->rq.match);
    /* Walk the requested table, or every table when table_id is 0xff. */
    while (s->table_idx < dp->chain->n_tables
           && (s->rq.table_id == 0xff || s->rq.table_id == s->table_idx))
        struct sw_table *table = dp->chain->tables[s->table_idx];

        if (table->iterate(table, &match_key, &s->position,
                           flow_stats_dump_callback, s))
        /* Moving to the next table: restart iteration from its beginning. */
        memset(&s->position, 0, sizeof s->position);
    return s->n_flows >= s->max_flows;

/* Frees flow-stats dump state. */
static void flow_stats_done(void *state)

/* Appends one ofp_table_stats record per table to 'buffer'. */
static int table_stats_dump(struct datapath *dp, void *state,
                            struct buffer *buffer)
    for (i = 0; i < dp->chain->n_tables; i++) {
        struct ofp_table_stats *ots = buffer_put_uninit(buffer, sizeof *ots);
        struct sw_table_stats stats;
        dp->chain->tables[i]->stats(dp->chain->tables[i], &stats);
        /* NOTE(review): strncpy does not guarantee NUL-termination if
         * stats.name fills ots->name exactly — verify the wire format
         * tolerates an unterminated name. */
        strncpy(ots->name, stats.name, sizeof ots->name);
        memset(ots->pad, 0, sizeof ots->pad);
        ots->max_entries = htonl(stats.max_flows);
        ots->active_count = htonl(stats.n_flows);
        ots->matched_count = htonll(0);     /* FIXME */
1208 struct port_stats_state {
1212 static int port_stats_init(struct datapath *dp, const void *body, int body_len,
1215 struct port_stats_state *s = xmalloc(sizeof *s);
1221 static int port_stats_dump(struct datapath *dp, void *state,
1222 struct buffer *buffer)
1224 struct port_stats_state *s = state;
1227 for (i = s->port; i < OFPP_MAX; i++) {
1228 struct sw_port *p = &dp->ports[i];
1229 struct ofp_port_stats *ops;
1233 ops = buffer_put_uninit(buffer, sizeof *ops);
1234 ops->port_no = htons(port_no(dp, p));
1235 memset(ops->pad, 0, sizeof ops->pad);
1236 ops->rx_count = htonll(p->rx_count);
1237 ops->tx_count = htonll(p->tx_count);
1238 ops->drop_count = htonll(p->drop_count);
1245 static void port_stats_done(void *state)
/* NOTE(review): the lines below are members of a per-OFPST_* "vtable"
 * struct (its opening declaration is elided from this excerpt); each
 * stats type supplies size bounds plus init/dump/done callbacks that
 * drive the chunked stats-reply machinery in stats_dump()/stats_done(). */
1251 /* Minimum and maximum acceptable number of bytes in body member of
1252 * struct ofp_stats_request. */
1253 size_t min_body, max_body;
1255 /* Prepares to dump some kind of statistics on 'dp'. 'body' and
1256 * 'body_len' are the 'body' member of the struct ofp_stats_request.
1257 * Returns zero if successful, otherwise a negative error code.
1258 * May initialize '*state' to state information. May be null if no
1259 * initialization is required.*/
1260 int (*init)(struct datapath *dp, const void *body, int body_len,
1263 /* Appends statistics for 'dp' to 'buffer', which initially contains a
1264 * struct ofp_stats_reply. On success, it should return 1 if it should be
1265 * called again later with another buffer, 0 if it is done, or a negative
1266 * errno value on failure. */
1267 int (*dump)(struct datapath *dp, void *state, struct buffer *buffer);
1269 /* Cleans any state created by the init or dump functions. May be null
1270 * if no cleanup is required. */
1271 void (*done)(void *state);
/* Dispatch table indexed by the OFPST_* stats-request type; each entry
 * gives the body-size bounds and callbacks for one stats kind.  Only the
 * flow-stats entry's size fields are visible here (its callbacks and the
 * other entries are elided from this excerpt). */
1274 static const struct stats_type stats[] = {
1276 sizeof(struct ofp_flow_stats_request),
1277 sizeof(struct ofp_flow_stats_request),
/* Per-request context threaded through remote_start_dump(): a private
 * copy of the request, where to send replies, and which stats_type entry
 * handles it.  NOTE(review): the 'state' member that stats_dump() passes
 * to the callbacks, and a done/cancel flag if any, are declared in lines
 * elided from this excerpt. */
1298 struct stats_dump_cb {
1300 struct ofp_stats_request *rq;
1301 struct sender sender;
1302 const struct stats_type *s;
/* Dump callback registered with remote_start_dump(): emits one
 * OFPT_STATS_REPLY chunk per invocation.  It allocates a reply header,
 * lets the type-specific dump() append records, and marks the reply with
 * OFPSF_REPLY_MORE when dump() indicates more chunks will follow. */
1307 stats_dump(struct datapath *dp, void *cb_)
1309 struct stats_dump_cb *cb = cb_;
1310 struct ofp_stats_reply *osr;
1311 struct buffer *buffer;
1318 osr = alloc_openflow_buffer(dp, sizeof *osr, OFPT_STATS_REPLY, &cb->sender,
     /* Reply type = index of this entry in the stats[] table, i.e. the
      * OFPST_* code, converted to network byte order. */
1320 osr->type = htons(cb->s - stats);
1323 err = cb->s->dump(dp, cb->state, buffer);
1329 /* Buffer might have been reallocated, so find our data again. */
1330 osr = buffer_at_assert(buffer, 0, sizeof *osr);
     /* NOTE(review): ntohs() and htons() are the same 16-bit swap, so this
      * behaves correctly, but htons() is the conventional direction for a
      * value headed to the wire -- consider changing for clarity. */
1331 osr->flags = ntohs(OFPSF_REPLY_MORE);
1333 err2 = send_openflow_buffer(dp, buffer, &cb->sender);
/* Cleanup callback registered with remote_start_dump(): invokes the
 * type-specific done() hook, then (in lines elided from this excerpt)
 * presumably frees cb->rq and cb itself -- confirm against full file.
 * NOTE(review): the stats_type contract says 'done' may be null; the
 * null guard for this call is most likely in the elided line just above
 * it -- verify. */
1343 stats_done(void *cb_)
1345 struct stats_dump_cb *cb = cb_;
1348 cb->s->done(cb->state);
/* Handler for OFPT_STATS_REQUEST: validates the request type and body
 * length, builds a stats_dump_cb context, runs the type-specific init()
 * hook, and schedules chunked replies via remote_start_dump(). */
1355 recv_stats_request(struct datapath *dp, const struct sender *sender,
1358 const struct ofp_stats_request *rq = oh;
1359 size_t rq_len = ntohs(rq->header.length);
1360 struct stats_dump_cb *cb;
     /* Reject unknown or unimplemented stats types up front. */
1364 type = ntohs(rq->type);
1365 if (type >= ARRAY_SIZE(stats) || !stats[type].dump) {
1366 VLOG_WARN("received stats request of unknown type %d", type);
     /* Snapshot the request: replies are produced asynchronously, after
      * the caller's 'rq' buffer is gone. */
1370 cb = xmalloc(sizeof *cb);
1372 cb->rq = xmemdup(rq, rq_len);
1373 cb->sender = *sender;
1374 cb->s = &stats[type];
     /* NOTE(review): if body_len's (elided) declaration is unsigned and
      * rq_len < offsetof(..., body), this subtraction wraps around; the
      * bounds check below would then reject it via max_body, but confirm
      * the declared type in the full file. */
1377 body_len = rq_len - offsetof(struct ofp_stats_request, body);
1378 if (body_len < cb->s->min_body || body_len > cb->s->max_body) {
1379 VLOG_WARN("stats request type %d with bad body length %d",
     /* NOTE(review): 'init' may be null per the stats_type contract; the
      * null guard is presumably in the elided line above this call. */
1386 err = cb->s->init(dp, rq->body, body_len, &cb->state);
1388 VLOG_WARN("failed initialization of stats request type %d: %s",
1389 type, strerror(-err));
     /* Hand off to the remote's dump loop; stats_dump()/stats_done() now
      * own 'cb'. */
1394 remote_start_dump(sender->remote, stats_dump, stats_done, cb);
1403 /* 'msg', which is 'length' bytes long, was received from the control path.
1404 * Apply it to 'chain'. */
1406 fwd_control_input(struct datapath *dp, const struct sender *sender,
1407 const void *msg, size_t length)
     /* Table mapping OFPT_* message type to its minimum valid size and
      * handler function; designated initializers keep it sparse-safe. */
1409 struct openflow_packet {
1411 int (*handler)(struct datapath *, const struct sender *, const void *);
1414 static const struct openflow_packet packets[] = {
1415 [OFPT_FEATURES_REQUEST] = {
1416 sizeof (struct ofp_header),
1417 recv_features_request,
1419 [OFPT_GET_CONFIG_REQUEST] = {
1420 sizeof (struct ofp_header),
1421 recv_get_config_request,
1423 [OFPT_SET_CONFIG] = {
1424 sizeof (struct ofp_switch_config),
1427 [OFPT_PACKET_OUT] = {
1428 sizeof (struct ofp_packet_out),
1432 sizeof (struct ofp_flow_mod),
1436 sizeof (struct ofp_port_mod),
1439 [OFPT_STATS_REQUEST] = {
1440 sizeof (struct ofp_stats_request),
1445 const struct openflow_packet *pkt;
1446 struct ofp_header *oh;
     /* Basic header validation: protocol version, known message type, and
      * the header's claimed length must not exceed the bytes received. */
1448 oh = (struct ofp_header *) msg;
1449 if (oh->version != OFP_VERSION || oh->type >= ARRAY_SIZE(packets)
1450 || ntohs(oh->length) > length)
1453 pkt = &packets[oh->type];
     /* NOTE(review): this checks the actual byte count 'length' against
      * min_size, not the claimed ntohs(oh->length); a message claiming a
      * short length but delivered with trailing bytes still passes --
      * confirm this is intended. */
1456 if (length < pkt->min_size)
1459 return pkt->handler(dp, sender, msg);
1462 /* Packet buffering. */
     /* Minimum age (seconds) before a live buffer slot may be overwritten
      * by save_buffer(). */
1464 #define OVERWRITE_SECS 1
     /* One slot of the fixed-size packet-buffer ring.  NOTE(review): the
      * 'cookie' and 'timeout' members used below are declared in lines
      * elided from this excerpt. */
1466 struct packet_buffer {
1467 struct buffer *buffer;
     /* Ring of buffered packets, indexed by the low PKT_BUFFER_BITS of a
      * buffer id; 'buffer_idx' is the most recently used slot. */
1472 static struct packet_buffer buffers[N_PKT_BUFFERS];
1473 static unsigned int buffer_idx;
/* Stores a copy of 'buffer' in the next ring slot and returns an opaque
 * id encoding slot index (low bits) and a per-slot cookie (high bits),
 * so stale ids from a recycled slot are rejected on retrieval.
 * NOTE(review): the error-return path taken when the slot is still fresh
 * is in lines elided from this excerpt. */
1475 uint32_t save_buffer(struct buffer *buffer)
1477 struct packet_buffer *p;
     /* Advance the ring cursor, wrapping via the power-of-two mask. */
1480 buffer_idx = (buffer_idx + 1) & PKT_BUFFER_MASK;
1481 p = &buffers[buffer_idx];
1483 /* Don't buffer packet if existing entry is less than
1484 * OVERWRITE_SECS old. */
1485 if (time(0) < p->timeout) { /* FIXME */
1488 buffer_delete(p->buffer);
1491 /* Don't use maximum cookie value since the all-bits-1 id is
     /* Bump the cookie, skipping the reserved all-ones value (wrap handled
      * in the elided line below this check -- presumably resets to 0). */
1493 if (++p->cookie >= (1u << PKT_COOKIE_BITS) - 1)
1495 p->buffer = buffer_clone(buffer); /* FIXME */
1496 p->timeout = time(0) + OVERWRITE_SECS; /* FIXME */
1497 id = buffer_idx | (p->cookie << PKT_BUFFER_BITS);
/* Looks up a packet previously stored by save_buffer().  The slot is
 * found from the id's low bits; the id's high bits must match the slot's
 * current cookie, otherwise the id is stale and NULL is returned.
 * (Ownership transfer / slot clearing on success is in lines elided from
 * this excerpt.) */
1502 static struct buffer *retrieve_buffer(uint32_t id)
1504 struct buffer *buffer = NULL;
1505 struct packet_buffer *p;
1507 p = &buffers[id & PKT_BUFFER_MASK];
1508 if (p->cookie == id >> PKT_BUFFER_BITS) {
     /* NOTE(review): raw printf for a diagnostic is inconsistent with the
      * file's VLOG_* logging convention -- consider VLOG_WARN here. */
1512 printf("cookie mismatch: %x != %x\n",
1513 id >> PKT_BUFFER_BITS, p->cookie);
1519 static void discard_buffer(uint32_t id)
1521 struct packet_buffer *p;
1523 p = &buffers[id & PKT_BUFFER_MASK];
1524 if (p->cookie == id >> PKT_BUFFER_BITS) {
1525 buffer_delete(p->buffer);