diff --git a/lib/dpif-linux.c b/lib/dpif-linux.c
index 3b2fba3c0..d9dd3b2ee 100644
--- a/lib/dpif-linux.c
+++ b/lib/dpif-linux.c
@@ -54,6 +54,7 @@
 #include "random.h"
 #include "shash.h"
 #include "sset.h"
+#include "timeval.h"
 #include "unaligned.h"
 #include "util.h"
 #include "vlog.h"
@@ -61,9 +62,10 @@ VLOG_DEFINE_THIS_MODULE(dpif_linux);
 
 enum { MAX_PORTS = USHRT_MAX };
 
-enum { N_UPCALL_SOCKS = 16 };
-BUILD_ASSERT_DECL(IS_POW2(N_UPCALL_SOCKS));
-BUILD_ASSERT_DECL(N_UPCALL_SOCKS <= 32); /* We use a 32-bit word as a mask. */
+enum { N_CHANNELS = 17 };
+BUILD_ASSERT_DECL(IS_POW2(N_CHANNELS - 1));
+BUILD_ASSERT_DECL(N_CHANNELS > 1);
+BUILD_ASSERT_DECL(N_CHANNELS <= 32); /* We use a 32-bit word as a mask. */
 
 /* This ethtool flag was introduced in Linux 2.6.24, so it might be
  * missing if we have old headers. */
@@ -129,23 +131,73 @@ static int dpif_linux_flow_transact(struct dpif_linux_flow *request,
 static void dpif_linux_flow_get_stats(const struct dpif_linux_flow *,
                                       struct dpif_flow_stats *);
 
+/* Packet drop monitoring.
+ *
+ * When kernel-to-user Netlink buffers overflow, the kernel notifies us that
+ * one or more packets were dropped, but it doesn't tell us anything about
+ * those packets.  However, the administrator really wants to know.  So we do
+ * the next best thing, and keep track of the top sources of packets received
+ * on each kernel-to-user channel, since the top sources are those that will
+ * cause the buffers to overflow.
+ *
+ * We use a variation on the "Space-Saving" algorithm in Metwally et al.,
+ * "Efficient Computation of Frequent and Top-k Elements in Data Streams", ACM
+ * Transactions on Database Systems 31:3 (2006).  This algorithm yields
+ * perfectly accurate results when the data stream's unique values (in this
+ * case, port numbers) fit into our data structure, and degrades gracefully
+ * even for challenging distributions (e.g. Zipf).
+ *
+ * Our implementation is very simple, without any of the special flourishes
+ * described in the paper.  It avoids the need to use a hash for lookup by
+ * keeping the constant factor (N_SKETCHES) very small.  The error calculations
+ * in the paper make it sound like the results should still be satisfactory.
+ *
+ * "space-saving" and "Metwally" seem like awkward names for data structures,
+ * so we call this a "sketch" even though technically that's a different sort
+ * of summary structure.
+ */
+
+/* One of N_SKETCHES counting elements per channel in the Metwally
+ * "space-saving" algorithm. */
+enum { N_SKETCHES = 8 };        /* Number of elements per channel. */
+struct dpif_sketch {
+    uint32_t port_no;           /* Port number. */
+    unsigned int hits;          /* Number of hits. */
+    unsigned int error;         /* Upper bound on error in 'hits'. */
+};
+
+/* One of N_CHANNELS channels per dpif between the kernel and userspace. */
+struct dpif_channel {
+    struct nl_sock *sock;       /* Netlink socket. */
+    struct dpif_sketch sketches[N_SKETCHES]; /* From max to min 'hits'. */
+    long long int last_poll;    /* Last time this channel was polled.
*/ +}; + +static void update_sketch(struct dpif_channel *, uint32_t port_no); +static void scale_sketches(struct dpif *); +static void report_loss(struct dpif *, struct dpif_channel *); + +/* Interval, in milliseconds, at which to scale down the sketch values by a + * factor of 2. The Metwally algorithm doesn't do this, which makes sense in + * the context it assumes, but in our situation we ought to weight recent data + * more heavily than old data, so in my opinion this is reasonable. */ +#define SCALE_INTERVAL (60 * 1000) + /* Datapath interface for the openvswitch Linux kernel module. */ struct dpif_linux { struct dpif dpif; int dp_ifindex; /* Upcall messages. */ - struct nl_sock *upcall_socks[N_UPCALL_SOCKS]; + struct dpif_channel channels[N_CHANNELS]; uint32_t ready_mask; /* 1-bit for each sock with unread messages. */ - int epoll_fd; /* epoll fd that includes the upcall socks. */ + int epoll_fd; /* epoll fd that includes channel socks. */ + long long int next_scale; /* Next time to scale down the sketches. */ /* Change notification. */ struct sset changed_ports; /* Ports that have changed. */ struct nln_notifier *port_notifier; bool change_error; - - /* Port number allocation. */ - uint16_t alloc_port_no; }; static struct vlog_rate_limit error_rl = VLOG_RATE_LIMIT_INIT(9999, 5); @@ -164,7 +216,7 @@ static int dpif_linux_init(void); static void open_dpif(const struct dpif_linux_dp *, struct dpif **); static bool dpif_linux_nln_parse(struct ofpbuf *, void *); static void dpif_linux_port_changed(const void *vport, void *dpif); -static uint32_t dpif_linux_port_get_pid(const struct dpif *, uint16_t port_no); +static uint32_t dpif_linux_port_get_pid(const struct dpif *, uint32_t port_no); static void dpif_linux_vport_to_ofpbuf(const struct dpif_linux_vport *, struct ofpbuf *); @@ -248,24 +300,27 @@ open_dpif(const struct dpif_linux_dp *dp, struct dpif **dpifp) dpif_init(&dpif->dpif, &dpif_linux_class, dp->name, dp->dp_ifindex, dp->dp_ifindex); + dpif->next_scale = LLONG_MAX; + dpif->dp_ifindex = dp->dp_ifindex; sset_init(&dpif->changed_ports); *dpifp = &dpif->dpif; } static void -destroy_upcall_socks(struct dpif_linux *dpif) +destroy_channels(struct dpif_linux *dpif) { - int i; + struct dpif_channel *ch; if (dpif->epoll_fd >= 0) { close(dpif->epoll_fd); dpif->epoll_fd = -1; } - for (i = 0; i < N_UPCALL_SOCKS; i++) { - nl_sock_destroy(dpif->upcall_socks[i]); - dpif->upcall_socks[i] = NULL; + for (ch = dpif->channels; ch < &dpif->channels[N_CHANNELS]; ch++) { + nl_sock_destroy(ch->sock); + ch->sock = NULL; } + dpif->next_scale = LLONG_MAX; } static void @@ -274,7 +329,7 @@ dpif_linux_close(struct dpif *dpif_) struct dpif_linux *dpif = dpif_linux_cast(dpif_); nln_notifier_destroy(dpif->port_notifier); - destroy_upcall_socks(dpif); + destroy_channels(dpif); sset_destroy(&dpif->changed_ports); free(dpif); } @@ -292,8 +347,15 @@ dpif_linux_destroy(struct dpif *dpif_) } static void -dpif_linux_run(struct dpif *dpif OVS_UNUSED) +dpif_linux_run(struct dpif *dpif_) { + struct dpif_linux *dpif = dpif_linux_cast(dpif_); + + if (time_msec() >= dpif->next_scale) { + dpif->next_scale = time_msec() + SCALE_INTERVAL; + scale_sketches(dpif_); + } + if (nln) { nln_run(nln); } @@ -327,15 +389,16 @@ dpif_linux_get_stats(const struct dpif *dpif_, struct dpif_dp_stats *stats) static int dpif_linux_port_add(struct dpif *dpif_, struct netdev *netdev, - uint16_t *port_nop) + uint32_t *port_nop) { struct dpif_linux *dpif = dpif_linux_cast(dpif_); const char *name = netdev_get_name(netdev); const char *type = 
netdev_get_type(netdev); struct dpif_linux_vport request, reply; const struct ofpbuf *options; + uint32_t upcall_pid; struct ofpbuf *buf; - int error, i = 0, max_ports = MAX_PORTS; + int error; dpif_linux_vport_init(&request); request.cmd = OVS_VPORT_CMD_NEW; @@ -359,34 +422,28 @@ dpif_linux_port_add(struct dpif *dpif_, struct netdev *netdev, netdev_linux_ethtool_set_flag(netdev, ETH_FLAG_LRO, "LRO", false); } - /* Loop until we find a port that isn't used. */ - do { - uint32_t upcall_pid; + request.port_no = *port_nop; + upcall_pid = dpif_linux_port_get_pid(dpif_, request.port_no); + request.upcall_pid = &upcall_pid; - request.port_no = ++dpif->alloc_port_no; - upcall_pid = dpif_linux_port_get_pid(dpif_, request.port_no); - request.upcall_pid = &upcall_pid; - error = dpif_linux_vport_transact(&request, &reply, &buf); + error = dpif_linux_vport_transact(&request, &reply, &buf); - if (!error) { - *port_nop = reply.port_no; - VLOG_DBG("%s: assigning port %"PRIu32" to netlink pid %"PRIu32, - dpif_name(dpif_), request.port_no, upcall_pid); - } else if (error == EFBIG) { - /* Older datapath has lower limit. */ - max_ports = dpif->alloc_port_no; - dpif->alloc_port_no = 0; - } + if (!error) { + *port_nop = reply.port_no; + VLOG_DBG("%s: assigning port %"PRIu32" to netlink pid %"PRIu32, + dpif_name(dpif_), request.port_no, upcall_pid); + } else if (error == EBUSY && *port_nop != UINT32_MAX) { + VLOG_INFO("%s: requested port %"PRIu32" is in use", + dpif_name(dpif_), *port_nop); + } - ofpbuf_delete(buf); - } while ((i++ < max_ports) - && (error == EBUSY || error == EFBIG)); + ofpbuf_delete(buf); return error; } static int -dpif_linux_port_del(struct dpif *dpif_, uint16_t port_no) +dpif_linux_port_del(struct dpif *dpif_, uint32_t port_no) { struct dpif_linux *dpif = dpif_linux_cast(dpif_); struct dpif_linux_vport vport; @@ -422,7 +479,7 @@ dpif_linux_port_query__(const struct dpif *dpif, uint32_t port_no, /* A query by name reported that 'port_name' is in some datapath * other than 'dpif', but the caller wants to know about 'dpif'. */ error = ENODEV; - } else { + } else if (dpif_port) { dpif_port->name = xstrdup(reply.name); dpif_port->type = xstrdup(netdev_vport_get_netdev_type(&reply)); dpif_port->port_no = reply.port_no; @@ -433,7 +490,7 @@ dpif_linux_port_query__(const struct dpif *dpif, uint32_t port_no, } static int -dpif_linux_port_query_by_number(const struct dpif *dpif, uint16_t port_no, +dpif_linux_port_query_by_number(const struct dpif *dpif, uint32_t port_no, struct dpif_port *dpif_port) { return dpif_linux_port_query__(dpif, port_no, NULL, dpif_port); @@ -453,15 +510,19 @@ dpif_linux_get_max_ports(const struct dpif *dpif OVS_UNUSED) } static uint32_t -dpif_linux_port_get_pid(const struct dpif *dpif_, uint16_t port_no) +dpif_linux_port_get_pid(const struct dpif *dpif_, uint32_t port_no) { struct dpif_linux *dpif = dpif_linux_cast(dpif_); if (dpif->epoll_fd < 0) { return 0; } else { - int idx = port_no & (N_UPCALL_SOCKS - 1); - return nl_sock_pid(dpif->upcall_socks[idx]); + int idx; + + idx = (port_no != UINT32_MAX + ? 
1 + (port_no & (N_CHANNELS - 2)) + : 0); + return nl_sock_pid(dpif->channels[idx].sock); } } @@ -521,8 +582,8 @@ dpif_linux_port_dump_next(const struct dpif *dpif OVS_UNUSED, void *state_, return error; } - dpif_port->name = (char *) vport.name; - dpif_port->type = (char *) netdev_vport_get_netdev_type(&vport); + dpif_port->name = CONST_CAST(char *, vport.name); + dpif_port->type = CONST_CAST(char *, netdev_vport_get_netdev_type(&vport)); dpif_port->port_no = vport.port_no; return 0; } @@ -594,7 +655,7 @@ dpif_linux_flow_get(const struct dpif *dpif_, dpif_linux_flow_get_stats(&reply, stats); } if (actionsp) { - buf->data = (void *) reply.actions; + buf->data = CONST_CAST(struct nlattr *, reply.actions); buf->size = reply.actions_len; *actionsp = buf; } else { @@ -899,24 +960,38 @@ dpif_linux_operate__(struct dpif *dpif_, struct dpif_op **ops, size_t n_ops) switch (op->type) { case DPIF_OP_FLOW_PUT: put = &op->u.flow_put; - if (!op->error && put->stats) { - struct dpif_linux_flow reply; - - op->error = dpif_linux_flow_from_ofpbuf(&reply, txn->reply); + if (put->stats) { if (!op->error) { - dpif_linux_flow_get_stats(&reply, put->stats); + struct dpif_linux_flow reply; + + op->error = dpif_linux_flow_from_ofpbuf(&reply, + txn->reply); + if (!op->error) { + dpif_linux_flow_get_stats(&reply, put->stats); + } + } + + if (op->error) { + memset(put->stats, 0, sizeof *put->stats); } } break; case DPIF_OP_FLOW_DEL: del = &op->u.flow_del; - if (!op->error && del->stats) { - struct dpif_linux_flow reply; - - op->error = dpif_linux_flow_from_ofpbuf(&reply, txn->reply); + if (del->stats) { if (!op->error) { - dpif_linux_flow_get_stats(&reply, del->stats); + struct dpif_linux_flow reply; + + op->error = dpif_linux_flow_from_ofpbuf(&reply, + txn->reply); + if (!op->error) { + dpif_linux_flow_get_stats(&reply, del->stats); + } + } + + if (op->error) { + memset(del->stats, 0, sizeof *del->stats); } } break; @@ -983,37 +1058,42 @@ dpif_linux_recv_set(struct dpif *dpif_, bool enable) } if (!enable) { - destroy_upcall_socks(dpif); + destroy_channels(dpif); } else { - int i; + struct dpif_channel *ch; int error; - dpif->epoll_fd = epoll_create(N_UPCALL_SOCKS); + dpif->epoll_fd = epoll_create(N_CHANNELS); if (dpif->epoll_fd < 0) { return errno; } - for (i = 0; i < N_UPCALL_SOCKS; i++) { + for (ch = dpif->channels; ch < &dpif->channels[N_CHANNELS]; ch++) { + int indx = ch - dpif->channels; struct epoll_event event; - error = nl_sock_create(NETLINK_GENERIC, &dpif->upcall_socks[i]); + error = nl_sock_create(NETLINK_GENERIC, &ch->sock); if (error) { - destroy_upcall_socks(dpif); + destroy_channels(dpif); return error; } memset(&event, 0, sizeof event); event.events = EPOLLIN; - event.data.u32 = i; - if (epoll_ctl(dpif->epoll_fd, EPOLL_CTL_ADD, - nl_sock_fd(dpif->upcall_socks[i]), &event) < 0) { + event.data.u32 = indx; + if (epoll_ctl(dpif->epoll_fd, EPOLL_CTL_ADD, nl_sock_fd(ch->sock), + &event) < 0) { error = errno; - destroy_upcall_socks(dpif); + destroy_channels(dpif); return error; } + + memset(ch->sketches, 0, sizeof ch->sketches); + ch->last_poll = LLONG_MIN; } dpif->ready_mask = 0; + dpif->next_scale = time_msec() + SCALE_INTERVAL; } set_upcall_pids(dpif_); @@ -1076,9 +1156,11 @@ parse_odp_packet(struct ofpbuf *buf, struct dpif_upcall *upcall, memset(upcall, 0, sizeof *upcall); upcall->type = type; upcall->packet = buf; - upcall->packet->data = (void *) nl_attr_get(a[OVS_PACKET_ATTR_PACKET]); + upcall->packet->data = CONST_CAST(struct nlattr *, + nl_attr_get(a[OVS_PACKET_ATTR_PACKET])); upcall->packet->size 
= nl_attr_get_size(a[OVS_PACKET_ATTR_PACKET]); - upcall->key = (void *) nl_attr_get(a[OVS_PACKET_ATTR_KEY]); + upcall->key = CONST_CAST(struct nlattr *, + nl_attr_get(a[OVS_PACKET_ATTR_KEY])); upcall->key_len = nl_attr_get_size(a[OVS_PACKET_ATTR_KEY]); upcall->userdata = (a[OVS_PACKET_ATTR_USERDATA] ? nl_attr_get_u64(a[OVS_PACKET_ATTR_USERDATA]) @@ -1100,12 +1182,12 @@ dpif_linux_recv(struct dpif *dpif_, struct dpif_upcall *upcall, } if (!dpif->ready_mask) { - struct epoll_event events[N_UPCALL_SOCKS]; + struct epoll_event events[N_CHANNELS]; int retval; int i; do { - retval = epoll_wait(dpif->epoll_fd, events, N_UPCALL_SOCKS, 0); + retval = epoll_wait(dpif->epoll_fd, events, N_CHANNELS, 0); } while (retval < 0 && errno == EINTR); if (retval < 0) { static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 1); @@ -1119,7 +1201,7 @@ dpif_linux_recv(struct dpif *dpif_, struct dpif_upcall *upcall, while (dpif->ready_mask) { int indx = ffs(dpif->ready_mask) - 1; - struct nl_sock *upcall_sock = dpif->upcall_socks[indx]; + struct dpif_channel *ch = &dpif->channels[indx]; dpif->ready_mask &= ~(1u << indx); @@ -1131,7 +1213,17 @@ dpif_linux_recv(struct dpif *dpif_, struct dpif_upcall *upcall, return EAGAIN; } - error = nl_sock_recv(upcall_sock, buf, false); + error = nl_sock_recv(ch->sock, buf, false); + if (error == ENOBUFS) { + /* ENOBUFS typically means that we've received so many + * packets that the buffer overflowed. Try again + * immediately because there's almost certainly a packet + * waiting for us. */ + report_loss(dpif_, ch); + continue; + } + + ch->last_poll = time_msec(); if (error) { if (error == EAGAIN) { break; @@ -1141,6 +1233,13 @@ dpif_linux_recv(struct dpif *dpif_, struct dpif_upcall *upcall, error = parse_odp_packet(buf, upcall, &dp_ifindex); if (!error && dp_ifindex == dpif->dp_ifindex) { + const struct nlattr *in_port; + + in_port = nl_attr_find__(upcall->key, upcall->key_len, + OVS_KEY_ATTR_IN_PORT); + if (in_port) { + update_sketch(ch, nl_attr_get_u32(in_port)); + } return 0; } if (error) { @@ -1168,20 +1267,21 @@ static void dpif_linux_recv_purge(struct dpif *dpif_) { struct dpif_linux *dpif = dpif_linux_cast(dpif_); - int i; + struct dpif_channel *ch; if (dpif->epoll_fd < 0) { return; } - for (i = 0; i < N_UPCALL_SOCKS; i++) { - nl_sock_drain(dpif->upcall_socks[i]); + for (ch = dpif->channels; ch < &dpif->channels[N_CHANNELS]; ch++) { + nl_sock_drain(ch->sock); } } const struct dpif_class dpif_linux_class = { "system", dpif_linux_enumerate, + NULL, dpif_linux_open, dpif_linux_close, dpif_linux_destroy, @@ -1276,33 +1376,6 @@ dpif_linux_is_internal_device(const char *name) return reply.type == OVS_VPORT_TYPE_INTERNAL; } -int -dpif_linux_vport_send(int dp_ifindex, uint32_t port_no, - const void *data, size_t size) -{ - struct ofpbuf actions, key, packet; - struct odputil_keybuf keybuf; - struct dpif_execute execute; - struct flow flow; - uint64_t action; - - ofpbuf_use_const(&packet, data, size); - flow_extract(&packet, 0, htonll(0), 0, &flow); - - ofpbuf_use_stack(&key, &keybuf, sizeof keybuf); - odp_flow_key_from_flow(&key, &flow); - - ofpbuf_use_stack(&actions, &action, sizeof action); - nl_msg_put_u32(&actions, OVS_ACTION_ATTR_OUTPUT, port_no); - - execute.key = key.data; - execute.key_len = key.size; - execute.actions = actions.data; - execute.actions_len = actions.size; - execute.packet = &packet; - return dpif_linux_execute__(dp_ifindex, &execute); -} - static bool dpif_linux_nln_parse(struct ofpbuf *buf, void *vport_) { @@ -1801,3 +1874,96 @@ 
dpif_linux_flow_get_stats(const struct dpif_linux_flow *flow,
     stats->used = flow->used ? get_32aligned_u64(flow->used) : 0;
     stats->tcp_flags = flow->tcp_flags ? *flow->tcp_flags : 0;
 }
+
+/* Metwally "space-saving" algorithm implementation. */
+
+/* Updates 'ch' to record that a packet was received on 'port_no'. */
+static void
+update_sketch(struct dpif_channel *ch, uint32_t port_no)
+{
+    struct dpif_sketch *sk;
+
+    /* Find an existing counting element for 'port_no' or, if none, replace the
+     * counting element with the fewest hits by 'port_no'. */
+    for (sk = ch->sketches; ; sk++) {
+        if (port_no == sk->port_no) {
+            break;
+        } else if (sk == &ch->sketches[N_SKETCHES - 1]) {
+            sk->port_no = port_no;
+            sk->error = sk->hits;
+            break;
+        }
+    }
+
+    /* Increment the hit count, then re-sort the counting elements (usually
+     * nothing needs to be done). */
+    sk->hits++;
+    while (sk > ch->sketches && sk[-1].hits < sk->hits) {
+        struct dpif_sketch tmp = sk[-1];
+        sk[-1] = *sk;
+        *sk = tmp;
+        sk--;
+    }
+}
+
+/* Divide the counts of all the counting elements in 'dpif' by 2.  See the
+ * comment on SCALE_INTERVAL. */
+static void
+scale_sketches(struct dpif *dpif_)
+{
+    struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+    struct dpif_channel *ch;
+
+    for (ch = dpif->channels; ch < &dpif->channels[N_CHANNELS]; ch++) {
+        struct dpif_sketch *sk;
+
+        for (sk = ch->sketches; sk < &ch->sketches[N_SKETCHES]; sk++) {
+            sk->hits /= 2;
+            sk->error /= 2;
+        }
+    }
+}
+
+/* Logs information about a packet that was recently lost in 'ch' (in
+ * 'dpif_'). */
+static void
+report_loss(struct dpif *dpif_, struct dpif_channel *ch)
+{
+    struct dpif_linux *dpif = dpif_linux_cast(dpif_);
+    static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, 5);
+    struct dpif_sketch *sk;
+    struct ds s;
+
+    if (VLOG_DROP_ERR(&rl)) {
+        return;
+    }
+
+    ds_init(&s);
+    if (ch->last_poll != LLONG_MIN) {
+        ds_put_format(&s, " (last polled %lld ms ago)",
+                      time_msec() - ch->last_poll);
+    }
+    ds_put_cstr(&s, ", most frequent sources are");
+    for (sk = ch->sketches; sk < &ch->sketches[N_SKETCHES]; sk++) {
+        if (sk->hits) {
+            struct dpif_port port;
+
+            ds_put_format(&s, " %"PRIu32, sk->port_no);
+            if (!dpif_port_query_by_number(dpif_, sk->port_no, &port)) {
+                ds_put_format(&s, "(%s)", port.name);
+                dpif_port_destroy(&port);
+            }
+            if (sk->error) {
+                ds_put_format(&s, ": %u to %u,",
+                              sk->hits - sk->error, sk->hits);
+            } else {
+                ds_put_format(&s, ": %u,", sk->hits);
+            }
+        }
+    }
+    ds_chomp(&s, ',');
+
+    VLOG_WARN("%s: lost packet on channel %td%s",
+              dpif_name(dpif_), ch - dpif->channels, ds_cstr(&s));
+    ds_destroy(&s);
+}
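
The counting scheme that the patch adds in update_sketch() can be exercised on its own, outside the datapath code.  The short standalone program below is an illustrative sketch of the same Metwally "space-saving" update under the patch's assumptions: a small array kept sorted from most to fewest hits, eviction of the last (least-hit) element, and the evicted count carried over as the new element's error bound.  It is not part of the patch, and names such as record(), N_SLOTS, and struct slot are hypothetical stand-ins for update_sketch(), N_SKETCHES, and struct dpif_sketch.

/* Standalone illustration (not from the patch) of the "space-saving"
 * top-k counting scheme used by update_sketch() above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define N_SLOTS 4                   /* Playing the role of N_SKETCHES. */

struct slot {
    uint32_t key;                   /* E.g. an input port number. */
    unsigned int hits;              /* Estimated count for 'key'. */
    unsigned int error;             /* Upper bound on overestimate of 'hits'. */
};

static struct slot slots[N_SLOTS];  /* Kept sorted from max to min 'hits'. */

/* Records one occurrence of 'key', evicting the least-hit slot if 'key' is
 * not already being counted. */
static void
record(uint32_t key)
{
    struct slot *s;

    /* Find 'key', or replace the slot with the fewest hits (the last one,
     * since the array is sorted). */
    for (s = slots; ; s++) {
        if (s->key == key) {
            break;
        } else if (s == &slots[N_SLOTS - 1]) {
            s->key = key;
            s->error = s->hits;     /* Inherited count becomes the error bound. */
            break;
        }
    }

    /* Bump the count, then bubble the slot toward the front to keep the
     * array sorted by 'hits'. */
    s->hits++;
    while (s > slots && s[-1].hits < s->hits) {
        struct slot tmp = s[-1];
        s[-1] = *s;
        *s = tmp;
        s--;
    }
}

int
main(void)
{
    const uint32_t stream[] = { 3, 3, 7, 3, 9, 7, 3, 1, 3, 7, 2, 3 };
    size_t i;

    for (i = 0; i < sizeof stream / sizeof *stream; i++) {
        record(stream[i]);
    }
    for (i = 0; i < N_SLOTS; i++) {
        if (slots[i].hits) {
            printf("port %"PRIu32": between %u and %u packets\n", slots[i].key,
                   slots[i].hits - slots[i].error, slots[i].hits);
        }
    }
    return 0;
}

Because an evicted slot's count is carried over as the new key's 'error', the true count for a reported key always lies between 'hits - error' and 'hits', which is the same range that report_loss() prints for each suspected source port.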