+/* Initialization shared by every flavor of Linux netdev: sets up the
+ * per-device mutex and seeds the change sequence number.
+ * (ovs_mutex_init() no longer takes a mutex-type argument, so the
+ * PTHREAD_MUTEX_NORMAL parameter is dropped.) */
static void
netdev_linux_common_construct(struct netdev_linux *netdev)
{
- ovs_mutex_init(&netdev->mutex, PTHREAD_MUTEX_NORMAL);
+ ovs_mutex_init(&netdev->mutex);
netdev->change_seq = 1;
}
return error;
}
-/* Returns the maximum size of transmitted (and received) packets on 'netdev',
- * in bytes, not including the hardware header; thus, this is typically 1500
- * bytes for Ethernet devices. */
+/* Fills '*mtup' with the (possibly cached) MTU of 'netdev'.  Unlocked
+ * variant split out of netdev_linux_get_mtu(): the ovs_mutex_lock() call
+ * moved into that wrapper, so the caller must hold netdev->mutex. */
+/* NOTE(review): no assignment to 'error' is visible in this hunk before the
+ * '!error' test; presumably elided context loads netdev->netdev_mtu_error
+ * into 'error' -- confirm against the full file. */
static int
-netdev_linux_get_mtu(const struct netdev *netdev_, int *mtup)
+netdev_linux_get_mtu__(struct netdev_linux *netdev, int *mtup)
{
- struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error;
- ovs_mutex_lock(&netdev->mutex);
+ /* First use populates the cache; the ioctl result (success or errno)
+ * is itself cached in netdev->netdev_mtu_error. */
if (!(netdev->cache_valid & VALID_MTU)) {
struct ifreq ifr;
netdev->netdev_mtu_error = af_inet_ifreq_ioctl(
- netdev_get_name(netdev_), &ifr, SIOCGIFMTU, "SIOCGIFMTU");
+ netdev_get_name(&netdev->up), &ifr, SIOCGIFMTU, "SIOCGIFMTU");
netdev->mtu = ifr.ifr_mtu;
netdev->cache_valid |= VALID_MTU;
}
if (!error) {
*mtup = netdev->mtu;
}
+
+ return error;
+}
+
+/* Returns the maximum size of transmitted (and received) packets on 'netdev',
+ * in bytes, not including the hardware header; thus, this is typically 1500
+ * bytes for Ethernet devices. */
+/* Locking wrapper around netdev_linux_get_mtu__(): takes netdev->mutex so
+ * the MTU cache is read/filled atomically. */
+static int
+netdev_linux_get_mtu(const struct netdev *netdev_, int *mtup)
+{
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ int error;
+
+ ovs_mutex_lock(&netdev->mutex);
+ error = netdev_linux_get_mtu__(netdev, mtup);
ovs_mutex_unlock(&netdev->mutex);
return error;
static void
netdev_linux_read_features(struct netdev_linux *netdev)
- OVS_REQUIRES(netdev->mutex)
{
struct ethtool_cmd ecmd;
uint32_t speed;
return true;
}
+/* Iterator state shared by netdev_linux_queue_dump_{start,next,done}().
+ * Queue IDs are snapshotted when the dump starts, so queues created or
+ * deleted afterward cannot invalidate the iteration. */
+struct netdev_linux_queue_state {
+ /* Array of queue IDs captured at dump start; owned by this state and
+ * freed in netdev_linux_queue_dump_done(). */
+ unsigned int *queues;
+ /* Index into 'queues' of the next queue to visit. */
+ size_t cur_queue;
+ /* Number of elements in 'queues'. */
+ size_t n_queues;
+};
+
+/* Begins a queue dump: replaces the old callback-based
+ * netdev_linux_dump_queues().  Snapshots the IDs of every queue on
+ * 'netdev_' into a freshly allocated netdev_linux_queue_state and hands it
+ * back via '*statep'; the caller releases it with
+ * netdev_linux_queue_dump_done(). */
static int
-netdev_linux_dump_queues(const struct netdev *netdev_,
- netdev_dump_queues_cb *cb, void *aux)
+netdev_linux_queue_dump_start(const struct netdev *netdev_, void **statep)
{
- struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ const struct netdev_linux *netdev = netdev_linux_cast(netdev_);
int error;
ovs_mutex_lock(&netdev->mutex);
error = tc_query_qdisc(netdev_);
if (!error) {
+ /* Dumping queues only makes sense for qdiscs whose ops can report
+ * per-class configuration. */
if (netdev->tc->ops->class_get) {
- struct tc_queue *queue, *next_queue;
- struct smap details;
-
- smap_init(&details);
- HMAP_FOR_EACH_SAFE (queue, next_queue, hmap_node,
- &netdev->tc->queues) {
- int retval;
-
- smap_clear(&details);
-
- retval = netdev->tc->ops->class_get(netdev_, queue, &details);
- if (!retval) {
- (*cb)(queue->queue_id, &details, aux);
- } else {
- error = retval;
- }
+ struct netdev_linux_queue_state *state;
+ struct tc_queue *queue;
+ size_t i;
+
+ *statep = state = xmalloc(sizeof *state);
+ state->n_queues = hmap_count(&netdev->tc->queues);
+ state->cur_queue = 0;
+ state->queues = xmalloc(state->n_queues * sizeof *state->queues);
+
+ /* Record only the IDs, not the tc_queue pointers: dump_next()
+ * re-looks each one up so deletions in between are tolerated. */
+ i = 0;
+ HMAP_FOR_EACH (queue, hmap_node, &netdev->tc->queues) {
+ state->queues[i++] = queue->queue_id;
}
- smap_destroy(&details);
} else {
error = EOPNOTSUPP;
}
+ /* NOTE(review): the matching ovs_mutex_unlock() is not visible in this
+ * hunk; presumably it appears in elided context before the return --
+ * confirm against the full file. */
return error;
}
}
+/* Advances a queue dump started by netdev_linux_queue_dump_start(): stores
+ * the next queue's ID in '*queue_idp' and its configuration in 'details'.
+ * Returns 0 on success, EOF once the snapshot is exhausted, or the positive
+ * error from class_get().  Queues deleted since the dump started
+ * (tc_find_queue() returns NULL) are silently skipped. */
+static int
+netdev_linux_queue_dump_next(const struct netdev *netdev_, void *state_,
+ unsigned int *queue_idp, struct smap *details)
+{
+ const struct netdev_linux *netdev = netdev_linux_cast(netdev_);
+ struct netdev_linux_queue_state *state = state_;
+ int error = EOF;
+
+ ovs_mutex_lock(&netdev->mutex);
+ while (state->cur_queue < state->n_queues) {
+ unsigned int queue_id = state->queues[state->cur_queue++];
+ struct tc_queue *queue = tc_find_queue(netdev_, queue_id);
+
+ if (queue) {
+ *queue_idp = queue_id;
+ error = netdev->tc->ops->class_get(netdev_, queue, details);
+ break;
+ }
+ }
+ ovs_mutex_unlock(&netdev->mutex);
+
+ return error;
+}
+
+/* Releases the snapshot allocated by netdev_linux_queue_dump_start().
+ * Always succeeds. */
+static int
+netdev_linux_queue_dump_done(const struct netdev *netdev OVS_UNUSED,
+ void *state_)
+{
+ struct netdev_linux_queue_state *state = state_;
+
+ free(state->queues);
+ free(state);
+ return 0;
+}
+
static int
netdev_linux_dump_queue_stats(const struct netdev *netdev_,
netdev_dump_queue_stats_cb *cb, void *aux)
if (nd & NETDEV_PROMISC) {
iff |= IFF_PROMISC;
}
+ if (nd & NETDEV_LOOPBACK) {
+ iff |= IFF_LOOPBACK;
+ }
return iff;
}
if (iff & IFF_PROMISC) {
nd |= NETDEV_PROMISC;
}
+ if (iff & IFF_LOOPBACK) {
+ nd |= NETDEV_LOOPBACK;
+ }
return nd;
}
netdev_linux_set_queue, \
netdev_linux_delete_queue, \
netdev_linux_get_queue_stats, \
- netdev_linux_dump_queues, \
+ netdev_linux_queue_dump_start, \
+ netdev_linux_queue_dump_next, \
+ netdev_linux_queue_dump_done, \
netdev_linux_dump_queue_stats, \
\
netdev_linux_get_in4, \
int error;
int mtu;
- error = netdev_get_mtu(netdev, &mtu);
+ error = netdev_linux_get_mtu__(netdev_linux_cast(netdev), &mtu);
if (error) {
VLOG_WARN_RL(&rl, "cannot set up HTB on device %s that lacks MTU",
netdev_get_name(netdev));
}
static void
-htb_parse_qdisc_details__(struct netdev *netdev,
+htb_parse_qdisc_details__(struct netdev *netdev_,
const struct smap *details, struct htb_class *hc)
{
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
const char *max_rate_s;
max_rate_s = smap_get(details, "max-rate");
if (!hc->max_rate) {
enum netdev_features current;
- netdev_get_features(netdev, ¤t, NULL, NULL, NULL);
+ netdev_linux_read_features(netdev);
+ current = !netdev->get_features_error ? netdev->current : 0;
hc->max_rate = netdev_features_to_bps(current, 100 * 1000 * 1000) / 8;
}
hc->min_rate = hc->max_rate;
const char *priority_s = smap_get(details, "priority");
int mtu, error;
- error = netdev_get_mtu(netdev, &mtu);
+ error = netdev_linux_get_mtu__(netdev_linux_cast(netdev), &mtu);
if (error) {
VLOG_WARN_RL(&rl, "cannot parse HTB class on device %s that lacks MTU",
netdev_get_name(netdev));
}
static void
-hfsc_parse_qdisc_details__(struct netdev *netdev, const struct smap *details,
+hfsc_parse_qdisc_details__(struct netdev *netdev_, const struct smap *details,
struct hfsc_class *class)
{
+ struct netdev_linux *netdev = netdev_linux_cast(netdev_);
uint32_t max_rate;
const char *max_rate_s;
if (!max_rate) {
enum netdev_features current;
- netdev_get_features(netdev, ¤t, NULL, NULL, NULL);
+ netdev_linux_read_features(netdev);
+ current = !netdev->get_features_error ? netdev->current : 0;
max_rate = netdev_features_to_bps(current, 100 * 1000 * 1000) / 8;
}