#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
-#include <asm/bug.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/openvswitch.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
-#include <net/inet_ecn.h>
#include <net/genetlink.h>
#include "checksum.h"
#include "datapath.h"
-#include "actions.h"
#include "flow.h"
#include "vlan.h"
#include "tunnel.h"
#include "vport-internal_dev.h"
/* This module tracks kernel APIs closely; refuse to build against
 * kernels outside the supported range (2.6.18 .. 3.2). */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) || \
    LINUX_VERSION_CODE > KERNEL_VERSION(3,2,0)
#error Kernels before 2.6.18 or after 3.2 are not supported by this version of Open vSwitch.
#endif
int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
- return vport_get_name(rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]));
+ struct vport *vport = rcu_dereference_rtnl(dp->ports[OVSP_LOCAL]);
+ return vport->ops->get_name(vport);
}
static int get_dpifindex(struct datapath *dp)
local = get_vport_protected(dp, OVSP_LOCAL);
if (local)
- ifindex = vport_get_ifindex(local);
+ ifindex = local->ops->get_ifindex(local);
else
ifindex = 0;
return ifindex;
}
-static inline size_t br_nlmsg_size(void)
+static size_t br_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
int event, unsigned int flags)
{
struct datapath *dp = port->dp;
- int ifindex = vport_get_ifindex(port);
struct ifinfomsg *hdr;
struct nlmsghdr *nlh;
- if (ifindex < 0)
- return ifindex;
+ if (!port->ops->get_ifindex)
+ return -ENODEV;
nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
if (nlh == NULL)
hdr->ifi_family = AF_BRIDGE;
hdr->__ifi_pad = 0;
hdr->ifi_type = ARPHRD_ETHER;
- hdr->ifi_index = ifindex;
- hdr->ifi_flags = vport_get_flags(port);
+ hdr->ifi_index = port->ops->get_ifindex(port);
+ hdr->ifi_flags = port->ops->get_dev_flags(port);
hdr->ifi_change = 0;
- NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
+ NLA_PUT_STRING(skb, IFLA_IFNAME, port->ops->get_name(port));
NLA_PUT_U32(skb, IFLA_MASTER, get_dpifindex(dp));
- NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
+ NLA_PUT_U32(skb, IFLA_MTU, port->ops->get_mtu(port));
#ifdef IFLA_OPERSTATE
NLA_PUT_U8(skb, IFLA_OPERSTATE,
- vport_is_running(port)
- ? vport_get_operstate(port)
+ port->ops->is_running(port)
+ ? port->ops->get_operstate(port)
: IF_OPER_DOWN);
#endif
- NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));
+ NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, port->ops->get_addr(port));
return nlmsg_end(skb, nlh);
static void dp_ifinfo_notify(int event, struct vport *port)
{
struct sk_buff *skb;
- int err = -ENOBUFS;
+ int err;
skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
- if (skb == NULL)
- goto errout;
+ if (!skb) {
+ err = -ENOBUFS;
+ goto err;
+ }
err = dp_fill_ifinfo(skb, port, event, 0);
if (err < 0) {
- /* -EMSGSIZE implies BUG in br_nlmsg_size() */
- WARN_ON(err == -EMSGSIZE);
- kfree_skb(skb);
- goto errout;
+ if (err == -ENODEV) {
+ goto out;
+ } else {
+ /* -EMSGSIZE implies BUG in br_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ goto err;
+ }
}
+
rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
+
return;
-errout:
- if (err < 0)
- rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
+err:
+ rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
+out:
+ kfree_skb(skb);
}
static void release_dp(struct kobject *kobj)
int error;
stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());
- OVS_CB(skb)->vport = p;
if (!OVS_CB(skb)->flow) {
struct sw_flow_key key;
}
/* Look up flow. */
- flow = flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+ flow = flow_tbl_lookup(rcu_dereference(dp->table),
+ &key, key_len);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
write_seqcount_end(&stats->seqlock);
}
-static void copy_and_csum_skb(struct sk_buff *skb, void *to)
-{
- u16 csum_start, csum_offset;
- __wsum csum;
-
- get_skb_csum_pointers(skb, &csum_start, &csum_offset);
- csum_start -= skb_headroom(skb);
-
- skb_copy_bits(skb, 0, to, csum_start);
-
- csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
- skb->len - csum_start, 0);
- *(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
-}
-
static struct genl_family dp_packet_genl_family = {
.id = GENL_ID_GENERATE,
.hdrsize = sizeof(struct ovs_header),
.name = OVS_PACKET_FAMILY,
- .version = 1,
+ .version = OVS_PACKET_VERSION,
.maxattr = OVS_PACKET_ATTR_MAX
};
* properly mark later fragments.
*/
later_key = *upcall_info->key;
- later_key.ip.tos_frag &= ~OVS_FRAG_TYPE_MASK;
- later_key.ip.tos_frag |= OVS_FRAG_TYPE_LATER;
+ later_key.ip.frag = OVS_FRAG_TYPE_LATER;
later_info = *upcall_info;
later_info.key = &later_key;
nla_get_u64(upcall_info->userdata));
nla = __nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, skb->len);
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- copy_and_csum_skb(skb, nla_data(nla));
- else
- skb_copy_bits(skb, 0, nla_data(nla), skb->len);
+
+ skb_copy_and_csum_dev(skb, nla_data(nla));
return genlmsg_unicast(&init_net, user_skb, upcall_info->pid);
}
static int validate_sample(const struct nlattr *attr,
const struct sw_flow_key *key, int depth)
{
- static const struct nla_policy sample_policy[OVS_SAMPLE_ATTR_MAX + 1] =
- {
- [OVS_SAMPLE_ATTR_PROBABILITY] = {.type = NLA_U32 },
- [OVS_SAMPLE_ATTR_ACTIONS] = {.type = NLA_UNSPEC },
- };
- struct nlattr *a[OVS_SAMPLE_ATTR_MAX + 1];
- int error;
-
- error = nla_parse_nested(a, OVS_SAMPLE_ATTR_MAX, attr, sample_policy);
- if (error)
- return error;
+ const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1];
+ const struct nlattr *probability, *actions;
+ const struct nlattr *a;
+ int rem;
- if (!a[OVS_SAMPLE_ATTR_PROBABILITY])
+ memset(attrs, 0, sizeof(attrs));
+ nla_for_each_nested(a, attr, rem) {
+ int type = nla_type(a);
+ if (!type || type > OVS_SAMPLE_ATTR_MAX || attrs[type])
+ return -EINVAL;
+ attrs[type] = a;
+ }
+ if (rem)
return -EINVAL;
- if (!a[OVS_SAMPLE_ATTR_ACTIONS])
+
+ probability = attrs[OVS_SAMPLE_ATTR_PROBABILITY];
+ if (!probability || nla_len(probability) != sizeof(u32))
return -EINVAL;
- return validate_actions(a[OVS_SAMPLE_ATTR_ACTIONS], key, (depth + 1));
+ actions = attrs[OVS_SAMPLE_ATTR_ACTIONS];
+ if (!actions || (nla_len(actions) && nla_len(actions) < NLA_HDRLEN))
+ return -EINVAL;
+ return validate_actions(actions, key, depth + 1);
}
static int validate_action_key(const struct nlattr *a,
nla_len(ovs_key) != ovs_key_lens[key_type])
return -EINVAL;
-#define ACTION(act, key) ((act << 8) | key)
+#define ACTION(act, key) (((act) << 8) | (key))
- switch(ACTION(act_type, key_type)) {
+ switch (ACTION(act_type, key_type)) {
const struct ovs_key_ipv4 *ipv4_key;
const struct ovs_key_8021q *q_key;
+ case ACTION(OVS_ACTION_ATTR_SET, OVS_KEY_ATTR_PRIORITY):
case ACTION(OVS_ACTION_ATTR_SET, OVS_KEY_ATTR_TUN_ID):
case ACTION(OVS_ACTION_ATTR_SET, OVS_KEY_ATTR_ETHERNET):
- break;
+ break;
case ACTION(OVS_ACTION_ATTR_PUSH, OVS_KEY_ATTR_8021Q):
q_key = nla_data(ovs_key);
if (ipv4_key->ipv4_proto != flow_key->ip.proto)
return -EINVAL;
- if (ipv4_key->ipv4_tos & INET_ECN_MASK)
- return -EINVAL;
-
- if (ipv4_key->ipv4_frag !=
- (flow_key->ip.tos_frag & OVS_FRAG_TYPE_MASK))
+ if (ipv4_key->ipv4_frag != flow_key->ip.frag)
return -EINVAL;
break;
static int validate_userspace(const struct nlattr *attr)
{
- static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] =
- {
+ static const struct nla_policy userspace_policy[OVS_USERSPACE_ATTR_MAX + 1] = {
[OVS_USERSPACE_ATTR_PID] = {.type = NLA_U32 },
[OVS_USERSPACE_ATTR_USERDATA] = {.type = NLA_U64 },
};
struct nlattr *a[OVS_USERSPACE_ATTR_MAX + 1];
int error;
- error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX, attr, userspace_policy);
+ error = nla_parse_nested(a, OVS_USERSPACE_ATTR_MAX,
+ attr, userspace_policy);
if (error)
return error;
- if (!a[OVS_USERSPACE_ATTR_PID] || !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
+ if (!a[OVS_USERSPACE_ATTR_PID] ||
+ !nla_get_u32(a[OVS_USERSPACE_ATTR_PID]))
return -EINVAL;
return 0;
[OVS_ACTION_ATTR_PUSH] = (u32)-1,
[OVS_ACTION_ATTR_POP] = 2,
[OVS_ACTION_ATTR_SET] = (u32)-1,
- [OVS_ACTION_ATTR_SET_PRIORITY] = 4,
- [OVS_ACTION_ATTR_POP_PRIORITY] = 0,
[OVS_ACTION_ATTR_SAMPLE] = (u32)-1
};
int type = nla_type(a);
case OVS_ACTION_ATTR_UNSPEC:
return -EINVAL;
- case OVS_ACTION_ATTR_SET_PRIORITY:
- case OVS_ACTION_ATTR_POP_PRIORITY:
- /* No validation needed. */
- break;
-
case OVS_ACTION_ATTR_USERSPACE:
err = validate_userspace(a);
if (err)
if (err)
goto err_flow_put;
- err = flow_metadata_from_nlattrs(&flow->key.eth.in_port,
- &flow->key.eth.tun_id,
+ err = flow_metadata_from_nlattrs(&flow->key.phy.priority,
+ &flow->key.phy.in_port,
+ &flow->key.phy.tun_id,
a[OVS_PACKET_ATTR_KEY]);
if (err)
goto err_flow_put;
rcu_assign_pointer(flow->sf_acts, acts);
OVS_CB(packet)->flow = flow;
+ packet->priority = flow->key.phy.priority;
rcu_read_lock();
dp = get_dp(ovs_header->dp_ifindex);
if (!dp)
goto err_unlock;
- if (flow->key.eth.in_port < DP_MAX_PORTS)
- OVS_CB(packet)->vport = get_vport_protected(dp,
- flow->key.eth.in_port);
-
local_bh_disable();
err = execute_actions(dp, packet);
local_bh_enable();
.id = GENL_ID_GENERATE,
.hdrsize = sizeof(struct ovs_header),
.name = OVS_FLOW_FAMILY,
- .version = 1,
+ .version = OVS_FLOW_VERSION,
.maxattr = OVS_FLOW_ATTR_MAX
};
/* Called with genl_lock. */
static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
- struct sk_buff *skb, u32 pid, u32 seq, u32 flags, u8 cmd)
+ struct sk_buff *skb, u32 pid,
+ u32 seq, u32 flags, u8 cmd)
{
const int skb_orig_len = skb->len;
const struct sw_flow_actions *sf_acts;
NLA_PUT_U64(skb, OVS_FLOW_ATTR_USED, flow_used_time(used));
if (stats.n_packets)
- NLA_PUT(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats);
+ NLA_PUT(skb, OVS_FLOW_ATTR_STATS,
+ sizeof(struct ovs_flow_stats), &stats);
if (tcp_flags)
NLA_PUT_U8(skb, OVS_FLOW_ATTR_TCP_FLAGS, tcp_flags);
sf_acts = rcu_dereference_protected(flow->sf_acts,
lockdep_genl_is_held());
- len = nla_total_size(FLOW_BUFSIZE); /* OVS_FLOW_ATTR_KEY */
- len += nla_total_size(sf_acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
- len += nla_total_size(sizeof(struct ovs_flow_stats)); /* OVS_FLOW_ATTR_STATS */
- len += nla_total_size(1); /* OVS_FLOW_ATTR_TCP_FLAGS */
- len += nla_total_size(8); /* OVS_FLOW_ATTR_USED */
- return genlmsg_new(NLMSG_ALIGN(sizeof(struct ovs_header)) + len, GFP_KERNEL);
+ /* OVS_FLOW_ATTR_KEY */
+ len = nla_total_size(FLOW_BUFSIZE);
+ /* OVS_FLOW_ATTR_ACTIONS */
+ len += nla_total_size(sf_acts->actions_len);
+ /* OVS_FLOW_ATTR_STATS */
+ len += nla_total_size(sizeof(struct ovs_flow_stats));
+ /* OVS_FLOW_ATTR_TCP_FLAGS */
+ len += nla_total_size(1);
+ /* OVS_FLOW_ATTR_USED */
+ len += nla_total_size(8);
+
+ len += NLMSG_ALIGN(sizeof(struct ovs_header));
+
+ return genlmsg_new(len, GFP_KERNEL);
}
-static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow, struct datapath *dp,
+static struct sk_buff *ovs_flow_cmd_build_info(struct sw_flow *flow,
+ struct datapath *dp,
u32 pid, u32 seq, u8 cmd)
{
struct sk_buff *skb;
flow_tbl_insert(table, flow);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
- info->snd_seq, OVS_FLOW_CMD_NEW);
+ info->snd_seq,
+ OVS_FLOW_CMD_NEW);
} else {
/* We found a matching flow. */
struct sw_flow_actions *old_acts;
+ struct nlattr *acts_attrs;
/* Bail out if we're not allowed to modify an existing flow.
* We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
/* Update actions. */
old_acts = rcu_dereference_protected(flow->sf_acts,
lockdep_genl_is_held());
- if (a[OVS_FLOW_ATTR_ACTIONS] &&
- (old_acts->actions_len != nla_len(a[OVS_FLOW_ATTR_ACTIONS]) ||
- memcmp(old_acts->actions, nla_data(a[OVS_FLOW_ATTR_ACTIONS]),
- old_acts->actions_len))) {
+ acts_attrs = a[OVS_FLOW_ATTR_ACTIONS];
+ if (acts_attrs &&
+ (old_acts->actions_len != nla_len(acts_attrs) ||
+ memcmp(old_acts->actions, nla_data(acts_attrs),
+ old_acts->actions_len))) {
struct sw_flow_actions *new_acts;
- new_acts = flow_actions_alloc(a[OVS_FLOW_ATTR_ACTIONS]);
+ new_acts = flow_actions_alloc(acts_attrs);
error = PTR_ERR(new_acts);
if (IS_ERR(new_acts))
goto error;
}
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
- info->snd_seq, OVS_FLOW_CMD_NEW);
+ info->snd_seq, OVS_FLOW_CMD_NEW);
/* Clear stats. */
if (a[OVS_FLOW_ATTR_CLEAR]) {
if (!IS_ERR(reply))
genl_notify(reply, genl_info_net(info), info->snd_pid,
- dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
+ dp_flow_multicast_group.id, info->nlhdr, GFP_KERNEL);
else
netlink_set_err(INIT_NET_GENL_SOCK, 0,
dp_flow_multicast_group.id, PTR_ERR(reply));
if (!flow)
return -ENOENT;
- reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid, info->snd_seq, OVS_FLOW_CMD_NEW);
+ reply = ovs_flow_cmd_build_info(flow, dp, info->snd_pid,
+ info->snd_seq, OVS_FLOW_CMD_NEW);
if (IS_ERR(reply))
return PTR_ERR(reply);
dp = get_dp(ovs_header->dp_ifindex);
if (!dp)
- return -ENODEV;
+ return -ENODEV;
table = get_table_protected(dp);
flow = flow_tbl_lookup(table, &key, key_len);
if (!flow)
break;
- if (ovs_flow_cmd_fill_info(flow, dp, skb, NETLINK_CB(cb->skb).pid,
+ if (ovs_flow_cmd_fill_info(flow, dp, skb,
+ NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
OVS_FLOW_CMD_NEW) < 0)
break;
.id = GENL_ID_GENERATE,
.hdrsize = sizeof(struct ovs_header),
.name = OVS_DATAPATH_FAMILY,
- .version = 1,
+ .version = OVS_DATAPATH_VERSION,
.maxattr = OVS_DP_ATTR_MAX
};
}
/* Called with genl_mutex and optionally with RTNL lock also. */
-static struct datapath *lookup_datapath(struct ovs_header *ovs_header, struct nlattr *a[OVS_DP_ATTR_MAX + 1])
+static struct datapath *lookup_datapath(struct ovs_header *ovs_header,
+ struct nlattr *a[OVS_DP_ATTR_MAX + 1])
{
struct datapath *dp;
goto err_destroy_percpu;
}
- reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_NEW);
err = PTR_ERR(reply);
if (IS_ERR(reply))
goto err_destroy_local_port;
if (IS_ERR(dp))
goto exit_unlock;
- reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_DEL);
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_DEL);
err = PTR_ERR(reply);
if (IS_ERR(reply))
goto exit_unlock;
- list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
+ list_for_each_entry_safe(vport, next_vport, &dp->port_list, node)
if (vport->port_no != OVSP_LOCAL)
dp_detach_port(vport);
if (IS_ERR(dp))
return PTR_ERR(dp);
- reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_NEW);
if (IS_ERR(reply)) {
err = PTR_ERR(reply);
netlink_set_err(INIT_NET_GENL_SOCK, 0,
if (IS_ERR(dp))
return PTR_ERR(dp);
- reply = ovs_dp_cmd_build_info(dp, info->snd_pid, info->snd_seq, OVS_DP_CMD_NEW);
+ reply = ovs_dp_cmd_build_info(dp, info->snd_pid,
+ info->snd_seq, OVS_DP_CMD_NEW);
if (IS_ERR(reply))
return PTR_ERR(reply);
int skip = cb->args[0];
int i = 0;
- list_for_each_entry (dp, &dps, list_node) {
+ list_for_each_entry(dp, &dps, list_node) {
if (i < skip)
continue;
if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
.id = GENL_ID_GENERATE,
.hdrsize = sizeof(struct ovs_header),
.name = OVS_VPORT_FAMILY,
- .version = 1,
+ .version = OVS_VPORT_VERSION,
.maxattr = OVS_VPORT_ATTR_MAX
};
ovs_header->dp_ifindex = get_dpifindex(vport->dp);
NLA_PUT_U32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no);
- NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport_get_type(vport));
- NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport_get_name(vport));
+ NLA_PUT_U32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type);
+ NLA_PUT_STRING(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport));
NLA_PUT_U32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_pid);
- nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats));
+ nla = nla_reserve(skb, OVS_VPORT_ATTR_STATS,
+ sizeof(struct ovs_vport_stats));
if (!nla)
goto nla_put_failure;
vport_get_stats(vport, nla_data(nla));
- NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));
+ NLA_PUT(skb, OVS_VPORT_ATTR_ADDRESS, ETH_ALEN,
+ vport->ops->get_addr(vport));
err = vport_get_options(vport, skb);
if (err == -EMSGSIZE)
}
/* Called with RTNL lock. */
-static int change_vport(struct vport *vport, struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
+static int change_vport(struct vport *vport,
+ struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
{
int err = 0;
if (IS_ERR(vport))
goto exit_unlock;
- dp_sysfs_add_if(vport);
+ dp_sysfs_add_if(vport);
err = change_vport(vport, a);
if (!err) {
reply = ovs_vport_cmd_build_info(vport, info->snd_pid,
- info->snd_seq, OVS_VPORT_CMD_NEW);
+ info->snd_seq,
+ OVS_VPORT_CMD_NEW);
if (IS_ERR(reply))
err = PTR_ERR(reply);
}
goto exit_unlock;
err = 0;
- if (a[OVS_VPORT_ATTR_TYPE] && nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport_get_type(vport))
+ if (a[OVS_VPORT_ATTR_TYPE] &&
+ nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)
err = -EINVAL;
+
if (!err && a[OVS_VPORT_ATTR_OPTIONS])
err = vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
if (!err)
BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));
- printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);
+ pr_info("Open vSwitch %s, built "__DATE__" "__TIME__"\n",
+ VERSION BUILDNR);
err = tnl_init();
if (err)