/*
 * Copyright (c) 2007, 2008, 2009, 2010, 2011 Nicira Networks.
 * Distributed under the terms of the GNU GPL version 2.
 *
 * Significant portions of this file may be copied from parts of the Linux
 * kernel, by Linus Torvalds and others.
 */

/* Functions for managing the dp interface/device. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/etherdevice.h>
#include <linux/genetlink.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/version.h>
#include <linux/ethtool.h>
#include <linux/wait.h>
#include <asm/system.h>
#include <asm/div64.h>
#include <linux/highmem.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/inetdevice.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/dmi.h>
#include <net/inet_ecn.h>
#include <net/genetlink.h>
#include <linux/compat.h>

#include "openvswitch/datapath-protocol.h"
#include "loop_counter.h"
#include "vport-internal_dev.h"

int (*dp_ioctl_hook)(struct net_device *dev, struct ifreq *rq, int cmd);
EXPORT_SYMBOL(dp_ioctl_hook);
/*
 * Writes to device state (add/remove datapath, port, set operations on vports,
 * etc.) are protected by RTNL.
 *
 * Writes to other state (flow table modifications, set miscellaneous datapath
 * parameters such as drop frags, etc.) are protected by genl_mutex.  The RTNL
 * lock nests inside genl_mutex.
 *
 * Reads are protected by RCU.
 *
 * There are a few special cases (mostly stats) that have their own
 * synchronization but they nest under all of the above and don't interact with
 * each other.
 */
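/* Illustrative reader pattern (a sketch only, not code used elsewhere in
 * this file): a read-side lookup takes neither mutex, only RCU, e.g.
 *
 *	rcu_read_lock();
 *	dp = get_dp(dp_idx);
 *	if (dp)
 *		... read-only use of dp ...
 *	rcu_read_unlock();
 */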
/* Protected by genl_mutex. */
static struct datapath __rcu *dps[256];

static struct vport *new_vport(const struct vport_parms *);

/* Must be called with rcu_read_lock, genl_mutex, or RTNL lock. */
struct datapath *get_dp(int dp_idx)
{
	if (dp_idx < 0 || dp_idx >= ARRAY_SIZE(dps))
		return NULL;

	return rcu_dereference_check(dps[dp_idx], rcu_read_lock_held() ||
					lockdep_rtnl_is_held() ||
					lockdep_genl_is_held());
}
EXPORT_SYMBOL_GPL(get_dp);
/* Must be called with genl_mutex. */
static struct tbl *get_table_protected(struct datapath *dp)
{
	return rcu_dereference_protected(dp->table, lockdep_genl_is_held());
}

/* Must be called with rcu_read_lock or RTNL lock. */
static struct vport *get_vport_protected(struct datapath *dp, u16 port_no)
{
	return rcu_dereference_rtnl(dp->ports[port_no]);
}

/* Must be called with rcu_read_lock or RTNL lock. */
const char *dp_name(const struct datapath *dp)
{
	return vport_get_name(rcu_dereference_rtnl(dp->ports[ODPP_LOCAL]));
}
static inline size_t br_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
	       + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
	       + nla_total_size(4) /* IFLA_MASTER */
	       + nla_total_size(4) /* IFLA_MTU */
	       + nla_total_size(4) /* IFLA_LINK */
	       + nla_total_size(1); /* IFLA_OPERSTATE */
}
/* Caller must hold RTNL lock. */
static int dp_fill_ifinfo(struct sk_buff *skb,
			  const struct vport *port,
			  int event, unsigned int flags)
{
	struct datapath *dp = port->dp;
	int ifindex = vport_get_ifindex(port);
	int iflink = vport_get_iflink(port);
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->ifi_type = ARPHRD_ETHER;
	hdr->ifi_index = ifindex;
	hdr->ifi_flags = vport_get_flags(port);

	NLA_PUT_STRING(skb, IFLA_IFNAME, vport_get_name(port));
	NLA_PUT_U32(skb, IFLA_MASTER,
		    vport_get_ifindex(get_vport_protected(dp, ODPP_LOCAL)));
	NLA_PUT_U32(skb, IFLA_MTU, vport_get_mtu(port));
#ifdef IFLA_OPERSTATE
	NLA_PUT_U8(skb, IFLA_OPERSTATE,
		   vport_is_running(port)
			? vport_get_operstate(port)
			: IF_OPER_DOWN);
#endif

	NLA_PUT(skb, IFLA_ADDRESS, ETH_ALEN, vport_get_addr(port));

	if (ifindex != iflink)
		NLA_PUT_U32(skb, IFLA_LINK, iflink);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* Caller must hold RTNL lock. */
static void dp_ifinfo_notify(int event, struct vport *port)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(br_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dp_fill_ifinfo(skb, port, event, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, &init_net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(&init_net, RTNLGRP_LINK, err);
}
static void release_dp(struct kobject *kobj)
{
	struct datapath *dp = container_of(kobj, struct datapath, ifobj);

	kfree(dp);
}

static struct kobj_type dp_ktype = {
	.release = release_dp
};

static void destroy_dp_rcu(struct rcu_head *rcu)
{
	struct datapath *dp = container_of(rcu, struct datapath, rcu);

	tbl_destroy((struct tbl __force *)dp->table, flow_free_tbl);
	free_percpu(dp->stats_percpu);
	kobject_put(&dp->ifobj);
}
/* Called with RTNL lock and genl_lock. */
static struct vport *new_vport(const struct vport_parms *parms)
{
	struct vport *vport;

	vport = vport_add(parms);
	if (!IS_ERR(vport)) {
		struct datapath *dp = parms->dp;

		rcu_assign_pointer(dp->ports[parms->port_no], vport);
		list_add(&vport->node, &dp->port_list);

		dp_ifinfo_notify(RTM_NEWLINK, vport);
	}

	return vport;
}
/* Called with RTNL lock. */
int dp_detach_port(struct vport *p)
{
	if (p->port_no != ODPP_LOCAL)
		dp_sysfs_del_if(p);
	dp_ifinfo_notify(RTM_DELLINK, p);

	/* First drop references to device. */
	list_del(&p->node);
	rcu_assign_pointer(p->dp->ports[p->port_no], NULL);

	/* Then destroy it. */
	return vport_del(p);
}
/* Must be called with rcu_read_lock. */
void dp_process_received_packet(struct vport *p, struct sk_buff *skb)
{
	struct datapath *dp = p->dp;
	struct dp_stats_percpu *stats;
	int stats_counter_off;
	struct sw_flow_actions *acts;
	struct loop_counter *loop;
	int error;

	OVS_CB(skb)->vport = p;

	if (!OVS_CB(skb)->flow) {
		struct sw_flow_key key;
		struct tbl_node *flow_node;
		bool is_frag;

		/* Extract flow from 'skb' into 'key'. */
		error = flow_extract(skb, p->port_no, &key, &is_frag);
		if (unlikely(error)) {
			kfree_skb(skb);
			return;
		}

		if (is_frag && dp->drop_frags) {
			kfree_skb(skb);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_frags);
			goto out;
		}

		/* Look up flow. */
		flow_node = tbl_lookup(rcu_dereference(dp->table), &key,
				       flow_hash(&key), flow_cmp);
		if (unlikely(!flow_node)) {
			struct dp_upcall_info upcall;

			upcall.cmd = ODP_PACKET_CMD_MISS;
			upcall.key = &key;
			upcall.userdata = 0;
			upcall.sample_pool = 0;
			upcall.actions = NULL;
			upcall.actions_len = 0;
			dp_upcall(dp, skb, &upcall);
			stats_counter_off = offsetof(struct dp_stats_percpu, n_missed);
			goto out;
		}

		OVS_CB(skb)->flow = flow_cast(flow_node);
	}

	stats_counter_off = offsetof(struct dp_stats_percpu, n_hit);
	flow_used(OVS_CB(skb)->flow, skb);

	acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);

	/* Check whether we've looped too much. */
	loop = loop_get_counter();
	if (unlikely(++loop->count > MAX_LOOPS))
		loop->looping = true;
	if (unlikely(loop->looping)) {
		loop_suppress(dp, acts);
		kfree_skb(skb);
		goto out_loop;
	}

	/* Execute actions. */
	execute_actions(dp, skb, &OVS_CB(skb)->flow->key, acts->actions,
			acts->actions_len);

	/* Check whether sub-actions looped too much. */
	if (unlikely(loop->looping))
		loop_suppress(dp, acts);

out_loop:
	/* Decrement loop counter. */
	if (!--loop->count)
		loop->looping = false;
	loop_put_counter();

out:
	/* Update datapath statistics. */
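	/* The seqcount write section lets get_dp_stats() snapshot these
	 * 64-bit counters consistently (e.g. on 32-bit SMP) without the hot
	 * path taking a lock; stats_counter_off selects which u64 member of
	 * struct dp_stats_percpu to bump. */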
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	(*(u64 *)((u8 *)stats + stats_counter_off))++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();
}
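/* Copies 'skb' into 'to', filling in the L4 checksum on the way, for
 * packets that still have CHECKSUM_PARTIAL pending when they are queued
 * to userspace. */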
static void copy_and_csum_skb(struct sk_buff *skb, void *to)
{
	u16 csum_start, csum_offset;
	__wsum csum;

	get_skb_csum_pointers(skb, &csum_start, &csum_offset);
	csum_start -= skb_headroom(skb);
	BUG_ON(csum_start >= skb_headlen(skb));

	skb_copy_bits(skb, 0, to, csum_start);

	csum = skb_copy_and_csum_bits(skb, csum_start, to + csum_start,
				      skb->len - csum_start, 0);
	*(__sum16 *)(to + csum_start + csum_offset) = csum_fold(csum);
}
static struct genl_family dp_packet_genl_family;

#define PACKET_N_MC_GROUPS 16

static int packet_mc_group(struct datapath *dp, u8 cmd)
{
	BUILD_BUG_ON_NOT_POWER_OF_2(PACKET_N_MC_GROUPS);
	return jhash_2words(dp->dp_idx, cmd, 0) & (PACKET_N_MC_GROUPS - 1);
}
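/* For example, all ODP_PACKET_CMD_MISS upcalls for a given datapath land
 * in group jhash_2words(dp_idx, ODP_PACKET_CMD_MISS, 0) & 15.  Userspace
 * need not recompute this hash: it reads the actual group numbers from
 * the ODP_DP_ATTR_MCGROUPS nest that copy_datapath_to_user() fills in. */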
/* Send each packet in the 'skb' list to userspace for 'dp' as directed by
 * 'upcall_info'.  There will be only one packet unless we broke up a GSO
 * packet in dp_upcall(). */
static int queue_control_packets(struct datapath *dp, struct sk_buff *skb,
				 const struct dp_upcall_info *upcall_info)
{
	u32 group = packet_mc_group(dp, upcall_info->cmd);
	struct sk_buff *nskb;
	int port_no;
	int err;

	if (OVS_CB(skb)->vport)
		port_no = OVS_CB(skb)->vport->port_no;
	else
		port_no = ODPP_LOCAL;

	do {
		struct odp_header *upcall;
		struct sk_buff *user_skb; /* to be queued to userspace */
		struct nlattr *nla;
		unsigned int len;

		nskb = skb->next;
		skb->next = NULL;

		len = sizeof(struct odp_header);
		len += nla_total_size(4); /* ODP_PACKET_ATTR_TYPE. */
		len += nla_total_size(skb->len);
		len += nla_total_size(FLOW_BUFSIZE);
		if (upcall_info->userdata)
			len += nla_total_size(8);
		if (upcall_info->sample_pool)
			len += nla_total_size(4);
		if (upcall_info->actions_len)
			len += nla_total_size(upcall_info->actions_len);

		user_skb = genlmsg_new(len, GFP_ATOMIC);
		if (!user_skb) {
			netlink_set_err(INIT_NET_GENL_SOCK, 0, group, -ENOBUFS);
			goto err_kfree_skbs;
		}

		upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family, 0, upcall_info->cmd);
		upcall->dp_idx = dp->dp_idx;

		nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_KEY);
		flow_to_nlattrs(upcall_info->key, user_skb);
		nla_nest_end(user_skb, nla);

		if (upcall_info->userdata)
			nla_put_u64(user_skb, ODP_PACKET_ATTR_USERDATA, upcall_info->userdata);
		if (upcall_info->sample_pool)
			nla_put_u32(user_skb, ODP_PACKET_ATTR_SAMPLE_POOL, upcall_info->sample_pool);
		if (upcall_info->actions_len) {
			const struct nlattr *actions = upcall_info->actions;
			u32 actions_len = upcall_info->actions_len;

			nla = nla_nest_start(user_skb, ODP_PACKET_ATTR_ACTIONS);
			memcpy(__skb_put(user_skb, actions_len), actions, actions_len);
			nla_nest_end(user_skb, nla);
		}

		nla = __nla_reserve(user_skb, ODP_PACKET_ATTR_PACKET, skb->len);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			copy_and_csum_skb(skb, nla_data(nla));
		else
			skb_copy_bits(skb, 0, nla_data(nla), skb->len);

		err = genlmsg_multicast(user_skb, 0, group, GFP_ATOMIC);
		if (err)
			goto err_kfree_skbs;

		kfree_skb(skb);
		skb = nskb;
	} while (skb);

	return 0;

err_kfree_skbs:
	kfree_skb(skb);
	while ((skb = nskb) != NULL) {
		nskb = skb->next;
		kfree_skb(skb);
	}
	return err;
}
/* Generic Netlink multicast groups for upcalls.
 *
 * We really want three unique multicast groups per datapath, but we can't even
 * get one, because genl_register_mc_group() takes genl_lock, which is also
 * held during Generic Netlink message processing, so trying to acquire
 * multicast groups during ODP_DP_NEW processing deadlocks.  Instead, we
 * preallocate a few groups and use them round-robin for datapaths.  Collision
 * isn't fatal--multicast listeners should check that the family is the one
 * that they want and discard others--but it wastes time and memory to receive
 * unwanted messages.
 */
static struct genl_multicast_group packet_mc_groups[PACKET_N_MC_GROUPS];

static struct genl_family dp_packet_genl_family = {
	.id = GENL_ID_GENERATE,
	.hdrsize = sizeof(struct odp_header),
	.name = ODP_PACKET_FAMILY,
	.maxattr = ODP_PACKET_ATTR_MAX
};
static int packet_register_mc_groups(void)
{
	int i;

	for (i = 0; i < PACKET_N_MC_GROUPS; i++) {
		struct genl_multicast_group *group = &packet_mc_groups[i];
		int error;

		sprintf(group->name, "packet%d", i);
		error = genl_register_mc_group(&dp_packet_genl_family, group);
		if (error)
			return error;
	}
	return 0;
}
int dp_upcall(struct datapath *dp, struct sk_buff *skb, const struct dp_upcall_info *upcall_info)
{
	struct dp_stats_percpu *stats;
	int err;

	WARN_ON_ONCE(skb_shared(skb));

	forward_ip_summed(skb);

	err = vswitch_skb_checksum_setup(skb);
	if (err)
		goto err_kfree_skb;

	/* Break apart GSO packets into their component pieces.  Otherwise
	 * userspace may try to stuff a 64kB packet into a 1500-byte MTU. */
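	/* skb_gso_segment() hands the segments back as a list chained
	 * through skb->next; queue_control_packets() walks and frees that
	 * list one message at a time. */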
	if (skb_is_gso(skb)) {
		struct sk_buff *nskb = skb_gso_segment(skb, NETIF_F_SG | NETIF_F_HW_CSUM);

		kfree_skb(skb);
		skb = nskb;
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto err;
		}
	}

	return queue_control_packets(dp, skb, upcall_info);

err_kfree_skb:
	kfree_skb(skb);
err:
	local_bh_disable();
	stats = per_cpu_ptr(dp->stats_percpu, smp_processor_id());

	write_seqcount_begin(&stats->seqlock);
	stats->n_lost++;
	write_seqcount_end(&stats->seqlock);

	local_bh_enable();

	return err;
}
/* Called with genl_mutex. */
static int flush_flows(int dp_idx)
{
	struct tbl *old_table;
	struct tbl *new_table;
	struct datapath *dp;

	dp = get_dp(dp_idx);
	if (!dp)
		return -ENODEV;

	old_table = get_table_protected(dp);
	new_table = tbl_create(TBL_MIN_BUCKETS);
	if (!new_table)
		return -ENOMEM;

	rcu_assign_pointer(dp->table, new_table);

	tbl_deferred_destroy(old_table, flow_free_tbl);

	return 0;
}
static int validate_actions(const struct nlattr *actions, u32 actions_len)
{
	const struct nlattr *a;
	int rem;

	nla_for_each_attr(a, actions, actions_len, rem) {
		static const u32 action_lens[ODPAT_MAX + 1] = {
			[ODPAT_OUTPUT] = 4,
			[ODPAT_CONTROLLER] = 8,
			[ODPAT_SET_DL_TCI] = 2,
			[ODPAT_STRIP_VLAN] = 0,
			[ODPAT_SET_DL_SRC] = ETH_ALEN,
			[ODPAT_SET_DL_DST] = ETH_ALEN,
			[ODPAT_SET_NW_SRC] = 4,
			[ODPAT_SET_NW_DST] = 4,
			[ODPAT_SET_NW_TOS] = 1,
			[ODPAT_SET_TP_SRC] = 2,
			[ODPAT_SET_TP_DST] = 2,
			[ODPAT_SET_TUNNEL] = 8,
			[ODPAT_SET_PRIORITY] = 4,
			[ODPAT_POP_PRIORITY] = 0,
			[ODPAT_DROP_SPOOFED_ARP] = 0,
		};
		int type = nla_type(a);

		if (type > ODPAT_MAX || nla_len(a) != action_lens[type])
			return -EINVAL;

		switch (type) {
		case ODPAT_CONTROLLER:
		case ODPAT_STRIP_VLAN:
		case ODPAT_SET_DL_SRC:
		case ODPAT_SET_DL_DST:
		case ODPAT_SET_NW_SRC:
		case ODPAT_SET_NW_DST:
		case ODPAT_SET_TP_SRC:
		case ODPAT_SET_TP_DST:
		case ODPAT_SET_TUNNEL:
		case ODPAT_SET_PRIORITY:
		case ODPAT_POP_PRIORITY:
		case ODPAT_DROP_SPOOFED_ARP:
			/* No validation needed. */
			break;

		case ODPAT_OUTPUT:
			if (nla_get_u32(a) >= DP_MAX_PORTS)
				return -EINVAL;
			break;

		case ODPAT_SET_DL_TCI:
			if (nla_get_be16(a) & htons(VLAN_CFI_MASK))
				return -EINVAL;
			break;

		case ODPAT_SET_NW_TOS:
			if (nla_get_u8(a) & INET_ECN_MASK)
				return -EINVAL;
			break;
		}
	}

	return 0;
}
struct dp_flowcmd {
	u32 nlmsg_flags;
	u32 dp_idx;
	u32 total_len;
	struct sw_flow_key key;
	const struct nlattr *actions;
	u32 actions_len;
	bool clear;
	u64 state;
};

static struct sw_flow_actions *get_actions(const struct dp_flowcmd *flowcmd)
{
	struct sw_flow_actions *actions;

	actions = flow_actions_alloc(flowcmd->actions_len);
	if (!IS_ERR(actions) && flowcmd->actions_len)
		memcpy(actions->actions, flowcmd->actions, flowcmd->actions_len);
	return actions;
}

static void clear_stats(struct sw_flow *flow)
{
	flow->used = 0;
	flow->tcp_flags = 0;
	flow->packet_count = 0;
	flow->byte_count = 0;
}
/* Called with genl_mutex. */
static int expand_table(struct datapath *dp)
{
	struct tbl *old_table = get_table_protected(dp);
	struct tbl *new_table;

	new_table = tbl_expand(old_table);
	if (IS_ERR(new_table))
		return PTR_ERR(new_table);

	rcu_assign_pointer(dp->table, new_table);
	tbl_deferred_destroy(old_table, NULL);

	return 0;
}
static int odp_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
{
	struct odp_header *odp_header = info->userhdr;
	struct nlattr **a = info->attrs;
	struct sk_buff *packet;
	unsigned int actions_len;
	struct nlattr *actions;
	struct sw_flow_key key;
	struct datapath *dp;
	struct ethhdr *eth;
	bool is_frag;
	int err;

	err = -EINVAL;
	if (!a[ODP_PACKET_ATTR_PACKET] || !a[ODP_PACKET_ATTR_ACTIONS] ||
	    nla_len(a[ODP_PACKET_ATTR_PACKET]) < ETH_HLEN)
		goto exit;

	actions = nla_data(a[ODP_PACKET_ATTR_ACTIONS]);
	actions_len = nla_len(a[ODP_PACKET_ATTR_ACTIONS]);
	err = validate_actions(actions, actions_len);
	if (err)
		goto exit;

	packet = skb_clone(skb, GFP_KERNEL);
	err = -ENOMEM;
	if (!packet)
		goto exit;
	packet->data = nla_data(a[ODP_PACKET_ATTR_PACKET]);
	packet->len = nla_len(a[ODP_PACKET_ATTR_PACKET]);

	skb_reset_mac_header(packet);
	eth = eth_hdr(packet);

	/* Normally, setting the skb 'protocol' field would be handled by a
	 * call to eth_type_trans(), but it assumes there's a sending
	 * device, which we may not have. */
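	/* 1536 (0x600) is the IEEE 802.3 cutoff: an h_proto at or above it
	 * is an EtherType, anything below it is an 802.3 frame length. */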
	if (ntohs(eth->h_proto) >= 1536)
		packet->protocol = eth->h_proto;
	else
		packet->protocol = htons(ETH_P_802_2);

	err = flow_extract(packet, -1, &key, &is_frag);
	if (err)
		goto exit;

	rcu_read_lock();
	dp = get_dp(odp_header->dp_idx);
	err = -ENODEV;
	if (dp)
		err = execute_actions(dp, packet, &key, actions, actions_len);
	rcu_read_unlock();

exit:
	return err;
}
static const struct nla_policy packet_policy[ODP_PACKET_ATTR_MAX + 1] = {
	[ODP_PACKET_ATTR_PACKET] = { .type = NLA_UNSPEC },
	[ODP_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
};

static struct genl_ops dp_packet_genl_ops[] = {
	{ .cmd = ODP_PACKET_CMD_EXECUTE,
	  .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
	  .policy = packet_policy,
	  .doit = odp_packet_cmd_execute
	}
};
static void get_dp_stats(struct datapath *dp, struct odp_stats *stats)
{
	int i;

	stats->n_frags = stats->n_hit = stats->n_missed = stats->n_lost = 0;
	for_each_possible_cpu(i) {
		const struct dp_stats_percpu *percpu_stats;
		struct dp_stats_percpu local_stats;
		unsigned seqcount;

		percpu_stats = per_cpu_ptr(dp->stats_percpu, i);

		do {
			seqcount = read_seqcount_begin(&percpu_stats->seqlock);
			local_stats = *percpu_stats;
		} while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
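		/* The retry loop above pairs with write_seqcount_begin()/end()
		 * in the packet paths: re-read until a snapshot completes with
		 * no concurrent writer, so the 64-bit counters are never seen
		 * torn and senders never have to block. */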
		stats->n_frags += local_stats.n_frags;
		stats->n_hit += local_stats.n_hit;
		stats->n_missed += local_stats.n_missed;
		stats->n_lost += local_stats.n_lost;
	}
}
/* MTU of the dp pseudo-device: ETH_DATA_LEN or the minimum of the ports.
 * Called with RTNL lock.
 */
int dp_min_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu = 0;

	list_for_each_entry (p, &dp->port_list, node) {
		int dev_mtu;

		/* Skip any internal ports, since that's what we're trying to
		 * size. */
		if (is_internal_vport(p))
			continue;

		dev_mtu = vport_get_mtu(p);
		if (!mtu || dev_mtu < mtu)
			mtu = dev_mtu;
	}

	return mtu ? mtu : ETH_DATA_LEN;
}
/* Sets the MTU of all datapath devices to the minimum of the ports.
 * Called with RTNL lock.
 */
void set_internal_devs_mtu(const struct datapath *dp)
{
	struct vport *p;
	int mtu;

	mtu = dp_min_mtu(dp);

	list_for_each_entry (p, &dp->port_list, node) {
		if (is_internal_vport(p))
			vport_set_mtu(p, mtu);
	}
}
static const struct nla_policy flow_policy[ODP_FLOW_ATTR_MAX + 1] = {
	[ODP_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
	[ODP_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
	[ODP_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
	[ODP_FLOW_ATTR_STATE] = { .type = NLA_U64 },
};
/* Called with genl_mutex. */
static int copy_flow_to_user(struct odp_flow __user *dst, struct datapath *dp,
			     struct sw_flow *flow, u32 total_len, u64 state)
{
	const struct sw_flow_actions *sf_acts;
	struct odp_flow_stats stats;
	struct odp_flow *odp_flow;
	struct sk_buff *skb;
	struct nlattr *nla;
	unsigned long used;
	u8 tcp_flags;
	int err;

	sf_acts = rcu_dereference_protected(flow->sf_acts,
					    lockdep_genl_is_held());

	skb = alloc_skb(128 + FLOW_BUFSIZE + sf_acts->actions_len, GFP_KERNEL);
	err = -ENOMEM;
	if (!skb)
		goto exit;

	odp_flow = (struct odp_flow *)__skb_put(skb, sizeof(struct odp_flow));
	odp_flow->dp_idx = dp->dp_idx;
	odp_flow->total_len = total_len;

	nla = nla_nest_start(skb, ODP_FLOW_ATTR_KEY);
	if (!nla)
		goto nla_put_failure;
	err = flow_to_nlattrs(&flow->key, skb);
	if (err)
		goto exit_free;
	nla_nest_end(skb, nla);

	nla = nla_nest_start(skb, ODP_FLOW_ATTR_ACTIONS);
	if (!nla || skb_tailroom(skb) < sf_acts->actions_len)
		goto nla_put_failure;
	memcpy(__skb_put(skb, sf_acts->actions_len), sf_acts->actions, sf_acts->actions_len);
	nla_nest_end(skb, nla);

	spin_lock_bh(&flow->lock);
	used = flow->used;
	stats.n_packets = flow->packet_count;
	stats.n_bytes = flow->byte_count;
	tcp_flags = flow->tcp_flags;
	spin_unlock_bh(&flow->lock);

	if (used)
		NLA_PUT_MSECS(skb, ODP_FLOW_ATTR_USED, used);

	if (stats.n_packets)
		NLA_PUT(skb, ODP_FLOW_ATTR_STATS, sizeof(struct odp_flow_stats), &stats);

	if (tcp_flags)
		NLA_PUT_U8(skb, ODP_FLOW_ATTR_TCP_FLAGS, tcp_flags);

	if (state)
		NLA_PUT_U64(skb, ODP_FLOW_ATTR_STATE, state);

	if (skb->len > total_len)
		goto nla_put_failure;

	odp_flow->len = skb->len;
	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
	goto exit_free;

nla_put_failure:
	err = -EMSGSIZE;
exit_free:
	kfree_skb(skb);
exit:
	return err;
}
/* Called with genl_mutex. */
static struct sk_buff *copy_flow_from_user(struct odp_flow __user *uodp_flow,
					   struct dp_flowcmd *flowcmd)
{
	struct nlattr *a[ODP_FLOW_ATTR_MAX + 1];
	struct odp_flow *odp_flow;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_flow->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_flow))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_flow, len))
		goto error_free_skb;

	odp_flow = (struct odp_flow *)skb->data;
	err = -EINVAL;
	if (odp_flow->len != len)
		goto error_free_skb;

	flowcmd->nlmsg_flags = odp_flow->nlmsg_flags;
	flowcmd->dp_idx = odp_flow->dp_idx;
	flowcmd->total_len = odp_flow->total_len;

	err = nla_parse(a, ODP_FLOW_ATTR_MAX,
			(struct nlattr *)(skb->data + sizeof(struct odp_flow)),
			skb->len - sizeof(struct odp_flow), flow_policy);
	if (err)
		goto error_free_skb;

	/* ODP_FLOW_ATTR_KEY. */
	if (a[ODP_FLOW_ATTR_KEY]) {
		err = flow_from_nlattrs(&flowcmd->key, a[ODP_FLOW_ATTR_KEY]);
		if (err)
			goto error_free_skb;
	} else
		memset(&flowcmd->key, 0, sizeof(struct sw_flow_key));

	/* ODP_FLOW_ATTR_ACTIONS. */
	if (a[ODP_FLOW_ATTR_ACTIONS]) {
		flowcmd->actions = nla_data(a[ODP_FLOW_ATTR_ACTIONS]);
		flowcmd->actions_len = nla_len(a[ODP_FLOW_ATTR_ACTIONS]);
		err = validate_actions(flowcmd->actions, flowcmd->actions_len);
		if (err)
			goto error_free_skb;
	} else {
		flowcmd->actions = NULL;
		flowcmd->actions_len = 0;
	}

	flowcmd->clear = a[ODP_FLOW_ATTR_CLEAR] != NULL;

	flowcmd->state = a[ODP_FLOW_ATTR_STATE] ? nla_get_u64(a[ODP_FLOW_ATTR_STATE]) : 0;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}
static int new_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
{
	struct tbl_node *flow_node;
	struct dp_flowcmd flowcmd;
	struct sw_flow *flow;
	struct sk_buff *skb;
	struct datapath *dp;
	struct tbl *table;
	u32 hash;
	int error;

	skb = copy_flow_from_user(uodp_flow, &flowcmd);
	error = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	dp = get_dp(flowcmd.dp_idx);
	error = -ENODEV;
	if (!dp)
		goto error_kfree_skb;

	hash = flow_hash(&flowcmd.key);
	table = get_table_protected(dp);
	flow_node = tbl_lookup(table, &flowcmd.key, hash, flow_cmp);
	if (!flow_node) {
		struct sw_flow_actions *acts;

		/* Bail out if we're not allowed to create a new flow. */
		error = -ENOENT;
		if (cmd == ODP_FLOW_SET)
			goto error_kfree_skb;

		/* Expand table, if necessary, to make room. */
		if (tbl_count(table) >= tbl_n_buckets(table)) {
			error = expand_table(dp);
			if (error)
				goto error_kfree_skb;
			table = get_table_protected(dp);
		}

		/* Allocate flow. */
		flow = flow_alloc();
		if (IS_ERR(flow)) {
			error = PTR_ERR(flow);
			goto error_kfree_skb;
		}
		flow->key = flowcmd.key;
		clear_stats(flow);

		/* Obtain actions. */
		acts = get_actions(&flowcmd);
		error = PTR_ERR(acts);
		if (IS_ERR(acts))
			goto error_free_flow;
		rcu_assign_pointer(flow->sf_acts, acts);

		error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
		if (error)
			goto error_free_flow;

		/* Put flow in bucket. */
		error = tbl_insert(table, &flow->tbl_node, hash);
		if (error)
			goto error_free_flow;
	} else {
		/* We found a matching flow. */
		struct sw_flow_actions *old_acts;

		/* Bail out if we're not allowed to modify an existing flow.
		 * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
		 * because Generic Netlink treats the latter as a dump
		 * request.  We also accept NLM_F_EXCL in case that bug ever
		 * gets fixed. */
		error = -EEXIST;
		if (flowcmd.nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
			goto error_kfree_skb;

		/* Update actions. */
		flow = flow_cast(flow_node);
		old_acts = rcu_dereference_protected(flow->sf_acts,
						     lockdep_genl_is_held());
		if (flowcmd.actions &&
		    (old_acts->actions_len != flowcmd.actions_len ||
		     memcmp(old_acts->actions, flowcmd.actions,
			    flowcmd.actions_len))) {
			struct sw_flow_actions *new_acts;

			new_acts = get_actions(&flowcmd);
			error = PTR_ERR(new_acts);
			if (IS_ERR(new_acts))
				goto error_kfree_skb;

			rcu_assign_pointer(flow->sf_acts, new_acts);
			flow_deferred_free_acts(old_acts);
		}

		error = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
		if (error)
			goto error_kfree_skb;

		/* Clear stats. */
		if (flowcmd.clear) {
			spin_lock_bh(&flow->lock);
			clear_stats(flow);
			spin_unlock_bh(&flow->lock);
		}
	}

	kfree_skb(skb);
	return 0;

error_free_flow:
	flow_put(flow);
error_kfree_skb:
	kfree_skb(skb);
exit:
	return error;
}
static int get_or_del_flow(unsigned int cmd, struct odp_flow __user *uodp_flow)
{
	struct tbl_node *flow_node;
	struct dp_flowcmd flowcmd;
	struct sw_flow *flow;
	struct sk_buff *skb;
	struct datapath *dp;
	struct tbl *table;
	int err;

	skb = copy_flow_from_user(uodp_flow, &flowcmd);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	dp = get_dp(flowcmd.dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_kfree_skb;

	table = get_table_protected(dp);
	flow_node = tbl_lookup(table, &flowcmd.key, flow_hash(&flowcmd.key), flow_cmp);
	err = -ENOENT;
	if (!flow_node)
		goto exit_kfree_skb;

	if (cmd == ODP_FLOW_DEL) {
		err = tbl_remove(table, flow_node);
		if (err)
			goto exit_kfree_skb;
	}

	flow = flow_cast(flow_node);
	err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len, 0);
	if (!err && cmd == ODP_FLOW_DEL)
		flow_deferred_free(flow);

exit_kfree_skb:
	kfree_skb(skb);
	return err;
}
static int dump_flow(struct odp_flow __user *uodp_flow)
{
	struct tbl_node *flow_node;
	struct dp_flowcmd flowcmd;
	struct sw_flow *flow;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 bucket, obj;
	int err;

	skb = copy_flow_from_user(uodp_flow, &flowcmd);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	dp = get_dp(flowcmd.dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_kfree_skb;
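	/* The 64-bit ODP_FLOW_ATTR_STATE token returned by the previous call
	 * packs the hash table bucket in its high 32 bits and the offset
	 * within that bucket in its low 32 bits, so each dump call resumes
	 * exactly where the last one stopped. */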
	bucket = flowcmd.state >> 32;
	obj = flowcmd.state;
	flow_node = tbl_next(get_table_protected(dp), &bucket, &obj);
	err = -ENOENT;
	if (!flow_node)
		goto exit_kfree_skb;

	flow = flow_cast(flow_node);
	err = copy_flow_to_user(uodp_flow, dp, flow, flowcmd.total_len,
				((u64)bucket << 32) | obj);

exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}
static const struct nla_policy datapath_policy[ODP_DP_ATTR_MAX + 1] = {
	[ODP_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[ODP_DP_ATTR_IPV4_FRAGS] = { .type = NLA_U32 },
	[ODP_DP_ATTR_SAMPLING] = { .type = NLA_U32 },
};
/* Called with genl_mutex. */
static int copy_datapath_to_user(void __user *dst, struct datapath *dp, uint32_t total_len)
{
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	struct nlattr *nla;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	err = -ENOMEM;
	if (!skb)
		goto exit;

	odp_datapath = (struct odp_datapath *)__skb_put(skb, sizeof(struct odp_datapath));
	odp_datapath->dp_idx = dp->dp_idx;
	odp_datapath->total_len = total_len;

	err = nla_put_string(skb, ODP_DP_ATTR_NAME, dp_name(dp));
	if (err)
		goto nla_put_failure;

	nla = nla_reserve(skb, ODP_DP_ATTR_STATS, sizeof(struct odp_stats));
	if (!nla)
		goto nla_put_failure;
	get_dp_stats(dp, nla_data(nla));

	NLA_PUT_U32(skb, ODP_DP_ATTR_IPV4_FRAGS,
		    dp->drop_frags ? ODP_DP_FRAG_DROP : ODP_DP_FRAG_ZERO);

	if (dp->sflow_probability)
		NLA_PUT_U32(skb, ODP_DP_ATTR_SAMPLING, dp->sflow_probability);

	nla = nla_nest_start(skb, ODP_DP_ATTR_MCGROUPS);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, ODP_PACKET_CMD_MISS, packet_mc_group(dp, ODP_PACKET_CMD_MISS));
	NLA_PUT_U32(skb, ODP_PACKET_CMD_ACTION, packet_mc_group(dp, ODP_PACKET_CMD_ACTION));
	NLA_PUT_U32(skb, ODP_PACKET_CMD_SAMPLE, packet_mc_group(dp, ODP_PACKET_CMD_SAMPLE));
	nla_nest_end(skb, nla);

	if (skb->len > total_len)
		goto nla_put_failure;

	odp_datapath->len = skb->len;
	err = copy_to_user(dst, skb->data, skb->len) ? -EFAULT : 0;
	goto exit_free_skb;

nla_put_failure:
	err = -EMSGSIZE;
exit_free_skb:
	kfree_skb(skb);
exit:
	return err;
}
/* Called with genl_mutex. */
static struct sk_buff *copy_datapath_from_user(struct odp_datapath __user *uodp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_datapath->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_datapath))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_datapath, len))
		goto error_free_skb;

	odp_datapath = (struct odp_datapath *)skb->data;
	err = -EINVAL;
	if (odp_datapath->len != len)
		goto error_free_skb;

	err = nla_parse(a, ODP_DP_ATTR_MAX,
			(struct nlattr *)(skb->data + sizeof(struct odp_datapath)),
			skb->len - sizeof(struct odp_datapath), datapath_policy);
	if (err)
		goto error_free_skb;

	if (a[ODP_DP_ATTR_IPV4_FRAGS]) {
		u32 frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]);

		err = -EINVAL;
		if (frags != ODP_DP_FRAG_ZERO && frags != ODP_DP_FRAG_DROP)
			goto error_free_skb;
	}

	err = VERIFY_NUL_STRING(a[ODP_DP_ATTR_NAME], IFNAMSIZ - 1);
	if (err)
		goto error_free_skb;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}
/* Called with genl_mutex and optionally with RTNL lock also. */
static struct datapath *lookup_datapath(struct odp_datapath *odp_datapath, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
	if (!a[ODP_DP_ATTR_NAME]) {
		struct datapath *dp = get_dp(odp_datapath->dp_idx);
		if (!dp)
			return ERR_PTR(-ENODEV);
		return dp;
	} else {
		struct vport *vport;
		int dp_idx;

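		/* A datapath is named after its local port: dp_name() reads
		 * the ODPP_LOCAL vport's name, so a reverse lookup by name
		 * only counts when the vport found is that local port. */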
		vport = vport_locate(nla_data(a[ODP_DP_ATTR_NAME]));
		dp_idx = vport && vport->port_no == ODPP_LOCAL ? vport->dp->dp_idx : -1;

		if (dp_idx < 0)
			return ERR_PTR(-ENODEV);
		return get_dp(dp_idx);
	}
}
/* Called with genl_mutex. */
static void change_datapath(struct datapath *dp, struct nlattr *a[ODP_DP_ATTR_MAX + 1])
{
	if (a[ODP_DP_ATTR_IPV4_FRAGS])
		dp->drop_frags = nla_get_u32(a[ODP_DP_ATTR_IPV4_FRAGS]) == ODP_DP_FRAG_DROP;
	if (a[ODP_DP_ATTR_SAMPLING])
		dp->sflow_probability = nla_get_u32(a[ODP_DP_ATTR_SAMPLING]);
}
static int new_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct vport_parms parms;
	struct sk_buff *skb;
	struct datapath *dp;
	struct vport *vport;
	int dp_idx;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto err;
	odp_datapath = (struct odp_datapath *)skb->data;

	err = -EINVAL;
	if (!a[ODP_DP_ATTR_NAME])
		goto err_free_skb;

	rtnl_lock();
	err = -ENODEV;
	if (!try_module_get(THIS_MODULE))
		goto err_unlock_rtnl;

	dp_idx = odp_datapath->dp_idx;
	if (dp_idx < 0) {
		err = -EFBIG;
		for (dp_idx = 0; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
			if (get_dp(dp_idx))
				continue;
			err = 0;
			break;
		}
	} else if (dp_idx < ARRAY_SIZE(dps))
		err = get_dp(dp_idx) ? -EBUSY : 0;
	else
		err = -EINVAL;
	if (err)
		goto err_put_module;

	err = -ENOMEM;
	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (dp == NULL)
		goto err_put_module;
	INIT_LIST_HEAD(&dp->port_list);
	dp->dp_idx = dp_idx;

	/* Initialize kobject for bridge.  This will be added as
	 * /sys/class/net/<devname>/brif later, if sysfs is enabled. */
	dp->ifobj.kset = NULL;
	kobject_init(&dp->ifobj, &dp_ktype);

	/* Allocate table. */
	err = -ENOMEM;
	rcu_assign_pointer(dp->table, tbl_create(TBL_MIN_BUCKETS));
	if (!dp->table)
		goto err_free_dp;

	/* Set up our datapath device. */
	parms.name = nla_data(a[ODP_DP_ATTR_NAME]);
	parms.type = ODP_VPORT_TYPE_INTERNAL;
	parms.options = NULL;
	parms.dp = dp;
	parms.port_no = ODPP_LOCAL;
	vport = new_vport(&parms);
	if (IS_ERR(vport)) {
		err = PTR_ERR(vport);
		if (err == -EBUSY)
			err = -EEXIST;

		goto err_destroy_table;
	}

	dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
	if (!dp->stats_percpu) {
		err = -ENOMEM;
		goto err_destroy_local_port;
	}

	change_datapath(dp, a);

	rcu_assign_pointer(dps[dp_idx], dp);
	dp_sysfs_add_dp(dp);

	rtnl_unlock();

	kfree_skb(skb);
	return 0;

err_destroy_local_port:
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));
err_destroy_table:
	tbl_destroy(get_table_protected(dp), NULL);
err_free_dp:
	kfree(dp);
err_put_module:
	module_put(THIS_MODULE);
err_unlock_rtnl:
	rtnl_unlock();
err_free_skb:
	kfree_skb(skb);
err:
	return err;
}
static int del_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct vport *vport, *next_vport;
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	dp = lookup_datapath((struct odp_datapath *)skb->data, a);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	list_for_each_entry_safe (vport, next_vport, &dp->port_list, node)
		if (vport->port_no != ODPP_LOCAL)
			dp_detach_port(vport);

	dp_sysfs_del_dp(dp);
	rcu_assign_pointer(dps[dp->dp_idx], NULL);
	dp_detach_port(get_vport_protected(dp, ODPP_LOCAL));

	call_rcu(&dp->rcu, destroy_dp_rcu);
	module_put(THIS_MODULE);

	err = 0;

exit_free:
	kfree_skb(skb);
	rtnl_unlock();
exit:
	return err;
}
static int set_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	dp = lookup_datapath((struct odp_datapath *)skb->data, a);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	change_datapath(dp, a);
	err = 0;

exit_free:
	kfree_skb(skb);
exit:
	return err;
}
static int get_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct datapath *dp;
	struct sk_buff *skb;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_datapath = (struct odp_datapath *)skb->data;

	dp = lookup_datapath(odp_datapath, a);
	err = PTR_ERR(dp);
	if (IS_ERR(dp))
		goto exit_free;

	err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);

exit_free:
	kfree_skb(skb);
exit:
	return err;
}
static int dump_datapath(struct odp_datapath __user *uodp_datapath)
{
	struct nlattr *a[ODP_DP_ATTR_MAX + 1];
	struct odp_datapath *odp_datapath;
	struct sk_buff *skb;
	u32 dp_idx;
	int err;

	skb = copy_datapath_from_user(uodp_datapath, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_datapath = (struct odp_datapath *)skb->data;

	err = -ENODEV;
	for (dp_idx = odp_datapath->dp_idx; dp_idx < ARRAY_SIZE(dps); dp_idx++) {
		struct datapath *dp = get_dp(dp_idx);
		if (!dp)
			continue;

		err = copy_datapath_to_user(uodp_datapath, dp, odp_datapath->total_len);
		break;
	}
	kfree_skb(skb);

exit:
	return err;
}
static const struct nla_policy vport_policy[ODP_VPORT_ATTR_MAX + 1] = {
	[ODP_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[ODP_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_STATS] = { .len = sizeof(struct rtnl_link_stats64) },
	[ODP_VPORT_ATTR_ADDRESS] = { .len = ETH_ALEN },
	[ODP_VPORT_ATTR_MTU] = { .type = NLA_U32 },
	[ODP_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
};
/* Called with RCU read lock. */
static struct sk_buff *odp_vport_build_info(struct vport *vport, uint32_t total_len)
{
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	struct nlattr *nla;
	int ifindex, iflink;
	int err;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
	err = -ENOMEM;
	if (!skb)
		goto err;

	odp_vport = (struct odp_vport *)__skb_put(skb, sizeof(struct odp_vport));
	odp_vport->dp_idx = vport->dp->dp_idx;
	odp_vport->total_len = total_len;

	NLA_PUT_U32(skb, ODP_VPORT_ATTR_PORT_NO, vport->port_no);
	NLA_PUT_U32(skb, ODP_VPORT_ATTR_TYPE, vport_get_type(vport));
	NLA_PUT_STRING(skb, ODP_VPORT_ATTR_NAME, vport_get_name(vport));

	nla = nla_reserve(skb, ODP_VPORT_ATTR_STATS, sizeof(struct rtnl_link_stats64));
	if (!nla)
		goto nla_put_failure;
	if (vport_get_stats(vport, nla_data(nla)))
		__skb_trim(skb, skb->len - nla->nla_len);

	NLA_PUT(skb, ODP_VPORT_ATTR_ADDRESS, ETH_ALEN, vport_get_addr(vport));

	NLA_PUT_U32(skb, ODP_VPORT_ATTR_MTU, vport_get_mtu(vport));

	err = vport_get_options(vport, skb);

	ifindex = vport_get_ifindex(vport);
	if (ifindex > 0)
		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFINDEX, ifindex);

	iflink = vport_get_iflink(vport);
	if (iflink > 0)
		NLA_PUT_U32(skb, ODP_VPORT_ATTR_IFLINK, iflink);

	if (skb->len > total_len)
		goto nla_put_failure;

	odp_vport->len = skb->len;
	return skb;

nla_put_failure:
	err = -EMSGSIZE;
err:
	kfree_skb(skb);
	return ERR_PTR(err);
}
static struct sk_buff *copy_vport_from_user(struct odp_vport __user *uodp_vport,
					    struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	u32 len;
	int err;

	if (get_user(len, &uodp_vport->len))
		return ERR_PTR(-EFAULT);
	if (len < sizeof(struct odp_vport))
		return ERR_PTR(-EINVAL);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	err = -EFAULT;
	if (copy_from_user(__skb_put(skb, len), uodp_vport, len))
		goto error_free_skb;

	odp_vport = (struct odp_vport *)skb->data;
	err = -EINVAL;
	if (odp_vport->len != len)
		goto error_free_skb;

	err = nla_parse(a, ODP_VPORT_ATTR_MAX, (struct nlattr *)(skb->data + sizeof(struct odp_vport)),
			skb->len - sizeof(struct odp_vport), vport_policy);
	if (err)
		goto error_free_skb;

	err = VERIFY_NUL_STRING(a[ODP_VPORT_ATTR_NAME], IFNAMSIZ - 1);
	if (err)
		goto error_free_skb;

	return skb;

error_free_skb:
	kfree_skb(skb);
	return ERR_PTR(err);
}
/* Called with RTNL lock or RCU read lock. */
static struct vport *lookup_vport(struct odp_vport *odp_vport,
				  struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	struct datapath *dp;
	struct vport *vport;

	if (a[ODP_VPORT_ATTR_NAME]) {
		vport = vport_locate(nla_data(a[ODP_VPORT_ATTR_NAME]));
		if (!vport)
			return ERR_PTR(-ENODEV);
		return vport;
	} else if (a[ODP_VPORT_ATTR_PORT_NO]) {
		u32 port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		if (port_no >= DP_MAX_PORTS)
			return ERR_PTR(-EINVAL);

		dp = get_dp(odp_vport->dp_idx);
		if (!dp)
			return ERR_PTR(-ENODEV);

		vport = get_vport_protected(dp, port_no);
		if (!vport)
			return ERR_PTR(-ENOENT);
		return vport;
	} else
		return ERR_PTR(-EINVAL);
}
/* Called with RTNL lock. */
static int change_vport(struct vport *vport, struct nlattr *a[ODP_VPORT_ATTR_MAX + 1])
{
	int err = 0;

	if (a[ODP_VPORT_ATTR_STATS])
		err = vport_set_stats(vport, nla_data(a[ODP_VPORT_ATTR_STATS]));
	if (!err && a[ODP_VPORT_ATTR_ADDRESS])
		err = vport_set_addr(vport, nla_data(a[ODP_VPORT_ATTR_ADDRESS]));
	if (!err && a[ODP_VPORT_ATTR_MTU])
		err = vport_set_mtu(vport, nla_get_u32(a[ODP_VPORT_ATTR_MTU]));
	return err;
}
static int attach_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct vport_parms parms;
	struct sk_buff *reply;
	struct vport *vport;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 port_no;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;
	odp_vport = (struct odp_vport *)skb->data;

	err = -EINVAL;
	if (!a[ODP_VPORT_ATTR_NAME] || !a[ODP_VPORT_ATTR_TYPE])
		goto exit_kfree_skb;

	rtnl_lock();
	dp = get_dp(odp_vport->dp_idx);
	err = -ENODEV;
	if (!dp)
		goto exit_unlock;

	if (a[ODP_VPORT_ATTR_PORT_NO]) {
		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

		err = -EFBIG;
		if (port_no >= DP_MAX_PORTS)
			goto exit_unlock;

		vport = get_vport_protected(dp, port_no);
		err = -EBUSY;
		if (vport)
			goto exit_unlock;
	} else {
		for (port_no = 1; ; port_no++) {
			if (port_no >= DP_MAX_PORTS) {
				err = -EFBIG;
				goto exit_unlock;
			}
			vport = get_vport_protected(dp, port_no);
			if (!vport)
				break;
		}
	}

	parms.name = nla_data(a[ODP_VPORT_ATTR_NAME]);
	parms.type = nla_get_u32(a[ODP_VPORT_ATTR_TYPE]);
	parms.options = a[ODP_VPORT_ATTR_OPTIONS];
	parms.dp = dp;
	parms.port_no = port_no;

	vport = new_vport(&parms);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_unlock;

	set_internal_devs_mtu(dp);
	dp_sysfs_add_if(vport);

	err = change_vport(vport, a);
	if (err) {
		dp_detach_port(vport);
		goto exit_unlock;
	}

	reply = odp_vport_build_info(vport, odp_vport->total_len);
	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto exit_unlock;

	err = copy_to_user(uodp_vport, reply->data, reply->len) ? -EFAULT : 0;
	kfree_skb(reply);

exit_unlock:
	rtnl_unlock();
exit_kfree_skb:
	kfree_skb(skb);
exit:
	return err;
}
static int set_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	vport = lookup_vport((struct odp_vport *)skb->data, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto exit_free;

	err = 0;
	if (a[ODP_VPORT_ATTR_OPTIONS])
		err = vport_set_options(vport, a[ODP_VPORT_ATTR_OPTIONS]);
	if (!err)
		err = change_vport(vport, a);

exit_free:
	kfree_skb(skb);
	rtnl_unlock();
exit:
	return err;
}
static int del_vport(unsigned int cmd, struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto exit;

	rtnl_lock();
	vport = lookup_vport((struct odp_vport *)skb->data, a);
	err = PTR_ERR(vport);
	if (!IS_ERR(vport))
		err = dp_detach_port(vport);

	kfree_skb(skb);
	rtnl_unlock();
exit:
	return err;
}
static int get_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct sk_buff *reply;
	struct vport *vport;
	struct sk_buff *skb;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto err;
	odp_vport = (struct odp_vport *)skb->data;

	rcu_read_lock();
	vport = lookup_vport(odp_vport, a);
	err = PTR_ERR(vport);
	if (IS_ERR(vport))
		goto err_unlock_rcu;
	reply = odp_vport_build_info(vport, odp_vport->total_len);
	rcu_read_unlock();

	err = PTR_ERR(reply);
	if (IS_ERR(reply))
		goto err_kfree_skb;

	err = copy_to_user(uodp_vport, reply->data, reply->len) ? -EFAULT : 0;
	kfree_skb(reply);
	kfree_skb(skb);
	return err;

err_unlock_rcu:
	rcu_read_unlock();
err_kfree_skb:
	kfree_skb(skb);
err:
	return err;
}
static int dump_vport(struct odp_vport __user *uodp_vport)
{
	struct nlattr *a[ODP_VPORT_ATTR_MAX + 1];
	struct odp_vport *odp_vport;
	struct sk_buff *skb;
	struct datapath *dp;
	u32 port_no;
	int err;

	skb = copy_vport_from_user(uodp_vport, a);
	err = PTR_ERR(skb);
	if (IS_ERR(skb))
		goto err;
	odp_vport = (struct odp_vport *)skb->data;

	dp = get_dp(odp_vport->dp_idx);
	err = -ENODEV;
	if (!dp)
		goto err_kfree_skb;

	port_no = 0;
	if (a[ODP_VPORT_ATTR_PORT_NO])
		port_no = nla_get_u32(a[ODP_VPORT_ATTR_PORT_NO]);

	rcu_read_lock();
	for (; port_no < DP_MAX_PORTS; port_no++) {
		struct sk_buff *skb_out;
		struct vport *vport;
		int retval;

		vport = get_vport_protected(dp, port_no);
		if (!vport)
			continue;

		skb_out = odp_vport_build_info(vport, odp_vport->total_len);
		rcu_read_unlock();

		err = PTR_ERR(skb_out);
		if (IS_ERR(skb_out))
			goto err_kfree_skb;

		retval = copy_to_user(uodp_vport, skb_out->data, skb_out->len);
		kfree_skb(skb_out);
		kfree_skb(skb);

		return retval ? -EFAULT : 0;
	}
	rcu_read_unlock();
	err = -ENODEV;

err_kfree_skb:
	kfree_skb(skb);
err:
	return err;
}
static long openvswitch_ioctl(struct file *f, unsigned int cmd,
			      unsigned long argp)
{
	int err;

	genl_lock();
	switch (cmd) {
	case ODP_DP_NEW:
		err = new_datapath((struct odp_datapath __user *)argp);
		break;

	case ODP_DP_GET:
		err = get_datapath((struct odp_datapath __user *)argp);
		break;

	case ODP_DP_DEL:
		err = del_datapath((struct odp_datapath __user *)argp);
		break;

	case ODP_DP_SET:
		err = set_datapath((struct odp_datapath __user *)argp);
		break;

	case ODP_DP_DUMP:
		err = dump_datapath((struct odp_datapath __user *)argp);
		break;

	case ODP_VPORT_NEW:
		err = attach_vport((struct odp_vport __user *)argp);
		break;

	case ODP_VPORT_GET:
		err = get_vport((struct odp_vport __user *)argp);
		break;

	case ODP_VPORT_DEL:
		err = del_vport(cmd, (struct odp_vport __user *)argp);
		break;

	case ODP_VPORT_SET:
		err = set_vport(cmd, (struct odp_vport __user *)argp);
		break;

	case ODP_VPORT_DUMP:
		err = dump_vport((struct odp_vport __user *)argp);
		break;

	case ODP_FLOW_FLUSH:
		err = flush_flows(argp);
		break;

	case ODP_FLOW_NEW:
	case ODP_FLOW_SET:
		err = new_flow(cmd, (struct odp_flow __user *)argp);
		break;

	case ODP_FLOW_GET:
	case ODP_FLOW_DEL:
		err = get_or_del_flow(cmd, (struct odp_flow __user *)argp);
		break;

	case ODP_FLOW_DUMP:
		err = dump_flow((struct odp_flow __user *)argp);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}
	genl_unlock();
	return err;
}
#ifdef CONFIG_COMPAT
static long openvswitch_compat_ioctl(struct file *f, unsigned int cmd, unsigned long argp)
{
	switch (cmd) {
	case ODP_FLOW_FLUSH:
		/* Ioctls that don't need any translation at all. */
		return openvswitch_ioctl(f, cmd, argp);

	case ODP_DP_NEW:
	case ODP_DP_GET:
	case ODP_DP_DEL:
	case ODP_DP_SET:
	case ODP_DP_DUMP:
	case ODP_VPORT_NEW:
	case ODP_VPORT_DEL:
	case ODP_VPORT_GET:
	case ODP_VPORT_SET:
	case ODP_VPORT_DUMP:
	case ODP_FLOW_NEW:
	case ODP_FLOW_DEL:
	case ODP_FLOW_GET:
	case ODP_FLOW_SET:
	case ODP_FLOW_DUMP:
		/* Ioctls that just need their pointer argument extended. */
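		/* On a 64-bit kernel, a 32-bit process hands in a 32-bit
		 * pointer; compat_ptr() widens it into a full user pointer
		 * before the common handler dereferences it. */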
		return openvswitch_ioctl(f, cmd, (unsigned long)compat_ptr(argp));

	default:
		return -ENOIOCTLCMD;
	}
}
#endif
static struct file_operations openvswitch_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = openvswitch_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = openvswitch_compat_ioctl,
#endif
};

static int major;
struct genl_family_and_ops {
	struct genl_family *family;
	struct genl_ops *ops;
	int n_ops;
	struct genl_multicast_group *group;
};

static const struct genl_family_and_ops dp_genl_families[] = {
	{ &dp_packet_genl_family,
	  dp_packet_genl_ops, ARRAY_SIZE(dp_packet_genl_ops),
	  NULL },
};
static void dp_unregister_genl(int n_families)
{
	int i;

	for (i = 0; i < n_families; i++) {
		genl_unregister_family(dp_genl_families[i].family);
	}
}

static int dp_register_genl(void)
{
	int n_registered;
	int err;
	int i;

	n_registered = 0;
	for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
		const struct genl_family_and_ops *f = &dp_genl_families[i];

		err = genl_register_family_with_ops(f->family, f->ops,
						    f->n_ops);
		if (err)
			goto error;
		n_registered++;

		if (f->group) {
			err = genl_register_mc_group(f->family, f->group);
			if (err)
				goto error;
		}
	}

	err = packet_register_mc_groups();
	if (err)
		goto error;
	return 0;

error:
	dp_unregister_genl(n_registered);
	return err;
}
static int __init dp_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

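	/* Compile-time guard: OVS stashes its per-packet state (OVS_CB) in
	 * the skb's fixed-size cb[] scratch area, so struct ovs_skb_cb must
	 * never outgrow it. */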
	BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof(dummy_skb->cb));

	printk("Open vSwitch %s, built "__DATE__" "__TIME__"\n", VERSION BUILDNR);

	err = flow_init();
	if (err)
		goto error;

	err = vport_init();
	if (err)
		goto error_flow_exit;

	err = register_netdevice_notifier(&dp_device_notifier);
	if (err)
		goto error_vport_exit;

	major = register_chrdev(0, "openvswitch", &openvswitch_fops);
	err = major;
	if (err < 0)
		goto error_unreg_notifier;

	err = dp_register_genl();
	if (err < 0)
		goto error_unreg_chrdev;

	return 0;

error_unreg_chrdev:
	unregister_chrdev(major, "openvswitch");
error_unreg_notifier:
	unregister_netdevice_notifier(&dp_device_notifier);
error_vport_exit:
	vport_exit();
error_flow_exit:
	flow_exit();
error:
	return err;
}
static void dp_cleanup(void)
{
	rcu_barrier();
	dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
	unregister_chrdev(major, "openvswitch");
	unregister_netdevice_notifier(&dp_device_notifier);
	vport_exit();
	flow_exit();
}

module_init(dp_init);
module_exit(dp_cleanup);

MODULE_DESCRIPTION("Open vSwitch switching datapath");
MODULE_LICENSE("GPL");