2 * Copyright (c) 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
9 #include <linux/dcache.h>
10 #include <linux/etherdevice.h>
12 #include <linux/kernel.h>
13 #include <linux/list.h>
14 #include <linux/mutex.h>
15 #include <linux/percpu.h>
16 #include <linux/rtnetlink.h>
17 #include <linux/compat.h>
20 #include "vport-internal_dev.h"
22 /* List of statically compiled vport implementations. Don't forget to also
23 * add yours to the list at the bottom of vport.h. */
24 static struct vport_ops *base_vport_ops_list[] = {
/* NOTE(review): the initializer entries are elided in this excerpt. */
/* Runtime copy of the ops table built by vport_init(); only successfully
 * initialized types are kept, counted by n_vport_types. */
31 static const struct vport_ops **vport_ops_list;
32 static int n_vport_types;
/* Hash table of all vports, keyed by device name (see hash_bucket()). */
34 static struct hlist_head *dev_table;
35 #define VPORT_HASH_BUCKETS 1024
37 /* We limit the number of times that we pass through vport_send() to
38 * avoid blowing out the stack in the event that we have a loop. There is
39 * a separate counter for each CPU for both interrupt and non-interrupt
40 * context in order to keep the limit deterministic for a given packet. */
41 struct percpu_loop_counter {
/* NOTE(review): struct fields are elided in this excerpt; vport_send()
 * indexes a per-context counter with count[!!in_interrupt()]. */
45 static struct percpu_loop_counter *vport_loop_counter;
46 #define VPORT_MAX_LOOPS 5
48 /* Both RTNL lock and vport_mutex need to be held when updating dev_table.
50 * If you use vport_locate and then perform some operations, you need to hold
51 * one of these locks if you don't want the vport to be deleted out from under
54 * If you get a reference to a vport through a dp_port, it is protected
55 * by RCU and you need to hold rcu_read_lock instead when reading.
57 * If multiple locks are taken, the hierarchy is:
/* Global lock serializing vport table updates; see the locking comment
 * above for the full hierarchy. */
62 static DEFINE_MUTEX(vport_mutex);
65 * vport_lock - acquire vport lock
67 * Acquire global vport lock. See above comment about locking requirements
68 * and specific function definitions. May sleep.
/* NOTE(review): the function signature line is elided in this excerpt. */
73 mutex_lock(&vport_mutex);
77 * vport_unlock - release vport lock
79 * Release lock acquired with vport_lock.
/* NOTE(review): the function signature line is elided in this excerpt. */
84 mutex_unlock(&vport_mutex);
/* Debug check: warn loudly if a caller reaches a vport_mutex-protected path
 * without holding the lock.  The tail of the macro is elided in this
 * excerpt. */
87 #define ASSERT_VPORT() do { \
88 if (unlikely(!mutex_is_locked(&vport_mutex))) { \
89 printk(KERN_ERR "openvswitch: vport lock not held at %s (%d)\n", \
90 __FILE__, __LINE__); \
96 * vport_init - initialize vport subsystem
98 * Called at module load time to initialize the vport subsystem and any
99 * compiled in vport types.
/* NOTE(review): signature, several allocation-failure branches, and the
 * error-unwind labels are elided in this excerpt. */
107 dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
/* Working copy of the static ops list; sized for the full list even though
 * only successfully initialized types are stored. */
114 vport_ops_list = kmalloc(ARRAY_SIZE(base_vport_ops_list) *
115 sizeof(struct vport_ops *), GFP_KERNEL);
116 if (!vport_ops_list) {
118 goto error_dev_table;
121 vport_loop_counter = alloc_percpu(struct percpu_loop_counter);
122 if (!vport_loop_counter) {
/* Run each compiled-in type's init hook; keep it only on success. */
127 for (i = 0; i < ARRAY_SIZE(base_vport_ops_list); i++) {
128 struct vport_ops *new_ops = base_vport_ops_list[i];
131 err = new_ops->init();
136 vport_ops_list[n_vport_types++] = new_ops;
/* A REQUIRED type failing init aborts module load (path elided). */
137 else if (new_ops->flags & VPORT_F_REQUIRED) {
146 kfree(vport_ops_list);
/* NOTE(review): the enclosing function is elided in this excerpt; this loop
 * appears to walk every dev_table bucket and dispose of remaining vports —
 * confirm against the full source. */
161 for (i = 0; i < VPORT_HASH_BUCKETS; i++) {
162 struct hlist_head *bucket = &dev_table[i];
164 struct hlist_node *node, *next;
/* _safe variant: the loop body may unlink the current entry. */
166 hlist_for_each_entry_safe(vport, node, next, bucket, hash_node)
175 * vport_exit - shutdown vport subsystem
177 * Called at module exit time to shutdown the vport subsystem and any
178 * initialized vport types.
/* NOTE(review): signature and part of the body are elided in this excerpt. */
187 for (i = 0; i < n_vport_types; i++) {
188 if (vport_ops_list[i]->exit)
189 vport_ops_list[i]->exit();
/* Release resources allocated in vport_init(). */
192 free_percpu(vport_loop_counter);
193 kfree(vport_ops_list);
/* Common worker for the (native and compat) userspace add paths: force
 * NUL-termination of user-supplied strings, reject duplicates, then create
 * the port.  Locking and early-return lines are elided in this excerpt. */
198 do_vport_add(struct odp_vport_add *vport_config)
203 vport_config->port_type[VPORT_TYPE_SIZE - 1] = '\0';
204 vport_config->devname[IFNAMSIZ - 1] = '\0';
/* A successful locate here means the name is already taken (handling
 * elided). */
208 vport = vport_locate(vport_config->devname);
215 vport = vport_add(vport_config->devname, vport_config->port_type,
216 vport_config->config);
220 err = PTR_ERR(vport);
228 * vport_user_add - add vport device (for userspace callers)
230 * @uvport_config: New port configuration.
232 * Creates a new vport with the specified configuration (which is dependent
233 * on device type). This function is for userspace callers and assumes no
/* NOTE(review): "no locks are held" — rest of the sentence and the -EFAULT
 * return after copy_from_user() are elided in this excerpt. */
237 vport_user_add(const struct odp_vport_add __user *uvport_config)
239 struct odp_vport_add vport_config;
241 if (copy_from_user(&vport_config, uvport_config, sizeof(struct odp_vport_add)))
244 return do_vport_add(&vport_config);
/* 32-bit-compat variant of vport_user_add(): copies in the compat struct,
 * widens it (compat_ptr for the config pointer) and delegates to
 * do_vport_add().
 *
 * NOTE(review): @ucompat is passed to copy_from_user() but is not annotated
 * __user, unlike the native handlers above — sparse would flag this;
 * confirm and add the annotation in the full source. */
249 compat_vport_user_add(struct compat_odp_vport_add *ucompat)
251 struct compat_odp_vport_add compat;
252 struct odp_vport_add vport_config;
254 if (copy_from_user(&compat, ucompat, sizeof(struct compat_odp_vport_add)))
257 memcpy(vport_config.port_type, compat.port_type, VPORT_TYPE_SIZE);
258 memcpy(vport_config.devname, compat.devname, IFNAMSIZ);
259 vport_config.config = compat_ptr(compat.config);
261 return do_vport_add(&vport_config);
/* Common worker for the userspace modify paths: NUL-terminate the
 * user-supplied name, look the port up, and apply the new config.
 * Locking and error-handling lines are elided in this excerpt. */
266 do_vport_mod(struct odp_vport_mod *vport_config)
271 vport_config->devname[IFNAMSIZ - 1] = '\0';
275 vport = vport_locate(vport_config->devname);
282 err = vport_mod(vport, vport_config->config);
291 * vport_user_mod - modify existing vport device (for userspace callers)
293 * @uvport_config: New configuration for vport
295 * Modifies an existing device with the specified configuration (which is
296 * dependent on device type). This function is for userspace callers and
297 * assumes no locks are held.
300 vport_user_mod(const struct odp_vport_mod __user *uvport_config)
302 struct odp_vport_mod vport_config;
/* NOTE(review): the -EFAULT return after copy_from_user() is elided. */
304 if (copy_from_user(&vport_config, uvport_config, sizeof(struct odp_vport_mod)))
307 return do_vport_mod(&vport_config);
/* 32-bit-compat variant of vport_user_mod(); mirrors compat_vport_user_add().
 *
 * NOTE(review): @ucompat lacks the __user annotation despite being read with
 * copy_from_user() — confirm and fix in the full source. */
312 compat_vport_user_mod(struct compat_odp_vport_mod *ucompat)
314 struct compat_odp_vport_mod compat;
315 struct odp_vport_mod vport_config;
317 if (copy_from_user(&compat, ucompat, sizeof(struct compat_odp_vport_mod)))
320 memcpy(vport_config.devname, compat.devname, IFNAMSIZ);
321 vport_config.config = compat_ptr(compat.config);
323 return do_vport_mod(&vport_config);
328 * vport_user_del - delete existing vport device (for userspace callers)
330 * @udevname: Name of device to delete
332 * Deletes the specified device. Detaches the device from a datapath first
333 * if it is attached. Deleting the device will fail if it does not exist or it
334 * is the datapath local port. It is also possible to fail for less obvious
335 * reasons, such as lack of memory. This function is for userspace callers and
336 * assumes no locks are held.
339 vport_user_del(const char __user *udevname)
341 char devname[IFNAMSIZ];
343 struct dp_port *dp_port;
/* strncpy_from_user returns the copied length, or -EFAULT (elided branch);
 * a full IFNAMSIZ read means the name did not fit. */
347 retval = strncpy_from_user(devname, udevname, IFNAMSIZ);
350 else if (retval >= IFNAMSIZ)
351 return -ENAMETOOLONG;
355 vport = vport_locate(devname);
/* If the port is attached, detach it under the owning datapath's mutex
 * before destroying it. */
361 dp_port = vport_get_dp_port(vport);
363 struct datapath *dp = dp_port->dp;
365 mutex_lock(&dp->mutex);
/* Refuse to delete the datapath's local port (error path elided). */
367 if (!strcmp(dp_name(dp), devname)) {
372 err = dp_detach_port(dp_port, 0);
375 mutex_unlock(&dp->mutex);
382 err = vport_del(vport);
391 * vport_user_stats_get - retrieve device stats (for userspace callers)
393 * @ustats_req: Stats request parameters.
395 * Retrieves transmit, receive, and error stats for the given device. This
396 * function is for userspace callers and assumes no locks are held.
399 vport_user_stats_get(struct odp_vport_stats_req __user *ustats_req)
401 struct odp_vport_stats_req stats_req;
/* -EFAULT returns after the user copies are elided in this excerpt. */
405 if (copy_from_user(&stats_req, ustats_req, sizeof(struct odp_vport_stats_req)))
408 stats_req.devname[IFNAMSIZ - 1] = '\0';
412 vport = vport_locate(stats_req.devname)...
418 err = vport_get_stats(vport, &stats_req.stats);
/* Copy the filled-in request (including stats) back to userspace. */
424 if (copy_to_user(ustats_req, &stats_req, sizeof(struct odp_vport_stats_req)))
431 * vport_user_stats_set - sets offset device stats (for userspace callers)
433 * @ustats_req: Stats set parameters.
435 * Provides a set of transmit, receive, and error stats to be added as an
436 * offset to the collected data when stats are retrieved. Some devices may not
437 * support setting the stats, in which case the result will always be
438 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
442 vport_user_stats_set(struct odp_vport_stats_req __user *ustats_req)
444 struct odp_vport_stats_req stats_req;
/* -EFAULT return after copy_from_user() is elided in this excerpt. */
448 if (copy_from_user(&stats_req, ustats_req, sizeof(struct odp_vport_stats_req)))
451 stats_req.devname[IFNAMSIZ - 1] = '\0';
456 vport = vport_locate(stats_req.devname);
462 err = vport_set_stats(vport, &stats_req.stats);
472 * vport_user_ether_get - retrieve device Ethernet address (for userspace callers)
474 * @uvport_ether: Ethernet address request parameters.
476 * Retrieves the Ethernet address of the given device. This function is for
477 * userspace callers and assumes no locks are held.
480 vport_user_ether_get(struct odp_vport_ether __user *uvport_ether)
482 struct odp_vport_ether vport_ether;
/* -EFAULT returns after the user copies are elided in this excerpt. */
486 if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
489 vport_ether.devname[IFNAMSIZ - 1] = '\0';
493 vport = vport_locate(vport_ether.devname);
500 memcpy(vport_ether.ether_addr, vport_get_addr(vport), ETH_ALEN);
507 if (copy_to_user(uvport_ether, &vport_ether, sizeof(struct odp_vport_ether)))
514 * vport_user_ether_set - set device Ethernet address (for userspace callers)
516 * @uvport_ether: Ethernet address request parameters.
518 * Sets the Ethernet address of the given device. Some devices may not support
519 * setting the Ethernet address, in which case the result will always be
520 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
524 vport_user_ether_set(struct odp_vport_ether __user *uvport_ether)
526 struct odp_vport_ether vport_ether;
/* -EFAULT return after copy_from_user() is elided in this excerpt. */
530 if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
533 vport_ether.devname[IFNAMSIZ - 1] = '\0';
538 vport = vport_locate(vport_ether.devname);
544 err = vport_set_addr(vport, vport_ether.ether_addr);
553 * vport_user_mtu_get - retrieve device MTU (for userspace callers)
555 * @uvport_mtu: MTU request parameters.
557 * Retrieves the MTU of the given device. This function is for userspace
558 * callers and assumes no locks are held.
561 vport_user_mtu_get(struct odp_vport_mtu __user *uvport_mtu)
563 struct odp_vport_mtu vport_mtu;
/* -EFAULT returns after the user copies are elided in this excerpt. */
567 if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
570 vport_mtu.devname[IFNAMSIZ - 1] = '\0';
574 vport = vport_locate(vport_mtu.devname);
580 vport_mtu.mtu = vport_get_mtu(vport);
586 if (copy_to_user(uvport_mtu, &vport_mtu, sizeof(struct odp_vport_mtu)))
593 * vport_user_mtu_set - set device MTU (for userspace callers)
595 * @uvport_mtu: MTU request parameters.
597 * Sets the MTU of the given device. Some devices may not support setting the
598 * MTU, in which case the result will always be -EOPNOTSUPP. This function is
599 * for userspace callers and assumes no locks are held.
602 vport_user_mtu_set(struct odp_vport_mtu __user *uvport_mtu)
604 struct odp_vport_mtu vport_mtu;
/* -EFAULT return after copy_from_user() is elided in this excerpt. */
608 if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
611 vport_mtu.devname[IFNAMSIZ - 1] = '\0';
616 vport = vport_locate(vport_mtu.devname);
622 err = vport_set_mtu(vport, vport_mtu.mtu);
/* Map a device name to its dev_table bucket.  Relies on VPORT_HASH_BUCKETS
 * being a power of two so the mask selects a valid index. */
630 static struct hlist_head *
631 hash_bucket(const char *name)
633 unsigned int hash = full_name_hash(name, strlen(name));
634 return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
638 * vport_locate - find a port that has already been created
640 * @name: name of port to find
642 * Either RTNL or vport lock must be acquired before calling this function
643 * and held while using the found port. See the locking comments at the
647 vport_locate(const char *name)
649 struct hlist_head *bucket = hash_bucket(name);
651 struct hlist_node *node;
/* Debug-only lock check; warns rather than failing (tail elided). */
653 if (unlikely(!mutex_is_locked(&vport_mutex) && !rtnl_is_locked())) {
654 printk(KERN_ERR "openvswitch: neither RTNL nor vport lock held in vport_locate\n");
/* Linear scan of the bucket's chain; returns NULL when not found
 * (return paths elided in this excerpt). */
660 hlist_for_each_entry(vport, node, bucket, hash_node)
661 if (!strcmp(name, vport_get_name(vport)))
/* Insert @vport into dev_table under its device name.  Caller must hold the
 * locks required for dev_table updates (see the locking comment above). */
672 register_vport(struct vport *vport)
674 hlist_add_head(&vport->hash_node, hash_bucket(vport_get_name(vport)));
/* Remove @vport from dev_table.  Caller must hold the locks required for
 * dev_table updates (see the locking comment above). */
678 unregister_vport(struct vport *vport)
680 hlist_del(&vport->hash_node);
684 * vport_alloc - allocate and initialize new vport
686 * @priv_size: Size of private data area to allocate.
687 * @ops: vport device ops
689 * Allocate and initialize a new vport defined by @ops. The vport will contain
690 * a private data area of size @priv_size that can be accessed using
691 * vport_priv(). vports that are no longer needed should be released with
695 vport_alloc(int priv_size, const struct vport_ops *ops)
700 alloc_size = sizeof(struct vport);
702 alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
703 alloc_size += priv_size;
706 vport = kzalloc(alloc_size, GFP_KERNEL);
708 return ERR_PTR(-ENOMEM);
712 if (vport->ops->flags & VPORT_F_GEN_STATS) {
713 vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
714 if (!vport->percpu_stats)
715 return ERR_PTR(-ENOMEM);
717 spin_lock_init(&vport->stats_lock);
724 * vport_free - uninitialize and free vport
726 * @vport: vport to free
728 * Frees a vport allocated with vport_alloc() when it is no longer needed.
731 vport_free(struct vport *vport)
/* Release the per-CPU stats only if vport_alloc() created them. */
733 if (vport->ops->flags & VPORT_F_GEN_STATS)
734 free_percpu(vport->percpu_stats);
740 * vport_add - add vport device (for kernel callers)
742 * @name: Name of new device.
743 * @type: Type of new device (to be matched against types in registered vport
745 * @config: Device type specific configuration. Userspace pointer.
747 * Creates a new vport with the specified configuration (which is dependent
748 * on device type). Both RTNL and vport locks must be held.
751 vport_add(const char *name, const char *type, const void __user *config)
/* Dispatch on the registered type name; the unknown-type error path and
 * return statements are elided in this excerpt. */
760 for (i = 0; i < n_vport_types; i++) {
761 if (!strcmp(vport_ops_list[i]->type, type)) {
762 vport = vport_ops_list[i]->create(name, config);
764 err = PTR_ERR(vport);
/* On successful creation, publish the new vport in dev_table. */
768 register_vport(vport);
780 * vport_mod - modify existing vport device (for kernel callers)
782 * @vport: vport to modify.
783 * @config: Device type specific configuration. Userspace pointer.
785 * Modifies an existing device with the specified configuration (which is
786 * dependent on device type). Both RTNL and vport locks must be held.
789 vport_mod(struct vport *vport, const void __user *config)
/* Delegate to the type's modify hook; the fallback return for types
 * without one is elided in this excerpt. */
794 if (vport->ops->modify)
795 return vport->ops->modify(vport, config);
801 * vport_del - delete existing vport device (for kernel callers)
803 * @vport: vport to delete.
805 * Deletes the specified device. The device must not be currently attached to
806 * a datapath. It is possible to fail for reasons such as lack of memory.
807 * Both RTNL and vport locks must be held.
810 vport_del(struct vport *vport)
/* Attached ports must be detached first; deleting one is a kernel bug. */
814 BUG_ON(vport_get_dp_port(vport));
816 unregister_vport(vport);
818 return vport->ops->destroy(vport);
822 * vport_attach - attach a vport to a datapath
824 * @vport: vport to attach.
825 * @dp_port: Datapath port to attach the vport to.
827 * Attaches a vport to a specific datapath so that packets may be exchanged.
828 * Both ports must be currently unattached. @dp_port must be successfully
829 * attached to a vport before it is connected to a datapath and must not be
830 * modified while connected. RTNL lock and the appropriate DP mutex must be held.
833 vport_attach(struct vport *vport, struct dp_port *dp_port)
/* Already-attached vports are rejected (error return elided). */
840 if (vport_get_dp_port(vport))
843 if (vport->ops->attach) {
846 err = vport->ops->attach(vport);
/* Publish the association; readers access vport->dp_port under RCU. */
851 dp_port->vport = vport;
852 rcu_assign_pointer(vport->dp_port, dp_port);
858 * vport_detach - detach a vport from a datapath
860 * @vport: vport to detach.
862 * Detaches a vport from a datapath. May fail for a variety of reasons,
863 * including lack of memory. RTNL lock and the appropriate DP mutex must be held.
866 vport_detach(struct vport *vport)
868 struct dp_port *dp_port;
872 dp_port = vport_get_dp_port(vport);
/* Tear down the association before calling the type's detach hook;
 * readers see the NULL dp_port via RCU. */
876 dp_port->vport = NULL;
877 rcu_assign_pointer(vport->dp_port, NULL);
879 if (vport->ops->detach)
880 return vport->ops->detach(vport);
886 * vport_set_mtu - set device MTU (for kernel callers)
888 * @vport: vport on which to set MTU.
891 * Sets the MTU of the given device. Some devices may not support setting the
892 * MTU, in which case the result will always be -EOPNOTSUPP. RTNL lock must
896 vport_set_mtu(struct vport *vport, int mtu)
903 if (vport->ops->set_mtu) {
906 ret = vport->ops->set_mtu(vport, mtu);
/* A successful MTU change on a non-internal port may require the
 * datapath's internal devices to follow suit. */
908 if (!ret && !is_internal_vport(vport)) {
909 struct dp_port *dp_port = vport_get_dp_port(vport);
912 set_internal_devs_mtu(dp_port->dp);
921 * vport_set_addr - set device Ethernet address (for kernel callers)
923 * @vport: vport on which to set Ethernet address.
924 * @addr: New address.
926 * Sets the Ethernet address of the given device. Some devices may not support
927 * setting the Ethernet address, in which case the result will always be
928 * -EOPNOTSUPP. RTNL lock must be held.
931 vport_set_addr(struct vport *vport, const unsigned char *addr)
/* Reject multicast/zero addresses up front. */
935 if (!is_valid_ether_addr(addr))
936 return -EADDRNOTAVAIL;
938 if (vport->ops->set_addr)
939 return vport->ops->set_addr(vport, addr);
945 * vport_set_stats - sets offset device stats (for kernel callers)
947 * @vport: vport on which to set stats
948 * @stats: stats to set
950 * Provides a set of transmit, receive, and error stats to be added as an
951 * offset to the collected data when stats are retrieved. Some devices may not
952 * support setting the stats, in which case the result will always be
953 * -EOPNOTSUPP. RTNL lock must be held.
956 vport_set_stats(struct vport *vport, struct odp_vport_stats *stats)
/* Generic-stats vports store the offset locally; others delegate to the
 * type's hook if present (fallback return elided in this excerpt). */
960 if (vport->ops->flags & VPORT_F_GEN_STATS) {
961 spin_lock_bh(&vport->stats_lock);
962 memcpy(&vport->offset_stats, stats, sizeof(struct odp_vport_stats));
963 spin_unlock_bh(&vport->stats_lock);
966 } else if (vport->ops->set_stats)
967 return vport->ops->set_stats(vport, stats);
973 * vport_get_name - retrieve device name
975 * @vport: vport from which to retrieve the name.
977 * Retrieves the name of the given device. Either RTNL lock or rcu_read_lock
978 * must be held for the entire duration that the name is in use.
981 vport_get_name(const struct vport *vport)
983 return vport->ops->get_name(vport);
987 * vport_get_type - retrieve device type
989 * @vport: vport from which to retrieve the type.
991 * Retrieves the type of the given device. Either RTNL lock or rcu_read_lock
992 * must be held for the entire duration that the type is in use.
995 vport_get_type(const struct vport *vport)
997 return vport->ops->type;
1001 * vport_get_addr - retrieve device Ethernet address (for kernel callers)
1003 * @vport: vport from which to retrieve the Ethernet address.
1005 * Retrieves the Ethernet address of the given device. Either RTNL lock or
1006 * rcu_read_lock must be held for the entire duration that the Ethernet address
1009 const unsigned char *
1010 vport_get_addr(const struct vport *vport)
1012 return vport->ops->get_addr(vport);
1016 * vport_get_dp_port - retrieve attached datapath port
1018 * @vport: vport from which to retrieve the datapath port.
1020 * Retrieves the attached datapath port or null if not attached. Either RTNL
1021 * lock or rcu_read_lock must be held for the entire duration that the datapath
1022 * port is being accessed.
1025 vport_get_dp_port(const struct vport *vport)
1027 return rcu_dereference(vport->dp_port);
1031 * vport_get_kobj - retrieve associated kobj
1033 * @vport: vport from which to retrieve the associated kobj
1035 * Retrieves the associated kobj or null if no kobj. The returned kobj is
1036 * valid for as long as the vport exists.
1039 vport_get_kobj(const struct vport *vport)
1041 if (vport->ops->get_kobj)
1042 return vport->ops->get_kobj(vport);
/* NOTE(review): the NULL fallback return is elided in this excerpt. */
1048 * vport_get_stats - retrieve device stats (for kernel callers)
1050 * @vport: vport from which to retrieve the stats
1051 * @stats: location to store stats
1053 * Retrieves transmit, receive, and error stats for the given device.
1056 vport_get_stats(struct vport *vport, struct odp_vport_stats *stats)
1058 struct odp_vport_stats dev_stats;
1059 struct odp_vport_stats *dev_statsp = NULL;
/* Ask the device for its own stats first.  For generic-stats vports only
 * the device's error counters are wanted, so a scratch buffer is used. */
1062 if (vport->ops->get_stats) {
1063 if (vport->ops->flags & VPORT_F_GEN_STATS)
1064 dev_statsp = &dev_stats;
1069 err = vport->ops->get_stats(vport, dev_statsp);
1076 if (vport->ops->flags & VPORT_F_GEN_STATS) {
1079 /* We potentially have 3 sources of stats that need to be
1080 * combined: those we have collected (split into err_stats and
1081 * percpu_stats), offset_stats from set_stats(), and device
1082 * error stats from get_stats() (for errors that happen
1083 * downstream and therefore aren't reported through our
1084 * vport_record_error() function). */
1086 spin_lock_bh(&vport->stats_lock);
/* Start from the userspace-provided offsets... */
1088 memcpy(stats, &vport->offset_stats, sizeof(struct odp_vport_stats));
/* ...then fold in locally recorded errors (rx_errors is the sum of
 * all rx error categories)... */
1090 stats->rx_errors += vport->err_stats.rx_errors
1091 + vport->err_stats.rx_frame_err
1092 + vport->err_stats.rx_over_err
1093 + vport->err_stats.rx_crc_err;
1094 stats->tx_errors += vport->err_stats.tx_errors;
1095 stats->tx_dropped += vport->err_stats.tx_dropped;
1096 stats->rx_dropped += vport->err_stats.rx_dropped;
1097 stats->rx_over_err += vport->err_stats.rx_over_err;
1098 stats->rx_crc_err += vport->err_stats.rx_crc_err;
1099 stats->rx_frame_err += vport->err_stats.rx_frame_err;
1100 stats->collisions += vport->err_stats.collisions;
1102 spin_unlock_bh(&vport->stats_lock);
/* ...then the device's downstream error stats, if it provided any
 * (the surrounding NULL check is elided in this excerpt)... */
1105 stats->rx_errors += dev_statsp->rx_errors;
1106 stats->tx_errors += dev_statsp->tx_errors;
1107 stats->rx_dropped += dev_statsp->rx_dropped;
1108 stats->tx_dropped += dev_statsp->tx_dropped;
1109 stats->rx_over_err += dev_statsp->rx_over_err;
1110 stats->rx_crc_err += dev_statsp->rx_crc_err;
1111 stats->rx_frame_err += dev_statsp->rx_frame_err;
1112 stats->collisions += dev_statsp->collisions;
/* ...and finally the per-CPU packet/byte counters. */
1115 for_each_possible_cpu(i) {
1116 const struct vport_percpu_stats *percpu_stats;
1118 percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
1119 stats->rx_bytes += percpu_stats->rx_bytes;
1120 stats->rx_packets += percpu_stats->rx_packets;
1121 stats->tx_bytes += percpu_stats->tx_bytes;
1122 stats->tx_packets += percpu_stats->tx_packets;
1134 * vport_get_flags - retrieve device flags
1136 * @vport: vport from which to retrieve the flags
1138 * Retrieves the flags of the given device. Either RTNL lock or rcu_read_lock
1142 vport_get_flags(const struct vport *vport)
1144 return vport->ops->get_dev_flags(vport);
1148 * vport_is_running - check whether device is running
1150 * @vport: vport on which to check status.
1152 * Checks whether the given device is running. Either RTNL lock or
1153 * rcu_read_lock must be held.
1156 vport_is_running(const struct vport *vport)
1158 return vport->ops->is_running(vport);
1162 * vport_get_operstate - retrieve device operating state
1164 * @vport: vport from which to check status
1166 * Retrieves the RFC2863 operstate of the given device. Either RTNL lock or
1167 * rcu_read_lock must be held.
1170 vport_get_operstate(const struct vport *vport)
1172 return vport->ops->get_operstate(vport);
1176 * vport_get_ifindex - retrieve device system interface index
1178 * @vport: vport from which to retrieve index
1180 * Retrieves the system interface index of the given device. Not all devices
1181 * will have system indexes, in which case the index of the datapath local
1182 * port is returned. Returns a negative index on error. Either RTNL lock or
1183 * rcu_read_lock must be held.
1186 vport_get_ifindex(const struct vport *vport)
1188 const struct dp_port *dp_port;
1190 if (vport->ops->get_ifindex)
1191 return vport->ops->get_ifindex(vport);
1193 /* If we don't actually have an ifindex, use the local port's.
1194 * Userspace doesn't check it anyway. */
1195 dp_port = vport_get_dp_port(vport);
/* NOTE(review): the unattached (dp_port == NULL) error return is elided
 * in this excerpt. */
1199 return vport_get_ifindex(dp_port->dp->ports[ODPP_LOCAL]->vport);
1203 * vport_get_iflink - retrieve device system link index
1205 * @vport: vport from which to retrieve index
1207 * Retrieves the system link index of the given device. The link is the index
1208 * of the interface on which the packet will actually be sent. In most cases
1209 * this is the same as the ifindex but may be different for tunnel devices.
1210 * Returns a negative index on error. Either RTNL lock or rcu_read_lock must
1214 vport_get_iflink(const struct vport *vport)
1216 if (vport->ops->get_iflink)
1217 return vport->ops->get_iflink(vport);
1219 /* If we don't have an iflink, use the ifindex. In most cases they
1221 return vport_get_ifindex(vport);
1225 * vport_get_mtu - retrieve device MTU (for kernel callers)
1227 * @vport: vport from which to retrieve MTU
1229 * Retrieves the MTU of the given device. Either RTNL lock or rcu_read_lock
1233 vport_get_mtu(const struct vport *vport)
1235 return vport->ops->get_mtu(vport);
1239 * vport_receive - pass up received packet to the datapath for processing
1241 * @vport: vport that received the packet
1242 * @skb: skb that was received
1244 * Must be called with rcu_read_lock. The packet cannot be shared and
1245 * skb->data should point to the Ethernet header. The caller must have already
1246 * called compute_ip_summed() to initialize the checksumming fields.
1249 vport_receive(struct vport *vport, struct sk_buff *skb)
1251 struct dp_port *dp_port = vport_get_dp_port(vport);
/* An unattached vport cannot deliver; count the drop (free of the skb is
 * in lines elided from this excerpt). */
1254 vport_record_error(vport, VPORT_E_RX_DROPPED);
/* Update generic per-CPU rx counters when this vport uses them.
 * NOTE(review): preemption-safety around smp_processor_id() depends on
 * context established outside this excerpt — confirm in the full source. */
1260 if (vport->ops->flags & VPORT_F_GEN_STATS) {
1261 struct vport_percpu_stats *stats;
1265 stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
1266 stats->rx_packets++;
1267 stats->rx_bytes += skb->len;
/* Ports that don't supply a tunnel ID get a zeroed one. */
1272 if (!(vport->ops->flags & VPORT_F_TUN_ID))
1273 OVS_CB(skb)->tun_id = 0;
1275 dp_process_received_packet(dp_port, skb);
1279 * vport_send - send a packet on a device
1281 * @vport: vport on which to send the packet
1284 * Sends the given packet and returns the length of data sent. Either RTNL
1285 * lock or rcu_read_lock must be held.
1288 vport_send(struct vport *vport, struct sk_buff *skb)
/* Per-CPU, per-context loop guard; see percpu_loop_counter above.
 * NOTE(review): the matching counter decrement and put_cpu() are in lines
 * elided from this excerpt — confirm they balance in the full source. */
1293 loop_count = &per_cpu_ptr(vport_loop_counter, get_cpu())->count[!!in_interrupt()];
1296 if (likely(*loop_count <= VPORT_MAX_LOOPS)) {
1297 sent = vport->ops->send(vport, skb);
/* Loop limit exceeded: warn (rate-limited) and drop. */
1299 if (net_ratelimit())
1300 printk(KERN_WARNING "%s: dropping packet that has looped more than %d times\n",
1301 dp_name(vport_get_dp_port(vport)->dp), VPORT_MAX_LOOPS);
1305 vport_record_error(vport, VPORT_E_TX_DROPPED);
/* Update generic per-CPU tx counters only for successful sends. */
1311 if (vport->ops->flags & VPORT_F_GEN_STATS && sent > 0) {
1312 struct vport_percpu_stats *stats;
1316 stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
1317 stats->tx_packets++;
1318 stats->tx_bytes += sent;
1327 * vport_record_error - indicate device error to generic stats layer
1329 * @vport: vport that encountered the error
1330 * @err_type: one of enum vport_err_type types to indicate the error type
1332 * If using the vport generic stats layer indicate that an error of the given
/* Vports not using VPORT_F_GEN_STATS track errors themselves, so this is a
 * no-op for them. */
1336 vport_record_error(struct vport *vport, enum vport_err_type err_type)
1338 if (vport->ops->flags & VPORT_F_GEN_STATS) {
/* err_stats is shared across CPUs; serialize with the stats lock. */
1340 spin_lock_bh(&vport->stats_lock);
/* Bump the counter matching the error category (break statements are
 * elided in this excerpt). */
1343 case VPORT_E_RX_DROPPED:
1344 vport->err_stats.rx_dropped++;
1347 case VPORT_E_RX_ERROR:
1348 vport->err_stats.rx_errors++;
1351 case VPORT_E_RX_FRAME:
1352 vport->err_stats.rx_frame_err++;
1355 case VPORT_E_RX_OVER:
1356 vport->err_stats.rx_over_err++;
1359 case VPORT_E_RX_CRC:
1360 vport->err_stats.rx_crc_err++;
1363 case VPORT_E_TX_DROPPED:
1364 vport->err_stats.tx_dropped++;
1367 case VPORT_E_TX_ERROR:
1368 vport->err_stats.tx_errors++;
1371 case VPORT_E_COLLISION:
1372 vport->err_stats.collisions++;
1376 spin_unlock_bh(&vport->stats_lock);