2 * Copyright (c) 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
9 #include <linux/dcache.h>
10 #include <linux/etherdevice.h>
12 #include <linux/kernel.h>
13 #include <linux/list.h>
14 #include <linux/mutex.h>
15 #include <linux/percpu.h>
16 #include <linux/rtnetlink.h>
17 #include <linux/compat.h>
20 #include "vport-internal_dev.h"
22 /* List of statically compiled vport implementations. Don't forget to also
23 * add yours to the list at the bottom of vport.h. */
/* NOTE(review): the initializer entries and closing brace fall on lines
 * elided from this excerpt. */
24 static struct vport_ops *base_vport_ops_list[] = {
/* Runtime table of the vport types that initialized successfully; only the
 * first n_vport_types entries are valid.  Built by vport_init(), freed by
 * vport_exit(). */
31 static const struct vport_ops **vport_ops_list;
32 static int n_vport_types;
/* Hash table of every created vport, keyed by device name (see
 * hash_bucket() / vport_locate()). */
34 static struct hlist_head *dev_table;
/* Must remain a power of two: hash_bucket() masks with
 * (VPORT_HASH_BUCKETS - 1). */
35 #define VPORT_HASH_BUCKETS 1024
37 /* Both RTNL lock and vport_mutex need to be held when updating dev_table.
39 * If you use vport_locate and then perform some operations, you need to hold
40 * one of these locks if you don't want the vport to be deleted out from under
 * you.
43 * If you get a reference to a vport through a dp_port, it is protected
44 * by RCU and you need to hold rcu_read_lock instead when reading.
46 * If multiple locks are taken, the hierarchy is:
 * (NOTE(review): the actual ordering list is on lines elided from this
 * excerpt.)
 */
/* Global mutex guarding vport creation/modification/deletion and dev_table
 * updates; acquired via vport_lock()/vport_unlock() below. */
51 static DEFINE_MUTEX(vport_mutex);
54 * vport_lock - acquire vport lock
56 * Acquire global vport lock. See above comment about locking requirements
57 * and specific function definitions. May sleep.
/* NOTE(review): the vport_lock() signature and braces fall on elided
 * lines; only the body statement is visible here. */
62 mutex_lock(&vport_mutex);
66 * vport_unlock - release vport lock
68 * Release lock acquired with vport_lock.
/* NOTE(review): vport_unlock() signature/braces elided as above. */
73 mutex_unlock(&vport_mutex);
/* Diagnostic check: warn loudly (but do not crash) when a path that
 * requires vport_mutex is reached without holding it. */
76 #define ASSERT_VPORT() do { \
77 if (unlikely(!mutex_is_locked(&vport_mutex))) { \
78 printk(KERN_ERR "openvswitch: vport lock not held at %s (%d)\n", \
79 __FILE__, __LINE__); \
85 * vport_init - initialize vport subsystem
87 * Called at module load time to initialize the vport subsystem and any
88 * compiled in vport types.
/* Allocate the (zeroed) device-name hash table.  NOTE(review): the GFP
 * flags argument and the NULL check are on elided lines. */
96 dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
/* Worst-case sized table: every statically compiled type may register. */
103 vport_ops_list = kmalloc(ARRAY_SIZE(base_vport_ops_list) *
104 sizeof(struct vport_ops *), GFP_KERNEL);
105 if (!vport_ops_list) {
107 goto error_dev_table;
110 for (i = 0; i < ARRAY_SIZE(base_vport_ops_list); i++) {
111 struct vport_ops *new_ops = base_vport_ops_list[i];
/* A type-specific get_stats() and the generic stats layer are mutually
 * exclusive; prefer the type's own implementation. */
113 if (new_ops->get_stats && new_ops->flags & VPORT_F_GEN_STATS) {
114 printk(KERN_INFO "openvswitch: both get_stats() and VPORT_F_GEN_STATS defined on vport %s, dropping VPORT_F_GEN_STATS\n", new_ops->type)
115 new_ops->flags &= ~VPORT_F_GEN_STATS;
/* Each type must initialize successfully before being published. */
119 err = new_ops->init();
/* NOTE(review): presumably guarded by "if (!err)" on an elided line. */
124 vport_ops_list[n_vport_types++] = new_ops;
/* A VPORT_F_REQUIRED type failing init aborts module load (error handling
 * elided). */
125 else if (new_ops->flags & VPORT_F_REQUIRED) {
/* NOTE(review): teardown loop over every dev_table bucket; the enclosing
 * function's signature and the loop body are on elided lines.  The _safe
 * iterator is used, so the body presumably unlinks/deletes each vport
 * while walking -- confirm against the full source. */
147 for (i = 0; i < VPORT_HASH_BUCKETS; i++) {
148 struct hlist_head *bucket = &dev_table[i];
150 struct hlist_node *node, *next;
152 hlist_for_each_entry_safe(vport, node, next, bucket, hash_node)
161 * vport_exit - shutdown vport subsystem
163 * Called at module exit time to shutdown the vport subsystem and any
164 * initialized vport types.
/* Give each successfully initialized type a chance to clean up. */
173 for (i = 0; i < n_vport_types; i++) {
174 if (vport_ops_list[i]->exit)
175 vport_ops_list[i]->exit();
/* NOTE(review): dev_table is presumably freed on an elided line nearby. */
178 kfree(vport_ops_list);
/* Common worker for vport_user_add()/compat_vport_user_add(): takes a
 * request already copied into kernel memory, sanitizes its strings, and
 * creates the vport (locking and error paths are on elided lines). */
183 do_vport_add(struct odp_vport_add *vport_config)
/* Defensively NUL-terminate the user-supplied strings before treating
 * them as C strings. */
188 vport_config->port_type[VPORT_TYPE_SIZE - 1] = '\0';
189 vport_config->devname[IFNAMSIZ - 1] = '\0';
/* Duplicate-name check: handling of an existing vport is elided. */
193 vport = vport_locate(vport_config->devname);
200 vport = vport_add(vport_config->devname, vport_config->port_type,
201 vport_config->config);
/* vport_add() returns an ERR_PTR() on failure. */
205 err = PTR_ERR(vport);
213 * vport_user_add - add vport device (for userspace callers)
215 * @uvport_config: New port configuration.
217 * Creates a new vport with the specified configuration (which is dependent
218 * on device type). This function is for userspace callers and assumes no
 * locks are held.
222 vport_user_add(const struct odp_vport_add __user *uvport_config)
224 struct odp_vport_add vport_config;
/* Copy the request out of userspace; the -EFAULT return on failure is on
 * an elided line. */
226 if (copy_from_user(&vport_config, uvport_config, sizeof(struct odp_vport_add)))
229 return do_vport_add(&vport_config);
/* 32-bit compat variant of vport_user_add(): converts a
 * struct compat_odp_vport_add (32-bit config pointer) to the native
 * layout and hands off to do_vport_add().
 * NOTE(review): unlike vport_user_add(), the parameter lacks a __user
 * annotation yet is passed straight to copy_from_user() -- looks like a
 * missing __user qualifier (sparse would flag it); confirm against the
 * prototype. */
234 compat_vport_user_add(struct compat_odp_vport_add *ucompat)
236 struct compat_odp_vport_add compat;
237 struct odp_vport_add vport_config;
239 if (copy_from_user(&compat, ucompat, sizeof(struct compat_odp_vport_add)))
/* Widen the compat layout: fixed-size strings copy as-is, the config
 * pointer goes through compat_ptr(). */
242 memcpy(vport_config.port_type, compat.port_type, VPORT_TYPE_SIZE);
243 memcpy(vport_config.devname, compat.devname, IFNAMSIZ);
244 vport_config.config = compat_ptr(compat.config);
246 return do_vport_add(&vport_config);
/* Common worker for vport_user_mod()/compat_vport_user_mod(): looks up the
 * named vport and applies a new type-specific configuration (locking and
 * not-found handling are on elided lines). */
251 do_vport_mod(struct odp_vport_mod *vport_config)
/* Defensively NUL-terminate the user-supplied device name. */
256 vport_config->devname[IFNAMSIZ - 1] = '\0';
260 vport = vport_locate(vport_config->devname);
267 err = vport_mod(vport, vport_config->config);
276 * vport_user_mod - modify existing vport device (for userspace callers)
278 * @uvport_config: New configuration for vport
280 * Modifies an existing device with the specified configuration (which is
281 * dependent on device type). This function is for userspace callers and
282 * assumes no locks are held.
285 vport_user_mod(const struct odp_vport_mod __user *uvport_config)
287 struct odp_vport_mod vport_config;
/* Copy the request out of userspace; -EFAULT return elided. */
289 if (copy_from_user(&vport_config, uvport_config, sizeof(struct odp_vport_mod)))
292 return do_vport_mod(&vport_config);
/* 32-bit compat variant of vport_user_mod(); mirrors
 * compat_vport_user_add().
 * NOTE(review): same apparent missing __user annotation as
 * compat_vport_user_add() -- the pointer is passed to copy_from_user(). */
297 compat_vport_user_mod(struct compat_odp_vport_mod *ucompat)
299 struct compat_odp_vport_mod compat;
300 struct odp_vport_mod vport_config;
302 if (copy_from_user(&compat, ucompat, sizeof(struct compat_odp_vport_mod)))
/* Convert to the native layout; the config pointer widens via
 * compat_ptr(). */
305 memcpy(vport_config.devname, compat.devname, IFNAMSIZ);
306 vport_config.config = compat_ptr(compat.config);
308 return do_vport_mod(&vport_config);
313 * vport_user_del - delete existing vport device (for userspace callers)
315 * @udevname: Name of device to delete
317 * Deletes the specified device. Detaches the device from a datapath first
318 * if it is attached. Deleting the device will fail if it does not exist or it
319 * is the datapath local port. It is also possible to fail for less obvious
320 * reasons, such as lack of memory. This function is for userspace callers and
321 * assumes no locks are held.
324 vport_user_del(const char __user *udevname)
326 char devname[IFNAMSIZ];
328 struct dp_port *dp_port;
/* strncpy_from_user() returns the copied length, or -EFAULT; the
 * negative-return branch is on an elided line. */
332 retval = strncpy_from_user(devname, udevname, IFNAMSIZ);
335 else if (retval >= IFNAMSIZ)
336 return -ENAMETOOLONG;
/* NOTE(review): the locks required by vport_locate() are taken on elided
 * lines before this lookup. */
340 vport = vport_locate(devname);
/* If attached to a datapath, detach before deleting.  The local port
 * (name equal to the datapath's) must never be deleted this way. */
346 dp_port = vport_get_dp_port(vport);
348 struct datapath *dp = dp_port->dp;
350 mutex_lock(&dp->mutex);
352 if (!strcmp(dp_name(dp), devname)) {
357 err = dp_detach_port(dp_port, 0);
360 mutex_unlock(&dp->mutex);
/* Only reached once the vport is detached (error paths elided). */
367 err = vport_del(vport);
376 * vport_user_stats_get - retrieve device stats (for userspace callers)
378 * @ustats_req: Stats request parameters.
380 * Retrieves transmit, receive, and error stats for the given device. This
381 * function is for userspace callers and assumes no locks are held.
384 vport_user_stats_get(struct odp_vport_stats_req __user *ustats_req)
386 struct odp_vport_stats_req stats_req;
390 if (copy_from_user(&stats_req, ustats_req, sizeof(struct odp_vport_stats_req)))
/* Defensively NUL-terminate the user-supplied device name. */
393 stats_req.devname[IFNAMSIZ - 1] = '\0';
397 vport = vport_locate(stats_req.devname);
/* Prefer the type's own stats implementation when it provides one. */
403 if (vport->ops->get_stats) {
405 err = vport->ops->get_stats(vport, &stats_req.stats);
/* Otherwise fall back to the generic stats layer: sum per-CPU rx/tx
 * counters, then merge in the locked error counters. */
408 } else if (vport->ops->flags & VPORT_F_GEN_STATS) {
411 memset(&stats_req.stats, 0, sizeof(struct odp_vport_stats));
/* Per-CPU counters are read without synchronization; momentarily stale
 * values are acceptable for statistics. */
413 for_each_possible_cpu(i) {
414 const struct vport_percpu_stats *percpu_stats;
416 percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
417 stats_req.stats.rx_bytes += percpu_stats->rx_bytes;
418 stats_req.stats.rx_packets += percpu_stats->rx_packets;
419 stats_req.stats.tx_bytes += percpu_stats->tx_bytes;
420 stats_req.stats.tx_packets += percpu_stats->tx_packets;
/* Error counters share one spinlock with vport_record_error(); _bh
 * because errors are recorded from softirq context. */
423 spin_lock_bh(&vport->err_stats.lock);
425 stats_req.stats.rx_dropped = vport->err_stats.rx_dropped;
/* rx_errors aggregates every rx error class, per the classic netdev
 * convention. */
426 stats_req.stats.rx_errors = vport->err_stats.rx_errors
427 + vport->err_stats.rx_frame_err
428 + vport->err_stats.rx_over_err
429 + vport->err_stats.rx_crc_err;
430 stats_req.stats.rx_frame_err = vport->err_stats.rx_frame_err;
431 stats_req.stats.rx_over_err = vport->err_stats.rx_over_err;
432 stats_req.stats.rx_crc_err = vport->err_stats.rx_crc_err;
433 stats_req.stats.tx_dropped = vport->err_stats.tx_dropped;
434 stats_req.stats.tx_errors = vport->err_stats.tx_errors;
435 stats_req.stats.collisions = vport->err_stats.collisions;
437 spin_unlock_bh(&vport->err_stats.lock);
/* Copy the filled request back to userspace (error return elided). */
447 if (copy_to_user(ustats_req, &stats_req, sizeof(struct odp_vport_stats_req)))
454 * vport_user_ether_get - retrieve device Ethernet address (for userspace callers)
456 * @uvport_ether: Ethernet address request parameters.
458 * Retrieves the Ethernet address of the given device. This function is for
459 * userspace callers and assumes no locks are held.
462 vport_user_ether_get(struct odp_vport_ether __user *uvport_ether)
464 struct odp_vport_ether vport_ether;
468 if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
/* Defensively NUL-terminate the user-supplied device name. */
471 vport_ether.devname[IFNAMSIZ - 1] = '\0';
475 vport = vport_locate(vport_ether.devname);
/* Fill in the address in place, then copy the whole struct back. */
482 memcpy(vport_ether.ether_addr, vport_get_addr(vport), ETH_ALEN);
489 if (copy_to_user(uvport_ether, &vport_ether, sizeof(struct odp_vport_ether)))
496 * vport_user_ether_set - set device Ethernet address (for userspace callers)
498 * @uvport_ether: Ethernet address request parameters.
500 * Sets the Ethernet address of the given device. Some devices may not support
501 * setting the Ethernet address, in which case the result will always be
502 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
 * are held.
506 vport_user_ether_set(struct odp_vport_ether __user *uvport_ether)
508 struct odp_vport_ether vport_ether;
512 if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
/* Defensively NUL-terminate the user-supplied device name. */
515 vport_ether.devname[IFNAMSIZ - 1] = '\0';
520 vport = vport_locate(vport_ether.devname);
/* vport_set_addr() validates the address and dispatches to the type's
 * set_addr() hook. */
526 err = vport_set_addr(vport, vport_ether.ether_addr);
535 * vport_user_mtu_get - retrieve device MTU (for userspace callers)
537 * @uvport_mtu: MTU request parameters.
539 * Retrieves the MTU of the given device. This function is for userspace
540 * callers and assumes no locks are held.
543 vport_user_mtu_get(struct odp_vport_mtu __user *uvport_mtu)
545 struct odp_vport_mtu vport_mtu;
549 if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
/* Defensively NUL-terminate the user-supplied device name. */
552 vport_mtu.devname[IFNAMSIZ - 1] = '\0';
556 vport = vport_locate(vport_mtu.devname);
/* Fill in the MTU, then copy the whole struct back to userspace. */
562 vport_mtu.mtu = vport_get_mtu(vport);
568 if (copy_to_user(uvport_mtu, &vport_mtu, sizeof(struct odp_vport_mtu)))
575 * vport_user_mtu_set - set device MTU (for userspace callers)
577 * @uvport_mtu: MTU request parameters.
579 * Sets the MTU of the given device. Some devices may not support setting the
580 * MTU, in which case the result will always be -EOPNOTSUPP. This function is
581 * for userspace callers and assumes no locks are held.
584 vport_user_mtu_set(struct odp_vport_mtu __user *uvport_mtu)
586 struct odp_vport_mtu vport_mtu;
590 if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
/* Defensively NUL-terminate the user-supplied device name. */
593 vport_mtu.devname[IFNAMSIZ - 1] = '\0';
598 vport = vport_locate(vport_mtu.devname);
/* vport_set_mtu() dispatches to the type's set_mtu() hook. */
604 err = vport_set_mtu(vport, vport_mtu.mtu);
612 static struct hlist_head *
613 hash_bucket(const char *name)
615 unsigned int hash = full_name_hash(name, strlen(name));
616 return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
620 * vport_locate - find a port that has already been created
622 * @name: name of port to find
624 * Either RTNL or vport lock must be acquired before calling this function
625 * and held while using the found port. See the locking comments at the
 * top of this file.
629 vport_locate(const char *name)
631 struct hlist_head *bucket = hash_bucket(name);
633 struct hlist_node *node;
/* Runtime enforcement of the locking contract above: warn and bail out
 * rather than walk the table unlocked (error return elided). */
635 if (unlikely(!mutex_is_locked(&vport_mutex) && !rtnl_is_locked())) {
636 printk(KERN_ERR "openvswitch: neither RTNL nor vport lock held in vport_locate\n");
/* Linear scan of the single bucket selected by the name hash. */
642 hlist_for_each_entry(vport, node, bucket, hash_node)
643 if (!strcmp(name, vport_get_name(vport)))
/* NOTE(review): returning the match / NULL fall-through is on elided
 * lines. */
654 register_vport(struct vport *vport)
656 hlist_add_head(&vport->hash_node, hash_bucket(vport_get_name(vport)));
660 unregister_vport(struct vport *vport)
662 hlist_del(&vport->hash_node);
666 * vport_alloc - allocate and initialize new vport
668 * @priv_size: Size of private data area to allocate.
669 * @ops: vport device ops
671 * Allocate and initialize a new vport defined by @ops. The vport will contain
672 * a private data area of size @priv_size that can be accessed using
673 * vport_priv(). vports that are no longer needed should be released with
677 vport_alloc(int priv_size, const struct vport_ops *ops)
682 alloc_size = sizeof(struct vport);
684 alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
685 alloc_size += priv_size;
688 vport = kzalloc(alloc_size, GFP_KERNEL);
690 return ERR_PTR(-ENOMEM);
694 if (vport->ops->flags & VPORT_F_GEN_STATS) {
695 vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
696 if (!vport->percpu_stats)
697 return ERR_PTR(-ENOMEM);
699 spin_lock_init(&vport->err_stats.lock);
706 * vport_free - uninitialize and free vport
708 * @vport: vport to free
710 * Frees a vport allocated with vport_alloc() when it is no longer needed.
713 vport_free(struct vport *vport)
/* Only vports using the generic stats layer own a percpu_stats
 * allocation (see vport_alloc()). */
715 if (vport->ops->flags & VPORT_F_GEN_STATS)
716 free_percpu(vport->percpu_stats);
/* NOTE(review): the kfree() of @vport itself is on an elided line. */
722 * vport_add - add vport device (for kernel callers)
724 * @name: Name of new device.
725 * @type: Type of new device (to be matched against types in registered vport
 * ops).
727 * @config: Device type specific configuration. Userspace pointer.
729 * Creates a new vport with the specified configuration (which is dependent
730 * on device type). Both RTNL and vport locks must be held.
733 vport_add(const char *name, const char *type, const void __user *config)
/* Linear search of the registered types for a matching type string. */
742 for (i = 0; i < n_vport_types; i++) {
743 if (!strcmp(vport_ops_list[i]->type, type)) {
744 vport = vport_ops_list[i]->create(name, config);
/* create() returns ERR_PTR on failure (IS_ERR check elided). */
746 err = PTR_ERR(vport);
/* Publish the new vport in the name hash table. */
750 register_vport(vport);
762 * vport_mod - modify existing vport device (for kernel callers)
764 * @vport: vport to modify.
765 * @config: Device type specific configuration. Userspace pointer.
767 * Modifies an existing device with the specified configuration (which is
768 * dependent on device type). Both RTNL and vport locks must be held.
771 vport_mod(struct vport *vport, const void __user *config)
/* Optional hook; the fallback (presumably -EOPNOTSUPP, matching the
 * other optional hooks in this file) is on an elided line. */
776 if (vport->ops->modify)
777 return vport->ops->modify(vport, config);
783 * vport_del - delete existing vport device (for kernel callers)
785 * @vport: vport to delete.
787 * Deletes the specified device. The device must not be currently attached to
788 * a datapath. It is possible to fail for reasons such as lack of memory.
789 * Both RTNL and vport locks must be held.
792 vport_del(struct vport *vport)
/* Caller contract violation (still attached) is a hard bug. */
796 BUG_ON(vport_get_dp_port(vport));
/* Remove from the name table before destroying so no new lookups can
 * find it. */
798 unregister_vport(vport);
800 return vport->ops->destroy(vport);
804 * vport_attach - attach a vport to a datapath
806 * @vport: vport to attach.
807 * @dp_port: Datapath port to attach the vport to.
809 * Attaches a vport to a specific datapath so that packets may be exchanged.
810 * Both ports must be currently unattached. @dp_port must be successfully
811 * attached to a vport before it is connected to a datapath and must not be
812 * modified while connected. RTNL lock and the appropriate DP mutex must be held.
815 vport_attach(struct vport *vport, struct dp_port *dp_port)
/* Refuse double attachment (error return elided). */
822 if (vport_get_dp_port(vport))
/* Optional per-type attach hook; failure aborts before any linkage is
 * established. */
825 if (vport->ops->attach) {
828 err = vport->ops->attach(vport);
/* Link both directions.  dp_port->vport is a plain store; the reverse
 * pointer is published with rcu_assign_pointer so RCU readers see a
 * fully attached vport. */
833 dp_port->vport = vport;
834 rcu_assign_pointer(vport->dp_port, dp_port);
840 * vport_detach - detach a vport from a datapath
842 * @vport: vport to detach.
844 * Detaches a vport from a datapath. May fail for a variety of reasons,
845 * including lack of memory. RTNL lock and the appropriate DP mutex must be held.
848 vport_detach(struct vport *vport)
850 struct dp_port *dp_port;
/* Not-attached handling is on elided lines after this lookup. */
854 dp_port = vport_get_dp_port(vport);
/* Tear down both directions of the linkage established by
 * vport_attach(); the RCU-protected pointer is cleared via
 * rcu_assign_pointer. */
858 dp_port->vport = NULL;
859 rcu_assign_pointer(vport->dp_port, NULL);
/* Optional per-type detach hook; the no-hook return (presumably 0) is on
 * an elided line. */
861 if (vport->ops->detach)
862 return vport->ops->detach(vport);
868 * vport_set_mtu - set device MTU (for kernel callers)
870 * @vport: vport on which to set MTU.
873 * Sets the MTU of the given device. Some devices may not support setting the
874 * MTU, in which case the result will always be -EOPNOTSUPP. RTNL lock must
 * be held.
878 vport_set_mtu(struct vport *vport, int mtu)
/* Optional hook; -EOPNOTSUPP fallback is on an elided line. */
885 if (vport->ops->set_mtu) {
888 ret = vport->ops->set_mtu(vport, mtu);
/* After a successful change on a non-internal port, refresh the MTU of
 * the datapath's internal devices to match. */
890 if (!ret && !is_internal_vport(vport)) {
891 struct dp_port *dp_port = vport_get_dp_port(vport);
/* NOTE(review): presumably guarded by a dp_port NULL check on an elided
 * line. */
894 set_internal_devs_mtu(dp_port->dp);
903 * vport_set_addr - set device Ethernet address (for kernel callers)
905 * @vport: vport on which to set Ethernet address.
906 * @addr: New address.
908 * Sets the Ethernet address of the given device. Some devices may not support
909 * setting the Ethernet address, in which case the result will always be
910 * -EOPNOTSUPP. RTNL lock must be held.
913 vport_set_addr(struct vport *vport, const unsigned char *addr)
/* Reject multicast/zero addresses before touching the device. */
917 if (!is_valid_ether_addr(addr))
918 return -EADDRNOTAVAIL;
/* Optional hook; the -EOPNOTSUPP fallback documented above is on an
 * elided line. */
920 if (vport->ops->set_addr)
921 return vport->ops->set_addr(vport, addr);
927 * vport_get_name - retrieve device name
929 * @vport: vport from which to retrieve the name.
931 * Retrieves the name of the given device. Either RTNL lock or rcu_read_lock
932 * must be held for the entire duration that the name is in use.
935 vport_get_name(const struct vport *vport)
937 return vport->ops->get_name(vport);
941 * vport_get_type - retrieve device type
943 * @vport: vport from which to retrieve the type.
945 * Retrieves the type of the given device. Either RTNL lock or rcu_read_lock
946 * must be held for the entire duration that the type is in use.
949 vport_get_type(const struct vport *vport)
951 return vport->ops->type;
955 * vport_get_addr - retrieve device Ethernet address (for kernel callers)
957 * @vport: vport from which to retrieve the Ethernet address.
959 * Retrieves the Ethernet address of the given device. Either RTNL lock or
960 * rcu_read_lock must be held for the entire duration that the Ethernet address
963 const unsigned char *
964 vport_get_addr(const struct vport *vport)
966 return vport->ops->get_addr(vport);
970 * vport_get_dp_port - retrieve attached datapath port
972 * @vport: vport from which to retrieve the datapath port.
974 * Retrieves the attached datapath port or null if not attached. Either RTNL
975 * lock or rcu_read_lock must be held for the entire duration that the datapath
976 * port is being accessed.
979 vport_get_dp_port(const struct vport *vport)
981 return rcu_dereference(vport->dp_port);
985 * vport_get_kobj - retrieve associated kobj
987 * @vport: vport from which to retrieve the associated kobj
989 * Retrieves the associated kobj or null if no kobj. The returned kobj is
990 * valid for as long as the vport exists.
993 vport_get_kobj(const struct vport *vport)
995 if (vport->ops->get_kobj)
996 return vport->ops->get_kobj(vport);
1002 * vport_get_flags - retrieve device flags
1004 * @vport: vport from which to retrieve the flags
1006 * Retrieves the flags of the given device. Either RTNL lock or rcu_read_lock
1010 vport_get_flags(const struct vport *vport)
1012 return vport->ops->get_dev_flags(vport);
1016 * vport_get_flags - check whether device is running
1018 * @vport: vport on which to check status.
1020 * Checks whether the given device is running. Either RTNL lock or
1021 * rcu_read_lock must be held.
1024 vport_is_running(const struct vport *vport)
1026 return vport->ops->is_running(vport);
1030 * vport_get_flags - retrieve device operating state
1032 * @vport: vport from which to check status
1034 * Retrieves the RFC2863 operstate of the given device. Either RTNL lock or
1035 * rcu_read_lock must be held.
1038 vport_get_operstate(const struct vport *vport)
1040 return vport->ops->get_operstate(vport);
1044 * vport_get_ifindex - retrieve device system interface index
1046 * @vport: vport from which to retrieve index
1048 * Retrieves the system interface index of the given device. Not all devices
1049 * will have system indexes, in which case the index of the datapath local
1050 * port is returned. Returns a negative index on error. Either RTNL lock or
1051 * rcu_read_lock must be held.
1054 vport_get_ifindex(const struct vport *vport)
1056 const struct dp_port *dp_port;
/* Types with a real system interface answer directly. */
1058 if (vport->ops->get_ifindex)
1059 return vport->ops->get_ifindex(vport);
1061 /* If we don't actually have an ifindex, use the local port's.
1062 * Userspace doesn't check it anyways. */
/* NOTE(review): the dp_port == NULL error return is on elided lines. */
1063 dp_port = vport_get_dp_port(vport);
/* Recursive call; presumably the local port's vport implements
 * get_ifindex so this terminates -- confirm against the full source. */
1067 return vport_get_ifindex(dp_port->dp->ports[ODPP_LOCAL]->vport);
1071 * vport_get_iflink - retrieve device system link index
1073 * @vport: vport from which to retrieve index
1075 * Retrieves the system link index of the given device. The link is the index
1076 * of the interface on which the packet will actually be sent. In most cases
1077 * this is the same as the ifindex but may be different for tunnel devices.
1078 * Returns a negative index on error. Either RTNL lock or rcu_read_lock must
1082 vport_get_iflink(const struct vport *vport)
1084 if (vport->ops->get_iflink)
1085 return vport->ops->get_iflink(vport);
1087 /* If we don't have an iflink, use the ifindex. In most cases they
1089 return vport_get_ifindex(vport);
1093 * vport_get_mtu - retrieve device MTU (for kernel callers)
1095 * @vport: vport from which to retrieve MTU
1097 * Retrieves the MTU of the given device. Either RTNL lock or rcu_read_lock
1101 vport_get_mtu(const struct vport *vport)
1103 return vport->ops->get_mtu(vport);
1107 * vport_receive - pass up received packet to the datapath for processing
1109 * @vport: vport that received the packet
1110 * @skb: skb that was received
1112 * Must be called with rcu_read_lock. The packet cannot be shared and
1113 * skb->data should point to the Ethernet header. The caller must have already
1114 * called compute_ip_summed() to initialize the checksumming fields.
1117 vport_receive(struct vport *vport, struct sk_buff *skb)
1119 struct dp_port *dp_port = vport_get_dp_port(vport);
/* Unattached vport: count the drop.  NOTE(review): the skb is presumably
 * freed and the function returns on elided lines. */
1122 vport_record_error(vport, VPORT_E_RX_DROPPED);
/* Generic stats layer: bump this CPU's rx counters.  NOTE(review):
 * bottom-half disable/enable around the per-CPU update appears to be on
 * elided lines -- confirm. */
1128 if (vport->ops->flags & VPORT_F_GEN_STATS) {
1129 struct vport_percpu_stats *stats;
1133 stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
1134 stats->rx_packets++;
1135 stats->rx_bytes += skb->len;
/* Only tunnel-style types (VPORT_F_TUN_ID) supply a tunnel ID; clear it
 * for everyone else so downstream code sees a defined value. */
1140 if (!(vport->ops->flags & VPORT_F_TUN_ID))
1141 OVS_CB(skb)->tun_id = 0;
/* Hand the packet to the datapath proper. */
1143 dp_process_received_packet(dp_port, skb);
1147 * vport_send - send a packet on a device
1149 * @vport: vport on which to send the packet
 * @skb: skb to send
1152 * Sends the given packet and returns the length of data sent. Either RTNL
1153 * lock or rcu_read_lock must be held.
1156 vport_send(struct vport *vport, struct sk_buff *skb)
/* Type-specific transmit; returns bytes sent (<= 0 on failure). */
1160 sent = vport->ops->send(vport, skb);
/* Generic stats layer: bump this CPU's tx counters on success only.
 * NOTE(review): bottom-half disable/enable around the per-CPU update
 * appears to be on elided lines -- confirm. */
1162 if (vport->ops->flags & VPORT_F_GEN_STATS && sent > 0) {
1163 struct vport_percpu_stats *stats;
1167 stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
1168 stats->tx_packets++;
1169 stats->tx_bytes += sent;
1178 * vport_record_error - indicate device error to generic stats layer
1180 * @vport: vport that encountered the error
1181 * @err_type: one of enum vport_err_type types to indicate the error type
1183 * If using the vport generic stats layer indicate that an error of the given
 * type has occurred.
1187 vport_record_error(struct vport *vport, enum vport_err_type err_type)
/* Only meaningful for vports on the generic stats layer; others report
 * errors through their own get_stats(). */
1189 if (vport->ops->flags & VPORT_F_GEN_STATS) {
/* _bh lock: shared with vport_user_stats_get() and taken from softirq
 * context on the rx path. */
1191 spin_lock_bh(&vport->err_stats.lock);
/* Map the error type onto its counter.  NOTE(review): the switch header
 * and the break statements fall on elided lines. */
1194 case VPORT_E_RX_DROPPED:
1195 vport->err_stats.rx_dropped++;
1198 case VPORT_E_RX_ERROR:
1199 vport->err_stats.rx_errors++;
1202 case VPORT_E_RX_FRAME:
1203 vport->err_stats.rx_frame_err++;
1206 case VPORT_E_RX_OVER:
1207 vport->err_stats.rx_over_err++;
1210 case VPORT_E_RX_CRC:
1211 vport->err_stats.rx_crc_err++;
1214 case VPORT_E_TX_DROPPED:
1215 vport->err_stats.tx_dropped++;
1218 case VPORT_E_TX_ERROR:
1219 vport->err_stats.tx_errors++;
1222 case VPORT_E_COLLISION:
1223 vport->err_stats.collisions++;
1227 spin_unlock_bh(&vport->err_stats.lock);