2 * Copyright (c) 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11 #include <linux/dcache.h>
12 #include <linux/etherdevice.h>
14 #include <linux/if_vlan.h>
15 #include <linux/kernel.h>
16 #include <linux/list.h>
17 #include <linux/mutex.h>
18 #include <linux/percpu.h>
19 #include <linux/rtnetlink.h>
20 #include <linux/compat.h>
21 #include <linux/version.h>
24 #include "vport-internal_dev.h"
26 /* List of statically compiled vport implementations. Don't forget to also
27 * add yours to the list at the bottom of vport.h. */
28 static const struct vport_ops *base_vport_ops_list[] = {
33 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
38 static const struct vport_ops **vport_ops_list;
39 static int n_vport_types;
41 static struct hlist_head *dev_table;
42 #define VPORT_HASH_BUCKETS 1024
/* Both RTNL lock and vport_mutex need to be held when updating dev_table.
 *
 * If you use vport_locate and then perform some operations, you need to hold
 * one of these locks if you don't want the vport to be deleted out from under
 * you.
 *
 * If you get a reference to a vport through a dp_port, it is protected
 * by RCU and you need to hold rcu_read_lock instead when reading.
 *
 * If multiple locks are taken, the hierarchy is:
 *
 * 1. RTNL
 * 2. DP
 * 3. vport
 */
58 static DEFINE_MUTEX(vport_mutex);
61 * vport_lock - acquire vport lock
63 * Acquire global vport lock. See above comment about locking requirements
64 * and specific function definitions. May sleep.
68 mutex_lock(&vport_mutex);
72 * vport_unlock - release vport lock
74 * Release lock acquired with vport_lock.
76 void vport_unlock(void)
78 mutex_unlock(&vport_mutex);
/* Complain loudly if a function that requires the vport lock is entered
 * without it; diagnostic only, execution continues. */
#define ASSERT_VPORT()						\
do {								\
	if (unlikely(!mutex_is_locked(&vport_mutex))) {		\
		pr_err("vport lock not held at %s (%d)\n",	\
		       __FILE__, __LINE__);			\
	}							\
} while (0)
91 * vport_init - initialize vport subsystem
93 * Called at module load time to initialize the vport subsystem and any
94 * compiled in vport types.
101 dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
108 vport_ops_list = kmalloc(ARRAY_SIZE(base_vport_ops_list) *
109 sizeof(struct vport_ops *), GFP_KERNEL);
110 if (!vport_ops_list) {
112 goto error_dev_table;
115 for (i = 0; i < ARRAY_SIZE(base_vport_ops_list); i++) {
116 const struct vport_ops *new_ops = base_vport_ops_list[i];
119 err = new_ops->init();
124 vport_ops_list[n_vport_types++] = new_ops;
125 else if (new_ops->flags & VPORT_F_REQUIRED) {
139 static void vport_del_all(void)
146 for (i = 0; i < VPORT_HASH_BUCKETS; i++) {
147 struct hlist_head *bucket = &dev_table[i];
149 struct hlist_node *node, *next;
151 hlist_for_each_entry_safe(vport, node, next, bucket, hash_node)
160 * vport_exit - shutdown vport subsystem
162 * Called at module exit time to shutdown the vport subsystem and any
163 * initialized vport types.
165 void vport_exit(void)
171 for (i = 0; i < n_vport_types; i++) {
172 if (vport_ops_list[i]->exit)
173 vport_ops_list[i]->exit();
176 kfree(vport_ops_list);
181 * vport_user_mod - modify existing vport device (for userspace callers)
183 * @uport: New configuration for vport
185 * Modifies an existing device with the specified configuration (which is
186 * dependent on device type). This function is for userspace callers and
187 * assumes no locks are held.
189 int vport_user_mod(const struct odp_port __user *uport)
191 struct odp_port port;
195 if (copy_from_user(&port, uport, sizeof(port)))
198 port.devname[IFNAMSIZ - 1] = '\0';
202 vport = vport_locate(port.devname);
209 err = vport_mod(vport, &port);
218 * vport_user_stats_get - retrieve device stats (for userspace callers)
220 * @ustats_req: Stats request parameters.
222 * Retrieves transmit, receive, and error stats for the given device. This
223 * function is for userspace callers and assumes no locks are held.
225 int vport_user_stats_get(struct odp_vport_stats_req __user *ustats_req)
227 struct odp_vport_stats_req stats_req;
231 if (copy_from_user(&stats_req, ustats_req, sizeof(struct odp_vport_stats_req)))
234 stats_req.devname[IFNAMSIZ - 1] = '\0';
238 vport = vport_locate(stats_req.devname);
244 err = vport_get_stats(vport, &stats_req.stats);
250 if (copy_to_user(ustats_req, &stats_req, sizeof(struct odp_vport_stats_req)))
257 * vport_user_stats_set - sets offset device stats (for userspace callers)
259 * @ustats_req: Stats set parameters.
261 * Provides a set of transmit, receive, and error stats to be added as an
262 * offset to the collect data when stats are retreived. Some devices may not
263 * support setting the stats, in which case the result will always be
264 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
267 int vport_user_stats_set(struct odp_vport_stats_req __user *ustats_req)
269 struct odp_vport_stats_req stats_req;
273 if (copy_from_user(&stats_req, ustats_req, sizeof(struct odp_vport_stats_req)))
276 stats_req.devname[IFNAMSIZ - 1] = '\0';
281 vport = vport_locate(stats_req.devname);
287 err = vport_set_stats(vport, &stats_req.stats);
297 * vport_user_ether_get - retrieve device Ethernet address (for userspace callers)
299 * @uvport_ether: Ethernet address request parameters.
301 * Retrieves the Ethernet address of the given device. This function is for
302 * userspace callers and assumes no locks are held.
304 int vport_user_ether_get(struct odp_vport_ether __user *uvport_ether)
306 struct odp_vport_ether vport_ether;
310 if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
313 vport_ether.devname[IFNAMSIZ - 1] = '\0';
317 vport = vport_locate(vport_ether.devname);
324 memcpy(vport_ether.ether_addr, vport_get_addr(vport), ETH_ALEN);
331 if (copy_to_user(uvport_ether, &vport_ether, sizeof(struct odp_vport_ether)))
338 * vport_user_ether_set - set device Ethernet address (for userspace callers)
340 * @uvport_ether: Ethernet address request parameters.
342 * Sets the Ethernet address of the given device. Some devices may not support
343 * setting the Ethernet address, in which case the result will always be
344 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
347 int vport_user_ether_set(struct odp_vport_ether __user *uvport_ether)
349 struct odp_vport_ether vport_ether;
353 if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
356 vport_ether.devname[IFNAMSIZ - 1] = '\0';
361 vport = vport_locate(vport_ether.devname);
367 err = vport_set_addr(vport, vport_ether.ether_addr);
376 * vport_user_mtu_get - retrieve device MTU (for userspace callers)
378 * @uvport_mtu: MTU request parameters.
380 * Retrieves the MTU of the given device. This function is for userspace
381 * callers and assumes no locks are held.
383 int vport_user_mtu_get(struct odp_vport_mtu __user *uvport_mtu)
385 struct odp_vport_mtu vport_mtu;
389 if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
392 vport_mtu.devname[IFNAMSIZ - 1] = '\0';
396 vport = vport_locate(vport_mtu.devname);
402 vport_mtu.mtu = vport_get_mtu(vport);
408 if (copy_to_user(uvport_mtu, &vport_mtu, sizeof(struct odp_vport_mtu)))
415 * vport_user_mtu_set - set device MTU (for userspace callers)
417 * @uvport_mtu: MTU request parameters.
419 * Sets the MTU of the given device. Some devices may not support setting the
420 * MTU, in which case the result will always be -EOPNOTSUPP. This function is
421 * for userspace callers and assumes no locks are held.
423 int vport_user_mtu_set(struct odp_vport_mtu __user *uvport_mtu)
425 struct odp_vport_mtu vport_mtu;
429 if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
432 vport_mtu.devname[IFNAMSIZ - 1] = '\0';
437 vport = vport_locate(vport_mtu.devname);
443 err = vport_set_mtu(vport, vport_mtu.mtu);
451 static struct hlist_head *hash_bucket(const char *name)
453 unsigned int hash = full_name_hash(name, strlen(name));
454 return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
458 * vport_locate - find a port that has already been created
460 * @name: name of port to find
462 * Either RTNL or vport lock must be acquired before calling this function
463 * and held while using the found port. See the locking comments at the
466 struct vport *vport_locate(const char *name)
468 struct hlist_head *bucket = hash_bucket(name);
470 struct hlist_node *node;
472 if (unlikely(!mutex_is_locked(&vport_mutex) && !rtnl_is_locked())) {
473 pr_err("neither RTNL nor vport lock held in vport_locate\n");
479 hlist_for_each_entry(vport, node, bucket, hash_node)
480 if (!strcmp(name, vport_get_name(vport)))
490 static void register_vport(struct vport *vport)
492 hlist_add_head(&vport->hash_node, hash_bucket(vport_get_name(vport)));
495 static void unregister_vport(struct vport *vport)
497 hlist_del(&vport->hash_node);
501 * vport_alloc - allocate and initialize new vport
503 * @priv_size: Size of private data area to allocate.
504 * @ops: vport device ops
506 * Allocate and initialize a new vport defined by @ops. The vport will contain
507 * a private data area of size @priv_size that can be accessed using
508 * vport_priv(). vports that are no longer needed should be released with
511 struct vport *vport_alloc(int priv_size, const struct vport_ops *ops)
516 alloc_size = sizeof(struct vport);
518 alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
519 alloc_size += priv_size;
522 vport = kzalloc(alloc_size, GFP_KERNEL);
524 return ERR_PTR(-ENOMEM);
528 if (vport->ops->flags & VPORT_F_GEN_STATS) {
529 vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
530 if (!vport->percpu_stats)
531 return ERR_PTR(-ENOMEM);
533 spin_lock_init(&vport->stats_lock);
540 * vport_free - uninitialize and free vport
542 * @vport: vport to free
544 * Frees a vport allocated with vport_alloc() when it is no longer needed.
546 void vport_free(struct vport *vport)
548 if (vport->ops->flags & VPORT_F_GEN_STATS)
549 free_percpu(vport->percpu_stats);
555 * vport_add - add vport device (for kernel callers)
557 * @parms: Information about new vport.
559 * Creates a new vport with the specified configuration (which is dependent
560 * on device type). Both RTNL and vport locks must be held.
562 struct vport *vport_add(const struct vport_parms *parms)
571 for (i = 0; i < n_vport_types; i++) {
572 if (!strcmp(vport_ops_list[i]->type, parms->type)) {
573 vport = vport_ops_list[i]->create(parms);
575 err = PTR_ERR(vport);
579 register_vport(vport);
591 * vport_mod - modify existing vport device (for kernel callers)
593 * @vport: vport to modify.
594 * @port: New configuration.
596 * Modifies an existing device with the specified configuration (which is
597 * dependent on device type). Both RTNL and vport locks must be held.
599 int vport_mod(struct vport *vport, struct odp_port *port)
604 if (vport->ops->modify)
605 return vport->ops->modify(vport, port);
611 * vport_del - delete existing vport device (for kernel callers)
613 * @vport: vport to delete.
615 * Deletes the specified device. The device must not be currently attached to
616 * a datapath. It is possible to fail for reasons such as lack of memory.
617 * Both RTNL and vport locks must be held.
619 int vport_del(struct vport *vport)
623 BUG_ON(vport_get_dp_port(vport));
625 unregister_vport(vport);
627 return vport->ops->destroy(vport);
631 * vport_attach - attach a vport to a datapath
633 * @vport: vport to attach.
634 * @dp_port: Datapath port to attach the vport to.
636 * Attaches a vport to a specific datapath so that packets may be exchanged.
637 * Both ports must be currently unattached. @dp_port must be successfully
638 * attached to a vport before it is connected to a datapath and must not be
639 * modified while connected. RTNL lock and the appropriate DP mutex must be held.
641 int vport_attach(struct vport *vport, struct dp_port *dp_port)
645 if (vport_get_dp_port(vport))
648 if (vport->ops->attach) {
651 err = vport->ops->attach(vport);
656 rcu_assign_pointer(vport->dp_port, dp_port);
662 * vport_detach - detach a vport from a datapath
664 * @vport: vport to detach.
666 * Detaches a vport from a datapath. May fail for a variety of reasons,
667 * including lack of memory. RTNL lock and the appropriate DP mutex must be held.
669 int vport_detach(struct vport *vport)
671 struct dp_port *dp_port;
675 dp_port = vport_get_dp_port(vport);
679 rcu_assign_pointer(vport->dp_port, NULL);
681 if (vport->ops->detach)
682 return vport->ops->detach(vport);
688 * vport_set_mtu - set device MTU (for kernel callers)
690 * @vport: vport on which to set MTU.
693 * Sets the MTU of the given device. Some devices may not support setting the
694 * MTU, in which case the result will always be -EOPNOTSUPP. RTNL lock must
697 int vport_set_mtu(struct vport *vport, int mtu)
704 if (vport->ops->set_mtu) {
707 ret = vport->ops->set_mtu(vport, mtu);
709 if (!ret && !is_internal_vport(vport)) {
710 struct dp_port *dp_port = vport_get_dp_port(vport);
713 set_internal_devs_mtu(dp_port->dp);
722 * vport_set_addr - set device Ethernet address (for kernel callers)
724 * @vport: vport on which to set Ethernet address.
725 * @addr: New address.
727 * Sets the Ethernet address of the given device. Some devices may not support
728 * setting the Ethernet address, in which case the result will always be
729 * -EOPNOTSUPP. RTNL lock must be held.
731 int vport_set_addr(struct vport *vport, const unsigned char *addr)
735 if (!is_valid_ether_addr(addr))
736 return -EADDRNOTAVAIL;
738 if (vport->ops->set_addr)
739 return vport->ops->set_addr(vport, addr);
745 * vport_set_stats - sets offset device stats (for kernel callers)
747 * @vport: vport on which to set stats
748 * @stats: stats to set
750 * Provides a set of transmit, receive, and error stats to be added as an
751 * offset to the collect data when stats are retreived. Some devices may not
752 * support setting the stats, in which case the result will always be
753 * -EOPNOTSUPP. RTNL lock must be held.
755 int vport_set_stats(struct vport *vport, struct rtnl_link_stats64 *stats)
759 if (vport->ops->flags & VPORT_F_GEN_STATS) {
760 spin_lock_bh(&vport->stats_lock);
761 vport->offset_stats = *stats;
762 spin_unlock_bh(&vport->stats_lock);
765 } else if (vport->ops->set_stats)
766 return vport->ops->set_stats(vport, stats);
772 * vport_get_name - retrieve device name
774 * @vport: vport from which to retrieve the name.
776 * Retrieves the name of the given device. Either RTNL lock or rcu_read_lock
777 * must be held for the entire duration that the name is in use.
779 const char *vport_get_name(const struct vport *vport)
781 return vport->ops->get_name(vport);
785 * vport_get_type - retrieve device type
787 * @vport: vport from which to retrieve the type.
789 * Retrieves the type of the given device. Either RTNL lock or rcu_read_lock
790 * must be held for the entire duration that the type is in use.
792 const char *vport_get_type(const struct vport *vport)
794 return vport->ops->type;
798 * vport_get_addr - retrieve device Ethernet address (for kernel callers)
800 * @vport: vport from which to retrieve the Ethernet address.
802 * Retrieves the Ethernet address of the given device. Either RTNL lock or
803 * rcu_read_lock must be held for the entire duration that the Ethernet address
806 const unsigned char *vport_get_addr(const struct vport *vport)
808 return vport->ops->get_addr(vport);
812 * vport_get_dp_port - retrieve attached datapath port
814 * @vport: vport from which to retrieve the datapath port.
816 * Retrieves the attached datapath port or null if not attached. Either RTNL
817 * lock or rcu_read_lock must be held for the entire duration that the datapath
818 * port is being accessed.
820 struct dp_port *vport_get_dp_port(const struct vport *vport)
822 return rcu_dereference(vport->dp_port);
826 * vport_get_kobj - retrieve associated kobj
828 * @vport: vport from which to retrieve the associated kobj
830 * Retrieves the associated kobj or null if no kobj. The returned kobj is
831 * valid for as long as the vport exists.
833 struct kobject *vport_get_kobj(const struct vport *vport)
835 if (vport->ops->get_kobj)
836 return vport->ops->get_kobj(vport);
842 * vport_get_stats - retrieve device stats (for kernel callers)
844 * @vport: vport from which to retrieve the stats
845 * @stats: location to store stats
847 * Retrieves transmit, receive, and error stats for the given device.
849 int vport_get_stats(struct vport *vport, struct rtnl_link_stats64 *stats)
851 struct rtnl_link_stats64 dev_stats;
852 struct rtnl_link_stats64 *dev_statsp = NULL;
855 if (vport->ops->get_stats) {
856 if (vport->ops->flags & VPORT_F_GEN_STATS)
857 dev_statsp = &dev_stats;
862 err = vport->ops->get_stats(vport, dev_statsp);
869 if (vport->ops->flags & VPORT_F_GEN_STATS) {
872 /* We potentially have 3 sources of stats that need to be
873 * combined: those we have collected (split into err_stats and
874 * percpu_stats), offset_stats from set_stats(), and device
875 * error stats from get_stats() (for errors that happen
876 * downstream and therefore aren't reported through our
877 * vport_record_error() function). */
879 spin_lock_bh(&vport->stats_lock);
881 *stats = vport->offset_stats;
883 stats->rx_errors += vport->err_stats.rx_errors;
884 stats->tx_errors += vport->err_stats.tx_errors;
885 stats->tx_dropped += vport->err_stats.tx_dropped;
886 stats->rx_dropped += vport->err_stats.rx_dropped;
888 spin_unlock_bh(&vport->stats_lock);
891 stats->rx_packets += dev_statsp->rx_packets;
892 stats->tx_packets += dev_statsp->tx_packets;
893 stats->rx_bytes += dev_statsp->rx_bytes;
894 stats->tx_bytes += dev_statsp->tx_bytes;
895 stats->rx_errors += dev_statsp->rx_errors;
896 stats->tx_errors += dev_statsp->tx_errors;
897 stats->rx_dropped += dev_statsp->rx_dropped;
898 stats->tx_dropped += dev_statsp->tx_dropped;
899 stats->multicast += dev_statsp->multicast;
900 stats->collisions += dev_statsp->collisions;
901 stats->rx_length_errors += dev_statsp->rx_length_errors;
902 stats->rx_over_errors += dev_statsp->rx_over_errors;
903 stats->rx_crc_errors += dev_statsp->rx_crc_errors;
904 stats->rx_frame_errors += dev_statsp->rx_frame_errors;
905 stats->rx_fifo_errors += dev_statsp->rx_fifo_errors;
906 stats->rx_missed_errors += dev_statsp->rx_missed_errors;
907 stats->tx_aborted_errors += dev_statsp->tx_aborted_errors;
908 stats->tx_carrier_errors += dev_statsp->tx_carrier_errors;
909 stats->tx_fifo_errors += dev_statsp->tx_fifo_errors;
910 stats->tx_heartbeat_errors += dev_statsp->tx_heartbeat_errors;
911 stats->tx_window_errors += dev_statsp->tx_window_errors;
912 stats->rx_compressed += dev_statsp->rx_compressed;
913 stats->tx_compressed += dev_statsp->tx_compressed;
916 for_each_possible_cpu(i) {
917 const struct vport_percpu_stats *percpu_stats;
918 struct vport_percpu_stats local_stats;
921 percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
924 seqcount = read_seqcount_begin(&percpu_stats->seqlock);
925 local_stats = *percpu_stats;
926 } while (read_seqcount_retry(&percpu_stats->seqlock, seqcount));
928 stats->rx_bytes += local_stats.rx_bytes;
929 stats->rx_packets += local_stats.rx_packets;
930 stats->tx_bytes += local_stats.tx_bytes;
931 stats->tx_packets += local_stats.tx_packets;
943 * vport_get_flags - retrieve device flags
945 * @vport: vport from which to retrieve the flags
947 * Retrieves the flags of the given device. Either RTNL lock or rcu_read_lock
950 unsigned vport_get_flags(const struct vport *vport)
952 return vport->ops->get_dev_flags(vport);
956 * vport_get_flags - check whether device is running
958 * @vport: vport on which to check status.
960 * Checks whether the given device is running. Either RTNL lock or
961 * rcu_read_lock must be held.
963 int vport_is_running(const struct vport *vport)
965 return vport->ops->is_running(vport);
969 * vport_get_flags - retrieve device operating state
971 * @vport: vport from which to check status
973 * Retrieves the RFC2863 operstate of the given device. Either RTNL lock or
974 * rcu_read_lock must be held.
976 unsigned char vport_get_operstate(const struct vport *vport)
978 return vport->ops->get_operstate(vport);
982 * vport_get_ifindex - retrieve device system interface index
984 * @vport: vport from which to retrieve index
986 * Retrieves the system interface index of the given device. Not all devices
987 * will have system indexes, in which case the index of the datapath local
988 * port is returned. Returns a negative index on error. Either RTNL lock or
989 * rcu_read_lock must be held.
991 int vport_get_ifindex(const struct vport *vport)
993 const struct dp_port *dp_port;
995 if (vport->ops->get_ifindex)
996 return vport->ops->get_ifindex(vport);
998 /* If we don't actually have an ifindex, use the local port's.
999 * Userspace doesn't check it anyways. */
1000 dp_port = vport_get_dp_port(vport);
1004 return vport_get_ifindex(dp_port->dp->ports[ODPP_LOCAL]->vport);
1008 * vport_get_iflink - retrieve device system link index
1010 * @vport: vport from which to retrieve index
1012 * Retrieves the system link index of the given device. The link is the index
1013 * of the interface on which the packet will actually be sent. In most cases
1014 * this is the same as the ifindex but may be different for tunnel devices.
1015 * Returns a negative index on error. Either RTNL lock or rcu_read_lock must
1018 int vport_get_iflink(const struct vport *vport)
1020 if (vport->ops->get_iflink)
1021 return vport->ops->get_iflink(vport);
1023 /* If we don't have an iflink, use the ifindex. In most cases they
1025 return vport_get_ifindex(vport);
1029 * vport_get_mtu - retrieve device MTU (for kernel callers)
1031 * @vport: vport from which to retrieve MTU
1033 * Retrieves the MTU of the given device. Either RTNL lock or rcu_read_lock
1036 int vport_get_mtu(const struct vport *vport)
1038 return vport->ops->get_mtu(vport);
1042 * vport_receive - pass up received packet to the datapath for processing
1044 * @vport: vport that received the packet
1045 * @skb: skb that was received
1047 * Must be called with rcu_read_lock. The packet cannot be shared and
1048 * skb->data should point to the Ethernet header. The caller must have already
1049 * called compute_ip_summed() to initialize the checksumming fields.
1051 void vport_receive(struct vport *vport, struct sk_buff *skb)
1053 struct dp_port *dp_port = vport_get_dp_port(vport);
1056 vport_record_error(vport, VPORT_E_RX_DROPPED);
1062 if (vport->ops->flags & VPORT_F_GEN_STATS) {
1063 struct vport_percpu_stats *stats;
1066 stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
1068 write_seqcount_begin(&stats->seqlock);
1069 stats->rx_packets++;
1070 stats->rx_bytes += skb->len;
1071 write_seqcount_end(&stats->seqlock);
1076 if (!(vport->ops->flags & VPORT_F_FLOW))
1077 OVS_CB(skb)->flow = NULL;
1079 if (!(vport->ops->flags & VPORT_F_TUN_ID))
1080 OVS_CB(skb)->tun_id = 0;
1082 dp_process_received_packet(dp_port, skb);
1085 static inline unsigned packet_length(const struct sk_buff *skb)
1087 unsigned length = skb->len - ETH_HLEN;
1089 if (skb->protocol == htons(ETH_P_8021Q))
1090 length -= VLAN_HLEN;
1096 * vport_send - send a packet on a device
1098 * @vport: vport on which to send the packet
1101 * Sends the given packet and returns the length of data sent. Either RTNL
1102 * lock or rcu_read_lock must be held.
1104 int vport_send(struct vport *vport, struct sk_buff *skb)
1109 mtu = vport_get_mtu(vport);
1110 if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
1111 if (net_ratelimit())
1112 pr_warn("%s: dropped over-mtu packet: %d > %d\n",
1113 dp_name(vport_get_dp_port(vport)->dp),
1114 packet_length(skb), mtu);
1118 sent = vport->ops->send(vport, skb);
1120 if (vport->ops->flags & VPORT_F_GEN_STATS && sent > 0) {
1121 struct vport_percpu_stats *stats;
1124 stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
1126 write_seqcount_begin(&stats->seqlock);
1127 stats->tx_packets++;
1128 stats->tx_bytes += sent;
1129 write_seqcount_end(&stats->seqlock);
1138 vport_record_error(vport, VPORT_E_TX_DROPPED);
1143 * vport_record_error - indicate device error to generic stats layer
1145 * @vport: vport that encountered the error
1146 * @err_type: one of enum vport_err_type types to indicate the error type
1148 * If using the vport generic stats layer indicate that an error of the given
1151 void vport_record_error(struct vport *vport, enum vport_err_type err_type)
1153 if (vport->ops->flags & VPORT_F_GEN_STATS) {
1155 spin_lock_bh(&vport->stats_lock);
1158 case VPORT_E_RX_DROPPED:
1159 vport->err_stats.rx_dropped++;
1162 case VPORT_E_RX_ERROR:
1163 vport->err_stats.rx_errors++;
1166 case VPORT_E_TX_DROPPED:
1167 vport->err_stats.tx_dropped++;
1170 case VPORT_E_TX_ERROR:
1171 vport->err_stats.tx_errors++;
1175 spin_unlock_bh(&vport->stats_lock);