2 * Copyright (c) 2010 Nicira Networks.
3 * Distributed under the terms of the GNU GPL version 2.
5 * Significant portions of this file may be copied from parts of the Linux
6 * kernel, by Linus Torvalds and others.
9 #include <linux/dcache.h>
10 #include <linux/etherdevice.h>
12 #include <linux/if_vlan.h>
13 #include <linux/kernel.h>
14 #include <linux/list.h>
15 #include <linux/mutex.h>
16 #include <linux/percpu.h>
17 #include <linux/rtnetlink.h>
18 #include <linux/compat.h>
21 #include "vport-internal_dev.h"
23 /* List of statically compiled vport implementations. Don't forget to also
24 * add yours to the list at the bottom of vport.h. */
25 static struct vport_ops *base_vport_ops_list[] = {
/* (review) initializer entries elided in this excerpt. */
/* Runtime list of successfully initialized vport types, built by vport_init()
 * from base_vport_ops_list; n_vport_types counts its valid entries. */
32 static const struct vport_ops **vport_ops_list;
33 static int n_vport_types;
/* Hash table mapping device name -> vport; see hash_bucket()/vport_locate(). */
35 static struct hlist_head *dev_table;
36 #define VPORT_HASH_BUCKETS 1024
38 /* We limit the number of times that we pass through vport_send() to
39 * avoid blowing out the stack in the event that we have a loop. There is
40 * a separate counter for each CPU for both interrupt and non-interrupt
41 * context in order to keep the limit deterministic for a given packet. */
42 struct percpu_loop_counter {
/* (review) struct members elided in this excerpt; vport_send() indexes a
 * count[] member by !!in_interrupt(), so presumably count[2] — confirm. */
46 static struct percpu_loop_counter *vport_loop_counter;
47 #define VPORT_MAX_LOOPS 5
49 /* Both RTNL lock and vport_mutex need to be held when updating dev_table.
51 * If you use vport_locate and then perform some operations, you need to hold
52 * one of these locks if you don't want the vport to be deleted out from under
55 * If you get a reference to a vport through a dp_port, it is protected
56 * by RCU and you need to hold rcu_read_lock instead when reading.
58 * If multiple locks are taken, the hierarchy is:
/* (review) lock-hierarchy list elided in this excerpt. */
63 static DEFINE_MUTEX(vport_mutex);
66 * vport_lock - acquire vport lock
68 * Acquire global vport lock. See above comment about locking requirements
69 * and specific function definitions. May sleep.
/* (review) function signature/braces elided in this excerpt. */
74 	mutex_lock(&vport_mutex);
78 * vport_unlock - release vport lock
80 * Release lock acquired with vport_lock.
/* (review) function signature/braces elided in this excerpt. */
85 	mutex_unlock(&vport_mutex);
/* Debug assertion: warn loudly if the caller does not hold vport_mutex.
 * (review) macro tail (dump_stack()?/while(0)) elided in this excerpt. */
88 #define ASSERT_VPORT() do { \
89 	if (unlikely(!mutex_is_locked(&vport_mutex))) { \
90 		printk(KERN_ERR "openvswitch: vport lock not held at %s (%d)\n", \
91 		       __FILE__, __LINE__); \
97 * vport_init - initialize vport subsystem
99 * Called at module load time to initialize the vport subsystem and any
100 * compiled in vport types.
/* (review) signature, locals, and several statements/error labels are elided
 * in this excerpt; claims below are hedged accordingly. */
108 	dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
/* Runtime ops table sized for the worst case: every compiled-in type. */
115 	vport_ops_list = kmalloc(ARRAY_SIZE(base_vport_ops_list) *
116 				 sizeof(struct vport_ops *), GFP_KERNEL);
117 	if (!vport_ops_list) {
119 		goto error_dev_table;
122 	vport_loop_counter = alloc_percpu(struct percpu_loop_counter);
123 	if (!vport_loop_counter) {
/* Initialize each compiled-in type; only types whose init() succeeds are
 * added to vport_ops_list (so n_vport_types <= ARRAY_SIZE of the base list). */
128 	for (i = 0; i < ARRAY_SIZE(base_vport_ops_list); i++) {
129 		struct vport_ops *new_ops = base_vport_ops_list[i];
132 			err = new_ops->init();
137 			vport_ops_list[n_vport_types++] = new_ops;
138 		else if (new_ops->flags & VPORT_F_REQUIRED) {
/* Error-unwind path: free what was allocated above (labels elided). */
147 	kfree(vport_ops_list);
/* (review) the fragment below (orig. lines 162-167) walks every hash bucket
 * and appears to belong to an elided teardown routine — confirm context. */
162 	for (i = 0; i < VPORT_HASH_BUCKETS; i++) {
163 		struct hlist_head *bucket = &dev_table[i];
165 		struct hlist_node *node, *next;
167 		hlist_for_each_entry_safe(vport, node, next, bucket, hash_node)
176 * vport_exit - shutdown vport subsystem
178 * Called at module exit time to shutdown the vport subsystem and any
179 * initialized vport types.
/* Give each initialized vport type a chance to clean up. */
188 	for (i = 0; i < n_vport_types; i++) {
189 		if (vport_ops_list[i]->exit)
190 			vport_ops_list[i]->exit();
/* Release subsystem-wide allocations made in vport_init(). */
193 	free_percpu(vport_loop_counter);
194 	kfree(vport_ops_list);
/* Common worker for userspace vport-add requests: NUL-terminates the
 * user-supplied strings, rejects duplicates, then creates the vport.
 * (review) return type, locking calls, and error paths elided in this excerpt. */
199 do_vport_add(struct odp_vport_add *vport_config)
/* Defensive termination: userspace may not have NUL-terminated these. */
204 	vport_config->port_type[VPORT_TYPE_SIZE - 1] = '\0';
205 	vport_config->devname[IFNAMSIZ - 1] = '\0';
209 	vport = vport_locate(vport_config->devname);
216 	vport = vport_add(vport_config->devname, vport_config->port_type,
217 			  vport_config->config);
221 		err = PTR_ERR(vport);
229 * vport_user_add - add vport device (for userspace callers)
231 * @uvport_config: New port configuration.
233 * Creates a new vport with the specified configuration (which is dependent
234 * on device type). This function is for userspace callers and assumes no
/* (review) return type line elided; copies the request in from userspace
 * then delegates to do_vport_add(). */
238 vport_user_add(const struct odp_vport_add __user *uvport_config)
240 	struct odp_vport_add vport_config;
242 	if (copy_from_user(&vport_config, uvport_config, sizeof(struct odp_vport_add)))
245 	return do_vport_add(&vport_config);
/* 32-bit-compat entry point for vport add: translates the compat layout
 * (notably the 32-bit config pointer, via compat_ptr) into the native
 * struct before delegating to do_vport_add(). */
250 compat_vport_user_add(struct compat_odp_vport_add *ucompat)
252 	struct compat_odp_vport_add compat;
253 	struct odp_vport_add vport_config;
255 	if (copy_from_user(&compat, ucompat, sizeof(struct compat_odp_vport_add)))
258 	memcpy(vport_config.port_type, compat.port_type, VPORT_TYPE_SIZE);
259 	memcpy(vport_config.devname, compat.devname, IFNAMSIZ);
260 	vport_config.config = compat_ptr(compat.config);
262 	return do_vport_add(&vport_config);
/* Common worker for userspace vport-modify requests: NUL-terminates the
 * device name, looks the vport up, and applies the new config.
 * (review) locking and error paths elided in this excerpt. */
267 do_vport_mod(struct odp_vport_mod *vport_config)
272 	vport_config->devname[IFNAMSIZ - 1] = '\0';
276 	vport = vport_locate(vport_config->devname);
283 	err = vport_mod(vport, vport_config->config);
292 * vport_user_mod - modify existing vport device (for userspace callers)
294 * @uvport_config: New configuration for vport
296 * Modifies an existing device with the specified configuration (which is
297 * dependent on device type). This function is for userspace callers and
298 * assumes no locks are held.
/* Copy request in from userspace, then delegate to do_vport_mod(). */
301 vport_user_mod(const struct odp_vport_mod __user *uvport_config)
303 	struct odp_vport_mod vport_config;
305 	if (copy_from_user(&vport_config, uvport_config, sizeof(struct odp_vport_mod)))
308 	return do_vport_mod(&vport_config);
/* 32-bit-compat entry point for vport modify: converts the compat request
 * (32-bit config pointer via compat_ptr) into the native struct and
 * delegates to do_vport_mod(). */
313 compat_vport_user_mod(struct compat_odp_vport_mod *ucompat)
315 	struct compat_odp_vport_mod compat;
316 	struct odp_vport_mod vport_config;
318 	if (copy_from_user(&compat, ucompat, sizeof(struct compat_odp_vport_mod)))
321 	memcpy(vport_config.devname, compat.devname, IFNAMSIZ);
322 	vport_config.config = compat_ptr(compat.config);
324 	return do_vport_mod(&vport_config);
329 * vport_user_del - delete existing vport device (for userspace callers)
331 * @udevname: Name of device to delete
333 * Deletes the specified device. Detaches the device from a datapath first
334 * if it is attached. Deleting the device will fail if it does not exist or it
335 * is the datapath local port. It is also possible to fail for less obvious
336 * reasons, such as lack of memory. This function is for userspace callers and
337 * assumes no locks are held.
340 vport_user_del(const char __user *udevname)
342 	char devname[IFNAMSIZ];
344 	struct dp_port *dp_port;
/* Copy the name in; strncpy_from_user returns length copied (or -EFAULT). */
348 	retval = strncpy_from_user(devname, udevname, IFNAMSIZ);
351 	else if (retval >= IFNAMSIZ)
352 		return -ENAMETOOLONG;
356 	vport = vport_locate(devname);
/* If the vport is attached, detach it from its datapath first — unless it
 * is the datapath local port (name equals dp_name), which cannot be deleted. */
362 	dp_port = vport_get_dp_port(vport);
364 		struct datapath *dp = dp_port->dp;
366 		mutex_lock(&dp->mutex);
368 		if (!strcmp(dp_name(dp), devname)) {
373 			err = dp_detach_port(dp_port, 0);
376 		mutex_unlock(&dp->mutex);
383 	err = vport_del(vport);
392 * vport_user_stats_get - retrieve device stats (for userspace callers)
394 * @ustats_req: Stats request parameters.
396 * Retrieves transmit, receive, and error stats for the given device. This
397 * function is for userspace callers and assumes no locks are held.
400 vport_user_stats_get(struct odp_vport_stats_req __user *ustats_req)
402 	struct odp_vport_stats_req stats_req;
406 	if (copy_from_user(&stats_req, ustats_req, sizeof(struct odp_vport_stats_req)))
/* Defensive termination of the user-supplied device name. */
409 	stats_req.devname[IFNAMSIZ - 1] = '\0';
413 	vport = vport_locate(stats_req.devname);
419 	err = vport_get_stats(vport, &stats_req.stats);
/* Copy the filled-in stats back out to the same user buffer. */
425 	if (copy_to_user(ustats_req, &stats_req, sizeof(struct odp_vport_stats_req)))
432 * vport_user_stats_set - sets offset device stats (for userspace callers)
434 * @ustats_req: Stats set parameters.
436 * Provides a set of transmit, receive, and error stats to be added as an
437 * offset to the collect data when stats are retreived. Some devices may not
438 * support setting the stats, in which case the result will always be
439 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
443 vport_user_stats_set(struct odp_vport_stats_req __user *ustats_req)
445 	struct odp_vport_stats_req stats_req;
449 	if (copy_from_user(&stats_req, ustats_req, sizeof(struct odp_vport_stats_req)))
/* Defensive termination of the user-supplied device name. */
452 	stats_req.devname[IFNAMSIZ - 1] = '\0';
457 	vport = vport_locate(stats_req.devname);
463 	err = vport_set_stats(vport, &stats_req.stats);
473 * vport_user_ether_get - retrieve device Ethernet address (for userspace callers)
475 * @uvport_ether: Ethernet address request parameters.
477 * Retrieves the Ethernet address of the given device. This function is for
478 * userspace callers and assumes no locks are held.
481 vport_user_ether_get(struct odp_vport_ether __user *uvport_ether)
483 	struct odp_vport_ether vport_ether;
487 	if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
/* Defensive termination of the user-supplied device name. */
490 	vport_ether.devname[IFNAMSIZ - 1] = '\0';
494 	vport = vport_locate(vport_ether.devname);
501 	memcpy(vport_ether.ether_addr, vport_get_addr(vport), ETH_ALEN);
/* Copy the filled-in address back out to the same user buffer. */
508 	if (copy_to_user(uvport_ether, &vport_ether, sizeof(struct odp_vport_ether)))
515 * vport_user_ether_set - set device Ethernet address (for userspace callers)
517 * @uvport_ether: Ethernet address request parameters.
519 * Sets the Ethernet address of the given device. Some devices may not support
520 * setting the Ethernet address, in which case the result will always be
521 * -EOPNOTSUPP. This function is for userspace callers and assumes no locks
525 vport_user_ether_set(struct odp_vport_ether __user *uvport_ether)
527 	struct odp_vport_ether vport_ether;
531 	if (copy_from_user(&vport_ether, uvport_ether, sizeof(struct odp_vport_ether)))
/* Defensive termination of the user-supplied device name. */
534 	vport_ether.devname[IFNAMSIZ - 1] = '\0';
539 	vport = vport_locate(vport_ether.devname);
545 	err = vport_set_addr(vport, vport_ether.ether_addr);
554 * vport_user_mtu_get - retrieve device MTU (for userspace callers)
556 * @uvport_mtu: MTU request parameters.
558 * Retrieves the MTU of the given device. This function is for userspace
559 * callers and assumes no locks are held.
562 vport_user_mtu_get(struct odp_vport_mtu __user *uvport_mtu)
564 	struct odp_vport_mtu vport_mtu;
568 	if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
/* Defensive termination of the user-supplied device name. */
571 	vport_mtu.devname[IFNAMSIZ - 1] = '\0';
575 	vport = vport_locate(vport_mtu.devname);
581 	vport_mtu.mtu = vport_get_mtu(vport);
/* Copy the filled-in MTU back out to the same user buffer. */
587 	if (copy_to_user(uvport_mtu, &vport_mtu, sizeof(struct odp_vport_mtu)))
594 * vport_user_mtu_set - set device MTU (for userspace callers)
596 * @uvport_mtu: MTU request parameters.
598 * Sets the MTU of the given device. Some devices may not support setting the
599 * MTU, in which case the result will always be -EOPNOTSUPP. This function is
600 * for userspace callers and assumes no locks are held.
603 vport_user_mtu_set(struct odp_vport_mtu __user *uvport_mtu)
605 	struct odp_vport_mtu vport_mtu;
609 	if (copy_from_user(&vport_mtu, uvport_mtu, sizeof(struct odp_vport_mtu)))
/* Defensive termination of the user-supplied device name. */
612 	vport_mtu.devname[IFNAMSIZ - 1] = '\0';
617 	vport = vport_locate(vport_mtu.devname);
623 	err = vport_set_mtu(vport, vport_mtu.mtu);
/* Map a device name to its dev_table bucket. Relies on VPORT_HASH_BUCKETS
 * being a power of two so the mask below selects a valid bucket index. */
631 static struct hlist_head *
632 hash_bucket(const char *name)
634 	unsigned int hash = full_name_hash(name, strlen(name));
635 	return &dev_table[hash & (VPORT_HASH_BUCKETS - 1)];
639 * vport_locate - find a port that has already been created
641 * @name: name of port to find
643 * Either RTNL or vport lock must be acquired before calling this function
644 * and held while using the found port. See the locking comments at the
648 vport_locate(const char *name)
650 	struct hlist_head *bucket = hash_bucket(name);
652 	struct hlist_node *node;
/* Runtime enforcement of the locking contract documented above. */
654 	if (unlikely(!mutex_is_locked(&vport_mutex) && !rtnl_is_locked())) {
655 		printk(KERN_ERR "openvswitch: neither RTNL nor vport lock held in vport_locate\n");
/* Linear scan of the bucket; names are unique, so first match wins. */
661 	hlist_for_each_entry(vport, node, bucket, hash_node)
662 		if (!strcmp(name, vport_get_name(vport)))
/* Insert a vport into the name hash table (caller holds the required locks;
 * see the dev_table locking comment near the top of the file). */
673 register_vport(struct vport *vport)
675 	hlist_add_head(&vport->hash_node, hash_bucket(vport_get_name(vport)));
/* Remove a vport from the name hash table (caller holds the required locks). */
679 unregister_vport(struct vport *vport)
681 	hlist_del(&vport->hash_node);
685 * vport_alloc - allocate and initialize new vport
687 * @priv_size: Size of private data area to allocate.
688 * @ops: vport device ops
690 * Allocate and initialize a new vport defined by @ops. The vport will contain
691 * a private data area of size @priv_size that can be accessed using
692 * vport_priv(). vports that are no longer needed should be released with
696 vport_alloc(int priv_size, const struct vport_ops *ops)
/* One allocation covers the vport struct plus an aligned private area,
 * so vport_priv() can locate the private data at a fixed offset. */
701 	alloc_size = sizeof(struct vport);
703 		alloc_size = ALIGN(alloc_size, VPORT_ALIGN);
704 		alloc_size += priv_size;
707 	vport = kzalloc(alloc_size, GFP_KERNEL);
709 		return ERR_PTR(-ENOMEM);
713 	if (vport->ops->flags & VPORT_F_GEN_STATS) {
714 		vport->percpu_stats = alloc_percpu(struct vport_percpu_stats);
715 		if (!vport->percpu_stats)
/* NOTE(review): returning here appears to leak 'vport' allocated above —
 * should this kfree(vport) first? Confirm against elided lines. */
716 			return ERR_PTR(-ENOMEM);
718 		spin_lock_init(&vport->stats_lock);
725 * vport_free - uninitialize and free vport
727 * @vport: vport to free
729 * Frees a vport allocated with vport_alloc() when it is no longer needed.
732 vport_free(struct vport *vport)
/* Only vports using the generic stats layer own a percpu_stats allocation. */
734 	if (vport->ops->flags & VPORT_F_GEN_STATS)
735 		free_percpu(vport->percpu_stats);
741 * vport_add - add vport device (for kernel callers)
743 * @name: Name of new device.
744 * @type: Type of new device (to be matched against types in registered vport
746 * @config: Device type specific configuration. Userspace pointer.
748 * Creates a new vport with the specified configuration (which is dependent
749 * on device type). Both RTNL and vport locks must be held.
752 vport_add(const char *name, const char *type, const void __user *config)
/* Find the matching registered type and let it construct the vport; on
 * success the new vport is entered into the name hash table. */
761 	for (i = 0; i < n_vport_types; i++) {
762 		if (!strcmp(vport_ops_list[i]->type, type)) {
763 			vport = vport_ops_list[i]->create(name, config);
765 				err = PTR_ERR(vport);
769 			register_vport(vport);
781 * vport_mod - modify existing vport device (for kernel callers)
783 * @vport: vport to modify.
784 * @config: Device type specific configuration. Userspace pointer.
786 * Modifies an existing device with the specified configuration (which is
787 * dependent on device type). Both RTNL and vport locks must be held.
790 vport_mod(struct vport *vport, const void __user *config)
/* Delegate to the type-specific hook; the no-hook fallback is elided here. */
795 	if (vport->ops->modify)
796 		return vport->ops->modify(vport, config);
802 * vport_del - delete existing vport device (for kernel callers)
804 * @vport: vport to delete.
806 * Deletes the specified device. The device must not be currently attached to
807 * a datapath. It is possible to fail for reasons such as lack of memory.
808 * Both RTNL and vport locks must be held.
811 vport_del(struct vport *vport)
/* Deleting an attached vport would leave the datapath with a dangling port. */
815 	BUG_ON(vport_get_dp_port(vport));
817 	unregister_vport(vport);
819 	return vport->ops->destroy(vport);
823 * vport_attach - attach a vport to a datapath
825 * @vport: vport to attach.
826 * @dp_port: Datapath port to attach the vport to.
828 * Attaches a vport to a specific datapath so that packets may be exchanged.
829 * Both ports must be currently unattached. @dp_port must be successfully
830 * attached to a vport before it is connected to a datapath and must not be
831 * modified while connected. RTNL lock and the appropriate DP mutex must be held.
834 vport_attach(struct vport *vport, struct dp_port *dp_port)
/* Refuse to attach a vport that is already bound to a datapath port. */
841 	if (vport_get_dp_port(vport))
844 	if (vport->ops->attach) {
847 		err = vport->ops->attach(vport);
/* Publish the linkage; rcu_assign_pointer orders the store for RCU readers. */
852 	dp_port->vport = vport;
853 	rcu_assign_pointer(vport->dp_port, dp_port);
859 * vport_detach - detach a vport from a datapath
861 * @vport: vport to detach.
863 * Detaches a vport from a datapath. May fail for a variety of reasons,
864 * including lack of memory. RTNL lock and the appropriate DP mutex must be held.
867 vport_detach(struct vport *vport)
869 	struct dp_port *dp_port;
873 	dp_port = vport_get_dp_port(vport);
/* Sever the linkage before calling the type-specific detach hook. */
877 	dp_port->vport = NULL;
878 	rcu_assign_pointer(vport->dp_port, NULL);
880 	if (vport->ops->detach)
881 		return vport->ops->detach(vport);
887 * vport_set_mtu - set device MTU (for kernel callers)
889 * @vport: vport on which to set MTU.
892 * Sets the MTU of the given device. Some devices may not support setting the
893 * MTU, in which case the result will always be -EOPNOTSUPP. RTNL lock must
897 vport_set_mtu(struct vport *vport, int mtu)
904 	if (vport->ops->set_mtu) {
907 		ret = vport->ops->set_mtu(vport, mtu);
/* A non-internal device's MTU change may require recomputing the MTU of
 * the datapath's internal devices, which track the minimum — confirm. */
909 		if (!ret && !is_internal_vport(vport)) {
910 			struct dp_port *dp_port = vport_get_dp_port(vport);
913 				set_internal_devs_mtu(dp_port->dp);
922 * vport_set_addr - set device Ethernet address (for kernel callers)
924 * @vport: vport on which to set Ethernet address.
925 * @addr: New address.
927 * Sets the Ethernet address of the given device. Some devices may not support
928 * setting the Ethernet address, in which case the result will always be
929 * -EOPNOTSUPP. RTNL lock must be held.
932 vport_set_addr(struct vport *vport, const unsigned char *addr)
/* Reject multicast/zero addresses up front, before touching the device. */
936 	if (!is_valid_ether_addr(addr))
937 		return -EADDRNOTAVAIL;
939 	if (vport->ops->set_addr)
940 		return vport->ops->set_addr(vport, addr);
946 * vport_set_stats - sets offset device stats (for kernel callers)
948 * @vport: vport on which to set stats
949 * @stats: stats to set
951 * Provides a set of transmit, receive, and error stats to be added as an
952 * offset to the collect data when stats are retreived. Some devices may not
953 * support setting the stats, in which case the result will always be
954 * -EOPNOTSUPP. RTNL lock must be held.
957 vport_set_stats(struct vport *vport, struct odp_vport_stats *stats)
/* Generic-stats vports store the offsets locally; stats_lock serializes
 * against concurrent readers in vport_get_stats(). */
961 	if (vport->ops->flags & VPORT_F_GEN_STATS) {
962 		spin_lock_bh(&vport->stats_lock);
963 		memcpy(&vport->offset_stats, stats, sizeof(struct odp_vport_stats));
964 		spin_unlock_bh(&vport->stats_lock);
967 	} else if (vport->ops->set_stats)
968 		return vport->ops->set_stats(vport, stats);
974 * vport_get_name - retrieve device name
976 * @vport: vport from which to retrieve the name.
978 * Retrieves the name of the given device. Either RTNL lock or rcu_read_lock
979 * must be held for the entire duration that the name is in use.
/* Thin dispatch to the type-specific accessor. */
982 vport_get_name(const struct vport *vport)
984 	return vport->ops->get_name(vport);
988 * vport_get_type - retrieve device type
990 * @vport: vport from which to retrieve the type.
992 * Retrieves the type of the given device. Either RTNL lock or rcu_read_lock
993 * must be held for the entire duration that the type is in use.
/* The type string lives in the ops table, so no per-device call is needed. */
996 vport_get_type(const struct vport *vport)
998 	return vport->ops->type;
1002 * vport_get_addr - retrieve device Ethernet address (for kernel callers)
1004 * @vport: vport from which to retrieve the Ethernet address.
1006 * Retrieves the Ethernet address of the given device. Either RTNL lock or
1007 * rcu_read_lock must be held for the entire duration that the Ethernet address
/* Thin dispatch to the type-specific accessor. */
1010 const unsigned char *
1011 vport_get_addr(const struct vport *vport)
1013 	return vport->ops->get_addr(vport);
1017 * vport_get_dp_port - retrieve attached datapath port
1019 * @vport: vport from which to retrieve the datapath port.
1021 * Retrieves the attached datapath port or null if not attached. Either RTNL
1022 * lock or rcu_read_lock must be held for the entire duration that the datapath
1023 * port is being accessed.
1026 vport_get_dp_port(const struct vport *vport)
/* RCU-protected read; pairs with rcu_assign_pointer in attach/detach. */
1028 	return rcu_dereference(vport->dp_port);
1032 * vport_get_kobj - retrieve associated kobj
1034 * @vport: vport from which to retrieve the associated kobj
1036 * Retrieves the associated kobj or null if no kobj. The returned kobj is
1037 * valid for as long as the vport exists.
1040 vport_get_kobj(const struct vport *vport)
/* Optional hook: types without a kobject take the (elided) NULL fallback. */
1042 	if (vport->ops->get_kobj)
1043 		return vport->ops->get_kobj(vport);
1049 * vport_get_stats - retrieve device stats (for kernel callers)
1051 * @vport: vport from which to retrieve the stats
1052 * @stats: location to store stats
1054 * Retrieves transmit, receive, and error stats for the given device.
1057 vport_get_stats(struct vport *vport, struct odp_vport_stats *stats)
1059 	struct odp_vport_stats dev_stats;
1060 	struct odp_vport_stats *dev_statsp = NULL;
/* If the device also keeps generic stats, fetch the device's own numbers
 * into a side buffer so they can be merged below rather than returned raw. */
1063 	if (vport->ops->get_stats) {
1064 		if (vport->ops->flags & VPORT_F_GEN_STATS)
1065 			dev_statsp = &dev_stats;
1070 		err = vport->ops->get_stats(vport, dev_statsp);
1077 	if (vport->ops->flags & VPORT_F_GEN_STATS) {
1080 		/* We potentially have 3 sources of stats that need to be
1081 		 * combined: those we have collected (split into err_stats and
1082 		 * percpu_stats), offset_stats from set_stats(), and device
1083 		 * error stats from get_stats() (for errors that happen
1084 		 * downstream and therefore aren't reported through our
1085 		 * vport_record_error() function). */
1087 		spin_lock_bh(&vport->stats_lock);
/* Start from the userspace-provided offsets, then accumulate onto them. */
1089 		memcpy(stats, &vport->offset_stats, sizeof(struct odp_vport_stats));
/* rx_errors is the umbrella counter: it includes the specific rx error
 * categories as well as the generic ones. */
1091 		stats->rx_errors += vport->err_stats.rx_errors
1092 				+ vport->err_stats.rx_frame_err
1093 				+ vport->err_stats.rx_over_err
1094 				+ vport->err_stats.rx_crc_err;
1095 		stats->tx_errors += vport->err_stats.tx_errors;
1096 		stats->tx_dropped += vport->err_stats.tx_dropped;
1097 		stats->rx_dropped += vport->err_stats.rx_dropped;
1098 		stats->rx_over_err += vport->err_stats.rx_over_err;
1099 		stats->rx_crc_err += vport->err_stats.rx_crc_err;
1100 		stats->rx_frame_err += vport->err_stats.rx_frame_err;
1101 		stats->collisions += vport->err_stats.collisions;
1103 		spin_unlock_bh(&vport->stats_lock);
/* Merge the device's own error counters fetched above (guard elided). */
1106 			stats->rx_errors += dev_statsp->rx_errors;
1107 			stats->tx_errors += dev_statsp->tx_errors;
1108 			stats->rx_dropped += dev_statsp->rx_dropped;
1109 			stats->tx_dropped += dev_statsp->tx_dropped;
1110 			stats->rx_over_err += dev_statsp->rx_over_err;
1111 			stats->rx_crc_err += dev_statsp->rx_crc_err;
1112 			stats->rx_frame_err += dev_statsp->rx_frame_err;
1113 			stats->collisions += dev_statsp->collisions;
/* Sum the fast-path byte/packet counters across all possible CPUs. */
1116 		for_each_possible_cpu(i) {
1117 			const struct vport_percpu_stats *percpu_stats;
1119 			percpu_stats = per_cpu_ptr(vport->percpu_stats, i);
1120 			stats->rx_bytes += percpu_stats->rx_bytes;
1121 			stats->rx_packets += percpu_stats->rx_packets;
1122 			stats->tx_bytes += percpu_stats->tx_bytes;
1123 			stats->tx_packets += percpu_stats->tx_packets;
1135 * vport_get_flags - retrieve device flags
1137 * @vport: vport from which to retrieve the flags
1139 * Retrieves the flags of the given device. Either RTNL lock or rcu_read_lock
/* Thin dispatch to the type-specific accessor. */
1143 vport_get_flags(const struct vport *vport)
1145 	return vport->ops->get_dev_flags(vport);
1149 * vport_is_running - check whether device is running
1151 * @vport: vport on which to check status.
1153 * Checks whether the given device is running. Either RTNL lock or
1154 * rcu_read_lock must be held.
/* Thin dispatch to the type-specific accessor. */
1157 vport_is_running(const struct vport *vport)
1159 	return vport->ops->is_running(vport);
1163 * vport_get_operstate - retrieve device operating state
1165 * @vport: vport from which to check status
1167 * Retrieves the RFC2863 operstate of the given device. Either RTNL lock or
1168 * rcu_read_lock must be held.
/* Thin dispatch to the type-specific accessor. */
1171 vport_get_operstate(const struct vport *vport)
1173 	return vport->ops->get_operstate(vport);
1177 * vport_get_ifindex - retrieve device system interface index
1179 * @vport: vport from which to retrieve index
1181 * Retrieves the system interface index of the given device. Not all devices
1182 * will have system indexes, in which case the index of the datapath local
1183 * port is returned. Returns a negative index on error. Either RTNL lock or
1184 * rcu_read_lock must be held.
1187 vport_get_ifindex(const struct vport *vport)
1189 	const struct dp_port *dp_port;
1191 	if (vport->ops->get_ifindex)
1192 		return vport->ops->get_ifindex(vport);
1194 	/* If we don't actually have an ifindex, use the local port's.
1195 	 * Userspace doesn't check it anyways. */
1196 	dp_port = vport_get_dp_port(vport);
/* Recurses once into the local port, which is expected to supply a real
 * get_ifindex hook (error handling for a NULL dp_port is elided here). */
1200 	return vport_get_ifindex(dp_port->dp->ports[ODPP_LOCAL]->vport);
1204 * vport_get_iflink - retrieve device system link index
1206 * @vport: vport from which to retrieve index
1208 * Retrieves the system link index of the given device. The link is the index
1209 * of the interface on which the packet will actually be sent. In most cases
1210 * this is the same as the ifindex but may be different for tunnel devices.
1211 * Returns a negative index on error. Either RTNL lock or rcu_read_lock must
1215 vport_get_iflink(const struct vport *vport)
1217 	if (vport->ops->get_iflink)
1218 		return vport->ops->get_iflink(vport);
1220 	/* If we don't have an iflink, use the ifindex. In most cases they
1222 	return vport_get_ifindex(vport);
1226 * vport_get_mtu - retrieve device MTU (for kernel callers)
1228 * @vport: vport from which to retrieve MTU
1230 * Retrieves the MTU of the given device. Either RTNL lock or rcu_read_lock
/* Thin dispatch to the type-specific accessor. */
1234 vport_get_mtu(const struct vport *vport)
1236 	return vport->ops->get_mtu(vport);
1240 * vport_receive - pass up received packet to the datapath for processing
1242 * @vport: vport that received the packet
1243 * @skb: skb that was received
1245 * Must be called with rcu_read_lock. The packet cannot be shared and
1246 * skb->data should point to the Ethernet header. The caller must have already
1247 * called compute_ip_summed() to initialize the checksumming fields.
1250 vport_receive(struct vport *vport, struct sk_buff *skb)
1252 	struct dp_port *dp_port = vport_get_dp_port(vport);
/* Unattached vport: count the drop (skb free path is elided here). */
1255 		vport_record_error(vport, VPORT_E_RX_DROPPED);
/* Fast-path rx accounting on this CPU's generic-stats slot. */
1261 	if (vport->ops->flags & VPORT_F_GEN_STATS) {
1262 		struct vport_percpu_stats *stats;
1266 		stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
1267 		stats->rx_packets++;
1268 		stats->rx_bytes += skb->len;
/* Non-tunnel vports carry no tunnel ID; clear it so stale values from a
 * recycled skb control block are not misread downstream. */
1273 	if (!(vport->ops->flags & VPORT_F_TUN_ID))
1274 		OVS_CB(skb)->tun_id = 0;
1276 	dp_process_received_packet(dp_port, skb);
/* Payload length used for MTU comparison: skb length minus the Ethernet
 * header, and minus the VLAN tag when one is present. */
1279 static inline unsigned
1280 packet_length(const struct sk_buff *skb)
1282 	unsigned length = skb->len - ETH_HLEN;
1284 	if (skb->protocol == htons(ETH_P_8021Q))
1285 		length -= VLAN_HLEN;
1291 * vport_send - send a packet on a device
1293 * @vport: vport on which to send the packet
1296 * Sends the given packet and returns the length of data sent. Either RTNL
1297 * lock or rcu_read_lock must be held.
1300 vport_send(struct vport *vport, struct sk_buff *skb)
/* Per-CPU, per-context (interrupt vs. not) recursion counter; get_cpu()
 * disables preemption — the matching put_cpu() is elided in this excerpt. */
1306 	loop_count = &per_cpu_ptr(vport_loop_counter, get_cpu())->count[!!in_interrupt()];
/* Break forwarding loops: drop once the packet has traversed vport_send()
 * more than VPORT_MAX_LOOPS times on this CPU/context. */
1309 	if (unlikely(*loop_count > VPORT_MAX_LOOPS)) {
1310 		if (net_ratelimit())
1311 			printk(KERN_WARNING "%s: dropping packet that has looped more than %d times\n",
1312 			       dp_name(vport_get_dp_port(vport)->dp), VPORT_MAX_LOOPS);
/* Over-MTU packets are dropped unless GSO will segment them downstream. */
1316 	mtu = vport_get_mtu(vport);
1317 	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
1318 		if (net_ratelimit())
1319 			printk(KERN_WARNING "%s: dropped over-mtu packet: %d > %d\n",
1320 			       dp_name(vport_get_dp_port(vport)->dp), packet_length(skb), mtu);
1324 	sent = vport->ops->send(vport, skb);
/* Fast-path tx accounting, only when the device reported bytes sent. */
1326 	if (vport->ops->flags & VPORT_F_GEN_STATS && sent > 0) {
1327 		struct vport_percpu_stats *stats;
1331 		stats = per_cpu_ptr(vport->percpu_stats, smp_processor_id());
1332 		stats->tx_packets++;
1333 		stats->tx_bytes += sent;
/* Shared error/drop exit path (labels elided in this excerpt). */
1343 	vport_record_error(vport, VPORT_E_TX_DROPPED);
1352 * vport_record_error - indicate device error to generic stats layer
1354 * @vport: vport that encountered the error
1355 * @err_type: one of enum vport_err_type types to indicate the error type
1357 * If using the vport generic stats layer indicate that an error of the given
/* Errors are only tracked for vports using the generic stats layer; others
 * are expected to report errors through their own device counters. */
1361 vport_record_error(struct vport *vport, enum vport_err_type err_type)
1363 	if (vport->ops->flags & VPORT_F_GEN_STATS) {
1365 		spin_lock_bh(&vport->stats_lock);
/* Map the error type onto its dedicated err_stats counter. */
1368 		case VPORT_E_RX_DROPPED:
1369 			vport->err_stats.rx_dropped++;
1372 		case VPORT_E_RX_ERROR:
1373 			vport->err_stats.rx_errors++;
1376 		case VPORT_E_RX_FRAME:
1377 			vport->err_stats.rx_frame_err++;
1380 		case VPORT_E_RX_OVER:
1381 			vport->err_stats.rx_over_err++;
1384 		case VPORT_E_RX_CRC:
1385 			vport->err_stats.rx_crc_err++;
1388 		case VPORT_E_TX_DROPPED:
1389 			vport->err_stats.tx_dropped++;
1392 		case VPORT_E_TX_ERROR:
1393 			vport->err_stats.tx_errors++;
1396 		case VPORT_E_COLLISION:
1397 			vport->err_stats.collisions++;
1401 		spin_unlock_bh(&vport->stats_lock);