#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
+#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
/* List of statically compiled vport implementations. Don't forget to also
 * add yours to the list at the bottom of vport.h. */
static const struct vport_ops *vport_ops_list[] = {
	&ovs_netdev_vport_ops,
	&ovs_internal_vport_ops,
	/* GRE vport types are only compiled in when the kernel's GRE demux
	 * is available (CONFIG_NET_IPGRE_DEMUX built-in or module). */
#if IS_ENABLED(CONFIG_NET_IPGRE_DEMUX)
	&ovs_gre_vport_ops,
	&ovs_gre64_vport_ops,
#endif
	/* NOTE(review): VXLAN/LISP gated on kernel >= 2.6.26 — presumably a
	 * compat-layer requirement; confirm against the compat headers. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	&ovs_vxlan_vport_ops,
	&ovs_lisp_vport_ops,
#endif
};
-static const struct vport_ops **vport_ops_list;
-static int n_vport_types;
-
/* Protected by RCU read lock for reading, ovs_mutex for writing. */
static struct hlist_head *dev_table;
#define VPORT_HASH_BUCKETS 1024
/**
* ovs_vport_init - initialize vport subsystem
*
- * Called at module load time to initialize the vport subsystem and any
- * compiled in vport types.
+ * Called at module load time to initialize the vport subsystem.
*/
int ovs_vport_init(void)
{
- int err;
- int i;
-
dev_table = kzalloc(VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
GFP_KERNEL);
- if (!dev_table) {
- err = -ENOMEM;
- goto error;
- }
-
- vport_ops_list = kmalloc(ARRAY_SIZE(base_vport_ops_list) *
- sizeof(struct vport_ops *), GFP_KERNEL);
- if (!vport_ops_list) {
- err = -ENOMEM;
- goto error_dev_table;
- }
-
- for (i = 0; i < ARRAY_SIZE(base_vport_ops_list); i++) {
- const struct vport_ops *new_ops = base_vport_ops_list[i];
-
- if (new_ops->init)
- err = new_ops->init();
- else
- err = 0;
-
- if (!err)
- vport_ops_list[n_vport_types++] = new_ops;
- else if (new_ops->flags & VPORT_F_REQUIRED) {
- ovs_vport_exit();
- goto error;
- }
- }
+ if (!dev_table)
+ return -ENOMEM;
return 0;
-
-error_dev_table:
- kfree(dev_table);
-error:
- return err;
}
/**
* ovs_vport_exit - shutdown vport subsystem
*
- * Called at module exit time to shutdown the vport subsystem and any
- * initialized vport types.
+ * Called at module exit time to shutdown the vport subsystem.
*/
void ovs_vport_exit(void)
{
- int i;
-
- for (i = 0; i < n_vport_types; i++) {
- if (vport_ops_list[i]->exit)
- vport_ops_list[i]->exit();
- }
-
- kfree(vport_ops_list);
kfree(dev_table);
}
int err = 0;
int i;
- for (i = 0; i < n_vport_types; i++) {
+ for (i = 0; i < ARRAY_SIZE(vport_ops_list); i++) {
if (vport_ops_list[i]->type == parms->type) {
struct hlist_head *bucket;
* skb->data should point to the Ethernet header. The caller must have already
* called compute_ip_summed() to initialize the checksumming fields.
*/
-void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)
+void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
+ struct ovs_key_ipv4_tunnel *tun_key)
{
struct pcpu_tstats *stats;
stats->rx_bytes += skb->len;
u64_stats_update_end(&stats->syncp);
- if (!(vport->ops->flags & VPORT_F_TUN_ID))
- OVS_CB(skb)->tun_key = NULL;
-
+ OVS_CB(skb)->tun_key = tun_key;
ovs_dp_process_received_packet(vport, skb);
}
{
int sent = vport->ops->send(vport, skb);
- if (likely(sent)) {
+ if (likely(sent > 0)) {
struct pcpu_tstats *stats;
stats = this_cpu_ptr(vport->percpu_stats);
stats->tx_packets++;
stats->tx_bytes += sent;
u64_stats_update_end(&stats->syncp);
- }
+ } else if (sent < 0) {
+ ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
+ kfree_skb(skb);
+ } else
+ ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
+
return sent;
}
* @err_type: one of enum vport_err_type types to indicate the error type
*
* If using the vport generic stats layer indicate that an error of the given
- * type has occured.
+ * type has occurred.
*/
void ovs_vport_record_error(struct vport *vport, enum vport_err_type err_type)
{
spin_unlock(&vport->stats_lock);
}
+
+static void free_vport_rcu(struct rcu_head *rcu)
+{
+ struct vport *vport = container_of(rcu, struct vport, rcu);
+
+ ovs_vport_free(vport);
+}
+
+void ovs_vport_deferred_free(struct vport *vport)
+{
+ if (!vport)
+ return;
+
+ call_rcu(&vport->rcu, free_vport_rcu);
+}