1 #ifndef __LINUX_NETDEVICE_WRAPPER_H
2 #define __LINUX_NETDEVICE_WRAPPER_H 1
4 #include_next <linux/netdevice.h>
5 #include <linux/if_bridge.h>
9 #include <linux/version.h>
/* Map a struct device embedded inside struct net_device back to its
 * containing net_device.  NETDEV_DEV_MEMBER names the embedded member and
 * is defined elsewhere in the compat layer (not visible in this chunk). */
12 #define to_net_dev(class) container_of(class, struct net_device, NETDEV_DEV_MEMBER)
/* Backport: unregister_netdevice_queue() and unregister_netdevice_many()
 * were added upstream in 2.6.33; declare replacement implementations
 * (defined in a compat .c file) for older kernels. */
15 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33)
16 extern void unregister_netdevice_queue(struct net_device *dev,
17 struct list_head *head);
18 extern void unregister_netdevice_many(struct list_head *head);
/* Backport of dev_disable_lro() for kernels whose headers lack it;
 * HAVE_DEV_DISABLE_LRO is presumably probed at configure time — the
 * probe itself is not visible here. */
21 #ifndef HAVE_DEV_DISABLE_LRO
22 extern void dev_disable_lro(struct net_device *dev);
/* rx_handler backport: netdev_rx_handler_register() appeared upstream in
 * 2.6.36.  RHEL vendor kernels instead export an OVS-specific hook
 * (openvswitch_handle_frame_hook), so the replacement is declared in terms
 * of that hook type when HAVE_RHEL_OVS_HOOK is set.
 * NOTE(review): the #else/#endif lines separating the two prototypes below
 * are outside this visible chunk — they belong to the two branches of the
 * HAVE_RHEL_OVS_HOOK conditional, not to one declaration. */
25 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) || \
26 defined HAVE_RHEL_OVS_HOOK
28 #ifdef HAVE_RHEL_OVS_HOOK
/* Signature of the RHEL kernel's OVS frame hook: takes ownership decisions
 * on a received skb and returns the (possibly consumed) skb pointer. */
29 typedef struct sk_buff *(openvswitch_handle_frame_hook_t)(struct sk_buff *skb);
30 extern openvswitch_handle_frame_hook_t *openvswitch_handle_frame_hook;
/* RHEL variant: register via the vendor hook. */
32 int netdev_rx_handler_register(struct net_device *dev,
33 openvswitch_handle_frame_hook_t *hook,
34 void *rx_handler_data);
/* Generic pre-2.6.36 variant: register via the bridge-port hook.
 * NOTE(review): the continuation line of this prototype (closing the
 * function-pointer parameter) is missing from this chunk. */
37 int netdev_rx_handler_register(struct net_device *dev,
38 struct sk_buff *(*netdev_hook)(struct net_bridge_port *p,
40 void *rx_handler_data);
43 void netdev_rx_handler_unregister(struct net_device *dev);
/* Backport of dev_get_by_index_rcu() for kernels without it.  Emulates the
 * RCU lookup by taking dev_base_lock around __dev_get_by_index(); note the
 * returned pointer is NOT reference-counted — same contract as upstream,
 * the caller must hold the appropriate read side.
 * NOTE(review): the `return dev;`, closing brace, and matching #endif are
 * outside this visible chunk. */
46 #ifndef HAVE_DEV_GET_BY_INDEX_RCU
47 static inline struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
49 struct net_device *dev;
51 read_lock(&dev_base_lock);
52 dev = __dev_get_by_index(net, ifindex);
53 read_unlock(&dev_base_lock);
/* Pre-2.6.38 GSO helpers: the rpl_ ("replacement") implementations live in
 * a compat .c file, and the #define renames let callers use the upstream
 * names unconditionally. */
63 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)
64 #define skb_gso_segment rpl_skb_gso_segment
65 struct sk_buff *rpl_skb_gso_segment(struct sk_buff *skb, u32 features);
67 #define netif_skb_features rpl_netif_skb_features
68 u32 rpl_netif_skb_features(struct sk_buff *skb);
70 #define netif_needs_gso rpl_netif_needs_gso
/* Returns nonzero when the skb is GSO but the device cannot handle it as-is
 * (features insufficient, or checksum is not CHECKSUM_PARTIAL), so software
 * segmentation is required before transmit.
 * NOTE(review): the closing brace of this inline is outside the visible
 * chunk. */
71 static inline int rpl_netif_needs_gso(struct sk_buff *skb, int features)
73 return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
74 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
/* netdev_features_t was introduced upstream in 3.3; older kernels used a
 * plain u32 bitmask, so alias it for source compatibility. */
78 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)
79 typedef u32 netdev_features_t;
/* Backport of __skb_gso_segment(): without the newer tx-path argument it
 * simply delegates to skb_gso_segment().
 * NOTE(review): a further parameter line of this prototype and the closing
 * brace/#endif are outside the visible chunk. */
82 #ifndef HAVE___SKB_GSO_SEGMENT
83 static inline struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
84 netdev_features_t features,
87 return skb_gso_segment(skb, features);
/* Deliberate no-op stubs: the upper-dev linkage API is intentionally NOT
 * backported — see the XEN dom0 rationale in the original comment below.
 * NOTE(review): the stub bodies (and the `*/`-terminator of the comment
 * below) fall on lines outside this visible chunk. */
91 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0)
93 /* XEN dom0 networking assumes dev->master is bond device
94 * and it tries to access bond private structure from dev->master
95 * ptr on receive path. This causes panic. Therefore it is better
96 * not to backport this API.
98 static inline int netdev_master_upper_dev_link(struct net_device *dev,
99 struct net_device *upper_dev)
104 static inline void netdev_upper_dev_unlink(struct net_device *dev,
105 struct net_device *upper_dev)
109 static inline struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
/* Pre-2.6.37 replacement for dev_queue_xmit(); note the #define comes
 * first, so this prototype actually declares rpl_dev_queue_xmit. */
115 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
116 #define dev_queue_xmit rpl_dev_queue_xmit
117 int dev_queue_xmit(struct sk_buff *skb);
/* netdev_notifier_info_to_dev() appeared upstream in 3.11; before that,
 * notifier callbacks received the net_device pointer directly, which is
 * presumably what the (unseen) body casts `info` to.
 * NOTE(review): the function body continues past the visible chunk. */
120 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
121 static inline struct net_device *netdev_notifier_info_to_dev(void *info)