X-Git-Url: http://git.onelab.eu/?a=blobdiff_plain;f=include%2Flinux%2Fnetdevice.h;fp=include%2Flinux%2Fnetdevice.h;h=c5b97cf5970786395432e0d2191b4121e127396a;hb=4e76c8a9fa413ccc09d3f7f664183dcce3555d57;hp=f4169bbb60eb8036d021ba64e9270f6b5ffcaabb;hpb=1db395853d4f30d6120458bd279ede1f882a8525;p=linux-2.6.git

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f4169bbb6..c5b97cf59 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -232,6 +232,7 @@ enum netdev_state_t
 	__LINK_STATE_RX_SCHED,
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
+	__LINK_STATE_QDISC_RUNNING,
 };
 
 
@@ -307,9 +308,17 @@ struct net_device
 #define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
 #define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
 #define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
-#define NETIF_F_TSO		2048	/* Can offload TCP/IP segmentation */
+#define NETIF_F_GSO		2048	/* Enable software GSO. */
 #define NETIF_F_LLTX		4096	/* LockLess TX */
-#define NETIF_F_UFO		8192	/* Can offload UDP Large Send*/
+
+	/* Segmentation offload features */
+#define NETIF_F_GSO_SHIFT	16
+#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_UFO		(SKB_GSO_UDPV4 << NETIF_F_GSO_SHIFT)
+#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
+
+#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
+#define NETIF_F_ALL_CSUM	(NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
 
 	struct net_device	*next_sched;
 
@@ -398,6 +407,9 @@ struct net_device
 	struct list_head	qdisc_list;
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 
+	/* Partially transmitted GSO packet. */
+	struct sk_buff		*gso_skb;
+
 	/* ingress path synchronizer */
 	spinlock_t		ingress_lock;
 	struct Qdisc		*qdisc_ingress;
@@ -406,7 +418,7 @@ struct net_device
 	 * One part is mostly used on xmit path (device)
 	 */
 	/* hard_start_xmit synchronizer */
-	spinlock_t		xmit_lock ____cacheline_aligned_in_smp;
+	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
 	/* cpu id of processor entered to hard_start_xmit or -1,
 	   if nobody entered there.
 	 */
@@ -532,6 +544,9 @@ struct packet_type {
 					 struct net_device *,
 					 struct packet_type *,
 					 struct net_device *);
+	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
+						int features);
+	int			(*gso_send_check)(struct sk_buff *skb);
 	void			*af_packet_priv;
 	struct list_head	list;
 };
@@ -679,7 +694,8 @@ extern int		dev_change_name(struct net_device *, char *);
 extern int		dev_set_mtu(struct net_device *, int);
 extern int		dev_set_mac_address(struct net_device *,
 					    struct sockaddr *);
-extern void		dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
+extern int		dev_hard_start_xmit(struct sk_buff *skb,
+					    struct net_device *dev);
 
 extern void		dev_init(void);
 
@@ -889,11 +905,43 @@ static inline void __netif_rx_complete(struct net_device *dev)
 	clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 }
 
+static inline void netif_tx_lock(struct net_device *dev)
+{
+	spin_lock(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+	spin_lock_bh(&dev->_xmit_lock);
+	dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+	int ok = spin_trylock(&dev->_xmit_lock);
+	if (likely(ok))
+		dev->xmit_lock_owner = smp_processor_id();
+	return ok;
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock(&dev->_xmit_lock);
+}
+
+static inline void netif_tx_unlock_bh(struct net_device *dev)
+{
+	dev->xmit_lock_owner = -1;
+	spin_unlock_bh(&dev->_xmit_lock);
+}
+
 static inline void netif_tx_disable(struct net_device *dev)
 {
-	spin_lock_bh(&dev->xmit_lock);
+	netif_tx_lock_bh(dev);
 	netif_stop_queue(dev);
-	spin_unlock_bh(&dev->xmit_lock);
+	netif_tx_unlock_bh(dev);
 }
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
@@ -921,6 +969,7 @@ extern int netdev_max_backlog;
 extern int		weight_p;
 extern int		netdev_set_master(struct net_device *dev, struct net_device *master);
 extern int skb_checksum_help(struct sk_buff *skb, int inward);
+extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
 #ifdef CONFIG_BUG
 extern void netdev_rx_csum_fault(struct net_device *dev);
 #else
@@ -940,6 +989,19 @@ extern void dev_seq_stop(struct seq_file *seq, void *v);
 
 extern void linkwatch_run_queue(void);
 
+static inline int skb_gso_ok(struct sk_buff *skb, int features)
+{
+	int feature = skb_shinfo(skb)->gso_type << NETIF_F_GSO_SHIFT;
+	return (features & feature) == feature;
+}
+
+static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
+{
+	return skb_is_gso(skb) &&
+	       (!skb_gso_ok(skb, dev->features) ||
+		unlikely(skb->ip_summed != CHECKSUM_HW));
+}
+
 #endif /* __KERNEL__ */
 
 #endif	/* _LINUX_DEV_H */
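Illustrative note (not part of the exported patch): with xmit_lock renamed to the private _xmit_lock, driver code that previously took the spinlock directly is expected to go through the new netif_tx_lock*() wrappers, which also maintain xmit_lock_owner. A minimal sketch of a hypothetical driver helper, using only interfaces visible in this header, might look like the following; the function name example_restart_tx is invented for illustration.

	#include <linux/netdevice.h>

	/*
	 * Hypothetical helper (illustration only): re-enable a device's
	 * transmit queue under the TX lock.  netif_tx_lock_bh() and
	 * netif_tx_unlock_bh() take the renamed dev->_xmit_lock and
	 * record/clear dev->xmit_lock_owner, so drivers no longer touch
	 * the spinlock field directly.
	 */
	static void example_restart_tx(struct net_device *dev)
	{
		netif_tx_lock_bh(dev);		/* was: spin_lock_bh(&dev->xmit_lock); */
		netif_wake_queue(dev);		/* allow hard_start_xmit to be called again */
		netif_tx_unlock_bh(dev);	/* was: spin_unlock_bh(&dev->xmit_lock); */
	}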