#include <asm/cache.h>
#include <asm/byteorder.h>
+#include <linux/config.h>
#include <linux/device.h>
#include <linux/percpu.h>
-#include <linux/dmaengine.h>
struct divert_blk;
struct vlan_group;
__LINK_STATE_SCHED,
__LINK_STATE_NOCARRIER,
__LINK_STATE_RX_SCHED,
- __LINK_STATE_LINKWATCH_PENDING,
- __LINK_STATE_DORMANT,
- __LINK_STATE_QDISC_RUNNING,
+ __LINK_STATE_LINKWATCH_PENDING
};
#define NETIF_F_HW_VLAN_RX 256 /* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER 512 /* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED 1024 /* Device cannot handle VLAN packets */
-#define NETIF_F_GSO 2048 /* Enable software GSO. */
+#define NETIF_F_TSO 2048 /* Can offload TCP/IP segmentation */
#define NETIF_F_LLTX 4096 /* LockLess TX */
-
- /* Segmentation offload features */
-#define NETIF_F_GSO_SHIFT 16
-#define NETIF_F_GSO_MASK 0xffff0000
-#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
-#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
-#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO_ECN (SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
-#define NETIF_F_TSO6 (SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
-
- /* List of features with software fallbacks. */
-#define NETIF_F_GSO_SOFTWARE (NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
-
-#define NETIF_F_GEN_CSUM (NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
-#define NETIF_F_ALL_CSUM (NETIF_F_IP_CSUM | NETIF_F_GEN_CSUM)
+#define NETIF_F_UFO 8192 /* Can offload UDP Large Send*/
struct net_device *next_sched;
*/
- unsigned int flags; /* interface flags (a la BSD) */
+ unsigned short flags; /* interface flags (a la BSD) */
unsigned short gflags;
unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */
unsigned short padded; /* How much padding added by alloc_netdev() */
- unsigned char operstate; /* RFC2863 operstate */
- unsigned char link_mode; /* mapping policy to operstate */
-
unsigned mtu; /* interface MTU value */
unsigned short type; /* interface hardware type */
unsigned short hard_header_len; /* hardware hdr length */
struct list_head qdisc_list;
unsigned long tx_queue_len; /* Max frames per queue allowed */
- /* Partially transmitted GSO packet. */
- struct sk_buff *gso_skb;
-
/* ingress path synchronizer */
spinlock_t ingress_lock;
struct Qdisc *qdisc_ingress;
* One part is mostly used on xmit path (device)
*/
/* hard_start_xmit synchronizer */
- spinlock_t _xmit_lock ____cacheline_aligned_in_smp;
+ spinlock_t xmit_lock ____cacheline_aligned_in_smp;
/* cpu id of processor entered to hard_start_xmit or -1,
if nobody entered there.
*/
/* register/unregister state machine */
enum { NETREG_UNINITIALIZED=0,
- NETREG_REGISTERED, /* completed register_netdevice */
+ NETREG_REGISTERING, /* called register_netdevice */
+ NETREG_REGISTERED, /* completed register todo */
NETREG_UNREGISTERING, /* called unregister_netdevice */
NETREG_UNREGISTERED, /* completed unregister todo */
NETREG_RELEASED, /* called free_netdev */
/* class/net/name entry */
struct class_device class_dev;
- /* space for optional statistics and wireless sysfs groups */
- struct attribute_group *sysfs_groups[3];
};
#define NETDEV_ALIGN 32
struct net_device *,
struct packet_type *,
struct net_device *);
- struct sk_buff *(*gso_segment)(struct sk_buff *skb,
- int features);
- int (*gso_send_check)(struct sk_buff *skb);
void *af_packet_priv;
struct list_head list;
};
struct sk_buff *completion_queue;
struct net_device backlog_dev; /* Sorry. 8) */
-#ifdef CONFIG_NET_DMA
- struct dma_chan *net_dma;
-#endif
};
DECLARE_PER_CPU(struct softnet_data,softnet_data);
#define HAVE_NETIF_QUEUE
-extern void __netif_schedule(struct net_device *dev);
+/* Queue 'dev' on this CPU's TX softirq output queue.  The
+ * __LINK_STATE_SCHED bit guarantees the device is linked into at most
+ * one output_queue at a time; the bit is cleared when the softirq
+ * drains the queue.  Safe to call from any context. */
+static inline void __netif_schedule(struct net_device *dev)
+{
+ if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
+ unsigned long flags;
+ struct softnet_data *sd;
+
+ /* IRQs off while linking into the per-cpu softnet_data list
+ via dev->next_sched, then kick NET_TX_SOFTIRQ. */
+ local_irq_save(flags);
+ sd = &__get_cpu_var(softnet_data);
+ dev->next_sched = sd->output_queue;
+ sd->output_queue = dev;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+}
static inline void netif_schedule(struct net_device *dev)
{
/* Use this variant in places where it could be invoked
* either from interrupt or non-interrupt context.
*/
-extern void dev_kfree_skb_any(struct sk_buff *skb);
+/* Free an skb from any context: use the deferred (softirq) free when
+ * running in hard-irq context or with IRQs disabled, where the normal
+ * dev_kfree_skb() must not be called; otherwise free directly. */
+static inline void dev_kfree_skb_any(struct sk_buff *skb)
+{
+ if (in_irq() || irqs_disabled())
+ dev_kfree_skb_irq(skb);
+ else
+ dev_kfree_skb(skb);
+}
#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
extern int dev_set_mtu(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
-extern int dev_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev);
+extern void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern void dev_init(void);
+extern int netdev_nit;
extern int netdev_budget;
/* Called by rtnetlink.c:rtnl_unlock() */
atomic_dec(&dev->refcnt);
}
-static inline void dev_hold(struct net_device *dev)
-{
- atomic_inc(&dev->refcnt);
-}
+#define __dev_put(dev) atomic_dec(&(dev)->refcnt)
+#define dev_hold(dev) atomic_inc(&(dev)->refcnt)
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
* and _off may be called from IRQ context, but it is caller
* who is responsible for serialization of these calls.
- *
- * The name carrier is inappropriate, these functions should really be
- * called netif_lowerlayer_*() because they represent the state of any
- * kind of lower layer not just hardware media.
*/
extern void linkwatch_fire_event(struct net_device *dev);
extern void netif_carrier_off(struct net_device *dev);
-static inline void netif_dormant_on(struct net_device *dev)
-{
- if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
- linkwatch_fire_event(dev);
-}
-
-static inline void netif_dormant_off(struct net_device *dev)
+/* Hot-plugging. */
+static inline int netif_device_present(struct net_device *dev)
{
- if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
- linkwatch_fire_event(dev);
+ return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
-static inline int netif_dormant(const struct net_device *dev)
+static inline void netif_device_detach(struct net_device *dev)
{
- return test_bit(__LINK_STATE_DORMANT, &dev->state);
-}
-
-
-static inline int netif_oper_up(const struct net_device *dev) {
- return (dev->operstate == IF_OPER_UP ||
- dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
+ if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
+ netif_running(dev)) {
+ netif_stop_queue(dev);
+ }
}
-/* Hot-plugging. */
-static inline int netif_device_present(struct net_device *dev)
+static inline void netif_device_attach(struct net_device *dev)
{
- return test_bit(__LINK_STATE_PRESENT, &dev->state);
+ if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
+ netif_running(dev)) {
+ netif_wake_queue(dev);
+ __netdev_watchdog_up(dev);
+ }
}
-extern void netif_device_detach(struct net_device *dev);
-
-extern void netif_device_attach(struct net_device *dev);
-
/*
* Network interface message level settings
*/
* already been called and returned 1.
*/
-extern void __netif_rx_schedule(struct net_device *dev);
+/* Add 'dev' to this CPU's poll list and raise the RX softirq.  Per the
+ * comment above, the caller must already have won the scheduling race
+ * (the prep call returned 1).  dev_hold() pins the device until the
+ * poll completes and the matching put is done elsewhere. */
+static inline void __netif_rx_schedule(struct net_device *dev)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ dev_hold(dev);
+ list_add_tail(&dev->poll_list, &__get_cpu_var(softnet_data).poll_list);
+ /* Refill the polling quota; a negative quota carries the deficit
+ forward instead of resetting it. */
+ if (dev->quota < 0)
+ dev->quota += dev->weight;
+ else
+ dev->quota = dev->weight;
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ local_irq_restore(flags);
+}
/* Try to reschedule poll. Called by irq handler. */
clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
}
-static inline void netif_tx_lock(struct net_device *dev)
-{
- spin_lock(&dev->_xmit_lock);
- dev->xmit_lock_owner = smp_processor_id();
-}
-
-static inline void netif_tx_lock_bh(struct net_device *dev)
-{
- spin_lock_bh(&dev->_xmit_lock);
- dev->xmit_lock_owner = smp_processor_id();
-}
-
-static inline int netif_tx_trylock(struct net_device *dev)
-{
- int ok = spin_trylock(&dev->_xmit_lock);
- if (likely(ok))
- dev->xmit_lock_owner = smp_processor_id();
- return ok;
-}
-
-static inline void netif_tx_unlock(struct net_device *dev)
-{
- dev->xmit_lock_owner = -1;
- spin_unlock(&dev->_xmit_lock);
-}
-
-static inline void netif_tx_unlock_bh(struct net_device *dev)
-{
- dev->xmit_lock_owner = -1;
- spin_unlock_bh(&dev->_xmit_lock);
-}
-
static inline void netif_tx_disable(struct net_device *dev)
{
- netif_tx_lock_bh(dev);
+ spin_lock_bh(&dev->xmit_lock);
netif_stop_queue(dev);
- netif_tx_unlock_bh(dev);
+ spin_unlock_bh(&dev->xmit_lock);
}
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb, int inward);
-extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
extern void linkwatch_run_queue(void);
-static inline int net_gso_ok(int features, int gso_type)
-{
- int feature = gso_type << NETIF_F_GSO_SHIFT;
- return (features & feature) == feature;
-}
-
-static inline int skb_gso_ok(struct sk_buff *skb, int features)
-{
- return net_gso_ok(features, skb_shinfo(skb)->gso_type);
-}
-
-static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
-{
- return skb_is_gso(skb) &&
- (!skb_gso_ok(skb, dev->features) ||
- unlikely(skb->ip_summed != CHECKSUM_HW));
-}
-
-/* On bonding slaves other than the currently active slave, suppress
- * duplicates except for 802.3ad ETH_P_SLOW and alb non-mcast/bcast.
- */
-static inline int skb_bond_should_drop(struct sk_buff *skb)
-{
- struct net_device *dev = skb->dev;
- struct net_device *master = dev->master;
-
- if (master &&
- (dev->priv_flags & IFF_SLAVE_INACTIVE)) {
- if (master->priv_flags & IFF_MASTER_ALB) {
- if (skb->pkt_type != PACKET_BROADCAST &&
- skb->pkt_type != PACKET_MULTICAST)
- return 0;
- }
- if (master->priv_flags & IFF_MASTER_8023AD &&
- skb->protocol == __constant_htons(ETH_P_SLOW))
- return 0;
-
- return 1;
- }
- return 0;
-}
-
#endif /* __KERNEL__ */
#endif /* _LINUX_DEV_H */