diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 30ff8ea..659cb22 100644
 
 #include "common.h"
 
-#include <linux/config.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
+#include <linux/ktime.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/skbuff.h>
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/tcp.h>
 #include <linux/ip.h>
 #include <linux/in.h>
 #include <linux/if_arp.h>
 #include "regs.h"
 #include "espi.h"
 
-
-#ifdef NETIF_F_TSO
-#include <linux/tcp.h>
-#endif
+/* This belongs in if_ether.h */
+#define ETH_P_CPL5 0xf
 
 #define SGE_CMDQ_N             2
 #define SGE_FREELQ_N           2
@@ -74,6 +73,7 @@
 #define SGE_INTRTIMER_NRES     1000
 #define SGE_RX_COPY_THRES      256
 #define SGE_RX_SM_BUF_SIZE     1536
+#define SGE_TX_DESC_MAX_PLEN   16384
 
 # define SGE_RX_DROP_THRES 2
 
@@ -185,17 +185,17 @@ struct cmdQ {
        unsigned long   status;         /* HW DMA fetch status */
        unsigned int    in_use;         /* # of in-use command descriptors */
        unsigned int    size;           /* # of descriptors */
-       unsigned int    processed;      /* total # of descs HW has processed */
-       unsigned int    cleaned;        /* total # of descs SW has reclaimed */
-       unsigned int    stop_thres;     /* SW TX queue suspend threshold */
+       unsigned int    processed;      /* total # of descs HW has processed */
+       unsigned int    cleaned;        /* total # of descs SW has reclaimed */
+       unsigned int    stop_thres;     /* SW TX queue suspend threshold */
        u16             pidx;           /* producer index (SW) */
        u16             cidx;           /* consumer index (HW) */
        u8              genbit;         /* current generation (=valid) bit */
-       u8              sop;            /* is next entry start of packet? */
+       u8              sop;            /* is next entry start of packet? */
        struct cmdQ_e  *entries;        /* HW command descriptor Q */
        struct cmdQ_ce *centries;       /* SW command context descriptor Q */
-       spinlock_t      lock;           /* Lock to protect cmdQ enqueuing */
        dma_addr_t      dma_addr;       /* DMA addr HW command descriptor Q */
+       spinlock_t      lock;           /* Lock to protect cmdQ enqueuing */
 };
 
 struct freelQ {
@@ -204,8 +204,8 @@ struct freelQ {
        u16             pidx;           /* producer index (SW) */
        u16             cidx;           /* consumer index (HW) */
        u16             rx_buffer_size; /* Buffer size on this free list */
-       u16             dma_offset;     /* DMA offset to align IP headers */
-       u16             recycleq_idx;   /* skb recycle q to use */
+       u16             dma_offset;     /* DMA offset to align IP headers */
+       u16             recycleq_idx;   /* skb recycle q to use */
        u8              genbit;         /* current generation (=valid) bit */
        struct freelQ_e *entries;       /* HW freelist descriptor Q */
        struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
@@ -227,6 +227,29 @@ enum {
        CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
 };
 
+/* T204 TX SW scheduler */
+
+/* Per T204 TX port */
+struct sched_port {
+       unsigned int    avail;          /* available bits - quota */
+       unsigned int    drain_bits_per_1024ns; /* drain rate */
+       unsigned int    speed;          /* drain rate, mbps */
+       unsigned int    mtu;            /* mtu size */
+       struct sk_buff_head skbq;       /* pending skbs */
+};
+
+/* Per T204 device */
+struct sched {
+       ktime_t         last_updated;   /* last time quotas were computed */
+       unsigned int    max_avail;      /* max bits to be sent to any port */
+       unsigned int    port;           /* port index (round robin ports) */
+       unsigned int    num;            /* num skbs in per port queues */
+       struct sched_port p[MAX_NPORTS];
+       struct tasklet_struct sched_tsk; /* tasklet used to run scheduler */
+};
+static void restart_sched(unsigned long);
+
 /*
  * Main SGE data structure
  *
@@ -244,17 +267,239 @@ struct sge {
        unsigned int    rx_pkt_pad;     /* RX padding for L2 packets */
        unsigned int    jumbo_fl;       /* jumbo freelist Q index */
        unsigned int    intrtimer_nres; /* no-resource interrupt timer */
-       unsigned int    fixed_intrtimer;/* non-adaptive interrupt timer */
+       unsigned int    fixed_intrtimer;/* non-adaptive interrupt timer */
        struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
        struct timer_list espibug_timer;
-       unsigned int    espibug_timeout;
-       struct sk_buff  *espibug_skb;
+       unsigned long   espibug_timeout;
+       struct sk_buff  *espibug_skb[MAX_NPORTS];
        u32             sge_control;    /* shadow value of sge control reg */
        struct sge_intr_counts stats;
-       struct sge_port_stats port_stats[MAX_NPORTS];
+       struct sge_port_stats *port_stats[MAX_NPORTS];
+       struct sched    *tx_sched;
        struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
 };
 
+/*
+ * stop tasklet and free all pending skb's
+ */
+static void tx_sched_stop(struct sge *sge)
+{
+       struct sched *s = sge->tx_sched;
+       int i;
+
+       tasklet_kill(&s->sched_tsk);
+
+       for (i = 0; i < MAX_NPORTS; i++)
+               __skb_queue_purge(&s->p[i].skbq);
+}
+
+/*
+ * t1_sched_update_parms() is called when the MTU or link speed changes. It
+ * re-computes scheduler parameters to cope with the change.
+ */
+unsigned int t1_sched_update_parms(struct sge *sge, unsigned int port,
+                                  unsigned int mtu, unsigned int speed)
+{
+       struct sched *s = sge->tx_sched;
+       struct sched_port *p = &s->p[port];
+       unsigned int max_avail_segs;
+
+       pr_debug("t1_sched_update_parms: mtu=%d speed=%d\n", mtu, speed);
+       if (speed)
+               p->speed = speed;
+       if (mtu)
+               p->mtu = mtu;
+
+       if (speed || mtu) {
+               unsigned long long drain = 1024ULL * p->speed * (p->mtu - 40);
+               do_div(drain, (p->mtu + 50) * 1000);
+               p->drain_bits_per_1024ns = (unsigned int) drain;
+
+               if (p->speed < 1000)
+                       p->drain_bits_per_1024ns =
+                               90 * p->drain_bits_per_1024ns / 100;
+       }
+
+       if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204) {
+               p->drain_bits_per_1024ns -= 16;
+               s->max_avail = max(4096U, p->mtu + 16 + 14 + 4);
+               max_avail_segs = max(1U, 4096 / (p->mtu - 40));
+       } else {
+               s->max_avail = 16384;
+               max_avail_segs = max(1U, 9000 / (p->mtu - 40));
+       }
+
+       pr_debug("t1_sched_update_parms: mtu %u speed %u max_avail %u "
+                "max_avail_segs %u drain_bits_per_1024ns %u\n", p->mtu,
+                p->speed, s->max_avail, max_avail_segs,
+                p->drain_bits_per_1024ns);
+
+       return max_avail_segs * (p->mtu - 40);
+}
+
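+/*
+ * Worked example of the drain computation above, with the mtu=1500,
+ * speed=1000 defaults from tx_sched_init():
+ *
+ *   drain = 1024 * 1000 * (1500 - 40) / ((1500 + 50) * 1000) = 964
+ *
+ * i.e. ~964 bits per 1024ns, or roughly 941Mbit/s of payload on a
+ * 1Gbit/s port. (mtu - 40) discounts the TCP/IP headers, and (mtu + 50)
+ * appears to approximate per-frame wire overhead (headers, CRC,
+ * preamble, inter-frame gap).
+ */
+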
+/*
+ * t1_sched_set_max_avail_bytes() tells the scheduler the maximum amount of
+ * data that can be pushed per port.
+ */
+void t1_sched_set_max_avail_bytes(struct sge *sge, unsigned int val)
+{
+       struct sched *s = sge->tx_sched;
+       unsigned int i;
+
+       s->max_avail = val;
+       for (i = 0; i < MAX_NPORTS; i++)
+               t1_sched_update_parms(sge, i, 0, 0);
+}
+
+/*
+ * t1_sched_set_drain_bits_per_us() tells the scheduler at which rate a port
+ * is draining.
+ */
+void t1_sched_set_drain_bits_per_us(struct sge *sge, unsigned int port,
+                                        unsigned int val)
+{
+       struct sched *s = sge->tx_sched;
+       struct sched_port *p = &s->p[port];
+
+       p->drain_bits_per_1024ns = val * 1024 / 1000;
+       t1_sched_update_parms(sge, port, 0, 0);
+}
+
+/*
+ * get_clock() implements a ns clock (see ktime_get)
+ */
+static inline ktime_t get_clock(void)
+{
+       struct timespec ts;
+
+       ktime_get_ts(&ts);
+       return timespec_to_ktime(ts);
+}
+
+/*
+ * tx_sched_init() allocates resources and does basic initialization.
+ */
+static int tx_sched_init(struct sge *sge)
+{
+       struct sched *s;
+       int i;
+
+       s = kzalloc(sizeof(struct sched), GFP_KERNEL);
+       if (!s)
+               return -ENOMEM;
+
+       pr_debug("tx_sched_init\n");
+       tasklet_init(&s->sched_tsk, restart_sched, (unsigned long) sge);
+       sge->tx_sched = s;
+
+       for (i = 0; i < MAX_NPORTS; i++) {
+               skb_queue_head_init(&s->p[i].skbq);
+               t1_sched_update_parms(sge, i, 1500, 1000);
+       }
+
+       return 0;
+}
+
+/*
+ * sched_update_avail() computes the delta since the last time it was called
+ * and updates the per-port quota (number of bits that can be sent to any
+ * port).
+ */
+static inline int sched_update_avail(struct sge *sge)
+{
+       struct sched *s = sge->tx_sched;
+       ktime_t now = get_clock();
+       unsigned int i;
+       long long delta_time_ns;
+
+       delta_time_ns = ktime_to_ns(ktime_sub(now, s->last_updated));
+
+       pr_debug("sched_update_avail delta=%lld\n", delta_time_ns);
+       if (delta_time_ns < 15000)
+               return 0;
+
+       for (i = 0; i < MAX_NPORTS; i++) {
+               struct sched_port *p = &s->p[i];
+               unsigned int delta_avail;
+
+               delta_avail = (p->drain_bits_per_1024ns * delta_time_ns) >> 13;
+               p->avail = min(p->avail + delta_avail, s->max_avail);
+       }
+
+       s->last_updated = now;
+
+       return 1;
+}
+
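+/*
+ * Units check for the shift above (an inference, not spelled out in the
+ * driver): rate * delta_ns >> 10 would yield bits for a rate expressed
+ * per 1024ns, and the extra >> 3 converts bits to bytes, so >> 13 leaves
+ * p->avail in bytes - the same unit as the skb->len it is compared
+ * against in sched_skb(). The "bits" wording in the sched_port comments
+ * is loose.
+ */
+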
+/*
+ * sched_skb() is called from two different places. In the tx path, any
+ * packet generating load on an output port will call sched_skb()
+ * (skb != NULL). In addition, sched_skb() is called from the irq/soft irq
+ * context (skb == NULL).
+ * The scheduler only returns a skb (which will then be sent) if the
+ * length of the skb is <= the current quota of the output port.
+ */
+static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
+                               unsigned int credits)
+{
+       struct sched *s = sge->tx_sched;
+       struct sk_buff_head *skbq;
+       unsigned int i, len, update = 1;
+
+       pr_debug("sched_skb %p\n", skb);
+       if (!skb) {
+               if (!s->num)
+                       return NULL;
+       } else {
+               skbq = &s->p[skb->dev->if_port].skbq;
+               __skb_queue_tail(skbq, skb);
+               s->num++;
+               skb = NULL;
+       }
+
+       if (credits < MAX_SKB_FRAGS + 1)
+               goto out;
+
+ again:
+       for (i = 0; i < MAX_NPORTS; i++) {
+               s->port = (s->port + 1) & (MAX_NPORTS - 1);
+               skbq = &s->p[s->port].skbq;
+
+               skb = skb_peek(skbq);
+
+               if (!skb)
+                       continue;
+
+               len = skb->len;
+               if (len <= s->p[s->port].avail) {
+                       s->p[s->port].avail -= len;
+                       s->num--;
+                       __skb_unlink(skb, skbq);
+                       goto out;
+               }
+               skb = NULL;
+       }
+
+       if (update-- && sched_update_avail(sge))
+               goto again;
+
+ out:
+       /* If there are more pending skbs, we use the hardware to schedule us
+        * again.
+        */
+       if (s->num && !skb) {
+               struct cmdQ *q = &sge->cmdQ[0];
+               clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+               if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+                       set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+                       writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+               }
+       }
+       pr_debug("sched_skb ret %p\n", skb);
+
+       return skb;
+}
+
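+/*
+ * How the doorbell hand-off above (also used by restart_sched() and
+ * t1_sge_tx()) appears to work: CMDQ_STAT_RUNNING is claimed with
+ * test_and_set_bit(), so only one path rings F_CMDQ0_ENABLE at a time,
+ * while CMDQ_STAT_LAST_PKT_DB records whether that doorbell already
+ * covered the latest producer index; update_tx_info() re-rings when it
+ * finds work was queued after the last doorbell.
+ */
+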
 /*
  * PIO to indicate that memory mapped Q contains valid descriptor(s).
  */
@@ -336,10 +581,9 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
                        goto err_no_mem;
                memset(q->entries, 0, size);
                size = sizeof(struct freelQ_ce) * q->size;
-               q->centries = kmalloc(size, GFP_KERNEL);
+               q->centries = kzalloc(size, GFP_KERNEL);
                if (!q->centries)
                        goto err_no_mem;
-               memset(q->centries, 0, size);
        }
 
        /*
@@ -352,8 +596,11 @@ static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
        sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
                sizeof(struct cpl_rx_data) +
                sge->freelQ[!sge->jumbo_fl].dma_offset;
-       sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
-               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       size = (16 * 1024) -
+              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+       sge->freelQ[sge->jumbo_fl].rx_buffer_size = size;
 
        /*
         * Setup which skb recycle Q should be used when recycling buffers from
@@ -390,17 +637,23 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
        q->in_use -= n;
        ce = &q->centries[cidx];
        while (n--) {
-               if (q->sop)
-                       pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
-                                        pci_unmap_len(ce, dma_len),
-                                        PCI_DMA_TODEVICE);
-               else
-                       pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
-                                      pci_unmap_len(ce, dma_len),
-                                      PCI_DMA_TODEVICE);
-               q->sop = 0;
+               if (likely(pci_unmap_len(ce, dma_len))) {
+                       if (q->sop) {
+                               pci_unmap_single(pdev,
+                                                pci_unmap_addr(ce, dma_addr),
+                                                pci_unmap_len(ce, dma_len),
+                                                PCI_DMA_TODEVICE);
+                               q->sop = 0;
+                       } else {
+                               pci_unmap_page(pdev,
+                                              pci_unmap_addr(ce, dma_addr),
+                                              pci_unmap_len(ce, dma_len),
+                                              PCI_DMA_TODEVICE);
+                       }
+               }
                if (ce->skb) {
-                       dev_kfree_skb(ce->skb);
+                       dev_kfree_skb_any(ce->skb);
                        q->sop = 1;
                }
                ce++;
@@ -464,10 +717,9 @@ static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
                        goto err_no_mem;
                memset(q->entries, 0, size);
                size = sizeof(struct cmdQ_ce) * q->size;
-               q->centries = kmalloc(size, GFP_KERNEL);
+               q->centries = kzalloc(size, GFP_KERNEL);
                if (!q->centries)
                        goto err_no_mem;
-               memset(q->centries, 0, size);
        }
 
        /*
@@ -507,7 +759,7 @@ void t1_set_vlan_accel(struct adapter *adapter, int on_off)
                sge->sge_control |= F_VLAN_XTRACT;
        if (adapter->open_device_map) {
                writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
-               readl(adapter->regs + A_SG_CONTROL); /* flush */
+               readl(adapter->regs + A_SG_CONTROL);   /* flush */
        }
 }
 
@@ -541,7 +793,6 @@ static void configure_sge(struct sge *sge, struct sge_params *p)
        sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
                F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
                V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
-               F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
                V_RX_PKT_OFFSET(sge->rx_pkt_pad);
 
 #if defined(__BIG_ENDIAN_BITFIELD)
@@ -569,9 +820,12 @@ static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
  */
 void t1_sge_destroy(struct sge *sge)
 {
-       if (sge->espibug_skb)
-               kfree_skb(sge->espibug_skb);
+       int i;
 
+       for_each_port(sge->adapter, i)
+               free_percpu(sge->port_stats[i]);
+
+       kfree(sge->tx_sched);
        free_tx_resources(sge);
        free_rx_resources(sge);
        kfree(sge);
@@ -736,14 +990,28 @@ int t1_sge_intr_error_handler(struct sge *sge)
        return 0;
 }
 
-const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
+const struct sge_intr_counts *t1_sge_get_intr_counts(const struct sge *sge)
 {
        return &sge->stats;
 }
 
-const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
+void t1_sge_get_port_stats(const struct sge *sge, int port,
+                          struct sge_port_stats *ss)
 {
-       return &sge->port_stats[port];
+       int cpu;
+
+       memset(ss, 0, sizeof(*ss));
+       for_each_possible_cpu(cpu) {
+               struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[port], cpu);
+
+               ss->rx_packets += st->rx_packets;
+               ss->rx_cso_good += st->rx_cso_good;
+               ss->tx_packets += st->tx_packets;
+               ss->tx_cso += st->tx_cso;
+               ss->tx_tso += st->tx_tso;
+               ss->vlan_xtract += st->vlan_xtract;
+               ss->vlan_insert += st->vlan_insert;
+       }
 }
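+
+/*
+ * The per-port counters summed above follow the usual per-CPU counter
+ * pattern. A minimal sketch with hypothetical names (not part of this
+ * driver):
+ *
+ *     struct foo_stats { u64 rx_packets; };
+ *     struct foo_stats *stats = alloc_percpu(struct foo_stats);
+ *
+ *     // writer, on its local CPU, no lock needed:
+ *     per_cpu_ptr(stats, smp_processor_id())->rx_packets++;
+ *
+ *     // reader (e.g. ethtool) sums across all CPUs:
+ *     u64 total = 0;
+ *     int cpu;
+ *     for_each_possible_cpu(cpu)
+ *             total += per_cpu_ptr(stats, cpu)->rx_packets;
+ *
+ * Writers scale with the CPU count at the cost of a summing reader.
+ */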
 
 /**
@@ -856,6 +1124,99 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
        recycle_fl_buf(fl, fl->cidx);
 }
 
+/*
+ * T1/T2 SGE limits the maximum DMA size per TX descriptor to
+ * SGE_TX_DESC_MAX_PLEN (16KB). If PAGE_SIZE is larger than 16KB, the
+ * stack can hand us a contiguous buffer that exceeds SGE_TX_DESC_MAX_PLEN.
+ * Note that the *_large_page_tx_descs stuff will be optimized out when
+ * PAGE_SIZE <= SGE_TX_DESC_MAX_PLEN.
+ *
+ * compute_large_page_tx_descs() computes how many additional descriptors are
+ * required to break down the stack's request.
+ */
+static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
+{
+       unsigned int count = 0;
+
+       if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+               unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+               unsigned int i, len = skb->len - skb->data_len;
+               while (len > SGE_TX_DESC_MAX_PLEN) {
+                       count++;
+                       len -= SGE_TX_DESC_MAX_PLEN;
+               }
+               for (i = 0; nfrags--; i++) {
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+                       len = frag->size;
+                       while (len > SGE_TX_DESC_MAX_PLEN) {
+                               count++;
+                               len -= SGE_TX_DESC_MAX_PLEN;
+                       }
+               }
+       }
+       return count;
+}
+
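+/*
+ * Worked example, assuming an arch with 64KB pages: a 40000-byte linear
+ * area costs two extra descriptors here (40000 = 16384 + 16384 + 7232,
+ * i.e. three descriptors where one would otherwise do). Wherever
+ * PAGE_SIZE <= 16KB the test above is false at compile time and the
+ * function reduces to "return 0".
+ */
+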
+/*
+ * Write a cmdQ entry.
+ *
+ * Since this function writes the 'flags' field, it must not be used to
+ * write the first cmdQ entry.
+ */
+static inline void write_tx_desc(struct cmdQ_e *e, dma_addr_t mapping,
+                                unsigned int len, unsigned int gen,
+                                unsigned int eop)
+{
+       BUG_ON(len > SGE_TX_DESC_MAX_PLEN);
+       e->addr_lo = (u32)mapping;
+       e->addr_hi = (u64)mapping >> 32;
+       e->len_gen = V_CMD_LEN(len) | V_CMD_GEN1(gen);
+       e->flags = F_CMD_DATAVALID | V_CMD_EOP(eop) | V_CMD_GEN2(gen);
+}
+
+/*
+ * See the comment for the previous function.
+ *
+ * write_large_page_tx_descs() writes additional SGE TX descriptors if
+ * *desc_len exceeds the hardware's per-descriptor limit.
+ */
+static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
+                                                    struct cmdQ_e **e,
+                                                    struct cmdQ_ce **ce,
+                                                    unsigned int *gen,
+                                                    dma_addr_t *desc_mapping,
+                                                    unsigned int *desc_len,
+                                                    unsigned int nfrags,
+                                                    struct cmdQ *q)
+{
+       if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
+               struct cmdQ_e *e1 = *e;
+               struct cmdQ_ce *ce1 = *ce;
+
+               while (*desc_len > SGE_TX_DESC_MAX_PLEN) {
+                       *desc_len -= SGE_TX_DESC_MAX_PLEN;
+                       write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
+                                     *gen, nfrags == 0 && *desc_len == 0);
+                       ce1->skb = NULL;
+                       pci_unmap_len_set(ce1, dma_len, 0);
+                       *desc_mapping += SGE_TX_DESC_MAX_PLEN;
+                       if (*desc_len) {
+                               ce1++;
+                               e1++;
+                               if (++pidx == q->size) {
+                                       pidx = 0;
+                                       *gen ^= 1;
+                                       ce1 = q->centries;
+                                       e1 = q->entries;
+                               }
+                       }
+               }
+               *e = e1;
+               *ce = ce1;
+       }
+       return pidx;
+}
+
 /*
  * Write the command descriptors to transmit the given skb starting at
  * descriptor pidx with the given generation.
@@ -864,50 +1225,84 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
                                  unsigned int pidx, unsigned int gen,
                                  struct cmdQ *q)
 {
-       dma_addr_t mapping;
+       dma_addr_t mapping, desc_mapping;
        struct cmdQ_e *e, *e1;
        struct cmdQ_ce *ce;
-       unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
+       unsigned int i, flags, first_desc_len, desc_len,
+           nfrags = skb_shinfo(skb)->nr_frags;
 
-       mapping = pci_map_single(adapter->pdev, skb->data,
-                                skb->len - skb->data_len, PCI_DMA_TODEVICE);
+       e = e1 = &q->entries[pidx];
        ce = &q->centries[pidx];
+
+       mapping = pci_map_single(adapter->pdev, skb->data,
+                               skb->len - skb->data_len, PCI_DMA_TODEVICE);
+
+       desc_mapping = mapping;
+       desc_len = skb->len - skb->data_len;
+
+       flags = F_CMD_DATAVALID | F_CMD_SOP |
+           V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
+           V_CMD_GEN2(gen);
+       first_desc_len = (desc_len <= SGE_TX_DESC_MAX_PLEN) ?
+           desc_len : SGE_TX_DESC_MAX_PLEN;
+       e->addr_lo = (u32)desc_mapping;
+       e->addr_hi = (u64)desc_mapping >> 32;
+       e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
+       ce->skb = NULL;
+       pci_unmap_len_set(ce, dma_len, 0);
+
+       if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
+           desc_len > SGE_TX_DESC_MAX_PLEN) {
+               desc_mapping += first_desc_len;
+               desc_len -= first_desc_len;
+               e1++;
+               ce++;
+               if (++pidx == q->size) {
+                       pidx = 0;
+                       gen ^= 1;
+                       e1 = q->entries;
+                       ce = q->centries;
+               }
+               pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+                                                &desc_mapping, &desc_len,
+                                                nfrags, q);
+
+               if (likely(desc_len))
+                       write_tx_desc(e1, desc_mapping, desc_len, gen,
+                                     nfrags == 0);
+       }
+
        ce->skb = NULL;
        pci_unmap_addr_set(ce, dma_addr, mapping);
        pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
 
-       flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
-               V_CMD_GEN2(gen);
-       e = &q->entries[pidx];
-       e->addr_lo = (u32)mapping;
-       e->addr_hi = (u64)mapping >> 32;
-       e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
-       for (e1 = e, i = 0; nfrags--; i++) {
+       for (i = 0; nfrags--; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
-               ce++;
                e1++;
+               ce++;
                if (++pidx == q->size) {
                        pidx = 0;
                        gen ^= 1;
-                       ce = q->centries;
                        e1 = q->entries;
+                       ce = q->centries;
                }
 
                mapping = pci_map_page(adapter->pdev, frag->page,
                                       frag->page_offset, frag->size,
                                       PCI_DMA_TODEVICE);
+               desc_mapping = mapping;
+               desc_len = frag->size;
+
+               pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen,
+                                                &desc_mapping, &desc_len,
+                                                nfrags, q);
+               if (likely(desc_len))
+                       write_tx_desc(e1, desc_mapping, desc_len, gen,
+                                     nfrags == 0);
                ce->skb = NULL;
                pci_unmap_addr_set(ce, dma_addr, mapping);
                pci_unmap_len_set(ce, dma_len, frag->size);
-
-               e1->addr_lo = (u32)mapping;
-               e1->addr_hi = (u64)mapping >> 32;
-               e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
-               e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
-                           V_CMD_GEN2(gen);
        }
-
        ce->skb = skb;
        wmb();
        e->flags = flags;
@@ -921,26 +1316,56 @@ static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
        unsigned int reclaim = q->processed - q->cleaned;
 
        if (reclaim) {
+               pr_debug("reclaim_completed_tx processed:%d cleaned:%d\n",
+                        q->processed, q->cleaned);
                free_cmdQ_buffers(sge, q, reclaim);
                q->cleaned += reclaim;
        }
 }
 
-#ifndef SET_ETHTOOL_OPS
-# define __netif_rx_complete(dev) netif_rx_complete(dev)
-#endif
-
 /*
- * We cannot use the standard netif_rx_schedule_prep() because we have multiple
- * ports plus the TOE all multiplexing onto a single response queue, therefore
- * accepting new responses cannot depend on the state of any particular port.
- * So define our own equivalent that omits the netif_running() test.
+ * Called from tasklet. Checks the scheduler for any
+ * pending skbs that can be sent.
  */
-static inline int napi_schedule_prep(struct net_device *dev)
+static void restart_sched(unsigned long arg)
 {
-       return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
-}
+       struct sge *sge = (struct sge *) arg;
+       struct adapter *adapter = sge->adapter;
+       struct cmdQ *q = &sge->cmdQ[0];
+       struct sk_buff *skb;
+       unsigned int credits, queued_skb = 0;
+
+       spin_lock(&q->lock);
+       reclaim_completed_tx(sge, q);
+
+       credits = q->size - q->in_use;
+       pr_debug("restart_sched credits=%d\n", credits);
+       while ((skb = sched_skb(sge, NULL, credits)) != NULL) {
+               unsigned int genbit, pidx, count;
+
+               count = 1 + skb_shinfo(skb)->nr_frags;
+               count += compute_large_page_tx_descs(skb);
+               q->in_use += count;
+               genbit = q->genbit;
+               pidx = q->pidx;
+               q->pidx += count;
+               if (q->pidx >= q->size) {
+                       q->pidx -= q->size;
+                       q->genbit ^= 1;
+               }
+               write_tx_descs(adapter, skb, pidx, genbit, q);
+               credits = q->size - q->in_use;
+               queued_skb = 1;
+       }
 
+       if (queued_skb) {
+               clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+               if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+                       set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+                       writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+               }
+       }
+       spin_unlock(&q->lock);
+}
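+
+/*
+ * restart_sched() is kicked via tasklet_hi_schedule() from
+ * update_tx_info() whenever CMDQ0 credits are reclaimed, so skbs parked
+ * by sched_skb() drain again as soon as descriptors free up.
+ */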
 
 /**
  *     sge_rx - process an ingress ethernet packet
@@ -955,41 +1380,53 @@ static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
        struct sk_buff *skb;
        struct cpl_rx_pkt *p;
        struct adapter *adapter = sge->adapter;
+       struct sge_port_stats *st;
 
-       sge->stats.ethernet_pkts++;
        skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
                         sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
                         SGE_RX_DROP_THRES);
-       if (!skb) {
-               sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
+       if (unlikely(!skb)) {
+               sge->stats.rx_drops++;
                return 0;
        }
 
        p = (struct cpl_rx_pkt *)skb->data;
        skb_pull(skb, sizeof(*p));
+       if (p->iff >= adapter->params.nports) {
+               kfree_skb(skb);
+               return 0;
+       }
+
        skb->dev = adapter->port[p->iff].dev;
        skb->dev->last_rx = jiffies;
+       st = per_cpu_ptr(sge->port_stats[p->iff], smp_processor_id());
+       st->rx_packets++;
+
        skb->protocol = eth_type_trans(skb, skb->dev);
        if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
            skb->protocol == htons(ETH_P_IP) &&
            (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
-               sge->port_stats[p->iff].rx_cso_good++;
+               ++st->rx_cso_good;
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        } else
                skb->ip_summed = CHECKSUM_NONE;
 
        if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
-               sge->port_stats[p->iff].vlan_xtract++;
-               if (adapter->params.sge.polling)
+               st->vlan_xtract++;
+#ifdef CONFIG_CHELSIO_T1_NAPI
                        vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
                                                 ntohs(p->vlan));
-               else
+#else
                        vlan_hwaccel_rx(skb, adapter->vlan_grp,
                                        ntohs(p->vlan));
-       } else if (adapter->params.sge.polling)
+#endif
+       } else {
+#ifdef CONFIG_CHELSIO_T1_NAPI
                netif_receive_skb(skb);
-       else
+#else
                netif_rx(skb);
+#endif
+       }
        return 0;
 }
 
@@ -1040,18 +1477,24 @@ static unsigned int update_tx_info(struct adapter *adapter,
        struct cmdQ *cmdq = &sge->cmdQ[0];
 
        cmdq->processed += pr0;
-
+       if (flags & (F_FL0_ENABLE | F_FL1_ENABLE)) {
+               freelQs_empty(sge);
+               flags &= ~(F_FL0_ENABLE | F_FL1_ENABLE);
+       }
        if (flags & F_CMDQ0_ENABLE) {
                clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
-       
+
                if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
                    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
                        set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
                        writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
                }
-               flags &= ~F_CMDQ0_ENABLE;
+               if (sge->tx_sched)
+                       tasklet_hi_schedule(&sge->tx_sched->sched_tsk);
+
+               flags &= ~F_CMDQ0_ENABLE;
        }
-       
+
        if (unlikely(sge->stopped_tx_queues != 0))
                restart_tx_queues(sge);
 
@@ -1093,8 +1536,7 @@ static int process_responses(struct adapter *adapter, int budget)
                if (likely(e->DataValid)) {
                        struct freelQ *fl = &sge->freelQ[e->FreelistQid];
 
-                       if (unlikely(!e->Sop || !e->Eop))
-                               BUG();
+                       BUG_ON(!e->Sop || !e->Eop);
                        if (unlikely(e->Offload))
                                unexpected_offload(adapter, fl);
                        else
@@ -1134,6 +1576,7 @@ static int process_responses(struct adapter *adapter, int budget)
        return budget;
 }
 
+#ifdef CONFIG_CHELSIO_T1_NAPI
 /*
  * A simpler version of process_responses() that handles only pure (i.e.,
 * non data-carrying) responses.  Such responses are too light-weight to justify
@@ -1181,91 +1624,76 @@ static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
  * or protection from interrupts as data interrupts are off at this point and
  * other adapter interrupts do not interfere.
  */
-static int t1_poll(struct net_device *dev, int *budget)
+int t1_poll(struct net_device *dev, int *budget)
 {
        struct adapter *adapter = dev->priv;
        int effective_budget = min(*budget, dev->quota);
-
        int work_done = process_responses(adapter, effective_budget);
+
        *budget -= work_done;
        dev->quota -= work_done;
 
        if (work_done >= effective_budget)
                return 1;
 
+       spin_lock_irq(&adapter->async_lock);
        __netif_rx_complete(dev);
-
-       /*
-        * Because we don't atomically flush the following write it is
-        * possible that in very rare cases it can reach the device in a way
-        * that races with a new response being written plus an error interrupt
-        * causing the NAPI interrupt handler below to return unhandled status
-        * to the OS.  To protect against this would require flushing the write
-        * and doing both the write and the flush with interrupts off.  Way too
-        * expensive and unjustifiable given the rarity of the race.
-        */
        writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-       return 0;
-}
+       writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+              adapter->regs + A_PL_ENABLE);
+       spin_unlock_irq(&adapter->async_lock);
 
-/*
- * Returns true if the device is already scheduled for polling.
- */
-static inline int napi_is_scheduled(struct net_device *dev)
-{
-       return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+       return 0;
 }
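+
+/*
+ * Rearming the response queue (A_SG_SLEEPING) and unmasking the data
+ * interrupt (A_PL_ENABLE) now happen together under async_lock, with the
+ * data IRQ masked by t1_interrupt() below for the whole polling
+ * interval; that is what retires the race described in the comment
+ * deleted above.
+ */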
 
 /*
  * NAPI version of the main interrupt handler.
  */
-static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
+irqreturn_t t1_interrupt(int irq, void *data)
 {
-       int handled;
        struct adapter *adapter = data;
+       struct net_device *dev = adapter->sge->netdev;
        struct sge *sge = adapter->sge;
-       struct respQ *q = &adapter->sge->respQ;
+       u32 cause;
+       int handled = 0;
 
-       /*
-        * Clear the SGE_DATA interrupt first thing.  Normally the NAPI
-        * handler has control of the response queue and the interrupt handler
-        * can look at the queue reliably only once it knows NAPI is off.
-        * We can't wait that long to clear the SGE_DATA interrupt because we
-        * could race with t1_poll rearming the SGE interrupt, so we need to
-        * clear the interrupt speculatively and really early on.
-        */
-       writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+       cause = readl(adapter->regs + A_PL_CAUSE);
+       if (cause == 0 || cause == ~0)
+               return IRQ_NONE;
 
        spin_lock(&adapter->async_lock);
-       if (!napi_is_scheduled(sge->netdev)) {
+       if (cause & F_PL_INTR_SGE_DATA) {
+               struct respQ *q = &adapter->sge->respQ;
                struct respQ_e *e = &q->entries[q->cidx];
 
-               if (e->GenerationBit == q->genbit) {
-                       if (e->DataValid ||
-                           process_pure_responses(adapter, e)) {
-                               if (likely(napi_schedule_prep(sge->netdev)))
-                                       __netif_rx_schedule(sge->netdev);
-                               else
-                                       printk(KERN_CRIT
-                                              "NAPI schedule failure!\n");
-                       } else
-                       writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-                       handled = 1;
-                       goto unlock;
-               } else
+               handled = 1;
+               writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+               if (e->GenerationBit == q->genbit &&
+                   __netif_rx_schedule_prep(dev)) {
+                       if (e->DataValid || process_pure_responses(adapter, e)) {
+                               /* mask off data IRQ */
+                               writel(adapter->slow_intr_mask,
+                                      adapter->regs + A_PL_ENABLE);
+                               __netif_rx_schedule(sge->netdev);
+                               goto unlock;
+                       }
+                       /* no data, no NAPI needed */
+                       netif_poll_enable(dev);
+               }
                writel(q->cidx, adapter->regs + A_SG_SLEEPING);
-       }  else
-       if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
-               printk(KERN_ERR "data interrupt while NAPI running\n");
-       
-       handled = t1_slow_intr_handler(adapter);
+       } else
+               handled = t1_slow_intr_handler(adapter);
+
        if (!handled)
                sge->stats.unhandled_irqs++;
- unlock:
+unlock:
        spin_unlock(&adapter->async_lock);
        return IRQ_RETVAL(handled != 0);
 }
 
+#else
 /*
  * Main interrupt handler, optimized assuming that we took a 'DATA'
  * interrupt.
@@ -1281,7 +1709,7 @@ static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
  * 5. If we took an interrupt, but no valid respQ descriptors were found we
  *      let the slow_intr_handler run and do error handling.
  */
-static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
+irqreturn_t t1_interrupt(int irq, void *cookie)
 {
        int work_done;
        struct respQ_e *e;
@@ -1313,11 +1741,7 @@ static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
        spin_unlock(&adapter->async_lock);
        return IRQ_RETVAL(work_done != 0);
 }
-
-intr_handler_t t1_select_intr_handler(adapter_t *adapter)
-{
-       return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
-}
+#endif
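+
+/*
+ * t1_select_intr_handler() is gone because the NAPI/non-NAPI choice is
+ * now made at build time via CONFIG_CHELSIO_T1_NAPI: exactly one
+ * t1_interrupt() definition is compiled, and the runtime sge.polling
+ * knob is dropped along with it (see the t1_sge_create() hunk below,
+ * which removes p->polling).
+ */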
 
 /*
  * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
@@ -1337,34 +1761,59 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
 {
        struct sge *sge = adapter->sge;
        struct cmdQ *q = &sge->cmdQ[qid];
-       unsigned int credits, pidx, genbit, count;
+       unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
+
+       if (!spin_trylock(&q->lock))
+               return NETDEV_TX_LOCKED;
 
-       spin_lock(&q->lock);
        reclaim_completed_tx(sge, q);
 
        pidx = q->pidx;
        credits = q->size - q->in_use;
        count = 1 + skb_shinfo(skb)->nr_frags;
+       count += compute_large_page_tx_descs(skb);
 
-       {       /* Ethernet packet */
-               if (unlikely(credits < count)) {
+       /* Ethernet packet */
+       if (unlikely(credits < count)) {
+               if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
                        set_bit(dev->if_port, &sge->stopped_tx_queues);
                        sge->stats.cmdQ_full[2]++;
-                       spin_unlock(&q->lock);
-                       if (!netif_queue_stopped(dev))
-                               CH_ERR("%s: Tx ring full while queue awake!\n",
-                                      adapter->name);
-                       return NETDEV_TX_BUSY;
+                       CH_ERR("%s: Tx ring full while queue awake!\n",
+                              adapter->name);
                }
-               if (unlikely(credits - count < q->stop_thres)) {
-                       sge->stats.cmdQ_full[2]++;
-                       netif_stop_queue(dev);
-                       set_bit(dev->if_port, &sge->stopped_tx_queues);
+               spin_unlock(&q->lock);
+               return NETDEV_TX_BUSY;
+       }
+
+       if (unlikely(credits - count < q->stop_thres)) {
+               netif_stop_queue(dev);
+               set_bit(dev->if_port, &sge->stopped_tx_queues);
+               sge->stats.cmdQ_full[2]++;
+       }
+
+       /* T204 cmdQ0 skbs that are destined for a certain port have to go
+        * through the scheduler.
+        */
+       if (sge->tx_sched && !qid && skb->dev) {
+use_sched:
+               use_sched_skb = 1;
+               /* Note that the scheduler might return a different skb than
+                * the one passed in.
+                */
+               skb = sched_skb(sge, skb, credits);
+               if (!skb) {
+                       spin_unlock(&q->lock);
+                       return NETDEV_TX_OK;
                }
+               pidx = q->pidx;
+               count = 1 + skb_shinfo(skb)->nr_frags;
+               count += compute_large_page_tx_descs(skb);
        }
+
        q->in_use += count;
        genbit = q->genbit;
+       pidx = q->pidx;
        q->pidx += count;
        if (q->pidx >= q->size) {
                q->pidx -= q->size;
@@ -1390,6 +1839,14 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
                        writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
                }
        }
+
+       if (use_sched_skb) {
+               if (spin_trylock(&q->lock)) {
+                       credits = q->size - q->in_use;
+                       skb = NULL;
+                       goto use_sched;
+               }
+       }
        return NETDEV_TX_OK;
 }
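+
+/*
+ * t1_sge_tx() now uses spin_trylock() and returns NETDEV_TX_LOCKED
+ * instead of spinning on q->lock; that return value asks the core to
+ * back off and retry, a convention meant for drivers that do their own
+ * TX locking (the NETIF_F_LLTX style).
+ */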
 
@@ -1414,16 +1871,20 @@ static inline int eth_hdr_len(const void *data)
 int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct adapter *adapter = dev->priv;
-       struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
        struct sge *sge = adapter->sge;
+       struct sge_port_stats *st = per_cpu_ptr(sge->port_stats[dev->if_port],
+                                               smp_processor_id());
        struct cpl_tx_pkt *cpl;
+       struct sk_buff *orig_skb = skb;
+       int ret;
+
+       if (skb->protocol == htons(ETH_P_CPL5))
+               goto send;
 
-#ifdef NETIF_F_TSO
-       if (skb_shinfo(skb)->tso_size) {
+       if (skb_shinfo(skb)->gso_size) {
                int eth_type;
                struct cpl_tx_pkt_lso *hdr;
 
-               st->tso++;
+               ++st->tx_tso;
 
                eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
                        CPL_ETH_II : CPL_ETH_II_VLAN;
@@ -1434,13 +1895,10 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
                hdr->ip_hdr_words = skb->nh.iph->ihl;
                hdr->tcp_hdr_words = skb->h.th->doff;
                hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
-                                               skb_shinfo(skb)->tso_size));
+                                                         skb_shinfo(skb)->gso_size));
                hdr->len = htonl(skb->len - sizeof(*hdr));
                cpl = (struct cpl_tx_pkt *)hdr;
-               sge->stats.tx_lso_pkts++;
-       } else
-#endif
-       {
+       } else {
                /*
                 * Packets shorter than ETH_HLEN can break the MAC, drop them
                 * early.  Also, we may get oversized packets because some
@@ -1449,6 +1907,8 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 */
                if (unlikely(skb->len < ETH_HLEN ||
                             skb->len > dev->mtu + eth_hdr_len(skb->data))) {
+                       pr_debug("%s: packet size %d hdr %d mtu %d\n", dev->name,
+                                skb->len, eth_hdr_len(skb->data), dev->mtu);
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
                }
@@ -1458,9 +1918,9 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 * components, such as pktgen, do not handle it right.
                 * Complain when this happens but try to fix things up.
                 */
-               if (unlikely(skb_headroom(skb) <
-                            dev->hard_header_len - ETH_HLEN)) {
-                       struct sk_buff *orig_skb = skb;
+               if (unlikely(skb_headroom(skb) < dev->hard_header_len - ETH_HLEN)) {
+                       pr_debug("%s: headroom %d header_len %d\n", dev->name,
+                                skb_headroom(skb), dev->hard_header_len);
 
                        if (net_ratelimit())
                                printk(KERN_ERR "%s: inadequate headroom in "
@@ -1472,20 +1932,22 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
                }
 
                if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
-                   skb->ip_summed == CHECKSUM_HW &&
-                   skb->nh.iph->protocol == IPPROTO_UDP)
-                       if (unlikely(skb_checksum_help(skb, 0))) {
+                   skb->ip_summed == CHECKSUM_PARTIAL &&
+                   skb->nh.iph->protocol == IPPROTO_UDP) {
+                       if (unlikely(skb_checksum_help(skb))) {
+                               pr_debug("%s: unable to do udp checksum\n", dev->name);
                                dev_kfree_skb_any(skb);
                                return NETDEV_TX_OK;
                        }
+               }
 
                /* Hmm, this assumes we catch the gratuitous ARP... and we'll
                 * use it to flush out stuck espi packets...
-                 */
-               if (unlikely(!adapter->sge->espibug_skb)) {
+                */
+               if (unlikely(!adapter->sge->espibug_skb[dev->if_port])) {
                        if (skb->protocol == htons(ETH_P_ARP) &&
                            skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
-                               adapter->sge->espibug_skb = skb;
+                               adapter->sge->espibug_skb[dev->if_port] = skb;
                                /* We want to re-use this skb later. We
                                 * simply bump the reference count and it
                                 * will not be freed...
@@ -1497,12 +1959,10 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
                cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
                cpl->opcode = CPL_TX_PKT;
                cpl->ip_csum_dis = 1;    /* SW calculates IP csum */
-               cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
+               cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_PARTIAL ? 0 : 1;
                /* the length field isn't used so don't bother setting it */
 
-               st->tx_cso += (skb->ip_summed == CHECKSUM_HW);
-               sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW);
-               sge->stats.tx_reg_pkts++;
+               st->tx_cso += (skb->ip_summed == CHECKSUM_PARTIAL);
        }
        cpl->iff = dev->if_port;
 
@@ -1515,8 +1975,19 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
                cpl->vlan_valid = 0;
 
+send:
+       st->tx_packets++;
        dev->trans_start = jiffies;
-       return t1_sge_tx(skb, adapter, 0, dev);
+       ret = t1_sge_tx(skb, adapter, 0, dev);
+
+       /* If transmit busy, and we reallocated skb's due to headroom limit,
+        * then silently discard to avoid leak.
+        */
+       if (unlikely(ret != NETDEV_TX_OK && skb != orig_skb)) {
+               dev_kfree_skb_any(skb);
+               ret = NETDEV_TX_OK;
+       }
+       return ret;
 }
 
 /*
@@ -1534,10 +2005,9 @@ static void sge_tx_reclaim_cb(unsigned long data)
                        continue;
 
                reclaim_completed_tx(sge, q);
-               if (i == 0 && q->in_use)   /* flush pending credits */
-                       writel(F_CMDQ0_ENABLE,
-                               sge->adapter->regs + A_SG_DOORBELL);
-
+               if (i == 0 && q->in_use) {    /* flush pending credits */
+                       writel(F_CMDQ0_ENABLE, sge->adapter->regs + A_SG_DOORBELL);
+               }
                spin_unlock(&q->lock);
        }
        mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
@@ -1548,7 +2018,6 @@ static void sge_tx_reclaim_cb(unsigned long data)
  */
 int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
 {
-       sge->netdev->poll = t1_poll;
        sge->fixed_intrtimer = p->rx_coalesce_usecs *
                core_ticks_per_usec(sge->adapter);
        writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
@@ -1584,11 +2053,20 @@ int t1_sge_configure(struct sge *sge, struct sge_params *p)
  */
 void t1_sge_stop(struct sge *sge)
 {
+       int i;
        writel(0, sge->adapter->regs + A_SG_CONTROL);
-       (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+       readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
        if (is_T2(sge->adapter))
                del_timer_sync(&sge->espibug_timer);
+
        del_timer_sync(&sge->tx_reclaim_timer);
+       if (sge->tx_sched)
+               tx_sched_stop(sge);
+
+       for (i = 0; i < MAX_NPORTS; i++)
+               if (sge->espibug_skb[i])
+                       kfree_skb(sge->espibug_skb[i]);
 }
 
 /*
@@ -1601,74 +2079,128 @@ void t1_sge_start(struct sge *sge)
 
        writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
        doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
-       (void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+       readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
 
        mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
 
-       if (is_T2(sge->adapter)) 
+       if (is_T2(sge->adapter))
                mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
 }
 
 /*
  * Callback for the T2 ESPI 'stuck packet feature' workaround
  */
-static void espibug_workaround(void *data)
+static void espibug_workaround_t204(unsigned long data)
 {
        struct adapter *adapter = (struct adapter *)data;
        struct sge *sge = adapter->sge;
+       unsigned int nports = adapter->params.nports;
+       u32 seop[MAX_NPORTS];
 
-       if (netif_running(adapter->port[0].dev)) {
-               struct sk_buff *skb = sge->espibug_skb;
-
-               u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
-
-               if ((seop & 0xfff0fff) == 0xfff && skb) {
-                       if (!skb->cb[0]) {
-                               u8 ch_mac_addr[ETH_ALEN] =
-                                   {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
-                               memcpy(skb->data + sizeof(struct cpl_tx_pkt),
-                                   ch_mac_addr, ETH_ALEN);
-                               memcpy(skb->data + skb->len - 10, ch_mac_addr,
-                                   ETH_ALEN);
-                               skb->cb[0] = 0xff;
+       if (adapter->open_device_map & PORT_MASK) {
+               int i;
+
+               if (t1_espi_get_mon_t204(adapter, &(seop[0]), 0) < 0)
+                       return;
+
+               for (i = 0; i < nports; i++) {
+                       struct sk_buff *skb = sge->espibug_skb[i];
+
+                       if (netif_running(adapter->port[i].dev) &&
+                           !netif_queue_stopped(adapter->port[i].dev) &&
+                           seop[i] && ((seop[i] & 0xfff) == 0) && skb) {
+                               if (!skb->cb[0]) {
+                                       u8 ch_mac_addr[ETH_ALEN] =
+                                               {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
+                                       memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+                                              ch_mac_addr, ETH_ALEN);
+                                       memcpy(skb->data + skb->len - 10,
+                                              ch_mac_addr, ETH_ALEN);
+                                       skb->cb[0] = 0xff;
+                               }
+
+                               /* bump the reference count to avoid freeing of
+                                * the skb once the DMA has completed.
+                                */
+                               skb = skb_get(skb);
+                               t1_sge_tx(skb, adapter, 0, adapter->port[i].dev);
                        }
-
-                       /* bump the reference count to avoid freeing of the
-                        * skb once the DMA has completed.
-                        */
-                       skb = skb_get(skb);
-                       t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
                }
        }
        mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
 }
 
+static void espibug_workaround(unsigned long data)
+{
+       struct adapter *adapter = (struct adapter *)data;
+       struct sge *sge = adapter->sge;
+
+       if (netif_running(adapter->port[0].dev)) {
+               struct sk_buff *skb = sge->espibug_skb[0];
+               u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
+
+               if ((seop & 0xfff0fff) == 0xfff && skb) {
+                       if (!skb->cb[0]) {
+                               u8 ch_mac_addr[ETH_ALEN] =
+                                   {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
+                               memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+                                      ch_mac_addr, ETH_ALEN);
+                               memcpy(skb->data + skb->len - 10, ch_mac_addr,
+                                      ETH_ALEN);
+                               skb->cb[0] = 0xff;
+                       }
+
+                       /* bump the reference count to avoid freeing of the
+                        * skb once the DMA has completed.
+                        */
+                       skb = skb_get(skb);
+                       t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
+               }
+       }
+       mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
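+/*
+ * Both workaround flavours re-send a cached gratuitous-ARP skb with its
+ * MAC addresses rewritten to 00:07:43:00:00:00 (a Chelsio OUI) once the
+ * ESPI monitor suggests the TX path is wedged; skb->cb[0] marks skbs
+ * already rewritten, and skb_get() keeps the skb alive across repeated
+ * DMA completions.
+ */
+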
 /*
  * Creates a t1_sge structure and returns suggested resource parameters.
  */
 struct sge * __devinit t1_sge_create(struct adapter *adapter,
                                     struct sge_params *p)
 {
-       struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
+       struct sge *sge = kzalloc(sizeof(*sge), GFP_KERNEL);
+       int i;
 
        if (!sge)
                return NULL;
-       memset(sge, 0, sizeof(*sge));
 
        sge->adapter = adapter;
        sge->netdev = adapter->port[0].dev;
        sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
        sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
 
+       for_each_port(adapter, i) {
+               sge->port_stats[i] = alloc_percpu(struct sge_port_stats);
+               if (!sge->port_stats[i])
+                       goto nomem_port;
+       }
+
        init_timer(&sge->tx_reclaim_timer);
        sge->tx_reclaim_timer.data = (unsigned long)sge;
        sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
 
        if (is_T2(sge->adapter)) {
                init_timer(&sge->espibug_timer);
-               sge->espibug_timer.function = (void *)&espibug_workaround;
+
+               if (adapter->params.nports > 1) {
+                       tx_sched_init(sge);
+                       sge->espibug_timer.function = espibug_workaround_t204;
+               } else {
+                       sge->espibug_timer.function = espibug_workaround;
+               }
                sge->espibug_timer.data = (unsigned long)sge->adapter;
+
                sge->espibug_timeout = 1;
+               /* for T204, every 10ms */
+               if (adapter->params.nports > 1)
+                       sge->espibug_timeout = HZ/100;
        }
         
 
@@ -1676,10 +2208,24 @@ struct sge * __devinit t1_sge_create(struct adapter *adapter,
        p->cmdQ_size[1] = SGE_CMDQ1_E_N;
        p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
        p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
-       p->rx_coalesce_usecs =  50;
+       if (sge->tx_sched) {
+               if (board_info(sge->adapter)->board == CHBT_BOARD_CHT204)
+                       p->rx_coalesce_usecs = 15;
+               else
+                       p->rx_coalesce_usecs = 50;
+       } else
+               p->rx_coalesce_usecs = 50;
+
        p->coalesce_enable = 0;
        p->sample_interval_usecs = 0;
-       p->polling = 0;
 
        return sge;
+
+nomem_port:
+       while (--i >= 0)
+               free_percpu(sge->port_stats[i]);
+       kfree(sge);
+       return NULL;
 }