This commit was manufactured by cvs2svn to create tag
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index a4977d2..9e049c0 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -369,9 +369,9 @@ MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
  */
 #define ACE_MINI_SIZE          100
 
-#define ACE_MINI_BUFSIZE       ACE_MINI_SIZE
-#define ACE_STD_BUFSIZE                (ACE_STD_MTU + ETH_HLEN + 4)
-#define ACE_JUMBO_BUFSIZE      (ACE_JUMBO_MTU + ETH_HLEN + 4)
+#define ACE_MINI_BUFSIZE       (ACE_MINI_SIZE + 2 + 16)
+#define ACE_STD_BUFSIZE                (ACE_STD_MTU + ETH_HLEN + 2+4+16)
+#define ACE_JUMBO_BUFSIZE      (ACE_JUMBO_MTU + ETH_HLEN + 2+4+16)
 
 /*
  * There seems to be a magic difference in the effect between 995 and 996
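
The reworked *_BUFSIZE macros fold the receive headroom into the allocation size instead of adding NET_IP_ALIGN at alloc_skb() time. A minimal sketch of the arithmetic, assuming ACE_STD_MTU is 1500 as defined in acenic.h and ETH_HLEN is 14 (the constant names below are illustrative, not driver symbols): 2 bytes keep the IP header 4-byte aligned, the 4-byte slack is carried over from the old definition, and the extra 16 bytes push the IP header onto its own cache line.

/*
 * Sketch only, not part of the patch.  ACE_RX_HEADROOM, STD_ALLOC and
 * friends are hypothetical names used to spell out the sizing.
 */
enum {
	ACE_RX_HEADROOM = 2 + 16,                      /* IP alignment + cache-line pad  */
	STD_ALLOC       = 1500 + 14 + 2 + 4 + 16,      /* ACE_STD_BUFSIZE: 1536 bytes    */
	STD_DMA_LEN     = STD_ALLOC - ACE_RX_HEADROOM, /* 1518 bytes visible to the NIC  */
	STD_IP_OFFSET   = ACE_RX_HEADROOM + 14,        /* IP header at byte 32 of the skb */
};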
@@ -678,7 +678,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
                        ringp = &ap->skb->rx_std_skbuff[i];
                        mapping = pci_unmap_addr(ringp, mapping);
                        pci_unmap_page(ap->pdev, mapping,
-                                      ACE_STD_BUFSIZE,
+                                      ACE_STD_BUFSIZE - (2 + 16),
                                       PCI_DMA_FROMDEVICE);
 
                        ap->rx_std_ring[i].size = 0;
@@ -698,7 +698,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
                                ringp = &ap->skb->rx_mini_skbuff[i];
                                mapping = pci_unmap_addr(ringp,mapping);
                                pci_unmap_page(ap->pdev, mapping,
-                                              ACE_MINI_BUFSIZE,
+                                              ACE_MINI_BUFSIZE - (2 + 16),
                                               PCI_DMA_FROMDEVICE);
 
                                ap->rx_mini_ring[i].size = 0;
@@ -717,7 +717,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
                        ringp = &ap->skb->rx_jumbo_skbuff[i];
                        mapping = pci_unmap_addr(ringp, mapping);
                        pci_unmap_page(ap->pdev, mapping,
-                                      ACE_JUMBO_BUFSIZE,
+                                      ACE_JUMBO_BUFSIZE - (2 + 16),
                                       PCI_DMA_FROMDEVICE);
 
                        ap->rx_jumbo_ring[i].size = 0;
@@ -1257,7 +1257,7 @@ static int __init ace_init(struct net_device *dev)
        set_aceaddr(&info->stats2_ptr, (dma_addr_t) tmp_ptr);
 
        set_aceaddr(&info->rx_std_ctrl.rngptr, ap->rx_ring_base_dma);
-       info->rx_std_ctrl.max_len = ACE_STD_BUFSIZE;
+       info->rx_std_ctrl.max_len = ACE_STD_MTU + ETH_HLEN + 4;
        info->rx_std_ctrl.flags =
          RCB_FLG_TCP_UDP_SUM | RCB_FLG_NO_PSEUDO_HDR | ACE_RCB_VLAN_FLAG;
 
@@ -1700,14 +1700,17 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
                struct rx_desc *rd;
                dma_addr_t mapping;
 
-               skb = alloc_skb(ACE_STD_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+               skb = alloc_skb(ACE_STD_BUFSIZE, GFP_ATOMIC);
                if (!skb)
                        break;
 
-               skb_reserve(skb, NET_IP_ALIGN);
+               /*
+                * Make sure IP header starts on a fresh cache line.
+                */
+               skb_reserve(skb, 2 + 16);
                mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
                                       offset_in_page(skb->data),
-                                      ACE_STD_BUFSIZE,
+                                      ACE_STD_BUFSIZE - (2 + 16),
                                       PCI_DMA_FROMDEVICE);
                ap->skb->rx_std_skbuff[idx].skb = skb;
                pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
@@ -1715,7 +1718,7 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
 
                rd = &ap->rx_std_ring[idx];
                set_aceaddr(&rd->addr, mapping);
-               rd->size = ACE_STD_BUFSIZE;
+               rd->size = ACE_STD_MTU + ETH_HLEN + 4;
                rd->idx = idx;
                idx = (idx + 1) % RX_STD_RING_ENTRIES;
        }
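
The refill loop above makes the layout explicit: alloc_skb() gets the full ACE_STD_BUFSIZE, skb_reserve(skb, 2 + 16) burns 18 bytes of headroom so the 14-byte Ethernet header ends at offset 32 and the IP header begins on a fresh 32-byte cache line (assuming the allocator hands back data that is at least 32-byte aligned), and only the remaining bytes are mapped, because that is all the NIC is allowed to write. A small, hypothetical debug check along those lines, not part of the driver:

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>

/* Sketch only: after skb_reserve(skb, 2 + 16), the IP header sits ETH_HLEN
 * bytes past skb->data and should land on a 32-byte boundary whenever the
 * underlying allocation was itself 32-byte aligned. */
static inline void ace_check_rx_align(const struct sk_buff *skb)
{
	WARN_ON(((unsigned long)(skb->data + ETH_HLEN)) & 31);
}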
@@ -1763,14 +1766,17 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
                struct rx_desc *rd;
                dma_addr_t mapping;
 
-               skb = alloc_skb(ACE_MINI_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+               skb = alloc_skb(ACE_MINI_BUFSIZE, GFP_ATOMIC);
                if (!skb)
                        break;
 
-               skb_reserve(skb, NET_IP_ALIGN);
+               /*
+                * Make sure the IP header ends up on a fresh cache line
+                */
+               skb_reserve(skb, 2 + 16);
                mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
                                       offset_in_page(skb->data),
-                                      ACE_MINI_BUFSIZE,
+                                      ACE_MINI_BUFSIZE - (2 + 16),
                                       PCI_DMA_FROMDEVICE);
                ap->skb->rx_mini_skbuff[idx].skb = skb;
                pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
@@ -1778,7 +1784,7 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
 
                rd = &ap->rx_mini_ring[idx];
                set_aceaddr(&rd->addr, mapping);
-               rd->size = ACE_MINI_BUFSIZE;
+               rd->size = ACE_MINI_SIZE;
                rd->idx = idx;
                idx = (idx + 1) % RX_MINI_RING_ENTRIES;
        }
@@ -1821,14 +1827,17 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
                struct rx_desc *rd;
                dma_addr_t mapping;
 
-               skb = alloc_skb(ACE_JUMBO_BUFSIZE + NET_IP_ALIGN, GFP_ATOMIC);
+               skb = alloc_skb(ACE_JUMBO_BUFSIZE, GFP_ATOMIC);
                if (!skb)
                        break;
 
-               skb_reserve(skb, NET_IP_ALIGN);
+               /*
+                * Make sure the IP header ends up on a fresh cache line
+                */
+               skb_reserve(skb, 2 + 16);
                mapping = pci_map_page(ap->pdev, virt_to_page(skb->data),
                                       offset_in_page(skb->data),
-                                      ACE_JUMBO_BUFSIZE,
+                                      ACE_JUMBO_BUFSIZE - (2 + 16),
                                       PCI_DMA_FROMDEVICE);
                ap->skb->rx_jumbo_skbuff[idx].skb = skb;
                pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
@@ -1836,7 +1845,7 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
 
                rd = &ap->rx_jumbo_ring[idx];
                set_aceaddr(&rd->addr, mapping);
-               rd->size = ACE_JUMBO_BUFSIZE;
+               rd->size = ACE_JUMBO_MTU + ETH_HLEN + 4;
                rd->idx = idx;
                idx = (idx + 1) % RX_JUMBO_RING_ENTRIES;
        }
@@ -2018,19 +2027,19 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
                         */
                case 0:
                        rip = &ap->skb->rx_std_skbuff[skbidx];
-                       mapsize = ACE_STD_BUFSIZE;
+                       mapsize = ACE_STD_BUFSIZE - (2 + 16);
                        rxdesc = &ap->rx_std_ring[skbidx];
                        std_count++;
                        break;
                case BD_FLG_JUMBO:
                        rip = &ap->skb->rx_jumbo_skbuff[skbidx];
-                       mapsize = ACE_JUMBO_BUFSIZE;
+                       mapsize = ACE_JUMBO_BUFSIZE - (2 + 16);
                        rxdesc = &ap->rx_jumbo_ring[skbidx];
                        atomic_dec(&ap->cur_jumbo_bufs);
                        break;
                case BD_FLG_MINI:
                        rip = &ap->skb->rx_mini_skbuff[skbidx];
-                       mapsize = ACE_MINI_BUFSIZE;
+                       mapsize = ACE_MINI_BUFSIZE - (2 + 16);
                        rxdesc = &ap->rx_mini_ring[skbidx];
                        mini_count++; 
                        break;
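
In the completion path, mapsize is what eventually reaches pci_unmap_page(), so it has to match the length handed to pci_map_page() in the refill loops (and used again by the unmap calls in acenic_remove_one() near the top); all three ring classes therefore subtract the same 2 + 16 bytes of headroom. One way to keep the two sides from drifting apart would be a shared helper; a hypothetical sketch, not part of the driver:

/* Sketch only: one definition of the DMA-visible length per ring class,
 * usable by both the refill and the unmap paths.  Purely illustrative. */
static inline int ace_rx_dma_len(int bufsize)
{
	return bufsize - (2 + 16);	/* allocation size minus reserved headroom */
}

/* e.g. mapsize = ace_rx_dma_len(ACE_STD_BUFSIZE);  -> 1518 for a 1500-byte MTU */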