static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
+ struct sk_buff *bounce_skb;
dma_addr_t mapping;
u32 len, entry, ctrl;
return 1;
}
- entry = bp->tx_prod;
mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
if(mapping+len > B44_DMA_MASK) {
/* Chip can't handle DMA to/from >1GB, use bounce buffer */
- pci_unmap_single(bp->pdev, mapping, len,PCI_DMA_TODEVICE);
- memcpy(bp->tx_bufs+entry*TX_PKT_BUF_SZ,skb->data,skb->len);
- mapping = pci_map_single(bp->pdev, bp->tx_bufs+entry*TX_PKT_BUF_SZ, len, PCI_DMA_TODEVICE);
+ pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
+
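+ /* Allocate the bounce buffer from ZONE_DMA (low memory) so the chip,
+  * which cannot DMA above 1GB, has a chance of addressing it; the new
+  * mapping is still re-checked below. */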
+ bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
+ GFP_ATOMIC|GFP_DMA);
+ if (!bounce_skb)
+ return NETDEV_TX_BUSY;
+
+ mapping = pci_map_single(bp->pdev, bounce_skb->data,
+ len, PCI_DMA_TODEVICE);
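+ /* If even the GFP_DMA buffer maps above the limit, back off and let
+  * the stack retry the packet later. */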
+ if(mapping+len > B44_DMA_MASK) {
+ pci_unmap_single(bp->pdev, mapping,
+ len, PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(bounce_skb);
+ return NETDEV_TX_BUSY;
+ }
+
+ memcpy(skb_put(bounce_skb, len), skb->data, len);
+ dev_kfree_skb_any(skb);
+ skb = bounce_skb;
}
+ entry = bp->tx_prod;
bp->tx_buffers[entry].skb = skb;
pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);
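Because the bounce copy replaces the caller's skb in bp->tx_buffers[entry].skb,
no extra bookkeeping is needed on the reclaim side: the usual completion loop in
b44_tx() unmaps and frees whichever skb sits in the ring slot. Roughly (a sketch
of the unchanged completion path, not part of this patch):

	for (cons = bp->tx_cons; cons != bp->tx_prod; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);		/* frees the bounce skb too */
	}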
bp->tx_ring, bp->tx_ring_dma);
bp->tx_ring = NULL;
}
- if (bp->tx_bufs) {
- pci_free_consistent(bp->pdev, B44_TX_RING_SIZE * TX_PKT_BUF_SZ,
- bp->tx_bufs, bp->tx_bufs_dma);
- bp->tx_bufs = NULL;
- }
}
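With bounce buffers now allocated per packet, the dedicated coherent tx_bufs
area no longer exists, so its teardown above goes away. The only coherent
memory left for b44_free_consistent() to release is the two descriptor rings;
for reference, the rx side follows the same unchanged pattern as the tx_ring
block shown above:

	if (bp->rx_ring) {
		pci_free_consistent(bp->pdev, DMA_TABLE_BYTES,
				    bp->rx_ring, bp->rx_ring_dma);
		bp->rx_ring = NULL;
	}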
goto out_err;
memset(bp->tx_buffers, 0, size);
- size = B44_TX_RING_SIZE * TX_PKT_BUF_SZ;
- bp->tx_bufs = pci_alloc_consistent(bp->pdev, size, &bp->tx_bufs_dma);
- if (!bp->tx_bufs)
- goto out_err;
- memset(bp->tx_bufs, 0, size);
-
size = DMA_TABLE_BYTES;
bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
if (!bp->rx_ring)
goto out_err;
}
}
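For reference, every goto out_err in b44_alloc_consistent() (including the ones
in the removed tx_bufs block) unwinds through the same epilogue, which is why
nothing else needs to change here; a sketch of that unchanged tail:

	return 0;

out_err:
	b44_free_consistent(bp);
	return -ENOMEM;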
-static int b44_suspend(struct pci_dev *pdev, u32 state)
+static int b44_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct b44 *bp = netdev_priv(dev);
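The b44_suspend() change simply follows the PCI power-management API of this
kernel series, which switched the suspend callback's state argument from a raw
u32 to pm_message_t. The corresponding prototypes in struct pci_driver look
roughly like this:

	int (*suspend)(struct pci_dev *dev, pm_message_t state);
	int (*resume)(struct pci_dev *dev);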