/* */
/* IBM eServer i/pSeries Virtual Ethernet Device Driver */
/* Copyright (C) 2003 IBM Corp. */
/* Originally written by Dave Larson (larson1@us.ibm.com) */
/* Maintained by Santiago Leon (santil@us.ibm.com) */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "ibmveth"
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
-static const char ibmveth_driver_version[] = "1.0";
+#define ibmveth_driver_version "1.03"
-MODULE_AUTHOR("Dave Larson <larson1@us.ibm.com>");
+MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(ibmveth_driver_version);
/* simple methods of getting data from the current rxq entry */
static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
free_index = pool->consumer_index++ % pool->size;
index = pool->free_map[free_index];
- ibmveth_assert(index != 0xffff);
+ ibmveth_assert(index != IBM_VETH_INVALID_MAP);
ibmveth_assert(pool->skbuff[index] == NULL);
- dma_addr = vio_map_single(adapter->vdev, skb->data, pool->buff_size, DMA_FROM_DEVICE);
+ dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+ pool->buff_size, DMA_FROM_DEVICE);
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool->dma_addr[index] = dma_addr;
pool->skbuff[index] = skb;
lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
if(lpar_rc != H_Success) {
+ pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
pool->skbuff[index] = NULL;
pool->consumer_index--;
- vio_unmap_single(adapter->vdev, pool->dma_addr[index], pool->buff_size, DMA_FROM_DEVICE);
+ dma_unmap_single(&adapter->vdev->dev,
+ pool->dma_addr[index], pool->buff_size,
+ DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
adapter->replenish_add_buff_failure++;
break;
} else {
- pool->free_map[free_index] = 0xffff;
buffers_added++;
adapter->replenish_add_buff_success++;
}
(atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
}
+/* kick the replenish tasklet if we need replenishing and it isn't already running */
+static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
+{
+ if(ibmveth_is_replenishing_needed(adapter) &&
+ (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
+ schedule_work(&adapter->replenish_task);
+ }
+}
+
/* replenish tasklet routine */
/* NOTE(review): this span is a garbled diff fragment -- the '-' lines below
 * are the old code being removed (the schedule helper moving earlier in the
 * file) and the '+' line is its replacement call; the visible body also
 * appears truncated by chunking (the per-pool refill loop is not shown).
 * Comments describe only what is visible here. */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
/* snapshot the hypervisor's "no buffer available" counter, which firmware
 * maintains in the last 8 bytes of the 4K buffer-list page */
adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
/* re-arm the scheduling gate that ibmveth_schedule_replenishing() claimed
 * with atomic_dec_if_positive() before queueing this task */
atomic_inc(&adapter->not_replenishing);
- ibmveth_assert(atomic_read(&adapter->not_replenishing) == 1);
-}
-/* kick the replenish tasklet if we need replenishing and it isn't already running */
-static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
-{
- if(ibmveth_is_replenishing_needed(adapter) &&
- (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) {
- schedule_work(&adapter->replenish_task);
- }
/* re-check: if the pools drained again while we were refilling, queue
 * another pass immediately */
+ ibmveth_schedule_replenishing(adapter);
}
/* empty and free ana buffer pool - also used to do cleanup in error paths */
for(i = 0; i < pool->size; ++i) {
struct sk_buff *skb = pool->skbuff[i];
if(skb) {
- vio_unmap_single(adapter->vdev,
+ dma_unmap_single(&adapter->vdev->dev,
pool->dma_addr[i],
pool->buff_size,
DMA_FROM_DEVICE);
adapter->rx_buff_pool[pool].skbuff[index] = NULL;
- vio_unmap_single(adapter->vdev,
+ dma_unmap_single(&adapter->vdev->dev,
adapter->rx_buff_pool[pool].dma_addr[index],
adapter->rx_buff_pool[pool].buff_size,
DMA_FROM_DEVICE);
{
if(adapter->buffer_list_addr != NULL) {
if(!dma_mapping_error(adapter->buffer_list_dma)) {
- vio_unmap_single(adapter->vdev, adapter->buffer_list_dma, 4096, DMA_BIDIRECTIONAL);
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->buffer_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
adapter->buffer_list_dma = DMA_ERROR_CODE;
}
free_page((unsigned long)adapter->buffer_list_addr);
if(adapter->filter_list_addr != NULL) {
if(!dma_mapping_error(adapter->filter_list_dma)) {
- vio_unmap_single(adapter->vdev, adapter->filter_list_dma, 4096, DMA_BIDIRECTIONAL);
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->filter_list_dma, 4096,
+ DMA_BIDIRECTIONAL);
adapter->filter_list_dma = DMA_ERROR_CODE;
}
free_page((unsigned long)adapter->filter_list_addr);
if(adapter->rx_queue.queue_addr != NULL) {
if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
- vio_unmap_single(adapter->vdev, adapter->rx_queue.queue_dma, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+ dma_unmap_single(&adapter->vdev->dev,
+ adapter->rx_queue.queue_dma,
+ adapter->rx_queue.queue_len,
+ DMA_BIDIRECTIONAL);
adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
}
kfree(adapter->rx_queue.queue_addr);
return -ENOMEM;
}
- adapter->buffer_list_dma = vio_map_single(adapter->vdev, adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
- adapter->filter_list_dma = vio_map_single(adapter->vdev, adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
- adapter->rx_queue.queue_dma = vio_map_single(adapter->vdev, adapter->rx_queue.queue_addr, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+ adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
+ adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+ adapter->rx_queue.queue_addr,
+ adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
if((dma_mapping_error(adapter->buffer_list_dma) ) ||
(dma_mapping_error(adapter->filter_list_dma)) ||
ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
do {
rc = h_free_logical_lan(adapter->vdev->unit_address);
- } while H_isLongBusy(rc);
+ } while (H_isLongBusy(rc) || (rc == H_Busy));
ibmveth_cleanup(adapter);
return rc;
do {
lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
- } while H_isLongBusy(lpar_rc);
+ } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
if(lpar_rc != H_Success)
{
}
/*
 * ethtool get_link hook.
 *
 * The virtual ethernet adapter is attached to a hypervisor-internal switch,
 * so the link is always present; report carrier up unconditionally.
 * (The earlier "return 0" made ethtool report "Link detected: no" even
 * though the device was passing traffic; the stray diff markers in this
 * span are also resolved here.)
 */
static u32 netdev_get_link(struct net_device *dev) {
	return 1;
}
static struct ethtool_ops netdev_ethtool_ops = {
/* map the initial fragment */
desc[0].fields.length = nfrags ? skb->len - skb->data_len : skb->len;
- desc[0].fields.address = vio_map_single(adapter->vdev, skb->data,
+ desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
desc[0].fields.length, DMA_TO_DEVICE);
desc[0].fields.valid = 1;
while(curfrag--) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
desc[curfrag+1].fields.address
- = vio_map_single(adapter->vdev,
+ = dma_map_single(&adapter->vdev->dev,
page_address(frag->page) + frag->page_offset,
frag->size, DMA_TO_DEVICE);
desc[curfrag+1].fields.length = frag->size;
adapter->stats.tx_dropped++;
/* Free all the mappings we just created */
while(curfrag < nfrags) {
- vio_unmap_single(adapter->vdev,
+ dma_unmap_single(&adapter->vdev->dev,
desc[curfrag+1].fields.address,
desc[curfrag+1].fields.length,
DMA_TO_DEVICE);
}
do {
- vio_unmap_single(adapter->vdev, desc[nfrags].fields.address, desc[nfrags].fields.length, DMA_TO_DEVICE);
+ dma_unmap_single(&adapter->vdev->dev,
+ desc[nfrags].fields.address,
+ desc[nfrags].fields.length, DMA_TO_DEVICE);
} while(--nfrags >= 0);
dev_kfree_skb(skb);
if(ibmveth_rxq_pending_buffer(adapter)) {
struct sk_buff *skb;
+ rmb();
+
if(!ibmveth_rxq_buffer_valid(adapter)) {
wmb(); /* suggested by larson1 */
adapter->rx_invalid_buffer++;
mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
if(!mac_addr_p) {
- ibmveth_error_printk("Can't find VETH_MAC_ADDR attribute\n");
+ printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
+ "attribute\n", __FILE__, __LINE__);
return 0;
}
mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
if(!mcastFilterSize_p) {
- ibmveth_error_printk("Can't find VETH_MCAST_FILTER_SIZE attribute\n");
+ printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
+ "VETH_MCAST_FILTER_SIZE attribute\n",
+ __FILE__, __LINE__);
return 0;
}