linux 2.6.16.38 w/ vs2.0.3-rc1
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index ffe9ea3..ceb98fd 100644
@@ -35,7 +35,6 @@
 
 #include <linux/config.h>
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/types.h>
 #include <linux/errno.h>
 #include <linux/ioport.h>
@@ -59,7 +58,7 @@
 
 #include "ibmveth.h"
 
-#define DEBUG 1
+#undef DEBUG
 
 #define ibmveth_printk(fmt, args...) \
   printk(KERN_INFO "%s: " fmt, __FILE__, ## args)
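
Flipping DEBUG off matters because the driver's verbose macros compile away entirely when it is undefined. A minimal sketch of the usual pattern this relies on (the actual definitions sit just below this hunk and are assumed here, not quoted):

        #ifdef DEBUG
        #define ibmveth_debug_printk(fmt, args...) \
                printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)
        #else
        #define ibmveth_debug_printk(fmt, args...)      /* expands to nothing */
        #endif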
@@ -96,10 +95,10 @@ static void ibmveth_proc_unregister_driver(void);
 static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
-static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter*);
+static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
 
 #ifdef CONFIG_PROC_FS
-#define IBMVETH_PROC_DIR "ibmveth"
+#define IBMVETH_PROC_DIR "net/ibmveth"
 static struct proc_dir_entry *ibmveth_proc_dir;
 #endif
 
@@ -181,6 +180,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
        atomic_set(&pool->available, 0);
        pool->producer_index = 0;
        pool->consumer_index = 0;
+       pool->active = 0;
 
        return 0;
 }
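
For orientation, here is the shape of the buffer-pool state the rest of this patch manipulates; the field names are taken from hunks in this file, but the exact declaration lives in ibmveth.h and may differ:

        struct ibmveth_buff_pool {
                u32 index;              /* pool number, encoded in rx correlators */
                u32 size;               /* number of buffer slots */
                u32 buff_size;          /* bytes per buffer */
                u32 threshold;          /* replenish low-water mark */
                atomic_t available;     /* buffers currently posted to the hypervisor */
                u32 producer_index;
                u32 consumer_index;
                u16 *free_map;          /* ring of free slot indices */
                dma_addr_t *dma_addr;
                struct sk_buff **skbuff;
                int active;             /* new in this patch: pool enabled for rx */
        };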
@@ -218,7 +218,8 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
                ibmveth_assert(index != IBM_VETH_INVALID_MAP);
                ibmveth_assert(pool->skbuff[index] == NULL);
 
-               dma_addr = vio_map_single(adapter->vdev, skb->data, pool->buff_size, DMA_FROM_DEVICE);
+               dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
+                               pool->buff_size, DMA_FROM_DEVICE);
 
                pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
                pool->dma_addr[index] = dma_addr;
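
This hunk is part of the move off the VIO-specific vio_map_single()/vio_unmap_single() wrappers onto the generic DMA API; struct vio_dev embeds a struct device, so the conversion is mechanical. The pattern in isolation, as a sketch, using the 2.6.16-era dma_mapping_error() signature (no device argument):

        dma_addr_t addr;

        addr = dma_map_single(&vdev->dev, skb->data, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(addr)) {
                dev_kfree_skb_any(skb);         /* unwind on mapping failure */
                return -EIO;
        }
        /* ... hand the buffer to firmware ... */
        dma_unmap_single(&vdev->dev, addr, len, DMA_FROM_DEVICE);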
@@ -235,10 +236,12 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
                lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
                    
                if(lpar_rc != H_Success) {
-                       pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
+                       pool->free_map[free_index] = index;
                        pool->skbuff[index] = NULL;
                        pool->consumer_index--;
-                       vio_unmap_single(adapter->vdev, pool->dma_addr[index], pool->buff_size, DMA_FROM_DEVICE);
+                       dma_unmap_single(&adapter->vdev->dev,
+                                       pool->dma_addr[index], pool->buff_size,
+                                       DMA_FROM_DEVICE);
                        dev_kfree_skb_any(skb);
                        adapter->replenish_add_buff_failure++;
                        break;
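
Besides the DMA conversion, this hunk fixes a slot leak: on a failed h_add_logical_lan_buffer() the old code rewrote IBM_VETH_INVALID_MAP into the free map (it had already been set a few lines up), so the slot index was lost for good; writing index back makes the slot reusable. The invariant, restated as a comment:

        /*
         * free_map invariant assumed by the replenish loop:
         *   free_map[n] == IBM_VETH_INVALID_MAP -> entry n is consumed
         *   free_map[n] == i                    -> buffer slot i is free
         * An aborted add must therefore restore the index, not poison the entry.
         */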
@@ -252,37 +255,19 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
        atomic_add(buffers_added, &(pool->available));
 }
 
-/* check if replenishing is needed.  */
-static inline int ibmveth_is_replenishing_needed(struct ibmveth_adapter *adapter)
-{
-       return ((atomic_read(&adapter->rx_buff_pool[0].available) < adapter->rx_buff_pool[0].threshold) ||
-               (atomic_read(&adapter->rx_buff_pool[1].available) < adapter->rx_buff_pool[1].threshold) ||
-               (atomic_read(&adapter->rx_buff_pool[2].available) < adapter->rx_buff_pool[2].threshold));
-}
-
-/* kick the replenish tasklet if we need replenishing and it isn't already running */
-static inline void ibmveth_schedule_replenishing(struct ibmveth_adapter *adapter)
-{
-       if(ibmveth_is_replenishing_needed(adapter) && 
-          (atomic_dec_if_positive(&adapter->not_replenishing) == 0)) { 
-               schedule_work(&adapter->replenish_task);
-       }
-}
-
-/* replenish tasklet routine */
+/* replenish routine */
 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) 
 {
+       int i;
+
        adapter->replenish_task_cycles++;
 
-       ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
-       ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
-       ibmveth_replenish_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+       for(i = 0; i < IbmVethNumBufferPools; i++)
+               if(adapter->rx_buff_pool[i].active)
+                       ibmveth_replenish_buffer_pool(adapter, 
+                                                    &adapter->rx_buff_pool[i]);
 
        adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
-
-       atomic_inc(&adapter->not_replenishing);
-
-       ibmveth_schedule_replenishing(adapter);
 }
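
The workqueue plumbing (the replenish_task work item, the not_replenishing handshake, and the is-replenishing-needed predicate) is gone; replenishing now runs synchronously from the receive path. The resulting call flow, as visible in the hunks below:

        /*
         * New call flow (sketch):
         *   ibmveth_poll() -> ibmveth_replenish_task() -> per active pool:
         *                       ibmveth_replenish_buffer_pool()
         * ibmveth_open() primes the pools by faking one interrupt (see below).
         */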
 
 /* empty and free a buffer pool - also used to do cleanup in error paths */
@@ -290,16 +275,14 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
 {
        int i;
 
-       if(pool->free_map) {
-               kfree(pool->free_map);
-               pool->free_map  = NULL;
-       }
+       kfree(pool->free_map);
+       pool->free_map = NULL;
 
        if(pool->skbuff && pool->dma_addr) {
                for(i = 0; i < pool->size; ++i) {
                        struct sk_buff *skb = pool->skbuff[i];
                        if(skb) {
-                               vio_unmap_single(adapter->vdev,
+                               dma_unmap_single(&adapter->vdev->dev,
                                                 pool->dma_addr[i],
                                                 pool->buff_size,
                                                 DMA_FROM_DEVICE);
@@ -318,6 +301,7 @@ static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibm
                kfree(pool->skbuff);
                pool->skbuff = NULL;
        }
+       pool->active = 0;
 }
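
The guard around kfree() is dropped because kfree(NULL) is defined to be a no-op; only the reset to NULL is needed to keep the pool safe against a second free:

        kfree(pool->free_map);          /* safe even when free_map is NULL */
        pool->free_map = NULL;          /* so a repeated free stays harmless */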
 
 /* remove a buffer from a pool */
@@ -337,7 +321,7 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
 
        adapter->rx_buff_pool[pool].skbuff[index] = NULL;
 
-       vio_unmap_single(adapter->vdev,
+       dma_unmap_single(&adapter->vdev->dev,
                         adapter->rx_buff_pool[pool].dma_addr[index],
                         adapter->rx_buff_pool[pool].buff_size,
                         DMA_FROM_DEVICE);
@@ -376,6 +360,12 @@ static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
        ibmveth_assert(pool < IbmVethNumBufferPools);
        ibmveth_assert(index < adapter->rx_buff_pool[pool].size);
 
+       if(!adapter->rx_buff_pool[pool].active) {
+               ibmveth_rxq_harvest_buffer(adapter);
+               ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
+               return;
+       }
+
        desc.desc = 0;
        desc.fields.valid = 1;
        desc.fields.length = adapter->rx_buff_pool[pool].buff_size;
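
The new early return covers a pool that ibmveth_change_mtu() deactivated while its buffers were still posted: when such a buffer comes back through the receive queue it is harvested and the pool torn down, instead of being recycled into a pool that should no longer exist. In outline (a condensation of the hunk above, not new code):

        if (!pool->active) {
                ibmveth_rxq_harvest_buffer(adapter);     /* consume the rxq entry */
                ibmveth_free_buffer_pool(adapter, pool); /* finish deferred teardown */
                return;                                  /* nothing to repost */
        }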
@@ -406,9 +396,13 @@ static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
+       int i;
+
        if(adapter->buffer_list_addr != NULL) {
                if(!dma_mapping_error(adapter->buffer_list_dma)) {
-                       vio_unmap_single(adapter->vdev, adapter->buffer_list_dma, 4096, DMA_BIDIRECTIONAL);
+                       dma_unmap_single(&adapter->vdev->dev,
+                                       adapter->buffer_list_dma, 4096,
+                                       DMA_BIDIRECTIONAL);
                        adapter->buffer_list_dma = DMA_ERROR_CODE;
                }
                free_page((unsigned long)adapter->buffer_list_addr);
@@ -417,7 +411,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 
        if(adapter->filter_list_addr != NULL) {
                if(!dma_mapping_error(adapter->filter_list_dma)) {
-                       vio_unmap_single(adapter->vdev, adapter->filter_list_dma, 4096, DMA_BIDIRECTIONAL);
+                       dma_unmap_single(&adapter->vdev->dev,
+                                       adapter->filter_list_dma, 4096,
+                                       DMA_BIDIRECTIONAL);
                        adapter->filter_list_dma = DMA_ERROR_CODE;
                }
                free_page((unsigned long)adapter->filter_list_addr);
@@ -426,33 +422,34 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 
        if(adapter->rx_queue.queue_addr != NULL) {
                if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
-                       vio_unmap_single(adapter->vdev, adapter->rx_queue.queue_dma, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+                       dma_unmap_single(&adapter->vdev->dev,
+                                       adapter->rx_queue.queue_dma,
+                                       adapter->rx_queue.queue_len,
+                                       DMA_BIDIRECTIONAL);
                        adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
                }
                kfree(adapter->rx_queue.queue_addr);
                adapter->rx_queue.queue_addr = NULL;
        }
 
-       ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[0]);
-       ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[1]);
-       ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[2]);
+       for(i = 0; i < IbmVethNumBufferPools; i++)
+               ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
 }
 
 static int ibmveth_open(struct net_device *netdev)
 {
        struct ibmveth_adapter *adapter = netdev->priv;
        u64 mac_address = 0;
-       int rxq_entries;
+       int rxq_entries = 1;
        unsigned long lpar_rc;
        int rc;
        union ibmveth_buf_desc rxq_desc;
+       int i;
 
        ibmveth_debug_printk("open starting\n");
 
-       rxq_entries =
-               adapter->rx_buff_pool[0].size +
-               adapter->rx_buff_pool[1].size +
-               adapter->rx_buff_pool[2].size + 1;
+       for(i = 0; i < IbmVethNumBufferPools; i++)
+               rxq_entries += adapter->rx_buff_pool[i].size;
     
        adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
        adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
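
The loop above replaces the old hand-written three-pool sum; note that rxq_entries now starts at 1, which preserves the trailing "+ 1" slot from the removed expression:

        /* equivalent to the removed open-coded form:
         *   rxq_entries = pool[0].size + pool[1].size + pool[2].size + 1;
         * with the extra slot supplied by the "= 1" initializer, so the
         * loop works for any IbmVethNumBufferPools. */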
@@ -472,9 +469,13 @@ static int ibmveth_open(struct net_device *netdev)
                return -ENOMEM;
        }
 
-       adapter->buffer_list_dma = vio_map_single(adapter->vdev, adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->filter_list_dma = vio_map_single(adapter->vdev, adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->rx_queue.queue_dma = vio_map_single(adapter->vdev, adapter->rx_queue.queue_addr, adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
+       adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+                       adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
+       adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+                       adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
+       adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+                       adapter->rx_queue.queue_addr,
+                       adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
        if((dma_mapping_error(adapter->buffer_list_dma) ) ||
           (dma_mapping_error(adapter->filter_list_dma)) ||
@@ -488,14 +489,8 @@ static int ibmveth_open(struct net_device *netdev)
        adapter->rx_queue.num_slots = rxq_entries;
        adapter->rx_queue.toggle = 1;
 
-       if(ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[0]) ||
-          ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[1]) ||
-          ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[2]))
-       {
-               ibmveth_error_printk("unable to allocate buffer pools\n");
-               ibmveth_cleanup(adapter);
-               return -ENOMEM;
-       }
+       /* call change_mtu to init the buffer pools based on the initial mtu */
+       ibmveth_change_mtu(netdev, netdev->mtu);
 
        memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
        mac_address = mac_address >> 16;
@@ -518,7 +513,7 @@ static int ibmveth_open(struct net_device *netdev)
 
        if(lpar_rc != H_Success) {
                ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
-               ibmveth_error_printk("buffer TCE:0x%x filter TCE:0x%x rxq desc:0x%lx MAC:0x%lx\n",
+               ibmveth_error_printk("buffer TCE:0x%lx filter TCE:0x%lx rxq desc:0x%lx MAC:0x%lx\n",
                                     adapter->buffer_list_dma,
                                     adapter->filter_list_dma,
                                     rxq_desc.desc,
@@ -538,10 +533,10 @@ static int ibmveth_open(struct net_device *netdev)
                return rc;
        }
 
-       netif_start_queue(netdev);
+       ibmveth_debug_printk("initial replenish cycle\n");
+       ibmveth_interrupt(netdev->irq, netdev, NULL);
 
-       ibmveth_debug_printk("scheduling initial replenish cycle\n");
-       ibmveth_schedule_replenishing(adapter);
+       netif_start_queue(netdev);
 
        ibmveth_debug_printk("open complete\n");
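
Instead of scheduling replenish work, open now primes the receive pools by invoking the interrupt handler directly (NULL pt_regs) and only then opens the transmit queue, so tx cannot start before the first fill. The sequence, condensed from the hunk above:

        ibmveth_interrupt(netdev->irq, netdev, NULL);   /* fake irq: first replenish */
        netif_start_queue(netdev);                      /* tx only after pools exist */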
 
@@ -559,9 +554,6 @@ static int ibmveth_close(struct net_device *netdev)
 
        free_irq(netdev->irq, netdev);
 
-       cancel_delayed_work(&adapter->replenish_task);
-       flush_scheduled_work();
-
        do {
                lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
        } while (H_isLongBusy(lpar_rc) || (lpar_rc == H_Busy));
@@ -626,12 +618,18 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        unsigned long lpar_rc;
        int nfrags = 0, curfrag;
        unsigned long correlator;
+       unsigned long flags;
        unsigned int retry_count;
+       unsigned int tx_dropped = 0;
+       unsigned int tx_bytes = 0;
+       unsigned int tx_packets = 0;
+       unsigned int tx_send_failed = 0;
+       unsigned int tx_map_failed = 0;
 
        if ((skb_shinfo(skb)->nr_frags + 1) > IbmVethMaxSendFrags) {
-               adapter->stats.tx_dropped++;
-               dev_kfree_skb(skb);
-               return 0;
+               tx_dropped++;
+               goto out;
        }
 
        memset(&desc, 0, sizeof(desc));
@@ -644,16 +642,15 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        /* map the initial fragment */
        desc[0].fields.length  = nfrags ? skb->len - skb->data_len : skb->len;
-       desc[0].fields.address = vio_map_single(adapter->vdev, skb->data,
+       desc[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
                                        desc[0].fields.length, DMA_TO_DEVICE);
        desc[0].fields.valid   = 1;
 
        if(dma_mapping_error(desc[0].fields.address)) {
                ibmveth_error_printk("tx: unable to map initial fragment\n");
-               adapter->tx_map_failed++;
-               adapter->stats.tx_dropped++;
-               dev_kfree_skb(skb);
-               return 0;
+               tx_map_failed++;
+               tx_dropped++;
+               goto out;
        }
 
        curfrag = nfrags;
@@ -662,7 +659,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        while(curfrag--) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[curfrag];
                desc[curfrag+1].fields.address
-                       = vio_map_single(adapter->vdev,
+                       = dma_map_single(&adapter->vdev->dev,
                                page_address(frag->page) + frag->page_offset,
                                frag->size, DMA_TO_DEVICE);
                desc[curfrag+1].fields.length = frag->size;
@@ -670,18 +667,17 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 
                if(dma_mapping_error(desc[curfrag+1].fields.address)) {
                        ibmveth_error_printk("tx: unable to map fragment %d\n", curfrag);
-                       adapter->tx_map_failed++;
-                       adapter->stats.tx_dropped++;
+                       tx_map_failed++;
+                       tx_dropped++;
                        /* Free all the mappings we just created */
                        while(curfrag < nfrags) {
-                               vio_unmap_single(adapter->vdev,
+                               dma_unmap_single(&adapter->vdev->dev,
                                                 desc[curfrag+1].fields.address,
                                                 desc[curfrag+1].fields.length,
                                                 DMA_TO_DEVICE);
                                curfrag++;
                        }
-                       dev_kfree_skb(skb);
-                       return 0;
+                       goto out;
                }
        }
 
@@ -706,17 +702,28 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                        ibmveth_error_printk("tx: desc[%i] valid=%d, len=%d, address=0x%d\n", i,
                                             desc[i].fields.valid, desc[i].fields.length, desc[i].fields.address);
                }
-               adapter->tx_send_failed++;
-               adapter->stats.tx_dropped++;
+               tx_send_failed++;
+               tx_dropped++;
        } else {
-               adapter->stats.tx_packets++;
-               adapter->stats.tx_bytes += skb->len;
+               tx_packets++;
+               tx_bytes += skb->len;
+               netdev->trans_start = jiffies;
        }
 
        do {
-               vio_unmap_single(adapter->vdev, desc[nfrags].fields.address, desc[nfrags].fields.length, DMA_TO_DEVICE);
+               dma_unmap_single(&adapter->vdev->dev,
+                               desc[nfrags].fields.address,
+                               desc[nfrags].fields.length, DMA_TO_DEVICE);
        } while(--nfrags >= 0);
 
+out:   spin_lock_irqsave(&adapter->stats_lock, flags);
+       adapter->stats.tx_dropped += tx_dropped;
+       adapter->stats.tx_bytes += tx_bytes;
+       adapter->stats.tx_packets += tx_packets;
+       adapter->tx_send_failed += tx_send_failed;
+       adapter->tx_map_failed += tx_map_failed;
+       spin_unlock_irqrestore(&adapter->stats_lock, flags);
+
        dev_kfree_skb(skb);
        return 0;
 }
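
With NETIF_F_LLTX set (see the probe hunk below), the networking core no longer serializes ibmveth_start_xmit() under the device's xmit_lock, so the shared counters cannot be bumped directly from concurrent senders. The patch therefore accumulates deltas in locals on the hot path and folds them in once, under a driver-private lock, at the single exit. The pattern in isolation (a sketch using the names from this hunk):

        unsigned long flags;
        unsigned int tx_packets = 0, tx_bytes = 0, tx_dropped = 0;

        /* ... fast path updates only the locals ... */

        spin_lock_irqsave(&adapter->stats_lock, flags);
        adapter->stats.tx_packets += tx_packets;
        adapter->stats.tx_bytes   += tx_bytes;
        adapter->stats.tx_dropped += tx_dropped;
        spin_unlock_irqrestore(&adapter->stats_lock, flags);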
@@ -760,13 +767,14 @@ static int ibmveth_poll(struct net_device *netdev, int *budget)
                                adapter->stats.rx_packets++;
                                adapter->stats.rx_bytes += length;
                                frames_processed++;
+                               netdev->last_rx = jiffies;
                        }
                } else {
                        more_work = 0;
                }
        } while(more_work && (frames_processed < max_frames_to_process));
 
-       ibmveth_schedule_replenishing(adapter);
+       ibmveth_replenish_task(adapter);
 
        if(more_work) {
                /* more work to do - return that we are not done yet */
@@ -867,17 +875,54 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
 
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
-       if ((new_mtu < 68) || (new_mtu > (1<<20)))
+       struct ibmveth_adapter *adapter = dev->priv;
+       int i;
+       int prev_smaller = 1;
+
+       if ((new_mtu < 68) ||
+           (new_mtu > pool_size[IbmVethNumBufferPools - 1] - IBMVETH_BUFF_OH))
                return -EINVAL;
+
+       for(i = 0; i < IbmVethNumBufferPools; i++) {
+               int activate = 0;
+               if (new_mtu > (pool_size[i] - IBMVETH_BUFF_OH)) {
+                       activate = 1;
+                       prev_smaller = 1;
+               } else {
+                       if (prev_smaller)
+                               activate = 1;
+                       prev_smaller = 0;
+               }
+
+               if (activate && !adapter->rx_buff_pool[i].active) {
+                       struct ibmveth_buff_pool *pool = 
+                                               &adapter->rx_buff_pool[i];
+                       if(ibmveth_alloc_buffer_pool(pool)) {
+                               ibmveth_error_printk("unable to alloc pool\n");
+                               return -ENOMEM;
+                       }
+                       adapter->rx_buff_pool[i].active = 1;
+               } else if (!activate && adapter->rx_buff_pool[i].active) {
+                       adapter->rx_buff_pool[i].active = 0;
+                       h_free_logical_lan_buffer(adapter->vdev->unit_address,
+                                         (u64)pool_size[i]);
+               }
+       }
+
+       /* kick the interrupt handler so that the new buffer pools get
+          replenished or deallocated */
+       ibmveth_interrupt(dev->irq, dev, NULL);
+
        dev->mtu = new_mtu;
        return 0;       
 }
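
The activate/prev_smaller dance implements one rule: every pool too small for the new MTU stays active (small frames still want it), plus exactly the first pool large enough to hold an MTU-sized frame; larger pools are deactivated and their posted buffers reclaimed via h_free_logical_lan_buffer(). Assuming pool_size[] ascends, as the range check above implies, the rule reduces to this hypothetical helper (not in the patch):

        static int pool_wanted(int new_mtu, int i)
        {
                if (new_mtu > pool_size[i] - IBMVETH_BUFF_OH)
                        return 1;       /* MTU does not fit: keep for small frames */
                /* the first pool where the MTU fits is the one that carries it */
                return i == 0 || new_mtu > pool_size[i - 1] - IBMVETH_BUFF_OH;
        }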
 
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
-       int rc;
+       int rc, i;
        struct net_device *netdev;
-       struct ibmveth_adapter *adapter;
+       struct ibmveth_adapter *adapter = NULL;
 
        unsigned char *mac_addr_p;
        unsigned int *mcastFilterSize_p;
@@ -944,23 +989,21 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
        netdev->ethtool_ops           = &netdev_ethtool_ops;
        netdev->change_mtu         = ibmveth_change_mtu;
        SET_NETDEV_DEV(netdev, &dev->dev);
+       netdev->features |= NETIF_F_LLTX; 
+       spin_lock_init(&adapter->stats_lock);
 
        memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
 
-       ibmveth_init_buffer_pool(&adapter->rx_buff_pool[0], 0, IbmVethPool0DftCnt, IbmVethPool0DftSize);
-       ibmveth_init_buffer_pool(&adapter->rx_buff_pool[1], 1, IbmVethPool1DftCnt, IbmVethPool1DftSize);
-       ibmveth_init_buffer_pool(&adapter->rx_buff_pool[2], 2, IbmVethPool2DftCnt, IbmVethPool2DftSize);
+       for(i = 0; i < IbmVethNumBufferPools; i++)
+               ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, 
+                                        pool_count[i], pool_size[i]);
 
        ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
 
-       INIT_WORK(&adapter->replenish_task, (void*)ibmveth_replenish_task, (void*)adapter);
-
        adapter->buffer_list_dma = DMA_ERROR_CODE;
        adapter->filter_list_dma = DMA_ERROR_CODE;
        adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
 
-       atomic_set(&adapter->not_replenishing, 1);
-
        ibmveth_debug_printk("registering netdev...\n");
 
        rc = register_netdev(netdev);
@@ -994,7 +1037,7 @@ static int __devexit ibmveth_remove(struct vio_dev *dev)
 #ifdef CONFIG_PROC_FS
 static void ibmveth_proc_register_driver(void)
 {
-       ibmveth_proc_dir = create_proc_entry(IBMVETH_PROC_DIR, S_IFDIR, proc_net);
+       ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, NULL);
        if (ibmveth_proc_dir) {
                SET_MODULE_OWNER(ibmveth_proc_dir);
        }
@@ -1002,7 +1045,7 @@ static void ibmveth_proc_register_driver(void)
 
 static void ibmveth_proc_unregister_driver(void)
 {
-       remove_proc_entry(IBMVETH_PROC_DIR, proc_net);
+       remove_proc_entry(IBMVETH_PROC_DIR, NULL);
 }
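
create_proc_entry(..., S_IFDIR, proc_net) is replaced by proc_mkdir() with a NULL parent, which resolves the name from the /proc root; that is why IBMVETH_PROC_DIR grew the "net/" prefix earlier in this patch. The two spellings land in the same place:

        create_proc_entry("ibmveth", S_IFDIR, proc_net); /* old: /proc/net/ibmveth */
        proc_mkdir("net/ibmveth", NULL);                 /* new: /proc/net/ibmveth */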
 
 static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) 
@@ -1128,16 +1171,18 @@ static void ibmveth_proc_unregister_driver(void)
 
 static struct vio_device_id ibmveth_device_table[] __devinitdata= {
        { "network", "IBM,l-lan"},
-       { 0,}
+       { "", "" }
 };
-
 MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
 
 static struct vio_driver ibmveth_driver = {
-       .name        = (char *)ibmveth_driver_name,
-       .id_table    = ibmveth_device_table,
-       .probe       = ibmveth_probe,
-       .remove      = ibmveth_remove
+       .id_table       = ibmveth_device_table,
+       .probe          = ibmveth_probe,
+       .remove         = ibmveth_remove,
+       .driver         = {
+               .name   = ibmveth_driver_name,
+               .owner  = THIS_MODULE,
+       }
 };
 
 static int __init ibmveth_module_init(void)
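
With .name and .owner now carried in the embedded struct device_driver, registration itself is untouched. The module hooks are cut off here, so as an assumption (not the patch's text) they presumably keep the usual VIO-bus shape:

        static int __init ibmveth_module_init(void)
        {
                ibmveth_proc_register_driver();         /* assumed ordering */
                return vio_register_driver(&ibmveth_driver);
        }

        static void __exit ibmveth_module_exit(void)
        {
                vio_unregister_driver(&ibmveth_driver);
                ibmveth_proc_unregister_driver();
        }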