/******************************************************************************
 * drivers/xen/netback/netback.c
 *
 * Back-end of the driver for virtual network devices. This portion of the
 * driver exports a 'unified' network-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/xen/netfront/netfront.c
 *
 * Copyright (c) 2002-2005, K A Fraser
 */

#include <asm-xen/balloon.h>
#include <asm-xen/evtchn.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
#include <linux/delay.h>
#endif
static void netif_idx_release(u16 pending_idx);
static void netif_page_release(struct page *page);
static void make_tx_response(netif_t *netif,
static int make_rx_response(netif_t *netif,

static void net_tx_action(unsigned long unused);
static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);

static void net_rx_action(unsigned long unused);
static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);

static struct timer_list net_timer;

static struct sk_buff_head rx_queue;
static multicall_entry_t rx_mcl[NETIF_RX_RING_SIZE*2+1];
static mmu_update_t rx_mmu[NETIF_RX_RING_SIZE];
static struct mmuext_op rx_mmuext[NETIF_RX_RING_SIZE];
static unsigned char rx_notify[NR_EVENT_CHANNELS];
/* Don't currently gate addition of an interface to the tx scheduling list. */
#define tx_work_exists(_if) (1)

#define MAX_PENDING_REQS 256

static unsigned long mmap_vstart;
#define MMAP_VADDR(_req) (mmap_vstart + ((_req) * PAGE_SIZE))

#define PKT_PROT_LEN 64
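/*
 * PKT_PROT_LEN is the number of bytes of each transmitted packet that are
 * copied into the backend's own skb; anything beyond this stays in the
 * mapped guest page and is attached as a page fragment (see net_tx_action()).
 */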
    netif_tx_request_t req;
} pending_tx_info[MAX_PENDING_REQS];
static u16 pending_ring[MAX_PENDING_REQS];
typedef unsigned int PEND_RING_IDX;
#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
static PEND_RING_IDX pending_prod, pending_cons;
#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
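/*
 * pending_ring[] holds the indices of currently free pending_tx_info[] slots,
 * consumed at pending_cons and refilled at pending_prod. MASK_PEND_IDX relies
 * on MAX_PENDING_REQS being a power of two (e.g. with MAX_PENDING_REQS == 256,
 * ring index 260 maps to slot 4).
 */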
/* Freed TX SKBs get batched on this ring before return to pending_ring. */
static u16 dealloc_ring[MAX_PENDING_REQS];
static PEND_RING_IDX dealloc_prod, dealloc_cons;

static struct sk_buff_head tx_queue;
static multicall_entry_t tx_mcl[MAX_PENDING_REQS];

static struct list_head net_schedule_list;
static spinlock_t net_schedule_list_lock;

#define MAX_MFN_ALLOC 64
static unsigned long mfn_list[MAX_MFN_ALLOC];
static unsigned int alloc_index = 0;
static spinlock_t mfn_lock = SPIN_LOCK_UNLOCKED;
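/*
 * mfn_list[] is a small local cache of machine frames handed out by Xen.
 * alloc_mfn() refills it in batches of up to MAX_MFN_ALLOC via
 * MEMOP_increase_reservation and returns 0 when no frame is available;
 * free_mfn() returns a frame to the cache, or back to Xen if the cache is full.
 */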
static unsigned long alloc_mfn(void)
    unsigned long mfn = 0, flags;
    spin_lock_irqsave(&mfn_lock, flags);
    if ( unlikely(alloc_index == 0) )
        alloc_index = HYPERVISOR_dom_mem_op(
            MEMOP_increase_reservation, mfn_list, MAX_MFN_ALLOC, 0);
    if ( alloc_index != 0 )
        mfn = mfn_list[--alloc_index];
    spin_unlock_irqrestore(&mfn_lock, flags);

static void free_mfn(unsigned long mfn)
    spin_lock_irqsave(&mfn_lock, flags);
    if ( alloc_index != MAX_MFN_ALLOC )
        mfn_list[alloc_index++] = mfn;
    else if ( HYPERVISOR_dom_mem_op(MEMOP_decrease_reservation,
    spin_unlock_irqrestore(&mfn_lock, flags);
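/*
 * Kick the TX tasklet only when at least half of the pending slots are free
 * and some interface has queued work; the half-full threshold (presumably)
 * lets completions accumulate so that requests are processed in larger batches.
 */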
static inline void maybe_schedule_tx_action(void)
    if ( (NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
         !list_empty(&net_schedule_list) )
        tasklet_schedule(&net_tx_tasklet);
/*
 * A gross way of confirming the origin of an skb data page. The slab
 * allocator abuses a field in the page struct to cache the kmem_cache_t ptr.
 */
static inline int is_xen_skb(struct sk_buff *skb)
    extern kmem_cache_t *skbuff_cachep;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->lru.next;
#else
    kmem_cache_t *cp = (kmem_cache_t *)virt_to_page(skb->head)->list.next;
#endif
    return (cp == skbuff_cachep);
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
    netif_t *netif = netdev_priv(dev);

    ASSERT(skb->dev == dev);

    /* Drop the packet if the target domain has no receive buffers. */
    if ( !netif->active ||
         (netif->rx_req_cons == netif->rx->req_prod) ||
         ((netif->rx_req_cons-netif->rx_resp_prod) == NETIF_RX_RING_SIZE) )
    /*
     * We do not copy the packet unless:
     *  1. The data is shared; or
     *  2. The data is not allocated from our special cache.
     * NB. We also couldn't cope with fragmented packets, but we won't get
     *     any because we do not advertise the NETIF_F_SG feature.
     */
    if ( skb_shared(skb) || skb_cloned(skb) || !is_xen_skb(skb) )
        int hlen = skb->data - skb->head;
        struct sk_buff *nskb = dev_alloc_skb(hlen + skb->len);
        if ( unlikely(nskb == NULL) )
        skb_reserve(nskb, hlen);
        __skb_put(nskb, skb->len);
        (void)skb_copy_bits(skb, -hlen, nskb->data - hlen, skb->len + hlen);
        nskb->dev = skb->dev;

    netif->rx_req_cons++;

    skb_queue_tail(&rx_queue, skb);
    tasklet_schedule(&net_rx_tasklet);

    netif->stats.tx_dropped++;
static void xen_network_done_notify(void)
    static struct net_device *eth0_dev = NULL;
    if ( unlikely(eth0_dev == NULL) )
        eth0_dev = __dev_get_by_name("eth0");
    netif_rx_schedule(eth0_dev);
/*
 * Add the following to the poll() function in a NAPI driver (Tigon3 is an
 * example):
 *  if ( xen_network_done() )
 *      tg3_enable_ints(tp);
 */
int xen_network_done(void)
    return skb_queue_empty(&rx_queue);
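/*
 * RX path (backend -> frontend): for each queued skb the backing data page is
 * handed over to the guest domain (MMUEXT_REASSIGN_PAGE) and replaced locally
 * with a fresh machine frame from alloc_mfn(). The remap, reassignment and
 * machine-to-physical updates for all packets are batched into a single
 * multicall before responses are posted on the shared ring.
 */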
static void net_rx_action(unsigned long unused)
    u16 size, id, evtchn;
    multicall_entry_t *mcl;
    struct mmuext_op *mmuext;
    unsigned long vdata, mdata, new_mfn;
    struct sk_buff_head rxq;
    u16 notify_list[NETIF_RX_RING_SIZE];

    skb_queue_head_init(&rxq);
    while ( (skb = skb_dequeue(&rx_queue)) != NULL )
        netif = netdev_priv(skb->dev);
        vdata = (unsigned long)skb->data;
        mdata = virt_to_machine(vdata);

        /* Memory squeeze? Back off for an arbitrary while. */
        if ( (new_mfn = alloc_mfn()) == 0 )
            if ( net_ratelimit() )
                printk(KERN_WARNING "Memory squeeze in netback driver.\n");
            mod_timer(&net_timer, jiffies + HZ);
            skb_queue_head(&rx_queue, skb);

        /*
         * Set the new P2M table entry before reassigning the old data page.
         * Heed the comment in pgtable-2level.h:pte_page(). :-)
         */
        phys_to_machine_mapping[__pa(skb->data) >> PAGE_SHIFT] = new_mfn;

        mcl->op = __HYPERVISOR_update_va_mapping;
        mcl->args[0] = vdata;
        mcl->args[1] = (new_mfn << PAGE_SHIFT) | __PAGE_KERNEL;

        mcl->op = __HYPERVISOR_mmuext_op;
        mcl->args[0] = (unsigned long)mmuext;
        mcl->args[3] = netif->domid;

        mmuext->cmd = MMUEXT_REASSIGN_PAGE;
        mmuext->mfn = mdata >> PAGE_SHIFT;

        mmu->ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
        mmu->val = __pa(vdata) >> PAGE_SHIFT;

        __skb_queue_tail(&rxq, skb);

        /* Filled the batch queue? */
        if ( (mcl - rx_mcl) == ARRAY_SIZE(rx_mcl) )
    mcl->op = __HYPERVISOR_mmu_update;
    mcl->args[0] = (unsigned long)rx_mmu;
    mcl->args[1] = mmu - rx_mmu;
    mcl->args[3] = DOMID_SELF;

    mcl[-3].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
    if ( unlikely(HYPERVISOR_multicall(rx_mcl, mcl - rx_mcl) != 0) )
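    /*
     * Walk the multicall results in step with the skbs on rxq: for each
     * packet, mcl[0] is the update_va_mapping result and mcl[1] the page
     * reassignment result (the trailing mmu_update entry covered the
     * machine-to-physical updates for the whole batch).
     */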
    while ( (skb = __skb_dequeue(&rxq)) != NULL )
        netif = netdev_priv(skb->dev);
        size = skb->tail - skb->data;

        /* Rederive the machine addresses. */
        new_mfn = mcl[0].args[1] >> PAGE_SHIFT;
        mdata   = ((mmuext[0].mfn << PAGE_SHIFT) |
                   ((unsigned long)skb->data & ~PAGE_MASK));

        atomic_set(&(skb_shinfo(skb)->dataref), 1);
        skb_shinfo(skb)->nr_frags = 0;
        skb_shinfo(skb)->frag_list = NULL;

        netif->stats.tx_bytes += size;
        netif->stats.tx_packets++;

        /* The update_va_mapping() must not fail. */
        if ( unlikely(mcl[0].args[5] != 0) )

        /* Check the reassignment error code. */
        status = NETIF_RSP_OKAY;
        if ( unlikely(mcl[1].args[5] != 0) )
            DPRINTK("Failed MMU update transferring to DOM%u\n", netif->domid);
            free_mfn(mdata >> PAGE_SHIFT);
            status = NETIF_RSP_ERROR;

        evtchn = netif->evtchn;
        id = netif->rx->ring[MASK_NETIF_RX_IDX(netif->rx_resp_prod)].req.id;
        if ( make_rx_response(netif, id, status, mdata, size) &&
             (rx_notify[evtchn] == 0) )
            rx_notify[evtchn] = 1;
            notify_list[notify_nr++] = evtchn;
    while ( notify_nr != 0 )
        evtchn = notify_list[--notify_nr];
        rx_notify[evtchn] = 0;
        notify_via_evtchn(evtchn);

    /* More work to do? */
    if ( !skb_queue_empty(&rx_queue) && !timer_pending(&net_timer) )
        tasklet_schedule(&net_rx_tasklet);

    xen_network_done_notify();
static void net_alarm(unsigned long unused)
    tasklet_schedule(&net_rx_tasklet);

struct net_device_stats *netif_be_get_stats(struct net_device *dev)
    netif_t *netif = netdev_priv(dev);
    return &netif->stats;
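/*
 * An interface is considered to be on the TX scheduling list iff its
 * list.next pointer is non-NULL; remove_from_net_schedule_list() resets it
 * to NULL, so add_to_net_schedule_list_tail() can test it cheaply before
 * taking the lock.
 */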
static int __on_net_schedule_list(netif_t *netif)
    return netif->list.next != NULL;

static void remove_from_net_schedule_list(netif_t *netif)
    spin_lock_irq(&net_schedule_list_lock);
    if ( likely(__on_net_schedule_list(netif)) )
        list_del(&netif->list);
        netif->list.next = NULL;
    spin_unlock_irq(&net_schedule_list_lock);

static void add_to_net_schedule_list_tail(netif_t *netif)
    if ( __on_net_schedule_list(netif) )
    spin_lock_irq(&net_schedule_list_lock);
    if ( !__on_net_schedule_list(netif) && netif->active )
        list_add_tail(&netif->list, &net_schedule_list);
    spin_unlock_irq(&net_schedule_list_lock);
void netif_schedule_work(netif_t *netif)
    if ( (netif->tx_req_cons != netif->tx->req_prod) &&
         ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
        add_to_net_schedule_list_tail(netif);
        maybe_schedule_tx_action();

void netif_deschedule_work(netif_t *netif)
    remove_from_net_schedule_list(netif);
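/*
 * Credit-based rate limiting: each interface may send up to credit_bytes per
 * credit_usec interval. When the credit is exhausted, net_tx_action() arms
 * credit_timeout and this callback replenishes the allowance and puts the
 * interface back on the scheduling list. For example, credit_bytes = 100000
 * with credit_usec = 50000 caps the interface at roughly 2 MB/s.
 */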
static void tx_credit_callback(unsigned long data)
    netif_t *netif = (netif_t *)data;
    netif->remaining_credit = netif->credit_bytes;
    netif_schedule_work(netif);
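/*
 * TX path (frontend -> backend), run from the net_tx_tasklet. Phase 1 tears
 * down completed transmits queued on dealloc_ring: the foreign mapping at
 * MMAP_VADDR(pending_idx) is torn down via a batched update_va_mapping
 * multicall, a TX response is posted, and the slot is returned to
 * pending_ring. Phase 2 pulls new requests from interfaces on
 * net_schedule_list, maps each guest data page with
 * update_va_mapping_otherdomain, copies up to PKT_PROT_LEN bytes of header
 * into a fresh skb, attaches any remainder as a page fragment, and hands the
 * skb to the network stack.
 */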
static void net_tx_action(unsigned long unused)
    struct list_head *ent;
    netif_tx_request_t txreq;
    multicall_entry_t *mcl;
    PEND_RING_IDX dc, dp;
    unsigned int data_len;

    if ( (dc = dealloc_cons) == (dp = dealloc_prod) )

        pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
        mcl[0].op = __HYPERVISOR_update_va_mapping;
        mcl[0].args[0] = MMAP_VADDR(pending_idx);

    mcl[-1].args[2] = UVMF_TLB_FLUSH|UVMF_ALL;
    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )

    while ( dealloc_cons != dp )
        /* The update_va_mapping() must not fail. */
        if ( unlikely(mcl[0].args[5] != 0) )

        pending_idx = dealloc_ring[MASK_PEND_IDX(dealloc_cons++)];

        netif = pending_tx_info[pending_idx].netif;

        make_tx_response(netif, pending_tx_info[pending_idx].req.id,

        pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;

        /*
         * Scheduling checks must happen after the above response is posted.
         * This avoids a possible race with a guest OS on another CPU if that
         * guest is testing against 'resp_prod' when deciding whether to notify
         * us when it queues additional packets.
         */
        if ( (netif->tx_req_cons != netif->tx->req_prod) &&
             ((netif->tx_req_cons-netif->tx_resp_prod) != NETIF_TX_RING_SIZE) )
            add_to_net_schedule_list_tail(netif);
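    /* Phase 2: map and deliver new transmit requests from scheduled netifs. */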
    while ( (NR_PENDING_REQS < MAX_PENDING_REQS) &&
            !list_empty(&net_schedule_list) )
        /* Get a netif from the list with work to do. */
        ent = net_schedule_list.next;
        netif = list_entry(ent, netif_t, list);

        remove_from_net_schedule_list(netif);

        i = netif->tx_req_cons;
        if ( (i == netif->tx->req_prod) ||
             ((i-netif->tx_resp_prod) == NETIF_TX_RING_SIZE) )

        rmb(); /* Ensure that we see the request before we copy it. */
        memcpy(&txreq, &netif->tx->ring[MASK_NETIF_TX_IDX(i)].req,

        /* Credit-based scheduling. */
        if ( txreq.size > netif->remaining_credit )
            unsigned long now = jiffies;
            unsigned long next_credit =
                netif->credit_timeout.expires +
                msecs_to_jiffies(netif->credit_usec / 1000);
            /* Timer could already be pending in some rare cases. */
            if ( timer_pending(&netif->credit_timeout) )

            /* Already passed the point at which we can replenish credit? */
            if ( time_after_eq(now, next_credit) )
                netif->credit_timeout.expires = now;
                netif->remaining_credit = netif->credit_bytes;

            /* Still too big to send right now? Then set a timer callback. */
            if ( txreq.size > netif->remaining_credit )
                netif->remaining_credit = 0;
                netif->credit_timeout.expires = next_credit;
                netif->credit_timeout.data = (unsigned long)netif;
                netif->credit_timeout.function = tx_credit_callback;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
                add_timer_on(&netif->credit_timeout, smp_processor_id());
#else
                add_timer(&netif->credit_timeout);
#endif

        netif->remaining_credit -= txreq.size;
        /*
         * Why the barrier? It ensures that the frontend sees updated req_cons
         * before we check for more work to schedule.
         */
        netif->tx->req_cons = ++netif->tx_req_cons;

        netif_schedule_work(netif);

        if ( unlikely(txreq.size < ETH_HLEN) ||
             unlikely(txreq.size > ETH_FRAME_LEN) )
            DPRINTK("Bad packet size: %d\n", txreq.size);
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
        /* The payload must not cross a page boundary, as it cannot fragment. */
        if ( unlikely(((txreq.addr & ~PAGE_MASK) + txreq.size) >= PAGE_SIZE) )
            DPRINTK("txreq.addr: %lx, size: %u, end: %lu\n",
                    txreq.addr, txreq.size,
                    (txreq.addr &~PAGE_MASK) + txreq.size);
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
        pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];

        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;

        if ( unlikely((skb = alloc_skb(data_len+16, GFP_ATOMIC)) == NULL) )
            DPRINTK("Can't allocate a skb in start_xmit.\n");
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);

        /* Packets passed to netif_rx() must have some headroom. */
        skb_reserve(skb, 16);

        mcl[0].op = __HYPERVISOR_update_va_mapping_otherdomain;
        mcl[0].args[0] = MMAP_VADDR(pending_idx);
        mcl[0].args[1] = (txreq.addr & PAGE_MASK) | __PAGE_KERNEL;
        mcl[0].args[3] = netif->domid;

        memcpy(&pending_tx_info[pending_idx].req, &txreq, sizeof(txreq));
        pending_tx_info[pending_idx].netif = netif;
        *((u16 *)skb->data) = pending_idx;
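        /*
         * The pending slot index is stashed at the head of the (still empty)
         * skb so that the post-multicall loop below can recover it; it is
         * overwritten when the packet header is copied in.
         */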
        __skb_queue_tail(&tx_queue, skb);

        /* Filled the batch queue? */
        if ( (mcl - tx_mcl) == ARRAY_SIZE(tx_mcl) )

    if ( unlikely(HYPERVISOR_multicall(tx_mcl, mcl - tx_mcl) != 0) )

    while ( (skb = __skb_dequeue(&tx_queue)) != NULL )
        pending_idx = *((u16 *)skb->data);
        netif = pending_tx_info[pending_idx].netif;
        memcpy(&txreq, &pending_tx_info[pending_idx].req, sizeof(txreq));

        /* Check the remap error code. */
        if ( unlikely(mcl[0].args[5] != 0) )
            DPRINTK("Bad page frame\n");
            make_tx_response(netif, txreq.id, NETIF_RSP_ERROR);
            pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;

        phys_to_machine_mapping[__pa(MMAP_VADDR(pending_idx)) >> PAGE_SHIFT] =
            FOREIGN_FRAME(txreq.addr >> PAGE_SHIFT);
        data_len = (txreq.size > PKT_PROT_LEN) ? PKT_PROT_LEN : txreq.size;

        __skb_put(skb, data_len);
               (void *)(MMAP_VADDR(pending_idx)|(txreq.addr&~PAGE_MASK)),

        if ( data_len < txreq.size )
            /* Append the packet payload as a fragment. */
            skb_shinfo(skb)->frags[0].page =
                virt_to_page(MMAP_VADDR(pending_idx));
            skb_shinfo(skb)->frags[0].size = txreq.size - data_len;
            skb_shinfo(skb)->frags[0].page_offset =
                (txreq.addr + data_len) & ~PAGE_MASK;
            skb_shinfo(skb)->nr_frags = 1;

            /* Schedule a response immediately. */
            netif_idx_release(pending_idx);

        skb->data_len = txreq.size - data_len;
        skb->len += skb->data_len;

        skb->dev = netif->dev;
        skb->protocol = eth_type_trans(skb, skb->dev);

        netif->stats.rx_bytes += txreq.size;
        netif->stats.rx_packets++;

        netif->dev->last_rx = jiffies;
static void netif_idx_release(u16 pending_idx)
    static spinlock_t _lock = SPIN_LOCK_UNLOCKED;

    spin_lock_irqsave(&_lock, flags);
    dealloc_ring[MASK_PEND_IDX(dealloc_prod++)] = pending_idx;
    spin_unlock_irqrestore(&_lock, flags);

    tasklet_schedule(&net_tx_tasklet);
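/*
 * Destructor hooked to each mapped foreign page via SetPageForeign() in
 * netback_init(): when the network stack drops the last reference, reset the
 * page count and queue the pending slot for cleanup by net_tx_action().
 */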
static void netif_page_release(struct page *page)
    u16 pending_idx = page - virt_to_page(mmap_vstart);

    /* Ready for next use. */
    set_page_count(page, 1);

    netif_idx_release(pending_idx);
irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
    netif_t *netif = dev_id;
    if ( tx_work_exists(netif) )
        add_to_net_schedule_list_tail(netif);
        maybe_schedule_tx_action();
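/*
 * Responses are written straight into the shared ring. The frontend is
 * notified through its event channel only when the new producer index matches
 * the 'event' threshold the frontend advertised, so interrupts are raised
 * only when the guest actually asked for them.
 */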
static void make_tx_response(netif_t *netif,
    NETIF_RING_IDX i = netif->tx_resp_prod;
    netif_tx_response_t *resp;

    resp = &netif->tx->ring[MASK_NETIF_TX_IDX(i)].resp;

    netif->tx->resp_prod = netif->tx_resp_prod = ++i;

    mb(); /* Update producer before checking event threshold. */
    if ( i == netif->tx->event )
        notify_via_evtchn(netif->evtchn);

static int make_rx_response(netif_t *netif,
    NETIF_RING_IDX i = netif->rx_resp_prod;
    netif_rx_response_t *resp;

    resp = &netif->rx->ring[MASK_NETIF_RX_IDX(i)].resp;
    resp->status = (s16)size;
        resp->status = (s16)st;
    netif->rx->resp_prod = netif->rx_resp_prod = ++i;

    mb(); /* Update producer before checking event threshold. */
    return (i == netif->rx->event);
static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
    struct list_head *ent;

    printk(KERN_ALERT "netif_schedule_list:\n");
    spin_lock_irq(&net_schedule_list_lock);

    list_for_each ( ent, &net_schedule_list )
        netif = list_entry(ent, netif_t, list);
        printk(KERN_ALERT " %d: private(rx_req_cons=%08x rx_resp_prod=%08x\n",
               i, netif->rx_req_cons, netif->rx_resp_prod);
        printk(KERN_ALERT "   tx_req_cons=%08x tx_resp_prod=%08x)\n",
               netif->tx_req_cons, netif->tx_resp_prod);
        printk(KERN_ALERT "   shared(rx_req_prod=%08x rx_resp_prod=%08x\n",
               netif->rx->req_prod, netif->rx->resp_prod);
        printk(KERN_ALERT "   rx_event=%08x tx_req_prod=%08x\n",
               netif->rx->event, netif->tx->req_prod);
        printk(KERN_ALERT "   tx_resp_prod=%08x, tx_event=%08x)\n",
               netif->tx->resp_prod, netif->tx->event);

    spin_unlock_irq(&net_schedule_list_lock);
    printk(KERN_ALERT " ** End of netif_schedule_list **\n");
static int __init netback_init(void)
    if ( !(xen_start_info.flags & SIF_NET_BE_DOMAIN) &&
         !(xen_start_info.flags & SIF_INITDOMAIN) )

    printk("Initialising Xen netif backend\n");

    /* We can increase reservation by this much in net_rx_action(). */
    balloon_update_driver_allowance(NETIF_RX_RING_SIZE);

    skb_queue_head_init(&rx_queue);
    skb_queue_head_init(&tx_queue);

    init_timer(&net_timer);
    net_timer.function = net_alarm;

    netif_interface_init();

    if ( (mmap_vstart = allocate_empty_lowmem_region(MAX_PENDING_REQS)) == 0 )

    for ( i = 0; i < MAX_PENDING_REQS; i++ )
        page = virt_to_page(MMAP_VADDR(i));
        set_page_count(page, 1);
        SetPageForeign(page, netif_page_release);

    pending_prod = MAX_PENDING_REQS;
    for ( i = 0; i < MAX_PENDING_REQS; i++ )

    spin_lock_init(&net_schedule_list_lock);
    INIT_LIST_HEAD(&net_schedule_list);

    (void)request_irq(bind_virq_to_irq(VIRQ_DEBUG),
                      netif_be_dbg, SA_SHIRQ,
                      "net-be-dbg", &netif_be_dbg);

static void netback_cleanup(void)

module_init(netback_init);
module_exit(netback_cleanup);