/******************************************************************************
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/arp.h>
#include <net/route.h>
#include <asm/hypercall.h>
#include <asm/uaccess.h>
#include <xen/evtchn.h>
#include <xen/xenbus.h>
#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/balloon.h>
#include <asm/maddr.h>
#include <xen/interface/grant_table.h>
#include <xen/gnttab.h>

/*
 * Mutually-exclusive module options to select receive data path:
 *  rx_copy : Packets are copied by network backend into local memory
 *  rx_flip : Page containing packet data is transferred to our ownership
 * For fully-virtualised guests there is no option - copying must be used.
 * For paravirtualised guests, flipping is the default.
 */
#ifdef CONFIG_XEN
static int MODPARM_rx_copy = 0;
module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
static int MODPARM_rx_flip = 0;
module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
#else
static const int MODPARM_rx_copy = 1;
static const int MODPARM_rx_flip = 0;
#endif

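/*
 * For example, loading this module with "rx_copy=1" requests the copying
 * receive path; netfront_probe() below honours it only if the backend
 * advertises feature-rx-copy.  Passing both rx_copy=1 and rx_flip=1 is
 * rejected in netif_init().
 */
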
#define RX_COPY_THRESHOLD 256

/* If we don't have GSO, fake things up so that we never try to use it. */
#if defined(NETIF_F_GSO)
#define HAVE_GSO	1
#define HAVE_TSO	1 /* TSO is a subset of GSO */
static inline void dev_disable_gso_features(struct net_device *dev)
{
	/* Turn off all GSO bits except ROBUST. */
	dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
	dev->features |= NETIF_F_GSO_ROBUST;
}
#elif defined(NETIF_F_TSO)
#define HAVE_TSO	1
#define gso_size tso_size
#define gso_segs tso_segs
static inline void dev_disable_gso_features(struct net_device *dev)
{
	/* Turn off all TSO bits. */
	dev->features &= ~NETIF_F_TSO;
}

static inline int skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->tso_size;
}

static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return (features & NETIF_F_TSO);
}

static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_HW));
}
#else
#define netif_needs_gso(dev, skb)	0
#define dev_disable_gso_features(dev)	((void)0)
#endif

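/*
 * The masking in dev_disable_gso_features() relies on all GSO feature
 * flags occupying bit positions at or above NETIF_F_GSO_SHIFT:
 * (1 << NETIF_F_GSO_SHIFT) - 1 has ones only in the bits below the GSO
 * range, so ANDing with it clears every GSO bit while preserving the
 * ordinary feature bits (e.g. with NETIF_F_GSO_SHIFT == 16 the mask is
 * 0x0000ffff).
 */
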
#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)

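/*
 * __RING_SIZE() rounds the number of slots that fit in one page down to a
 * power of two; with 4KB pages both rings work out to 256 slots.  The
 * power-of-two guarantee is what lets the index calculations below use a
 * cheap mask of (ring size - 1) instead of a modulus.
 */
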
struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct net_device_stats stats;

	struct netif_tx_front_ring tx;
	struct netif_rx_front_ring rx;

	spinlock_t tx_lock;
	spinlock_t rx_lock;

	unsigned int handle;
	unsigned int evtchn, irq;
	unsigned int copying_receiver;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs
	 * is an index into a chain of free entries.
	 */
	struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];

#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	struct xenbus_device *xbdev;
	int tx_ring_ref;
	int rx_ring_ref;
	u8 mac[ETH_ALEN];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
};

struct netfront_rx_info {
	struct netif_rx_response rx;
	struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

/*
 * Access macros for acquiring and freeing slots in tx_skbs[].
 */

static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
{
	list[id] = list[0];
	list[0]  = (void *)(unsigned long)id;
}

static inline unsigned short get_id_from_freelist(struct sk_buff **list)
{
	unsigned int id = (unsigned int)(unsigned long)list[0];
	list[0] = list[id];
	return id;
}

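/*
 * Worked example of the freelist encoding: create_netdev() initialises
 * tx_skbs[i] = i + 1 for every i, so entry 0 points at 1, 1 at 2, and so
 * on.  get_id_from_freelist() then yields id 1 and leaves tx_skbs[0]
 * pointing at 2; add_id_to_freelist(list, 1) pushes 1 back onto the head
 * of the chain.  Indices and skb pointers can share the array because a
 * freelist index is always below PAGE_OFFSET while a real kernel pointer
 * never is (see the recovery comment in network_connect()).
 */
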
static inline int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
						RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}

static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
					    RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

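/*
 * Note that xennet_rxidx() is correct only because NET_RX_RING_SIZE is a
 * power of two (a __RING_SIZE() guarantee): with a 256-slot ring, for
 * instance, ring index 260 maps to slot 260 & 255 == 4.
 */
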
#define DPRINTK(fmt, args...)				\
	pr_debug("netfront (%s:%d) " fmt,		\
		 __FUNCTION__, __LINE__, ##args)
#define IPRINTK(fmt, args...)				\
	printk(KERN_INFO "netfront: " fmt, ##args)
#define WPRINTK(fmt, args...)				\
	printk(KERN_WARNING "netfront: " fmt, ##args)

static int talk_to_backend(struct xenbus_device *, struct netfront_info *);
static int setup_device(struct xenbus_device *, struct netfront_info *);
static struct net_device *create_netdev(int, int, struct xenbus_device *);

static void netfront_closing(struct xenbus_device *);

static void end_access(int, void *);
static void netif_disconnect_backend(struct netfront_info *);
static int open_netdev(struct netfront_info *);
static void close_netdev(struct netfront_info *);
static void netif_free(struct netfront_info *);

static void network_connect(struct net_device *);
static void network_tx_buf_gc(struct net_device *);
static void network_alloc_rx_buffers(struct net_device *);
static int send_fake_arp(struct net_device *);

static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while(0)
#endif

static inline int xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Connected state.
 */
static int __devinit netfront_probe(struct xenbus_device *dev,
				    const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;
	unsigned int handle, feature_rx_copy, feature_rx_flip, use_copy;

	err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%u", &handle);
	if (err != 1) {
		xenbus_dev_fatal(dev, err, "reading handle");
		return err;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "feature-rx-copy", "%u",
			   &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "feature-rx-flip", "%u",
			   &feature_rx_flip);
	/* Assume backends support flipping if the node is absent. */
	if (err != 1)
		feature_rx_flip = 1;

	/*
	 * Copy packets on receive path if:
	 *  (a) This was requested by user, and the backend supports it; or
	 *  (b) Flipping was requested, but this is unsupported by the backend.
	 */
	use_copy = (MODPARM_rx_copy && feature_rx_copy) ||
		(MODPARM_rx_flip && !feature_rx_flip);

	netdev = create_netdev(handle, use_copy, dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev->dev.driver_data = info;

	err = talk_to_backend(dev, info);
	if (err)
		goto fail_backend;

	err = open_netdev(info);
	if (err)
		goto fail_open;

	IPRINTK("Created netdev %s with %sing receive path.\n",
		netdev->name, info->copying_receiver ? "copy" : "flipp");

	return 0;

 fail_open:
	xennet_sysfs_delif(info->netdev);
	unregister_netdev(netdev);
 fail_backend:
	free_netdev(netdev);
	dev->dev.driver_data = NULL;
	return err;
}

/*
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	DPRINTK("%s\n", dev->nodename);

	netif_disconnect_backend(info);
	return talk_to_backend(dev, info);
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}

/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	err = xen_net_read_mac(dev, info->mac);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto out;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_device(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    info->copying_receiver);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

#ifdef HAVE_TSO
	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}
#endif

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	netif_free(info);
 out:
	return err;
}

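/*
 * On success, the frontend's xenbus directory contains entries of the
 * following form (values illustrative only):
 *
 *   tx-ring-ref = "8"           rx-ring-ref = "9"
 *   event-channel = "17"        request-rx-copy = "0" or "1"
 *   feature-rx-notify = "1"     feature-sg = "1"
 *   feature-gso-tcpv4 = "1"     (only when HAVE_TSO is defined)
 *
 * The backend reads these to map the shared rings and bind the event
 * channel.
 */
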
static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
{
	struct netif_tx_sring *txs;
	struct netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	info->irq = 0;

	txs = (struct netif_tx_sring *)get_zeroed_page(GFP_KERNEL);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0) {
		free_page((unsigned long)txs);
		goto fail;
	}
	info->tx_ring_ref = err;

	rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_KERNEL);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0) {
		free_page((unsigned long)rxs);
		goto fail;
	}
	info->rx_ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
	err = bind_evtchn_to_irqhandler(info->evtchn, netif_int,
					SA_SAMPLE_RANDOM, netdev->name,
					netdev);
	if (err < 0)
		goto fail;
	info->irq = err;
	return 0;

 fail:
	return err;
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev->dev.driver_data;
	struct net_device *netdev = np->netdev;

	DPRINTK("%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		network_connect(netdev);
		xenbus_switch_state(dev, XenbusStateConnected);
		(void)send_fake_arp(netdev);
		break;

	case XenbusStateClosing:
		netfront_closing(dev);
		break;
	}
}

/** Send a packet on a net device to encourage switches to learn the
 * MAC. We send a fake ARP reply.
 *
 * @param dev device
 * @return 0 on success, error code otherwise
 */
static int send_fake_arp(struct net_device *dev)
{
	struct sk_buff *skb;
	u32             src_ip, dst_ip;

	dst_ip = INADDR_BROADCAST;
	src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);

	/* No IP? Then nothing to do. */
	if (src_ip == 0)
		return 0;

	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
			 dst_ip, dev, src_ip,
			 /*dst_hw*/ NULL, /*src_hw*/ NULL,
			 /*target_hw*/ dev->dev_addr);
	if (skb == NULL)
		return -ENOMEM;

	return dev_queue_xmit(skb);
}

static int network_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	memset(&np->stats, 0, sizeof(np->stats));

	spin_lock(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		network_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev);
	}
	spin_unlock(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}

static inline int netfront_tx_slot_available(struct netfront_info *np)
{
	return RING_FREE_REQUESTS(&np->tx) >= MAX_SKB_FRAGS + 2;
}

static inline void network_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}

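/*
 * The MAX_SKB_FRAGS + 2 margin above is a conservative per-packet worst
 * case: one request slot per page fragment, plus a slot for the linear
 * header area, plus one for a possible extra-info segment (the GSO
 * metadata written in network_start_xmit()).  Waking the queue with fewer
 * free slots could let a maximally-fragmented skb overrun the ring.
 */
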
static void network_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'prod'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id  = txrsp->id;
			skb = np->tx_skbs[id];
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "network_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	network_maybe_wake_tx(dev);
}

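/*
 * Example of the rsp_event arithmetic above: if prod == 100 and
 * req_prod == 120 there are 20 requests still in flight, so rsp_event
 * becomes 100 + (20 >> 1) + 1 == 111.  That asks the backend for an
 * interrupt once roughly half the outstanding requests have completed,
 * rather than on every individual response.
 */
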
static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	netif_rx_schedule(dev);
}

static void network_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	struct xen_memory_reservation reservation;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	int nr_flips;
	netif_rx_request_t *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourselves and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		/*
		 * Allocate an skb and a page. Do not use __dev_alloc_skb as
		 * that will allocate page-sized buffers which is not
		 * necessary here.
		 * 16 bytes added as necessary headroom for netif_receive_skb.
		 */
		skb = alloc_skb(RX_COPY_THRESHOLD + 16,
				GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_reserve(skb, 16); /* mimic dev_alloc_skb() */
		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (nr_flips = i = 0; ; i++) {
		if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		if (!np->copying_receiver) {
			gnttab_grant_foreign_transfer_ref(ref,
							  np->xbdev->otherend_id,
							  pfn);
			np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn);
			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Remove this page before passing
				 * back to Xen. */
				set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
				MULTI_update_va_mapping(np->rx_mcl+i,
							(unsigned long)vaddr,
							__pte(0), 0);
			}
			nr_flips++;
		} else {
			gnttab_grant_foreign_access_ref(ref,
							np->xbdev->otherend_id,
							pfn_to_mfn(pfn),
							0);
		}

		req->id = id;
		req->gref = ref;
	}

	if (nr_flips != 0) {
		/* Tell the balloon driver what is going on. */
		balloon_update_driver_allowance(i);

		set_xen_guest_handle(reservation.extent_start,
				     np->rx_pfn_array);
		reservation.nr_extents   = nr_flips;
		reservation.extent_order = 0;
		reservation.address_bits = 0;
		reservation.domid        = DOMID_SELF;

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* After all PTEs have been zapped, flush the TLB. */
			np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
				UVMF_TLB_FLUSH|UVMF_ALL;

			/* Give away a batch of pages. */
			np->rx_mcl[i].op = __HYPERVISOR_memory_op;
			np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
			np->rx_mcl[i].args[1] = (unsigned long)&reservation;

			/* Zap PTEs and give away pages in one big
			 * multicall. */
			(void)HYPERVISOR_multicall(np->rx_mcl, i+1);

			/* Check return status of HYPERVISOR_memory_op(). */
			if (unlikely(np->rx_mcl[i].result != i))
				panic("Unable to reduce memory reservation\n");
		} else {
			if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
						 &reservation) != i)
				panic("Unable to reduce memory reservation\n");
		}
	} else {
		wmb();
	}

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->irq);
}

static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it between pages. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(np->tx_skbs);
		np->tx_skbs[id] = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		tx->flags |= NETTXF_more_data;

		id = get_id_from_freelist(np->tx_skbs);
		np->tx_skbs[id] = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = pfn_to_mfn(page_to_pfn(frag->page));
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = frag->page_offset;
		tx->size = frag->size;
		tx->flags = 0;
	}

	np->tx.req_prod_pvt = prod;
}

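/*
 * Example of the header-splitting loop above: with PAGE_SIZE == 4096, a
 * linear area of len == 2000 starting at offset == 3000 crosses a page
 * boundary, so the first request carries 4096 - 3000 == 1096 bytes and a
 * second request (offset 0) carries the remaining 904.  Every additional
 * request consumes its own ring id, grant reference and skb_get()
 * reference.
 */
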
static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct netif_tx_request *tx;
	struct netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);

	frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		dump_stack();
		goto drop;
	}

	spin_lock_irq(&np->tx_lock);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(dev, skb))) {
		spin_unlock_irq(&np->tx_lock);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(np->tx_skbs);
	np->tx_skbs[id] = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id   = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;

	tx->flags = 0;
	extra = NULL;

	if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
#ifdef CONFIG_XEN
	if (skb->proto_data_valid) /* remote but checksummed? */
		tx->flags |= NETTXF_data_validated;
#endif

#ifdef HAVE_TSO
	if (skb_is_gso(skb)) {
		struct netif_extra_info *gso = (struct netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}
#endif

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->irq);

	network_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irq(&np->tx_lock);

	np->stats.tx_bytes += skb->len;
	np->stats.tx_packets++;

	return 0;

 drop:
	np->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}

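/*
 * Example of the slot estimate at the top of network_start_xmit(): a
 * 1500-byte linear area starting at in-page offset 3000 contributes
 * (3000 + 1500 + 4095) / 4096 == 2 slots on top of nr_frags.  Anything
 * beyond MAX_SKB_FRAGS + 1 is dropped with the "skb rides the rocket"
 * warning rather than being allowed to overrun the ring.
 */
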
static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);

	if (likely(netif_carrier_ok(dev))) {
		network_tx_buf_gc(dev);
		/* Under tx_lock: protects access to rx shared-ring indexes. */
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev);
	}

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}

static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

int xennet_get_extras(struct netfront_info *np,
		      struct netif_extra_info *extras, RING_IDX rp)
{
	struct netif_extra_info *extra;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				WPRINTK("Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				WPRINTK("Invalid extra type: %d\n",
					extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}

static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list,
				int *pages_flipped_p)
{
	int pages_flipped = *pages_flipped_p;
	struct mmu_update *mmu;
	struct multicall_entry *mcl;
	struct netif_rx_response *rx = &rinfo->rx;
	struct netif_extra_info *extras = rinfo->extras;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int frags = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		unsigned long mfn;

		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				WPRINTK("rx->offset: %x, size: %u\n",
					rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			WPRINTK("Bad rx response id %d.\n", rx->id);
			err = -EINVAL;
			goto next;
		}

		if (!np->copying_receiver) {
			/* Memory pressure, insufficient buffer
			 * headroom, ... */
			if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
				if (net_ratelimit())
					WPRINTK("Unfulfilled rx req "
						"(id=%d, st=%d).\n",
						rx->id, rx->status);
				xennet_move_rx_slot(np, skb, ref);
				err = -ENOMEM;
				goto next;
			}

			if (!xen_feature(XENFEAT_auto_translated_physmap)) {
				/* Remap the page. */
				struct page *page =
					skb_shinfo(skb)->frags[0].page;
				unsigned long pfn = page_to_pfn(page);
				void *vaddr = page_address(page);

				mcl = np->rx_mcl + pages_flipped;
				mmu = np->rx_mmu + pages_flipped;

				MULTI_update_va_mapping(mcl,
							(unsigned long)vaddr,
							pfn_pte_ma(mfn,
								   PAGE_KERNEL),
							0);
				mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
					| MMU_MACHPHYS_UPDATE;
				mmu->val = pfn;

				set_phys_to_machine(pfn, mfn);
			}
			pages_flipped++;
		} else {
			ret = gnttab_end_foreign_access_ref(ref, 0);
			BUG_ON(!ret);
		}

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (cons + frags == rp) {
			if (net_ratelimit())
				WPRINTK("Need more frags\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		skb = xennet_get_rx_skb(np, cons + frags);
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			WPRINTK("Too many frags\n");
		err = -E2BIG;
	}

	*pages_flipped_p = pages_flipped;

	return err;
}

static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	skb_frag_t *frag = shinfo->frags + nr_frags;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);

		frag->page = skb_shinfo(nskb)->frags[0].page;
		frag->page_offset = rx->offset;
		frag->size = rx->status;

		skb->data_len += rx->status;

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		frag++;
		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			WPRINTK("GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		if (net_ratelimit())
			WPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
		return -EINVAL;
	}

#ifdef HAVE_TSO
	skb_shinfo(skb)->gso_size = gso->u.gso.size;
#ifdef HAVE_GSO
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
#endif
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
#else
	if (net_ratelimit())
		WPRINTK("GSO unsupported by this kernel.\n");
	return -EINVAL;
#endif
}

static int netif_poll(struct net_device *dev, int *pbudget)
{
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct netif_rx_response *rx = &rinfo.rx;
	struct netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	struct multicall_entry *mcl;
	int work_done, budget, more_to_do = 1;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	unsigned int len;
	int pages_flipped = 0;
	int err;

	spin_lock(&np->rx_lock);

	if (unlikely(!netif_carrier_ok(dev))) {
		spin_unlock(&np->rx_lock);
		return 0;
	}

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	if ((budget = *pbudget) > dev->quota)
		budget = dev->quota;
	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = np->rx.rsp_cons, work_done = 0;
	     (i != rp) && (work_done < budget);
	     np->rx.rsp_cons = ++i, work_done++) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq,
					   &pages_flipped);

		if (unlikely(err)) {
err:
			i = np->rx.rsp_cons + skb_queue_len(&tmpq) - 1;
			work_done--;
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			np->stats.rx_errors++;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		skb->nh.raw = (void *)skb_shinfo(skb)->frags[0].page;
		skb->h.raw = skb->nh.raw + rx->offset;

		len = rx->status;
		if (len > RX_COPY_THRESHOLD)
			len = RX_COPY_THRESHOLD;
		skb_put(skb, len);

		if (rx->status > len) {
			skb_shinfo(skb)->frags[0].page_offset =
				rx->offset + len;
			skb_shinfo(skb)->frags[0].size = rx->status - len;
			skb->data_len = rx->status - len;
		} else {
			skb_shinfo(skb)->frags[0].page = NULL;
			skb_shinfo(skb)->nr_frags = 0;
		}

		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize must approximate the size of true data plus
		 * any supervisor overheads. Adding hypervisor overheads
		 * has been shown to significantly reduce achievable
		 * bandwidth with the default receive buffer size. It is
		 * therefore not wise to account for it here.
		 *
		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to
		 * RX_COPY_THRESHOLD + the supervisor overheads. Here, we
		 * add the size of the data pulled in xennet_fill_frags().
		 *
		 * We also adjust for any unused space in the main data
		 * area by subtracting (RX_COPY_THRESHOLD - len). This is
		 * especially important with drivers which split incoming
		 * packets into header and data, using only 66 bytes of
		 * the main data area (see the e1000 driver for example.)
		 * On such systems, without this last adjustment, our
		 * achievable receive throughput using the standard receive
		 * buffer size was cut by 25%(!!!).
		 */
		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
		skb->len += skb->data_len;

		/*
		 * Old backends do not assert data_validated but we
		 * can infer it from csum_blank so test both flags.
		 */
		if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;
#ifdef CONFIG_XEN
		skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE);
		skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
#endif
		np->stats.rx_packets++;
		np->stats.rx_bytes += skb->len;

		__skb_queue_tail(&rxq, skb);
	}

	if (pages_flipped) {
		/* Some pages are no longer absent... */
		balloon_update_driver_allowance(-pages_flipped);

		/* Do all the remapping work and M2P updates. */
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			mcl = np->rx_mcl + pages_flipped;
			mcl->op = __HYPERVISOR_mmu_update;
			mcl->args[0] = (unsigned long)np->rx_mmu;
			mcl->args[1] = pages_flipped;
			mcl->args[2] = 0;
			mcl->args[3] = DOMID_SELF;
			(void)HYPERVISOR_multicall(np->rx_mcl,
						   pages_flipped + 1);
		}
	}

	while ((skb = __skb_dequeue(&errq)))
		kfree_skb(skb);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		struct page *page = (struct page *)skb->nh.raw;
		void *vaddr = page_address(page);

		memcpy(skb->data, vaddr + (skb->h.raw - skb->nh.raw),
		       skb_headlen(skb));

		if (page != skb_shinfo(skb)->frags[0].page)
			__free_page(page);

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		/* Pass it up. */
		netif_receive_skb(skb);
		dev->last_rx = jiffies;
	}

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	network_alloc_rx_buffers(dev);

	*pbudget   -= work_done;
	dev->quota -= work_done;

	if (work_done < budget) {
		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__netif_rx_complete(dev);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return more_to_do;
}

static void netif_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 1; i <= NET_TX_RING_SIZE; i++) {
		if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
			continue;

		skb = np->tx_skbs[i];
		gnttab_end_foreign_access_ref(
			np->grant_tx_ref[i], GNTMAP_readonly);
		gnttab_release_grant_reference(
			&np->gref_tx_head, np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void netif_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update      *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	if (np->copying_receiver) {
		printk("%s: fix me for copying receiver.\n", __FUNCTION__);
		return;
	}

	skb_queue_head_init(&free_list);

	spin_lock(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;
		add_id_to_freelist(np->rx_skbs, id);

		if (0 == mfn) {
			struct page *page = skb_shinfo(skb)->frags[0].page;
			balloon_release_driver_page(page);
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			struct page *page = skb_shinfo(skb)->frags[0].page;
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						pfn_pte_ma(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	printk("%s: %d xfer, %d noxfer, %d unused\n",
	       __FUNCTION__, xfer, noxfer, unused);

	if (xfer) {
		/* Some pages are no longer absent... */
		balloon_update_driver_allowance(-xfer);

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Do all the remapping work and M2P updates. */
			mcl->op = __HYPERVISOR_mmu_update;
			mcl->args[0] = (unsigned long)np->rx_mmu;
			mcl->args[1] = mmu - np->rx_mmu;
			mcl->args[2] = 0;
			mcl->args[3] = DOMID_SELF;
			mcl++;
			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
		}
	}

	while ((skb = __skb_dequeue(&free_list)) != NULL)
		dev_kfree_skb(skb);

	spin_unlock(&np->rx_lock);
}

static int network_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	return 0;
}

static struct net_device_stats *network_get_stats(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	return &np->stats;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static int xennet_set_sg(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	} else if (dev->mtu > ETH_DATA_LEN)
		dev->mtu = ETH_DATA_LEN;

	return ethtool_op_set_sg(dev, data);
}

static int xennet_set_tso(struct net_device *dev, u32 data)
{
#ifdef HAVE_TSO
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	}

	return ethtool_op_set_tso(dev, data);
#else
	return -ENOSYS;
#endif
}

static void xennet_set_features(struct net_device *dev)
{
	dev_disable_gso_features(dev);
	xennet_set_sg(dev, 0);

	/* We need checksum offload to enable scatter/gather and TSO. */
	if (!(dev->features & NETIF_F_IP_CSUM))
		return;

	if (!xennet_set_sg(dev, 1))
		xennet_set_tso(dev, 1);
}

static void network_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx;
	struct sk_buff *skb;
	grant_ref_t ref;
	netif_rx_request_t *req;

	xennet_set_features(dev);

	spin_lock_irq(&np->tx_lock);
	spin_lock(&np->rx_lock);

	/*
	 * Recovery procedure:
	 *  NB. Freelist index entries are always going to be less than
	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 *  greater than PAGE_OFFSET: we use this property to distinguish
	 *  them.
	 */

	/* Step 1: Discard all pending TX packet fragments. */
	netif_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		if (!np->rx_skbs[i])
			continue;

		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		if (!np->copying_receiver) {
			gnttab_grant_foreign_transfer_ref(
				ref, np->xbdev->otherend_id,
				page_to_pfn(skb_shinfo(skb)->frags->page));
		} else {
			gnttab_grant_foreign_access_ref(
				ref, np->xbdev->otherend_id,
				pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
						       frags->page)),
				0);
		}
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(dev);
	notify_remote_via_irq(np->irq);
	network_tx_buf_gc(dev);
	network_alloc_rx_buffers(dev);

	spin_unlock(&np->rx_lock);
	spin_unlock_irq(&np->tx_lock);
}

static void netif_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_release_tx_bufs(np);
	netif_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}

static struct ethtool_ops network_ethtool_ops =
{
	.get_tx_csum = ethtool_op_get_tx_csum,
	.set_tx_csum = ethtool_op_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = xennet_set_sg,
	.get_tso = ethtool_op_get_tso,
	.set_tso = xennet_set_tso,
	.get_link = ethtool_op_get_link,
};

#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct class_device *cd, char *buf)
{
	struct net_device *netdev = container_of(cd, struct net_device,
						 class_dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}

static ssize_t store_rxbuf_min(struct class_device *cd,
			       const char *buf, size_t len)
{
	struct net_device *netdev = container_of(cd, struct net_device,
						 class_dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	network_alloc_rx_buffers(netdev);

	spin_unlock(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_max(struct class_device *cd, char *buf)
{
	struct net_device *netdev = container_of(cd, struct net_device,
						 class_dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}

static ssize_t store_rxbuf_max(struct class_device *cd,
			       const char *buf, size_t len)
{
	struct net_device *netdev = container_of(cd, struct net_device,
						 class_dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	network_alloc_rx_buffers(netdev);

	spin_unlock(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf)
{
	struct net_device *netdev = container_of(cd, struct net_device,
						 class_dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}

static const struct class_device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};

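/*
 * These attributes appear under the device's class directory, typically
 * /sys/class/net/<ifname>/ (the exact path varies by kernel version).
 * For example, "echo 128 > /sys/class/net/eth0/rxbuf_min" raises the
 * minimum fill target; store_rxbuf_min() above clamps the value to
 * [RX_MIN_TARGET, RX_MAX_TARGET].
 */
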
static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int error = 0;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		error = class_device_create_file(&netdev->class_dev,
						 &xennet_attrs[i]);
		if (error)
			goto fail;
	}
	return 0;

 fail:
	while (--i >= 0)
		class_device_remove_file(&netdev->class_dev,
					 &xennet_attrs[i]);
	return error;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		class_device_remove_file(&netdev->class_dev,
					 &xennet_attrs[i]);
	}
}

#endif /* CONFIG_SYSFS */

/*
 * Nothing to do here. Virtual interface is point-to-point and the
 * physical interface is probably promiscuous anyway.
 */
static void network_set_multicast_list(struct net_device *dev)
{
}

static struct net_device * __devinit
create_netdev(int handle, int copying_receiver, struct xenbus_device *dev)
{
	int i, err = 0;
	struct net_device *netdev = NULL;
	struct netfront_info *np = NULL;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev) {
		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
		       __FUNCTION__);
		return ERR_PTR(-ENOMEM);
	}

	np                   = netdev_priv(netdev);
	np->handle           = handle;
	np->xbdev            = dev;
	np->copying_receiver = copying_receiver;

	netif_carrier_off(netdev);

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	/* Initialise {tx,rx}_skbs as a free chain containing every entry. */
	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
		np->tx_skbs[i] = (void *)((unsigned long) i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->open            = network_open;
	netdev->hard_start_xmit = network_start_xmit;
	netdev->stop            = network_close;
	netdev->get_stats       = network_get_stats;
	netdev->poll            = netif_poll;
	netdev->set_multicast_list = network_set_multicast_list;
	netdev->uninit          = netif_uninit;
	netdev->change_mtu      = xennet_change_mtu;
	netdev->weight          = 64;
	netdev->features        = NETIF_F_IP_CSUM;

	SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
	SET_MODULE_OWNER(netdev);
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;
	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}

/*
 * We use this notifier to send out a fake ARP reply to reset switches and
 * router ARP caches when an IP interface is brought up on a VIF.
 */
static int
inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct in_ifaddr  *ifa = (struct in_ifaddr *)ptr;
	struct net_device *dev = ifa->ifa_dev->dev;

	/* UP event and is it one of our devices? */
	if (event == NETDEV_UP && dev->open == network_open)
		(void)send_fake_arp(dev);

	return NOTIFY_DONE;
}

/* ** Close down ** */


/*
 * Handle the change of state of the backend to Closing.  We must delete our
 * device-layer structures now, to ensure that writes are flushed through to
 * the backend.  Once this is done, we can switch to Closed in
 * acknowledgement.
 */
static void netfront_closing(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	DPRINTK("%s\n", dev->nodename);

	close_netdev(info);
	xenbus_frontend_closed(dev);
}


static int __devexit netfront_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	DPRINTK("%s\n", dev->nodename);

	netif_disconnect_backend(info);
	free_netdev(info->netdev);

	return 0;
}

static int open_netdev(struct netfront_info *info)
{
	int err;

	err = register_netdev(info->netdev);
	if (err) {
		printk(KERN_WARNING "%s: register_netdev err=%d\n",
		       __FUNCTION__, err);
		return err;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		/* This can be non-fatal: it only means no tuning parameters */
		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
		       __FUNCTION__, err);
	}

	return 0;
}

static void close_netdev(struct netfront_info *info)
{
	del_timer_sync(&info->rx_refill_timer);

	xennet_sysfs_delif(info->netdev);
	unregister_netdev(info->netdev);
}

static void netif_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_irq(&info->tx_lock);
	spin_lock(&info->rx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock(&info->rx_lock);
	spin_unlock_irq(&info->tx_lock);

	if (info->irq)
		unbind_from_irqhandler(info->irq, info->netdev);
	info->evtchn = info->irq = 0;

	end_access(info->tx_ring_ref, info->tx.sring);
	end_access(info->rx_ring_ref, info->rx.sring);
	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}


static void netif_free(struct netfront_info *info)
{
	netif_disconnect_backend(info);
	free_netdev(info->netdev);
}


static void end_access(int ref, void *page)
{
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

/* ** Driver registration ** */


static struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};


static struct xenbus_driver netfront = {
	.name = "vif",
	.owner = THIS_MODULE,
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = __devexit_p(netfront_remove),
	.resume = netfront_resume,
	.otherend_changed = backend_changed,
};


static struct notifier_block notifier_inetdev = {
	.notifier_call  = inetdev_notify,
	.next           = NULL,
	.priority       = 0
};

static int __init netif_init(void)
{
	if (!is_running_on_xen())
		return -ENODEV;

#ifdef CONFIG_XEN
	if (MODPARM_rx_flip && MODPARM_rx_copy) {
		WPRINTK("Cannot specify both rx_copy and rx_flip.\n");
		return -EINVAL;
	}

	if (!MODPARM_rx_flip && !MODPARM_rx_copy)
		MODPARM_rx_flip = 1; /* Default is to flip. */
#endif

	if (is_initial_xendomain())
		return 0;

	IPRINTK("Initialising virtual ethernet driver.\n");

	(void)register_inetaddr_notifier(&notifier_inetdev);

	return xenbus_register_frontend(&netfront);
}
module_init(netif_init);

static void __exit netif_exit(void)
{
	unregister_inetaddr_notifier(&notifier_inetdev);

	xenbus_unregister_driver(&netfront);
}
module_exit(netif_exit);

MODULE_LICENSE("Dual BSD/GPL");