1 /******************************************************************************
2 * arch/xen/drivers/netif/backend/interface.c
4 * Network-device interface management.
6 * Copyright (c) 2004-2005, Keir Fraser
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
34 #include <linux/ethtool.h>
35 #include <linux/rtnetlink.h>
/*
 * Module parameter 'queue_length':
 *
 * Enables queuing in the network stack when a client has run out of receive
 * descriptors. Although this feature can improve receive bandwidth by avoiding
 * packet loss, it can also result in packets sitting in the 'tx_queue' for
 * unbounded time. This is bad if those packets hold onto foreign resources.
 * For example, consider a packet that holds onto resources belonging to the
 * guest for which it is queued (e.g., packet received on vif1.0, destined for
 * vif1.1 which is not activated in the guest): in this situation the guest
 * will never be destroyed, unless vif1.1 is taken down (which flushes the
 * 'tx_queue').
 *
 * Only set this parameter to non-zero value if you know what you are doing!
 */
/* Default 0: no backend-side queuing, packets are dropped when the frontend
 * has no receive descriptors available (see the warning above). Writable
 * only at module load time (perm 0: not visible in sysfs). */
static unsigned long netbk_queue_length = 0;
module_param_named(queue_length, netbk_queue_length, ulong, 0);
55 static void __netif_up(netif_t *netif)
57 enable_irq(netif->irq);
58 netif_schedule_work(netif);
61 static void __netif_down(netif_t *netif)
63 disable_irq(netif->irq);
64 netif_deschedule_work(netif);
65 del_timer_sync(&netif->credit_timeout);
68 static int net_open(struct net_device *dev)
70 netif_t *netif = netdev_priv(dev);
71 if (netif_carrier_ok(dev))
76 static int net_close(struct net_device *dev)
78 netif_t *netif = netdev_priv(dev);
79 if (netif_carrier_ok(dev))
84 static int netbk_change_mtu(struct net_device *dev, int mtu)
86 int max = netbk_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
94 static int netbk_set_sg(struct net_device *dev, u32 data)
97 netif_t *netif = netdev_priv(dev);
99 if (!(netif->features & NETIF_F_SG))
103 return ethtool_op_set_sg(dev, data);
106 static int netbk_set_tso(struct net_device *dev, u32 data)
109 netif_t *netif = netdev_priv(dev);
111 if (!(netif->features & NETIF_F_TSO))
115 return ethtool_op_set_tso(dev, data);
118 static struct ethtool_ops network_ethtool_ops =
120 .get_tx_csum = ethtool_op_get_tx_csum,
121 .set_tx_csum = ethtool_op_set_tx_csum,
122 .get_sg = ethtool_op_get_sg,
123 .set_sg = netbk_set_sg,
124 .get_tso = ethtool_op_get_tso,
125 .set_tso = netbk_set_tso,
126 .get_link = ethtool_op_get_link,
129 netif_t *netif_alloc(domid_t domid, unsigned int handle, u8 be_mac[ETH_ALEN])
132 struct net_device *dev;
134 char name[IFNAMSIZ] = {};
136 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
137 dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
139 DPRINTK("Could not create netif: out of memory\n");
140 return ERR_PTR(-ENOMEM);
143 netif_carrier_off(dev);
145 netif = netdev_priv(dev);
146 memset(netif, 0, sizeof(*netif));
147 netif->domid = domid;
148 netif->handle = handle;
149 atomic_set(&netif->refcnt, 1);
150 init_waitqueue_head(&netif->waiting_to_free);
153 netif->credit_bytes = netif->remaining_credit = ~0UL;
154 netif->credit_usec = 0UL;
155 init_timer(&netif->credit_timeout);
156 netif->credit_timeout.expires = jiffies;
158 dev->hard_start_xmit = netif_be_start_xmit;
159 dev->get_stats = netif_be_get_stats;
160 dev->open = net_open;
161 dev->stop = net_close;
162 dev->change_mtu = netbk_change_mtu;
163 dev->features = NETIF_F_IP_CSUM;
165 SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
167 dev->tx_queue_len = netbk_queue_length;
168 if (dev->tx_queue_len != 0)
169 printk(KERN_WARNING "netbk: WARNING: device '%s' has non-zero "
170 "queue length (%lu)!\n", dev->name, dev->tx_queue_len);
172 for (i = 0; i < ETH_ALEN; i++)
177 * Initialise a dummy MAC address. We choose the numerically
178 * largest non-broadcast address to prevent the address getting
179 * stolen by an Ethernet bridge for STP purposes.
180 * (FE:FF:FF:FF:FF:FF)
182 memset(dev->dev_addr, 0xFF, ETH_ALEN);
183 dev->dev_addr[0] &= ~0x01;
185 memcpy(dev->dev_addr, be_mac, ETH_ALEN);
188 err = register_netdevice(dev);
191 DPRINTK("Could not register new net device %s: err=%d\n",
197 DPRINTK("Successfully created netif\n");
201 static int map_frontend_pages(
202 netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
204 struct gnttab_map_grant_ref op;
207 gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
208 GNTMAP_host_map, tx_ring_ref, netif->domid);
210 lock_vm_area(netif->tx_comms_area);
211 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
212 unlock_vm_area(netif->tx_comms_area);
216 DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
220 netif->tx_shmem_ref = tx_ring_ref;
221 netif->tx_shmem_handle = op.handle;
223 gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
224 GNTMAP_host_map, rx_ring_ref, netif->domid);
226 lock_vm_area(netif->rx_comms_area);
227 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
228 unlock_vm_area(netif->rx_comms_area);
232 DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
236 netif->rx_shmem_ref = rx_ring_ref;
237 netif->rx_shmem_handle = op.handle;
242 static void unmap_frontend_pages(netif_t *netif)
244 struct gnttab_unmap_grant_ref op;
247 gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
248 GNTMAP_host_map, netif->tx_shmem_handle);
250 lock_vm_area(netif->tx_comms_area);
251 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
252 unlock_vm_area(netif->tx_comms_area);
255 gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
256 GNTMAP_host_map, netif->rx_shmem_handle);
258 lock_vm_area(netif->rx_comms_area);
259 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
260 unlock_vm_area(netif->rx_comms_area);
264 int netif_map(netif_t *netif, unsigned long tx_ring_ref,
265 unsigned long rx_ring_ref, unsigned int evtchn)
268 netif_tx_sring_t *txs;
269 netif_rx_sring_t *rxs;
270 struct evtchn_bind_interdomain bind_interdomain;
272 /* Already connected through? */
276 netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
277 if (netif->tx_comms_area == NULL)
279 netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
280 if (netif->rx_comms_area == NULL)
283 err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
287 bind_interdomain.remote_dom = netif->domid;
288 bind_interdomain.remote_port = evtchn;
290 err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
295 netif->evtchn = bind_interdomain.local_port;
297 netif->irq = bind_evtchn_to_irqhandler(
298 netif->evtchn, netif_be_int, 0, netif->dev->name, netif);
299 disable_irq(netif->irq);
301 txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
302 BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
304 rxs = (netif_rx_sring_t *)
305 ((char *)netif->rx_comms_area->addr);
306 BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
308 netif->rx_req_cons_peek = 0;
313 netif_carrier_on(netif->dev);
314 if (netif_running(netif->dev))
320 unmap_frontend_pages(netif);
322 free_vm_area(netif->rx_comms_area);
324 free_vm_area(netif->tx_comms_area);
328 static void netif_free(netif_t *netif)
330 atomic_dec(&netif->refcnt);
331 wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
334 unbind_from_irqhandler(netif->irq, netif);
336 unregister_netdev(netif->dev);
338 if (netif->tx.sring) {
339 unmap_frontend_pages(netif);
340 free_vm_area(netif->tx_comms_area);
341 free_vm_area(netif->rx_comms_area);
344 free_netdev(netif->dev);
347 void netif_disconnect(netif_t *netif)
349 if (netif_carrier_ok(netif->dev)) {
351 netif_carrier_off(netif->dev);
352 if (netif_running(netif->dev))