/*
 * Copyright (C) 2001 Lennert Buytenhek (buytenh@gnu.org) and
 * James Leu (jleu@mindspring.net).
 * Copyright (C) 2001 by various other people who didn't put their name here.
 * Licensed under the GPL.
 */
8 #include "linux/kernel.h"
9 #include "linux/netdevice.h"
10 #include "linux/rtnetlink.h"
11 #include "linux/skbuff.h"
12 #include "linux/socket.h"
13 #include "linux/spinlock.h"
14 #include "linux/module.h"
15 #include "linux/init.h"
16 #include "linux/etherdevice.h"
17 #include "linux/list.h"
18 #include "linux/inetdevice.h"
19 #include "linux/ctype.h"
20 #include "linux/bootmem.h"
21 #include "linux/ethtool.h"
22 #include "linux/platform_device.h"
23 #include "asm/uaccess.h"
24 #include "user_util.h"
25 #include "kern_util.h"
28 #include "mconsole_kern.h"
/* Name under which this driver's platform devices are registered. */
#define DRIVER_NAME "uml-netdev"

/* Protects "opened", the list of devices whose host fd is currently open. */
static DEFINE_SPINLOCK(opened_lock);
static LIST_HEAD(opened);
38 static int uml_net_rx(struct net_device *dev)
40 struct uml_net_private *lp = dev->priv;
44 /* If we can't allocate memory, try again next round. */
45 skb = dev_alloc_skb(dev->mtu);
47 lp->stats.rx_dropped++;
52 skb_put(skb, dev->mtu);
53 skb->mac.raw = skb->data;
54 pkt_len = (*lp->read)(lp->fd, &skb, lp);
57 skb_trim(skb, pkt_len);
58 skb->protocol = (*lp->protocol)(skb);
61 lp->stats.rx_bytes += skb->len;
62 lp->stats.rx_packets++;
/*
 * Workqueue callback: dev_close() cannot be called from interrupt
 * context, so device shutdown on read errors is deferred to here.
 */
static void uml_dev_close(void *dev)
{
	dev_close((struct net_device *) dev);
}
75 irqreturn_t uml_net_interrupt(int irq, void *dev_id, struct pt_regs *regs)
77 struct net_device *dev = dev_id;
78 struct uml_net_private *lp = dev->priv;
81 if(!netif_running(dev))
85 while((err = uml_net_rx(dev)) > 0) ;
87 DECLARE_WORK(close_work, uml_dev_close, dev);
89 "Device '%s' read returned %d, shutting it down\n",
91 /* dev_close can't be called in interrupt context, and takes
93 * And dev_close() can be safely called multiple times on the
94 * same device, since it tests for (dev->flags & IFF_UP). So
95 * there's no harm in delaying the device shutdown. */
96 schedule_work(&close_work);
99 reactivate_fd(lp->fd, UM_ETH_IRQ);
102 spin_unlock(&lp->lock);
106 static int uml_net_open(struct net_device *dev)
108 struct uml_net_private *lp = dev->priv;
111 spin_lock(&lp->lock);
119 dev_ip_addr(dev, &lp->mac[2]);
120 set_ether_mac(dev, lp->mac);
123 lp->fd = (*lp->open)(&lp->user);
129 err = um_request_irq(dev->irq, lp->fd, IRQ_READ, uml_net_interrupt,
130 IRQF_DISABLED | IRQF_SHARED, dev->name, dev);
132 printk(KERN_ERR "uml_net_open: failed to get irq(%d)\n", err);
137 lp->tl.data = (unsigned long) &lp->user;
138 netif_start_queue(dev);
140 /* clear buffer - it can happen that the host side of the interface
141 * is full when we get here. In this case, new data is never queued,
142 * SIGIOs never arrive, and the net never works.
144 while((err = uml_net_rx(dev)) > 0) ;
146 spin_unlock(&lp->lock);
148 spin_lock(&opened_lock);
149 list_add(&lp->list, &opened);
150 spin_unlock(&opened_lock);
154 if(lp->close != NULL) (*lp->close)(lp->fd, &lp->user);
157 spin_unlock(&lp->lock);
161 static int uml_net_close(struct net_device *dev)
163 struct uml_net_private *lp = dev->priv;
165 netif_stop_queue(dev);
166 spin_lock(&lp->lock);
168 free_irq(dev->irq, dev);
169 if(lp->close != NULL)
170 (*lp->close)(lp->fd, &lp->user);
173 spin_unlock(&lp->lock);
175 spin_lock(&opened_lock);
177 spin_unlock(&opened_lock);
182 static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
184 struct uml_net_private *lp = dev->priv;
188 netif_stop_queue(dev);
190 spin_lock_irqsave(&lp->lock, flags);
192 len = (*lp->write)(lp->fd, &skb, lp);
194 if(len == skb->len) {
195 lp->stats.tx_packets++;
196 lp->stats.tx_bytes += skb->len;
197 dev->trans_start = jiffies;
198 netif_start_queue(dev);
200 /* this is normally done in the interrupt when tx finishes */
201 netif_wake_queue(dev);
204 netif_start_queue(dev);
205 lp->stats.tx_dropped++;
208 netif_start_queue(dev);
209 printk(KERN_ERR "uml_net_start_xmit: failed(%d)\n", len);
212 spin_unlock_irqrestore(&lp->lock, flags);
219 static struct net_device_stats *uml_net_get_stats(struct net_device *dev)
221 struct uml_net_private *lp = dev->priv;
225 static void uml_net_set_multicast_list(struct net_device *dev)
227 if (dev->flags & IFF_PROMISC) return;
228 else if (dev->mc_count) dev->flags |= IFF_ALLMULTI;
229 else dev->flags &= ~IFF_ALLMULTI;
232 static void uml_net_tx_timeout(struct net_device *dev)
234 dev->trans_start = jiffies;
235 netif_wake_queue(dev);
238 static int uml_net_set_mac(struct net_device *dev, void *addr)
240 struct uml_net_private *lp = dev->priv;
241 struct sockaddr *hwaddr = addr;
243 spin_lock(&lp->lock);
244 memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
245 spin_unlock(&lp->lock);
250 static int uml_net_change_mtu(struct net_device *dev, int new_mtu)
252 struct uml_net_private *lp = dev->priv;
255 spin_lock(&lp->lock);
257 new_mtu = (*lp->set_mtu)(new_mtu, &lp->user);
266 spin_unlock(&lp->lock);
270 static void uml_net_get_drvinfo(struct net_device *dev,
271 struct ethtool_drvinfo *info)
273 strcpy(info->driver, DRIVER_NAME);
274 strcpy(info->version, "42");
277 static struct ethtool_ops uml_net_ethtool_ops = {
278 .get_drvinfo = uml_net_get_drvinfo,
279 .get_link = ethtool_op_get_link,
/*
 * Timer callback installed on lp->tl.  NOTE(review): in the upstream
 * driver the body is compiled out (#ifdef undef) - the connection
 * retry logic was never enabled; confirm before enabling it.
 */
void uml_net_user_timer_expire(unsigned long _conn)
{
#ifdef undef
	struct connection *conn = (struct connection *)_conn;

	dprintk(KERN_INFO "uml_net_user_timer_expire [%p]\n", conn);
	do_connect(conn);
#endif
}
292 static DEFINE_SPINLOCK(devices_lock);
293 static LIST_HEAD(devices);
295 static struct platform_driver uml_net_driver = {
300 static int driver_registered;
302 static int eth_configure(int n, void *init, char *mac,
303 struct transport *transport)
305 struct uml_net *device;
306 struct net_device *dev;
307 struct uml_net_private *lp;
310 size = transport->private_size + sizeof(struct uml_net_private) +
311 sizeof(((struct uml_net_private *) 0)->user);
313 device = kmalloc(sizeof(*device), GFP_KERNEL);
314 if (device == NULL) {
315 printk(KERN_ERR "eth_configure failed to allocate uml_net\n");
319 memset(device, 0, sizeof(*device));
320 INIT_LIST_HEAD(&device->list);
323 spin_lock(&devices_lock);
324 list_add(&device->list, &devices);
325 spin_unlock(&devices_lock);
327 if (setup_etheraddr(mac, device->mac))
328 device->have_mac = 1;
330 printk(KERN_INFO "Netdevice %d ", n);
331 if (device->have_mac)
332 printk("(%02x:%02x:%02x:%02x:%02x:%02x) ",
333 device->mac[0], device->mac[1],
334 device->mac[2], device->mac[3],
335 device->mac[4], device->mac[5]);
337 dev = alloc_etherdev(size);
339 printk(KERN_ERR "eth_configure: failed to allocate device\n");
344 /* This points to the transport private data. It's still clear, but we
345 * must memset it to 0 *now*. Let's help the drivers. */
349 if (!driver_registered) {
350 platform_driver_register(¨_net_driver);
351 driver_registered = 1;
354 device->pdev.name = DRIVER_NAME;
355 platform_device_register(&device->pdev);
356 SET_NETDEV_DEV(dev,&device->pdev.dev);
358 /* If this name ends up conflicting with an existing registered
359 * netdevice, that is OK, register_netdev{,ice}() will notice this
362 snprintf(dev->name, sizeof(dev->name), "eth%d", n);
365 (*transport->kern->init)(dev, init);
367 dev->mtu = transport->user->max_packet;
368 dev->open = uml_net_open;
369 dev->hard_start_xmit = uml_net_start_xmit;
370 dev->stop = uml_net_close;
371 dev->get_stats = uml_net_get_stats;
372 dev->set_multicast_list = uml_net_set_multicast_list;
373 dev->tx_timeout = uml_net_tx_timeout;
374 dev->set_mac_address = uml_net_set_mac;
375 dev->change_mtu = uml_net_change_mtu;
376 dev->ethtool_ops = ¨_net_ethtool_ops;
377 dev->watchdog_timeo = (HZ >> 1);
378 dev->irq = UM_ETH_IRQ;
381 err = register_netdevice(dev);
385 /* XXX: should we call ->remove() here? */
390 /* lp.user is the first four bytes of the transport data, which
391 * has already been initialized. This structure assignment will
392 * overwrite that, so we make sure that .user gets overwritten with
393 * what it already has.
396 *lp = ((struct uml_net_private)
397 { .list = LIST_HEAD_INIT(lp->list),
400 .mac = { 0xfe, 0xfd, 0x0, 0x0, 0x0, 0x0},
401 .have_mac = device->have_mac,
402 .protocol = transport->kern->protocol,
403 .open = transport->user->open,
404 .close = transport->user->close,
405 .remove = transport->user->remove,
406 .read = transport->kern->read,
407 .write = transport->kern->write,
408 .add_address = transport->user->add_address,
409 .delete_address = transport->user->delete_address,
410 .set_mtu = transport->user->set_mtu,
414 spin_lock_init(&lp->lock);
415 lp->tl.function = uml_net_user_timer_expire;
417 memcpy(lp->mac, device->mac, sizeof(lp->mac));
419 if (transport->user->init)
420 (*transport->user->init)(&lp->user, dev);
422 if (device->have_mac)
423 set_ether_mac(dev, device->mac);
428 static struct uml_net *find_device(int n)
430 struct uml_net *device;
431 struct list_head *ele;
433 spin_lock(&devices_lock);
434 list_for_each(ele, &devices){
435 device = list_entry(ele, struct uml_net, list);
436 if(device->index == n)
441 spin_unlock(&devices_lock);
445 static int eth_parse(char *str, int *index_out, char **str_out)
450 n = simple_strtoul(str, &end, 0);
452 printk(KERN_ERR "eth_setup: Failed to parse '%s'\n", str);
456 printk(KERN_ERR "eth_setup: device %d is negative\n", n);
462 "eth_setup: expected '=' after device number\n");
467 printk(KERN_ERR "eth_setup: Device %d already configured\n",
471 if(index_out) *index_out = n;
477 struct list_head list;
482 /* Filled in at boot time. Will need locking if the transports become
485 struct list_head transports = LIST_HEAD_INIT(transports);
487 /* Filled in during early boot */
488 struct list_head eth_cmd_line = LIST_HEAD_INIT(eth_cmd_line);
490 static int check_transport(struct transport *transport, char *eth, int n,
491 void **init_out, char **mac_out)
495 len = strlen(transport->name);
496 if(strncmp(eth, transport->name, len))
502 else if(*eth != '\0')
505 *init_out = kmalloc(transport->setup_size, GFP_KERNEL);
506 if(*init_out == NULL)
509 if(!transport->setup(eth, mac_out, *init_out)){
516 void register_transport(struct transport *new)
518 struct list_head *ele, *next;
519 struct eth_init *eth;
524 list_add(&new->list, &transports);
526 list_for_each_safe(ele, next, ð_cmd_line){
527 eth = list_entry(ele, struct eth_init, list);
528 match = check_transport(new, eth->init, eth->index, &init,
532 else if(init != NULL){
533 eth_configure(eth->index, init, mac, new);
536 list_del(ð->list);
540 static int eth_setup_common(char *str, int index)
542 struct list_head *ele;
543 struct transport *transport;
547 list_for_each(ele, &transports){
548 transport = list_entry(ele, struct transport, list);
549 if(!check_transport(transport, str, index, &init, &mac))
552 eth_configure(index, init, mac, transport);
560 static int eth_setup(char *str)
562 struct eth_init *new;
565 err = eth_parse(str, &n, &str);
568 new = alloc_bootmem(sizeof(new));
570 printk("eth_init : alloc_bootmem failed\n");
574 INIT_LIST_HEAD(&new->list);
578 list_add_tail(&new->list, ð_cmd_line);
582 __setup("eth", eth_setup);
583 __uml_help(eth_setup,
584 "eth[0-9]+=<transport>,<options>\n"
585 " Configure a network device.\n\n"
589 static int eth_init(void)
591 struct list_head *ele, *next;
592 struct eth_init *eth;
594 list_for_each_safe(ele, next, ð_cmd_line){
595 eth = list_entry(ele, struct eth_init, list);
597 if(eth_setup_common(eth->init, eth->index))
598 list_del(ð->list);
603 __initcall(eth_init);
606 static int net_config(char *str)
610 err = eth_parse(str, &n, &str);
613 str = kstrdup(str, GFP_KERNEL);
615 printk(KERN_ERR "net_config failed to strdup string\n");
618 err = !eth_setup_common(str, n);
/*
 * mconsole id handler: parse a bare device index out of *str.
 * Returns the index (also stored in *start_out/*end_out) or -1 if the
 * string is not a plain number.
 */
static int net_id(char **str, int *start_out, int *end_out)
{
	char *end;
	int n;

	n = simple_strtoul(*str, &end, 0);
	if((*end != '\0') || (end == *str))
		return -1;

	*start_out = n;
	*end_out = n;
	*str = end;
	return n;
}
639 static int net_remove(int n)
641 struct uml_net *device;
642 struct net_device *dev;
643 struct uml_net_private *lp;
645 device = find_device(n);
653 if(lp->remove != NULL) (*lp->remove)(&lp->user);
654 unregister_netdev(dev);
655 platform_device_unregister(&device->pdev);
657 list_del(&device->list);
663 static struct mc_device net_mc = {
665 .config = net_config,
668 .remove = net_remove,
671 static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
674 struct in_ifaddr *ifa = ptr;
675 struct net_device *dev = ifa->ifa_dev->dev;
676 struct uml_net_private *lp;
677 void (*proc)(unsigned char *, unsigned char *, void *);
678 unsigned char addr_buf[4], netmask_buf[4];
680 if(dev->open != uml_net_open) return(NOTIFY_DONE);
687 proc = lp->add_address;
690 proc = lp->delete_address;
694 memcpy(addr_buf, &ifa->ifa_address, sizeof(addr_buf));
695 memcpy(netmask_buf, &ifa->ifa_mask, sizeof(netmask_buf));
696 (*proc)(addr_buf, netmask_buf, &lp->user);
701 struct notifier_block uml_inetaddr_notifier = {
702 .notifier_call = uml_inetaddr_event,
705 static int uml_net_init(void)
707 struct list_head *ele;
708 struct uml_net_private *lp;
709 struct in_device *ip;
710 struct in_ifaddr *in;
712 mconsole_register_dev(&net_mc);
713 register_inetaddr_notifier(¨_inetaddr_notifier);
715 /* Devices may have been opened already, so the uml_inetaddr_notifier
716 * didn't get a chance to run for them. This fakes it so that
717 * addresses which have already been set up get handled properly.
719 list_for_each(ele, &opened){
720 lp = list_entry(ele, struct uml_net_private, list);
721 ip = lp->dev->ip_ptr;
722 if(ip == NULL) continue;
725 uml_inetaddr_event(NULL, NETDEV_UP, in);
733 __initcall(uml_net_init);
735 static void close_devices(void)
737 struct list_head *ele;
738 struct uml_net_private *lp;
740 list_for_each(ele, &opened){
741 lp = list_entry(ele, struct uml_net_private, list);
742 free_irq(lp->dev->irq, lp->dev);
743 if((lp->close != NULL) && (lp->fd >= 0))
744 (*lp->close)(lp->fd, &lp->user);
745 if(lp->remove != NULL) (*lp->remove)(&lp->user);
749 __uml_exitcall(close_devices);
751 int setup_etheraddr(char *str, unsigned char *addr)
759 addr[i] = simple_strtoul(str, &end, 16);
761 ((*end != ':') && (*end != ',') && (*end != '\0'))){
763 "setup_etheraddr: failed to parse '%s' "
764 "as an ethernet address\n", str);
771 "Attempt to assign a broadcast ethernet address to a "
772 "device disallowed\n");
778 void dev_ip_addr(void *d, unsigned char *bin_buf)
780 struct net_device *dev = d;
781 struct in_device *ip = dev->ip_ptr;
782 struct in_ifaddr *in;
784 if((ip == NULL) || ((in = ip->ifa_list) == NULL)){
785 printk(KERN_WARNING "dev_ip_addr - device not assigned an "
789 memcpy(bin_buf, &in->ifa_address, sizeof(in->ifa_address));
792 void set_ether_mac(void *d, unsigned char *addr)
794 struct net_device *dev = d;
796 memcpy(dev->dev_addr, addr, ETH_ALEN);
799 struct sk_buff *ether_adjust_skb(struct sk_buff *skb, int extra)
801 if((skb != NULL) && (skb_tailroom(skb) < extra)){
802 struct sk_buff *skb2;
804 skb2 = skb_copy_expand(skb, 0, extra, GFP_ATOMIC);
808 if(skb != NULL) skb_put(skb, extra);
812 void iter_addresses(void *d, void (*cb)(unsigned char *, unsigned char *,
816 struct net_device *dev = d;
817 struct in_device *ip = dev->ip_ptr;
818 struct in_ifaddr *in;
819 unsigned char address[4], netmask[4];
821 if(ip == NULL) return;
824 memcpy(address, &in->ifa_address, sizeof(address));
825 memcpy(netmask, &in->ifa_mask, sizeof(netmask));
826 (*cb)(address, netmask, arg);
831 int dev_netmask(void *d, void *m)
833 struct net_device *dev = d;
834 struct in_device *ip = dev->ip_ptr;
835 struct in_ifaddr *in;
845 *mask_out = in->ifa_mask;
849 void *get_output_buffer(int *len_out)
853 ret = (void *) __get_free_pages(GFP_KERNEL, 0);
854 if(ret) *len_out = PAGE_SIZE;
/* Release a buffer obtained from get_output_buffer(). */
void free_output_buffer(void *buffer)
{
	free_pages((unsigned long) buffer, 0);
}
864 int tap_setup_common(char *str, char *type, char **dev_name, char **mac_out,
869 remain = split_if_spec(str, dev_name, mac_out, gate_addr, NULL);
871 printk("tap_setup_common - Extra garbage on specification : "
879 unsigned short eth_protocol(struct sk_buff *skb)
881 return(eth_type_trans(skb, skb->dev));
/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only.  This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */